43 files changed, 935 insertions, 315 deletions
diff --git a/Documentation/networking/dsa/dsa.rst b/Documentation/networking/dsa/dsa.rst index 7b2e69cd7ef0..5c79740a533b 100644 --- a/Documentation/networking/dsa/dsa.rst +++ b/Documentation/networking/dsa/dsa.rst @@ -1104,12 +1104,11 @@ health of the network and for discovery of other nodes. In Linux, both HSR and PRP are implemented in the hsr driver, which instantiates a virtual, stackable network interface with two member ports. The driver only implements the basic roles of DANH (Doubly Attached Node -implementing HSR) and DANP (Doubly Attached Node implementing PRP); the roles -of RedBox and QuadBox are not implemented (therefore, bridging a hsr network -interface with a physical switch port does not produce the expected result). +implementing HSR), DANP (Doubly Attached Node implementing PRP) and RedBox +(allows non-HSR devices to connect to the ring via Interlink ports). -A driver which is able of offloading certain functions of a DANP or DANH should -declare the corresponding netdev features as indicated by the documentation at +A driver which is able of offloading certain functions should declare the +corresponding netdev features as indicated by the documentation at ``Documentation/networking/netdev-features.rst``. Additionally, the following methods must be implemented: @@ -1120,6 +1119,14 @@ methods must be implemented: - ``port_hsr_leave``: function invoked when a given switch port leaves a DANP/DANH and returns to normal operation as a standalone port. +Note that the ``NETIF_F_HW_HSR_DUP`` feature relies on transmission towards +multiple ports, which is generally available whenever the tagging protocol uses +the ``dsa_xmit_port_mask()`` helper function. If the helper is used, the HSR +offload feature should also be set. The ``dsa_port_simple_hsr_join()`` and +``dsa_port_simple_hsr_leave()`` methods can be used as generic implementations +of ``port_hsr_join`` and ``port_hsr_leave``, if this is the only supported +offload feature. + TODO ==== diff --git a/drivers/net/dsa/b53/b53_common.c b/drivers/net/dsa/b53/b53_common.c index 72c85cd34a4e..a1a177713d99 100644 --- a/drivers/net/dsa/b53/b53_common.c +++ b/drivers/net/dsa/b53/b53_common.c @@ -872,10 +872,7 @@ static void b53_enable_stp(struct b53_device *dev) static u16 b53_default_pvid(struct b53_device *dev) { - if (is5325(dev) || is5365(dev)) - return 1; - else - return 0; + return 0; } static bool b53_vlan_port_needs_forced_tagged(struct dsa_switch *ds, int port) @@ -1699,9 +1696,6 @@ static int b53_vlan_prepare(struct dsa_switch *ds, int port, { struct b53_device *dev = ds->priv; - if ((is5325(dev) || is5365(dev)) && vlan->vid == 0) - return -EOPNOTSUPP; - /* Port 7 on 7278 connects to the ASP's UniMAC which is not capable of * receiving VLAN tagged frames at all, we can still allow the port to * be configured for egress untagged. 
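Editor's note: the documentation hunk above introduces ``dsa_port_simple_hsr_join()`` and ``dsa_port_simple_hsr_leave()`` as generic implementations of ``port_hsr_join``/``port_hsr_leave`` for switches whose tagging protocol transmits via ``dsa_xmit_port_mask()``. A minimal sketch of how a driver opts in is below; the ``mysw_*`` names are hypothetical placeholders, while the two helpers and the ops structure are the ones used by the driver changes later in this series (hellcreek, gswip, mt7530, and others):

/* Hedged sketch: a driver whose tagger already supports transmission
 * towards multiple ports (dsa_xmit_port_mask()) and which offers no
 * other HSR offload can wire the generic helpers straight into its
 * dsa_switch_ops to advertise NETIF_F_HW_HSR_DUP.
 */
static const struct dsa_switch_ops mysw_switch_ops = {
	.get_tag_protocol	= mysw_get_tag_protocol,	/* hypothetical */
	.setup			= mysw_setup,			/* hypothetical */
	/* HSR frame duplication offload via the generic DSA helpers */
	.port_hsr_join		= dsa_port_simple_hsr_join,
	.port_hsr_leave		= dsa_port_simple_hsr_leave,
};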
@@ -1853,19 +1847,24 @@ static int b53_arl_rw_op(struct b53_device *dev, unsigned int op) static void b53_arl_read_entry_25(struct b53_device *dev, struct b53_arl_entry *ent, u8 idx) { + u8 vid_entry; u64 mac_vid; + b53_read8(dev, B53_ARLIO_PAGE, B53_ARLTBL_VID_ENTRY_25(idx), + &vid_entry); b53_read64(dev, B53_ARLIO_PAGE, B53_ARLTBL_MAC_VID_ENTRY(idx), &mac_vid); - b53_arl_to_entry_25(ent, mac_vid); + b53_arl_to_entry_25(ent, mac_vid, vid_entry); } static void b53_arl_write_entry_25(struct b53_device *dev, const struct b53_arl_entry *ent, u8 idx) { + u8 vid_entry; u64 mac_vid; - b53_arl_from_entry_25(&mac_vid, ent); + b53_arl_from_entry_25(&mac_vid, &vid_entry, ent); + b53_write8(dev, B53_ARLIO_PAGE, B53_ARLTBL_VID_ENTRY_25(idx), vid_entry); b53_write64(dev, B53_ARLIO_PAGE, B53_ARLTBL_MAC_VID_ENTRY(idx), mac_vid); } @@ -1966,8 +1965,12 @@ static int b53_arl_op(struct b53_device *dev, int op, int port, /* Perform a read for the given MAC and VID */ b53_write48(dev, B53_ARLIO_PAGE, B53_MAC_ADDR_IDX, mac); - if (!is5325m(dev)) - b53_write16(dev, B53_ARLIO_PAGE, B53_VLAN_ID_IDX, vid); + if (!is5325m(dev)) { + if (is5325(dev) || is5365(dev)) + b53_write8(dev, B53_ARLIO_PAGE, B53_VLAN_ID_IDX, vid); + else + b53_write16(dev, B53_ARLIO_PAGE, B53_VLAN_ID_IDX, vid); + } /* Issue a read operation for this MAC */ ret = b53_arl_rw_op(dev, 1); @@ -2115,20 +2118,12 @@ static void b53_arl_search_read_25(struct b53_device *dev, u8 idx, struct b53_arl_entry *ent) { u64 mac_vid; + u8 ext; + b53_read8(dev, B53_ARLIO_PAGE, B53_ARL_SRCH_RSLT_EXT_25, &ext); b53_read64(dev, B53_ARLIO_PAGE, B53_ARL_SRCH_RSTL_0_MACVID_25, &mac_vid); - b53_arl_to_entry_25(ent, mac_vid); -} - -static void b53_arl_search_read_65(struct b53_device *dev, u8 idx, - struct b53_arl_entry *ent) -{ - u64 mac_vid; - - b53_read64(dev, B53_ARLIO_PAGE, B53_ARL_SRCH_RSTL_0_MACVID_65, - &mac_vid); - b53_arl_to_entry_25(ent, mac_vid); + b53_arl_search_to_entry_25(ent, mac_vid, ext); } static void b53_arl_search_read_89(struct b53_device *dev, u8 idx, @@ -2742,12 +2737,6 @@ static const struct b53_arl_ops b53_arl_ops_25 = { .arl_search_read = b53_arl_search_read_25, }; -static const struct b53_arl_ops b53_arl_ops_65 = { - .arl_read_entry = b53_arl_read_entry_25, - .arl_write_entry = b53_arl_write_entry_25, - .arl_search_read = b53_arl_search_read_65, -}; - static const struct b53_arl_ops b53_arl_ops_89 = { .arl_read_entry = b53_arl_read_entry_89, .arl_write_entry = b53_arl_write_entry_89, @@ -2810,7 +2799,7 @@ static const struct b53_chip_data b53_switch_chips[] = { .arl_buckets = 1024, .imp_port = 5, .duplex_reg = B53_DUPLEX_STAT_FE, - .arl_ops = &b53_arl_ops_65, + .arl_ops = &b53_arl_ops_25, }, { .chip_id = BCM5389_DEVICE_ID, diff --git a/drivers/net/dsa/b53/b53_priv.h b/drivers/net/dsa/b53/b53_priv.h index 2bfd0e7c95c9..bd6849e5bb93 100644 --- a/drivers/net/dsa/b53/b53_priv.h +++ b/drivers/net/dsa/b53/b53_priv.h @@ -341,16 +341,18 @@ static inline void b53_arl_to_entry(struct b53_arl_entry *ent, } static inline void b53_arl_to_entry_25(struct b53_arl_entry *ent, - u64 mac_vid) + u64 mac_vid, u8 vid_entry) { memset(ent, 0, sizeof(*ent)); - ent->port = (mac_vid >> ARLTBL_DATA_PORT_ID_S_25) & - ARLTBL_DATA_PORT_ID_MASK_25; ent->is_valid = !!(mac_vid & ARLTBL_VALID_25); ent->is_age = !!(mac_vid & ARLTBL_AGE_25); ent->is_static = !!(mac_vid & ARLTBL_STATIC_25); u64_to_ether_addr(mac_vid, ent->mac); - ent->vid = mac_vid >> ARLTBL_VID_S_65; + ent->port = (mac_vid & ARLTBL_DATA_PORT_ID_MASK_25) >> + ARLTBL_DATA_PORT_ID_S_25; + if 
(is_unicast_ether_addr(ent->mac) && ent->port == B53_CPU_PORT) + ent->port = B53_CPU_PORT_25; + ent->vid = vid_entry; } static inline void b53_arl_to_entry_89(struct b53_arl_entry *ent, @@ -379,20 +381,22 @@ static inline void b53_arl_from_entry(u64 *mac_vid, u32 *fwd_entry, *fwd_entry |= ARLTBL_AGE; } -static inline void b53_arl_from_entry_25(u64 *mac_vid, +static inline void b53_arl_from_entry_25(u64 *mac_vid, u8 *vid_entry, const struct b53_arl_entry *ent) { *mac_vid = ether_addr_to_u64(ent->mac); - *mac_vid |= (u64)(ent->port & ARLTBL_DATA_PORT_ID_MASK_25) << - ARLTBL_DATA_PORT_ID_S_25; - *mac_vid |= (u64)(ent->vid & ARLTBL_VID_MASK_25) << - ARLTBL_VID_S_65; + if (is_unicast_ether_addr(ent->mac) && ent->port == B53_CPU_PORT_25) + *mac_vid |= (u64)B53_CPU_PORT << ARLTBL_DATA_PORT_ID_S_25; + else + *mac_vid |= ((u64)ent->port << ARLTBL_DATA_PORT_ID_S_25) & + ARLTBL_DATA_PORT_ID_MASK_25; if (ent->is_valid) *mac_vid |= ARLTBL_VALID_25; if (ent->is_static) *mac_vid |= ARLTBL_STATIC_25; if (ent->is_age) *mac_vid |= ARLTBL_AGE_25; + *vid_entry = ent->vid; } static inline void b53_arl_from_entry_89(u64 *mac_vid, u32 *fwd_entry, @@ -409,6 +413,24 @@ static inline void b53_arl_from_entry_89(u64 *mac_vid, u32 *fwd_entry, *fwd_entry |= ARLTBL_AGE_89; } +static inline void b53_arl_search_to_entry_25(struct b53_arl_entry *ent, + u64 mac_vid, u8 ext) +{ + memset(ent, 0, sizeof(*ent)); + ent->is_valid = !!(mac_vid & ARLTBL_VALID_25); + ent->is_age = !!(mac_vid & ARLTBL_AGE_25); + ent->is_static = !!(mac_vid & ARLTBL_STATIC_25); + u64_to_ether_addr(mac_vid, ent->mac); + ent->vid = (mac_vid & ARL_SRCH_RSLT_VID_MASK_25) >> + ARL_SRCH_RSLT_VID_S_25; + ent->port = (mac_vid & ARL_SRCH_RSLT_PORT_ID_MASK_25) >> + ARL_SRCH_RSLT_PORT_ID_S_25; + if (is_multicast_ether_addr(ent->mac) && (ext & ARL_SRCH_RSLT_EXT_MC_MII)) + ent->port |= BIT(B53_CPU_PORT_25); + else if (!is_multicast_ether_addr(ent->mac) && ent->port == B53_CPU_PORT) + ent->port = B53_CPU_PORT_25; +} + static inline void b53_arl_search_to_entry_63xx(struct b53_arl_entry *ent, u64 mac_vid, u16 fwd_entry) { diff --git a/drivers/net/dsa/b53/b53_regs.h b/drivers/net/dsa/b53/b53_regs.h index 69ebbec932f6..54a278db67c9 100644 --- a/drivers/net/dsa/b53/b53_regs.h +++ b/drivers/net/dsa/b53/b53_regs.h @@ -329,11 +329,9 @@ #define B53_ARLTBL_MAC_VID_ENTRY(n) ((0x10 * (n)) + 0x10) #define ARLTBL_MAC_MASK 0xffffffffffffULL #define ARLTBL_VID_S 48 -#define ARLTBL_VID_MASK_25 0xff #define ARLTBL_VID_MASK 0xfff #define ARLTBL_DATA_PORT_ID_S_25 48 -#define ARLTBL_DATA_PORT_ID_MASK_25 0xf -#define ARLTBL_VID_S_65 53 +#define ARLTBL_DATA_PORT_ID_MASK_25 GENMASK_ULL(53, 48) #define ARLTBL_AGE_25 BIT_ULL(61) #define ARLTBL_STATIC_25 BIT_ULL(62) #define ARLTBL_VALID_25 BIT_ULL(63) @@ -353,6 +351,9 @@ #define ARLTBL_STATIC_89 BIT(14) #define ARLTBL_VALID_89 BIT(15) +/* BCM5325/BCM565 ARL Table VID Entry N Registers (8 bit) */ +#define B53_ARLTBL_VID_ENTRY_25(n) ((0x2 * (n)) + 0x30) + /* Maximum number of bin entries in the ARL for all switches */ #define B53_ARLTBL_MAX_BIN_ENTRIES 4 @@ -376,10 +377,16 @@ #define B53_ARL_SRCH_RSLT_MACVID_89 0x33 #define B53_ARL_SRCH_RSLT_MACVID_63XX 0x34 -/* Single register search result on 5325 */ +/* Single register search result on 5325/5365 */ #define B53_ARL_SRCH_RSTL_0_MACVID_25 0x24 -/* Single register search result on 5365 */ -#define B53_ARL_SRCH_RSTL_0_MACVID_65 0x30 +#define ARL_SRCH_RSLT_PORT_ID_S_25 48 +#define ARL_SRCH_RSLT_PORT_ID_MASK_25 GENMASK_ULL(52, 48) +#define ARL_SRCH_RSLT_VID_S_25 53 +#define 
ARL_SRCH_RSLT_VID_MASK_25 GENMASK_ULL(60, 53) + +/* BCM5325/5365 Search result extend register (8 bit) */ +#define B53_ARL_SRCH_RSLT_EXT_25 0x2c +#define ARL_SRCH_RSLT_EXT_MC_MII BIT(2) /* ARL Search Data Result (32 bit) */ #define B53_ARL_SRCH_RSTL_0 0x68 diff --git a/drivers/net/dsa/hirschmann/hellcreek.c b/drivers/net/dsa/hirschmann/hellcreek.c index e0b4758ca583..dd5f263ab984 100644 --- a/drivers/net/dsa/hirschmann/hellcreek.c +++ b/drivers/net/dsa/hirschmann/hellcreek.c @@ -1926,6 +1926,8 @@ static const struct dsa_switch_ops hellcreek_ds_ops = { .port_vlan_filtering = hellcreek_vlan_filtering, .setup = hellcreek_setup, .teardown = hellcreek_teardown, + .port_hsr_join = dsa_port_simple_hsr_join, + .port_hsr_leave = dsa_port_simple_hsr_leave, }; static int hellcreek_probe(struct platform_device *pdev) diff --git a/drivers/net/dsa/lantiq/lantiq_gswip_common.c b/drivers/net/dsa/lantiq/lantiq_gswip_common.c index 122ccea4057b..9da39edf8f57 100644 --- a/drivers/net/dsa/lantiq/lantiq_gswip_common.c +++ b/drivers/net/dsa/lantiq/lantiq_gswip_common.c @@ -1652,6 +1652,8 @@ static const struct dsa_switch_ops gswip_switch_ops = { .get_sset_count = gswip_get_sset_count, .set_mac_eee = gswip_set_mac_eee, .support_eee = gswip_support_eee, + .port_hsr_join = dsa_port_simple_hsr_join, + .port_hsr_leave = dsa_port_simple_hsr_leave, }; void gswip_disable_switch(struct gswip_priv *priv) diff --git a/drivers/net/dsa/mt7530.c b/drivers/net/dsa/mt7530.c index 548b85befbf4..b9423389c2ef 100644 --- a/drivers/net/dsa/mt7530.c +++ b/drivers/net/dsa/mt7530.c @@ -3254,7 +3254,7 @@ static int mt7988_setup(struct dsa_switch *ds) return mt7531_setup_common(ds); } -const struct dsa_switch_ops mt7530_switch_ops = { +static const struct dsa_switch_ops mt7530_switch_ops = { .get_tag_protocol = mtk_get_tag_protocol, .setup = mt753x_setup, .preferred_default_local_cpu_port = mt753x_preferred_default_local_cpu_port, @@ -3290,8 +3290,9 @@ const struct dsa_switch_ops mt7530_switch_ops = { .set_mac_eee = mt753x_set_mac_eee, .conduit_state_change = mt753x_conduit_state_change, .port_setup_tc = mt753x_setup_tc, + .port_hsr_join = dsa_port_simple_hsr_join, + .port_hsr_leave = dsa_port_simple_hsr_leave, }; -EXPORT_SYMBOL_GPL(mt7530_switch_ops); static const struct phylink_mac_ops mt753x_phylink_mac_ops = { .mac_select_pcs = mt753x_phylink_mac_select_pcs, diff --git a/drivers/net/dsa/mt7530.h b/drivers/net/dsa/mt7530.h index 7e47cd9af256..3e0090bed298 100644 --- a/drivers/net/dsa/mt7530.h +++ b/drivers/net/dsa/mt7530.h @@ -939,7 +939,6 @@ static inline void INIT_MT7530_DUMMY_POLL(struct mt7530_dummy_poll *p, int mt7530_probe_common(struct mt7530_priv *priv); void mt7530_remove_common(struct mt7530_priv *priv); -extern const struct dsa_switch_ops mt7530_switch_ops; extern const struct mt753x_info mt753x_table[]; #endif /* __MT7530_H */ diff --git a/drivers/net/dsa/mv88e6060.c b/drivers/net/dsa/mv88e6060.c index 294312b58e4f..9c8ac14cd4f5 100644 --- a/drivers/net/dsa/mv88e6060.c +++ b/drivers/net/dsa/mv88e6060.c @@ -297,6 +297,8 @@ static const struct dsa_switch_ops mv88e6060_switch_ops = { .phy_read = mv88e6060_phy_read, .phy_write = mv88e6060_phy_write, .phylink_get_caps = mv88e6060_phylink_get_caps, + .port_hsr_join = dsa_port_simple_hsr_join, + .port_hsr_leave = dsa_port_simple_hsr_leave, }; static int mv88e6060_probe(struct mdio_device *mdiodev) diff --git a/drivers/net/dsa/ocelot/felix.c b/drivers/net/dsa/ocelot/felix.c index 20ab558fde24..9e5ede932b42 100644 --- a/drivers/net/dsa/ocelot/felix.c +++ 
b/drivers/net/dsa/ocelot/felix.c @@ -1233,6 +1233,7 @@ static int felix_port_enable(struct dsa_switch *ds, int port, { struct dsa_port *dp = dsa_to_port(ds, port); struct ocelot *ocelot = ds->priv; + struct felix *felix = ocelot_to_felix(ocelot); if (!dsa_port_is_user(dp)) return 0; @@ -1246,7 +1247,25 @@ static int felix_port_enable(struct dsa_switch *ds, int port, } } - return 0; + if (!dp->hsr_dev || felix->tag_proto == DSA_TAG_PROTO_OCELOT_8021Q) + return 0; + + return dsa_port_simple_hsr_join(ds, port, dp->hsr_dev, NULL); +} + +static void felix_port_disable(struct dsa_switch *ds, int port) +{ + struct dsa_port *dp = dsa_to_port(ds, port); + struct ocelot *ocelot = ds->priv; + struct felix *felix = ocelot_to_felix(ocelot); + + if (!dsa_port_is_user(dp)) + return; + + if (!dp->hsr_dev || felix->tag_proto == DSA_TAG_PROTO_OCELOT_8021Q) + return; + + dsa_port_simple_hsr_leave(ds, port, dp->hsr_dev); } static void felix_port_qos_map_init(struct ocelot *ocelot, int port) @@ -2232,6 +2251,52 @@ static void felix_get_mm_stats(struct dsa_switch *ds, int port, ocelot_port_get_mm_stats(ocelot, port, stats); } +/* Depending on port type, we may be able to support the offload later (with + * the "ocelot"/"seville" tagging protocols), or never. + * If we return 0, the dp->hsr_dev reference is kept for later; if we return + * -EOPNOTSUPP, it is cleared (which helps to not bother + * dsa_port_simple_hsr_leave() with an offload that didn't pass validation). + */ +static int felix_port_hsr_join(struct dsa_switch *ds, int port, + struct net_device *hsr, + struct netlink_ext_ack *extack) +{ + struct ocelot *ocelot = ds->priv; + struct felix *felix = ocelot_to_felix(ocelot); + + if (felix->tag_proto == DSA_TAG_PROTO_OCELOT_8021Q) { + int err; + + err = dsa_port_simple_hsr_validate(ds, port, hsr, extack); + if (err) + return err; + + NL_SET_ERR_MSG_MOD(extack, + "Offloading not supported with \"ocelot-8021q\""); + return 0; + } + + if (!(dsa_to_port(ds, port)->user->flags & IFF_UP)) + return 0; + + return dsa_port_simple_hsr_join(ds, port, hsr, extack); +} + +static int felix_port_hsr_leave(struct dsa_switch *ds, int port, + struct net_device *hsr) +{ + struct ocelot *ocelot = ds->priv; + struct felix *felix = ocelot_to_felix(ocelot); + + if (felix->tag_proto == DSA_TAG_PROTO_OCELOT_8021Q) + return 0; + + if (!(dsa_to_port(ds, port)->user->flags & IFF_UP)) + return 0; + + return dsa_port_simple_hsr_leave(ds, port, hsr); +} + static const struct phylink_mac_ops felix_phylink_mac_ops = { .mac_select_pcs = felix_phylink_mac_select_pcs, .mac_config = felix_phylink_mac_config, @@ -2262,6 +2327,7 @@ static const struct dsa_switch_ops felix_switch_ops = { .get_ts_info = felix_get_ts_info, .phylink_get_caps = felix_phylink_get_caps, .port_enable = felix_port_enable, + .port_disable = felix_port_disable, .port_fast_age = felix_port_fast_age, .port_fdb_dump = felix_fdb_dump, .port_fdb_add = felix_fdb_add, @@ -2318,6 +2384,8 @@ static const struct dsa_switch_ops felix_switch_ops = { .port_del_dscp_prio = felix_port_del_dscp_prio, .port_set_host_flood = felix_port_set_host_flood, .port_change_conduit = felix_port_change_conduit, + .port_hsr_join = felix_port_hsr_join, + .port_hsr_leave = felix_port_hsr_leave, }; int felix_register_switch(struct device *dev, resource_size_t switch_base, diff --git a/drivers/net/dsa/realtek/rtl8365mb.c b/drivers/net/dsa/realtek/rtl8365mb.c index 964a56ee16cc..c575e164368c 100644 --- a/drivers/net/dsa/realtek/rtl8365mb.c +++ b/drivers/net/dsa/realtek/rtl8365mb.c @@ -2134,6 +2134,8 @@ 
static const struct dsa_switch_ops rtl8365mb_switch_ops = { .get_stats64 = rtl8365mb_get_stats64, .port_change_mtu = rtl8365mb_port_change_mtu, .port_max_mtu = rtl8365mb_port_max_mtu, + .port_hsr_join = dsa_port_simple_hsr_join, + .port_hsr_leave = dsa_port_simple_hsr_leave, }; static const struct realtek_ops rtl8365mb_ops = { diff --git a/drivers/net/dsa/realtek/rtl8366rb.c b/drivers/net/dsa/realtek/rtl8366rb.c index 8bdb52b5fdcb..d96ae72b0a5c 100644 --- a/drivers/net/dsa/realtek/rtl8366rb.c +++ b/drivers/net/dsa/realtek/rtl8366rb.c @@ -1815,6 +1815,8 @@ static const struct dsa_switch_ops rtl8366rb_switch_ops = { .port_fast_age = rtl8366rb_port_fast_age, .port_change_mtu = rtl8366rb_change_mtu, .port_max_mtu = rtl8366rb_max_mtu, + .port_hsr_join = dsa_port_simple_hsr_join, + .port_hsr_leave = dsa_port_simple_hsr_leave, }; static const struct realtek_ops rtl8366rb_ops = { diff --git a/drivers/net/dsa/rzn1_a5psw.c b/drivers/net/dsa/rzn1_a5psw.c index 1635255f58e4..4d857e3be10b 100644 --- a/drivers/net/dsa/rzn1_a5psw.c +++ b/drivers/net/dsa/rzn1_a5psw.c @@ -1035,6 +1035,8 @@ static const struct dsa_switch_ops a5psw_switch_ops = { .port_fdb_add = a5psw_port_fdb_add, .port_fdb_del = a5psw_port_fdb_del, .port_fdb_dump = a5psw_port_fdb_dump, + .port_hsr_join = dsa_port_simple_hsr_join, + .port_hsr_leave = dsa_port_simple_hsr_leave, }; static int a5psw_mdio_wait_busy(struct a5psw *a5psw) diff --git a/drivers/net/dsa/xrs700x/xrs700x.c b/drivers/net/dsa/xrs700x/xrs700x.c index 4dbcc49a9e52..0a05f4156ef4 100644 --- a/drivers/net/dsa/xrs700x/xrs700x.c +++ b/drivers/net/dsa/xrs700x/xrs700x.c @@ -566,6 +566,7 @@ static int xrs700x_hsr_join(struct dsa_switch *ds, int port, struct xrs700x *priv = ds->priv; struct net_device *user; int ret, i, hsr_pair[2]; + enum hsr_port_type type; enum hsr_version ver; bool fwd = false; @@ -589,6 +590,16 @@ static int xrs700x_hsr_join(struct dsa_switch *ds, int port, return -EOPNOTSUPP; } + ret = hsr_get_port_type(hsr, dsa_to_port(ds, port)->user, &type); + if (ret) + return ret; + + if (type != HSR_PT_SLAVE_A && type != HSR_PT_SLAVE_B) { + NL_SET_ERR_MSG_MOD(extack, + "Only HSR slave ports can be offloaded"); + return -EOPNOTSUPP; + } + dsa_hsr_foreach_port(dp, ds, hsr) { if (dp->index != port) { partner = dp; diff --git a/drivers/net/dsa/yt921x.c b/drivers/net/dsa/yt921x.c index ebfd34f72314..1c511f5dc6ab 100644 --- a/drivers/net/dsa/yt921x.c +++ b/drivers/net/dsa/yt921x.c @@ -2098,6 +2098,117 @@ yt921x_dsa_port_bridge_join(struct dsa_switch *ds, int port, return res; } +static int +yt921x_dsa_port_mst_state_set(struct dsa_switch *ds, int port, + const struct switchdev_mst_state *st) +{ + struct yt921x_priv *priv = to_yt921x_priv(ds); + u32 mask; + u32 ctrl; + int res; + + mask = YT921X_STP_PORTn_M(port); + switch (st->state) { + case BR_STATE_DISABLED: + ctrl = YT921X_STP_PORTn_DISABLED(port); + break; + case BR_STATE_LISTENING: + case BR_STATE_LEARNING: + ctrl = YT921X_STP_PORTn_LEARNING(port); + break; + case BR_STATE_FORWARDING: + default: + ctrl = YT921X_STP_PORTn_FORWARD(port); + break; + case BR_STATE_BLOCKING: + ctrl = YT921X_STP_PORTn_BLOCKING(port); + break; + } + + mutex_lock(&priv->reg_lock); + res = yt921x_reg_update_bits(priv, YT921X_STPn(st->msti), mask, ctrl); + mutex_unlock(&priv->reg_lock); + + return res; +} + +static int +yt921x_dsa_vlan_msti_set(struct dsa_switch *ds, struct dsa_bridge bridge, + const struct switchdev_vlan_msti *msti) +{ + struct yt921x_priv *priv = to_yt921x_priv(ds); + u64 mask64; + u64 ctrl64; + int res; + + if (!msti->vid) + 
return -EINVAL; + if (!msti->msti || msti->msti >= YT921X_MSTI_NUM) + return -EINVAL; + + mask64 = YT921X_VLAN_CTRL_STP_ID_M; + ctrl64 = YT921X_VLAN_CTRL_STP_ID(msti->msti); + + mutex_lock(&priv->reg_lock); + res = yt921x_reg64_update_bits(priv, YT921X_VLANn_CTRL(msti->vid), + mask64, ctrl64); + mutex_unlock(&priv->reg_lock); + + return res; +} + +static void +yt921x_dsa_port_stp_state_set(struct dsa_switch *ds, int port, u8 state) +{ + struct yt921x_priv *priv = to_yt921x_priv(ds); + struct dsa_port *dp = dsa_to_port(ds, port); + struct device *dev = to_device(priv); + bool learning; + u32 mask; + u32 ctrl; + int res; + + mask = YT921X_STP_PORTn_M(port); + learning = false; + switch (state) { + case BR_STATE_DISABLED: + ctrl = YT921X_STP_PORTn_DISABLED(port); + break; + case BR_STATE_LISTENING: + ctrl = YT921X_STP_PORTn_LEARNING(port); + break; + case BR_STATE_LEARNING: + ctrl = YT921X_STP_PORTn_LEARNING(port); + learning = dp->learning; + break; + case BR_STATE_FORWARDING: + default: + ctrl = YT921X_STP_PORTn_FORWARD(port); + learning = dp->learning; + break; + case BR_STATE_BLOCKING: + ctrl = YT921X_STP_PORTn_BLOCKING(port); + break; + } + + mutex_lock(&priv->reg_lock); + do { + res = yt921x_reg_update_bits(priv, YT921X_STPn(0), mask, ctrl); + if (res) + break; + + mask = YT921X_PORT_LEARN_DIS; + ctrl = !learning ? YT921X_PORT_LEARN_DIS : 0; + res = yt921x_reg_update_bits(priv, YT921X_PORTn_LEARN(port), + mask, ctrl); + } while (0); + mutex_unlock(&priv->reg_lock); + + if (res) + dev_err(dev, "Failed to %s port %d: %i\n", "set STP state for", + port, res); +} + static int yt921x_port_down(struct yt921x_priv *priv, int port) { u32 mask; @@ -2763,6 +2874,9 @@ static const struct dsa_switch_ops yt921x_dsa_switch_ops = { /* mtu */ .port_change_mtu = yt921x_dsa_port_change_mtu, .port_max_mtu = yt921x_dsa_port_max_mtu, + /* hsr */ + .port_hsr_leave = dsa_port_simple_hsr_leave, + .port_hsr_join = dsa_port_simple_hsr_join, /* mirror */ .port_mirror_del = yt921x_dsa_port_mirror_del, .port_mirror_add = yt921x_dsa_port_mirror_add, @@ -2783,6 +2897,10 @@ static const struct dsa_switch_ops yt921x_dsa_switch_ops = { .port_bridge_flags = yt921x_dsa_port_bridge_flags, .port_bridge_leave = yt921x_dsa_port_bridge_leave, .port_bridge_join = yt921x_dsa_port_bridge_join, + /* mst */ + .port_mst_state_set = yt921x_dsa_port_mst_state_set, + .vlan_msti_set = yt921x_dsa_vlan_msti_set, + .port_stp_state_set = yt921x_dsa_port_stp_state_set, /* port */ .get_tag_protocol = yt921x_dsa_get_tag_protocol, .phylink_get_caps = yt921x_dsa_phylink_get_caps, @@ -2855,6 +2973,8 @@ static int yt921x_mdio_probe(struct mdio_device *mdiodev) ds->assisted_learning_on_cpu_port = true; ds->priv = priv; ds->ops = &yt921x_dsa_switch_ops; + ds->ageing_time_min = 1 * 5000; + ds->ageing_time_max = U16_MAX * 5000; ds->phylink_mac_ops = &yt921x_phylink_mac_ops; ds->num_ports = YT921X_PORT_NUM; diff --git a/drivers/net/dsa/yt921x.h b/drivers/net/dsa/yt921x.h index 44719d841d40..61bb0ab3b09a 100644 --- a/drivers/net/dsa/yt921x.h +++ b/drivers/net/dsa/yt921x.h @@ -274,6 +274,13 @@ #define YT921X_VLAN_IGR_FILTER_PORTn(port) BIT(port) #define YT921X_PORTn_ISOLATION(port) (0x180294 + 4 * (port)) #define YT921X_PORT_ISOLATION_BLOCKn(port) BIT(port) +#define YT921X_STPn(n) (0x18038c + 4 * (n)) +#define YT921X_STP_PORTn_M(port) GENMASK(2 * (port) + 1, 2 * (port)) +#define YT921X_STP_PORTn(port, x) ((x) << (2 * (port))) +#define YT921X_STP_PORTn_DISABLED(port) YT921X_STP_PORTn(port, 0) +#define YT921X_STP_PORTn_LEARNING(port) YT921X_STP_PORTn(port, 
1) +#define YT921X_STP_PORTn_BLOCKING(port) YT921X_STP_PORTn(port, 2) +#define YT921X_STP_PORTn_FORWARD(port) YT921X_STP_PORTn(port, 3) #define YT921X_PORTn_LEARN(port) (0x1803d0 + 4 * (port)) #define YT921X_PORT_LEARN_VID_LEARN_MULTI_EN BIT(22) #define YT921X_PORT_LEARN_VID_LEARN_MODE BIT(21) @@ -382,23 +389,23 @@ #define YT921X_FDB_HW_FLUSH_ON_LINKDOWN BIT(0) #define YT921X_VLANn_CTRL(vlan) (0x188000 + 8 * (vlan)) -#define YT921X_VLAN_CTRL_UNTAG_PORTS_M GENMASK(50, 40) +#define YT921X_VLAN_CTRL_UNTAG_PORTS_M GENMASK_ULL(50, 40) #define YT921X_VLAN_CTRL_UNTAG_PORTS(x) FIELD_PREP(YT921X_VLAN_CTRL_UNTAG_PORTS_M, (x)) -#define YT921X_VLAN_CTRL_UNTAG_PORTn(port) BIT((port) + 40) -#define YT921X_VLAN_CTRL_STP_ID_M GENMASK(39, 36) +#define YT921X_VLAN_CTRL_UNTAG_PORTn(port) BIT_ULL((port) + 40) +#define YT921X_VLAN_CTRL_STP_ID_M GENMASK_ULL(39, 36) #define YT921X_VLAN_CTRL_STP_ID(x) FIELD_PREP(YT921X_VLAN_CTRL_STP_ID_M, (x)) -#define YT921X_VLAN_CTRL_SVLAN_EN BIT(35) -#define YT921X_VLAN_CTRL_FID_M GENMASK(34, 23) +#define YT921X_VLAN_CTRL_SVLAN_EN BIT_ULL(35) +#define YT921X_VLAN_CTRL_FID_M GENMASK_ULL(34, 23) #define YT921X_VLAN_CTRL_FID(x) FIELD_PREP(YT921X_VLAN_CTRL_FID_M, (x)) -#define YT921X_VLAN_CTRL_LEARN_DIS BIT(22) -#define YT921X_VLAN_CTRL_INT_PRI_EN BIT(21) -#define YT921X_VLAN_CTRL_INT_PRI_M GENMASK(20, 18) -#define YT921X_VLAN_CTRL_PORTS_M GENMASK(17, 7) +#define YT921X_VLAN_CTRL_LEARN_DIS BIT_ULL(22) +#define YT921X_VLAN_CTRL_INT_PRI_EN BIT_ULL(21) +#define YT921X_VLAN_CTRL_INT_PRI_M GENMASK_ULL(20, 18) +#define YT921X_VLAN_CTRL_PORTS_M GENMASK_ULL(17, 7) #define YT921X_VLAN_CTRL_PORTS(x) FIELD_PREP(YT921X_VLAN_CTRL_PORTS_M, (x)) -#define YT921X_VLAN_CTRL_PORTn(port) BIT((port) + 7) -#define YT921X_VLAN_CTRL_BYPASS_1X_AC BIT(6) -#define YT921X_VLAN_CTRL_METER_EN BIT(5) -#define YT921X_VLAN_CTRL_METER_ID_M GENMASK(4, 0) +#define YT921X_VLAN_CTRL_PORTn(port) BIT_ULL((port) + 7) +#define YT921X_VLAN_CTRL_BYPASS_1X_AC BIT_ULL(6) +#define YT921X_VLAN_CTRL_METER_EN BIT_ULL(5) +#define YT921X_VLAN_CTRL_METER_ID_M GENMASK_ULL(4, 0) #define YT921X_TPID_IGRn(x) (0x210000 + 4 * (x)) /* [0, 3] */ #define YT921X_TPID_IGR_TPID_M GENMASK(15, 0) @@ -449,6 +456,8 @@ enum yt921x_fdb_entry_status { YT921X_FDB_ENTRY_STATUS_STATIC = 7, }; +#define YT921X_MSTI_NUM 16 + #define YT9215_MAJOR 0x9002 #define YT9218_MAJOR 0x9001 diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c index 0653e69f0ef7..3ddd896d6987 100644 --- a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c +++ b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c @@ -367,10 +367,11 @@ static irqreturn_t xgbe_ecc_isr(int irq, void *data) static void xgbe_isr_bh_work(struct work_struct *work) { struct xgbe_prv_data *pdata = from_work(pdata, work, dev_bh_work); + unsigned int mac_isr, mac_tssr, mac_mdioisr; struct xgbe_hw_if *hw_if = &pdata->hw_if; - struct xgbe_channel *channel; + bool per_ch_irq, ti, ri, rbu, fbe; unsigned int dma_isr, dma_ch_isr; - unsigned int mac_isr, mac_tssr, mac_mdioisr; + struct xgbe_channel *channel; unsigned int i; /* The DMA interrupt status register also reports MAC and MTL @@ -384,43 +385,73 @@ static void xgbe_isr_bh_work(struct work_struct *work) netif_dbg(pdata, intr, pdata->netdev, "DMA_ISR=%#010x\n", dma_isr); for (i = 0; i < pdata->channel_count; i++) { + bool schedule_napi = false; + struct napi_struct *napi; + if (!(dma_isr & (1 << i))) continue; channel = pdata->channel[i]; dma_ch_isr = XGMAC_DMA_IOREAD(channel, DMA_CH_SR); + + /* Precompute flags once */ + ti = 
!!XGMAC_GET_BITS(dma_ch_isr, DMA_CH_SR, TI); + ri = !!XGMAC_GET_BITS(dma_ch_isr, DMA_CH_SR, RI); + rbu = !!XGMAC_GET_BITS(dma_ch_isr, DMA_CH_SR, RBU); + fbe = !!XGMAC_GET_BITS(dma_ch_isr, DMA_CH_SR, FBE); + netif_dbg(pdata, intr, pdata->netdev, "DMA_CH%u_ISR=%#010x\n", i, dma_ch_isr); - /* The TI or RI interrupt bits may still be set even if using - * per channel DMA interrupts. Check to be sure those are not - * enabled before using the private data napi structure. + per_ch_irq = pdata->per_channel_irq; + + /* + * Decide which NAPI to use and whether to schedule: + * - When not using per-channel IRQs: schedule on global NAPI + * if TI or RI are set. + * - RBU should also trigger NAPI (either per-channel or global) + * to allow refill. */ - if (!pdata->per_channel_irq && - (XGMAC_GET_BITS(dma_ch_isr, DMA_CH_SR, TI) || - XGMAC_GET_BITS(dma_ch_isr, DMA_CH_SR, RI))) { - if (napi_schedule_prep(&pdata->napi)) { - /* Disable Tx and Rx interrupts */ - xgbe_disable_rx_tx_ints(pdata); + if (!per_ch_irq && (ti || ri)) + schedule_napi = true; - /* Turn on polling */ - __napi_schedule(&pdata->napi); + if (rbu) { + schedule_napi = true; + pdata->ext_stats.rx_buffer_unavailable++; + } + + napi = per_ch_irq ? &channel->napi : &pdata->napi; + + if (schedule_napi && napi_schedule_prep(napi)) { + /* Disable interrupts appropriately before polling */ + if (per_ch_irq) { + if (pdata->channel_irq_mode) + xgbe_disable_rx_tx_int(pdata, channel); + else + disable_irq_nosync(channel->dma_irq); + } else { + xgbe_disable_rx_tx_ints(pdata); } + + /* Turn on polling */ + __napi_schedule(napi); } else { - /* Don't clear Rx/Tx status if doing per channel DMA - * interrupts, these will be cleared by the ISR for - * per channel DMA interrupts. + /* + * Don't clear Rx/Tx status if doing per-channel DMA + * interrupts; those bits will be serviced/cleared by + * the per-channel ISR/NAPI. In non-per-channel mode + * when we're not scheduling NAPI here, ensure we don't + * accidentally clear TI/RI in HW: zero them in the + * local copy so that the eventual write-back does not + * clear TI/RI. 
*/ XGMAC_SET_BITS(dma_ch_isr, DMA_CH_SR, TI, 0); XGMAC_SET_BITS(dma_ch_isr, DMA_CH_SR, RI, 0); } - if (XGMAC_GET_BITS(dma_ch_isr, DMA_CH_SR, RBU)) - pdata->ext_stats.rx_buffer_unavailable++; - /* Restart the device on a Fatal Bus Error */ - if (XGMAC_GET_BITS(dma_ch_isr, DMA_CH_SR, FBE)) + if (fbe) schedule_work(&pdata->restart_work); /* Clear interrupt signals */ diff --git a/drivers/net/ethernet/dlink/dl2k.c b/drivers/net/ethernet/dlink/dl2k.c index 6e4f17142519..846d58c769ea 100644 --- a/drivers/net/ethernet/dlink/dl2k.c +++ b/drivers/net/ethernet/dlink/dl2k.c @@ -41,7 +41,7 @@ module_param(tx_flow, int, 0); module_param(rx_flow, int, 0); module_param(copy_thresh, int, 0); module_param(rx_coalesce, int, 0); /* Rx frame count each interrupt */ -module_param(rx_timeout, int, 0); /* Rx DMA wait time in 64ns increments */ +module_param(rx_timeout, int, 0); /* Rx DMA wait time in 640ns increments */ module_param(tx_coalesce, int, 0); /* HW xmit count each TxDMAComplete */ @@ -262,7 +262,7 @@ rio_probe1 (struct pci_dev *pdev, const struct pci_device_id *ent) np->link_status = 0; /* Set media and reset PHY */ if (np->phy_media) { - /* default Auto-Negotiation for fiber deivices */ + /* default Auto-Negotiation for fiber devices */ if (np->an_enable == 2) { np->an_enable = 1; } @@ -887,7 +887,7 @@ tx_error (struct net_device *dev, int tx_status) frame_id = (tx_status & 0xffff0000); printk (KERN_ERR "%s: Transmit error, TxStatus %4.4x, FrameId %d.\n", dev->name, tx_status, frame_id); - /* Ttransmit Underrun */ + /* Transmit Underrun */ if (tx_status & 0x10) { dev->stats.tx_fifo_errors++; dw16(TxStartThresh, dr16(TxStartThresh) + 0x10); @@ -1083,7 +1083,7 @@ rio_error (struct net_device *dev, int int_status) get_stats (dev); } - /* PCI Error, a catastronphic error related to the bus interface + /* PCI Error, a catastrophic error related to the bus interface occurs, set GlobalReset and HostReset to reset. */ if (int_status & HostError) { printk (KERN_ERR "%s: HostError! 
IntStatus %4.4x.\n", diff --git a/drivers/net/ethernet/dlink/dl2k.h b/drivers/net/ethernet/dlink/dl2k.h index 4788cc94639d..9ebf7a6db93e 100644 --- a/drivers/net/ethernet/dlink/dl2k.h +++ b/drivers/net/ethernet/dlink/dl2k.h @@ -270,7 +270,7 @@ enum _pcs_reg { PCS_ESR = 15, }; -/* IEEE Extened Status Register */ +/* IEEE Extended Status Register */ enum _mii_esr { MII_ESR_1000BX_FD = 0x8000, MII_ESR_1000BX_HD = 0x4000, diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c b/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c index cf8f14ce4cd5..fddf7c207f8e 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c @@ -31,14 +31,15 @@ */ #include <linux/device.h> #include <linux/netdevice.h> +#include <linux/units.h> #include "en.h" #include "en/port.h" #include "en/port_buffer.h" #define MLX5E_MAX_BW_ALLOC 100 /* Max percentage of BW allocation */ -#define MLX5E_100MB (100000) -#define MLX5E_1GB (1000000) +#define MLX5E_100MB_TO_KB (100 * MEGA / KILO) +#define MLX5E_1GB_TO_KB (GIGA / KILO) #define MLX5E_CEE_STATE_UP 1 #define MLX5E_CEE_STATE_DOWN 0 @@ -572,10 +573,10 @@ static int mlx5e_dcbnl_ieee_getmaxrate(struct net_device *netdev, for (i = 0; i <= mlx5_max_tc(mdev); i++) { switch (max_bw_unit[i]) { case MLX5_100_MBPS_UNIT: - maxrate->tc_maxrate[i] = max_bw_value[i] * MLX5E_100MB; + maxrate->tc_maxrate[i] = max_bw_value[i] * MLX5E_100MB_TO_KB; break; case MLX5_GBPS_UNIT: - maxrate->tc_maxrate[i] = max_bw_value[i] * MLX5E_1GB; + maxrate->tc_maxrate[i] = max_bw_value[i] * MLX5E_1GB_TO_KB; break; case MLX5_BW_NO_LIMIT: break; @@ -595,8 +596,8 @@ static int mlx5e_dcbnl_ieee_setmaxrate(struct net_device *netdev, struct mlx5_core_dev *mdev = priv->mdev; u8 max_bw_value[IEEE_8021QAZ_MAX_TCS]; u8 max_bw_unit[IEEE_8021QAZ_MAX_TCS]; - __u64 upper_limit_mbps; - __u64 upper_limit_gbps; + u64 upper_limit_100mbps; + u64 upper_limit_gbps; int i; struct { int scale; @@ -614,22 +615,22 @@ static int mlx5e_dcbnl_ieee_setmaxrate(struct net_device *netdev, memset(max_bw_value, 0, sizeof(max_bw_value)); memset(max_bw_unit, 0, sizeof(max_bw_unit)); - upper_limit_mbps = 255 * MLX5E_100MB; - upper_limit_gbps = 255 * MLX5E_1GB; + upper_limit_100mbps = U8_MAX * MLX5E_100MB_TO_KB; + upper_limit_gbps = U8_MAX * MLX5E_1GB_TO_KB; for (i = 0; i <= mlx5_max_tc(mdev); i++) { if (!maxrate->tc_maxrate[i]) { max_bw_unit[i] = MLX5_BW_NO_LIMIT; continue; } - if (maxrate->tc_maxrate[i] <= upper_limit_mbps) { + if (maxrate->tc_maxrate[i] <= upper_limit_100mbps) { max_bw_value[i] = div_u64(maxrate->tc_maxrate[i], - MLX5E_100MB); + MLX5E_100MB_TO_KB); max_bw_value[i] = max_bw_value[i] ? 
max_bw_value[i] : 1; max_bw_unit[i] = MLX5_100_MBPS_UNIT; } else if (maxrate->tc_maxrate[i] <= upper_limit_gbps) { max_bw_value[i] = div_u64(maxrate->tc_maxrate[i], - MLX5E_1GB); + MLX5E_1GB_TO_KB); max_bw_unit[i] = MLX5_GBPS_UNIT; } else { netdev_err(netdev, diff --git a/drivers/net/ethernet/microsoft/mana/gdma_main.c b/drivers/net/ethernet/microsoft/mana/gdma_main.c index 8fd70b34807a..efb4e412ec7e 100644 --- a/drivers/net/ethernet/microsoft/mana/gdma_main.c +++ b/drivers/net/ethernet/microsoft/mana/gdma_main.c @@ -15,6 +15,20 @@ struct dentry *mana_debugfs_root; +struct mana_dev_recovery { + struct list_head list; + struct pci_dev *pdev; + enum gdma_eqe_type type; +}; + +static struct mana_dev_recovery_work { + struct list_head dev_list; + struct delayed_work work; + + /* Lock for dev_list above */ + spinlock_t lock; +} mana_dev_recovery_work; + static u32 mana_gd_r32(struct gdma_context *g, u64 offset) { return readl(g->bar0_va + offset); @@ -387,6 +401,25 @@ EXPORT_SYMBOL_NS(mana_gd_ring_cq, "NET_MANA"); #define MANA_SERVICE_PERIOD 10 +static void mana_serv_rescan(struct pci_dev *pdev) +{ + struct pci_bus *parent; + + pci_lock_rescan_remove(); + + parent = pdev->bus; + if (!parent) { + dev_err(&pdev->dev, "MANA service: no parent bus\n"); + goto out; + } + + pci_stop_and_remove_bus_device(pdev); + pci_rescan_bus(parent); + +out: + pci_unlock_rescan_remove(); +} + static void mana_serv_fpga(struct pci_dev *pdev) { struct pci_bus *bus, *parent; @@ -419,9 +452,12 @@ static void mana_serv_reset(struct pci_dev *pdev) { struct gdma_context *gc = pci_get_drvdata(pdev); struct hw_channel_context *hwc; + int ret; if (!gc) { - dev_err(&pdev->dev, "MANA service: no GC\n"); + /* Perform PCI rescan on device if GC is not set up */ + dev_err(&pdev->dev, "MANA service: GC not setup, rescanning\n"); + mana_serv_rescan(pdev); return; } @@ -440,9 +476,18 @@ static void mana_serv_reset(struct pci_dev *pdev) msleep(MANA_SERVICE_PERIOD * 1000); - mana_gd_resume(pdev); + ret = mana_gd_resume(pdev); + if (ret == -ETIMEDOUT || ret == -EPROTO) { + /* Perform PCI rescan on device if we failed on HWC */ + dev_err(&pdev->dev, "MANA service: resume failed, rescanning\n"); + mana_serv_rescan(pdev); + goto out; + } - dev_info(&pdev->dev, "MANA reset cycle completed\n"); + if (ret) + dev_info(&pdev->dev, "MANA reset cycle failed err %d\n", ret); + else + dev_info(&pdev->dev, "MANA reset cycle completed\n"); out: gc->in_service = false; @@ -454,18 +499,9 @@ struct mana_serv_work { enum gdma_eqe_type type; }; -static void mana_serv_func(struct work_struct *w) +static void mana_do_service(enum gdma_eqe_type type, struct pci_dev *pdev) { - struct mana_serv_work *mns_wk; - struct pci_dev *pdev; - - mns_wk = container_of(w, struct mana_serv_work, serv_work); - pdev = mns_wk->pdev; - - if (!pdev) - goto out; - - switch (mns_wk->type) { + switch (type) { case GDMA_EQE_HWC_FPGA_RECONFIG: mana_serv_fpga(pdev); break; @@ -475,12 +511,48 @@ static void mana_serv_func(struct work_struct *w) break; default: - dev_err(&pdev->dev, "MANA service: unknown type %d\n", - mns_wk->type); + dev_err(&pdev->dev, "MANA service: unknown type %d\n", type); break; } +} + +static void mana_recovery_delayed_func(struct work_struct *w) +{ + struct mana_dev_recovery_work *work; + struct mana_dev_recovery *dev; + unsigned long flags; + + work = container_of(w, struct mana_dev_recovery_work, work.work); + + spin_lock_irqsave(&work->lock, flags); + + while (!list_empty(&work->dev_list)) { + dev = list_first_entry(&work->dev_list, + struct 
mana_dev_recovery, list); + list_del(&dev->list); + spin_unlock_irqrestore(&work->lock, flags); + + mana_do_service(dev->type, dev->pdev); + pci_dev_put(dev->pdev); + kfree(dev); + + spin_lock_irqsave(&work->lock, flags); + } + + spin_unlock_irqrestore(&work->lock, flags); +} + +static void mana_serv_func(struct work_struct *w) +{ + struct mana_serv_work *mns_wk; + struct pci_dev *pdev; + + mns_wk = container_of(w, struct mana_serv_work, serv_work); + pdev = mns_wk->pdev; + + if (pdev) + mana_do_service(mns_wk->type, pdev); -out: pci_dev_put(pdev); kfree(mns_wk); module_put(THIS_MODULE); @@ -541,6 +613,17 @@ static void mana_gd_process_eqe(struct gdma_queue *eq) case GDMA_EQE_HWC_RESET_REQUEST: dev_info(gc->dev, "Recv MANA service type:%d\n", type); + if (!test_and_set_bit(GC_PROBE_SUCCEEDED, &gc->flags)) { + /* + * Device is in probe and we received a hardware reset + * event, the probe function will detect that the flag + * has changed and perform service procedure. + */ + dev_info(gc->dev, + "Service is to be processed in probe\n"); + break; + } + if (gc->in_service) { dev_info(gc->dev, "Already in service\n"); break; @@ -1938,8 +2021,19 @@ static int mana_gd_probe(struct pci_dev *pdev, const struct pci_device_id *ent) if (err) goto cleanup_mana; + /* + * If a hardware reset event has occurred over HWC during probe, + * rollback and perform hardware reset procedure. + */ + if (test_and_set_bit(GC_PROBE_SUCCEEDED, &gc->flags)) { + err = -EPROTO; + goto cleanup_mana_rdma; + } + return 0; +cleanup_mana_rdma: + mana_rdma_remove(&gc->mana_ib); cleanup_mana: mana_remove(&gc->mana, false); cleanup_gd: @@ -1963,6 +2057,35 @@ release_region: disable_dev: pci_disable_device(pdev); dev_err(&pdev->dev, "gdma probe failed: err = %d\n", err); + + /* + * Hardware could be in recovery mode and the HWC returns TIMEDOUT or + * EPROTO from mana_gd_setup(), mana_probe() or mana_rdma_probe(), or + * we received a hardware reset event over HWC interrupt. In this case, + * perform the device recovery procedure after MANA_SERVICE_PERIOD + * seconds. 
+ */ + if (err == -ETIMEDOUT || err == -EPROTO) { + struct mana_dev_recovery *dev; + unsigned long flags; + + dev_info(&pdev->dev, "Start MANA recovery mode\n"); + + dev = kzalloc(sizeof(*dev), GFP_KERNEL); + if (!dev) + return err; + + dev->pdev = pci_dev_get(pdev); + dev->type = GDMA_EQE_HWC_RESET_REQUEST; + + spin_lock_irqsave(&mana_dev_recovery_work.lock, flags); + list_add_tail(&dev->list, &mana_dev_recovery_work.dev_list); + spin_unlock_irqrestore(&mana_dev_recovery_work.lock, flags); + + schedule_delayed_work(&mana_dev_recovery_work.work, + secs_to_jiffies(MANA_SERVICE_PERIOD)); + } + return err; } @@ -2067,6 +2190,10 @@ static int __init mana_driver_init(void) { int err; + INIT_LIST_HEAD(&mana_dev_recovery_work.dev_list); + spin_lock_init(&mana_dev_recovery_work.lock); + INIT_DELAYED_WORK(&mana_dev_recovery_work.work, mana_recovery_delayed_func); + mana_debugfs_root = debugfs_create_dir("mana", NULL); err = pci_register_driver(&mana_driver); @@ -2080,6 +2207,21 @@ static int __init mana_driver_init(void) static void __exit mana_driver_exit(void) { + struct mana_dev_recovery *dev; + unsigned long flags; + + disable_delayed_work_sync(&mana_dev_recovery_work.work); + + spin_lock_irqsave(&mana_dev_recovery_work.lock, flags); + while (!list_empty(&mana_dev_recovery_work.dev_list)) { + dev = list_first_entry(&mana_dev_recovery_work.dev_list, + struct mana_dev_recovery, list); + list_del(&dev->list); + pci_dev_put(dev->pdev); + kfree(dev); + } + spin_unlock_irqrestore(&mana_dev_recovery_work.lock, flags); + pci_unregister_driver(&mana_driver); debugfs_remove(mana_debugfs_root); diff --git a/drivers/net/ethernet/realtek/r8169_main.c b/drivers/net/ethernet/realtek/r8169_main.c index 630319604211..405e91eb3141 100644 --- a/drivers/net/ethernet/realtek/r8169_main.c +++ b/drivers/net/ethernet/realtek/r8169_main.c @@ -1512,7 +1512,6 @@ static enum rtl_dash_type rtl_get_dash_type(struct rtl8169_private *tp) case RTL_GIGA_MAC_VER_51 ... RTL_GIGA_MAC_VER_52: return RTL_DASH_EP; case RTL_GIGA_MAC_VER_66: - case RTL_GIGA_MAC_VER_80: return RTL_DASH_25_BP; default: return RTL_DASH_NONE; diff --git a/drivers/net/ethernet/toshiba/ps3_gelic_net.c b/drivers/net/ethernet/toshiba/ps3_gelic_net.c index 591866fc9055..d35d1f3c10a1 100644 --- a/drivers/net/ethernet/toshiba/ps3_gelic_net.c +++ b/drivers/net/ethernet/toshiba/ps3_gelic_net.c @@ -364,6 +364,7 @@ static int gelic_card_init_chain(struct gelic_card *card, * gelic_descr_prepare_rx - reinitializes a rx descriptor * @card: card structure * @descr: descriptor to re-init + * @napi_mode: is it running in napi poll * * return 0 on success, <0 on failure * @@ -374,7 +375,8 @@ static int gelic_card_init_chain(struct gelic_card *card, * must be a multiple of GELIC_NET_RXBUF_ALIGN. 
*/ static int gelic_descr_prepare_rx(struct gelic_card *card, - struct gelic_descr *descr) + struct gelic_descr *descr, + bool napi_mode) { static const unsigned int rx_skb_size = ALIGN(GELIC_NET_MAX_FRAME, GELIC_NET_RXBUF_ALIGN) + @@ -392,7 +394,10 @@ static int gelic_descr_prepare_rx(struct gelic_card *card, descr->hw_regs.payload.dev_addr = 0; descr->hw_regs.payload.size = 0; - descr->skb = netdev_alloc_skb(*card->netdev, rx_skb_size); + if (napi_mode) + descr->skb = napi_alloc_skb(&card->napi, rx_skb_size); + else + descr->skb = netdev_alloc_skb(*card->netdev, rx_skb_size); if (!descr->skb) { descr->hw_regs.payload.dev_addr = 0; /* tell DMAC don't touch memory */ return -ENOMEM; @@ -464,7 +469,7 @@ static int gelic_card_fill_rx_chain(struct gelic_card *card) do { if (!descr->skb) { - ret = gelic_descr_prepare_rx(card, descr); + ret = gelic_descr_prepare_rx(card, descr, false); if (ret) goto rewind; } @@ -964,7 +969,7 @@ static void gelic_net_pass_skb_up(struct gelic_descr *descr, netdev->stats.rx_bytes += skb->len; /* pass skb up to stack */ - netif_receive_skb(skb); + napi_gro_receive(&card->napi, skb); } /** @@ -1069,7 +1074,7 @@ refill: /* * this call can fail, propagate the error */ - prepare_rx_ret = gelic_descr_prepare_rx(card, descr); + prepare_rx_ret = gelic_descr_prepare_rx(card, descr, true); if (prepare_rx_ret) return prepare_rx_ret; diff --git a/drivers/net/pcs/pcs-lynx.c b/drivers/net/pcs/pcs-lynx.c index 677f92883976..73e1364ad1ed 100644 --- a/drivers/net/pcs/pcs-lynx.c +++ b/drivers/net/pcs/pcs-lynx.c @@ -40,12 +40,12 @@ static unsigned int lynx_pcs_inband_caps(struct phylink_pcs *pcs, { switch (interface) { case PHY_INTERFACE_MODE_1000BASEX: + case PHY_INTERFACE_MODE_2500BASEX: case PHY_INTERFACE_MODE_SGMII: case PHY_INTERFACE_MODE_QSGMII: return LINK_INBAND_DISABLE | LINK_INBAND_ENABLE; case PHY_INTERFACE_MODE_10GBASER: - case PHY_INTERFACE_MODE_2500BASEX: return LINK_INBAND_DISABLE; case PHY_INTERFACE_MODE_USXGMII: @@ -80,27 +80,6 @@ static void lynx_pcs_get_state_usxgmii(struct mdio_device *pcs, phylink_decode_usxgmii_word(state, lpa); } -static void lynx_pcs_get_state_2500basex(struct mdio_device *pcs, - struct phylink_link_state *state) -{ - int bmsr; - - bmsr = mdiodev_read(pcs, MII_BMSR); - if (bmsr < 0) { - state->link = false; - return; - } - - state->link = !!(bmsr & BMSR_LSTATUS); - state->an_complete = !!(bmsr & BMSR_ANEGCOMPLETE); - if (!state->link) - return; - - state->speed = SPEED_2500; - state->pause |= MLO_PAUSE_TX | MLO_PAUSE_RX; - state->duplex = DUPLEX_FULL; -} - static void lynx_pcs_get_state(struct phylink_pcs *pcs, unsigned int neg_mode, struct phylink_link_state *state) { @@ -108,13 +87,11 @@ static void lynx_pcs_get_state(struct phylink_pcs *pcs, unsigned int neg_mode, switch (state->interface) { case PHY_INTERFACE_MODE_1000BASEX: + case PHY_INTERFACE_MODE_2500BASEX: case PHY_INTERFACE_MODE_SGMII: case PHY_INTERFACE_MODE_QSGMII: phylink_mii_c22_pcs_get_state(lynx->mdio, neg_mode, state); break; - case PHY_INTERFACE_MODE_2500BASEX: - lynx_pcs_get_state_2500basex(lynx->mdio, state); - break; case PHY_INTERFACE_MODE_USXGMII: case PHY_INTERFACE_MODE_10G_QXGMII: lynx_pcs_get_state_usxgmii(lynx->mdio, state); @@ -152,7 +129,8 @@ static int lynx_pcs_config_giga(struct mdio_device *pcs, mdiodev_write(pcs, LINK_TIMER_HI, link_timer >> 16); } - if (interface == PHY_INTERFACE_MODE_1000BASEX) { + if (interface == PHY_INTERFACE_MODE_1000BASEX || + interface == PHY_INTERFACE_MODE_2500BASEX) { if_mode = 0; } else { /* SGMII and QSGMII */ @@ -202,15 +180,9 @@ 
static int lynx_pcs_config(struct phylink_pcs *pcs, unsigned int neg_mode, case PHY_INTERFACE_MODE_1000BASEX: case PHY_INTERFACE_MODE_SGMII: case PHY_INTERFACE_MODE_QSGMII: + case PHY_INTERFACE_MODE_2500BASEX: return lynx_pcs_config_giga(lynx->mdio, ifmode, advertising, neg_mode); - case PHY_INTERFACE_MODE_2500BASEX: - if (neg_mode == PHYLINK_PCS_NEG_INBAND_ENABLED) { - dev_err(&lynx->mdio->dev, - "AN not supported on 3.125GHz SerDes lane\n"); - return -EOPNOTSUPP; - } - break; case PHY_INTERFACE_MODE_USXGMII: case PHY_INTERFACE_MODE_10G_QXGMII: return lynx_pcs_config_usxgmii(lynx->mdio, ifmode, advertising, @@ -271,42 +243,6 @@ static void lynx_pcs_link_up_sgmii(struct mdio_device *pcs, if_mode); } -/* 2500Base-X is SerDes protocol 7 on Felix and 6 on ENETC. It is a SerDes lane - * clocked at 3.125 GHz which encodes symbols with 8b/10b and does not have - * auto-negotiation of any link parameters. Electrically it is compatible with - * a single lane of XAUI. - * The hardware reference manual wants to call this mode SGMII, but it isn't - * really, since the fundamental features of SGMII: - * - Downgrading the link speed by duplicating symbols - * - Auto-negotiation - * are not there. - * The speed is configured at 1000 in the IF_MODE because the clock frequency - * is actually given by a PLL configured in the Reset Configuration Word (RCW). - * Since there is no difference between fixed speed SGMII w/o AN and 802.3z w/o - * AN, we call this PHY interface type 2500Base-X. In case a PHY negotiates a - * lower link speed on line side, the system-side interface remains fixed at - * 2500 Mbps and we do rate adaptation through pause frames. - */ -static void lynx_pcs_link_up_2500basex(struct mdio_device *pcs, - unsigned int neg_mode, - int speed, int duplex) -{ - u16 if_mode = 0; - - if (neg_mode == PHYLINK_PCS_NEG_INBAND_ENABLED) { - dev_err(&pcs->dev, "AN not supported for 2500BaseX\n"); - return; - } - - if (duplex == DUPLEX_HALF) - if_mode |= IF_MODE_HALF_DUPLEX; - if_mode |= IF_MODE_SPEED(SGMII_SPEED_2500); - - mdiodev_modify(pcs, IF_MODE, - IF_MODE_HALF_DUPLEX | IF_MODE_SPEED_MSK, - if_mode); -} - static void lynx_pcs_link_up(struct phylink_pcs *pcs, unsigned int neg_mode, phy_interface_t interface, int speed, int duplex) @@ -318,9 +254,6 @@ static void lynx_pcs_link_up(struct phylink_pcs *pcs, unsigned int neg_mode, case PHY_INTERFACE_MODE_QSGMII: lynx_pcs_link_up_sgmii(lynx->mdio, neg_mode, speed, duplex); break; - case PHY_INTERFACE_MODE_2500BASEX: - lynx_pcs_link_up_2500basex(lynx->mdio, neg_mode, speed, duplex); - break; case PHY_INTERFACE_MODE_USXGMII: case PHY_INTERFACE_MODE_10G_QXGMII: /* At the moment, only in-band AN is supported for USXGMII diff --git a/drivers/net/phy/mdio-open-alliance.h b/drivers/net/phy/mdio-open-alliance.h index 6850a3f0b31e..449d0fb67093 100644 --- a/drivers/net/phy/mdio-open-alliance.h +++ b/drivers/net/phy/mdio-open-alliance.h @@ -56,6 +56,8 @@ /* Advanced Diagnostic Features Capability Register*/ #define MDIO_OATC14_ADFCAP 0xcc00 #define OATC14_ADFCAP_HDD_CAPABILITY GENMASK(10, 8) +#define OATC14_ADFCAP_SQIPLUS_CAPABILITY GENMASK(4, 1) +#define OATC14_ADFCAP_SQI_CAPABILITY BIT(0) /* Harness Defect Detection Register */ #define MDIO_OATC14_HDD 0xcc01 @@ -65,6 +67,17 @@ #define OATC14_HDD_VALID BIT(2) #define OATC14_HDD_SHORT_OPEN_STATUS GENMASK(1, 0) +/* Dynamic Channel Quality SQI Register */ +#define MDIO_OATC14_DCQ_SQI 0xcc03 +#define OATC14_DCQ_SQI_VALUE GENMASK(2, 0) + +/* Dynamic Channel Quality SQI Plus Register */ +#define MDIO_OATC14_DCQ_SQIPLUS 
0xcc04 +#define OATC14_DCQ_SQIPLUS_VALUE GENMASK(7, 0) + +/* SQI is supported using 3 bits means 8 levels (0-7) */ +#define OATC14_SQI_MAX_LEVEL 7 + /* Bus Short/Open Status: * 0 0 - no fault; everything is ok. (Default) * 0 1 - detected as an open or missing termination(s) diff --git a/drivers/net/phy/microchip_t1s.c b/drivers/net/phy/microchip_t1s.c index 5a0a66778977..e601d56b2507 100644 --- a/drivers/net/phy/microchip_t1s.c +++ b/drivers/net/phy/microchip_t1s.c @@ -575,6 +575,8 @@ static struct phy_driver microchip_t1s_driver[] = { .get_plca_status = genphy_c45_plca_get_status, .cable_test_start = genphy_c45_oatc14_cable_test_start, .cable_test_get_status = genphy_c45_oatc14_cable_test_get_status, + .get_sqi = genphy_c45_oatc14_get_sqi, + .get_sqi_max = genphy_c45_oatc14_get_sqi_max, }, { PHY_ID_MATCH_EXACT(PHY_ID_LAN865X_REVB), diff --git a/drivers/net/phy/phy-c45.c b/drivers/net/phy/phy-c45.c index f5e23b53994f..d48aa7231b37 100644 --- a/drivers/net/phy/phy-c45.c +++ b/drivers/net/phy/phy-c45.c @@ -1695,3 +1695,140 @@ int genphy_c45_oatc14_cable_test_start(struct phy_device *phydev) OATC14_HDD_START_CONTROL); } EXPORT_SYMBOL(genphy_c45_oatc14_cable_test_start); + +/** + * oatc14_update_sqi_capability - Read and update OATC14 10Base-T1S PHY SQI/SQI+ + * capability + * @phydev: Pointer to the PHY device structure + * + * This helper reads the OATC14 ADFCAP capability register to determine whether + * the PHY supports SQI or SQI+ reporting. + * + * SQI+ capability is detected first. The SQI+ field indicates the number of + * valid MSBs (3–8), corresponding to 8–256 SQI+ levels. When present, the + * function stores the number of SQI+ bits and computes the maximum SQI+ value + * as (2^bits - 1). + * + * If SQI+ is not supported, the function checks for basic SQI capability, + * which provides 0–7 SQI levels. + * + * On success, the capability information is stored in + * @phydev->oatc14_sqi_capability and marked as updated. + * + * Return: + * * 0 - capability successfully read and stored + * * -EOPNOTSUPP - SQI/SQI+ not supported by this PHY + * * Negative errno on read failure + */ +static int oatc14_update_sqi_capability(struct phy_device *phydev) +{ + u8 bits; + int ret; + + ret = phy_read_mmd(phydev, MDIO_MMD_VEND2, MDIO_OATC14_ADFCAP); + if (ret < 0) + return ret; + + /* Check for SQI+ capability + * 0 - SQI+ is not supported + * (3-8) bits for (8-256) SQI+ levels supported + */ + bits = FIELD_GET(OATC14_ADFCAP_SQIPLUS_CAPABILITY, ret); + if (bits) { + phydev->oatc14_sqi_capability.sqiplus_bits = bits; + /* Max sqi+ level supported: (2 ^ bits) - 1 */ + phydev->oatc14_sqi_capability.sqi_max = BIT(bits) - 1; + goto update_done; + } + + /* Check for SQI capability + * 0 - SQI is not supported + * 1 - SQI is supported (0-7 levels) + */ + if (ret & OATC14_ADFCAP_SQI_CAPABILITY) { + phydev->oatc14_sqi_capability.sqi_max = OATC14_SQI_MAX_LEVEL; + goto update_done; + } + + return -EOPNOTSUPP; + +update_done: + phydev->oatc14_sqi_capability.updated = true; + return 0; +} + +/** + * genphy_c45_oatc14_get_sqi_max - Get maximum supported SQI or SQI+ level of + * OATC14 10Base-T1S PHY + * @phydev: pointer to the PHY device structure + * + * This function returns the maximum supported Signal Quality Indicator (SQI) or + * SQI+ level. The SQI capability is updated on first invocation if it has not + * already been updated. 
+ * + * Return: + * * Maximum SQI/SQI+ level supported + * * Negative errno on capability read failure + */ +int genphy_c45_oatc14_get_sqi_max(struct phy_device *phydev) +{ + int ret; + + if (!phydev->oatc14_sqi_capability.updated) { + ret = oatc14_update_sqi_capability(phydev); + if (ret) + return ret; + } + + return phydev->oatc14_sqi_capability.sqi_max; +} +EXPORT_SYMBOL(genphy_c45_oatc14_get_sqi_max); + +/** + * genphy_c45_oatc14_get_sqi - Get Signal Quality Indicator (SQI) from an OATC14 + * 10Base-T1S PHY + * @phydev: pointer to the PHY device structure + * + * This function reads the SQI+ or SQI value from an OATC14-compatible + * 10Base-T1S PHY. If SQI+ capability is supported, the function returns the + * extended SQI+ value; otherwise, it returns the basic SQI value. The SQI + * capability is updated on first invocation if it has not already been updated. + * + * Return: + * * SQI/SQI+ value on success + * * Negative errno on read failure + */ +int genphy_c45_oatc14_get_sqi(struct phy_device *phydev) +{ + u8 shift; + int ret; + + if (!phydev->oatc14_sqi_capability.updated) { + ret = oatc14_update_sqi_capability(phydev); + if (ret) + return ret; + } + + /* Calculate and return SQI+ value if supported */ + if (phydev->oatc14_sqi_capability.sqiplus_bits) { + ret = phy_read_mmd(phydev, MDIO_MMD_VEND2, + MDIO_OATC14_DCQ_SQIPLUS); + if (ret < 0) + return ret; + + /* SQI+ uses N MSBs out of 8 bits, left-aligned with padding 1's + * Calculate the right-shift needed to isolate the N bits. + */ + shift = 8 - phydev->oatc14_sqi_capability.sqiplus_bits; + + return (ret & OATC14_DCQ_SQIPLUS_VALUE) >> shift; + } + + /* Read and return SQI value if SQI+ capability is not supported */ + ret = phy_read_mmd(phydev, MDIO_MMD_VEND2, MDIO_OATC14_DCQ_SQI); + if (ret < 0) + return ret; + + return ret & OATC14_DCQ_SQI_VALUE; +} +EXPORT_SYMBOL(genphy_c45_oatc14_get_sqi); diff --git a/drivers/net/team/team_core.c b/drivers/net/team/team_core.c index 0a41d2b45d8c..4d5c9ae8f221 100644 --- a/drivers/net/team/team_core.c +++ b/drivers/net/team/team_core.c @@ -1231,7 +1231,7 @@ static int team_port_add(struct team *team, struct net_device *port_dev, if (err) { if (dev->flags & IFF_PROMISC) dev_set_promiscuity(port_dev, -1); - goto err_set_slave_promisc; + goto err_set_slave_allmulti; } } @@ -1258,6 +1258,7 @@ static int team_port_add(struct team *team, struct net_device *port_dev, return 0; err_set_dev_type: +err_set_slave_allmulti: err_set_slave_promisc: __team_option_inst_del_port(team, port); diff --git a/include/linux/if_hsr.h b/include/linux/if_hsr.h index d7941fd88032..f4cf2dd36d19 100644 --- a/include/linux/if_hsr.h +++ b/include/linux/if_hsr.h @@ -43,6 +43,8 @@ extern bool is_hsr_master(struct net_device *dev); extern int hsr_get_version(struct net_device *dev, enum hsr_version *ver); struct net_device *hsr_get_port_ndev(struct net_device *ndev, enum hsr_port_type pt); +int hsr_get_port_type(struct net_device *hsr_dev, struct net_device *dev, + enum hsr_port_type *type); #else static inline bool is_hsr_master(struct net_device *dev) { @@ -59,6 +61,13 @@ static inline struct net_device *hsr_get_port_ndev(struct net_device *ndev, { return ERR_PTR(-EINVAL); } + +static inline int hsr_get_port_type(struct net_device *hsr_dev, + struct net_device *dev, + enum hsr_port_type *type) +{ + return -EINVAL; +} #endif /* CONFIG_HSR */ #endif /*_LINUX_IF_HSR_H_*/ diff --git a/include/linux/phy.h b/include/linux/phy.h index 059a104223c4..fbbe028cc4b7 100644 --- a/include/linux/phy.h +++ b/include/linux/phy.h @@ 
-531,6 +531,30 @@ struct macsec_context; struct macsec_ops; /** + * struct phy_oatc14_sqi_capability - SQI capability information for OATC14 + * 10Base-T1S PHY + * @updated: Indicates whether the SQI capability fields have been updated. + * @sqi_max: Maximum supported Signal Quality Indicator (SQI) level reported by + * the PHY. + * @sqiplus_bits: Bits for SQI+ levels supported by the PHY. + * 0 - SQI+ is not supported + * 3 - SQI+ is supported, using 3 bits (8 levels) + * 4 - SQI+ is supported, using 4 bits (16 levels) + * 5 - SQI+ is supported, using 5 bits (32 levels) + * 6 - SQI+ is supported, using 6 bits (64 levels) + * 7 - SQI+ is supported, using 7 bits (128 levels) + * 8 - SQI+ is supported, using 8 bits (256 levels) + * + * This structure is used by the OATC14 10Base-T1S PHY driver to store the SQI + * and SQI+ capability information retrieved from the PHY. + */ +struct phy_oatc14_sqi_capability { + bool updated; + int sqi_max; + u8 sqiplus_bits; +}; + +/** * struct phy_device - An instance of a PHY * * @mdio: MDIO bus this PHY is on @@ -626,6 +650,7 @@ struct macsec_ops; * @link_down_events: Number of times link was lost * @shared: Pointer to private data shared by phys in one package * @priv: Pointer to driver private data + * @oatc14_sqi_capability: SQI capability information for OATC14 10Base-T1S PHY * * interrupts currently only supports enabled or disabled, * but could be changed in the future to support enabling @@ -772,6 +797,8 @@ struct phy_device { /* MACsec management functions */ const struct macsec_ops *macsec_ops; #endif + + struct phy_oatc14_sqi_capability oatc14_sqi_capability; }; /* Generic phy_device::dev_flags */ @@ -2257,6 +2284,8 @@ int genphy_c45_an_config_eee_aneg(struct phy_device *phydev); int genphy_c45_oatc14_cable_test_start(struct phy_device *phydev); int genphy_c45_oatc14_cable_test_get_status(struct phy_device *phydev, bool *finished); +int genphy_c45_oatc14_get_sqi_max(struct phy_device *phydev); +int genphy_c45_oatc14_get_sqi(struct phy_device *phydev); /* The gen10g_* functions are the old Clause 45 stub */ int gen10g_config_aneg(struct phy_device *phydev); diff --git a/include/net/dsa.h b/include/net/dsa.h index e40cdc12f7f3..cced1a866757 100644 --- a/include/net/dsa.h +++ b/include/net/dsa.h @@ -1322,6 +1322,15 @@ bool dsa_mdb_present_in_other_db(struct dsa_switch *ds, int port, const struct switchdev_obj_port_mdb *mdb, struct dsa_db db); +int dsa_port_simple_hsr_validate(struct dsa_switch *ds, int port, + struct net_device *hsr, + struct netlink_ext_ack *extack); +int dsa_port_simple_hsr_join(struct dsa_switch *ds, int port, + struct net_device *hsr, + struct netlink_ext_ack *extack); +int dsa_port_simple_hsr_leave(struct dsa_switch *ds, int port, + struct net_device *hsr); + /* Keep inline for faster access in hot path */ static inline bool netdev_uses_dsa(const struct net_device *dev) { diff --git a/include/net/mana/gdma.h b/include/net/mana/gdma.h index a4cf307859f8..eaa27483f99b 100644 --- a/include/net/mana/gdma.h +++ b/include/net/mana/gdma.h @@ -382,6 +382,10 @@ struct gdma_irq_context { char name[MANA_IRQ_NAME_SZ]; }; +enum gdma_context_flags { + GC_PROBE_SUCCEEDED = 0, +}; + struct gdma_context { struct device *dev; struct dentry *mana_pci_debugfs; @@ -430,6 +434,8 @@ struct gdma_context { u64 pf_cap_flags1; struct workqueue_struct *service_wq; + + unsigned long flags; }; static inline bool mana_gd_is_mana(struct gdma_dev *gd) @@ -600,6 +606,9 @@ enum { /* Driver can send HWC periodically to query stats */ #define 
GDMA_DRV_CAP_FLAG_1_PERIODIC_STATS_QUERY BIT(21) +/* Driver can handle hardware recovery events during probe */ +#define GDMA_DRV_CAP_FLAG_1_PROBE_RECOVERY BIT(22) + #define GDMA_DRV_CAP_FLAGS1 \ (GDMA_DRV_CAP_FLAG_1_EQ_SHARING_MULTI_VPORT | \ GDMA_DRV_CAP_FLAG_1_NAPI_WKDONE_FIX | \ @@ -611,7 +620,8 @@ enum { GDMA_DRV_CAP_FLAG_1_HANDLE_RECONFIG_EQE | \ GDMA_DRV_CAP_FLAG_1_HW_VPORT_LINK_AWARE | \ GDMA_DRV_CAP_FLAG_1_PERIODIC_STATS_QUERY | \ - GDMA_DRV_CAP_FLAG_1_SKB_LINEARIZE) + GDMA_DRV_CAP_FLAG_1_SKB_LINEARIZE | \ + GDMA_DRV_CAP_FLAG_1_PROBE_RECOVERY) #define GDMA_DRV_CAP_FLAGS2 0 diff --git a/net/dsa/dsa.c b/net/dsa/dsa.c index 5b01a0e43ebe..a20efabe778f 100644 --- a/net/dsa/dsa.c +++ b/net/dsa/dsa.c @@ -9,6 +9,7 @@ #include <linux/device.h> #include <linux/err.h> +#include <linux/if_hsr.h> #include <linux/list.h> #include <linux/module.h> #include <linux/netdevice.h> @@ -1766,6 +1767,70 @@ bool dsa_mdb_present_in_other_db(struct dsa_switch *ds, int port, } EXPORT_SYMBOL_GPL(dsa_mdb_present_in_other_db); +/* Helpers for switches without specific HSR offloads, but which can implement + * NETIF_F_HW_HSR_DUP because their tagger uses dsa_xmit_port_mask() + */ +int dsa_port_simple_hsr_validate(struct dsa_switch *ds, int port, + struct net_device *hsr, + struct netlink_ext_ack *extack) +{ + enum hsr_port_type type; + int err; + + err = hsr_get_port_type(hsr, dsa_to_port(ds, port)->user, &type); + if (err) + return err; + + if (type != HSR_PT_SLAVE_A && type != HSR_PT_SLAVE_B) { + NL_SET_ERR_MSG_MOD(extack, + "Only HSR slave ports can be offloaded"); + return -EOPNOTSUPP; + } + + return 0; +} +EXPORT_SYMBOL_GPL(dsa_port_simple_hsr_validate); + +int dsa_port_simple_hsr_join(struct dsa_switch *ds, int port, + struct net_device *hsr, + struct netlink_ext_ack *extack) +{ + struct dsa_port *dp = dsa_to_port(ds, port), *other_dp; + int err; + + err = dsa_port_simple_hsr_validate(ds, port, hsr, extack); + if (err) + return err; + + dsa_hsr_foreach_port(other_dp, ds, hsr) { + if (other_dp != dp) { + dp->user->features |= NETIF_F_HW_HSR_DUP; + other_dp->user->features |= NETIF_F_HW_HSR_DUP; + break; + } + } + + return 0; +} +EXPORT_SYMBOL_GPL(dsa_port_simple_hsr_join); + +int dsa_port_simple_hsr_leave(struct dsa_switch *ds, int port, + struct net_device *hsr) +{ + struct dsa_port *dp = dsa_to_port(ds, port), *other_dp; + + dsa_hsr_foreach_port(other_dp, ds, hsr) { + if (other_dp != dp) { + dp->user->features &= ~NETIF_F_HW_HSR_DUP; + other_dp->user->features &= ~NETIF_F_HW_HSR_DUP; + break; + } + } + + return 0; +} +EXPORT_SYMBOL_GPL(dsa_port_simple_hsr_leave); + static const struct dsa_stubs __dsa_stubs = { .conduit_hwtstamp_validate = __dsa_conduit_hwtstamp_validate, }; diff --git a/net/dsa/port.c b/net/dsa/port.c index 082573ae6864..ca3a7f52229b 100644 --- a/net/dsa/port.c +++ b/net/dsa/port.c @@ -1909,6 +1909,9 @@ void dsa_port_hsr_leave(struct dsa_port *dp, struct net_device *hsr) struct dsa_switch *ds = dp->ds; int err; + if (!dp->hsr_dev) + return; + dp->hsr_dev = NULL; if (ds->ops->port_hsr_leave) { diff --git a/net/hsr/hsr_device.c b/net/hsr/hsr_device.c index 492cbc78ab75..d1bfc49b5f01 100644 --- a/net/hsr/hsr_device.c +++ b/net/hsr/hsr_device.c @@ -690,6 +690,26 @@ struct net_device *hsr_get_port_ndev(struct net_device *ndev, } EXPORT_SYMBOL(hsr_get_port_ndev); +int hsr_get_port_type(struct net_device *hsr_dev, struct net_device *dev, + enum hsr_port_type *type) +{ + struct hsr_priv *hsr = netdev_priv(hsr_dev); + struct hsr_port *port; + + rcu_read_lock(); + hsr_for_each_port(hsr, port) { + 
if (port->dev == dev) { + *type = port->type; + rcu_read_unlock(); + return 0; + } + } + rcu_read_unlock(); + + return -EINVAL; +} +EXPORT_SYMBOL(hsr_get_port_type); + /* Default multicast address for HSR Supervision frames */ static const unsigned char def_multicast_addr[ETH_ALEN] __aligned(2) = { 0x01, 0x15, 0x4e, 0x00, 0x01, 0x00 diff --git a/net/hsr/hsr_slave.c b/net/hsr/hsr_slave.c index 8177ac6c2d26..afe06ba00ea4 100644 --- a/net/hsr/hsr_slave.c +++ b/net/hsr/hsr_slave.c @@ -207,14 +207,14 @@ int hsr_add_port(struct hsr_priv *hsr, struct net_device *dev, port->type = type; ether_addr_copy(port->original_macaddress, dev->dev_addr); + list_add_tail_rcu(&port->port_list, &hsr->ports); + if (type != HSR_PT_MASTER) { res = hsr_portdev_setup(hsr, dev, port, extack); if (res) goto fail_dev_setup; } - list_add_tail_rcu(&port->port_list, &hsr->ports); - master = hsr_port_get_hsr(hsr, HSR_PT_MASTER); netdev_update_features(master->dev); dev_set_mtu(master->dev, hsr_get_max_mtu(hsr)); @@ -222,7 +222,8 @@ int hsr_add_port(struct hsr_priv *hsr, struct net_device *dev, return 0; fail_dev_setup: - kfree(port); + list_del_rcu(&port->port_list); + kfree_rcu(port, rcu); return res; } diff --git a/net/l2tp/l2tp_debugfs.c b/net/l2tp/l2tp_debugfs.c index 2d0c8275a3a8..5cfaab7d0890 100644 --- a/net/l2tp/l2tp_debugfs.c +++ b/net/l2tp/l2tp_debugfs.c @@ -163,7 +163,7 @@ static void l2tp_dfs_seq_tunnel_show(struct seq_file *m, void *v) seq_printf(m, " %d sessions, refcnt %d/%d\n", session_count, tunnel->sock ? refcount_read(&tunnel->sock->sk_refcnt) : 0, refcount_read(&tunnel->ref_count)); - seq_printf(m, " %08x rx %ld/%ld/%ld rx %ld/%ld/%ld\n", + seq_printf(m, " %08x tx %ld/%ld/%ld rx %ld/%ld/%ld\n", 0, atomic_long_read(&tunnel->stats.tx_packets), atomic_long_read(&tunnel->stats.tx_bytes), diff --git a/net/mctp/test/route-test.c b/net/mctp/test/route-test.c index be9149ac79dd..75ea96c10e49 100644 --- a/net/mctp/test/route-test.c +++ b/net/mctp/test/route-test.c @@ -20,7 +20,6 @@ struct mctp_frag_test { static void mctp_test_fragment(struct kunit *test) { const struct mctp_frag_test *params; - struct mctp_test_pktqueue tpq; int rc, i, n, mtu, msgsize; struct mctp_test_dev *dev; struct mctp_dst dst; @@ -43,13 +42,12 @@ static void mctp_test_fragment(struct kunit *test) dev = mctp_test_create_dev(); KUNIT_ASSERT_NOT_ERR_OR_NULL(test, dev); - mctp_test_dst_setup(test, &dst, dev, &tpq, mtu); + mctp_test_dst_setup(test, &dst, dev, mtu); rc = mctp_do_fragment_route(&dst, skb, mtu, MCTP_TAG_OWNER); KUNIT_EXPECT_FALSE(test, rc); - n = tpq.pkts.qlen; - + n = dev->pkts.qlen; KUNIT_EXPECT_EQ(test, n, params->n_frags); for (i = 0;; i++) { @@ -61,8 +59,7 @@ static void mctp_test_fragment(struct kunit *test) first = i == 0; last = i == (n - 1); - skb2 = skb_dequeue(&tpq.pkts); - + skb2 = skb_dequeue(&dev->pkts); if (!skb2) break; @@ -99,7 +96,7 @@ static void mctp_test_fragment(struct kunit *test) kfree_skb(skb2); } - mctp_test_dst_release(&dst, &tpq); + mctp_dst_release(&dst); mctp_test_destroy_dev(dev); } @@ -130,13 +127,11 @@ struct mctp_rx_input_test { static void mctp_test_rx_input(struct kunit *test) { const struct mctp_rx_input_test *params; - struct mctp_test_pktqueue tpq; struct mctp_test_route *rt; struct mctp_test_dev *dev; struct sk_buff *skb; params = test->param_value; - test->priv = &tpq; dev = mctp_test_create_dev(); KUNIT_ASSERT_NOT_ERR_OR_NULL(test, dev); @@ -147,13 +142,10 @@ static void mctp_test_rx_input(struct kunit *test) skb = mctp_test_create_skb(¶ms->hdr, 1); KUNIT_ASSERT_NOT_ERR_OR_NULL(test, 
skb); - mctp_test_pktqueue_init(&tpq); - mctp_pkttype_receive(skb, dev->ndev, &mctp_packet_type, NULL); - KUNIT_EXPECT_EQ(test, !!tpq.pkts.qlen, params->input); + KUNIT_EXPECT_EQ(test, !!dev->pkts.qlen, params->input); - skb_queue_purge(&tpq.pkts); mctp_test_route_destroy(test, rt); mctp_test_destroy_dev(dev); } @@ -182,7 +174,6 @@ KUNIT_ARRAY_PARAM(mctp_rx_input, mctp_rx_input_tests, static void __mctp_route_test_init(struct kunit *test, struct mctp_test_dev **devp, struct mctp_dst *dst, - struct mctp_test_pktqueue *tpq, struct socket **sockp, unsigned int netid) { @@ -196,7 +187,7 @@ static void __mctp_route_test_init(struct kunit *test, if (netid != MCTP_NET_ANY) WRITE_ONCE(dev->mdev->net, netid); - mctp_test_dst_setup(test, dst, dev, tpq, 68); + mctp_test_dst_setup(test, dst, dev, 68); rc = sock_create_kern(&init_net, AF_MCTP, SOCK_DGRAM, 0, &sock); KUNIT_ASSERT_EQ(test, rc, 0); @@ -215,11 +206,10 @@ static void __mctp_route_test_init(struct kunit *test, static void __mctp_route_test_fini(struct kunit *test, struct mctp_test_dev *dev, struct mctp_dst *dst, - struct mctp_test_pktqueue *tpq, struct socket *sock) { sock_release(sock); - mctp_test_dst_release(dst, tpq); + mctp_dst_release(dst); mctp_test_destroy_dev(dev); } @@ -232,7 +222,6 @@ struct mctp_route_input_sk_test { static void mctp_test_route_input_sk(struct kunit *test) { const struct mctp_route_input_sk_test *params; - struct mctp_test_pktqueue tpq; struct sk_buff *skb, *skb2; struct mctp_test_dev *dev; struct mctp_dst dst; @@ -241,13 +230,12 @@ static void mctp_test_route_input_sk(struct kunit *test) params = test->param_value; - __mctp_route_test_init(test, &dev, &dst, &tpq, &sock, MCTP_NET_ANY); + __mctp_route_test_init(test, &dev, &dst, &sock, MCTP_NET_ANY); skb = mctp_test_create_skb_data(¶ms->hdr, ¶ms->type); KUNIT_ASSERT_NOT_ERR_OR_NULL(test, skb); mctp_test_skb_set_dev(skb, dev); - mctp_test_pktqueue_init(&tpq); rc = mctp_dst_input(&dst, skb); @@ -266,7 +254,7 @@ static void mctp_test_route_input_sk(struct kunit *test) KUNIT_EXPECT_NULL(test, skb2); } - __mctp_route_test_fini(test, dev, &dst, &tpq, sock); + __mctp_route_test_fini(test, dev, &dst, sock); } #define FL_S (MCTP_HDR_FLAG_SOM) @@ -303,7 +291,6 @@ struct mctp_route_input_sk_reasm_test { static void mctp_test_route_input_sk_reasm(struct kunit *test) { const struct mctp_route_input_sk_reasm_test *params; - struct mctp_test_pktqueue tpq; struct sk_buff *skb, *skb2; struct mctp_test_dev *dev; struct mctp_dst dst; @@ -313,7 +300,7 @@ static void mctp_test_route_input_sk_reasm(struct kunit *test) params = test->param_value; - __mctp_route_test_init(test, &dev, &dst, &tpq, &sock, MCTP_NET_ANY); + __mctp_route_test_init(test, &dev, &dst, &sock, MCTP_NET_ANY); for (i = 0; i < params->n_hdrs; i++) { c = i; @@ -336,7 +323,7 @@ static void mctp_test_route_input_sk_reasm(struct kunit *test) KUNIT_EXPECT_NULL(test, skb2); } - __mctp_route_test_fini(test, dev, &dst, &tpq, sock); + __mctp_route_test_fini(test, dev, &dst, sock); } #define RX_FRAG(f, s) RX_HDR(1, 10, 8, FL_TO | (f) | ((s) << MCTP_HDR_SEQ_SHIFT)) @@ -438,7 +425,6 @@ struct mctp_route_input_sk_keys_test { static void mctp_test_route_input_sk_keys(struct kunit *test) { const struct mctp_route_input_sk_keys_test *params; - struct mctp_test_pktqueue tpq; struct sk_buff *skb, *skb2; struct mctp_test_dev *dev; struct mctp_sk_key *key; @@ -457,7 +443,7 @@ static void mctp_test_route_input_sk_keys(struct kunit *test) KUNIT_ASSERT_NOT_ERR_OR_NULL(test, dev); net = READ_ONCE(dev->mdev->net); - mctp_test_dst_setup(test, 
&dst, dev, &tpq, 68); + mctp_test_dst_setup(test, &dst, dev, 68); rc = sock_create_kern(&init_net, AF_MCTP, SOCK_DGRAM, 0, &sock); KUNIT_ASSERT_EQ(test, rc, 0); @@ -497,7 +483,7 @@ static void mctp_test_route_input_sk_keys(struct kunit *test) skb_free_datagram(sock->sk, skb2); mctp_key_unref(key); - __mctp_route_test_fini(test, dev, &dst, &tpq, sock); + __mctp_route_test_fini(test, dev, &dst, sock); } static const struct mctp_route_input_sk_keys_test mctp_route_input_sk_keys_tests[] = { @@ -572,7 +558,6 @@ KUNIT_ARRAY_PARAM(mctp_route_input_sk_keys, mctp_route_input_sk_keys_tests, struct test_net { unsigned int netid; struct mctp_test_dev *dev; - struct mctp_test_pktqueue tpq; struct mctp_dst dst; struct socket *sock; struct sk_buff *skb; @@ -591,20 +576,18 @@ mctp_test_route_input_multiple_nets_bind_init(struct kunit *test, t->msg.data = t->netid; - __mctp_route_test_init(test, &t->dev, &t->dst, &t->tpq, &t->sock, - t->netid); + __mctp_route_test_init(test, &t->dev, &t->dst, &t->sock, t->netid); t->skb = mctp_test_create_skb_data(&hdr, &t->msg); KUNIT_ASSERT_NOT_ERR_OR_NULL(test, t->skb); mctp_test_skb_set_dev(t->skb, t->dev); - mctp_test_pktqueue_init(&t->tpq); } static void mctp_test_route_input_multiple_nets_bind_fini(struct kunit *test, struct test_net *t) { - __mctp_route_test_fini(test, t->dev, &t->dst, &t->tpq, t->sock); + __mctp_route_test_fini(test, t->dev, &t->dst, t->sock); } /* Test that skbs from different nets (otherwise identical) get routed to their @@ -661,8 +644,7 @@ mctp_test_route_input_multiple_nets_key_init(struct kunit *test, t->msg.data = t->netid; - __mctp_route_test_init(test, &t->dev, &t->dst, &t->tpq, &t->sock, - t->netid); + __mctp_route_test_init(test, &t->dev, &t->dst, &t->sock, t->netid); msk = container_of(t->sock->sk, struct mctp_sock, sk); @@ -685,7 +667,7 @@ mctp_test_route_input_multiple_nets_key_fini(struct kunit *test, struct test_net *t) { mctp_key_unref(t->key); - __mctp_route_test_fini(test, t->dev, &t->dst, &t->tpq, t->sock); + __mctp_route_test_fini(test, t->dev, &t->dst, t->sock); } /* test that skbs from different nets (otherwise identical) get routed to their @@ -738,14 +720,13 @@ static void mctp_test_route_input_multiple_nets_key(struct kunit *test) static void mctp_test_route_input_sk_fail_single(struct kunit *test) { const struct mctp_hdr hdr = RX_HDR(1, 10, 8, FL_S | FL_E | FL_TO); - struct mctp_test_pktqueue tpq; struct mctp_test_dev *dev; struct mctp_dst dst; struct socket *sock; struct sk_buff *skb; int rc; - __mctp_route_test_init(test, &dev, &dst, &tpq, &sock, MCTP_NET_ANY); + __mctp_route_test_init(test, &dev, &dst, &sock, MCTP_NET_ANY); /* No rcvbuf space, so delivery should fail. __sock_set_rcvbuf will * clamp the minimum to SOCK_MIN_RCVBUF, so we open-code this. @@ -768,7 +749,7 @@ static void mctp_test_route_input_sk_fail_single(struct kunit *test) KUNIT_EXPECT_EQ(test, refcount_read(&skb->users), 1); kfree_skb(skb); - __mctp_route_test_fini(test, dev, &dst, &tpq, sock); + __mctp_route_test_fini(test, dev, &dst, sock); } /* Input route to socket, using a fragmented message, where sock delivery fails. 
@@ -776,7 +757,6 @@ static void mctp_test_route_input_sk_fail_single(struct kunit *test) static void mctp_test_route_input_sk_fail_frag(struct kunit *test) { const struct mctp_hdr hdrs[2] = { RX_FRAG(FL_S, 0), RX_FRAG(FL_E, 1) }; - struct mctp_test_pktqueue tpq; struct mctp_test_dev *dev; struct sk_buff *skbs[2]; struct mctp_dst dst; @@ -784,7 +764,7 @@ static void mctp_test_route_input_sk_fail_frag(struct kunit *test) unsigned int i; int rc; - __mctp_route_test_init(test, &dev, &dst, &tpq, &sock, MCTP_NET_ANY); + __mctp_route_test_init(test, &dev, &dst, &sock, MCTP_NET_ANY); lock_sock(sock->sk); WRITE_ONCE(sock->sk->sk_rcvbuf, 0); @@ -815,7 +795,7 @@ static void mctp_test_route_input_sk_fail_frag(struct kunit *test) KUNIT_EXPECT_EQ(test, refcount_read(&skbs[1]->users), 1); kfree_skb(skbs[1]); - __mctp_route_test_fini(test, dev, &dst, &tpq, sock); + __mctp_route_test_fini(test, dev, &dst, sock); } /* Input route to socket, using a fragmented message created from clones. @@ -833,7 +813,6 @@ static void mctp_test_route_input_cloned_frag(struct kunit *test) const size_t data_len = 3; /* arbitrary */ u8 compare[3 * ARRAY_SIZE(hdrs)]; u8 flat[3 * ARRAY_SIZE(hdrs)]; - struct mctp_test_pktqueue tpq; struct mctp_test_dev *dev; struct sk_buff *skb[5]; struct sk_buff *rx_skb; @@ -845,7 +824,7 @@ static void mctp_test_route_input_cloned_frag(struct kunit *test) total = data_len + sizeof(struct mctp_hdr); - __mctp_route_test_init(test, &dev, &dst, &tpq, &sock, MCTP_NET_ANY); + __mctp_route_test_init(test, &dev, &dst, &sock, MCTP_NET_ANY); /* Create a single skb initially with concatenated packets */ skb[0] = mctp_test_create_skb(&hdrs[0], 5 * total); @@ -922,7 +901,7 @@ static void mctp_test_route_input_cloned_frag(struct kunit *test) kfree_skb(skb[i]); } - __mctp_route_test_fini(test, dev, &dst, &tpq, sock); + __mctp_route_test_fini(test, dev, &dst, sock); } #if IS_ENABLED(CONFIG_MCTP_FLOWS) @@ -930,7 +909,6 @@ static void mctp_test_route_input_cloned_frag(struct kunit *test) static void mctp_test_flow_init(struct kunit *test, struct mctp_test_dev **devp, struct mctp_dst *dst, - struct mctp_test_pktqueue *tpq, struct socket **sock, struct sk_buff **skbp, unsigned int len) @@ -944,7 +922,7 @@ static void mctp_test_flow_init(struct kunit *test, * mctp_local_output, which will call dst->output on whatever * route we provide */ - __mctp_route_test_init(test, &dev, dst, tpq, sock, MCTP_NET_ANY); + __mctp_route_test_init(test, &dev, dst, sock, MCTP_NET_ANY); /* Assign a single EID. 
->addrs is freed on mctp netdev release */ dev->mdev->addrs = kmalloc(sizeof(u8), GFP_KERNEL); @@ -965,16 +943,14 @@ static void mctp_test_flow_init(struct kunit *test, static void mctp_test_flow_fini(struct kunit *test, struct mctp_test_dev *dev, struct mctp_dst *dst, - struct mctp_test_pktqueue *tpq, struct socket *sock) { - __mctp_route_test_fini(test, dev, dst, tpq, sock); + __mctp_route_test_fini(test, dev, dst, sock); } /* test that an outgoing skb has the correct MCTP extension data set */ static void mctp_test_packet_flow(struct kunit *test) { - struct mctp_test_pktqueue tpq; struct sk_buff *skb, *skb2; struct mctp_test_dev *dev; struct mctp_dst dst; @@ -983,15 +959,15 @@ static void mctp_test_packet_flow(struct kunit *test) u8 dst_eid = 8; int n, rc; - mctp_test_flow_init(test, &dev, &dst, &tpq, &sock, &skb, 30); + mctp_test_flow_init(test, &dev, &dst, &sock, &skb, 30); rc = mctp_local_output(sock->sk, &dst, skb, dst_eid, MCTP_TAG_OWNER); KUNIT_ASSERT_EQ(test, rc, 0); - n = tpq.pkts.qlen; + n = dev->pkts.qlen; KUNIT_ASSERT_EQ(test, n, 1); - skb2 = skb_dequeue(&tpq.pkts); + skb2 = skb_dequeue(&dev->pkts); KUNIT_ASSERT_NOT_ERR_OR_NULL(test, skb2); flow = skb_ext_find(skb2, SKB_EXT_MCTP); @@ -1000,7 +976,7 @@ static void mctp_test_packet_flow(struct kunit *test) KUNIT_ASSERT_PTR_EQ(test, flow->key->sk, sock->sk); kfree_skb(skb2); - mctp_test_flow_fini(test, dev, &dst, &tpq, sock); + mctp_test_flow_fini(test, dev, &dst, sock); } /* test that outgoing skbs, after fragmentation, all have the correct MCTP @@ -1008,7 +984,6 @@ static void mctp_test_packet_flow(struct kunit *test) */ static void mctp_test_fragment_flow(struct kunit *test) { - struct mctp_test_pktqueue tpq; struct mctp_flow *flows[2]; struct sk_buff *tx_skbs[2]; struct mctp_test_dev *dev; @@ -1018,17 +993,17 @@ static void mctp_test_fragment_flow(struct kunit *test) u8 dst_eid = 8; int n, rc; - mctp_test_flow_init(test, &dev, &dst, &tpq, &sock, &skb, 100); + mctp_test_flow_init(test, &dev, &dst, &sock, &skb, 100); rc = mctp_local_output(sock->sk, &dst, skb, dst_eid, MCTP_TAG_OWNER); KUNIT_ASSERT_EQ(test, rc, 0); - n = tpq.pkts.qlen; + n = dev->pkts.qlen; KUNIT_ASSERT_EQ(test, n, 2); /* both resulting packets should have the same flow data */ - tx_skbs[0] = skb_dequeue(&tpq.pkts); - tx_skbs[1] = skb_dequeue(&tpq.pkts); + tx_skbs[0] = skb_dequeue(&dev->pkts); + tx_skbs[1] = skb_dequeue(&dev->pkts); KUNIT_ASSERT_NOT_ERR_OR_NULL(test, tx_skbs[0]); KUNIT_ASSERT_NOT_ERR_OR_NULL(test, tx_skbs[1]); @@ -1044,7 +1019,7 @@ static void mctp_test_fragment_flow(struct kunit *test) kfree_skb(tx_skbs[0]); kfree_skb(tx_skbs[1]); - mctp_test_flow_fini(test, dev, &dst, &tpq, sock); + mctp_test_flow_fini(test, dev, &dst, sock); } #else @@ -1063,7 +1038,6 @@ static void mctp_test_fragment_flow(struct kunit *test) static void mctp_test_route_output_key_create(struct kunit *test) { const u8 dst_eid = 26, src_eid = 15; - struct mctp_test_pktqueue tpq; const unsigned int netid = 50; struct mctp_test_dev *dev; struct mctp_sk_key *key; @@ -1080,7 +1054,7 @@ static void mctp_test_route_output_key_create(struct kunit *test) KUNIT_ASSERT_NOT_ERR_OR_NULL(test, dev); WRITE_ONCE(dev->mdev->net, netid); - mctp_test_dst_setup(test, &dst, dev, &tpq, 68); + mctp_test_dst_setup(test, &dst, dev, 68); rc = sock_create_kern(&init_net, AF_MCTP, SOCK_DGRAM, 0, &sock); KUNIT_ASSERT_EQ(test, rc, 0); @@ -1127,14 +1101,13 @@ static void mctp_test_route_output_key_create(struct kunit *test) KUNIT_EXPECT_FALSE(test, key->tag & MCTP_TAG_OWNER); sock_release(sock); - 
mctp_test_dst_release(&dst, &tpq); + mctp_dst_release(&dst); mctp_test_destroy_dev(dev); } static void mctp_test_route_extaddr_input(struct kunit *test) { static const unsigned char haddr[] = { 0xaa, 0x55 }; - struct mctp_test_pktqueue tpq; struct mctp_skb_cb *cb, *cb2; const unsigned int len = 40; struct mctp_test_dev *dev; @@ -1149,7 +1122,7 @@ static void mctp_test_route_extaddr_input(struct kunit *test) hdr.dest = 8; hdr.flags_seq_tag = FL_S | FL_E | FL_TO; - __mctp_route_test_init(test, &dev, &dst, &tpq, &sock, MCTP_NET_ANY); + __mctp_route_test_init(test, &dev, &dst, &sock, MCTP_NET_ANY); skb = mctp_test_create_skb(&hdr, len); KUNIT_ASSERT_NOT_ERR_OR_NULL(test, skb); @@ -1178,7 +1151,7 @@ static void mctp_test_route_extaddr_input(struct kunit *test) KUNIT_EXPECT_MEMEQ(test, cb2->haddr, haddr, sizeof(haddr)); kfree_skb(skb2); - __mctp_route_test_fini(test, dev, &dst, &tpq, sock); + __mctp_route_test_fini(test, dev, &dst, sock); } static void mctp_test_route_gw_lookup(struct kunit *test) @@ -1530,14 +1503,13 @@ static void mctp_test_bind_lookup(struct kunit *test) struct socket *socks[ARRAY_SIZE(lookup_binds)]; struct sk_buff *skb_pkt = NULL, *skb_sock = NULL; struct socket *sock_ty0, *sock_expect = NULL; - struct mctp_test_pktqueue tpq; struct mctp_test_dev *dev; struct mctp_dst dst; int rc; rx = test->param_value; - __mctp_route_test_init(test, &dev, &dst, &tpq, &sock_ty0, rx->net); + __mctp_route_test_init(test, &dev, &dst, &sock_ty0, rx->net); /* Create all binds */ for (size_t i = 0; i < ARRAY_SIZE(lookup_binds); i++) { mctp_test_bind_run(test, &lookup_binds[i], @@ -1557,7 +1529,6 @@ static void mctp_test_bind_lookup(struct kunit *test) skb_pkt = mctp_test_create_skb_data(&rx->hdr, &rx->ty); KUNIT_ASSERT_NOT_ERR_OR_NULL(test, skb_pkt); mctp_test_skb_set_dev(skb_pkt, dev); - mctp_test_pktqueue_init(&tpq); rc = mctp_dst_input(&dst, skb_pkt); if (rx->expect) { @@ -1591,7 +1562,7 @@ cleanup: for (size_t i = 0; i < ARRAY_SIZE(lookup_binds); i++) sock_release(socks[i]); - __mctp_route_test_fini(test, dev, &dst, &tpq, sock_ty0); + __mctp_route_test_fini(test, dev, &dst, sock_ty0); } static struct kunit_case mctp_test_cases[] = { diff --git a/net/mctp/test/utils.c b/net/mctp/test/utils.c index 35f6be814567..37f1ba62a2ab 100644 --- a/net/mctp/test/utils.c +++ b/net/mctp/test/utils.c @@ -13,7 +13,10 @@ static netdev_tx_t mctp_test_dev_tx(struct sk_buff *skb, struct net_device *ndev) { - kfree_skb(skb); + struct mctp_test_dev *dev = netdev_priv(ndev); + + skb_queue_tail(&dev->pkts, skb); + return NETDEV_TX_OK; } @@ -26,7 +29,7 @@ static void mctp_test_dev_setup(struct net_device *ndev) ndev->type = ARPHRD_MCTP; ndev->mtu = MCTP_DEV_TEST_MTU; ndev->hard_header_len = 0; - ndev->tx_queue_len = DEFAULT_TX_QUEUE_LEN; + ndev->tx_queue_len = 0; ndev->flags = IFF_NOARP; ndev->netdev_ops = &mctp_test_netdev_ops; ndev->needs_free_netdev = true; @@ -51,6 +54,7 @@ static struct mctp_test_dev *__mctp_test_create_dev(unsigned short lladdr_len, dev->ndev = ndev; ndev->addr_len = lladdr_len; dev_addr_set(ndev, lladdr); + skb_queue_head_init(&dev->pkts); rc = register_netdev(ndev); if (rc) { @@ -63,6 +67,11 @@ static struct mctp_test_dev *__mctp_test_create_dev(unsigned short lladdr_len, dev->mdev->net = mctp_default_net(dev_net(ndev)); rcu_read_unlock(); + /* bring the device up; we want to be able to TX immediately */ + rtnl_lock(); + dev_open(ndev, NULL); + rtnl_unlock(); + return dev; } @@ -79,26 +88,15 @@ struct mctp_test_dev *mctp_test_create_dev_lladdr(unsigned short lladdr_len, void 
mctp_test_destroy_dev(struct mctp_test_dev *dev) { + skb_queue_purge(&dev->pkts); mctp_dev_put(dev->mdev); unregister_netdev(dev->ndev); } -static const unsigned int test_pktqueue_magic = 0x5f713aef; - -void mctp_test_pktqueue_init(struct mctp_test_pktqueue *tpq) -{ - tpq->magic = test_pktqueue_magic; - skb_queue_head_init(&tpq->pkts); -} - static int mctp_test_dst_output(struct mctp_dst *dst, struct sk_buff *skb) { - struct kunit *test = current->kunit_test; - struct mctp_test_pktqueue *tpq = test->priv; - - KUNIT_ASSERT_EQ(test, tpq->magic, test_pktqueue_magic); - - skb_queue_tail(&tpq->pkts, skb); + skb->dev = dst->dev->dev; + dev_queue_xmit(skb); return 0; } @@ -169,11 +167,9 @@ struct mctp_test_route *mctp_test_create_route_gw(struct net *net, return rt; } -/* Convenience function for our test dst; release with mctp_test_dst_release() - */ +/* Convenience function for our test dst; release with mctp_dst_release() */ void mctp_test_dst_setup(struct kunit *test, struct mctp_dst *dst, - struct mctp_test_dev *dev, - struct mctp_test_pktqueue *tpq, unsigned int mtu) + struct mctp_test_dev *dev, unsigned int mtu) { KUNIT_EXPECT_NOT_ERR_OR_NULL(test, dev); @@ -183,15 +179,6 @@ void mctp_test_dst_setup(struct kunit *test, struct mctp_dst *dst, __mctp_dev_get(dst->dev->dev); dst->mtu = mtu; dst->output = mctp_test_dst_output; - mctp_test_pktqueue_init(tpq); - test->priv = tpq; -} - -void mctp_test_dst_release(struct mctp_dst *dst, - struct mctp_test_pktqueue *tpq) -{ - mctp_dst_release(dst); - skb_queue_purge(&tpq->pkts); } void mctp_test_route_destroy(struct kunit *test, struct mctp_test_route *rt) diff --git a/net/mctp/test/utils.h b/net/mctp/test/utils.h index 06bdb6cb5eff..4cc90c9da4d1 100644 --- a/net/mctp/test/utils.h +++ b/net/mctp/test/utils.h @@ -18,6 +18,8 @@ struct mctp_test_dev { unsigned short lladdr_len; unsigned char lladdr[MAX_ADDR_LEN]; + + struct sk_buff_head pkts; }; struct mctp_test_dev; @@ -26,11 +28,6 @@ struct mctp_test_route { struct mctp_route rt; }; -struct mctp_test_pktqueue { - unsigned int magic; - struct sk_buff_head pkts; -}; - struct mctp_test_bind_setup { mctp_eid_t bind_addr; int bind_net; @@ -59,11 +56,7 @@ struct mctp_test_route *mctp_test_create_route_gw(struct net *net, mctp_eid_t gw, unsigned int mtu); void mctp_test_dst_setup(struct kunit *test, struct mctp_dst *dst, - struct mctp_test_dev *dev, - struct mctp_test_pktqueue *tpq, unsigned int mtu); -void mctp_test_dst_release(struct mctp_dst *dst, - struct mctp_test_pktqueue *tpq); -void mctp_test_pktqueue_init(struct mctp_test_pktqueue *tpq); + struct mctp_test_dev *dev, unsigned int mtu); void mctp_test_route_destroy(struct kunit *test, struct mctp_test_route *rt); void mctp_test_skb_set_dev(struct sk_buff *skb, struct mctp_test_dev *dev); struct sk_buff *mctp_test_create_skb(const struct mctp_hdr *hdr, diff --git a/tools/testing/selftests/drivers/net/lib/sh/lib_netcons.sh b/tools/testing/selftests/drivers/net/lib/sh/lib_netcons.sh index 87f89fd92f8c..ae8abff4be40 100644 --- a/tools/testing/selftests/drivers/net/lib/sh/lib_netcons.sh +++ b/tools/testing/selftests/drivers/net/lib/sh/lib_netcons.sh @@ -249,7 +249,7 @@ function listen_port_and_save_to() { # Just wait for 2 seconds timeout 2 ip netns exec "${NAMESPACE}" \ - socat "${SOCAT_MODE}":"${PORT}",fork "${OUTPUT}" + socat "${SOCAT_MODE}":"${PORT}",fork "${OUTPUT}" 2> /dev/null } # Only validate that the message arrived properly diff --git a/tools/testing/selftests/net/arp_ndisc_evict_nocarrier.sh 
b/tools/testing/selftests/net/arp_ndisc_evict_nocarrier.sh index 92eb880c52f2..00758f00efbf 100755 --- a/tools/testing/selftests/net/arp_ndisc_evict_nocarrier.sh +++ b/tools/testing/selftests/net/arp_ndisc_evict_nocarrier.sh @@ -75,7 +75,7 @@ setup_v4() { ip neigh get $V4_ADDR1 dev veth0 >/dev/null 2>&1 if [ $? -ne 0 ]; then cleanup_v4 - echo "failed" + echo "failed; is the system using MACAddressPolicy=persistent?" exit 1 fi diff --git a/tools/testing/selftests/net/lib/py/ksft.py b/tools/testing/selftests/net/lib/py/ksft.py index ebd82940ee50..531e7fa1b3ea 100644 --- a/tools/testing/selftests/net/lib/py/ksft.py +++ b/tools/testing/selftests/net/lib/py/ksft.py @@ -163,7 +163,7 @@ def ksft_flush_defer(): entry = global_defer_queue.pop() try: entry.exec_only() - except BaseException: + except Exception: ksft_pr(f"Exception while handling defer / cleanup (callback {i} of {qlen_start})!") tb = traceback.format_exc() for line in tb.strip().split('\n'): @@ -333,7 +333,21 @@ def ksft_run(cases=None, globs=None, case_pfx=None, args=()): KSFT_RESULT = False cnt_key = 'fail' - ksft_flush_defer() + try: + ksft_flush_defer() + except BaseException as e: + tb = traceback.format_exc() + for line in tb.strip().split('\n'): + ksft_pr("Exception|", line) + if isinstance(e, KeyboardInterrupt): + ksft_pr() + ksft_pr("WARN: defer() interrupted, cleanup may be incomplete.") + ksft_pr(" Attempting to finish cleanup before exiting.") + ksft_pr(" Interrupt again to exit immediately.") + ksft_pr() + stop = True + # Flush was interrupted, try to finish the job as best we can + ksft_flush_defer() if not cnt_key: cnt_key = 'pass' if KSFT_RESULT else 'fail'
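
The shift-and-mask in genphy_c45_oatc14_get_sqi() is easiest to see with concrete numbers. The sketch below is a standalone userspace model, not driver code: the helper name and register value are hypothetical, and the real function masks with OATC14_DCQ_SQIPLUS_VALUE before shifting. It assumes a PHY that reported sqiplus_bits = 5.

#include <stdio.h>

/* Standalone model of the SQI+ extraction in genphy_c45_oatc14_get_sqi():
 * the level occupies the N most significant bits of the 8-bit register,
 * so a right shift by (8 - N) isolates it.
 */
static unsigned int oatc14_sqiplus_level(unsigned int reg,
					 unsigned int sqiplus_bits)
{
	unsigned int shift = 8 - sqiplus_bits;

	return (reg & 0xff) >> shift;
}

int main(void)
{
	/* 0xa7 = 0b10100111; with 5 SQI+ bits the level is 0b10100 = 20 */
	printf("%u\n", oatc14_sqiplus_level(0xa7, 5));
	return 0;
}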
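
For a switch whose tagger already transmits via dsa_xmit_port_mask(), adopting the generic helpers added in net/dsa/dsa.c above is a matter of pointing the switch ops at them. A minimal sketch follows, assuming a hypothetical driver "foo" with all unrelated ops elided; only dsa_port_simple_hsr_join() and dsa_port_simple_hsr_leave() are symbols from this series, matching the prototypes added to include/net/dsa.h.

/* Hypothetical driver glue; the ops structure name is illustrative. */
static const struct dsa_switch_ops foo_switch_ops = {
	/* ... other ops elided ... */
	.port_hsr_join	= dsa_port_simple_hsr_join,
	.port_hsr_leave	= dsa_port_simple_hsr_leave,
};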
