Diffstat (limited to 'drivers/net/ethernet/mellanox/mlxsw')
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/core.c               |  26
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/core.h               |  42
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/pci.c                | 135
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/reg.h                |  29
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/spectrum.c           | 724
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/spectrum.h           |  41
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c   |  15
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c    | 478
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c | 141
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/switchx2.c           |   6
10 files changed, 1039 insertions, 598 deletions
diff --git a/drivers/net/ethernet/mellanox/mlxsw/core.c b/drivers/net/ethernet/mellanox/mlxsw/core.c
index 068ee65a960b..aa33d58b9f81 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/core.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/core.c
@@ -1100,10 +1100,15 @@ int mlxsw_core_bus_device_register(const struct mlxsw_bus_info *mlxsw_bus_info,
goto err_alloc_stats;
}
- if (mlxsw_driver->profile->used_max_lag &&
- mlxsw_driver->profile->used_max_port_per_lag) {
- alloc_size = sizeof(u8) * mlxsw_driver->profile->max_lag *
- mlxsw_driver->profile->max_port_per_lag;
+ err = mlxsw_bus->init(bus_priv, mlxsw_core, mlxsw_driver->profile,
+ &mlxsw_core->resources);
+ if (err)
+ goto err_bus_init;
+
+ if (mlxsw_core->resources.max_lag_valid &&
+ mlxsw_core->resources.max_ports_in_lag_valid) {
+ alloc_size = sizeof(u8) * mlxsw_core->resources.max_lag *
+ mlxsw_core->resources.max_ports_in_lag;
mlxsw_core->lag.mapping = kzalloc(alloc_size, GFP_KERNEL);
if (!mlxsw_core->lag.mapping) {
err = -ENOMEM;
@@ -1111,11 +1116,6 @@ int mlxsw_core_bus_device_register(const struct mlxsw_bus_info *mlxsw_bus_info,
}
}
- err = mlxsw_bus->init(bus_priv, mlxsw_core, mlxsw_driver->profile,
- &mlxsw_core->resources);
- if (err)
- goto err_bus_init;
-
err = mlxsw_emad_init(mlxsw_core);
if (err)
goto err_emad_init;
@@ -1146,10 +1146,10 @@ err_hwmon_init:
err_devlink_register:
mlxsw_emad_fini(mlxsw_core);
err_emad_init:
- mlxsw_bus->fini(bus_priv);
-err_bus_init:
kfree(mlxsw_core->lag.mapping);
err_alloc_lag_mapping:
+ mlxsw_bus->fini(bus_priv);
+err_bus_init:
free_percpu(mlxsw_core->pcpu_stats);
err_alloc_stats:
devlink_free(devlink);
@@ -1615,7 +1615,7 @@ EXPORT_SYMBOL(mlxsw_core_skb_receive);
static int mlxsw_core_lag_mapping_index(struct mlxsw_core *mlxsw_core,
u16 lag_id, u8 port_index)
{
- return mlxsw_core->driver->profile->max_port_per_lag * lag_id +
+ return mlxsw_core->resources.max_ports_in_lag * lag_id +
port_index;
}
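
The LAG mapping is kept in the flat byte array allocated earlier in mlxsw_core_bus_device_register() (max_lag * max_ports_in_lag entries), and the helper above indexes it row-major by LAG ID and port index. A minimal sketch of that indexing, using a hypothetical max_ports_in_lag of 16 (the real limit now comes from the device resource query):

/* Sketch only: mirrors mlxsw_core_lag_mapping_index(); the value 16
 * is a hypothetical example, not a driver constant.
 */
static int lag_mapping_index(u8 max_ports_in_lag, u16 lag_id, u8 port_index)
{
	return max_ports_in_lag * lag_id + port_index;	/* row-major */
}

/* e.g. lag_id = 3, port_index = 5, max_ports_in_lag = 16 -> index 53 */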
@@ -1644,7 +1644,7 @@ void mlxsw_core_lag_mapping_clear(struct mlxsw_core *mlxsw_core,
{
int i;
- for (i = 0; i < mlxsw_core->driver->profile->max_port_per_lag; i++) {
+ for (i = 0; i < mlxsw_core->resources.max_ports_in_lag; i++) {
int index = mlxsw_core_lag_mapping_index(mlxsw_core,
lag_id, i);
diff --git a/drivers/net/ethernet/mellanox/mlxsw/core.h b/drivers/net/ethernet/mellanox/mlxsw/core.h
index d3476ead9982..c4f550b6f783 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/core.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/core.h
@@ -87,6 +87,7 @@ struct mlxsw_rx_listener {
void (*func)(struct sk_buff *skb, u8 local_port, void *priv);
u8 local_port;
u16 trap_id;
+ enum mlxsw_reg_hpkt_action action;
};
struct mlxsw_event_listener {
@@ -178,8 +179,6 @@ struct mlxsw_swid_config {
struct mlxsw_config_profile {
u16 used_max_vepa_channels:1,
- used_max_lag:1,
- used_max_port_per_lag:1,
used_max_mid:1,
used_max_pgt:1,
used_max_system_port:1,
@@ -191,10 +190,9 @@ struct mlxsw_config_profile {
used_max_pkey:1,
used_ar_sec:1,
used_adaptive_routing_group_cap:1,
- used_kvd_sizes:1;
+ used_kvd_split_data:1; /* indicates the KVD split data is used */
+
u8 max_vepa_channels;
- u16 max_lag;
- u16 max_port_per_lag;
u16 max_mid;
u16 max_pgt;
u16 max_system_port;
@@ -213,8 +211,9 @@ struct mlxsw_config_profile {
u16 adaptive_routing_group_cap;
u8 arn;
u32 kvd_linear_size;
- u32 kvd_hash_single_size;
- u32 kvd_hash_double_size;
+ u16 kvd_hash_granularity;
+ u8 kvd_hash_single_parts;
+ u8 kvd_hash_double_parts;
u8 resource_query_enable;
struct mlxsw_swid_config swid_config[MLXSW_CONFIG_PROFILE_SWID_COUNT];
};
@@ -268,8 +267,35 @@ struct mlxsw_driver {
};
struct mlxsw_resources {
- u8 max_span_valid:1;
+ u32 max_span_valid:1,
+ max_lag_valid:1,
+ max_ports_in_lag_valid:1,
+ kvd_size_valid:1,
+ kvd_single_min_size_valid:1,
+ kvd_double_min_size_valid:1,
+ max_virtual_routers_valid:1,
+ max_system_ports_valid:1,
+ max_vlan_groups_valid:1,
+ max_regions_valid:1,
+ max_rif_valid:1;
u8 max_span;
+ u8 max_lag;
+ u8 max_ports_in_lag;
+ u32 kvd_size;
+ u32 kvd_single_min_size;
+ u32 kvd_double_min_size;
+ u16 max_virtual_routers;
+ u16 max_system_ports;
+ u16 max_vlan_groups;
+ u16 max_regions;
+ u16 max_rif;
+
+ /* Internal resources.
+ * Determined by the SW, not queried from the HW.
+ */
+ u32 kvd_single_size;
+ u32 kvd_double_size;
+ u32 kvd_linear_size;
};
struct mlxsw_resources *mlxsw_core_resources_get(struct mlxsw_core *mlxsw_core);
diff --git a/drivers/net/ethernet/mellanox/mlxsw/pci.c b/drivers/net/ethernet/mellanox/mlxsw/pci.c
index 1d1360c178bb..e742bd4e8894 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/pci.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/pci.c
@@ -1156,6 +1156,16 @@ mlxsw_pci_config_profile_swid_config(struct mlxsw_pci *mlxsw_pci,
#define MLXSW_RESOURCES_TABLE_END_ID 0xffff
#define MLXSW_MAX_SPAN_ID 0x2420
+#define MLXSW_MAX_LAG_ID 0x2520
+#define MLXSW_MAX_PORTS_IN_LAG_ID 0x2521
+#define MLXSW_KVD_SIZE_ID 0x1001
+#define MLXSW_KVD_SINGLE_MIN_SIZE_ID 0x1002
+#define MLXSW_KVD_DOUBLE_MIN_SIZE_ID 0x1003
+#define MLXSW_MAX_VIRTUAL_ROUTERS_ID 0x2C01
+#define MLXSW_MAX_SYSTEM_PORT_ID 0x2502
+#define MLXSW_MAX_VLAN_GROUPS_ID 0x2906
+#define MLXSW_MAX_REGIONS_ID 0x2901
+#define MLXSW_MAX_RIF_ID 0x2C02
#define MLXSW_RESOURCES_QUERY_MAX_QUERIES 100
#define MLXSW_RESOURCES_PER_QUERY 32
@@ -1167,6 +1177,46 @@ static void mlxsw_pci_resources_query_parse(int id, u64 val,
resources->max_span = val;
resources->max_span_valid = 1;
break;
+ case MLXSW_MAX_LAG_ID:
+ resources->max_lag = val;
+ resources->max_lag_valid = 1;
+ break;
+ case MLXSW_MAX_PORTS_IN_LAG_ID:
+ resources->max_ports_in_lag = val;
+ resources->max_ports_in_lag_valid = 1;
+ break;
+ case MLXSW_KVD_SIZE_ID:
+ resources->kvd_size = val;
+ resources->kvd_size_valid = 1;
+ break;
+ case MLXSW_KVD_SINGLE_MIN_SIZE_ID:
+ resources->kvd_single_min_size = val;
+ resources->kvd_single_min_size_valid = 1;
+ break;
+ case MLXSW_KVD_DOUBLE_MIN_SIZE_ID:
+ resources->kvd_double_min_size = val;
+ resources->kvd_double_min_size_valid = 1;
+ break;
+ case MLXSW_MAX_VIRTUAL_ROUTERS_ID:
+ resources->max_virtual_routers = val;
+ resources->max_virtual_routers_valid = 1;
+ break;
+ case MLXSW_MAX_SYSTEM_PORT_ID:
+ resources->max_system_ports = val;
+ resources->max_system_ports_valid = 1;
+ break;
+ case MLXSW_MAX_VLAN_GROUPS_ID:
+ resources->max_vlan_groups = val;
+ resources->max_vlan_groups_valid = 1;
+ break;
+ case MLXSW_MAX_REGIONS_ID:
+ resources->max_regions = val;
+ resources->max_regions_valid = 1;
+ break;
+ case MLXSW_MAX_RIF_ID:
+ resources->max_rif = val;
+ resources->max_rif_valid = 1;
+ break;
default:
break;
}
@@ -1209,10 +1259,52 @@ static int mlxsw_pci_resources_query(struct mlxsw_pci *mlxsw_pci, char *mbox,
return -EIO;
}
+static int mlxsw_pci_profile_get_kvd_sizes(const struct mlxsw_config_profile *profile,
+ struct mlxsw_resources *resources)
+{
+ u32 singles_size, doubles_size, linear_size;
+
+ if (!resources->kvd_single_min_size_valid ||
+ !resources->kvd_double_min_size_valid ||
+ !profile->used_kvd_split_data)
+ return -EIO;
+
+ linear_size = profile->kvd_linear_size;
+
+ /* The hash part is what is left of the KVD after the
+ * linear part. It is split into the single and double
+ * sizes according to the parts ratio from the profile.
+ * Both sizes must be a multiple of the granularity
+ * from the profile.
+ */
+ doubles_size = (resources->kvd_size - linear_size);
+ doubles_size *= profile->kvd_hash_double_parts;
+ doubles_size /= (profile->kvd_hash_double_parts +
+ profile->kvd_hash_single_parts);
+ doubles_size /= profile->kvd_hash_granularity;
+ doubles_size *= profile->kvd_hash_granularity;
+ singles_size = resources->kvd_size - doubles_size -
+ linear_size;
+
+ /* Check results are legal. */
+ if (singles_size < resources->kvd_single_min_size ||
+ doubles_size < resources->kvd_double_min_size ||
+ resources->kvd_size < linear_size)
+ return -EIO;
+
+ resources->kvd_single_size = singles_size;
+ resources->kvd_double_size = doubles_size;
+ resources->kvd_linear_size = linear_size;
+
+ return 0;
+}
+
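
A short worked example of the split computed above, assuming the device reports a total KVD size of 262144 entries (a hypothetical figure; the real value comes from the resource query) and the Spectrum profile values introduced later in this patch (linear size 65536, single:double parts ratio 2:1, granularity 128):

/* Worked example of mlxsw_pci_profile_get_kvd_sizes(); kvd_size is
 * hypothetical, the other constants match mlxsw_sp_config_profile.
 */
u32 kvd_size = 262144;				/* hypothetical device value */
u32 linear_size = 65536;			/* MLXSW_SP_KVD_LINEAR_SIZE */
u32 doubles_size, singles_size;

doubles_size = (kvd_size - linear_size) * 1 / (1 + 2);	/* 65536 */
doubles_size = doubles_size / 128 * 128;	/* round down to granularity */
singles_size = kvd_size - doubles_size - linear_size;	/* 131072 */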
static int mlxsw_pci_config_profile(struct mlxsw_pci *mlxsw_pci, char *mbox,
- const struct mlxsw_config_profile *profile)
+ const struct mlxsw_config_profile *profile,
+ struct mlxsw_resources *resources)
{
int i;
+ int err;
mlxsw_cmd_mbox_zero(mbox);
@@ -1222,18 +1314,6 @@ static int mlxsw_pci_config_profile(struct mlxsw_pci *mlxsw_pci, char *mbox,
mlxsw_cmd_mbox_config_profile_max_vepa_channels_set(
mbox, profile->max_vepa_channels);
}
- if (profile->used_max_lag) {
- mlxsw_cmd_mbox_config_profile_set_max_lag_set(
- mbox, 1);
- mlxsw_cmd_mbox_config_profile_max_lag_set(
- mbox, profile->max_lag);
- }
- if (profile->used_max_port_per_lag) {
- mlxsw_cmd_mbox_config_profile_set_max_port_per_lag_set(
- mbox, 1);
- mlxsw_cmd_mbox_config_profile_max_port_per_lag_set(
- mbox, profile->max_port_per_lag);
- }
if (profile->used_max_mid) {
mlxsw_cmd_mbox_config_profile_set_max_mid_set(
mbox, 1);
@@ -1310,19 +1390,22 @@ static int mlxsw_pci_config_profile(struct mlxsw_pci *mlxsw_pci, char *mbox,
mlxsw_cmd_mbox_config_profile_adaptive_routing_group_cap_set(
mbox, profile->adaptive_routing_group_cap);
}
- if (profile->used_kvd_sizes) {
- mlxsw_cmd_mbox_config_profile_set_kvd_linear_size_set(
- mbox, 1);
- mlxsw_cmd_mbox_config_profile_kvd_linear_size_set(
- mbox, profile->kvd_linear_size);
- mlxsw_cmd_mbox_config_profile_set_kvd_hash_single_size_set(
- mbox, 1);
- mlxsw_cmd_mbox_config_profile_kvd_hash_single_size_set(
- mbox, profile->kvd_hash_single_size);
+ if (resources->kvd_size_valid) {
+ err = mlxsw_pci_profile_get_kvd_sizes(profile, resources);
+ if (err)
+ return err;
+
+ mlxsw_cmd_mbox_config_profile_set_kvd_linear_size_set(mbox, 1);
+ mlxsw_cmd_mbox_config_profile_kvd_linear_size_set(mbox,
+ resources->kvd_linear_size);
+ mlxsw_cmd_mbox_config_profile_set_kvd_hash_single_size_set(mbox,
+ 1);
+ mlxsw_cmd_mbox_config_profile_kvd_hash_single_size_set(mbox,
+ resources->kvd_single_size);
mlxsw_cmd_mbox_config_profile_set_kvd_hash_double_size_set(
- mbox, 1);
- mlxsw_cmd_mbox_config_profile_kvd_hash_double_size_set(
- mbox, profile->kvd_hash_double_size);
+ mbox, 1);
+ mlxsw_cmd_mbox_config_profile_kvd_hash_double_size_set(mbox,
+ resources->kvd_double_size);
}
for (i = 0; i < MLXSW_CONFIG_PROFILE_SWID_COUNT; i++)
@@ -1524,7 +1607,7 @@ static int mlxsw_pci_init(void *bus_priv, struct mlxsw_core *mlxsw_core,
if (err)
goto err_query_resources;
- err = mlxsw_pci_config_profile(mlxsw_pci, mbox, profile);
+ err = mlxsw_pci_config_profile(mlxsw_pci, mbox, profile, resources);
if (err)
goto err_config_profile;
diff --git a/drivers/net/ethernet/mellanox/mlxsw/reg.h b/drivers/net/ethernet/mellanox/mlxsw/reg.h
index 1721098eef13..6460c7256f2b 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/reg.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/reg.h
@@ -591,6 +591,12 @@ static const struct mlxsw_reg_info mlxsw_reg_sfn = {
*/
MLXSW_ITEM32(reg, sfn, swid, 0x00, 24, 8);
+/* reg_sfn_end
+ * Forces the current session to end.
+ * Access: OP
+ */
+MLXSW_ITEM32(reg, sfn, end, 0x04, 20, 1);
+
/* reg_sfn_num_rec
* Request: Number of learned notifications and aged-out notification
* records requested.
@@ -605,6 +611,7 @@ static inline void mlxsw_reg_sfn_pack(char *payload)
{
MLXSW_REG_ZERO(sfn, payload);
mlxsw_reg_sfn_swid_set(payload, 0);
+ mlxsw_reg_sfn_end_set(payload, 1);
mlxsw_reg_sfn_num_rec_set(payload, MLXSW_REG_SFN_REC_MAX_COUNT);
}
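
MLXSW_ITEM32() generates inline accessors named after the register and field, so the new bit becomes mlxsw_reg_sfn_end_set(), which the updated mlxsw_reg_sfn_pack() now calls. A minimal usage sketch (the payload buffer, error variable and mlxsw_core handle are illustrative):

char sfn_pl[MLXSW_REG_SFN_LEN];
int err;

mlxsw_reg_sfn_pack(sfn_pl);	/* zeroes payload, sets swid, end and num_rec */
err = mlxsw_reg_query(mlxsw_core, MLXSW_REG(sfn), sfn_pl);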
@@ -1385,7 +1392,7 @@ static inline void mlxsw_reg_slcr_pack(char *payload, u16 lag_hash)
{
MLXSW_REG_ZERO(slcr, payload);
mlxsw_reg_slcr_pp_set(payload, MLXSW_REG_SLCR_PP_GLOBAL);
- mlxsw_reg_slcr_type_set(payload, MLXSW_REG_SLCR_TYPE_XOR);
+ mlxsw_reg_slcr_type_set(payload, MLXSW_REG_SLCR_TYPE_CRC);
mlxsw_reg_slcr_lag_hash_set(payload, lag_hash);
}
@@ -2131,6 +2138,18 @@ MLXSW_ITEM32(reg, ptys, local_port, 0x00, 16, 8);
*/
MLXSW_ITEM32(reg, ptys, proto_mask, 0x00, 0, 3);
+enum {
+ MLXSW_REG_PTYS_AN_STATUS_NA,
+ MLXSW_REG_PTYS_AN_STATUS_OK,
+ MLXSW_REG_PTYS_AN_STATUS_FAIL,
+};
+
+/* reg_ptys_an_status
+ * Autonegotiation status.
+ * Access: RO
+ */
+MLXSW_ITEM32(reg, ptys, an_status, 0x04, 28, 4);
+
#define MLXSW_REG_PTYS_ETH_SPEED_SGMII BIT(0)
#define MLXSW_REG_PTYS_ETH_SPEED_1000BASE_KX BIT(1)
#define MLXSW_REG_PTYS_ETH_SPEED_10GBASE_CX4 BIT(2)
@@ -2145,6 +2164,7 @@ MLXSW_ITEM32(reg, ptys, proto_mask, 0x00, 0, 3);
#define MLXSW_REG_PTYS_ETH_SPEED_10GBASE_ER_LR BIT(14)
#define MLXSW_REG_PTYS_ETH_SPEED_40GBASE_SR4 BIT(15)
#define MLXSW_REG_PTYS_ETH_SPEED_40GBASE_LR4_ER4 BIT(16)
+#define MLXSW_REG_PTYS_ETH_SPEED_50GBASE_SR2 BIT(18)
#define MLXSW_REG_PTYS_ETH_SPEED_50GBASE_KR4 BIT(19)
#define MLXSW_REG_PTYS_ETH_SPEED_100GBASE_CR4 BIT(20)
#define MLXSW_REG_PTYS_ETH_SPEED_100GBASE_SR4 BIT(21)
@@ -2177,6 +2197,13 @@ MLXSW_ITEM32(reg, ptys, eth_proto_admin, 0x18, 0, 32);
*/
MLXSW_ITEM32(reg, ptys, eth_proto_oper, 0x24, 0, 32);
+/* reg_ptys_eth_proto_lp_advertise
+ * The protocols that were advertised by the link partner during
+ * autonegotiation.
+ * Access: RO
+ */
+MLXSW_ITEM32(reg, ptys, eth_proto_lp_advertise, 0x30, 0, 32);
+
static inline void mlxsw_reg_ptys_pack(char *payload, u8 local_port,
u32 proto_admin)
{
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
index d48873bcbddf..1ec0a4ce3c46 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
@@ -248,7 +248,8 @@ static void mlxsw_sp_span_entry_destroy(struct mlxsw_sp *mlxsw_sp,
span_entry->used = false;
}
-struct mlxsw_sp_span_entry *mlxsw_sp_span_entry_find(struct mlxsw_sp_port *port)
+static struct mlxsw_sp_span_entry *
+mlxsw_sp_span_entry_find(struct mlxsw_sp_port *port)
{
struct mlxsw_sp *mlxsw_sp = port->mlxsw_sp;
int i;
@@ -262,7 +263,8 @@ struct mlxsw_sp_span_entry *mlxsw_sp_span_entry_find(struct mlxsw_sp_port *port)
return NULL;
}
-struct mlxsw_sp_span_entry *mlxsw_sp_span_entry_get(struct mlxsw_sp_port *port)
+static struct mlxsw_sp_span_entry
+*mlxsw_sp_span_entry_get(struct mlxsw_sp_port *port)
{
struct mlxsw_sp_span_entry *span_entry;
@@ -364,7 +366,8 @@ mlxsw_sp_span_inspected_port_bind(struct mlxsw_sp_port *port,
}
/* bind the port to the SPAN entry */
- mlxsw_reg_mpar_pack(mpar_pl, port->local_port, type, true, pa_id);
+ mlxsw_reg_mpar_pack(mpar_pl, port->local_port,
+ (enum mlxsw_reg_mpar_i_e) type, true, pa_id);
err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mpar), mpar_pl);
if (err)
goto err_mpar_reg_write;
@@ -405,7 +408,8 @@ mlxsw_sp_span_inspected_port_unbind(struct mlxsw_sp_port *port,
return;
/* remove the inspected port */
- mlxsw_reg_mpar_pack(mpar_pl, port->local_port, type, false, pa_id);
+ mlxsw_reg_mpar_pack(mpar_pl, port->local_port,
+ (enum mlxsw_reg_mpar_i_e) type, false, pa_id);
mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mpar), mpar_pl);
/* remove the SBIB buffer if it was egress SPAN */
@@ -556,8 +560,9 @@ int mlxsw_sp_port_vid_to_fid_set(struct mlxsw_sp_port *mlxsw_sp_port,
return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(svfa), svfa_pl);
}
-static int mlxsw_sp_port_vid_learning_set(struct mlxsw_sp_port *mlxsw_sp_port,
- u16 vid, bool learn_enable)
+int __mlxsw_sp_port_vid_learning_set(struct mlxsw_sp_port *mlxsw_sp_port,
+ u16 vid_begin, u16 vid_end,
+ bool learn_enable)
{
struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
char *spvmlr_pl;
@@ -566,13 +571,20 @@ static int mlxsw_sp_port_vid_learning_set(struct mlxsw_sp_port *mlxsw_sp_port,
spvmlr_pl = kmalloc(MLXSW_REG_SPVMLR_LEN, GFP_KERNEL);
if (!spvmlr_pl)
return -ENOMEM;
- mlxsw_reg_spvmlr_pack(spvmlr_pl, mlxsw_sp_port->local_port, vid, vid,
- learn_enable);
+ mlxsw_reg_spvmlr_pack(spvmlr_pl, mlxsw_sp_port->local_port, vid_begin,
+ vid_end, learn_enable);
err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spvmlr), spvmlr_pl);
kfree(spvmlr_pl);
return err;
}
+static int mlxsw_sp_port_vid_learning_set(struct mlxsw_sp_port *mlxsw_sp_port,
+ u16 vid, bool learn_enable)
+{
+ return __mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, vid, vid,
+ learn_enable);
+}
+
static int
mlxsw_sp_port_system_port_mapping_set(struct mlxsw_sp_port *mlxsw_sp_port)
{
@@ -811,9 +823,9 @@ err_span_port_mtu_update:
return err;
}
-static struct rtnl_link_stats64 *
-mlxsw_sp_port_get_stats64(struct net_device *dev,
- struct rtnl_link_stats64 *stats)
+static int
+mlxsw_sp_port_get_sw_stats64(const struct net_device *dev,
+ struct rtnl_link_stats64 *stats)
{
struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
struct mlxsw_sp_port_pcpu_stats *p;
@@ -840,6 +852,107 @@ mlxsw_sp_port_get_stats64(struct net_device *dev,
tx_dropped += p->tx_dropped;
}
stats->tx_dropped = tx_dropped;
+ return 0;
+}
+
+static bool mlxsw_sp_port_has_offload_stats(int attr_id)
+{
+ switch (attr_id) {
+ case IFLA_OFFLOAD_XSTATS_CPU_HIT:
+ return true;
+ }
+
+ return false;
+}
+
+static int mlxsw_sp_port_get_offload_stats(int attr_id, const struct net_device *dev,
+ void *sp)
+{
+ switch (attr_id) {
+ case IFLA_OFFLOAD_XSTATS_CPU_HIT:
+ return mlxsw_sp_port_get_sw_stats64(dev, sp);
+ }
+
+ return -EINVAL;
+}
+
+static int mlxsw_sp_port_get_stats_raw(struct net_device *dev, int grp,
+ int prio, char *ppcnt_pl)
+{
+ struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
+ struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+
+ mlxsw_reg_ppcnt_pack(ppcnt_pl, mlxsw_sp_port->local_port, grp, prio);
+ return mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ppcnt), ppcnt_pl);
+}
+
+static int mlxsw_sp_port_get_hw_stats(struct net_device *dev,
+ struct rtnl_link_stats64 *stats)
+{
+ char ppcnt_pl[MLXSW_REG_PPCNT_LEN];
+ int err;
+
+ err = mlxsw_sp_port_get_stats_raw(dev, MLXSW_REG_PPCNT_IEEE_8023_CNT,
+ 0, ppcnt_pl);
+ if (err)
+ goto out;
+
+ stats->tx_packets =
+ mlxsw_reg_ppcnt_a_frames_transmitted_ok_get(ppcnt_pl);
+ stats->rx_packets =
+ mlxsw_reg_ppcnt_a_frames_received_ok_get(ppcnt_pl);
+ stats->tx_bytes =
+ mlxsw_reg_ppcnt_a_octets_transmitted_ok_get(ppcnt_pl);
+ stats->rx_bytes =
+ mlxsw_reg_ppcnt_a_octets_received_ok_get(ppcnt_pl);
+ stats->multicast =
+ mlxsw_reg_ppcnt_a_multicast_frames_received_ok_get(ppcnt_pl);
+
+ stats->rx_crc_errors =
+ mlxsw_reg_ppcnt_a_frame_check_sequence_errors_get(ppcnt_pl);
+ stats->rx_frame_errors =
+ mlxsw_reg_ppcnt_a_alignment_errors_get(ppcnt_pl);
+
+ stats->rx_length_errors = (
+ mlxsw_reg_ppcnt_a_in_range_length_errors_get(ppcnt_pl) +
+ mlxsw_reg_ppcnt_a_out_of_range_length_field_get(ppcnt_pl) +
+ mlxsw_reg_ppcnt_a_frame_too_long_errors_get(ppcnt_pl));
+
+ stats->rx_errors = (stats->rx_crc_errors +
+ stats->rx_frame_errors + stats->rx_length_errors);
+
+out:
+ return err;
+}
+
+static void update_stats_cache(struct work_struct *work)
+{
+ struct mlxsw_sp_port *mlxsw_sp_port =
+ container_of(work, struct mlxsw_sp_port,
+ hw_stats.update_dw.work);
+
+ if (!netif_carrier_ok(mlxsw_sp_port->dev))
+ goto out;
+
+ mlxsw_sp_port_get_hw_stats(mlxsw_sp_port->dev,
+ mlxsw_sp_port->hw_stats.cache);
+
+out:
+ mlxsw_core_schedule_dw(&mlxsw_sp_port->hw_stats.update_dw,
+ MLXSW_HW_STATS_UPDATE_TIME);
+}
+
+/* Return the stats from a cache that is updated periodically,
+ * as this function might get called in an atomic context.
+ */
+static struct rtnl_link_stats64 *
+mlxsw_sp_port_get_stats64(struct net_device *dev,
+ struct rtnl_link_stats64 *stats)
+{
+ struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
+
+ memcpy(stats, mlxsw_sp_port->hw_stats.cache, sizeof(*stats));
+
return stats;
}
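
The hunk above splits port statistics into two paths: the per-CPU software counters are exposed through the new offload-stats callbacks as IFLA_OFFLOAD_XSTATS_CPU_HIT (only traffic that actually reached the CPU), while ndo_get_stats64 now reports the hardware PPCNT counters from a cache refreshed by the delayed work, because it may be called in atomic context. A minimal sketch of that refresh pattern, with hypothetical names:

/* Sketch only, hypothetical names; the real code is update_stats_cache()
 * and mlxsw_sp_port_get_stats64() above.
 */
static void hw_stats_refresh(struct work_struct *work)
{
	struct port_priv *p = container_of(work, struct port_priv,
					   stats_dw.work);

	if (netif_carrier_ok(p->dev))
		read_hw_counters(p->dev, &p->stats_cache);	/* may sleep */
	schedule_delayed_work(&p->stats_dw, HZ);		/* re-arm */
}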
@@ -974,10 +1087,6 @@ static int mlxsw_sp_port_add_vid(struct net_device *dev,
goto err_port_vp_mode_trans;
}
- err = mlxsw_sp_port_vid_learning_set(mlxsw_sp_vport, vid, false);
- if (err)
- goto err_port_vid_learning_set;
-
err = mlxsw_sp_port_vlan_set(mlxsw_sp_vport, vid, vid, true, untagged);
if (err)
goto err_port_add_vid;
@@ -985,8 +1094,6 @@ static int mlxsw_sp_port_add_vid(struct net_device *dev,
return 0;
err_port_add_vid:
- mlxsw_sp_port_vid_learning_set(mlxsw_sp_vport, vid, true);
-err_port_vid_learning_set:
if (list_is_singular(&mlxsw_sp_port->vports_list))
mlxsw_sp_port_vlan_mode_trans(mlxsw_sp_port);
err_port_vp_mode_trans:
@@ -1013,8 +1120,6 @@ static int mlxsw_sp_port_kill_vid(struct net_device *dev,
mlxsw_sp_port_vlan_set(mlxsw_sp_vport, vid, vid, false, false);
- mlxsw_sp_port_vid_learning_set(mlxsw_sp_vport, vid, true);
-
/* Drop FID reference. If this was the last reference the
* resources will be freed.
*/
@@ -1209,6 +1314,8 @@ static const struct net_device_ops mlxsw_sp_port_netdev_ops = {
.ndo_set_mac_address = mlxsw_sp_port_set_mac_address,
.ndo_change_mtu = mlxsw_sp_port_change_mtu,
.ndo_get_stats64 = mlxsw_sp_port_get_stats64,
+ .ndo_has_offload_stats = mlxsw_sp_port_has_offload_stats,
+ .ndo_get_offload_stats = mlxsw_sp_port_get_offload_stats,
.ndo_vlan_rx_add_vid = mlxsw_sp_port_add_vid,
.ndo_vlan_rx_kill_vid = mlxsw_sp_port_kill_vid,
.ndo_neigh_construct = mlxsw_sp_router_neigh_construct,
@@ -1547,8 +1654,6 @@ static void __mlxsw_sp_port_get_stats(struct net_device *dev,
enum mlxsw_reg_ppcnt_grp grp, int prio,
u64 *data, int data_index)
{
- struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
- struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
struct mlxsw_sp_port_hw_stats *hw_stats;
char ppcnt_pl[MLXSW_REG_PPCNT_LEN];
int i, len;
@@ -1557,10 +1662,9 @@ static void __mlxsw_sp_port_get_stats(struct net_device *dev,
err = mlxsw_sp_get_hw_stats_by_group(&hw_stats, &len, grp);
if (err)
return;
- mlxsw_reg_ppcnt_pack(ppcnt_pl, mlxsw_sp_port->local_port, grp, prio);
- err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ppcnt), ppcnt_pl);
+ mlxsw_sp_port_get_stats_raw(dev, grp, prio, ppcnt_pl);
for (i = 0; i < len; i++)
- data[data_index + i] = !err ? hw_stats[i].getter(ppcnt_pl) : 0;
+ data[data_index + i] = hw_stats[i].getter(ppcnt_pl);
}
static void mlxsw_sp_port_get_stats(struct net_device *dev,
@@ -1599,112 +1703,149 @@ static int mlxsw_sp_port_get_sset_count(struct net_device *dev, int sset)
}
struct mlxsw_sp_port_link_mode {
+ enum ethtool_link_mode_bit_indices mask_ethtool;
u32 mask;
- u32 supported;
- u32 advertised;
u32 speed;
};
static const struct mlxsw_sp_port_link_mode mlxsw_sp_port_link_mode[] = {
{
.mask = MLXSW_REG_PTYS_ETH_SPEED_100BASE_T,
- .supported = SUPPORTED_100baseT_Full,
- .advertised = ADVERTISED_100baseT_Full,
- .speed = 100,
- },
- {
- .mask = MLXSW_REG_PTYS_ETH_SPEED_100BASE_TX,
- .speed = 100,
+ .mask_ethtool = ETHTOOL_LINK_MODE_100baseT_Full_BIT,
+ .speed = SPEED_100,
},
{
.mask = MLXSW_REG_PTYS_ETH_SPEED_SGMII |
MLXSW_REG_PTYS_ETH_SPEED_1000BASE_KX,
- .supported = SUPPORTED_1000baseKX_Full,
- .advertised = ADVERTISED_1000baseKX_Full,
- .speed = 1000,
+ .mask_ethtool = ETHTOOL_LINK_MODE_1000baseKX_Full_BIT,
+ .speed = SPEED_1000,
},
{
.mask = MLXSW_REG_PTYS_ETH_SPEED_10GBASE_T,
- .supported = SUPPORTED_10000baseT_Full,
- .advertised = ADVERTISED_10000baseT_Full,
- .speed = 10000,
+ .mask_ethtool = ETHTOOL_LINK_MODE_10000baseT_Full_BIT,
+ .speed = SPEED_10000,
},
{
.mask = MLXSW_REG_PTYS_ETH_SPEED_10GBASE_CX4 |
MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KX4,
- .supported = SUPPORTED_10000baseKX4_Full,
- .advertised = ADVERTISED_10000baseKX4_Full,
- .speed = 10000,
+ .mask_ethtool = ETHTOOL_LINK_MODE_10000baseKX4_Full_BIT,
+ .speed = SPEED_10000,
},
{
.mask = MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KR |
MLXSW_REG_PTYS_ETH_SPEED_10GBASE_CR |
MLXSW_REG_PTYS_ETH_SPEED_10GBASE_SR |
MLXSW_REG_PTYS_ETH_SPEED_10GBASE_ER_LR,
- .supported = SUPPORTED_10000baseKR_Full,
- .advertised = ADVERTISED_10000baseKR_Full,
- .speed = 10000,
+ .mask_ethtool = ETHTOOL_LINK_MODE_10000baseKR_Full_BIT,
+ .speed = SPEED_10000,
},
{
.mask = MLXSW_REG_PTYS_ETH_SPEED_20GBASE_KR2,
- .supported = SUPPORTED_20000baseKR2_Full,
- .advertised = ADVERTISED_20000baseKR2_Full,
- .speed = 20000,
+ .mask_ethtool = ETHTOOL_LINK_MODE_20000baseKR2_Full_BIT,
+ .speed = SPEED_20000,
},
{
.mask = MLXSW_REG_PTYS_ETH_SPEED_40GBASE_CR4,
- .supported = SUPPORTED_40000baseCR4_Full,
- .advertised = ADVERTISED_40000baseCR4_Full,
- .speed = 40000,
+ .mask_ethtool = ETHTOOL_LINK_MODE_40000baseCR4_Full_BIT,
+ .speed = SPEED_40000,
},
{
.mask = MLXSW_REG_PTYS_ETH_SPEED_40GBASE_KR4,
- .supported = SUPPORTED_40000baseKR4_Full,
- .advertised = ADVERTISED_40000baseKR4_Full,
- .speed = 40000,
+ .mask_ethtool = ETHTOOL_LINK_MODE_40000baseKR4_Full_BIT,
+ .speed = SPEED_40000,
},
{
.mask = MLXSW_REG_PTYS_ETH_SPEED_40GBASE_SR4,
- .supported = SUPPORTED_40000baseSR4_Full,
- .advertised = ADVERTISED_40000baseSR4_Full,
- .speed = 40000,
+ .mask_ethtool = ETHTOOL_LINK_MODE_40000baseSR4_Full_BIT,
+ .speed = SPEED_40000,
},
{
.mask = MLXSW_REG_PTYS_ETH_SPEED_40GBASE_LR4_ER4,
- .supported = SUPPORTED_40000baseLR4_Full,
- .advertised = ADVERTISED_40000baseLR4_Full,
- .speed = 40000,
+ .mask_ethtool = ETHTOOL_LINK_MODE_40000baseLR4_Full_BIT,
+ .speed = SPEED_40000,
+ },
+ {
+ .mask = MLXSW_REG_PTYS_ETH_SPEED_25GBASE_CR,
+ .mask_ethtool = ETHTOOL_LINK_MODE_25000baseCR_Full_BIT,
+ .speed = SPEED_25000,
+ },
+ {
+ .mask = MLXSW_REG_PTYS_ETH_SPEED_25GBASE_KR,
+ .mask_ethtool = ETHTOOL_LINK_MODE_25000baseKR_Full_BIT,
+ .speed = SPEED_25000,
+ },
+ {
+ .mask = MLXSW_REG_PTYS_ETH_SPEED_25GBASE_SR,
+ .mask_ethtool = ETHTOOL_LINK_MODE_25000baseSR_Full_BIT,
+ .speed = SPEED_25000,
+ },
+ {
+ .mask = MLXSW_REG_PTYS_ETH_SPEED_25GBASE_SR,
+ .mask_ethtool = ETHTOOL_LINK_MODE_25000baseSR_Full_BIT,
+ .speed = SPEED_25000,
},
{
- .mask = MLXSW_REG_PTYS_ETH_SPEED_25GBASE_CR |
- MLXSW_REG_PTYS_ETH_SPEED_25GBASE_KR |
- MLXSW_REG_PTYS_ETH_SPEED_25GBASE_SR,
- .speed = 25000,
+ .mask = MLXSW_REG_PTYS_ETH_SPEED_50GBASE_CR2,
+ .mask_ethtool = ETHTOOL_LINK_MODE_50000baseCR2_Full_BIT,
+ .speed = SPEED_50000,
},
{
- .mask = MLXSW_REG_PTYS_ETH_SPEED_50GBASE_KR4 |
- MLXSW_REG_PTYS_ETH_SPEED_50GBASE_CR2 |
- MLXSW_REG_PTYS_ETH_SPEED_50GBASE_KR2,
- .speed = 50000,
+ .mask = MLXSW_REG_PTYS_ETH_SPEED_50GBASE_KR2,
+ .mask_ethtool = ETHTOOL_LINK_MODE_50000baseKR2_Full_BIT,
+ .speed = SPEED_50000,
+ },
+ {
+ .mask = MLXSW_REG_PTYS_ETH_SPEED_50GBASE_SR2,
+ .mask_ethtool = ETHTOOL_LINK_MODE_50000baseSR2_Full_BIT,
+ .speed = SPEED_50000,
+ },
+ {
+ .mask = MLXSW_REG_PTYS_ETH_SPEED_56GBASE_R4,
+ .mask_ethtool = ETHTOOL_LINK_MODE_56000baseKR4_Full_BIT,
+ .speed = SPEED_56000,
+ },
+ {
+ .mask = MLXSW_REG_PTYS_ETH_SPEED_56GBASE_R4,
+ .mask_ethtool = ETHTOOL_LINK_MODE_56000baseCR4_Full_BIT,
+ .speed = SPEED_56000,
},
{
.mask = MLXSW_REG_PTYS_ETH_SPEED_56GBASE_R4,
- .supported = SUPPORTED_56000baseKR4_Full,
- .advertised = ADVERTISED_56000baseKR4_Full,
- .speed = 56000,
+ .mask_ethtool = ETHTOOL_LINK_MODE_56000baseSR4_Full_BIT,
+ .speed = SPEED_56000,
},
{
- .mask = MLXSW_REG_PTYS_ETH_SPEED_100GBASE_CR4 |
- MLXSW_REG_PTYS_ETH_SPEED_100GBASE_SR4 |
- MLXSW_REG_PTYS_ETH_SPEED_100GBASE_KR4 |
- MLXSW_REG_PTYS_ETH_SPEED_100GBASE_LR4_ER4,
- .speed = 100000,
+ .mask = MLXSW_REG_PTYS_ETH_SPEED_56GBASE_R4,
+ .mask_ethtool = ETHTOOL_LINK_MODE_56000baseLR4_Full_BIT,
+ .speed = SPEED_56000,
+ },
+ {
+ .mask = MLXSW_REG_PTYS_ETH_SPEED_100GBASE_CR4,
+ .mask_ethtool = ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT,
+ .speed = SPEED_100000,
+ },
+ {
+ .mask = MLXSW_REG_PTYS_ETH_SPEED_100GBASE_SR4,
+ .mask_ethtool = ETHTOOL_LINK_MODE_100000baseSR4_Full_BIT,
+ .speed = SPEED_100000,
+ },
+ {
+ .mask = MLXSW_REG_PTYS_ETH_SPEED_100GBASE_KR4,
+ .mask_ethtool = ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT,
+ .speed = SPEED_100000,
+ },
+ {
+ .mask = MLXSW_REG_PTYS_ETH_SPEED_100GBASE_LR4_ER4,
+ .mask_ethtool = ETHTOOL_LINK_MODE_100000baseLR4_ER4_Full_BIT,
+ .speed = SPEED_100000,
},
};
#define MLXSW_SP_PORT_LINK_MODE_LEN ARRAY_SIZE(mlxsw_sp_port_link_mode)
-static u32 mlxsw_sp_from_ptys_supported_port(u32 ptys_eth_proto)
+static void
+mlxsw_sp_from_ptys_supported_port(u32 ptys_eth_proto,
+ struct ethtool_link_ksettings *cmd)
{
if (ptys_eth_proto & (MLXSW_REG_PTYS_ETH_SPEED_10GBASE_CR |
MLXSW_REG_PTYS_ETH_SPEED_10GBASE_SR |
@@ -1712,43 +1853,29 @@ static u32 mlxsw_sp_from_ptys_supported_port(u32 ptys_eth_proto)
MLXSW_REG_PTYS_ETH_SPEED_40GBASE_SR4 |
MLXSW_REG_PTYS_ETH_SPEED_100GBASE_SR4 |
MLXSW_REG_PTYS_ETH_SPEED_SGMII))
- return SUPPORTED_FIBRE;
+ ethtool_link_ksettings_add_link_mode(cmd, supported, FIBRE);
if (ptys_eth_proto & (MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KR |
MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KX4 |
MLXSW_REG_PTYS_ETH_SPEED_40GBASE_KR4 |
MLXSW_REG_PTYS_ETH_SPEED_100GBASE_KR4 |
MLXSW_REG_PTYS_ETH_SPEED_1000BASE_KX))
- return SUPPORTED_Backplane;
- return 0;
-}
-
-static u32 mlxsw_sp_from_ptys_supported_link(u32 ptys_eth_proto)
-{
- u32 modes = 0;
- int i;
-
- for (i = 0; i < MLXSW_SP_PORT_LINK_MODE_LEN; i++) {
- if (ptys_eth_proto & mlxsw_sp_port_link_mode[i].mask)
- modes |= mlxsw_sp_port_link_mode[i].supported;
- }
- return modes;
+ ethtool_link_ksettings_add_link_mode(cmd, supported, Backplane);
}
-static u32 mlxsw_sp_from_ptys_advert_link(u32 ptys_eth_proto)
+static void mlxsw_sp_from_ptys_link(u32 ptys_eth_proto, unsigned long *mode)
{
- u32 modes = 0;
int i;
for (i = 0; i < MLXSW_SP_PORT_LINK_MODE_LEN; i++) {
if (ptys_eth_proto & mlxsw_sp_port_link_mode[i].mask)
- modes |= mlxsw_sp_port_link_mode[i].advertised;
+ __set_bit(mlxsw_sp_port_link_mode[i].mask_ethtool,
+ mode);
}
- return modes;
}
static void mlxsw_sp_from_ptys_speed_duplex(bool carrier_ok, u32 ptys_eth_proto,
- struct ethtool_cmd *cmd)
+ struct ethtool_link_ksettings *cmd)
{
u32 speed = SPEED_UNKNOWN;
u8 duplex = DUPLEX_UNKNOWN;
@@ -1765,8 +1892,8 @@ static void mlxsw_sp_from_ptys_speed_duplex(bool carrier_ok, u32 ptys_eth_proto,
}
}
out:
- ethtool_cmd_speed_set(cmd, speed);
- cmd->duplex = duplex;
+ cmd->base.speed = speed;
+ cmd->base.duplex = duplex;
}
static u8 mlxsw_sp_port_connector_port(u32 ptys_eth_proto)
@@ -1791,49 +1918,15 @@ static u8 mlxsw_sp_port_connector_port(u32 ptys_eth_proto)
return PORT_OTHER;
}
-static int mlxsw_sp_port_get_settings(struct net_device *dev,
- struct ethtool_cmd *cmd)
-{
- struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
- struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
- char ptys_pl[MLXSW_REG_PTYS_LEN];
- u32 eth_proto_cap;
- u32 eth_proto_admin;
- u32 eth_proto_oper;
- int err;
-
- mlxsw_reg_ptys_pack(ptys_pl, mlxsw_sp_port->local_port, 0);
- err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl);
- if (err) {
- netdev_err(dev, "Failed to get proto");
- return err;
- }
- mlxsw_reg_ptys_unpack(ptys_pl, &eth_proto_cap,
- &eth_proto_admin, &eth_proto_oper);
-
- cmd->supported = mlxsw_sp_from_ptys_supported_port(eth_proto_cap) |
- mlxsw_sp_from_ptys_supported_link(eth_proto_cap) |
- SUPPORTED_Pause | SUPPORTED_Asym_Pause |
- SUPPORTED_Autoneg;
- cmd->advertising = mlxsw_sp_from_ptys_advert_link(eth_proto_admin);
- mlxsw_sp_from_ptys_speed_duplex(netif_carrier_ok(dev),
- eth_proto_oper, cmd);
-
- eth_proto_oper = eth_proto_oper ? eth_proto_oper : eth_proto_cap;
- cmd->port = mlxsw_sp_port_connector_port(eth_proto_oper);
- cmd->lp_advertising = mlxsw_sp_from_ptys_advert_link(eth_proto_oper);
-
- cmd->transceiver = XCVR_INTERNAL;
- return 0;
-}
-
-static u32 mlxsw_sp_to_ptys_advert_link(u32 advertising)
+static u32
+mlxsw_sp_to_ptys_advert_link(const struct ethtool_link_ksettings *cmd)
{
u32 ptys_proto = 0;
int i;
for (i = 0; i < MLXSW_SP_PORT_LINK_MODE_LEN; i++) {
- if (advertising & mlxsw_sp_port_link_mode[i].advertised)
+ if (test_bit(mlxsw_sp_port_link_mode[i].mask_ethtool,
+ cmd->link_modes.advertising))
ptys_proto |= mlxsw_sp_port_link_mode[i].mask;
}
return ptys_proto;
@@ -1863,61 +1956,113 @@ static u32 mlxsw_sp_to_ptys_upper_speed(u32 upper_speed)
return ptys_proto;
}
-static int mlxsw_sp_port_set_settings(struct net_device *dev,
- struct ethtool_cmd *cmd)
+static void mlxsw_sp_port_get_link_supported(u32 eth_proto_cap,
+ struct ethtool_link_ksettings *cmd)
{
+ ethtool_link_ksettings_add_link_mode(cmd, supported, Asym_Pause);
+ ethtool_link_ksettings_add_link_mode(cmd, supported, Autoneg);
+ ethtool_link_ksettings_add_link_mode(cmd, supported, Pause);
+
+ mlxsw_sp_from_ptys_supported_port(eth_proto_cap, cmd);
+ mlxsw_sp_from_ptys_link(eth_proto_cap, cmd->link_modes.supported);
+}
+
+static void mlxsw_sp_port_get_link_advertise(u32 eth_proto_admin, bool autoneg,
+ struct ethtool_link_ksettings *cmd)
+{
+ if (!autoneg)
+ return;
+
+ ethtool_link_ksettings_add_link_mode(cmd, advertising, Autoneg);
+ mlxsw_sp_from_ptys_link(eth_proto_admin, cmd->link_modes.advertising);
+}
+
+static void
+mlxsw_sp_port_get_link_lp_advertise(u32 eth_proto_lp, u8 autoneg_status,
+ struct ethtool_link_ksettings *cmd)
+{
+ if (autoneg_status != MLXSW_REG_PTYS_AN_STATUS_OK || !eth_proto_lp)
+ return;
+
+ ethtool_link_ksettings_add_link_mode(cmd, lp_advertising, Autoneg);
+ mlxsw_sp_from_ptys_link(eth_proto_lp, cmd->link_modes.lp_advertising);
+}
+
+static int mlxsw_sp_port_get_link_ksettings(struct net_device *dev,
+ struct ethtool_link_ksettings *cmd)
+{
+ u32 eth_proto_cap, eth_proto_admin, eth_proto_oper, eth_proto_lp;
struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
char ptys_pl[MLXSW_REG_PTYS_LEN];
- u32 speed;
- u32 eth_proto_new;
- u32 eth_proto_cap;
- u32 eth_proto_admin;
+ u8 autoneg_status;
+ bool autoneg;
int err;
- speed = ethtool_cmd_speed(cmd);
+ autoneg = mlxsw_sp_port->link.autoneg;
+ mlxsw_reg_ptys_pack(ptys_pl, mlxsw_sp_port->local_port, 0);
+ err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl);
+ if (err)
+ return err;
+ mlxsw_reg_ptys_unpack(ptys_pl, &eth_proto_cap, &eth_proto_admin,
+ &eth_proto_oper);
+
+ mlxsw_sp_port_get_link_supported(eth_proto_cap, cmd);
+
+ mlxsw_sp_port_get_link_advertise(eth_proto_admin, autoneg, cmd);
+
+ eth_proto_lp = mlxsw_reg_ptys_eth_proto_lp_advertise_get(ptys_pl);
+ autoneg_status = mlxsw_reg_ptys_an_status_get(ptys_pl);
+ mlxsw_sp_port_get_link_lp_advertise(eth_proto_lp, autoneg_status, cmd);
+
+ cmd->base.autoneg = autoneg ? AUTONEG_ENABLE : AUTONEG_DISABLE;
+ cmd->base.port = mlxsw_sp_port_connector_port(eth_proto_oper);
+ mlxsw_sp_from_ptys_speed_duplex(netif_carrier_ok(dev), eth_proto_oper,
+ cmd);
+
+ return 0;
+}
- eth_proto_new = cmd->autoneg == AUTONEG_ENABLE ?
- mlxsw_sp_to_ptys_advert_link(cmd->advertising) :
- mlxsw_sp_to_ptys_speed(speed);
+static int
+mlxsw_sp_port_set_link_ksettings(struct net_device *dev,
+ const struct ethtool_link_ksettings *cmd)
+{
+ struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
+ struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+ char ptys_pl[MLXSW_REG_PTYS_LEN];
+ u32 eth_proto_cap, eth_proto_new;
+ bool autoneg;
+ int err;
mlxsw_reg_ptys_pack(ptys_pl, mlxsw_sp_port->local_port, 0);
err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl);
- if (err) {
- netdev_err(dev, "Failed to get proto");
+ if (err)
return err;
- }
- mlxsw_reg_ptys_unpack(ptys_pl, &eth_proto_cap, &eth_proto_admin, NULL);
+ mlxsw_reg_ptys_unpack(ptys_pl, &eth_proto_cap, NULL, NULL);
+
+ autoneg = cmd->base.autoneg == AUTONEG_ENABLE;
+ eth_proto_new = autoneg ?
+ mlxsw_sp_to_ptys_advert_link(cmd) :
+ mlxsw_sp_to_ptys_speed(cmd->base.speed);
eth_proto_new = eth_proto_new & eth_proto_cap;
if (!eth_proto_new) {
- netdev_err(dev, "Not supported proto admin requested");
+ netdev_err(dev, "No supported speed requested\n");
return -EINVAL;
}
- if (eth_proto_new == eth_proto_admin)
- return 0;
mlxsw_reg_ptys_pack(ptys_pl, mlxsw_sp_port->local_port, eth_proto_new);
err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl);
- if (err) {
- netdev_err(dev, "Failed to set proto admin");
+ if (err)
return err;
- }
if (!netif_running(dev))
return 0;
- err = mlxsw_sp_port_admin_status_set(mlxsw_sp_port, false);
- if (err) {
- netdev_err(dev, "Failed to set admin status");
- return err;
- }
+ mlxsw_sp_port->link.autoneg = autoneg;
- err = mlxsw_sp_port_admin_status_set(mlxsw_sp_port, true);
- if (err) {
- netdev_err(dev, "Failed to set admin status");
- return err;
- }
+ mlxsw_sp_port_admin_status_set(mlxsw_sp_port, false);
+ mlxsw_sp_port_admin_status_set(mlxsw_sp_port, true);
return 0;
}
@@ -1931,8 +2076,8 @@ static const struct ethtool_ops mlxsw_sp_port_ethtool_ops = {
.set_phys_id = mlxsw_sp_port_set_phys_id,
.get_ethtool_stats = mlxsw_sp_port_get_stats,
.get_sset_count = mlxsw_sp_port_get_sset_count,
- .get_settings = mlxsw_sp_port_get_settings,
- .set_settings = mlxsw_sp_port_set_settings,
+ .get_link_ksettings = mlxsw_sp_port_get_link_ksettings,
+ .set_link_ksettings = mlxsw_sp_port_set_link_ksettings,
};
static int
@@ -2082,6 +2227,7 @@ static int mlxsw_sp_port_create(struct mlxsw_sp *mlxsw_sp, u8 local_port,
mlxsw_sp_port->mapping.module = module;
mlxsw_sp_port->mapping.width = width;
mlxsw_sp_port->mapping.lane = lane;
+ mlxsw_sp_port->link.autoneg = 1;
bytes = DIV_ROUND_UP(VLAN_N_VID, BITS_PER_BYTE);
mlxsw_sp_port->active_vlans = kzalloc(bytes, GFP_KERNEL);
if (!mlxsw_sp_port->active_vlans) {
@@ -2103,6 +2249,16 @@ static int mlxsw_sp_port_create(struct mlxsw_sp *mlxsw_sp, u8 local_port,
goto err_alloc_stats;
}
+ mlxsw_sp_port->hw_stats.cache =
+ kzalloc(sizeof(*mlxsw_sp_port->hw_stats.cache), GFP_KERNEL);
+
+ if (!mlxsw_sp_port->hw_stats.cache) {
+ err = -ENOMEM;
+ goto err_alloc_hw_stats;
+ }
+ INIT_DELAYED_WORK(&mlxsw_sp_port->hw_stats.update_dw,
+ &update_stats_cache);
+
dev->netdev_ops = &mlxsw_sp_port_netdev_ops;
dev->ethtool_ops = &mlxsw_sp_port_ethtool_ops;
@@ -2129,7 +2285,7 @@ static int mlxsw_sp_port_create(struct mlxsw_sp *mlxsw_sp, u8 local_port,
/* Each packet needs to have a Tx header (metadata) on top of all other
* headers.
*/
- dev->hard_header_len += MLXSW_TXHDR_LEN;
+ dev->needed_headroom = MLXSW_TXHDR_LEN;
err = mlxsw_sp_port_system_port_mapping_set(mlxsw_sp_port);
if (err) {
@@ -2203,6 +2359,7 @@ static int mlxsw_sp_port_create(struct mlxsw_sp *mlxsw_sp, u8 local_port,
goto err_core_port_init;
}
+ mlxsw_core_schedule_dw(&mlxsw_sp_port->hw_stats.update_dw, 0);
return 0;
err_core_port_init:
@@ -2223,6 +2380,8 @@ err_port_system_port_mapping_set:
err_dev_addr_init:
mlxsw_sp_port_swid_set(mlxsw_sp_port, MLXSW_PORT_SWID_DISABLED_PORT);
err_port_swid_set:
+ kfree(mlxsw_sp_port->hw_stats.cache);
+err_alloc_hw_stats:
free_percpu(mlxsw_sp_port->pcpu_stats);
err_alloc_stats:
kfree(mlxsw_sp_port->untagged_vlans);
@@ -2239,6 +2398,7 @@ static void mlxsw_sp_port_remove(struct mlxsw_sp *mlxsw_sp, u8 local_port)
if (!mlxsw_sp_port)
return;
+ cancel_delayed_work_sync(&mlxsw_sp_port->hw_stats.update_dw);
mlxsw_core_port_fini(&mlxsw_sp_port->core_port);
unregister_netdev(mlxsw_sp_port->dev); /* This calls ndo_stop */
mlxsw_sp->ports[local_port] = NULL;
@@ -2248,6 +2408,7 @@ static void mlxsw_sp_port_remove(struct mlxsw_sp *mlxsw_sp, u8 local_port)
mlxsw_sp_port_swid_set(mlxsw_sp_port, MLXSW_PORT_SWID_DISABLED_PORT);
mlxsw_sp_port_module_unmap(mlxsw_sp, mlxsw_sp_port->local_port);
free_percpu(mlxsw_sp_port->pcpu_stats);
+ kfree(mlxsw_sp_port->hw_stats.cache);
kfree(mlxsw_sp_port->untagged_vlans);
kfree(mlxsw_sp_port->active_vlans);
WARN_ON_ONCE(!list_empty(&mlxsw_sp_port->vports_list));
@@ -2571,123 +2732,47 @@ static void mlxsw_sp_rx_listener_func(struct sk_buff *skb, u8 local_port,
netif_receive_skb(skb);
}
+static void mlxsw_sp_rx_listener_mark_func(struct sk_buff *skb, u8 local_port,
+ void *priv)
+{
+ skb->offload_fwd_mark = 1;
+ return mlxsw_sp_rx_listener_func(skb, local_port, priv);
+}
+
+#define MLXSW_SP_RXL(_func, _trap_id, _action) \
+ { \
+ .func = _func, \
+ .local_port = MLXSW_PORT_DONT_CARE, \
+ .trap_id = MLXSW_TRAP_ID_##_trap_id, \
+ .action = MLXSW_REG_HPKT_ACTION_##_action, \
+ }
+
static const struct mlxsw_rx_listener mlxsw_sp_rx_listener[] = {
- {
- .func = mlxsw_sp_rx_listener_func,
- .local_port = MLXSW_PORT_DONT_CARE,
- .trap_id = MLXSW_TRAP_ID_FDB_MC,
- },
+ MLXSW_SP_RXL(mlxsw_sp_rx_listener_func, FDB_MC, TRAP_TO_CPU),
/* Traps for specific L2 packet types, not trapped as FDB MC */
- {
- .func = mlxsw_sp_rx_listener_func,
- .local_port = MLXSW_PORT_DONT_CARE,
- .trap_id = MLXSW_TRAP_ID_STP,
- },
- {
- .func = mlxsw_sp_rx_listener_func,
- .local_port = MLXSW_PORT_DONT_CARE,
- .trap_id = MLXSW_TRAP_ID_LACP,
- },
- {
- .func = mlxsw_sp_rx_listener_func,
- .local_port = MLXSW_PORT_DONT_CARE,
- .trap_id = MLXSW_TRAP_ID_EAPOL,
- },
- {
- .func = mlxsw_sp_rx_listener_func,
- .local_port = MLXSW_PORT_DONT_CARE,
- .trap_id = MLXSW_TRAP_ID_LLDP,
- },
- {
- .func = mlxsw_sp_rx_listener_func,
- .local_port = MLXSW_PORT_DONT_CARE,
- .trap_id = MLXSW_TRAP_ID_MMRP,
- },
- {
- .func = mlxsw_sp_rx_listener_func,
- .local_port = MLXSW_PORT_DONT_CARE,
- .trap_id = MLXSW_TRAP_ID_MVRP,
- },
- {
- .func = mlxsw_sp_rx_listener_func,
- .local_port = MLXSW_PORT_DONT_CARE,
- .trap_id = MLXSW_TRAP_ID_RPVST,
- },
- {
- .func = mlxsw_sp_rx_listener_func,
- .local_port = MLXSW_PORT_DONT_CARE,
- .trap_id = MLXSW_TRAP_ID_DHCP,
- },
- {
- .func = mlxsw_sp_rx_listener_func,
- .local_port = MLXSW_PORT_DONT_CARE,
- .trap_id = MLXSW_TRAP_ID_IGMP_QUERY,
- },
- {
- .func = mlxsw_sp_rx_listener_func,
- .local_port = MLXSW_PORT_DONT_CARE,
- .trap_id = MLXSW_TRAP_ID_IGMP_V1_REPORT,
- },
- {
- .func = mlxsw_sp_rx_listener_func,
- .local_port = MLXSW_PORT_DONT_CARE,
- .trap_id = MLXSW_TRAP_ID_IGMP_V2_REPORT,
- },
- {
- .func = mlxsw_sp_rx_listener_func,
- .local_port = MLXSW_PORT_DONT_CARE,
- .trap_id = MLXSW_TRAP_ID_IGMP_V2_LEAVE,
- },
- {
- .func = mlxsw_sp_rx_listener_func,
- .local_port = MLXSW_PORT_DONT_CARE,
- .trap_id = MLXSW_TRAP_ID_IGMP_V3_REPORT,
- },
- {
- .func = mlxsw_sp_rx_listener_func,
- .local_port = MLXSW_PORT_DONT_CARE,
- .trap_id = MLXSW_TRAP_ID_ARPBC,
- },
- {
- .func = mlxsw_sp_rx_listener_func,
- .local_port = MLXSW_PORT_DONT_CARE,
- .trap_id = MLXSW_TRAP_ID_ARPUC,
- },
- {
- .func = mlxsw_sp_rx_listener_func,
- .local_port = MLXSW_PORT_DONT_CARE,
- .trap_id = MLXSW_TRAP_ID_MTUERROR,
- },
- {
- .func = mlxsw_sp_rx_listener_func,
- .local_port = MLXSW_PORT_DONT_CARE,
- .trap_id = MLXSW_TRAP_ID_TTLERROR,
- },
- {
- .func = mlxsw_sp_rx_listener_func,
- .local_port = MLXSW_PORT_DONT_CARE,
- .trap_id = MLXSW_TRAP_ID_LBERROR,
- },
- {
- .func = mlxsw_sp_rx_listener_func,
- .local_port = MLXSW_PORT_DONT_CARE,
- .trap_id = MLXSW_TRAP_ID_OSPF,
- },
- {
- .func = mlxsw_sp_rx_listener_func,
- .local_port = MLXSW_PORT_DONT_CARE,
- .trap_id = MLXSW_TRAP_ID_IP2ME,
- },
- {
- .func = mlxsw_sp_rx_listener_func,
- .local_port = MLXSW_PORT_DONT_CARE,
- .trap_id = MLXSW_TRAP_ID_RTR_INGRESS0,
- },
- {
- .func = mlxsw_sp_rx_listener_func,
- .local_port = MLXSW_PORT_DONT_CARE,
- .trap_id = MLXSW_TRAP_ID_HOST_MISS_IPV4,
- },
+ MLXSW_SP_RXL(mlxsw_sp_rx_listener_func, STP, TRAP_TO_CPU),
+ MLXSW_SP_RXL(mlxsw_sp_rx_listener_func, LACP, TRAP_TO_CPU),
+ MLXSW_SP_RXL(mlxsw_sp_rx_listener_func, EAPOL, TRAP_TO_CPU),
+ MLXSW_SP_RXL(mlxsw_sp_rx_listener_func, LLDP, TRAP_TO_CPU),
+ MLXSW_SP_RXL(mlxsw_sp_rx_listener_func, MMRP, TRAP_TO_CPU),
+ MLXSW_SP_RXL(mlxsw_sp_rx_listener_func, MVRP, TRAP_TO_CPU),
+ MLXSW_SP_RXL(mlxsw_sp_rx_listener_func, RPVST, TRAP_TO_CPU),
+ MLXSW_SP_RXL(mlxsw_sp_rx_listener_mark_func, DHCP, MIRROR_TO_CPU),
+ MLXSW_SP_RXL(mlxsw_sp_rx_listener_mark_func, IGMP_QUERY, MIRROR_TO_CPU),
+ MLXSW_SP_RXL(mlxsw_sp_rx_listener_func, IGMP_V1_REPORT, TRAP_TO_CPU),
+ MLXSW_SP_RXL(mlxsw_sp_rx_listener_func, IGMP_V2_REPORT, TRAP_TO_CPU),
+ MLXSW_SP_RXL(mlxsw_sp_rx_listener_func, IGMP_V2_LEAVE, TRAP_TO_CPU),
+ MLXSW_SP_RXL(mlxsw_sp_rx_listener_func, IGMP_V3_REPORT, TRAP_TO_CPU),
+ MLXSW_SP_RXL(mlxsw_sp_rx_listener_mark_func, ARPBC, MIRROR_TO_CPU),
+ MLXSW_SP_RXL(mlxsw_sp_rx_listener_mark_func, ARPUC, MIRROR_TO_CPU),
+ /* L3 traps */
+ MLXSW_SP_RXL(mlxsw_sp_rx_listener_func, MTUERROR, TRAP_TO_CPU),
+ MLXSW_SP_RXL(mlxsw_sp_rx_listener_func, TTLERROR, TRAP_TO_CPU),
+ MLXSW_SP_RXL(mlxsw_sp_rx_listener_func, LBERROR, TRAP_TO_CPU),
+ MLXSW_SP_RXL(mlxsw_sp_rx_listener_mark_func, OSPF, TRAP_TO_CPU),
+ MLXSW_SP_RXL(mlxsw_sp_rx_listener_func, IP2ME, TRAP_TO_CPU),
+ MLXSW_SP_RXL(mlxsw_sp_rx_listener_func, RTR_INGRESS0, TRAP_TO_CPU),
+ MLXSW_SP_RXL(mlxsw_sp_rx_listener_func, HOST_MISS_IPV4, TRAP_TO_CPU),
};
static int mlxsw_sp_traps_init(struct mlxsw_sp *mlxsw_sp)
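
Each MLXSW_SP_RXL() entry expands to the same open-coded initializer the removed lines used, with the new action field added; for example, the DHCP entry above is equivalent to:

/* Expansion of MLXSW_SP_RXL(mlxsw_sp_rx_listener_mark_func, DHCP, MIRROR_TO_CPU) */
{
	.func		= mlxsw_sp_rx_listener_mark_func,
	.local_port	= MLXSW_PORT_DONT_CARE,
	.trap_id	= MLXSW_TRAP_ID_DHCP,
	.action		= MLXSW_REG_HPKT_ACTION_MIRROR_TO_CPU,
},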
@@ -2714,7 +2799,7 @@ static int mlxsw_sp_traps_init(struct mlxsw_sp *mlxsw_sp)
if (err)
goto err_rx_listener_register;
- mlxsw_reg_hpkt_pack(hpkt_pl, MLXSW_REG_HPKT_ACTION_TRAP_TO_CPU,
+ mlxsw_reg_hpkt_pack(hpkt_pl, mlxsw_sp_rx_listener[i].action,
mlxsw_sp_rx_listener[i].trap_id);
err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(hpkt), hpkt_pl);
if (err)
@@ -2802,7 +2887,9 @@ static int mlxsw_sp_flood_init(struct mlxsw_sp *mlxsw_sp)
static int mlxsw_sp_lag_init(struct mlxsw_sp *mlxsw_sp)
{
+ struct mlxsw_resources *resources;
char slcr_pl[MLXSW_REG_SLCR_LEN];
+ int err;
mlxsw_reg_slcr_pack(slcr_pl, MLXSW_REG_SLCR_LAG_HASH_SMAC |
MLXSW_REG_SLCR_LAG_HASH_DMAC |
@@ -2813,7 +2900,26 @@ static int mlxsw_sp_lag_init(struct mlxsw_sp *mlxsw_sp)
MLXSW_REG_SLCR_LAG_HASH_SPORT |
MLXSW_REG_SLCR_LAG_HASH_DPORT |
MLXSW_REG_SLCR_LAG_HASH_IPPROTO);
- return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcr), slcr_pl);
+ err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcr), slcr_pl);
+ if (err)
+ return err;
+
+ resources = mlxsw_core_resources_get(mlxsw_sp->core);
+ if (!(resources->max_lag_valid && resources->max_ports_in_lag_valid))
+ return -EIO;
+
+ mlxsw_sp->lags = kcalloc(resources->max_lag,
+ sizeof(struct mlxsw_sp_upper),
+ GFP_KERNEL);
+ if (!mlxsw_sp->lags)
+ return -ENOMEM;
+
+ return 0;
+}
+
+static void mlxsw_sp_lag_fini(struct mlxsw_sp *mlxsw_sp)
+{
+ kfree(mlxsw_sp->lags);
}
static int mlxsw_sp_init(struct mlxsw_core *mlxsw_core,
@@ -2897,6 +3003,7 @@ err_span_init:
err_router_init:
mlxsw_sp_switchdev_fini(mlxsw_sp);
err_switchdev_init:
+ mlxsw_sp_lag_fini(mlxsw_sp);
err_lag_init:
mlxsw_sp_buffers_fini(mlxsw_sp);
err_buffers_init:
@@ -2910,38 +3017,26 @@ err_rx_listener_register:
static void mlxsw_sp_fini(struct mlxsw_core *mlxsw_core)
{
struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
- int i;
mlxsw_sp_ports_remove(mlxsw_sp);
mlxsw_sp_span_fini(mlxsw_sp);
mlxsw_sp_router_fini(mlxsw_sp);
mlxsw_sp_switchdev_fini(mlxsw_sp);
+ mlxsw_sp_lag_fini(mlxsw_sp);
mlxsw_sp_buffers_fini(mlxsw_sp);
mlxsw_sp_traps_fini(mlxsw_sp);
mlxsw_sp_event_unregister(mlxsw_sp, MLXSW_TRAP_ID_PUDE);
WARN_ON(!list_empty(&mlxsw_sp->vfids.list));
WARN_ON(!list_empty(&mlxsw_sp->fids));
- for (i = 0; i < MLXSW_SP_RIF_MAX; i++)
- WARN_ON_ONCE(mlxsw_sp->rifs[i]);
}
static struct mlxsw_config_profile mlxsw_sp_config_profile = {
.used_max_vepa_channels = 1,
.max_vepa_channels = 0,
- .used_max_lag = 1,
- .max_lag = MLXSW_SP_LAG_MAX,
- .used_max_port_per_lag = 1,
- .max_port_per_lag = MLXSW_SP_PORT_PER_LAG_MAX,
.used_max_mid = 1,
.max_mid = MLXSW_SP_MID_MAX,
.used_max_pgt = 1,
.max_pgt = 0,
- .used_max_system_port = 1,
- .max_system_port = 64,
- .used_max_vlan_groups = 1,
- .max_vlan_groups = 127,
- .used_max_regions = 1,
- .max_regions = 400,
.used_flood_tables = 1,
.used_flood_mode = 1,
.flood_mode = 3,
@@ -2953,10 +3048,11 @@ static struct mlxsw_config_profile mlxsw_sp_config_profile = {
.max_ib_mc = 0,
.used_max_pkey = 1,
.max_pkey = 0,
- .used_kvd_sizes = 1,
+ .used_kvd_split_data = 1,
+ .kvd_hash_granularity = MLXSW_SP_KVD_GRANULARITY,
+ .kvd_hash_single_parts = 2,
+ .kvd_hash_double_parts = 1,
.kvd_linear_size = MLXSW_SP_KVD_LINEAR_SIZE,
- .kvd_hash_single_size = MLXSW_SP_KVD_HASH_SINGLE_SIZE,
- .kvd_hash_double_size = MLXSW_SP_KVD_HASH_DOUBLE_SIZE,
.swid_config = {
{
.used_type = 1,
@@ -3073,13 +3169,15 @@ static bool mlxsw_sp_rif_should_config(struct mlxsw_sp_rif *r,
static int mlxsw_sp_avail_rif_get(struct mlxsw_sp *mlxsw_sp)
{
+ struct mlxsw_resources *resources;
int i;
- for (i = 0; i < MLXSW_SP_RIF_MAX; i++)
+ resources = mlxsw_core_resources_get(mlxsw_sp->core);
+ for (i = 0; i < resources->max_rif; i++)
if (!mlxsw_sp->rifs[i])
return i;
- return MLXSW_SP_RIF_MAX;
+ return MLXSW_SP_INVALID_RIF;
}
static void mlxsw_sp_vport_rif_sp_attr_get(struct mlxsw_sp_port *mlxsw_sp_vport,
@@ -3159,7 +3257,7 @@ mlxsw_sp_vport_rif_sp_create(struct mlxsw_sp_port *mlxsw_sp_vport,
int err;
rif = mlxsw_sp_avail_rif_get(mlxsw_sp);
- if (rif == MLXSW_SP_RIF_MAX)
+ if (rif == MLXSW_SP_INVALID_RIF)
return ERR_PTR(-ERANGE);
err = mlxsw_sp_vport_rif_sp_op(mlxsw_sp_vport, l3_dev, rif, true);
@@ -3391,7 +3489,7 @@ static int mlxsw_sp_rif_bridge_create(struct mlxsw_sp *mlxsw_sp,
int err;
rif = mlxsw_sp_avail_rif_get(mlxsw_sp);
- if (rif == MLXSW_SP_RIF_MAX)
+ if (rif == MLXSW_SP_INVALID_RIF)
return -ERANGE;
err = mlxsw_sp_router_port_flood_set(mlxsw_sp, f->fid, true);
@@ -3598,12 +3696,14 @@ static bool mlxsw_sp_port_fdb_should_flush(struct mlxsw_sp_port *mlxsw_sp_port,
struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
u8 local_port = mlxsw_sp_port->local_port;
u16 lag_id = mlxsw_sp_port->lag_id;
+ struct mlxsw_resources *resources;
int i, count = 0;
if (!mlxsw_sp_port->lagged)
return true;
- for (i = 0; i < MLXSW_SP_PORT_PER_LAG_MAX; i++) {
+ resources = mlxsw_core_resources_get(mlxsw_sp->core);
+ for (i = 0; i < resources->max_ports_in_lag; i++) {
struct mlxsw_sp_port *lag_port;
lag_port = mlxsw_sp_port_lagged_get(mlxsw_sp, lag_id, i);
@@ -3809,11 +3909,13 @@ static int mlxsw_sp_lag_index_get(struct mlxsw_sp *mlxsw_sp,
struct net_device *lag_dev,
u16 *p_lag_id)
{
+ struct mlxsw_resources *resources;
struct mlxsw_sp_upper *lag;
int free_lag_id = -1;
int i;
- for (i = 0; i < MLXSW_SP_LAG_MAX; i++) {
+ resources = mlxsw_core_resources_get(mlxsw_sp->core);
+ for (i = 0; i < resources->max_lag; i++) {
lag = mlxsw_sp_lag_get(mlxsw_sp, i);
if (lag->ref_count) {
if (lag->dev == lag_dev) {
@@ -3847,9 +3949,11 @@ mlxsw_sp_master_lag_check(struct mlxsw_sp *mlxsw_sp,
static int mlxsw_sp_port_lag_index_get(struct mlxsw_sp *mlxsw_sp,
u16 lag_id, u8 *p_port_index)
{
+ struct mlxsw_resources *resources;
int i;
- for (i = 0; i < MLXSW_SP_PORT_PER_LAG_MAX; i++) {
+ resources = mlxsw_core_resources_get(mlxsw_sp->core);
+ for (i = 0; i < resources->max_ports_in_lag; i++) {
if (!mlxsw_sp_port_lagged_get(mlxsw_sp, lag_id, i)) {
*p_port_index = i;
return 0;
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h
index ac48abebe904..9b22863a924b 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h
@@ -45,7 +45,7 @@
#include <linux/list.h>
#include <linux/dcbnl.h>
#include <linux/in6.h>
-#include <net/switchdev.h>
+#include <linux/notifier.h>
#include "port.h"
#include "core.h"
@@ -54,10 +54,7 @@
#define MLXSW_SP_VFID_MAX 6656 /* Bridged VLAN interfaces */
#define MLXSW_SP_RFID_BASE 15360
-#define MLXSW_SP_RIF_MAX 800
-
-#define MLXSW_SP_LAG_MAX 64
-#define MLXSW_SP_PORT_PER_LAG_MAX 16
+#define MLXSW_SP_INVALID_RIF 0xffff
#define MLXSW_SP_MID_MAX 7000
@@ -67,8 +64,6 @@
#define MLXSW_SP_LPM_TREE_MAX 22
#define MLXSW_SP_LPM_TREE_COUNT (MLXSW_SP_LPM_TREE_MAX - MLXSW_SP_LPM_TREE_MIN)
-#define MLXSW_SP_VIRTUAL_ROUTER_MAX 256
-
#define MLXSW_SP_PORT_BASE_SPEED 25000 /* Mb/s */
#define MLXSW_SP_BYTES_PER_CELL 96
@@ -77,8 +72,7 @@
#define MLXSW_SP_CELLS_TO_BYTES(c) (c * MLXSW_SP_BYTES_PER_CELL)
#define MLXSW_SP_KVD_LINEAR_SIZE 65536 /* entries */
-#define MLXSW_SP_KVD_HASH_SINGLE_SIZE 163840 /* entries */
-#define MLXSW_SP_KVD_HASH_DOUBLE_SIZE 32768 /* entries */
+#define MLXSW_SP_KVD_GRANULARITY 128
/* Maximum delay buffer needed in case of PAUSE frames, in cells.
* Assumes 100m cable and maximum MTU.
@@ -253,7 +247,7 @@ struct mlxsw_sp_port_mall_tc_entry {
struct mlxsw_sp_router {
struct mlxsw_sp_lpm_tree lpm_trees[MLXSW_SP_LPM_TREE_COUNT];
- struct mlxsw_sp_vr vrs[MLXSW_SP_VIRTUAL_ROUTER_MAX];
+ struct mlxsw_sp_vr *vrs;
struct rhashtable neigh_ht;
struct {
struct delayed_work dw;
@@ -263,6 +257,7 @@ struct mlxsw_sp_router {
#define MLXSW_SP_UNRESOLVED_NH_PROBE_INTERVAL 5000 /* ms */
struct list_head nexthop_group_list;
struct list_head nexthop_neighs_list;
+ bool aborted;
};
struct mlxsw_sp {
@@ -275,7 +270,7 @@ struct mlxsw_sp {
DECLARE_BITMAP(mapped, MLXSW_SP_MID_MAX);
} br_mids;
struct list_head fids; /* VLAN-aware bridge FIDs */
- struct mlxsw_sp_rif *rifs[MLXSW_SP_RIF_MAX];
+ struct mlxsw_sp_rif **rifs;
struct mlxsw_sp_port **ports;
struct mlxsw_core *core;
const struct mlxsw_bus_info *bus_info;
@@ -290,7 +285,7 @@ struct mlxsw_sp {
#define MLXSW_SP_DEFAULT_AGEING_TIME 300
u32 ageing_time;
struct mlxsw_sp_upper master_bridge;
- struct mlxsw_sp_upper lags[MLXSW_SP_LAG_MAX];
+ struct mlxsw_sp_upper *lags;
u8 port_to_module[MLXSW_PORT_MAX_PORTS];
struct mlxsw_sp_sb sb;
struct mlxsw_sp_router router;
@@ -302,6 +297,7 @@ struct mlxsw_sp {
struct mlxsw_sp_span_entry *entries;
int entries_count;
} span;
+ struct notifier_block fib_nb;
};
static inline struct mlxsw_sp_upper *
@@ -341,7 +337,8 @@ struct mlxsw_sp_port {
} vport;
struct {
u8 tx_pause:1,
- rx_pause:1;
+ rx_pause:1,
+ autoneg:1;
} link;
struct {
struct ieee_ets *ets;
@@ -360,6 +357,11 @@ struct mlxsw_sp_port {
struct list_head vports_list;
/* TC handles */
struct list_head mall_tc_list;
+ struct {
+ #define MLXSW_HW_STATS_UPDATE_TIME HZ
+ struct rtnl_link_stats64 *cache;
+ struct delayed_work update_dw;
+ } hw_stats;
};
struct mlxsw_sp_port *mlxsw_sp_port_lower_dev_hold(struct net_device *dev);
@@ -477,9 +479,12 @@ static inline struct mlxsw_sp_rif *
mlxsw_sp_rif_find_by_dev(const struct mlxsw_sp *mlxsw_sp,
const struct net_device *dev)
{
+ struct mlxsw_resources *resources;
int i;
- for (i = 0; i < MLXSW_SP_RIF_MAX; i++)
+ resources = mlxsw_core_resources_get(mlxsw_sp->core);
+
+ for (i = 0; i < resources->max_rif; i++)
if (mlxsw_sp->rifs[i] && mlxsw_sp->rifs[i]->dev == dev)
return mlxsw_sp->rifs[i];
@@ -558,6 +563,9 @@ int __mlxsw_sp_port_headroom_set(struct mlxsw_sp_port *mlxsw_sp_port, int mtu,
int mlxsw_sp_port_ets_maxrate_set(struct mlxsw_sp_port *mlxsw_sp_port,
enum mlxsw_reg_qeec_hr hr, u8 index,
u8 next_index, u32 maxrate);
+int __mlxsw_sp_port_vid_learning_set(struct mlxsw_sp_port *mlxsw_sp_port,
+ u16 vid_begin, u16 vid_end,
+ bool learn_enable);
#ifdef CONFIG_MLXSW_SPECTRUM_DCB
@@ -578,11 +586,6 @@ static inline void mlxsw_sp_port_dcb_fini(struct mlxsw_sp_port *mlxsw_sp_port)
int mlxsw_sp_router_init(struct mlxsw_sp *mlxsw_sp);
void mlxsw_sp_router_fini(struct mlxsw_sp *mlxsw_sp);
-int mlxsw_sp_router_fib4_add(struct mlxsw_sp_port *mlxsw_sp_port,
- const struct switchdev_obj_ipv4_fib *fib4,
- struct switchdev_trans *trans);
-int mlxsw_sp_router_fib4_del(struct mlxsw_sp_port *mlxsw_sp_port,
- const struct switchdev_obj_ipv4_fib *fib4);
int mlxsw_sp_router_neigh_construct(struct net_device *dev,
struct neighbour *n);
void mlxsw_sp_router_neigh_destroy(struct net_device *dev,
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c
index 953b214f38d0..bcaed8a38037 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c
@@ -595,9 +595,9 @@ int mlxsw_sp_sb_pool_get(struct mlxsw_core *mlxsw_core,
enum mlxsw_reg_sbxx_dir dir = dir_get(pool_index);
struct mlxsw_sp_sb_pr *pr = mlxsw_sp_sb_pr_get(mlxsw_sp, pool, dir);
- pool_info->pool_type = dir;
+ pool_info->pool_type = (enum devlink_sb_pool_type) dir;
pool_info->size = MLXSW_SP_CELLS_TO_BYTES(pr->size);
- pool_info->threshold_type = pr->mode;
+ pool_info->threshold_type = (enum devlink_sb_threshold_type) pr->mode;
return 0;
}
@@ -608,9 +608,10 @@ int mlxsw_sp_sb_pool_set(struct mlxsw_core *mlxsw_core,
struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
u8 pool = pool_get(pool_index);
enum mlxsw_reg_sbxx_dir dir = dir_get(pool_index);
- enum mlxsw_reg_sbpr_mode mode = threshold_type;
u32 pool_size = MLXSW_SP_BYTES_TO_CELLS(size);
+ enum mlxsw_reg_sbpr_mode mode;
+ mode = (enum mlxsw_reg_sbpr_mode) threshold_type;
return mlxsw_sp_sb_pr_write(mlxsw_sp, pool, dir, mode, pool_size);
}
@@ -696,13 +697,13 @@ int mlxsw_sp_sb_tc_pool_bind_get(struct mlxsw_core_port *mlxsw_core_port,
struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
u8 local_port = mlxsw_sp_port->local_port;
u8 pg_buff = tc_index;
- enum mlxsw_reg_sbxx_dir dir = pool_type;
+ enum mlxsw_reg_sbxx_dir dir = (enum mlxsw_reg_sbxx_dir) pool_type;
struct mlxsw_sp_sb_cm *cm = mlxsw_sp_sb_cm_get(mlxsw_sp, local_port,
pg_buff, dir);
*p_threshold = mlxsw_sp_sb_threshold_out(mlxsw_sp, cm->pool, dir,
cm->max_buff);
- *p_pool_index = pool_index_get(cm->pool, pool_type);
+ *p_pool_index = pool_index_get(cm->pool, dir);
return 0;
}
@@ -716,7 +717,7 @@ int mlxsw_sp_sb_tc_pool_bind_set(struct mlxsw_core_port *mlxsw_core_port,
struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
u8 local_port = mlxsw_sp_port->local_port;
u8 pg_buff = tc_index;
- enum mlxsw_reg_sbxx_dir dir = pool_type;
+ enum mlxsw_reg_sbxx_dir dir = (enum mlxsw_reg_sbxx_dir) pool_type;
u8 pool = pool_get(pool_index);
u32 max_buff;
int err;
@@ -943,7 +944,7 @@ int mlxsw_sp_sb_occ_tc_port_bind_get(struct mlxsw_core_port *mlxsw_core_port,
struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
u8 local_port = mlxsw_sp_port->local_port;
u8 pg_buff = tc_index;
- enum mlxsw_reg_sbxx_dir dir = pool_type;
+ enum mlxsw_reg_sbxx_dir dir = (enum mlxsw_reg_sbxx_dir) pool_type;
struct mlxsw_sp_sb_cm *cm = mlxsw_sp_sb_cm_get(mlxsw_sp, local_port,
pg_buff, dir);
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
index 3f5c51da6d3e..78fc557d6dd7 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
@@ -43,6 +43,7 @@
#include <net/netevent.h>
#include <net/neighbour.h>
#include <net/arp.h>
+#include <net/ip_fib.h>
#include "spectrum.h"
#include "core.h"
@@ -122,17 +123,20 @@ struct mlxsw_sp_nexthop_group;
struct mlxsw_sp_fib_entry {
struct rhash_head ht_node;
+ struct list_head list;
struct mlxsw_sp_fib_key key;
enum mlxsw_sp_fib_entry_type type;
unsigned int ref_count;
u16 rif; /* used for action local */
struct mlxsw_sp_vr *vr;
+ struct fib_info *fi;
struct list_head nexthop_group_node;
struct mlxsw_sp_nexthop_group *nh_group;
};
struct mlxsw_sp_fib {
struct rhashtable ht;
+ struct list_head entry_list;
unsigned long prefix_ref_count[MLXSW_SP_PREFIX_COUNT];
struct mlxsw_sp_prefix_usage prefix_usage;
};
@@ -154,6 +158,7 @@ static int mlxsw_sp_fib_entry_insert(struct mlxsw_sp_fib *fib,
mlxsw_sp_fib_ht_params);
if (err)
return err;
+ list_add_tail(&fib_entry->list, &fib->entry_list);
if (fib->prefix_ref_count[prefix_len]++ == 0)
mlxsw_sp_prefix_usage_set(&fib->prefix_usage, prefix_len);
return 0;
@@ -166,6 +171,7 @@ static void mlxsw_sp_fib_entry_remove(struct mlxsw_sp_fib *fib,
if (--fib->prefix_ref_count[prefix_len] == 0)
mlxsw_sp_prefix_usage_clear(&fib->prefix_usage, prefix_len);
+ list_del(&fib_entry->list);
rhashtable_remove_fast(&fib->ht, &fib_entry->ht_node,
mlxsw_sp_fib_ht_params);
}
@@ -216,6 +222,7 @@ static struct mlxsw_sp_fib *mlxsw_sp_fib_create(void)
err = rhashtable_init(&fib->ht, &mlxsw_sp_fib_ht_params);
if (err)
goto err_rhashtable_init;
+ INIT_LIST_HEAD(&fib->entry_list);
return fib;
err_rhashtable_init:
@@ -252,7 +259,9 @@ static int mlxsw_sp_lpm_tree_alloc(struct mlxsw_sp *mlxsw_sp,
{
char ralta_pl[MLXSW_REG_RALTA_LEN];
- mlxsw_reg_ralta_pack(ralta_pl, true, lpm_tree->proto, lpm_tree->id);
+ mlxsw_reg_ralta_pack(ralta_pl, true,
+ (enum mlxsw_reg_ralxx_protocol) lpm_tree->proto,
+ lpm_tree->id);
return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralta), ralta_pl);
}
@@ -261,7 +270,9 @@ static int mlxsw_sp_lpm_tree_free(struct mlxsw_sp *mlxsw_sp,
{
char ralta_pl[MLXSW_REG_RALTA_LEN];
- mlxsw_reg_ralta_pack(ralta_pl, false, lpm_tree->proto, lpm_tree->id);
+ mlxsw_reg_ralta_pack(ralta_pl, false,
+ (enum mlxsw_reg_ralxx_protocol) lpm_tree->proto,
+ lpm_tree->id);
return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralta), ralta_pl);
}
@@ -368,10 +379,12 @@ static void mlxsw_sp_lpm_init(struct mlxsw_sp *mlxsw_sp)
static struct mlxsw_sp_vr *mlxsw_sp_vr_find_unused(struct mlxsw_sp *mlxsw_sp)
{
+ struct mlxsw_resources *resources;
struct mlxsw_sp_vr *vr;
int i;
- for (i = 0; i < MLXSW_SP_VIRTUAL_ROUTER_MAX; i++) {
+ resources = mlxsw_core_resources_get(mlxsw_sp->core);
+ for (i = 0; i < resources->max_virtual_routers; i++) {
vr = &mlxsw_sp->router.vrs[i];
if (!vr->used)
return vr;
@@ -384,7 +397,9 @@ static int mlxsw_sp_vr_lpm_tree_bind(struct mlxsw_sp *mlxsw_sp,
{
char raltb_pl[MLXSW_REG_RALTB_LEN];
- mlxsw_reg_raltb_pack(raltb_pl, vr->id, vr->proto, vr->lpm_tree->id);
+ mlxsw_reg_raltb_pack(raltb_pl, vr->id,
+ (enum mlxsw_reg_ralxx_protocol) vr->proto,
+ vr->lpm_tree->id);
return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(raltb), raltb_pl);
}
@@ -394,7 +409,8 @@ static int mlxsw_sp_vr_lpm_tree_unbind(struct mlxsw_sp *mlxsw_sp,
char raltb_pl[MLXSW_REG_RALTB_LEN];
/* Bind to tree 0 which is default */
- mlxsw_reg_raltb_pack(raltb_pl, vr->id, vr->proto, 0);
+ mlxsw_reg_raltb_pack(raltb_pl, vr->id,
+ (enum mlxsw_reg_ralxx_protocol) vr->proto, 0);
return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(raltb), raltb_pl);
}
@@ -410,11 +426,14 @@ static struct mlxsw_sp_vr *mlxsw_sp_vr_find(struct mlxsw_sp *mlxsw_sp,
u32 tb_id,
enum mlxsw_sp_l3proto proto)
{
+ struct mlxsw_resources *resources;
struct mlxsw_sp_vr *vr;
int i;
tb_id = mlxsw_sp_fix_tb_id(tb_id);
- for (i = 0; i < MLXSW_SP_VIRTUAL_ROUTER_MAX; i++) {
+
+ resources = mlxsw_core_resources_get(mlxsw_sp->core);
+ for (i = 0; i < resources->max_virtual_routers; i++) {
vr = &mlxsw_sp->router.vrs[i];
if (vr->used && vr->proto == proto && vr->tb_id == tb_id)
return vr;
@@ -548,15 +567,33 @@ static void mlxsw_sp_vr_put(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_vr *vr)
&vr->fib->prefix_usage);
}
-static void mlxsw_sp_vrs_init(struct mlxsw_sp *mlxsw_sp)
+static int mlxsw_sp_vrs_init(struct mlxsw_sp *mlxsw_sp)
{
+ struct mlxsw_resources *resources;
struct mlxsw_sp_vr *vr;
int i;
- for (i = 0; i < MLXSW_SP_VIRTUAL_ROUTER_MAX; i++) {
+ resources = mlxsw_core_resources_get(mlxsw_sp->core);
+ if (!resources->max_virtual_routers_valid)
+ return -EIO;
+
+ mlxsw_sp->router.vrs = kcalloc(resources->max_virtual_routers,
+ sizeof(struct mlxsw_sp_vr),
+ GFP_KERNEL);
+ if (!mlxsw_sp->router.vrs)
+ return -ENOMEM;
+
+ for (i = 0; i < resources->max_virtual_routers; i++) {
vr = &mlxsw_sp->router.vrs[i];
vr->id = i;
}
+
+ return 0;
+}
+
+static void mlxsw_sp_vrs_fini(struct mlxsw_sp *mlxsw_sp)
+{
+ kfree(mlxsw_sp->router.vrs);
}
struct mlxsw_sp_neigh_key {
@@ -1081,9 +1118,10 @@ static int mlxsw_sp_adj_index_mass_update_vr(struct mlxsw_sp *mlxsw_sp,
{
char raleu_pl[MLXSW_REG_RALEU_LEN];
- mlxsw_reg_raleu_pack(raleu_pl, vr->proto, vr->id,
- adj_index, ecmp_size,
- new_adj_index, new_ecmp_size);
+ mlxsw_reg_raleu_pack(raleu_pl,
+ (enum mlxsw_reg_ralxx_protocol) vr->proto, vr->id,
+ adj_index, ecmp_size, new_adj_index,
+ new_ecmp_size);
return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(raleu), raleu_pl);
}
@@ -1489,50 +1527,6 @@ static void mlxsw_sp_nexthop_group_put(struct mlxsw_sp *mlxsw_sp,
mlxsw_sp_nexthop_group_destroy(mlxsw_sp, nh_grp);
}
-static int __mlxsw_sp_router_init(struct mlxsw_sp *mlxsw_sp)
-{
- char rgcr_pl[MLXSW_REG_RGCR_LEN];
-
- mlxsw_reg_rgcr_pack(rgcr_pl, true);
- mlxsw_reg_rgcr_max_router_interfaces_set(rgcr_pl, MLXSW_SP_RIF_MAX);
- return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rgcr), rgcr_pl);
-}
-
-static void __mlxsw_sp_router_fini(struct mlxsw_sp *mlxsw_sp)
-{
- char rgcr_pl[MLXSW_REG_RGCR_LEN];
-
- mlxsw_reg_rgcr_pack(rgcr_pl, false);
- mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rgcr), rgcr_pl);
-}
-
-int mlxsw_sp_router_init(struct mlxsw_sp *mlxsw_sp)
-{
- int err;
-
- INIT_LIST_HEAD(&mlxsw_sp->router.nexthop_neighs_list);
- INIT_LIST_HEAD(&mlxsw_sp->router.nexthop_group_list);
- err = __mlxsw_sp_router_init(mlxsw_sp);
- if (err)
- return err;
- mlxsw_sp_lpm_init(mlxsw_sp);
- mlxsw_sp_vrs_init(mlxsw_sp);
- err = mlxsw_sp_neigh_init(mlxsw_sp);
- if (err)
- goto err_neigh_init;
- return 0;
-
-err_neigh_init:
- __mlxsw_sp_router_fini(mlxsw_sp);
- return err;
-}
-
-void mlxsw_sp_router_fini(struct mlxsw_sp *mlxsw_sp)
-{
- mlxsw_sp_neigh_fini(mlxsw_sp);
- __mlxsw_sp_router_fini(mlxsw_sp);
-}
-
static int mlxsw_sp_fib_entry_op4_remote(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_fib_entry *fib_entry,
enum mlxsw_reg_ralue_op op)
@@ -1558,8 +1552,9 @@ static int mlxsw_sp_fib_entry_op4_remote(struct mlxsw_sp *mlxsw_sp,
trap_id = MLXSW_TRAP_ID_RTR_INGRESS0;
}
- mlxsw_reg_ralue_pack4(ralue_pl, vr->proto, op, vr->id,
- fib_entry->key.prefix_len, *p_dip);
+ mlxsw_reg_ralue_pack4(ralue_pl,
+ (enum mlxsw_reg_ralxx_protocol) vr->proto, op,
+ vr->id, fib_entry->key.prefix_len, *p_dip);
mlxsw_reg_ralue_act_remote_pack(ralue_pl, trap_action, trap_id,
adjacency_index, ecmp_size);
return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralue), ralue_pl);
@@ -1573,8 +1568,9 @@ static int mlxsw_sp_fib_entry_op4_local(struct mlxsw_sp *mlxsw_sp,
u32 *p_dip = (u32 *) fib_entry->key.addr;
struct mlxsw_sp_vr *vr = fib_entry->vr;
- mlxsw_reg_ralue_pack4(ralue_pl, vr->proto, op, vr->id,
- fib_entry->key.prefix_len, *p_dip);
+ mlxsw_reg_ralue_pack4(ralue_pl,
+ (enum mlxsw_reg_ralxx_protocol) vr->proto, op,
+ vr->id, fib_entry->key.prefix_len, *p_dip);
mlxsw_reg_ralue_act_local_pack(ralue_pl,
MLXSW_REG_RALUE_TRAP_ACTION_NOP, 0,
fib_entry->rif);
@@ -1589,8 +1585,9 @@ static int mlxsw_sp_fib_entry_op4_trap(struct mlxsw_sp *mlxsw_sp,
u32 *p_dip = (u32 *) fib_entry->key.addr;
struct mlxsw_sp_vr *vr = fib_entry->vr;
- mlxsw_reg_ralue_pack4(ralue_pl, vr->proto, op, vr->id,
- fib_entry->key.prefix_len, *p_dip);
+ mlxsw_reg_ralue_pack4(ralue_pl,
+ (enum mlxsw_reg_ralxx_protocol) vr->proto, op,
+ vr->id, fib_entry->key.prefix_len, *p_dip);
mlxsw_reg_ralue_act_ip2me_pack(ralue_pl);
return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralue), ralue_pl);
}
@@ -1637,94 +1634,102 @@ static int mlxsw_sp_fib_entry_del(struct mlxsw_sp *mlxsw_sp,
MLXSW_REG_RALUE_OP_WRITE_DELETE);
}
-struct mlxsw_sp_router_fib4_add_info {
- struct switchdev_trans_item tritem;
- struct mlxsw_sp *mlxsw_sp;
- struct mlxsw_sp_fib_entry *fib_entry;
-};
-
-static void mlxsw_sp_router_fib4_add_info_destroy(void const *data)
-{
- const struct mlxsw_sp_router_fib4_add_info *info = data;
- struct mlxsw_sp_fib_entry *fib_entry = info->fib_entry;
- struct mlxsw_sp *mlxsw_sp = info->mlxsw_sp;
- struct mlxsw_sp_vr *vr = fib_entry->vr;
-
- mlxsw_sp_fib_entry_destroy(fib_entry);
- mlxsw_sp_vr_put(mlxsw_sp, vr);
- kfree(info);
-}
-
static int
mlxsw_sp_router_fib4_entry_init(struct mlxsw_sp *mlxsw_sp,
- const struct switchdev_obj_ipv4_fib *fib4,
+ const struct fib_entry_notifier_info *fen_info,
struct mlxsw_sp_fib_entry *fib_entry)
{
- struct fib_info *fi = fib4->fi;
+ struct fib_info *fi = fen_info->fi;
+ struct mlxsw_sp_rif *r = NULL;
+ int nhsel;
+ int err;
- if (fib4->type == RTN_LOCAL || fib4->type == RTN_BROADCAST) {
+ if (fen_info->type == RTN_LOCAL || fen_info->type == RTN_BROADCAST) {
fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_TRAP;
return 0;
}
- if (fib4->type != RTN_UNICAST)
+ if (fen_info->type != RTN_UNICAST)
return -EINVAL;
- if (fi->fib_scope != RT_SCOPE_UNIVERSE) {
- struct mlxsw_sp_rif *r;
+ for (nhsel = 0; nhsel < fi->fib_nhs; nhsel++) {
+ const struct fib_nh *nh = &fi->fib_nh[nhsel];
+
+ if (!nh->nh_dev)
+ continue;
+ r = mlxsw_sp_rif_find_by_dev(mlxsw_sp, nh->nh_dev);
+ if (!r) {
+ /* If no router interface is found for at least
+ * one of the nexthops, that nexthop points to a
+ * device unrelated to us. Set a trap and pass the
+ * packets for this prefix to the kernel.
+ */
+ break;
+ }
+ }
+ if (!r) {
+ fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_TRAP;
+ return 0;
+ }
+
+ if (fi->fib_scope != RT_SCOPE_UNIVERSE) {
fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_LOCAL;
- r = mlxsw_sp_rif_find_by_dev(mlxsw_sp, fi->fib_dev);
- if (!r)
- return -EINVAL;
fib_entry->rif = r->rif;
- return 0;
+ } else {
+ fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_REMOTE;
+ err = mlxsw_sp_nexthop_group_get(mlxsw_sp, fib_entry, fi);
+ if (err)
+ return err;
}
- fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_REMOTE;
- return mlxsw_sp_nexthop_group_get(mlxsw_sp, fib_entry, fi);
+ fib_info_offload_inc(fen_info->fi);
+ return 0;
}
static void
mlxsw_sp_router_fib4_entry_fini(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_fib_entry *fib_entry)
{
- if (fib_entry->type != MLXSW_SP_FIB_ENTRY_TYPE_REMOTE)
- return;
- mlxsw_sp_nexthop_group_put(mlxsw_sp, fib_entry);
+ if (fib_entry->type != MLXSW_SP_FIB_ENTRY_TYPE_TRAP)
+ fib_info_offload_dec(fib_entry->fi);
+ if (fib_entry->type == MLXSW_SP_FIB_ENTRY_TYPE_REMOTE)
+ mlxsw_sp_nexthop_group_put(mlxsw_sp, fib_entry);
}
static struct mlxsw_sp_fib_entry *
mlxsw_sp_fib_entry_get(struct mlxsw_sp *mlxsw_sp,
- const struct switchdev_obj_ipv4_fib *fib4)
+ const struct fib_entry_notifier_info *fen_info)
{
struct mlxsw_sp_fib_entry *fib_entry;
- struct fib_info *fi = fib4->fi;
+ struct fib_info *fi = fen_info->fi;
struct mlxsw_sp_vr *vr;
int err;
- vr = mlxsw_sp_vr_get(mlxsw_sp, fib4->dst_len, fib4->tb_id,
+ vr = mlxsw_sp_vr_get(mlxsw_sp, fen_info->dst_len, fen_info->tb_id,
MLXSW_SP_L3_PROTO_IPV4);
if (IS_ERR(vr))
return ERR_CAST(vr);
- fib_entry = mlxsw_sp_fib_entry_lookup(vr->fib, &fib4->dst,
- sizeof(fib4->dst),
- fib4->dst_len, fi->fib_dev);
+ fib_entry = mlxsw_sp_fib_entry_lookup(vr->fib, &fen_info->dst,
+ sizeof(fen_info->dst),
+ fen_info->dst_len, fi->fib_dev);
if (fib_entry) {
/* Already exists, just take a reference */
fib_entry->ref_count++;
return fib_entry;
}
- fib_entry = mlxsw_sp_fib_entry_create(vr->fib, &fib4->dst,
- sizeof(fib4->dst),
- fib4->dst_len, fi->fib_dev);
+ fib_entry = mlxsw_sp_fib_entry_create(vr->fib, &fen_info->dst,
+ sizeof(fen_info->dst),
+ fen_info->dst_len, fi->fib_dev);
if (!fib_entry) {
err = -ENOMEM;
goto err_fib_entry_create;
}
fib_entry->vr = vr;
+ fib_entry->fi = fi;
fib_entry->ref_count = 1;
- err = mlxsw_sp_router_fib4_entry_init(mlxsw_sp, fib4, fib_entry);
+ err = mlxsw_sp_router_fib4_entry_init(mlxsw_sp, fen_info, fib_entry);
if (err)
goto err_fib4_entry_init;
@@ -1740,21 +1745,23 @@ err_fib_entry_create:
static struct mlxsw_sp_fib_entry *
mlxsw_sp_fib_entry_find(struct mlxsw_sp *mlxsw_sp,
- const struct switchdev_obj_ipv4_fib *fib4)
+ const struct fib_entry_notifier_info *fen_info)
{
struct mlxsw_sp_vr *vr;
- vr = mlxsw_sp_vr_find(mlxsw_sp, fib4->tb_id, MLXSW_SP_L3_PROTO_IPV4);
+ vr = mlxsw_sp_vr_find(mlxsw_sp, fen_info->tb_id,
+ MLXSW_SP_L3_PROTO_IPV4);
if (!vr)
return NULL;
- return mlxsw_sp_fib_entry_lookup(vr->fib, &fib4->dst,
- sizeof(fib4->dst), fib4->dst_len,
- fib4->fi->fib_dev);
+ return mlxsw_sp_fib_entry_lookup(vr->fib, &fen_info->dst,
+ sizeof(fen_info->dst),
+ fen_info->dst_len,
+ fen_info->fi->fib_dev);
}
-void mlxsw_sp_fib_entry_put(struct mlxsw_sp *mlxsw_sp,
- struct mlxsw_sp_fib_entry *fib_entry)
+static void mlxsw_sp_fib_entry_put(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_fib_entry *fib_entry)
{
struct mlxsw_sp_vr *vr = fib_entry->vr;
@@ -1765,60 +1772,43 @@ void mlxsw_sp_fib_entry_put(struct mlxsw_sp *mlxsw_sp,
mlxsw_sp_vr_put(mlxsw_sp, vr);
}
-static int
-mlxsw_sp_router_fib4_add_prepare(struct mlxsw_sp_port *mlxsw_sp_port,
- const struct switchdev_obj_ipv4_fib *fib4,
- struct switchdev_trans *trans)
+static void mlxsw_sp_fib_entry_put_all(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_fib_entry *fib_entry)
{
- struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
- struct mlxsw_sp_router_fib4_add_info *info;
- struct mlxsw_sp_fib_entry *fib_entry;
- int err;
+ unsigned int last_ref_count;
- fib_entry = mlxsw_sp_fib_entry_get(mlxsw_sp, fib4);
- if (IS_ERR(fib_entry))
- return PTR_ERR(fib_entry);
-
- info = kmalloc(sizeof(*info), GFP_KERNEL);
- if (!info) {
- err = -ENOMEM;
- goto err_alloc_info;
- }
- info->mlxsw_sp = mlxsw_sp;
- info->fib_entry = fib_entry;
- switchdev_trans_item_enqueue(trans, info,
- mlxsw_sp_router_fib4_add_info_destroy,
- &info->tritem);
- return 0;
-
-err_alloc_info:
- mlxsw_sp_fib_entry_put(mlxsw_sp, fib_entry);
- return err;
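+ /* Drop every reference held on the entry; the iteration that starts
+ * with ref_count == 1 frees it, which is why ref_count is sampled
+ * before the put.
+ */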
+ do {
+ last_ref_count = fib_entry->ref_count;
+ mlxsw_sp_fib_entry_put(mlxsw_sp, fib_entry);
+ } while (last_ref_count != 1);
}
-static int
-mlxsw_sp_router_fib4_add_commit(struct mlxsw_sp_port *mlxsw_sp_port,
- const struct switchdev_obj_ipv4_fib *fib4,
- struct switchdev_trans *trans)
+static int mlxsw_sp_router_fib4_add(struct mlxsw_sp *mlxsw_sp,
+ struct fib_entry_notifier_info *fen_info)
{
- struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
- struct mlxsw_sp_router_fib4_add_info *info;
struct mlxsw_sp_fib_entry *fib_entry;
struct mlxsw_sp_vr *vr;
int err;
- info = switchdev_trans_item_dequeue(trans);
- fib_entry = info->fib_entry;
- kfree(info);
+ if (mlxsw_sp->router.aborted)
+ return 0;
+
+ fib_entry = mlxsw_sp_fib_entry_get(mlxsw_sp, fen_info);
+ if (IS_ERR(fib_entry)) {
+ dev_warn(mlxsw_sp->bus_info->dev, "Failed to get FIB4 entry being added.\n");
+ return PTR_ERR(fib_entry);
+ }
if (fib_entry->ref_count != 1)
return 0;
vr = fib_entry->vr;
err = mlxsw_sp_fib_entry_insert(vr->fib, fib_entry);
- if (err)
+ if (err) {
+ dev_warn(mlxsw_sp->bus_info->dev, "Failed to insert FIB4 entry being added.\n");
goto err_fib_entry_insert;
- err = mlxsw_sp_fib_entry_update(mlxsw_sp_port->mlxsw_sp, fib_entry);
+ }
+ err = mlxsw_sp_fib_entry_update(mlxsw_sp, fib_entry);
if (err)
goto err_fib_entry_add;
return 0;
@@ -1830,24 +1820,15 @@ err_fib_entry_insert:
return err;
}
-int mlxsw_sp_router_fib4_add(struct mlxsw_sp_port *mlxsw_sp_port,
- const struct switchdev_obj_ipv4_fib *fib4,
- struct switchdev_trans *trans)
-{
- if (switchdev_trans_ph_prepare(trans))
- return mlxsw_sp_router_fib4_add_prepare(mlxsw_sp_port,
- fib4, trans);
- return mlxsw_sp_router_fib4_add_commit(mlxsw_sp_port,
- fib4, trans);
-}
-
-int mlxsw_sp_router_fib4_del(struct mlxsw_sp_port *mlxsw_sp_port,
- const struct switchdev_obj_ipv4_fib *fib4)
+static int mlxsw_sp_router_fib4_del(struct mlxsw_sp *mlxsw_sp,
+ struct fib_entry_notifier_info *fen_info)
{
- struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
struct mlxsw_sp_fib_entry *fib_entry;
- fib_entry = mlxsw_sp_fib_entry_find(mlxsw_sp, fib4);
+ if (mlxsw_sp->router.aborted)
+ return 0;
+
+ fib_entry = mlxsw_sp_fib_entry_find(mlxsw_sp, fen_info);
if (!fib_entry) {
dev_warn(mlxsw_sp->bus_info->dev, "Failed to find FIB4 entry being removed.\n");
return -ENOENT;
@@ -1861,3 +1842,172 @@ int mlxsw_sp_router_fib4_del(struct mlxsw_sp_port *mlxsw_sp_port,
mlxsw_sp_fib_entry_put(mlxsw_sp, fib_entry);
return 0;
}
+
+static int mlxsw_sp_router_set_abort_trap(struct mlxsw_sp *mlxsw_sp)
+{
+ char ralta_pl[MLXSW_REG_RALTA_LEN];
+ char ralst_pl[MLXSW_REG_RALST_LEN];
+ char raltb_pl[MLXSW_REG_RALTB_LEN];
+ char ralue_pl[MLXSW_REG_RALUE_LEN];
+ int err;
+
+ mlxsw_reg_ralta_pack(ralta_pl, true, MLXSW_REG_RALXX_PROTOCOL_IPV4,
+ MLXSW_SP_LPM_TREE_MIN);
+ err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralta), ralta_pl);
+ if (err)
+ return err;
+
+ mlxsw_reg_ralst_pack(ralst_pl, 0xff, MLXSW_SP_LPM_TREE_MIN);
+ err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralst), ralst_pl);
+ if (err)
+ return err;
+
+ mlxsw_reg_raltb_pack(raltb_pl, 0, MLXSW_REG_RALXX_PROTOCOL_IPV4, 0);
+ err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(raltb), raltb_pl);
+ if (err)
+ return err;
+
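+ /* Install a default (0.0.0.0/0) route in virtual router 0 with an
+ * IP2ME action, so that all routed packets are trapped to the CPU
+ * once offload has been aborted.
+ */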
+ mlxsw_reg_ralue_pack4(ralue_pl, MLXSW_SP_L3_PROTO_IPV4,
+ MLXSW_REG_RALUE_OP_WRITE_WRITE, 0, 0, 0);
+ mlxsw_reg_ralue_act_ip2me_pack(ralue_pl);
+ return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralue), ralue_pl);
+}
+
+static void mlxsw_sp_router_fib4_abort(struct mlxsw_sp *mlxsw_sp)
+{
+ struct mlxsw_resources *resources;
+ struct mlxsw_sp_fib_entry *fib_entry;
+ struct mlxsw_sp_fib_entry *tmp;
+ struct mlxsw_sp_vr *vr;
+ int i;
+ int err;
+
+ resources = mlxsw_core_resources_get(mlxsw_sp->core);
+ for (i = 0; i < resources->max_virtual_routers; i++) {
+ vr = &mlxsw_sp->router.vrs[i];
+ if (!vr->used)
+ continue;
+
+ list_for_each_entry_safe(fib_entry, tmp,
+ &vr->fib->entry_list, list) {
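+ /* Putting the last entry may free vr->fib itself, so record
+ * whether this is the final iteration before the entry (and
+ * possibly the list head) goes away.
+ */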
+ bool do_break = &tmp->list == &vr->fib->entry_list;
+
+ mlxsw_sp_fib_entry_del(mlxsw_sp, fib_entry);
+ mlxsw_sp_fib_entry_remove(fib_entry->vr->fib,
+ fib_entry);
+ mlxsw_sp_fib_entry_put_all(mlxsw_sp, fib_entry);
+ if (do_break)
+ break;
+ }
+ }
+ mlxsw_sp->router.aborted = true;
+ err = mlxsw_sp_router_set_abort_trap(mlxsw_sp);
+ if (err)
+ dev_warn(mlxsw_sp->bus_info->dev, "Failed to set abort trap.\n");
+}
+
+static int __mlxsw_sp_router_init(struct mlxsw_sp *mlxsw_sp)
+{
+ struct mlxsw_resources *resources;
+ char rgcr_pl[MLXSW_REG_RGCR_LEN];
+ int err;
+
+ resources = mlxsw_core_resources_get(mlxsw_sp->core);
+ if (!resources->max_rif_valid)
+ return -EIO;
+
+ mlxsw_sp->rifs = kcalloc(resources->max_rif,
+ sizeof(struct mlxsw_sp_rif *), GFP_KERNEL);
+ if (!mlxsw_sp->rifs)
+ return -ENOMEM;
+
+ mlxsw_reg_rgcr_pack(rgcr_pl, true);
+ mlxsw_reg_rgcr_max_router_interfaces_set(rgcr_pl, resources->max_rif);
+ err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rgcr), rgcr_pl);
+ if (err)
+ goto err_rgcr_fail;
+
+ return 0;
+
+err_rgcr_fail:
+ kfree(mlxsw_sp->rifs);
+ return err;
+}
+
+static void __mlxsw_sp_router_fini(struct mlxsw_sp *mlxsw_sp)
+{
+ struct mlxsw_resources *resources;
+ char rgcr_pl[MLXSW_REG_RGCR_LEN];
+ int i;
+
+ mlxsw_reg_rgcr_pack(rgcr_pl, false);
+ mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rgcr), rgcr_pl);
+
+ resources = mlxsw_core_resources_get(mlxsw_sp->core);
+ for (i = 0; i < resources->max_rif; i++)
+ WARN_ON_ONCE(mlxsw_sp->rifs[i]);
+
+ kfree(mlxsw_sp->rifs);
+}
+
+static int mlxsw_sp_router_fib_event(struct notifier_block *nb,
+ unsigned long event, void *ptr)
+{
+ struct mlxsw_sp *mlxsw_sp = container_of(nb, struct mlxsw_sp, fib_nb);
+ struct fib_entry_notifier_info *fen_info = ptr;
+ int err;
+
+ switch (event) {
+ case FIB_EVENT_ENTRY_ADD:
+ err = mlxsw_sp_router_fib4_add(mlxsw_sp, fen_info);
+ if (err)
+ mlxsw_sp_router_fib4_abort(mlxsw_sp);
+ break;
+ case FIB_EVENT_ENTRY_DEL:
+ mlxsw_sp_router_fib4_del(mlxsw_sp, fen_info);
+ break;
+ case FIB_EVENT_RULE_ADD: /* fall through */
+ case FIB_EVENT_RULE_DEL:
+ mlxsw_sp_router_fib4_abort(mlxsw_sp);
+ break;
+ }
+ return NOTIFY_DONE;
+}
+
+int mlxsw_sp_router_init(struct mlxsw_sp *mlxsw_sp)
+{
+ int err;
+
+ INIT_LIST_HEAD(&mlxsw_sp->router.nexthop_neighs_list);
+ INIT_LIST_HEAD(&mlxsw_sp->router.nexthop_group_list);
+ err = __mlxsw_sp_router_init(mlxsw_sp);
+ if (err)
+ return err;
+
+ mlxsw_sp_lpm_init(mlxsw_sp);
+ err = mlxsw_sp_vrs_init(mlxsw_sp);
+ if (err)
+ goto err_vrs_init;
+
+ err = mlxsw_sp_neigh_init(mlxsw_sp);
+ if (err)
+ goto err_neigh_init;
+
+ mlxsw_sp->fib_nb.notifier_call = mlxsw_sp_router_fib_event;
+ register_fib_notifier(&mlxsw_sp->fib_nb);
+ return 0;
+
+err_neigh_init:
+ mlxsw_sp_vrs_fini(mlxsw_sp);
+err_vrs_init:
+ __mlxsw_sp_router_fini(mlxsw_sp);
+ return err;
+}
+
+void mlxsw_sp_router_fini(struct mlxsw_sp *mlxsw_sp)
+{
+ unregister_fib_notifier(&mlxsw_sp->fib_nb);
+ mlxsw_sp_neigh_fini(mlxsw_sp);
+ mlxsw_sp_vrs_fini(mlxsw_sp);
+ __mlxsw_sp_router_fini(mlxsw_sp);
+}
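The FIB notifier chain consumed above replaces the SWITCHDEV_OBJ_ID_IPV4_FIB objects removed from spectrum_switchdev.c further down. A minimal stand-alone subscriber looks roughly like the sketch below; register_fib_notifier(), unregister_fib_notifier(), the FIB_EVENT_* values and struct fib_entry_notifier_info are the interfaces provided by <net/ip_fib.h> at this point, while the my_* names are illustrative assumptions:

#include <linux/notifier.h>
#include <net/ip_fib.h>

static void my_offload_route(const struct fib_entry_notifier_info *fen_info,
			     bool add)
{
	/* Stub: a real driver would translate fen_info->dst/dst_len and
	 * fen_info->fi into its hardware routing table here, or abort
	 * offloading on failure the way mlxsw_sp_router_fib4_abort() does.
	 */
}

static int my_fib_event(struct notifier_block *nb, unsigned long event,
			void *ptr)
{
	switch (event) {
	case FIB_EVENT_ENTRY_ADD:
	case FIB_EVENT_ENTRY_DEL:
		my_offload_route(ptr, event == FIB_EVENT_ENTRY_ADD);
		break;
	case FIB_EVENT_RULE_ADD: /* fall through */
	case FIB_EVENT_RULE_DEL:
		/* Policy routing rules are not offloaded; give up. */
		break;
	}
	return NOTIFY_DONE;
}

static struct notifier_block my_fib_nb = {
	.notifier_call = my_fib_event,
};

/* Paired with register_fib_notifier(&my_fib_nb) at init time and
 * unregister_fib_notifier(&my_fib_nb) at teardown, mirroring
 * mlxsw_sp_router_init()/mlxsw_sp_router_fini() above.
 */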
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
index 7b654c517b91..5e00c79e8133 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
@@ -254,12 +254,40 @@ int mlxsw_sp_vport_flood_set(struct mlxsw_sp_port *mlxsw_sp_vport, u16 fid,
return __mlxsw_sp_port_flood_set(mlxsw_sp_vport, vfid, vfid, set, set);
}
+static int mlxsw_sp_port_learning_set(struct mlxsw_sp_port *mlxsw_sp_port,
+ bool set)
+{
+ u16 vid;
+ int err;
+
+ if (mlxsw_sp_port_is_vport(mlxsw_sp_port)) {
+ vid = mlxsw_sp_vport_vid_get(mlxsw_sp_port);
+
+ return __mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, vid, vid,
+ set);
+ }
+
+ for_each_set_bit(vid, mlxsw_sp_port->active_vlans, VLAN_N_VID) {
+ err = __mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, vid, vid,
+ set);
+ if (err)
+ goto err_port_vid_learning_set;
+ }
+
+ return 0;
+
+err_port_vid_learning_set:
+ for_each_set_bit(vid, mlxsw_sp_port->active_vlans, VLAN_N_VID)
+ __mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, vid, vid, !set);
+ return err;
+}
+
static int mlxsw_sp_port_attr_br_flags_set(struct mlxsw_sp_port *mlxsw_sp_port,
struct switchdev_trans *trans,
unsigned long brport_flags)
{
+ unsigned long learning = mlxsw_sp_port->learning ? BR_LEARNING : 0;
unsigned long uc_flood = mlxsw_sp_port->uc_flood ? BR_FLOOD : 0;
- bool set;
int err;
if (!mlxsw_sp_port->bridged)
@@ -269,17 +297,30 @@ static int mlxsw_sp_port_attr_br_flags_set(struct mlxsw_sp_port *mlxsw_sp_port,
return 0;
if ((uc_flood ^ brport_flags) & BR_FLOOD) {
- set = mlxsw_sp_port->uc_flood ? false : true;
- err = mlxsw_sp_port_uc_flood_set(mlxsw_sp_port, set);
+ err = mlxsw_sp_port_uc_flood_set(mlxsw_sp_port,
+ !mlxsw_sp_port->uc_flood);
if (err)
return err;
}
+ if ((learning ^ brport_flags) & BR_LEARNING) {
+ err = mlxsw_sp_port_learning_set(mlxsw_sp_port,
+ !mlxsw_sp_port->learning);
+ if (err)
+ goto err_port_learning_set;
+ }
+
mlxsw_sp_port->uc_flood = brport_flags & BR_FLOOD ? 1 : 0;
mlxsw_sp_port->learning = brport_flags & BR_LEARNING ? 1 : 0;
mlxsw_sp_port->learning_sync = brport_flags & BR_LEARNING_SYNC ? 1 : 0;
return 0;
+
+err_port_learning_set:
+ if ((uc_flood ^ brport_flags) & BR_FLOOD)
+ mlxsw_sp_port_uc_flood_set(mlxsw_sp_port,
+ mlxsw_sp_port->uc_flood);
+ return err;
}
static int mlxsw_sp_ageing_set(struct mlxsw_sp *mlxsw_sp, u32 ageing_time)
@@ -631,6 +672,27 @@ static int __mlxsw_sp_port_vlans_set(struct mlxsw_sp_port *mlxsw_sp_port,
return 0;
}
+static int mlxsw_sp_port_vid_learning_set(struct mlxsw_sp_port *mlxsw_sp_port,
+ u16 vid_begin, u16 vid_end,
+ bool learn_enable)
+{
+ u16 vid, vid_e;
+ int err;
+
+ for (vid = vid_begin; vid <= vid_end;
+ vid += MLXSW_REG_SPVMLR_REC_MAX_COUNT) {
+ vid_e = min((u16) (vid + MLXSW_REG_SPVMLR_REC_MAX_COUNT - 1),
+ vid_end);
+
+ err = __mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, vid,
+ vid_e, learn_enable);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
static int __mlxsw_sp_port_vlans_add(struct mlxsw_sp_port *mlxsw_sp_port,
u16 vid_begin, u16 vid_end,
bool flag_untagged, bool flag_pvid)
@@ -671,6 +733,14 @@ static int __mlxsw_sp_port_vlans_add(struct mlxsw_sp_port *mlxsw_sp_port,
}
}
+ err = mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, vid_begin, vid_end,
+ mlxsw_sp_port->learning);
+ if (err) {
+ netdev_err(dev, "Failed to set learning for VIDs %d-%d\n",
+ vid_begin, vid_end);
+ goto err_port_vid_learning_set;
+ }
+
/* Changing activity bits only if HW operation succeeded */
for (vid = vid_begin; vid <= vid_end; vid++) {
set_bit(vid, mlxsw_sp_port->active_vlans);
@@ -693,6 +763,9 @@ static int __mlxsw_sp_port_vlans_add(struct mlxsw_sp_port *mlxsw_sp_port,
err_port_stp_state_set:
for (vid = vid_begin; vid <= vid_end; vid++)
clear_bit(vid, mlxsw_sp_port->active_vlans);
+ mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, vid_begin, vid_end,
+ false);
+err_port_vid_learning_set:
if (old_pvid != mlxsw_sp_port->pvid)
mlxsw_sp_port_pvid_set(mlxsw_sp_port, old_pvid);
err_port_pvid_set:
@@ -971,11 +1044,6 @@ static int mlxsw_sp_port_obj_add(struct net_device *dev,
SWITCHDEV_OBJ_PORT_VLAN(obj),
trans);
break;
- case SWITCHDEV_OBJ_ID_IPV4_FIB:
- err = mlxsw_sp_router_fib4_add(mlxsw_sp_port,
- SWITCHDEV_OBJ_IPV4_FIB(obj),
- trans);
- break;
case SWITCHDEV_OBJ_ID_PORT_FDB:
err = mlxsw_sp_port_fdb_static_add(mlxsw_sp_port,
SWITCHDEV_OBJ_PORT_FDB(obj),
@@ -997,29 +1065,20 @@ static int mlxsw_sp_port_obj_add(struct net_device *dev,
static int __mlxsw_sp_port_vlans_del(struct mlxsw_sp_port *mlxsw_sp_port,
u16 vid_begin, u16 vid_end)
{
- struct net_device *dev = mlxsw_sp_port->dev;
u16 vid, pvid;
- int err;
if (!mlxsw_sp_port->bridged)
return -EINVAL;
- err = __mlxsw_sp_port_vlans_set(mlxsw_sp_port, vid_begin, vid_end,
- false, false);
- if (err) {
- netdev_err(dev, "Unable to del VIDs %d-%d\n", vid_begin,
- vid_end);
- return err;
- }
+ mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, vid_begin, vid_end,
+ false);
pvid = mlxsw_sp_port->pvid;
- if (pvid >= vid_begin && pvid <= vid_end) {
- err = mlxsw_sp_port_pvid_set(mlxsw_sp_port, 0);
- if (err) {
- netdev_err(dev, "Unable to del PVID %d\n", pvid);
- return err;
- }
- }
+ if (pvid >= vid_begin && pvid <= vid_end)
+ mlxsw_sp_port_pvid_set(mlxsw_sp_port, 0);
+
+ __mlxsw_sp_port_vlans_set(mlxsw_sp_port, vid_begin, vid_end, false,
+ false);
mlxsw_sp_port_fid_leave(mlxsw_sp_port, vid_begin, vid_end);
@@ -1117,10 +1176,6 @@ static int mlxsw_sp_port_obj_del(struct net_device *dev,
err = mlxsw_sp_port_vlans_del(mlxsw_sp_port,
SWITCHDEV_OBJ_PORT_VLAN(obj));
break;
- case SWITCHDEV_OBJ_ID_IPV4_FIB:
- err = mlxsw_sp_router_fib4_del(mlxsw_sp_port,
- SWITCHDEV_OBJ_IPV4_FIB(obj));
- break;
case SWITCHDEV_OBJ_ID_PORT_FDB:
err = mlxsw_sp_port_fdb_static_del(mlxsw_sp_port,
SWITCHDEV_OBJ_PORT_FDB(obj));
@@ -1141,9 +1196,11 @@ static struct mlxsw_sp_port *mlxsw_sp_lag_rep_port(struct mlxsw_sp *mlxsw_sp,
u16 lag_id)
{
struct mlxsw_sp_port *mlxsw_sp_port;
+ struct mlxsw_resources *resources;
int i;
- for (i = 0; i < MLXSW_SP_PORT_PER_LAG_MAX; i++) {
+ resources = mlxsw_core_resources_get(mlxsw_sp->core);
+ for (i = 0; i < resources->max_ports_in_lag; i++) {
mlxsw_sp_port = mlxsw_sp_port_lagged_get(mlxsw_sp, lag_id, i);
if (mlxsw_sp_port)
return mlxsw_sp_port;
@@ -1362,8 +1419,6 @@ static void mlxsw_sp_fdb_notify_mac_process(struct mlxsw_sp *mlxsw_sp,
vid = fid;
}
- adding = adding && mlxsw_sp_port->learning;
-
do_fdb_op:
err = mlxsw_sp_port_fdb_uc_op(mlxsw_sp, local_port, mac, fid,
adding, true);
@@ -1425,8 +1480,6 @@ static void mlxsw_sp_fdb_notify_mac_lag_process(struct mlxsw_sp *mlxsw_sp,
vid = fid;
}
- adding = adding && mlxsw_sp_port->learning;
-
do_fdb_op:
err = mlxsw_sp_port_fdb_uc_lag_op(mlxsw_sp, lag_id, mac, fid, lag_vid,
adding, true);
@@ -1492,20 +1545,18 @@ static void mlxsw_sp_fdb_notify_work(struct work_struct *work)
mlxsw_sp = container_of(work, struct mlxsw_sp, fdb_notify.dw.work);
rtnl_lock();
- do {
- mlxsw_reg_sfn_pack(sfn_pl);
- err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(sfn), sfn_pl);
- if (err) {
- dev_err_ratelimited(mlxsw_sp->bus_info->dev, "Failed to get FDB notifications\n");
- break;
- }
- num_rec = mlxsw_reg_sfn_num_rec_get(sfn_pl);
- for (i = 0; i < num_rec; i++)
- mlxsw_sp_fdb_notify_rec_process(mlxsw_sp, sfn_pl, i);
+ mlxsw_reg_sfn_pack(sfn_pl);
+ err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(sfn), sfn_pl);
+ if (err) {
+ dev_err_ratelimited(mlxsw_sp->bus_info->dev, "Failed to get FDB notifications\n");
+ goto out;
+ }
+ num_rec = mlxsw_reg_sfn_num_rec_get(sfn_pl);
+ for (i = 0; i < num_rec; i++)
+ mlxsw_sp_fdb_notify_rec_process(mlxsw_sp, sfn_pl, i);
- } while (num_rec);
+out:
rtnl_unlock();
-
kfree(sfn_pl);
mlxsw_sp_fdb_notify_work_schedule(mlxsw_sp);
}
diff --git a/drivers/net/ethernet/mellanox/mlxsw/switchx2.c b/drivers/net/ethernet/mellanox/mlxsw/switchx2.c
index 377daa4d509c..c0c23e2f3275 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/switchx2.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/switchx2.c
@@ -997,7 +997,7 @@ static int mlxsw_sx_port_create(struct mlxsw_sx *mlxsw_sx, u8 local_port)
/* Each packet needs to have a Tx header (metadata) on top of all other
* headers.
*/
- dev->hard_header_len += MLXSW_TXHDR_LEN;
+ dev->needed_headroom = MLXSW_TXHDR_LEN;
err = mlxsw_sx_port_module_check(mlxsw_sx_port, &usable);
if (err) {
@@ -1512,10 +1512,6 @@ static void mlxsw_sx_fini(struct mlxsw_core *mlxsw_core)
static struct mlxsw_config_profile mlxsw_sx_config_profile = {
.used_max_vepa_channels = 1,
.max_vepa_channels = 0,
- .used_max_lag = 1,
- .max_lag = 64,
- .used_max_port_per_lag = 1,
- .max_port_per_lag = 16,
.used_max_mid = 1,
.max_mid = 7000,
.used_max_pgt = 1,