Diffstat (limited to 'net')
-rw-r--r--  net/Kconfig  1
-rw-r--r--  net/Makefile  1
-rw-r--r--  net/bridge/br_fdb.c  433
-rw-r--r--  net/bridge/br_if.c  2
-rw-r--r--  net/bridge/br_mdb.c  250
-rw-r--r--  net/bridge/br_private.h  45
-rw-r--r--  net/bridge/br_switchdev.c  449
-rw-r--r--  net/bridge/br_vlan.c  206
-rw-r--r--  net/core/flow_offload.c  42
-rw-r--r--  net/core/of_net.c  4
-rw-r--r--  net/core/skbuff.c  26
-rw-r--r--  net/dsa/Kconfig  20
-rw-r--r--  net/dsa/Makefile  3
-rw-r--r--  net/dsa/dsa.c  122
-rw-r--r--  net/dsa/dsa2.c  304
-rw-r--r--  net/dsa/dsa_priv.h  208
-rw-r--r--  net/dsa/master.c  23
-rw-r--r--  net/dsa/port.c  787
-rw-r--r--  net/dsa/slave.c  916
-rw-r--r--  net/dsa/switch.c  792
-rw-r--r--  net/dsa/tag_8021q.c  386
-rw-r--r--  net/dsa/tag_dsa.c  22
-rw-r--r--  net/dsa/tag_ocelot.c  45
-rw-r--r--  net/dsa/tag_ocelot_8021q.c  63
-rw-r--r--  net/dsa/tag_rtl4_a.c  2
-rw-r--r--  net/dsa/tag_rtl8_4.c  178
-rw-r--r--  net/dsa/tag_sja1105.c  243
-rw-r--r--  net/ethernet/eth.c  25
-rw-r--r--  net/sched/act_api.c  93
-rw-r--r--  net/sched/act_csum.c  19
-rw-r--r--  net/sched/act_ct.c  21
-rw-r--r--  net/sched/act_gact.c  38
-rw-r--r--  net/sched/act_gate.c  49
-rw-r--r--  net/sched/act_mirred.c  50
-rw-r--r--  net/sched/act_mpls.c  52
-rw-r--r--  net/sched/act_pedit.c  34
-rw-r--r--  net/sched/act_police.c  71
-rw-r--r--  net/sched/act_sample.c  30
-rw-r--r--  net/sched/act_skbedit.c  36
-rw-r--r--  net/sched/act_tunnel_key.c  54
-rw-r--r--  net/sched/act_vlan.c  48
-rw-r--r--  net/sched/cls_api.c  254
-rw-r--r--  net/sched/cls_flower.c  8
-rw-r--r--  net/sched/cls_matchall.c  8
-rw-r--r--  net/switchdev/switchdev.c  358
-rw-r--r--  net/tsn/Kconfig  15
-rw-r--r--  net/tsn/Makefile  1
-rw-r--r--  net/tsn/genl_tsn.c  3730
-rw-r--r--  net/xdp/xdp_umem.c  2
-rw-r--r--  net/xdp/xsk.c  1
-rw-r--r--  net/xdp/xsk_buff_pool.c  88
-rw-r--r--  net/xdp/xsk_diag.c  1
-rw-r--r--  net/xdp/xsk_queue.h  12
53 files changed, 8189 insertions(+), 2482 deletions(-)
diff --git a/net/Kconfig b/net/Kconfig
index fb13460c6dab..64e30e3fd33c 100644
--- a/net/Kconfig
+++ b/net/Kconfig
@@ -232,6 +232,7 @@ source "net/ieee802154/Kconfig"
source "net/mac802154/Kconfig"
source "net/sched/Kconfig"
source "net/dcb/Kconfig"
+source "net/tsn/Kconfig"
source "net/dns_resolver/Kconfig"
source "net/batman-adv/Kconfig"
source "net/openvswitch/Kconfig"
diff --git a/net/Makefile b/net/Makefile
index fbfeb8a0bb37..b784e8a49095 100644
--- a/net/Makefile
+++ b/net/Makefile
@@ -56,6 +56,7 @@ obj-$(CONFIG_RFKILL) += rfkill/
obj-$(CONFIG_NET_9P) += 9p/
obj-$(CONFIG_CAIF) += caif/
obj-$(CONFIG_DCB) += dcb/
+obj-$(CONFIG_TSN) += tsn/
obj-$(CONFIG_6LOWPAN) += 6lowpan/
obj-$(CONFIG_IEEE802154) += ieee802154/
obj-$(CONFIG_MAC802154) += mac802154/
diff --git a/net/bridge/br_fdb.c b/net/bridge/br_fdb.c
index 46812b659710..5c4a8740058a 100644
--- a/net/bridge/br_fdb.c
+++ b/net/bridge/br_fdb.c
@@ -32,10 +32,6 @@ static const struct rhashtable_params br_fdb_rht_params = {
};
static struct kmem_cache *br_fdb_cache __read_mostly;
-static int fdb_insert(struct net_bridge *br, struct net_bridge_port *source,
- const unsigned char *addr, u16 vid);
-static void fdb_notify(struct net_bridge *br,
- const struct net_bridge_fdb_entry *, int, bool);
int __init br_fdb_init(void)
{
@@ -87,6 +83,128 @@ static void fdb_rcu_free(struct rcu_head *head)
kmem_cache_free(br_fdb_cache, ent);
}
+static int fdb_to_nud(const struct net_bridge *br,
+ const struct net_bridge_fdb_entry *fdb)
+{
+ if (test_bit(BR_FDB_LOCAL, &fdb->flags))
+ return NUD_PERMANENT;
+ else if (test_bit(BR_FDB_STATIC, &fdb->flags))
+ return NUD_NOARP;
+ else if (has_expired(br, fdb))
+ return NUD_STALE;
+ else
+ return NUD_REACHABLE;
+}
+
+static int fdb_fill_info(struct sk_buff *skb, const struct net_bridge *br,
+ const struct net_bridge_fdb_entry *fdb,
+ u32 portid, u32 seq, int type, unsigned int flags)
+{
+ const struct net_bridge_port *dst = READ_ONCE(fdb->dst);
+ unsigned long now = jiffies;
+ struct nda_cacheinfo ci;
+ struct nlmsghdr *nlh;
+ struct ndmsg *ndm;
+
+ nlh = nlmsg_put(skb, portid, seq, type, sizeof(*ndm), flags);
+ if (nlh == NULL)
+ return -EMSGSIZE;
+
+ ndm = nlmsg_data(nlh);
+ ndm->ndm_family = AF_BRIDGE;
+ ndm->ndm_pad1 = 0;
+ ndm->ndm_pad2 = 0;
+ ndm->ndm_flags = 0;
+ ndm->ndm_type = 0;
+ ndm->ndm_ifindex = dst ? dst->dev->ifindex : br->dev->ifindex;
+ ndm->ndm_state = fdb_to_nud(br, fdb);
+
+ if (test_bit(BR_FDB_OFFLOADED, &fdb->flags))
+ ndm->ndm_flags |= NTF_OFFLOADED;
+ if (test_bit(BR_FDB_ADDED_BY_EXT_LEARN, &fdb->flags))
+ ndm->ndm_flags |= NTF_EXT_LEARNED;
+ if (test_bit(BR_FDB_STICKY, &fdb->flags))
+ ndm->ndm_flags |= NTF_STICKY;
+
+ if (nla_put(skb, NDA_LLADDR, ETH_ALEN, &fdb->key.addr))
+ goto nla_put_failure;
+ if (nla_put_u32(skb, NDA_MASTER, br->dev->ifindex))
+ goto nla_put_failure;
+ ci.ndm_used = jiffies_to_clock_t(now - fdb->used);
+ ci.ndm_confirmed = 0;
+ ci.ndm_updated = jiffies_to_clock_t(now - fdb->updated);
+ ci.ndm_refcnt = 0;
+ if (nla_put(skb, NDA_CACHEINFO, sizeof(ci), &ci))
+ goto nla_put_failure;
+
+ if (fdb->key.vlan_id && nla_put(skb, NDA_VLAN, sizeof(u16),
+ &fdb->key.vlan_id))
+ goto nla_put_failure;
+
+ if (test_bit(BR_FDB_NOTIFY, &fdb->flags)) {
+ struct nlattr *nest = nla_nest_start(skb, NDA_FDB_EXT_ATTRS);
+ u8 notify_bits = FDB_NOTIFY_BIT;
+
+ if (!nest)
+ goto nla_put_failure;
+ if (test_bit(BR_FDB_NOTIFY_INACTIVE, &fdb->flags))
+ notify_bits |= FDB_NOTIFY_INACTIVE_BIT;
+
+ if (nla_put_u8(skb, NFEA_ACTIVITY_NOTIFY, notify_bits)) {
+ nla_nest_cancel(skb, nest);
+ goto nla_put_failure;
+ }
+
+ nla_nest_end(skb, nest);
+ }
+
+ nlmsg_end(skb, nlh);
+ return 0;
+
+nla_put_failure:
+ nlmsg_cancel(skb, nlh);
+ return -EMSGSIZE;
+}
+
+static inline size_t fdb_nlmsg_size(void)
+{
+ return NLMSG_ALIGN(sizeof(struct ndmsg))
+ + nla_total_size(ETH_ALEN) /* NDA_LLADDR */
+ + nla_total_size(sizeof(u32)) /* NDA_MASTER */
+ + nla_total_size(sizeof(u16)) /* NDA_VLAN */
+ + nla_total_size(sizeof(struct nda_cacheinfo))
+ + nla_total_size(0) /* NDA_FDB_EXT_ATTRS */
+ + nla_total_size(sizeof(u8)); /* NFEA_ACTIVITY_NOTIFY */
+}
+
+static void fdb_notify(struct net_bridge *br,
+ const struct net_bridge_fdb_entry *fdb, int type,
+ bool swdev_notify)
+{
+ struct net *net = dev_net(br->dev);
+ struct sk_buff *skb;
+ int err = -ENOBUFS;
+
+ if (swdev_notify)
+ br_switchdev_fdb_notify(br, fdb, type);
+
+ skb = nlmsg_new(fdb_nlmsg_size(), GFP_ATOMIC);
+ if (skb == NULL)
+ goto errout;
+
+ err = fdb_fill_info(skb, br, fdb, 0, 0, type, 0);
+ if (err < 0) {
+ /* -EMSGSIZE implies BUG in fdb_nlmsg_size() */
+ WARN_ON(err == -EMSGSIZE);
+ kfree_skb(skb);
+ goto errout;
+ }
+ rtnl_notify(skb, net, 0, RTNLGRP_NEIGH, NULL, GFP_ATOMIC);
+ return;
+errout:
+ rtnl_set_sk_err(net, RTNLGRP_NEIGH, err);
+}
+
static struct net_bridge_fdb_entry *fdb_find_rcu(struct rhashtable *tbl,
const unsigned char *addr,
__u16 vid)
@@ -257,6 +375,66 @@ void br_fdb_find_delete_local(struct net_bridge *br,
spin_unlock_bh(&br->hash_lock);
}
+static struct net_bridge_fdb_entry *fdb_create(struct net_bridge *br,
+ struct net_bridge_port *source,
+ const unsigned char *addr,
+ __u16 vid,
+ unsigned long flags)
+{
+ struct net_bridge_fdb_entry *fdb;
+ int err;
+
+ fdb = kmem_cache_alloc(br_fdb_cache, GFP_ATOMIC);
+ if (!fdb)
+ return NULL;
+
+ memcpy(fdb->key.addr.addr, addr, ETH_ALEN);
+ WRITE_ONCE(fdb->dst, source);
+ fdb->key.vlan_id = vid;
+ fdb->flags = flags;
+ fdb->updated = fdb->used = jiffies;
+ err = rhashtable_lookup_insert_fast(&br->fdb_hash_tbl, &fdb->rhnode,
+ br_fdb_rht_params);
+ if (err) {
+ kmem_cache_free(br_fdb_cache, fdb);
+ return NULL;
+ }
+
+ hlist_add_head_rcu(&fdb->fdb_node, &br->fdb_list);
+
+ return fdb;
+}
+
+static int fdb_add_local(struct net_bridge *br, struct net_bridge_port *source,
+ const unsigned char *addr, u16 vid)
+{
+ struct net_bridge_fdb_entry *fdb;
+
+ if (!is_valid_ether_addr(addr))
+ return -EINVAL;
+
+ fdb = br_fdb_find(br, addr, vid);
+ if (fdb) {
+ /* it is okay to have multiple ports with same
+ * address, just use the first one.
+ */
+ if (test_bit(BR_FDB_LOCAL, &fdb->flags))
+ return 0;
+ br_warn(br, "adding interface %s with same address as a received packet (addr:%pM, vlan:%u)\n",
+ source ? source->dev->name : br->dev->name, addr, vid);
+ fdb_delete(br, fdb, true);
+ }
+
+ fdb = fdb_create(br, source, addr, vid,
+ BIT(BR_FDB_LOCAL) | BIT(BR_FDB_STATIC));
+ if (!fdb)
+ return -ENOMEM;
+
+ fdb_add_hw_addr(br, addr);
+ fdb_notify(br, fdb, RTM_NEWNEIGH, true);
+ return 0;
+}
+
void br_fdb_changeaddr(struct net_bridge_port *p, const unsigned char *newaddr)
{
struct net_bridge_vlan_group *vg;
@@ -283,7 +461,7 @@ void br_fdb_changeaddr(struct net_bridge_port *p, const unsigned char *newaddr)
insert:
/* insert new address, may fail if invalid address or dup. */
- fdb_insert(br, p, newaddr, 0);
+ fdb_add_local(br, p, newaddr, 0);
if (!vg || !vg->num_vlans)
goto done;
@@ -293,7 +471,7 @@ insert:
* from under us.
*/
list_for_each_entry(v, &vg->vlan_list, vlist)
- fdb_insert(br, p, newaddr, v->vid);
+ fdb_add_local(br, p, newaddr, v->vid);
done:
spin_unlock_bh(&br->hash_lock);
@@ -313,7 +491,7 @@ void br_fdb_change_mac_address(struct net_bridge *br, const u8 *newaddr)
!f->dst && !test_bit(BR_FDB_ADDED_BY_USER, &f->flags))
fdb_delete_local(br, NULL, f);
- fdb_insert(br, NULL, newaddr, 0);
+ fdb_add_local(br, NULL, newaddr, 0);
vg = br_vlan_group(br);
if (!vg || !vg->num_vlans)
goto out;
@@ -328,7 +506,7 @@ void br_fdb_change_mac_address(struct net_bridge *br, const u8 *newaddr)
if (f && test_bit(BR_FDB_LOCAL, &f->flags) &&
!f->dst && !test_bit(BR_FDB_ADDED_BY_USER, &f->flags))
fdb_delete_local(br, NULL, f);
- fdb_insert(br, NULL, newaddr, v->vid);
+ fdb_add_local(br, NULL, newaddr, v->vid);
}
out:
spin_unlock_bh(&br->hash_lock);
@@ -503,71 +681,14 @@ int br_fdb_fillbuf(struct net_bridge *br, void *buf,
return num;
}
-static struct net_bridge_fdb_entry *fdb_create(struct net_bridge *br,
- struct net_bridge_port *source,
- const unsigned char *addr,
- __u16 vid,
- unsigned long flags)
-{
- struct net_bridge_fdb_entry *fdb;
-
- fdb = kmem_cache_alloc(br_fdb_cache, GFP_ATOMIC);
- if (fdb) {
- memcpy(fdb->key.addr.addr, addr, ETH_ALEN);
- WRITE_ONCE(fdb->dst, source);
- fdb->key.vlan_id = vid;
- fdb->flags = flags;
- fdb->updated = fdb->used = jiffies;
- if (rhashtable_lookup_insert_fast(&br->fdb_hash_tbl,
- &fdb->rhnode,
- br_fdb_rht_params)) {
- kmem_cache_free(br_fdb_cache, fdb);
- fdb = NULL;
- } else {
- hlist_add_head_rcu(&fdb->fdb_node, &br->fdb_list);
- }
- }
- return fdb;
-}
-
-static int fdb_insert(struct net_bridge *br, struct net_bridge_port *source,
- const unsigned char *addr, u16 vid)
-{
- struct net_bridge_fdb_entry *fdb;
-
- if (!is_valid_ether_addr(addr))
- return -EINVAL;
-
- fdb = br_fdb_find(br, addr, vid);
- if (fdb) {
- /* it is okay to have multiple ports with same
- * address, just use the first one.
- */
- if (test_bit(BR_FDB_LOCAL, &fdb->flags))
- return 0;
- br_warn(br, "adding interface %s with same address as a received packet (addr:%pM, vlan:%u)\n",
- source ? source->dev->name : br->dev->name, addr, vid);
- fdb_delete(br, fdb, true);
- }
-
- fdb = fdb_create(br, source, addr, vid,
- BIT(BR_FDB_LOCAL) | BIT(BR_FDB_STATIC));
- if (!fdb)
- return -ENOMEM;
-
- fdb_add_hw_addr(br, addr);
- fdb_notify(br, fdb, RTM_NEWNEIGH, true);
- return 0;
-}
-
/* Add entry for local address of interface */
-int br_fdb_insert(struct net_bridge *br, struct net_bridge_port *source,
- const unsigned char *addr, u16 vid)
+int br_fdb_add_local(struct net_bridge *br, struct net_bridge_port *source,
+ const unsigned char *addr, u16 vid)
{
int ret;
spin_lock_bh(&br->hash_lock);
- ret = fdb_insert(br, source, addr, vid);
+ ret = fdb_add_local(br, source, addr, vid);
spin_unlock_bh(&br->hash_lock);
return ret;
}
@@ -638,182 +759,6 @@ void br_fdb_update(struct net_bridge *br, struct net_bridge_port *source,
}
}
-static int fdb_to_nud(const struct net_bridge *br,
- const struct net_bridge_fdb_entry *fdb)
-{
- if (test_bit(BR_FDB_LOCAL, &fdb->flags))
- return NUD_PERMANENT;
- else if (test_bit(BR_FDB_STATIC, &fdb->flags))
- return NUD_NOARP;
- else if (has_expired(br, fdb))
- return NUD_STALE;
- else
- return NUD_REACHABLE;
-}
-
-static int fdb_fill_info(struct sk_buff *skb, const struct net_bridge *br,
- const struct net_bridge_fdb_entry *fdb,
- u32 portid, u32 seq, int type, unsigned int flags)
-{
- const struct net_bridge_port *dst = READ_ONCE(fdb->dst);
- unsigned long now = jiffies;
- struct nda_cacheinfo ci;
- struct nlmsghdr *nlh;
- struct ndmsg *ndm;
-
- nlh = nlmsg_put(skb, portid, seq, type, sizeof(*ndm), flags);
- if (nlh == NULL)
- return -EMSGSIZE;
-
- ndm = nlmsg_data(nlh);
- ndm->ndm_family = AF_BRIDGE;
- ndm->ndm_pad1 = 0;
- ndm->ndm_pad2 = 0;
- ndm->ndm_flags = 0;
- ndm->ndm_type = 0;
- ndm->ndm_ifindex = dst ? dst->dev->ifindex : br->dev->ifindex;
- ndm->ndm_state = fdb_to_nud(br, fdb);
-
- if (test_bit(BR_FDB_OFFLOADED, &fdb->flags))
- ndm->ndm_flags |= NTF_OFFLOADED;
- if (test_bit(BR_FDB_ADDED_BY_EXT_LEARN, &fdb->flags))
- ndm->ndm_flags |= NTF_EXT_LEARNED;
- if (test_bit(BR_FDB_STICKY, &fdb->flags))
- ndm->ndm_flags |= NTF_STICKY;
-
- if (nla_put(skb, NDA_LLADDR, ETH_ALEN, &fdb->key.addr))
- goto nla_put_failure;
- if (nla_put_u32(skb, NDA_MASTER, br->dev->ifindex))
- goto nla_put_failure;
- ci.ndm_used = jiffies_to_clock_t(now - fdb->used);
- ci.ndm_confirmed = 0;
- ci.ndm_updated = jiffies_to_clock_t(now - fdb->updated);
- ci.ndm_refcnt = 0;
- if (nla_put(skb, NDA_CACHEINFO, sizeof(ci), &ci))
- goto nla_put_failure;
-
- if (fdb->key.vlan_id && nla_put(skb, NDA_VLAN, sizeof(u16),
- &fdb->key.vlan_id))
- goto nla_put_failure;
-
- if (test_bit(BR_FDB_NOTIFY, &fdb->flags)) {
- struct nlattr *nest = nla_nest_start(skb, NDA_FDB_EXT_ATTRS);
- u8 notify_bits = FDB_NOTIFY_BIT;
-
- if (!nest)
- goto nla_put_failure;
- if (test_bit(BR_FDB_NOTIFY_INACTIVE, &fdb->flags))
- notify_bits |= FDB_NOTIFY_INACTIVE_BIT;
-
- if (nla_put_u8(skb, NFEA_ACTIVITY_NOTIFY, notify_bits)) {
- nla_nest_cancel(skb, nest);
- goto nla_put_failure;
- }
-
- nla_nest_end(skb, nest);
- }
-
- nlmsg_end(skb, nlh);
- return 0;
-
-nla_put_failure:
- nlmsg_cancel(skb, nlh);
- return -EMSGSIZE;
-}
-
-static inline size_t fdb_nlmsg_size(void)
-{
- return NLMSG_ALIGN(sizeof(struct ndmsg))
- + nla_total_size(ETH_ALEN) /* NDA_LLADDR */
- + nla_total_size(sizeof(u32)) /* NDA_MASTER */
- + nla_total_size(sizeof(u16)) /* NDA_VLAN */
- + nla_total_size(sizeof(struct nda_cacheinfo))
- + nla_total_size(0) /* NDA_FDB_EXT_ATTRS */
- + nla_total_size(sizeof(u8)); /* NFEA_ACTIVITY_NOTIFY */
-}
-
-static int br_fdb_replay_one(struct net_bridge *br, struct notifier_block *nb,
- const struct net_bridge_fdb_entry *fdb,
- unsigned long action, const void *ctx)
-{
- const struct net_bridge_port *p = READ_ONCE(fdb->dst);
- struct switchdev_notifier_fdb_info item;
- int err;
-
- item.addr = fdb->key.addr.addr;
- item.vid = fdb->key.vlan_id;
- item.added_by_user = test_bit(BR_FDB_ADDED_BY_USER, &fdb->flags);
- item.offloaded = test_bit(BR_FDB_OFFLOADED, &fdb->flags);
- item.is_local = test_bit(BR_FDB_LOCAL, &fdb->flags);
- item.info.dev = (!p || item.is_local) ? br->dev : p->dev;
- item.info.ctx = ctx;
-
- err = nb->notifier_call(nb, action, &item);
- return notifier_to_errno(err);
-}
-
-int br_fdb_replay(const struct net_device *br_dev, const void *ctx, bool adding,
- struct notifier_block *nb)
-{
- struct net_bridge_fdb_entry *fdb;
- struct net_bridge *br;
- unsigned long action;
- int err = 0;
-
- if (!nb)
- return 0;
-
- if (!netif_is_bridge_master(br_dev))
- return -EINVAL;
-
- br = netdev_priv(br_dev);
-
- if (adding)
- action = SWITCHDEV_FDB_ADD_TO_DEVICE;
- else
- action = SWITCHDEV_FDB_DEL_TO_DEVICE;
-
- rcu_read_lock();
-
- hlist_for_each_entry_rcu(fdb, &br->fdb_list, fdb_node) {
- err = br_fdb_replay_one(br, nb, fdb, action, ctx);
- if (err)
- break;
- }
-
- rcu_read_unlock();
-
- return err;
-}
-
-static void fdb_notify(struct net_bridge *br,
- const struct net_bridge_fdb_entry *fdb, int type,
- bool swdev_notify)
-{
- struct net *net = dev_net(br->dev);
- struct sk_buff *skb;
- int err = -ENOBUFS;
-
- if (swdev_notify)
- br_switchdev_fdb_notify(br, fdb, type);
-
- skb = nlmsg_new(fdb_nlmsg_size(), GFP_ATOMIC);
- if (skb == NULL)
- goto errout;
-
- err = fdb_fill_info(skb, br, fdb, 0, 0, type, 0);
- if (err < 0) {
- /* -EMSGSIZE implies BUG in fdb_nlmsg_size() */
- WARN_ON(err == -EMSGSIZE);
- kfree_skb(skb);
- goto errout;
- }
- rtnl_notify(skb, net, 0, RTNLGRP_NEIGH, NULL, GFP_ATOMIC);
- return;
-errout:
- rtnl_set_sk_err(net, RTNLGRP_NEIGH, err);
-}
-
/* Dump information about entries, in response to GETNEIGH */
int br_fdb_dump(struct sk_buff *skb,
struct netlink_callback *cb,
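
The fdb_nlmsg_size()/fdb_fill_info()/fdb_notify() block moved up above follows the usual rtnetlink notification pattern: pre-compute an upper bound on the message size, fill the message, and treat -EMSGSIZE as a sizing bug. A minimal sketch of that pattern, reduced to a single NDA_LLADDR attribute (the demo_* names are illustrative and not part of this patch; it relies on the same <net/netlink.h>, <net/rtnetlink.h> and <linux/neighbour.h> helpers as the code above):

/* Illustrative sketch only -- not part of this patch. */
static size_t demo_nlmsg_size(void)
{
        return NLMSG_ALIGN(sizeof(struct ndmsg))
                + nla_total_size(ETH_ALEN); /* NDA_LLADDR */
}

static void demo_notify(struct net *net, const unsigned char *addr)
{
        struct nlmsghdr *nlh;
        struct sk_buff *skb;
        struct ndmsg *ndm;

        /* Size the message from the same table used to fill it. */
        skb = nlmsg_new(demo_nlmsg_size(), GFP_ATOMIC);
        if (!skb)
                return;

        nlh = nlmsg_put(skb, 0, 0, RTM_NEWNEIGH, sizeof(*ndm), 0);
        if (!nlh)
                goto cancel;

        ndm = nlmsg_data(nlh);
        memset(ndm, 0, sizeof(*ndm));
        ndm->ndm_family = AF_BRIDGE;

        /* A failure here would mean demo_nlmsg_size() is too small. */
        if (nla_put(skb, NDA_LLADDR, ETH_ALEN, addr))
                goto cancel;

        nlmsg_end(skb, nlh);
        rtnl_notify(skb, net, 0, RTNLGRP_NEIGH, NULL, GFP_ATOMIC);
        return;

cancel:
        kfree_skb(skb);
}
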
diff --git a/net/bridge/br_if.c b/net/bridge/br_if.c
index 4a02f8bb278a..abe4316b855d 100644
--- a/net/bridge/br_if.c
+++ b/net/bridge/br_if.c
@@ -670,7 +670,7 @@ int br_add_if(struct net_bridge *br, struct net_device *dev,
else
netdev_set_rx_headroom(dev, br_hr);
- if (br_fdb_insert(br, p, dev->dev_addr, 0))
+ if (br_fdb_add_local(br, p, dev->dev_addr, 0))
netdev_err(dev, "failed insert local address bridge forwarding table\n");
if (br->dev->addr_assign_type != NET_ADDR_SET) {
diff --git a/net/bridge/br_mdb.c b/net/bridge/br_mdb.c
index 0281453f7766..b4468b3d15c5 100644
--- a/net/bridge/br_mdb.c
+++ b/net/bridge/br_mdb.c
@@ -251,14 +251,16 @@ static int __mdb_fill_info(struct sk_buff *skb,
__mdb_entry_fill_flags(&e, flags);
e.ifindex = ifindex;
e.vid = mp->addr.vid;
- if (mp->addr.proto == htons(ETH_P_IP))
+ if (mp->addr.proto == htons(ETH_P_IP)) {
e.addr.u.ip4 = mp->addr.dst.ip4;
#if IS_ENABLED(CONFIG_IPV6)
- else if (mp->addr.proto == htons(ETH_P_IPV6))
+ } else if (mp->addr.proto == htons(ETH_P_IPV6)) {
e.addr.u.ip6 = mp->addr.dst.ip6;
#endif
- else
+ } else {
ether_addr_copy(e.addr.u.mac_addr, mp->addr.dst.mac_addr);
+ e.state = MDB_PG_FLAGS_PERMANENT;
+ }
e.addr.proto = mp->addr.proto;
nest_ent = nla_nest_start_noflag(skb,
MDBA_MDB_ENTRY_INFO);
@@ -552,252 +554,16 @@ out:
return nlmsg_size;
}
-struct br_mdb_complete_info {
- struct net_bridge_port *port;
- struct br_ip ip;
-};
-
-static void br_mdb_complete(struct net_device *dev, int err, void *priv)
-{
- struct br_mdb_complete_info *data = priv;
- struct net_bridge_port_group __rcu **pp;
- struct net_bridge_port_group *p;
- struct net_bridge_mdb_entry *mp;
- struct net_bridge_port *port = data->port;
- struct net_bridge *br = port->br;
-
- if (err)
- goto err;
-
- spin_lock_bh(&br->multicast_lock);
- mp = br_mdb_ip_get(br, &data->ip);
- if (!mp)
- goto out;
- for (pp = &mp->ports; (p = mlock_dereference(*pp, br)) != NULL;
- pp = &p->next) {
- if (p->key.port != port)
- continue;
- p->flags |= MDB_PG_FLAGS_OFFLOAD;
- }
-out:
- spin_unlock_bh(&br->multicast_lock);
-err:
- kfree(priv);
-}
-
-static void br_switchdev_mdb_populate(struct switchdev_obj_port_mdb *mdb,
- const struct net_bridge_mdb_entry *mp)
-{
- if (mp->addr.proto == htons(ETH_P_IP))
- ip_eth_mc_map(mp->addr.dst.ip4, mdb->addr);
-#if IS_ENABLED(CONFIG_IPV6)
- else if (mp->addr.proto == htons(ETH_P_IPV6))
- ipv6_eth_mc_map(&mp->addr.dst.ip6, mdb->addr);
-#endif
- else
- ether_addr_copy(mdb->addr, mp->addr.dst.mac_addr);
-
- mdb->vid = mp->addr.vid;
-}
-
-static int br_mdb_replay_one(struct notifier_block *nb, struct net_device *dev,
- const struct switchdev_obj_port_mdb *mdb,
- unsigned long action, const void *ctx,
- struct netlink_ext_ack *extack)
-{
- struct switchdev_notifier_port_obj_info obj_info = {
- .info = {
- .dev = dev,
- .extack = extack,
- .ctx = ctx,
- },
- .obj = &mdb->obj,
- };
- int err;
-
- err = nb->notifier_call(nb, action, &obj_info);
- return notifier_to_errno(err);
-}
-
-static int br_mdb_queue_one(struct list_head *mdb_list,
- enum switchdev_obj_id id,
- const struct net_bridge_mdb_entry *mp,
- struct net_device *orig_dev)
-{
- struct switchdev_obj_port_mdb *mdb;
-
- mdb = kzalloc(sizeof(*mdb), GFP_ATOMIC);
- if (!mdb)
- return -ENOMEM;
-
- mdb->obj.id = id;
- mdb->obj.orig_dev = orig_dev;
- br_switchdev_mdb_populate(mdb, mp);
- list_add_tail(&mdb->obj.list, mdb_list);
-
- return 0;
-}
-
-int br_mdb_replay(struct net_device *br_dev, struct net_device *dev,
- const void *ctx, bool adding, struct notifier_block *nb,
- struct netlink_ext_ack *extack)
-{
- const struct net_bridge_mdb_entry *mp;
- struct switchdev_obj *obj, *tmp;
- struct net_bridge *br;
- unsigned long action;
- LIST_HEAD(mdb_list);
- int err = 0;
-
- ASSERT_RTNL();
-
- if (!nb)
- return 0;
-
- if (!netif_is_bridge_master(br_dev) || !netif_is_bridge_port(dev))
- return -EINVAL;
-
- br = netdev_priv(br_dev);
-
- if (!br_opt_get(br, BROPT_MULTICAST_ENABLED))
- return 0;
-
- /* We cannot walk over br->mdb_list protected just by the rtnl_mutex,
- * because the write-side protection is br->multicast_lock. But we
- * need to emulate the [ blocking ] calling context of a regular
- * switchdev event, so since both br->multicast_lock and RCU read side
- * critical sections are atomic, we have no choice but to pick the RCU
- * read side lock, queue up all our events, leave the critical section
- * and notify switchdev from blocking context.
- */
- rcu_read_lock();
-
- hlist_for_each_entry_rcu(mp, &br->mdb_list, mdb_node) {
- struct net_bridge_port_group __rcu * const *pp;
- const struct net_bridge_port_group *p;
-
- if (mp->host_joined) {
- err = br_mdb_queue_one(&mdb_list,
- SWITCHDEV_OBJ_ID_HOST_MDB,
- mp, br_dev);
- if (err) {
- rcu_read_unlock();
- goto out_free_mdb;
- }
- }
-
- for (pp = &mp->ports; (p = rcu_dereference(*pp)) != NULL;
- pp = &p->next) {
- if (p->key.port->dev != dev)
- continue;
-
- err = br_mdb_queue_one(&mdb_list,
- SWITCHDEV_OBJ_ID_PORT_MDB,
- mp, dev);
- if (err) {
- rcu_read_unlock();
- goto out_free_mdb;
- }
- }
- }
-
- rcu_read_unlock();
-
- if (adding)
- action = SWITCHDEV_PORT_OBJ_ADD;
- else
- action = SWITCHDEV_PORT_OBJ_DEL;
-
- list_for_each_entry(obj, &mdb_list, list) {
- err = br_mdb_replay_one(nb, dev, SWITCHDEV_OBJ_PORT_MDB(obj),
- action, ctx, extack);
- if (err)
- goto out_free_mdb;
- }
-
-out_free_mdb:
- list_for_each_entry_safe(obj, tmp, &mdb_list, list) {
- list_del(&obj->list);
- kfree(SWITCHDEV_OBJ_PORT_MDB(obj));
- }
-
- return err;
-}
-
-static void br_mdb_switchdev_host_port(struct net_device *dev,
- struct net_device *lower_dev,
- struct net_bridge_mdb_entry *mp,
- int type)
-{
- struct switchdev_obj_port_mdb mdb = {
- .obj = {
- .id = SWITCHDEV_OBJ_ID_HOST_MDB,
- .flags = SWITCHDEV_F_DEFER,
- .orig_dev = dev,
- },
- };
-
- br_switchdev_mdb_populate(&mdb, mp);
-
- switch (type) {
- case RTM_NEWMDB:
- switchdev_port_obj_add(lower_dev, &mdb.obj, NULL);
- break;
- case RTM_DELMDB:
- switchdev_port_obj_del(lower_dev, &mdb.obj);
- break;
- }
-}
-
-static void br_mdb_switchdev_host(struct net_device *dev,
- struct net_bridge_mdb_entry *mp, int type)
-{
- struct net_device *lower_dev;
- struct list_head *iter;
-
- netdev_for_each_lower_dev(dev, lower_dev, iter)
- br_mdb_switchdev_host_port(dev, lower_dev, mp, type);
-}
-
void br_mdb_notify(struct net_device *dev,
struct net_bridge_mdb_entry *mp,
struct net_bridge_port_group *pg,
int type)
{
- struct br_mdb_complete_info *complete_info;
- struct switchdev_obj_port_mdb mdb = {
- .obj = {
- .id = SWITCHDEV_OBJ_ID_PORT_MDB,
- .flags = SWITCHDEV_F_DEFER,
- },
- };
struct net *net = dev_net(dev);
struct sk_buff *skb;
int err = -ENOBUFS;
- if (pg) {
- br_switchdev_mdb_populate(&mdb, mp);
-
- mdb.obj.orig_dev = pg->key.port->dev;
- switch (type) {
- case RTM_NEWMDB:
- complete_info = kmalloc(sizeof(*complete_info), GFP_ATOMIC);
- if (!complete_info)
- break;
- complete_info->port = pg->key.port;
- complete_info->ip = mp->addr;
- mdb.obj.complete_priv = complete_info;
- mdb.obj.complete = br_mdb_complete;
- if (switchdev_port_obj_add(pg->key.port->dev, &mdb.obj, NULL))
- kfree(complete_info);
- break;
- case RTM_DELMDB:
- switchdev_port_obj_del(pg->key.port->dev, &mdb.obj);
- break;
- }
- } else {
- br_mdb_switchdev_host(dev, mp, type);
- }
+ br_switchdev_mdb_notify(dev, mp, pg, type);
skb = nlmsg_new(rtnl_mdb_nlmsg_size(pg), GFP_ATOMIC);
if (!skb)
@@ -1109,8 +875,8 @@ static int br_mdb_add_group(struct net_bridge *br, struct net_bridge_port *port,
return -EINVAL;
/* host join errors which can happen before creating the group */
- if (!port) {
- /* don't allow any flags for host-joined groups */
+ if (!port && !br_group_is_l2(&group)) {
+ /* don't allow any flags for host-joined IP groups */
if (entry->state) {
NL_SET_ERR_MSG_MOD(extack, "Flags are not allowed for host groups");
return -EINVAL;
diff --git a/net/bridge/br_private.h b/net/bridge/br_private.h
index bd218c2b2cd9..f6df54ae170c 100644
--- a/net/bridge/br_private.h
+++ b/net/bridge/br_private.h
@@ -769,8 +769,8 @@ struct net_bridge_fdb_entry *br_fdb_find_rcu(struct net_bridge *br,
int br_fdb_test_addr(struct net_device *dev, unsigned char *addr);
int br_fdb_fillbuf(struct net_bridge *br, void *buf, unsigned long count,
unsigned long off);
-int br_fdb_insert(struct net_bridge *br, struct net_bridge_port *source,
- const unsigned char *addr, u16 vid);
+int br_fdb_add_local(struct net_bridge *br, struct net_bridge_port *source,
+ const unsigned char *addr, u16 vid);
void br_fdb_update(struct net_bridge *br, struct net_bridge_port *source,
const unsigned char *addr, u16 vid, unsigned long flags);
@@ -794,8 +794,6 @@ int br_fdb_external_learn_del(struct net_bridge *br, struct net_bridge_port *p,
bool swdev_notify);
void br_fdb_offloaded_set(struct net_bridge *br, struct net_bridge_port *p,
const unsigned char *addr, u16 vid, bool offloaded);
-int br_fdb_replay(const struct net_device *br_dev, const void *ctx, bool adding,
- struct notifier_block *nb);
/* br_forward.c */
enum br_pkt_type {
@@ -960,9 +958,6 @@ int br_multicast_toggle_vlan_snooping(struct net_bridge *br, bool on,
struct netlink_ext_ack *extack);
bool br_multicast_toggle_global_vlan(struct net_bridge_vlan *vlan, bool on);
-int br_mdb_replay(struct net_device *br_dev, struct net_device *dev,
- const void *ctx, bool adding, struct notifier_block *nb,
- struct netlink_ext_ack *extack);
int br_rports_fill_info(struct sk_buff *skb,
const struct net_bridge_mcast *brmctx);
int br_multicast_dump_querier_state(struct sk_buff *skb,
@@ -1402,14 +1397,6 @@ static inline bool br_multicast_toggle_global_vlan(struct net_bridge_vlan *vlan,
return false;
}
-static inline int br_mdb_replay(struct net_device *br_dev,
- struct net_device *dev, const void *ctx,
- bool adding, struct notifier_block *nb,
- struct netlink_ext_ack *extack)
-{
- return -EOPNOTSUPP;
-}
-
static inline bool
br_multicast_ctx_options_equal(const struct net_bridge_mcast *brmctx1,
const struct net_bridge_mcast *brmctx2)
@@ -1467,9 +1454,6 @@ void br_vlan_notify(const struct net_bridge *br,
const struct net_bridge_port *p,
u16 vid, u16 vid_range,
int cmd);
-int br_vlan_replay(struct net_device *br_dev, struct net_device *dev,
- const void *ctx, bool adding, struct notifier_block *nb,
- struct netlink_ext_ack *extack);
bool br_vlan_can_enter_range(const struct net_bridge_vlan *v_curr,
const struct net_bridge_vlan *range_end);
@@ -1716,13 +1700,11 @@ static inline bool br_vlan_can_enter_range(const struct net_bridge_vlan *v_curr,
return true;
}
-static inline int br_vlan_replay(struct net_device *br_dev,
- struct net_device *dev, const void *ctx,
- bool adding, struct notifier_block *nb,
- struct netlink_ext_ack *extack)
+static inline u16 br_vlan_flags(const struct net_bridge_vlan *v, u16 pvid)
{
- return -EOPNOTSUPP;
+ return 0;
}
+
#endif
/* br_vlan_options.c */
@@ -1997,8 +1979,12 @@ int br_switchdev_set_port_flag(struct net_bridge_port *p,
struct netlink_ext_ack *extack);
void br_switchdev_fdb_notify(struct net_bridge *br,
const struct net_bridge_fdb_entry *fdb, int type);
+void br_switchdev_mdb_notify(struct net_device *dev,
+ struct net_bridge_mdb_entry *mp,
+ struct net_bridge_port_group *pg,
+ int type);
int br_switchdev_port_vlan_add(struct net_device *dev, u16 vid, u16 flags,
- struct netlink_ext_ack *extack);
+ bool changed, struct netlink_ext_ack *extack);
int br_switchdev_port_vlan_del(struct net_device *dev, u16 vid);
void br_switchdev_init(struct net_bridge *br);
@@ -2065,8 +2051,8 @@ static inline int br_switchdev_set_port_flag(struct net_bridge_port *p,
return 0;
}
-static inline int br_switchdev_port_vlan_add(struct net_device *dev,
- u16 vid, u16 flags,
+static inline int br_switchdev_port_vlan_add(struct net_device *dev, u16 vid,
+ u16 flags, bool changed,
struct netlink_ext_ack *extack)
{
return -EOPNOTSUPP;
@@ -2083,6 +2069,13 @@ br_switchdev_fdb_notify(struct net_bridge *br,
{
}
+static inline void br_switchdev_mdb_notify(struct net_device *dev,
+ struct net_bridge_mdb_entry *mp,
+ struct net_bridge_port_group *pg,
+ int type)
+{
+}
+
static inline void br_switchdev_frame_unmark(struct sk_buff *skb)
{
}
diff --git a/net/bridge/br_switchdev.c b/net/bridge/br_switchdev.c
index 6bf518d78f02..59fcabd08ef1 100644
--- a/net/bridge/br_switchdev.c
+++ b/net/bridge/br_switchdev.c
@@ -4,6 +4,7 @@
#include <linux/netdevice.h>
#include <linux/rtnetlink.h>
#include <linux/skbuff.h>
+#include <net/ip.h>
#include <net/switchdev.h>
#include "br_private.h"
@@ -122,40 +123,51 @@ int br_switchdev_set_port_flag(struct net_bridge_port *p,
return 0;
}
+static void br_switchdev_fdb_populate(struct net_bridge *br,
+ struct switchdev_notifier_fdb_info *item,
+ const struct net_bridge_fdb_entry *fdb,
+ const void *ctx)
+{
+ const struct net_bridge_port *p = READ_ONCE(fdb->dst);
+
+ item->addr = fdb->key.addr.addr;
+ item->vid = fdb->key.vlan_id;
+ item->added_by_user = test_bit(BR_FDB_ADDED_BY_USER, &fdb->flags);
+ item->offloaded = test_bit(BR_FDB_OFFLOADED, &fdb->flags);
+ item->is_local = test_bit(BR_FDB_LOCAL, &fdb->flags);
+ item->info.dev = (!p || item->is_local) ? br->dev : p->dev;
+ item->info.ctx = ctx;
+}
+
void
br_switchdev_fdb_notify(struct net_bridge *br,
const struct net_bridge_fdb_entry *fdb, int type)
{
- const struct net_bridge_port *dst = READ_ONCE(fdb->dst);
- struct switchdev_notifier_fdb_info info = {
- .addr = fdb->key.addr.addr,
- .vid = fdb->key.vlan_id,
- .added_by_user = test_bit(BR_FDB_ADDED_BY_USER, &fdb->flags),
- .is_local = test_bit(BR_FDB_LOCAL, &fdb->flags),
- .offloaded = test_bit(BR_FDB_OFFLOADED, &fdb->flags),
- };
- struct net_device *dev = (!dst || info.is_local) ? br->dev : dst->dev;
+ struct switchdev_notifier_fdb_info item;
+
+ br_switchdev_fdb_populate(br, &item, fdb, NULL);
switch (type) {
case RTM_DELNEIGH:
call_switchdev_notifiers(SWITCHDEV_FDB_DEL_TO_DEVICE,
- dev, &info.info, NULL);
+ item.info.dev, &item.info, NULL);
break;
case RTM_NEWNEIGH:
call_switchdev_notifiers(SWITCHDEV_FDB_ADD_TO_DEVICE,
- dev, &info.info, NULL);
+ item.info.dev, &item.info, NULL);
break;
}
}
int br_switchdev_port_vlan_add(struct net_device *dev, u16 vid, u16 flags,
- struct netlink_ext_ack *extack)
+ bool changed, struct netlink_ext_ack *extack)
{
struct switchdev_obj_port_vlan v = {
.obj.orig_dev = dev,
.obj.id = SWITCHDEV_OBJ_ID_PORT_VLAN,
.flags = flags,
.vid = vid,
+ .changed = changed,
};
return switchdev_port_obj_add(dev, &v.obj, extack);
@@ -270,6 +282,406 @@ static void nbp_switchdev_del(struct net_bridge_port *p)
}
}
+static int
+br_switchdev_fdb_replay_one(struct net_bridge *br, struct notifier_block *nb,
+ const struct net_bridge_fdb_entry *fdb,
+ unsigned long action, const void *ctx)
+{
+ struct switchdev_notifier_fdb_info item;
+ int err;
+
+ br_switchdev_fdb_populate(br, &item, fdb, ctx);
+
+ err = nb->notifier_call(nb, action, &item);
+ return notifier_to_errno(err);
+}
+
+static int
+br_switchdev_fdb_replay(const struct net_device *br_dev, const void *ctx,
+ bool adding, struct notifier_block *nb)
+{
+ struct net_bridge_fdb_entry *fdb;
+ struct net_bridge *br;
+ unsigned long action;
+ int err = 0;
+
+ if (!nb)
+ return 0;
+
+ if (!netif_is_bridge_master(br_dev))
+ return -EINVAL;
+
+ br = netdev_priv(br_dev);
+
+ if (adding)
+ action = SWITCHDEV_FDB_ADD_TO_DEVICE;
+ else
+ action = SWITCHDEV_FDB_DEL_TO_DEVICE;
+
+ rcu_read_lock();
+
+ hlist_for_each_entry_rcu(fdb, &br->fdb_list, fdb_node) {
+ err = br_switchdev_fdb_replay_one(br, nb, fdb, action, ctx);
+ if (err)
+ break;
+ }
+
+ rcu_read_unlock();
+
+ return err;
+}
+
+static int
+br_switchdev_vlan_replay_one(struct notifier_block *nb,
+ struct net_device *dev,
+ struct switchdev_obj_port_vlan *vlan,
+ const void *ctx, unsigned long action,
+ struct netlink_ext_ack *extack)
+{
+ struct switchdev_notifier_port_obj_info obj_info = {
+ .info = {
+ .dev = dev,
+ .extack = extack,
+ .ctx = ctx,
+ },
+ .obj = &vlan->obj,
+ };
+ int err;
+
+ err = nb->notifier_call(nb, action, &obj_info);
+ return notifier_to_errno(err);
+}
+
+static int br_switchdev_vlan_replay_group(struct notifier_block *nb,
+ struct net_device *dev,
+ struct net_bridge_vlan_group *vg,
+ const void *ctx, unsigned long action,
+ struct netlink_ext_ack *extack)
+{
+ struct net_bridge_vlan *v;
+ int err = 0;
+ u16 pvid;
+
+ if (!vg)
+ return 0;
+
+ pvid = br_get_pvid(vg);
+
+ list_for_each_entry(v, &vg->vlan_list, vlist) {
+ struct switchdev_obj_port_vlan vlan = {
+ .obj.orig_dev = dev,
+ .obj.id = SWITCHDEV_OBJ_ID_PORT_VLAN,
+ .flags = br_vlan_flags(v, pvid),
+ .vid = v->vid,
+ };
+
+ if (!br_vlan_should_use(v))
+ continue;
+
+ err = br_switchdev_vlan_replay_one(nb, dev, &vlan, ctx,
+ action, extack);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
+static int br_switchdev_vlan_replay(struct net_device *br_dev,
+ const void *ctx, bool adding,
+ struct notifier_block *nb,
+ struct netlink_ext_ack *extack)
+{
+ struct net_bridge *br = netdev_priv(br_dev);
+ struct net_bridge_port *p;
+ unsigned long action;
+ int err;
+
+ ASSERT_RTNL();
+
+ if (!nb)
+ return 0;
+
+ if (!netif_is_bridge_master(br_dev))
+ return -EINVAL;
+
+ if (adding)
+ action = SWITCHDEV_PORT_OBJ_ADD;
+ else
+ action = SWITCHDEV_PORT_OBJ_DEL;
+
+ err = br_switchdev_vlan_replay_group(nb, br_dev, br_vlan_group(br),
+ ctx, action, extack);
+ if (err)
+ return err;
+
+ list_for_each_entry(p, &br->port_list, list) {
+ struct net_device *dev = p->dev;
+
+ err = br_switchdev_vlan_replay_group(nb, dev,
+ nbp_vlan_group(p),
+ ctx, action, extack);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
+#ifdef CONFIG_BRIDGE_IGMP_SNOOPING
+struct br_switchdev_mdb_complete_info {
+ struct net_bridge_port *port;
+ struct br_ip ip;
+};
+
+static void br_switchdev_mdb_complete(struct net_device *dev, int err, void *priv)
+{
+ struct br_switchdev_mdb_complete_info *data = priv;
+ struct net_bridge_port_group __rcu **pp;
+ struct net_bridge_port_group *p;
+ struct net_bridge_mdb_entry *mp;
+ struct net_bridge_port *port = data->port;
+ struct net_bridge *br = port->br;
+
+ if (err)
+ goto err;
+
+ spin_lock_bh(&br->multicast_lock);
+ mp = br_mdb_ip_get(br, &data->ip);
+ if (!mp)
+ goto out;
+ for (pp = &mp->ports; (p = mlock_dereference(*pp, br)) != NULL;
+ pp = &p->next) {
+ if (p->key.port != port)
+ continue;
+ p->flags |= MDB_PG_FLAGS_OFFLOAD;
+ }
+out:
+ spin_unlock_bh(&br->multicast_lock);
+err:
+ kfree(priv);
+}
+
+static void br_switchdev_mdb_populate(struct switchdev_obj_port_mdb *mdb,
+ const struct net_bridge_mdb_entry *mp)
+{
+ if (mp->addr.proto == htons(ETH_P_IP))
+ ip_eth_mc_map(mp->addr.dst.ip4, mdb->addr);
+#if IS_ENABLED(CONFIG_IPV6)
+ else if (mp->addr.proto == htons(ETH_P_IPV6))
+ ipv6_eth_mc_map(&mp->addr.dst.ip6, mdb->addr);
+#endif
+ else
+ ether_addr_copy(mdb->addr, mp->addr.dst.mac_addr);
+
+ mdb->vid = mp->addr.vid;
+}
+
+static void br_switchdev_host_mdb_one(struct net_device *dev,
+ struct net_device *lower_dev,
+ struct net_bridge_mdb_entry *mp,
+ int type)
+{
+ struct switchdev_obj_port_mdb mdb = {
+ .obj = {
+ .id = SWITCHDEV_OBJ_ID_HOST_MDB,
+ .flags = SWITCHDEV_F_DEFER,
+ .orig_dev = dev,
+ },
+ };
+
+ br_switchdev_mdb_populate(&mdb, mp);
+
+ switch (type) {
+ case RTM_NEWMDB:
+ switchdev_port_obj_add(lower_dev, &mdb.obj, NULL);
+ break;
+ case RTM_DELMDB:
+ switchdev_port_obj_del(lower_dev, &mdb.obj);
+ break;
+ }
+}
+
+static void br_switchdev_host_mdb(struct net_device *dev,
+ struct net_bridge_mdb_entry *mp, int type)
+{
+ struct net_device *lower_dev;
+ struct list_head *iter;
+
+ netdev_for_each_lower_dev(dev, lower_dev, iter)
+ br_switchdev_host_mdb_one(dev, lower_dev, mp, type);
+}
+
+static int
+br_switchdev_mdb_replay_one(struct notifier_block *nb, struct net_device *dev,
+ const struct switchdev_obj_port_mdb *mdb,
+ unsigned long action, const void *ctx,
+ struct netlink_ext_ack *extack)
+{
+ struct switchdev_notifier_port_obj_info obj_info = {
+ .info = {
+ .dev = dev,
+ .extack = extack,
+ .ctx = ctx,
+ },
+ .obj = &mdb->obj,
+ };
+ int err;
+
+ err = nb->notifier_call(nb, action, &obj_info);
+ return notifier_to_errno(err);
+}
+
+static int br_switchdev_mdb_queue_one(struct list_head *mdb_list,
+ enum switchdev_obj_id id,
+ const struct net_bridge_mdb_entry *mp,
+ struct net_device *orig_dev)
+{
+ struct switchdev_obj_port_mdb *mdb;
+
+ mdb = kzalloc(sizeof(*mdb), GFP_ATOMIC);
+ if (!mdb)
+ return -ENOMEM;
+
+ mdb->obj.id = id;
+ mdb->obj.orig_dev = orig_dev;
+ br_switchdev_mdb_populate(mdb, mp);
+ list_add_tail(&mdb->obj.list, mdb_list);
+
+ return 0;
+}
+
+void br_switchdev_mdb_notify(struct net_device *dev,
+ struct net_bridge_mdb_entry *mp,
+ struct net_bridge_port_group *pg,
+ int type)
+{
+ struct br_switchdev_mdb_complete_info *complete_info;
+ struct switchdev_obj_port_mdb mdb = {
+ .obj = {
+ .id = SWITCHDEV_OBJ_ID_PORT_MDB,
+ .flags = SWITCHDEV_F_DEFER,
+ },
+ };
+
+ if (!pg)
+ return br_switchdev_host_mdb(dev, mp, type);
+
+ br_switchdev_mdb_populate(&mdb, mp);
+
+ mdb.obj.orig_dev = pg->key.port->dev;
+ switch (type) {
+ case RTM_NEWMDB:
+ complete_info = kmalloc(sizeof(*complete_info), GFP_ATOMIC);
+ if (!complete_info)
+ break;
+ complete_info->port = pg->key.port;
+ complete_info->ip = mp->addr;
+ mdb.obj.complete_priv = complete_info;
+ mdb.obj.complete = br_switchdev_mdb_complete;
+ if (switchdev_port_obj_add(pg->key.port->dev, &mdb.obj, NULL))
+ kfree(complete_info);
+ break;
+ case RTM_DELMDB:
+ switchdev_port_obj_del(pg->key.port->dev, &mdb.obj);
+ break;
+ }
+}
+#endif
+
+static int
+br_switchdev_mdb_replay(struct net_device *br_dev, struct net_device *dev,
+ const void *ctx, bool adding, struct notifier_block *nb,
+ struct netlink_ext_ack *extack)
+{
+#ifdef CONFIG_BRIDGE_IGMP_SNOOPING
+ const struct net_bridge_mdb_entry *mp;
+ struct switchdev_obj *obj, *tmp;
+ struct net_bridge *br;
+ unsigned long action;
+ LIST_HEAD(mdb_list);
+ int err = 0;
+
+ ASSERT_RTNL();
+
+ if (!nb)
+ return 0;
+
+ if (!netif_is_bridge_master(br_dev) || !netif_is_bridge_port(dev))
+ return -EINVAL;
+
+ br = netdev_priv(br_dev);
+
+ if (!br_opt_get(br, BROPT_MULTICAST_ENABLED))
+ return 0;
+
+ /* We cannot walk over br->mdb_list protected just by the rtnl_mutex,
+ * because the write-side protection is br->multicast_lock. But we
+ * need to emulate the [ blocking ] calling context of a regular
+ * switchdev event, so since both br->multicast_lock and RCU read side
+ * critical sections are atomic, we have no choice but to pick the RCU
+ * read side lock, queue up all our events, leave the critical section
+ * and notify switchdev from blocking context.
+ */
+ rcu_read_lock();
+
+ hlist_for_each_entry_rcu(mp, &br->mdb_list, mdb_node) {
+ struct net_bridge_port_group __rcu * const *pp;
+ const struct net_bridge_port_group *p;
+
+ if (mp->host_joined) {
+ err = br_switchdev_mdb_queue_one(&mdb_list,
+ SWITCHDEV_OBJ_ID_HOST_MDB,
+ mp, br_dev);
+ if (err) {
+ rcu_read_unlock();
+ goto out_free_mdb;
+ }
+ }
+
+ for (pp = &mp->ports; (p = rcu_dereference(*pp)) != NULL;
+ pp = &p->next) {
+ if (p->key.port->dev != dev)
+ continue;
+
+ err = br_switchdev_mdb_queue_one(&mdb_list,
+ SWITCHDEV_OBJ_ID_PORT_MDB,
+ mp, dev);
+ if (err) {
+ rcu_read_unlock();
+ goto out_free_mdb;
+ }
+ }
+ }
+
+ rcu_read_unlock();
+
+ if (adding)
+ action = SWITCHDEV_PORT_OBJ_ADD;
+ else
+ action = SWITCHDEV_PORT_OBJ_DEL;
+
+ list_for_each_entry(obj, &mdb_list, list) {
+ err = br_switchdev_mdb_replay_one(nb, dev,
+ SWITCHDEV_OBJ_PORT_MDB(obj),
+ action, ctx, extack);
+ if (err)
+ goto out_free_mdb;
+ }
+
+out_free_mdb:
+ list_for_each_entry_safe(obj, tmp, &mdb_list, list) {
+ list_del(&obj->list);
+ kfree(SWITCHDEV_OBJ_PORT_MDB(obj));
+ }
+
+ if (err)
+ return err;
+#endif
+
+ return 0;
+}
+
static int nbp_switchdev_sync_objs(struct net_bridge_port *p, const void *ctx,
struct notifier_block *atomic_nb,
struct notifier_block *blocking_nb,
@@ -279,15 +691,16 @@ static int nbp_switchdev_sync_objs(struct net_bridge_port *p, const void *ctx,
struct net_device *dev = p->dev;
int err;
- err = br_vlan_replay(br_dev, dev, ctx, true, blocking_nb, extack);
+ err = br_switchdev_vlan_replay(br_dev, ctx, true, blocking_nb, extack);
if (err && err != -EOPNOTSUPP)
return err;
- err = br_mdb_replay(br_dev, dev, ctx, true, blocking_nb, extack);
+ err = br_switchdev_mdb_replay(br_dev, dev, ctx, true, blocking_nb,
+ extack);
if (err && err != -EOPNOTSUPP)
return err;
- err = br_fdb_replay(br_dev, ctx, true, atomic_nb);
+ err = br_switchdev_fdb_replay(br_dev, ctx, true, atomic_nb);
if (err && err != -EOPNOTSUPP)
return err;
@@ -302,11 +715,11 @@ static void nbp_switchdev_unsync_objs(struct net_bridge_port *p,
struct net_device *br_dev = p->br->dev;
struct net_device *dev = p->dev;
- br_vlan_replay(br_dev, dev, ctx, false, blocking_nb, NULL);
+ br_switchdev_fdb_replay(br_dev, ctx, false, atomic_nb);
- br_mdb_replay(br_dev, dev, ctx, false, blocking_nb, NULL);
+ br_switchdev_mdb_replay(br_dev, dev, ctx, false, blocking_nb, NULL);
- br_fdb_replay(br_dev, ctx, false, atomic_nb);
+ br_switchdev_vlan_replay(br_dev, ctx, false, blocking_nb, NULL);
}
/* Let the bridge know that this port is offloaded, so that it can assign a
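
The replay helpers consolidated above deliver their events to the same notifier blocks a switchdev driver already registers for live bridge events, so no separate replay entry point is needed on the driver side. A hedged sketch of the blocking (object) side of such a consumer, the kind of callback br_switchdev_vlan_replay() and br_switchdev_mdb_replay() invoke; demo_port_obj_event() is hypothetical and the hardware-programming steps are left as comments because they are driver specific (relies on <net/switchdev.h>):

/* Illustrative sketch only -- a SWITCHDEV_PORT_OBJ_ADD/DEL handler. */
static int demo_port_obj_event(struct notifier_block *nb,
                               unsigned long event, void *ptr)
{
        struct switchdev_notifier_port_obj_info *port_obj_info = ptr;
        const struct switchdev_obj *obj = port_obj_info->obj;

        /* event is SWITCHDEV_PORT_OBJ_ADD or SWITCHDEV_PORT_OBJ_DEL,
         * both for live notifications and for replayed objects.
         */
        switch (obj->id) {
        case SWITCHDEV_OBJ_ID_PORT_VLAN:
                /* program/remove SWITCHDEV_OBJ_PORT_VLAN(obj)->vid */
                break;
        case SWITCHDEV_OBJ_ID_PORT_MDB:
        case SWITCHDEV_OBJ_ID_HOST_MDB:
                /* program/remove SWITCHDEV_OBJ_PORT_MDB(obj)->addr, ->vid */
                break;
        default:
                return NOTIFY_DONE;
        }

        port_obj_info->handled = true;
        return notifier_from_errno(0);
}
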
diff --git a/net/bridge/br_vlan.c b/net/bridge/br_vlan.c
index 86441ff78a0f..e871c62fdd4d 100644
--- a/net/bridge/br_vlan.c
+++ b/net/bridge/br_vlan.c
@@ -34,53 +34,70 @@ static struct net_bridge_vlan *br_vlan_lookup(struct rhashtable *tbl, u16 vid)
return rhashtable_lookup_fast(tbl, &vid, br_vlan_rht_params);
}
-static bool __vlan_add_pvid(struct net_bridge_vlan_group *vg,
+static void __vlan_add_pvid(struct net_bridge_vlan_group *vg,
const struct net_bridge_vlan *v)
{
if (vg->pvid == v->vid)
- return false;
+ return;
smp_wmb();
br_vlan_set_pvid_state(vg, v->state);
vg->pvid = v->vid;
-
- return true;
}
-static bool __vlan_delete_pvid(struct net_bridge_vlan_group *vg, u16 vid)
+static void __vlan_delete_pvid(struct net_bridge_vlan_group *vg, u16 vid)
{
if (vg->pvid != vid)
- return false;
+ return;
smp_wmb();
vg->pvid = 0;
-
- return true;
}
-/* return true if anything changed, false otherwise */
-static bool __vlan_add_flags(struct net_bridge_vlan *v, u16 flags)
+/* Update the BRIDGE_VLAN_INFO_PVID and BRIDGE_VLAN_INFO_UNTAGGED flags of @v.
+ * If @commit is false, return just whether the BRIDGE_VLAN_INFO_PVID and
+ * BRIDGE_VLAN_INFO_UNTAGGED bits of @flags would produce any change onto @v.
+ */
+static bool __vlan_flags_update(struct net_bridge_vlan *v, u16 flags,
+ bool commit)
{
struct net_bridge_vlan_group *vg;
- u16 old_flags = v->flags;
- bool ret;
+ bool change;
if (br_vlan_is_master(v))
vg = br_vlan_group(v->br);
else
vg = nbp_vlan_group(v->port);
+ /* check if anything would be changed on commit */
+ change = !!(flags & BRIDGE_VLAN_INFO_PVID) == !!(vg->pvid != v->vid) ||
+ ((flags ^ v->flags) & BRIDGE_VLAN_INFO_UNTAGGED);
+
+ if (!commit)
+ goto out;
+
if (flags & BRIDGE_VLAN_INFO_PVID)
- ret = __vlan_add_pvid(vg, v);
+ __vlan_add_pvid(vg, v);
else
- ret = __vlan_delete_pvid(vg, v->vid);
+ __vlan_delete_pvid(vg, v->vid);
if (flags & BRIDGE_VLAN_INFO_UNTAGGED)
v->flags |= BRIDGE_VLAN_INFO_UNTAGGED;
else
v->flags &= ~BRIDGE_VLAN_INFO_UNTAGGED;
- return ret || !!(old_flags ^ v->flags);
+out:
+ return change;
+}
+
+static bool __vlan_flags_would_change(struct net_bridge_vlan *v, u16 flags)
+{
+ return __vlan_flags_update(v, flags, false);
+}
+
+static void __vlan_flags_commit(struct net_bridge_vlan *v, u16 flags)
+{
+ __vlan_flags_update(v, flags, true);
}
static int __vlan_vid_add(struct net_device *dev, struct net_bridge *br,
@@ -92,7 +109,7 @@ static int __vlan_vid_add(struct net_device *dev, struct net_bridge *br,
/* Try switchdev op first. In case it is not supported, fallback to
* 8021q add.
*/
- err = br_switchdev_port_vlan_add(dev, v->vid, flags, extack);
+ err = br_switchdev_port_vlan_add(dev, v->vid, flags, false, extack);
if (err == -EOPNOTSUPP)
return vlan_vid_add(dev, br->vlan_proto, v->vid);
v->priv_flags |= BR_VLFLAG_ADDED_BY_SWITCHDEV;
@@ -284,16 +301,19 @@ static int __vlan_add(struct net_bridge_vlan *v, u16 flags,
}
br_multicast_port_ctx_init(p, v, &v->port_mcast_ctx);
} else {
- err = br_switchdev_port_vlan_add(dev, v->vid, flags, extack);
- if (err && err != -EOPNOTSUPP)
- goto out;
+ if (br_vlan_should_use(v)) {
+ err = br_switchdev_port_vlan_add(dev, v->vid, flags,
+ false, extack);
+ if (err && err != -EOPNOTSUPP)
+ goto out;
+ }
br_multicast_ctx_init(br, v, &v->br_mcast_ctx);
v->priv_flags |= BR_VLFLAG_GLOBAL_MCAST_ENABLED;
}
/* Add the dev mac and count the vlan only if it's usable */
if (br_vlan_should_use(v)) {
- err = br_fdb_insert(br, p, dev->dev_addr, v->vid);
+ err = br_fdb_add_local(br, p, dev->dev_addr, v->vid);
if (err) {
br_err(br, "failed insert local address into bridge forwarding table\n");
goto out_filt;
@@ -310,7 +330,7 @@ static int __vlan_add(struct net_bridge_vlan *v, u16 flags,
goto out_fdb_insert;
__vlan_add_list(v);
- __vlan_add_flags(v, flags);
+ __vlan_flags_commit(v, flags);
br_multicast_toggle_one_vlan(v, true);
if (p)
@@ -404,6 +424,7 @@ static void __vlan_flush(const struct net_bridge *br,
{
struct net_bridge_vlan *vlan, *tmp;
u16 v_start = 0, v_end = 0;
+ int err;
__vlan_delete_pvid(vg, vg->pvid);
list_for_each_entry_safe(vlan, tmp, &vg->vlan_list, vlist) {
@@ -417,7 +438,13 @@ static void __vlan_flush(const struct net_bridge *br,
}
v_end = vlan->vid;
- __vlan_del(vlan);
+ err = __vlan_del(vlan);
+ if (err) {
+ br_err(br,
+ "port %u(%s) failed to delete vlan %d: %pe\n",
+ (unsigned int) p->port_no, p->dev->name,
+ vlan->vid, ERR_PTR(err));
+ }
}
/* notify about the last/whole vlan range */
@@ -670,21 +697,31 @@ static int br_vlan_add_existing(struct net_bridge *br,
u16 flags, bool *changed,
struct netlink_ext_ack *extack)
{
+ bool would_change = __vlan_flags_would_change(vlan, flags);
+ bool becomes_brentry = false;
int err;
- err = br_switchdev_port_vlan_add(br->dev, vlan->vid, flags, extack);
- if (err && err != -EOPNOTSUPP)
- return err;
-
if (!br_vlan_is_brentry(vlan)) {
/* Trying to change flags of non-existent bridge vlan */
- if (!(flags & BRIDGE_VLAN_INFO_BRENTRY)) {
- err = -EINVAL;
- goto err_flags;
- }
+ if (!(flags & BRIDGE_VLAN_INFO_BRENTRY))
+ return -EINVAL;
+
+ becomes_brentry = true;
+ }
+
+ /* Master VLANs that aren't brentries weren't notified before,
+ * time to notify them now.
+ */
+ if (becomes_brentry || would_change) {
+ err = br_switchdev_port_vlan_add(br->dev, vlan->vid, flags,
+ would_change, extack);
+ if (err && err != -EOPNOTSUPP)
+ return err;
+ }
+
+ if (becomes_brentry) {
/* It was only kept for port vlans, now make it real */
- err = br_fdb_insert(br, NULL, br->dev->dev_addr,
- vlan->vid);
+ err = br_fdb_add_local(br, NULL, br->dev->dev_addr, vlan->vid);
if (err) {
br_err(br, "failed to insert local address into bridge forwarding table\n");
goto err_fdb_insert;
@@ -697,13 +734,13 @@ static int br_vlan_add_existing(struct net_bridge *br,
br_multicast_toggle_one_vlan(vlan, true);
}
- if (__vlan_add_flags(vlan, flags))
+ __vlan_flags_commit(vlan, flags);
+ if (would_change)
*changed = true;
return 0;
err_fdb_insert:
-err_flags:
br_switchdev_port_vlan_del(br->dev, vlan->vid);
return err;
}
@@ -1259,11 +1296,18 @@ int nbp_vlan_add(struct net_bridge_port *port, u16 vid, u16 flags,
*changed = false;
vlan = br_vlan_find(nbp_vlan_group(port), vid);
if (vlan) {
- /* Pass the flags to the hardware bridge */
- ret = br_switchdev_port_vlan_add(port->dev, vid, flags, extack);
- if (ret && ret != -EOPNOTSUPP)
- return ret;
- *changed = __vlan_add_flags(vlan, flags);
+ bool would_change = __vlan_flags_would_change(vlan, flags);
+
+ if (would_change) {
+ /* Pass the flags to the hardware bridge */
+ ret = br_switchdev_port_vlan_add(port->dev, vid, flags,
+ true, extack);
+ if (ret && ret != -EOPNOTSUPP)
+ return ret;
+ }
+
+ __vlan_flags_commit(vlan, flags);
+ *changed = would_change;
return 0;
}
@@ -1872,90 +1916,6 @@ out_kfree:
kfree_skb(skb);
}
-static int br_vlan_replay_one(struct notifier_block *nb,
- struct net_device *dev,
- struct switchdev_obj_port_vlan *vlan,
- const void *ctx, unsigned long action,
- struct netlink_ext_ack *extack)
-{
- struct switchdev_notifier_port_obj_info obj_info = {
- .info = {
- .dev = dev,
- .extack = extack,
- .ctx = ctx,
- },
- .obj = &vlan->obj,
- };
- int err;
-
- err = nb->notifier_call(nb, action, &obj_info);
- return notifier_to_errno(err);
-}
-
-int br_vlan_replay(struct net_device *br_dev, struct net_device *dev,
- const void *ctx, bool adding, struct notifier_block *nb,
- struct netlink_ext_ack *extack)
-{
- struct net_bridge_vlan_group *vg;
- struct net_bridge_vlan *v;
- struct net_bridge_port *p;
- struct net_bridge *br;
- unsigned long action;
- int err = 0;
- u16 pvid;
-
- ASSERT_RTNL();
-
- if (!nb)
- return 0;
-
- if (!netif_is_bridge_master(br_dev))
- return -EINVAL;
-
- if (!netif_is_bridge_master(dev) && !netif_is_bridge_port(dev))
- return -EINVAL;
-
- if (netif_is_bridge_master(dev)) {
- br = netdev_priv(dev);
- vg = br_vlan_group(br);
- p = NULL;
- } else {
- p = br_port_get_rtnl(dev);
- if (WARN_ON(!p))
- return -EINVAL;
- vg = nbp_vlan_group(p);
- br = p->br;
- }
-
- if (!vg)
- return 0;
-
- if (adding)
- action = SWITCHDEV_PORT_OBJ_ADD;
- else
- action = SWITCHDEV_PORT_OBJ_DEL;
-
- pvid = br_get_pvid(vg);
-
- list_for_each_entry(v, &vg->vlan_list, vlist) {
- struct switchdev_obj_port_vlan vlan = {
- .obj.orig_dev = dev,
- .obj.id = SWITCHDEV_OBJ_ID_PORT_VLAN,
- .flags = br_vlan_flags(v, pvid),
- .vid = v->vid,
- };
-
- if (!br_vlan_should_use(v))
- continue;
-
- err = br_vlan_replay_one(nb, dev, &vlan, ctx, action, extack);
- if (err)
- return err;
- }
-
- return err;
-}
-
/* check if v_curr can enter a range ending in range_end */
bool br_vlan_can_enter_range(const struct net_bridge_vlan *v_curr,
const struct net_bridge_vlan *range_end)
diff --git a/net/core/flow_offload.c b/net/core/flow_offload.c
index fb11103fa8af..e487f3916693 100644
--- a/net/core/flow_offload.c
+++ b/net/core/flow_offload.c
@@ -27,6 +27,26 @@ struct flow_rule *flow_rule_alloc(unsigned int num_actions)
}
EXPORT_SYMBOL(flow_rule_alloc);
+struct flow_offload_action *offload_action_alloc(unsigned int num_actions)
+{
+ struct flow_offload_action *fl_action;
+ int i;
+
+ fl_action = kzalloc(struct_size(fl_action, action.entries, num_actions),
+ GFP_KERNEL);
+ if (!fl_action)
+ return NULL;
+
+ fl_action->action.num_entries = num_actions;
+ /* Pre-fill each action hw_stats with DONT_CARE.
+ * Caller can override this if it wants stats for a given action.
+ */
+ for (i = 0; i < num_actions; i++)
+ fl_action->action.entries[i].hw_stats = FLOW_ACTION_HW_STATS_DONT_CARE;
+
+ return fl_action;
+}
+
#define FLOW_DISSECTOR_MATCH(__rule, __type, __out) \
const struct flow_match *__m = &(__rule)->match; \
struct flow_dissector *__d = (__m)->dissector; \
@@ -549,20 +569,26 @@ int flow_indr_dev_setup_offload(struct net_device *dev, struct Qdisc *sch,
void (*cleanup)(struct flow_block_cb *block_cb))
{
struct flow_indr_dev *this;
+ u32 count = 0;
+ int err;
mutex_lock(&flow_indr_block_lock);
+ if (bo) {
+ if (bo->command == FLOW_BLOCK_BIND)
+ indir_dev_add(data, dev, sch, type, cleanup, bo);
+ else if (bo->command == FLOW_BLOCK_UNBIND)
+ indir_dev_remove(data);
+ }
- if (bo->command == FLOW_BLOCK_BIND)
- indir_dev_add(data, dev, sch, type, cleanup, bo);
- else if (bo->command == FLOW_BLOCK_UNBIND)
- indir_dev_remove(data);
-
- list_for_each_entry(this, &flow_block_indr_dev_list, list)
- this->cb(dev, sch, this->cb_priv, type, bo, data, cleanup);
+ list_for_each_entry(this, &flow_block_indr_dev_list, list) {
+ err = this->cb(dev, sch, this->cb_priv, type, bo, data, cleanup);
+ if (!err)
+ count++;
+ }
mutex_unlock(&flow_indr_block_lock);
- return list_empty(&bo->cb_list) ? -EOPNOTSUPP : 0;
+ return (bo && list_empty(&bo->cb_list)) ? -EOPNOTSUPP : count;
}
EXPORT_SYMBOL(flow_indr_dev_setup_offload);
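
offload_action_alloc() pre-fills each entry's hw_stats with FLOW_ACTION_HW_STATS_DONT_CARE, so callers only override the fields they care about. A hedged sketch of a caller building a single police action (demo_build_police() is hypothetical; the flow_action_entry fields are the existing ones from <net/flow_offload.h>):

/* Illustrative sketch only -- not part of this patch. */
static struct flow_offload_action *demo_build_police(u64 rate_bytes_ps,
                                                     u32 burst)
{
        struct flow_offload_action *fl_action;
        struct flow_action_entry *entry;

        fl_action = offload_action_alloc(1);
        if (!fl_action)
                return NULL;

        entry = &fl_action->action.entries[0];
        entry->id = FLOW_ACTION_POLICE;
        entry->police.rate_bytes_ps = rate_bytes_ps;
        entry->police.burst = burst;
        /* entry->hw_stats already defaults to DONT_CARE */

        return fl_action;
}
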
diff --git a/net/core/of_net.c b/net/core/of_net.c
index dbac3a172a11..17299777435b 100644
--- a/net/core/of_net.c
+++ b/net/core/of_net.c
@@ -140,6 +140,10 @@ int of_get_mac_address(struct device_node *np, u8 *addr)
if (!ret)
return 0;
+ ret = of_get_mac_addr(np, "nvmem-mac-address", addr);
+ if (!ret)
+ return 0;
+
return of_get_mac_addr_nvmem(np, addr);
}
EXPORT_SYMBOL(of_get_mac_address);
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index d4b25d6fd01d..7cee8644af22 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -994,6 +994,32 @@ void napi_consume_skb(struct sk_buff *skb, int budget)
}
EXPORT_SYMBOL(napi_consume_skb);
+/**
+ * skb_recycle - clean up an skb for reuse
+ * @skb: buffer
+ *
+ * Recycles the skb to be reused as a receive buffer. This
+ * function does any necessary reference count dropping, and
+ * cleans up the skbuff as if it just came from __alloc_skb().
+ */
+void skb_recycle(struct sk_buff *skb)
+{
+ struct skb_shared_info *shinfo;
+ u8 head_frag = skb->head_frag;
+
+ skb_release_head_state(skb);
+
+ shinfo = skb_shinfo(skb);
+ memset(shinfo, 0, offsetof(struct skb_shared_info, dataref));
+ atomic_set(&shinfo->dataref, 1);
+
+ memset(skb, 0, offsetof(struct sk_buff, tail));
+ skb->data = skb->head + NET_SKB_PAD;
+ skb->head_frag = head_frag;
+ skb_reset_tail_pointer(skb);
+}
+EXPORT_SYMBOL(skb_recycle);
+
/* Make sure a field is enclosed inside headers_start/headers_end section */
#define CHECK_SKB_FIELD(field) \
BUILD_BUG_ON(offsetof(struct sk_buff, field) < \
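
skb_recycle() is intended for drivers that want to put a transmitted or dropped buffer straight back onto their receive ring instead of freeing and re-allocating it. A hedged sketch of such a path (demo_ring, demo_rx_refill() and DEMO_RX_BUF_SIZE are hypothetical; the skb ownership checks are the usual ones a driver would make before reuse):

/* Illustrative sketch only -- driver-side use of skb_recycle(). */
#define DEMO_RX_BUF_SIZE        2048

struct demo_ring;
void demo_rx_refill(struct demo_ring *ring, struct sk_buff *skb);

static void demo_tx_complete(struct demo_ring *ring, struct sk_buff *skb)
{
        /* Only recycle buffers we own exclusively and that are still
         * big enough to serve as a fresh receive buffer.
         */
        if (skb_shared(skb) || skb_cloned(skb) ||
            skb_end_offset(skb) < DEMO_RX_BUF_SIZE) {
                dev_consume_skb_any(skb);
                return;
        }

        skb_recycle(skb);
        demo_rx_refill(ring, skb);
}
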
diff --git a/net/dsa/Kconfig b/net/dsa/Kconfig
index d8ee15f1c7a9..8cb87b5067ee 100644
--- a/net/dsa/Kconfig
+++ b/net/dsa/Kconfig
@@ -92,13 +92,6 @@ config NET_DSA_TAG_KSZ
Say Y if you want to enable support for tagging frames for the
Microchip 8795/9477/9893 families of switches.
-config NET_DSA_TAG_RTL4_A
- tristate "Tag driver for Realtek 4 byte protocol A tags"
- help
- Say Y or M if you want to enable support for tagging frames for the
- Realtek switches with 4 byte protocol A tags, sich as found in
- the Realtek RTL8366RB.
-
config NET_DSA_TAG_OCELOT
tristate "Tag driver for Ocelot family of switches, using NPI port"
select PACKING
@@ -126,6 +119,19 @@ config NET_DSA_TAG_QCA
Say Y or M if you want to enable support for tagging frames for
the Qualcomm Atheros QCA8K switches.
+config NET_DSA_TAG_RTL4_A
+ tristate "Tag driver for Realtek 4 byte protocol A tags"
+ help
+ Say Y or M if you want to enable support for tagging frames for the
+ Realtek switches with 4 byte protocol A tags, sich as found in
+ the Realtek RTL8366RB.
+
+config NET_DSA_TAG_RTL8_4
+ tristate "Tag driver for Realtek 8 byte protocol 4 tags"
+ help
+ Say Y or M if you want to enable support for tagging frames for Realtek
+ switches with 8 byte protocol 4 tags, such as the Realtek RTL8365MB-VC.
+
config NET_DSA_TAG_LAN9303
tristate "Tag driver for SMSC/Microchip LAN9303 family of switches"
help
diff --git a/net/dsa/Makefile b/net/dsa/Makefile
index 67ea009f242c..9f75820e7c98 100644
--- a/net/dsa/Makefile
+++ b/net/dsa/Makefile
@@ -10,12 +10,13 @@ obj-$(CONFIG_NET_DSA_TAG_DSA_COMMON) += tag_dsa.o
obj-$(CONFIG_NET_DSA_TAG_GSWIP) += tag_gswip.o
obj-$(CONFIG_NET_DSA_TAG_HELLCREEK) += tag_hellcreek.o
obj-$(CONFIG_NET_DSA_TAG_KSZ) += tag_ksz.o
-obj-$(CONFIG_NET_DSA_TAG_RTL4_A) += tag_rtl4_a.o
obj-$(CONFIG_NET_DSA_TAG_LAN9303) += tag_lan9303.o
obj-$(CONFIG_NET_DSA_TAG_MTK) += tag_mtk.o
obj-$(CONFIG_NET_DSA_TAG_OCELOT) += tag_ocelot.o
obj-$(CONFIG_NET_DSA_TAG_OCELOT_8021Q) += tag_ocelot_8021q.o
obj-$(CONFIG_NET_DSA_TAG_QCA) += tag_qca.o
+obj-$(CONFIG_NET_DSA_TAG_RTL4_A) += tag_rtl4_a.o
+obj-$(CONFIG_NET_DSA_TAG_RTL8_4) += tag_rtl8_4.o
obj-$(CONFIG_NET_DSA_TAG_SJA1105) += tag_sja1105.o
obj-$(CONFIG_NET_DSA_TAG_TRAILER) += tag_trailer.o
obj-$(CONFIG_NET_DSA_TAG_XRS700X) += tag_xrs700x.o
diff --git a/net/dsa/dsa.c b/net/dsa/dsa.c
index 41f36ad8b0ec..1ba954195f19 100644
--- a/net/dsa/dsa.c
+++ b/net/dsa/dsa.c
@@ -280,23 +280,22 @@ static int dsa_switch_rcv(struct sk_buff *skb, struct net_device *dev,
}
#ifdef CONFIG_PM_SLEEP
-static bool dsa_is_port_initialized(struct dsa_switch *ds, int p)
+static bool dsa_port_is_initialized(const struct dsa_port *dp)
{
- const struct dsa_port *dp = dsa_to_port(ds, p);
-
return dp->type == DSA_PORT_TYPE_USER && dp->slave;
}
int dsa_switch_suspend(struct dsa_switch *ds)
{
- int i, ret = 0;
+ struct dsa_port *dp;
+ int ret = 0;
/* Suspend slave network devices */
- for (i = 0; i < ds->num_ports; i++) {
- if (!dsa_is_port_initialized(ds, i))
+ dsa_switch_for_each_port(dp, ds) {
+ if (!dsa_port_is_initialized(dp))
continue;
- ret = dsa_slave_suspend(dsa_to_port(ds, i)->slave);
+ ret = dsa_slave_suspend(dp->slave);
if (ret)
return ret;
}
@@ -310,7 +309,8 @@ EXPORT_SYMBOL_GPL(dsa_switch_suspend);
int dsa_switch_resume(struct dsa_switch *ds)
{
- int i, ret = 0;
+ struct dsa_port *dp;
+ int ret = 0;
if (ds->ops->resume)
ret = ds->ops->resume(ds);
@@ -319,11 +319,11 @@ int dsa_switch_resume(struct dsa_switch *ds)
return ret;
/* Resume slave network devices */
- for (i = 0; i < ds->num_ports; i++) {
- if (!dsa_is_port_initialized(ds, i))
+ dsa_switch_for_each_port(dp, ds) {
+ if (!dsa_port_is_initialized(dp))
continue;
- ret = dsa_slave_resume(dsa_to_port(ds, i)->slave);
+ ret = dsa_slave_resume(dp->slave);
if (ret)
return ret;
}
@@ -466,6 +466,106 @@ struct dsa_port *dsa_port_from_netdev(struct net_device *netdev)
}
EXPORT_SYMBOL_GPL(dsa_port_from_netdev);
+int dsa_port_walk_fdbs(struct dsa_switch *ds, int port, dsa_fdb_walk_cb_t cb)
+{
+ struct dsa_port *dp = dsa_to_port(ds, port);
+ struct dsa_mac_addr *a;
+ int err;
+
+ mutex_lock(&dp->addr_lists_lock);
+
+ list_for_each_entry(a, &dp->fdbs, list) {
+ err = cb(ds, port, a->addr, a->vid, a->db);
+ if (err)
+ break;
+ }
+
+ mutex_unlock(&dp->addr_lists_lock);
+
+ return err;
+}
+EXPORT_SYMBOL_GPL(dsa_port_walk_fdbs);
+
+int dsa_port_walk_mdbs(struct dsa_switch *ds, int port, dsa_fdb_walk_cb_t cb)
+{
+ struct dsa_port *dp = dsa_to_port(ds, port);
+ struct dsa_mac_addr *a;
+ int err;
+
+ mutex_lock(&dp->addr_lists_lock);
+
+ list_for_each_entry(a, &dp->mdbs, list) {
+ err = cb(ds, port, a->addr, a->vid, a->db);
+ if (err)
+ break;
+ }
+
+ mutex_unlock(&dp->addr_lists_lock);
+
+ return err;
+}
+EXPORT_SYMBOL_GPL(dsa_port_walk_mdbs);
+
+bool dsa_db_equal(const struct dsa_db *a, const struct dsa_db *b)
+{
+ if (a->type != b->type)
+ return false;
+
+ switch (a->type) {
+ case DSA_DB_PORT:
+ return a->dp == b->dp;
+ case DSA_DB_LAG:
+ return a->lag.dev == b->lag.dev;
+ case DSA_DB_BRIDGE:
+ return a->bridge.num == b->bridge.num;
+ default:
+ WARN_ON(1);
+ return false;
+ }
+}
+
+bool dsa_fdb_present_in_other_db(struct dsa_switch *ds, int port,
+ const unsigned char *addr, u16 vid,
+ struct dsa_db db)
+{
+ struct dsa_port *dp = dsa_to_port(ds, port);
+ struct dsa_mac_addr *a;
+
+ lockdep_assert_held(&dp->addr_lists_lock);
+
+ list_for_each_entry(a, &dp->fdbs, list) {
+ if (!ether_addr_equal(a->addr, addr) || a->vid != vid)
+ continue;
+
+ if (a->db.type == db.type && !dsa_db_equal(&a->db, &db))
+ return true;
+ }
+
+ return false;
+}
+EXPORT_SYMBOL_GPL(dsa_fdb_present_in_other_db);
+
+bool dsa_mdb_present_in_other_db(struct dsa_switch *ds, int port,
+ const struct switchdev_obj_port_mdb *mdb,
+ struct dsa_db db)
+{
+ struct dsa_port *dp = dsa_to_port(ds, port);
+ struct dsa_mac_addr *a;
+
+ lockdep_assert_held(&dp->addr_lists_lock);
+
+ list_for_each_entry(a, &dp->mdbs, list) {
+ if (!ether_addr_equal(a->addr, mdb->addr) || a->vid != mdb->vid)
+ continue;
+
+ if (a->db.type == db.type && !dsa_db_equal(&a->db, &db))
+ return true;
+ }
+
+ return false;
+}
+EXPORT_SYMBOL_GPL(dsa_mdb_present_in_other_db);
+
static int __init dsa_init_module(void)
{
int rc;
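For illustration, the database-aware FDB walk added to dsa.c above can be modeled in a few lines of plain C. This is a userspace sketch only, not kernel code: struct db, struct fdb_entry, same_db() and present_in_other_db() are hypothetical stand-ins for struct dsa_db, struct dsa_mac_addr, dsa_db_equal() and dsa_fdb_present_in_other_db().

/* Userspace sketch only; types and helpers are illustrative stand-ins. */
#include <stdbool.h>
#include <stdio.h>
#include <string.h>

enum db_type { DB_PORT, DB_BRIDGE };

struct db {
	enum db_type type;
	int id;				/* port index or bridge number */
};

struct fdb_entry {
	unsigned char addr[6];
	unsigned short vid;
	struct db db;
};

static bool same_db(const struct db *a, const struct db *b)
{
	return a->type == b->type && a->id == b->id;
}

/* True if {addr, vid} is already held on behalf of a different database
 * of the same type, mirroring dsa_fdb_present_in_other_db().
 */
static bool present_in_other_db(const struct fdb_entry *list, int n,
				const unsigned char *addr, unsigned short vid,
				const struct db *db)
{
	for (int i = 0; i < n; i++) {
		if (memcmp(list[i].addr, addr, 6) || list[i].vid != vid)
			continue;
		if (list[i].db.type == db->type && !same_db(&list[i].db, db))
			return true;
	}
	return false;
}

int main(void)
{
	unsigned char mac[6] = { 0x00, 0x04, 0x9f, 0x11, 0x22, 0x33 };
	struct fdb_entry fdbs[] = {
		{ { 0x00, 0x04, 0x9f, 0x11, 0x22, 0x33 }, 1, { DB_BRIDGE, 1 } },
	};
	struct db other_bridge = { DB_BRIDGE, 2 };

	printf("%s\n", present_in_other_db(fdbs, 1, mac, 1, &other_bridge) ?
	       "also referenced by another bridge" : "exclusive to this bridge");
	return 0;
}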
diff --git a/net/dsa/dsa2.c b/net/dsa/dsa2.c
index 34763f575c30..9570ded77774 100644
--- a/net/dsa/dsa2.c
+++ b/net/dsa/dsa2.c
@@ -71,27 +71,24 @@ int dsa_broadcast(unsigned long e, void *v)
}
/**
- * dsa_lag_map() - Map LAG netdev to a linear LAG ID
+ * dsa_lag_map() - Map LAG structure to a linear LAG array
* @dst: Tree in which to record the mapping.
- * @lag: Netdev that is to be mapped to an ID.
+ * @lag: LAG structure that is to be mapped to the tree's array.
*
- * dsa_lag_id/dsa_lag_dev can then be used to translate between the
+ * dsa_lag_id/dsa_lag_by_id can then be used to translate between the
* two spaces. The size of the mapping space is determined by the
* driver by setting ds->num_lag_ids. It is perfectly legal to leave
* it unset if it is not needed, in which case these functions become
* no-ops.
*/
-void dsa_lag_map(struct dsa_switch_tree *dst, struct net_device *lag)
+void dsa_lag_map(struct dsa_switch_tree *dst, struct dsa_lag *lag)
{
unsigned int id;
- if (dsa_lag_id(dst, lag) >= 0)
- /* Already mapped */
- return;
-
- for (id = 0; id < dst->lags_len; id++) {
- if (!dsa_lag_dev(dst, id)) {
- dst->lags[id] = lag;
+ for (id = 1; id <= dst->lags_len; id++) {
+ if (!dsa_lag_by_id(dst, id)) {
+ dst->lags[id - 1] = lag;
+ lag->id = id;
return;
}
}
@@ -107,57 +104,82 @@ void dsa_lag_map(struct dsa_switch_tree *dst, struct net_device *lag)
/**
* dsa_lag_unmap() - Remove a LAG ID mapping
* @dst: Tree in which the mapping is recorded.
- * @lag: Netdev that was mapped.
+ * @lag: LAG structure that was mapped.
*
* As there may be multiple users of the mapping, it is only removed
* if there are no other references to it.
*/
-void dsa_lag_unmap(struct dsa_switch_tree *dst, struct net_device *lag)
+void dsa_lag_unmap(struct dsa_switch_tree *dst, struct dsa_lag *lag)
{
- struct dsa_port *dp;
unsigned int id;
- dsa_lag_foreach_port(dp, dst, lag)
- /* There are remaining users of this mapping */
- return;
-
dsa_lags_foreach_id(id, dst) {
- if (dsa_lag_dev(dst, id) == lag) {
- dst->lags[id] = NULL;
+ if (dsa_lag_by_id(dst, id) == lag) {
+ dst->lags[id - 1] = NULL;
+ lag->id = 0;
break;
}
}
}
+struct dsa_lag *dsa_tree_lag_find(struct dsa_switch_tree *dst,
+ const struct net_device *lag_dev)
+{
+ struct dsa_port *dp;
+
+ list_for_each_entry(dp, &dst->ports, list)
+ if (dsa_port_lag_dev_get(dp) == lag_dev)
+ return dp->lag;
+
+ return NULL;
+}
+
+struct dsa_bridge *dsa_tree_bridge_find(struct dsa_switch_tree *dst,
+ const struct net_device *br)
+{
+ struct dsa_port *dp;
+
+ list_for_each_entry(dp, &dst->ports, list)
+ if (dsa_port_bridge_dev_get(dp) == br)
+ return dp->bridge;
+
+ return NULL;
+}
+
static int dsa_bridge_num_find(const struct net_device *bridge_dev)
{
struct dsa_switch_tree *dst;
- struct dsa_port *dp;
- /* When preparing the offload for a port, it will have a valid
- * dp->bridge_dev pointer but a not yet valid dp->bridge_num.
- * However there might be other ports having the same dp->bridge_dev
- * and a valid dp->bridge_num, so just ignore this port.
- */
- list_for_each_entry(dst, &dsa_tree_list, list)
- list_for_each_entry(dp, &dst->ports, list)
- if (dp->bridge_dev == bridge_dev &&
- dp->bridge_num != -1)
- return dp->bridge_num;
+ list_for_each_entry(dst, &dsa_tree_list, list) {
+ struct dsa_bridge *bridge;
- return -1;
+ bridge = dsa_tree_bridge_find(dst, bridge_dev);
+ if (bridge)
+ return bridge->num;
+ }
+
+ return 0;
}
-int dsa_bridge_num_get(const struct net_device *bridge_dev, int max)
+unsigned int dsa_bridge_num_get(const struct net_device *bridge_dev, int max)
{
- int bridge_num = dsa_bridge_num_find(bridge_dev);
+ unsigned int bridge_num = dsa_bridge_num_find(bridge_dev);
+
+ /* Switches without FDB isolation support don't get unique
+ * bridge numbering
+ */
+ if (!max)
+ return 0;
- if (bridge_num < 0) {
- /* First port that offloads TX forwarding for this bridge */
- bridge_num = find_first_zero_bit(&dsa_fwd_offloading_bridges,
- DSA_MAX_NUM_OFFLOADING_BRIDGES);
+ if (!bridge_num) {
+ /* First port that requests FDB isolation or TX forwarding
+ * offload for this bridge
+ */
+ bridge_num = find_next_zero_bit(&dsa_fwd_offloading_bridges,
+ DSA_MAX_NUM_OFFLOADING_BRIDGES,
+ 1);
if (bridge_num >= max)
- return -1;
+ return 0;
set_bit(bridge_num, &dsa_fwd_offloading_bridges);
}
@@ -165,13 +187,14 @@ int dsa_bridge_num_get(const struct net_device *bridge_dev, int max)
return bridge_num;
}
-void dsa_bridge_num_put(const struct net_device *bridge_dev, int bridge_num)
+void dsa_bridge_num_put(const struct net_device *bridge_dev,
+ unsigned int bridge_num)
{
- /* Check if the bridge is still in use, otherwise it is time
- * to clean it up so we can reuse this bridge_num later.
+ /* Since we refcount bridges, we know that when we call this function
+ * it is no longer in use, so we can just go ahead and remove it from
+ * the bit mask.
*/
- if (dsa_bridge_num_find(bridge_dev) < 0)
- clear_bit(bridge_num, &dsa_fwd_offloading_bridges);
+ clear_bit(bridge_num, &dsa_fwd_offloading_bridges);
}
struct dsa_switch *dsa_switch_find(int tree_index, int sw_index)
@@ -436,9 +459,6 @@ static int dsa_port_setup(struct dsa_port *dp)
if (dp->setup)
return 0;
- INIT_LIST_HEAD(&dp->fdbs);
- INIT_LIST_HEAD(&dp->mdbs);
-
if (ds->ops->port_setup) {
err = ds->ops->port_setup(ds, dp->index);
if (err)
@@ -544,7 +564,6 @@ static void dsa_port_teardown(struct dsa_port *dp)
{
struct devlink_port *dlp = &dp->devlink_port;
struct dsa_switch *ds = dp->ds;
- struct dsa_mac_addr *a, *tmp;
if (!dp->setup)
return;
@@ -573,16 +592,6 @@ static void dsa_port_teardown(struct dsa_port *dp)
break;
}
- list_for_each_entry_safe(a, tmp, &dp->fdbs, list) {
- list_del(&a->list);
- kfree(a);
- }
-
- list_for_each_entry_safe(a, tmp, &dp->mdbs, list) {
- list_del(&a->list);
- kfree(a);
- }
-
dp->setup = false;
}
@@ -802,17 +811,16 @@ static int dsa_switch_setup_tag_protocol(struct dsa_switch *ds)
{
const struct dsa_device_ops *tag_ops = ds->dst->tag_ops;
struct dsa_switch_tree *dst = ds->dst;
- int port, err;
+ struct dsa_port *cpu_dp;
+ int err;
if (tag_ops->proto == dst->default_proto)
- return 0;
-
- for (port = 0; port < ds->num_ports; port++) {
- if (!dsa_is_cpu_port(ds, port))
- continue;
+ goto connect;
+ dsa_switch_for_each_cpu_port(cpu_dp, ds) {
rtnl_lock();
- err = ds->ops->change_tag_protocol(ds, port, tag_ops->proto);
+ err = ds->ops->change_tag_protocol(ds, cpu_dp->index,
+ tag_ops->proto);
rtnl_unlock();
if (err) {
dev_err(ds->dev, "Unable to use tag protocol \"%s\": %pe\n",
@@ -821,7 +829,30 @@ static int dsa_switch_setup_tag_protocol(struct dsa_switch *ds)
}
}
+connect:
+ if (tag_ops->connect) {
+ err = tag_ops->connect(ds);
+ if (err)
+ return err;
+ }
+
+ if (ds->ops->connect_tag_protocol) {
+ err = ds->ops->connect_tag_protocol(ds, tag_ops->proto);
+ if (err) {
+ dev_err(ds->dev,
+ "Unable to connect to tag protocol \"%s\": %pe\n",
+ tag_ops->name, ERR_PTR(err));
+ goto disconnect;
+ }
+ }
+
return 0;
+
+disconnect:
+ if (tag_ops->disconnect)
+ tag_ops->disconnect(ds);
+
+ return err;
}
static int dsa_switch_setup(struct dsa_switch *ds)
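The connect/disconnect handshake added to dsa_switch_setup_tag_protocol() is a plain connect-with-rollback pattern: if the switch driver refuses the tagger after the tagger's own connect() succeeded, the tagger is disconnected again so a later attempt starts from a clean state. A minimal standalone sketch of that flow; the hook names below are placeholders for tag_ops->connect()/disconnect() and ds->ops->connect_tag_protocol().

/* Standalone sketch; hooks are placeholders for the tagger and driver ops. */
#include <stdio.h>

static int tagger_connect(void)
{
	puts("tagger: connect");
	return 0;
}

static void tagger_disconnect(void)
{
	puts("tagger: disconnect");
}

static int driver_connect(int fail)
{
	if (fail) {
		puts("driver: connect failed");
		return -1;
	}
	puts("driver: connect");
	return 0;
}

static int setup_tag_protocol(int driver_fails)
{
	int err;

	err = tagger_connect();
	if (err)
		return err;

	err = driver_connect(driver_fails);
	if (err) {
		/* Roll back the tagger state so a later retry starts clean */
		tagger_disconnect();
		return err;
	}

	return 0;
}

int main(void)
{
	setup_tag_protocol(0);
	setup_tag_protocol(1);
	return 0;
}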
@@ -932,11 +963,11 @@ static void dsa_switch_teardown(struct dsa_switch *ds)
ds->slave_mii_bus = NULL;
}
- dsa_switch_unregister_notifier(ds);
-
if (ds->ops->teardown)
ds->ops->teardown(ds);
+ dsa_switch_unregister_notifier(ds);
+
if (ds->devlink) {
list_for_each_entry(dp, &ds->dst->ports, list)
if (dp->ds == ds)
@@ -976,23 +1007,28 @@ static void dsa_tree_teardown_switches(struct dsa_switch_tree *dst)
dsa_switch_teardown(dp->ds);
}
-static int dsa_tree_setup_switches(struct dsa_switch_tree *dst)
+/* Bring shared ports up first, then non-shared ports */
+static int dsa_tree_setup_ports(struct dsa_switch_tree *dst)
{
struct dsa_port *dp;
- int err;
+ int err = 0;
list_for_each_entry(dp, &dst->ports, list) {
- err = dsa_switch_setup(dp->ds);
- if (err)
- goto teardown;
+ if (dsa_port_is_dsa(dp) || dsa_port_is_cpu(dp)) {
+ err = dsa_port_setup(dp);
+ if (err)
+ goto teardown;
+ }
}
list_for_each_entry(dp, &dst->ports, list) {
- err = dsa_port_setup(dp);
- if (err) {
- err = dsa_port_reinit_as_unused(dp);
- if (err)
- goto teardown;
+ if (dsa_port_is_user(dp) || dsa_port_is_unused(dp)) {
+ err = dsa_port_setup(dp);
+ if (err) {
+ err = dsa_port_reinit_as_unused(dp);
+ if (err)
+ goto teardown;
+ }
}
}
@@ -1001,7 +1037,21 @@ static int dsa_tree_setup_switches(struct dsa_switch_tree *dst)
teardown:
dsa_tree_teardown_ports(dst);
- dsa_tree_teardown_switches(dst);
+ return err;
+}
+
+static int dsa_tree_setup_switches(struct dsa_switch_tree *dst)
+{
+ struct dsa_port *dp;
+ int err = 0;
+
+ list_for_each_entry(dp, &dst->ports, list) {
+ err = dsa_switch_setup(dp->ds);
+ if (err) {
+ dsa_tree_teardown_switches(dst);
+ break;
+ }
+ }
return err;
}
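dsa_tree_setup_ports() above deliberately brings up shared (CPU and DSA) ports before user ports, so every user port already has a working conduit when it is set up. A simplified two-pass sketch of that ordering, with illustrative port types and a dummy setup callback:

/* Simplified sketch; port types and port_setup() are illustrative */
#include <stdio.h>

enum port_type { PORT_CPU, PORT_DSA, PORT_USER, PORT_UNUSED };

static const char *type_name(enum port_type t)
{
	switch (t) {
	case PORT_CPU:	return "cpu";
	case PORT_DSA:	return "dsa";
	case PORT_USER:	return "user";
	default:	return "unused";
	}
}

static void port_setup(int index, enum port_type type)
{
	printf("setting up port %d (%s)\n", index, type_name(type));
}

int main(void)
{
	enum port_type ports[] = { PORT_USER, PORT_USER, PORT_DSA, PORT_CPU };
	int n = sizeof(ports) / sizeof(ports[0]);
	int i;

	/* Pass 1: shared (CPU/DSA) ports, so user ports find a live conduit */
	for (i = 0; i < n; i++)
		if (ports[i] == PORT_CPU || ports[i] == PORT_DSA)
			port_setup(i, ports[i]);

	/* Pass 2: user and unused ports */
	for (i = 0; i < n; i++)
		if (ports[i] == PORT_USER || ports[i] == PORT_UNUSED)
			port_setup(i, ports[i]);

	return 0;
}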
@@ -1009,26 +1059,34 @@ teardown:
static int dsa_tree_setup_master(struct dsa_switch_tree *dst)
{
struct dsa_port *dp;
- int err;
+ int err = 0;
+
+ rtnl_lock();
list_for_each_entry(dp, &dst->ports, list) {
if (dsa_port_is_cpu(dp)) {
err = dsa_master_setup(dp->master, dp);
if (err)
- return err;
+ break;
}
}
- return 0;
+ rtnl_unlock();
+
+ return err;
}
static void dsa_tree_teardown_master(struct dsa_switch_tree *dst)
{
struct dsa_port *dp;
+ rtnl_lock();
+
list_for_each_entry(dp, &dst->ports, list)
if (dsa_port_is_cpu(dp))
dsa_master_teardown(dp->master);
+
+ rtnl_unlock();
}
static int dsa_tree_setup_lags(struct dsa_switch_tree *dst)
@@ -1080,10 +1138,14 @@ static int dsa_tree_setup(struct dsa_switch_tree *dst)
if (err)
goto teardown_cpu_ports;
- err = dsa_tree_setup_master(dst);
+ err = dsa_tree_setup_ports(dst);
if (err)
goto teardown_switches;
+ err = dsa_tree_setup_master(dst);
+ if (err)
+ goto teardown_ports;
+
err = dsa_tree_setup_lags(dst);
if (err)
goto teardown_master;
@@ -1096,8 +1158,9 @@ static int dsa_tree_setup(struct dsa_switch_tree *dst)
teardown_master:
dsa_tree_teardown_master(dst);
-teardown_switches:
+teardown_ports:
dsa_tree_teardown_ports(dst);
+teardown_switches:
dsa_tree_teardown_switches(dst);
teardown_cpu_ports:
dsa_tree_teardown_cpu_ports(dst);
@@ -1132,6 +1195,37 @@ static void dsa_tree_teardown(struct dsa_switch_tree *dst)
dst->setup = false;
}
+static int dsa_tree_bind_tag_proto(struct dsa_switch_tree *dst,
+ const struct dsa_device_ops *tag_ops)
+{
+ const struct dsa_device_ops *old_tag_ops = dst->tag_ops;
+ struct dsa_notifier_tag_proto_info info;
+ int err;
+
+ dst->tag_ops = tag_ops;
+
+ /* Notify the switches from this tree about the connection
+ * to the new tagger
+ */
+ info.tag_ops = tag_ops;
+ err = dsa_tree_notify(dst, DSA_NOTIFIER_TAG_PROTO_CONNECT, &info);
+ if (err && err != -EOPNOTSUPP)
+ goto out_disconnect;
+
+ /* Notify the old tagger about the disconnection from this tree */
+ info.tag_ops = old_tag_ops;
+ dsa_tree_notify(dst, DSA_NOTIFIER_TAG_PROTO_DISCONNECT, &info);
+
+ return 0;
+
+out_disconnect:
+ info.tag_ops = tag_ops;
+ dsa_tree_notify(dst, DSA_NOTIFIER_TAG_PROTO_DISCONNECT, &info);
+ dst->tag_ops = old_tag_ops;
+
+ return err;
+}
+
/* Since the dsa/tagging sysfs device attribute is per master, the assumption
* is that all DSA switches within a tree share the same tagger, otherwise
* they would have formed disjoint trees (different "dsa,member" values).
@@ -1157,19 +1251,22 @@ int dsa_tree_change_tag_proto(struct dsa_switch_tree *dst,
goto out_unlock;
list_for_each_entry(dp, &dst->ports, list) {
- if (!dsa_is_user_port(dp->ds, dp->index))
+ if (!dsa_port_is_user(dp))
continue;
if (dp->slave->flags & IFF_UP)
goto out_unlock;
}
+ /* Notify the tag protocol change */
info.tag_ops = tag_ops;
err = dsa_tree_notify(dst, DSA_NOTIFIER_TAG_PROTO, &info);
if (err)
goto out_unwind_tagger;
- dst->tag_ops = tag_ops;
+ err = dsa_tree_bind_tag_proto(dst, tag_ops);
+ if (err)
+ goto out_unwind_tagger;
rtnl_unlock();
@@ -1198,8 +1295,12 @@ static struct dsa_port *dsa_port_touch(struct dsa_switch *ds, int index)
dp->ds = ds;
dp->index = index;
- dp->bridge_num = -1;
+ mutex_init(&dp->addr_lists_lock);
+ mutex_init(&dp->vlans_lock);
+ INIT_LIST_HEAD(&dp->fdbs);
+ INIT_LIST_HEAD(&dp->mdbs);
+ INIT_LIST_HEAD(&dp->vlans);
INIT_LIST_HEAD(&dp->list);
list_add_tail(&dp->list, &dst->ports);
@@ -1388,7 +1489,7 @@ static int dsa_switch_parse_ports_of(struct dsa_switch *ds,
}
if (reg >= ds->num_ports) {
- dev_err(ds->dev, "port %pOF index %u exceeds num_ports (%zu)\n",
+ dev_err(ds->dev, "port %pOF index %u exceeds num_ports (%u)\n",
port, reg, ds->num_ports);
of_node_put(port);
err = -EINVAL;
@@ -1549,6 +1650,10 @@ static void dsa_switch_release_ports(struct dsa_switch *ds)
list_for_each_entry_safe(dp, next, &dst->ports, list) {
if (dp->ds != ds)
continue;
+
+ WARN_ON(!list_empty(&dp->fdbs));
+ WARN_ON(!list_empty(&dp->mdbs));
+ WARN_ON(!list_empty(&dp->vlans));
list_del(&dp->list);
kfree(dp);
}
@@ -1634,7 +1739,6 @@ EXPORT_SYMBOL_GPL(dsa_unregister_switch);
void dsa_switch_shutdown(struct dsa_switch *ds)
{
struct net_device *master, *slave_dev;
- LIST_HEAD(unregister_list);
struct dsa_port *dp;
mutex_lock(&dsa2_mutex);
@@ -1655,25 +1759,13 @@ void dsa_switch_shutdown(struct dsa_switch *ds)
slave_dev = dp->slave;
netdev_upper_dev_unlink(master, slave_dev);
- /* Just unlinking ourselves as uppers of the master is not
- * sufficient. When the master net device unregisters, that will
- * also call dev_close, which we will catch as NETDEV_GOING_DOWN
- * and trigger a dev_close on our own devices (dsa_slave_close).
- * In turn, that will call dev_mc_unsync on the master's net
- * device. If the master is also a DSA switch port, this will
- * trigger dsa_slave_set_rx_mode which will call dev_mc_sync on
- * its own master. Lockdep will complain about the fact that
- * all cascaded masters have the same dsa_master_addr_list_lock_key,
- * which it normally would not do if the cascaded masters would
- * be in a proper upper/lower relationship, which we've just
- * destroyed.
- * To suppress the lockdep warnings, let's actually unregister
- * the DSA slave interfaces too, to avoid the nonsensical
- * multicast address list synchronization on shutdown.
- */
- unregister_netdevice_queue(slave_dev, &unregister_list);
}
- unregister_netdevice_many(&unregister_list);
+
+ /* Disconnect from further netdevice notifiers on the master,
+ * since netdev_uses_dsa() will now return false.
+ */
+ dsa_switch_for_each_cpu_port(dp, ds)
+ dp->master->dsa_ptr = NULL;
rtnl_unlock();
out:
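The reworked dsa_bridge_num_get() hands out bridge numbers starting from 1 and reserves 0 as the "no FDB isolation / not numbered" value, which is why the allocation now uses find_next_zero_bit() with an offset of 1. A rough userspace model of that allocator; the kernel uses find_next_zero_bit()/set_bit()/clear_bit() on dsa_fwd_offloading_bridges rather than the open-coded masks shown here.

/* Userspace model only; bit operations are open-coded for illustration. */
#include <stdio.h>

#define MAX_BRIDGES	(8 * sizeof(unsigned long))

static unsigned long bridge_mask;	/* bit 0 stays reserved */

static unsigned int bridge_num_get(unsigned int max)
{
	unsigned int num;

	if (!max)	/* switch did not opt in to unique bridge numbers */
		return 0;

	for (num = 1; num < MAX_BRIDGES; num++) {
		if (bridge_mask & (1UL << num))
			continue;
		if (num >= max)	/* range of offloadable bridges exceeded */
			return 0;
		bridge_mask |= 1UL << num;
		return num;
	}

	return 0;
}

static void bridge_num_put(unsigned int num)
{
	bridge_mask &= ~(1UL << num);
}

int main(void)
{
	printf("first:  %u\n", bridge_num_get(4));	/* 1 */
	printf("second: %u\n", bridge_num_get(4));	/* 2 */
	bridge_num_put(1);
	printf("reused: %u\n", bridge_num_get(4));	/* 1 again */
	return 0;
}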
diff --git a/net/dsa/dsa_priv.h b/net/dsa/dsa_priv.h
index e91265434354..74b00217cb9c 100644
--- a/net/dsa/dsa_priv.h
+++ b/net/dsa/dsa_priv.h
@@ -24,8 +24,8 @@ enum {
DSA_NOTIFIER_FDB_DEL,
DSA_NOTIFIER_HOST_FDB_ADD,
DSA_NOTIFIER_HOST_FDB_DEL,
- DSA_NOTIFIER_HSR_JOIN,
- DSA_NOTIFIER_HSR_LEAVE,
+ DSA_NOTIFIER_LAG_FDB_ADD,
+ DSA_NOTIFIER_LAG_FDB_DEL,
DSA_NOTIFIER_LAG_CHANGE,
DSA_NOTIFIER_LAG_JOIN,
DSA_NOTIFIER_LAG_LEAVE,
@@ -35,12 +35,12 @@ enum {
DSA_NOTIFIER_HOST_MDB_DEL,
DSA_NOTIFIER_VLAN_ADD,
DSA_NOTIFIER_VLAN_DEL,
+ DSA_NOTIFIER_HOST_VLAN_ADD,
+ DSA_NOTIFIER_HOST_VLAN_DEL,
DSA_NOTIFIER_MTU,
DSA_NOTIFIER_TAG_PROTO,
- DSA_NOTIFIER_MRP_ADD,
- DSA_NOTIFIER_MRP_DEL,
- DSA_NOTIFIER_MRP_ADD_RING_ROLE,
- DSA_NOTIFIER_MRP_DEL_RING_ROLE,
+ DSA_NOTIFIER_TAG_PROTO_CONNECT,
+ DSA_NOTIFIER_TAG_PROTO_DISCONNECT,
DSA_NOTIFIER_TAG_8021Q_VLAN_ADD,
DSA_NOTIFIER_TAG_8021Q_VLAN_DEL,
};
@@ -52,49 +52,52 @@ struct dsa_notifier_ageing_time_info {
/* DSA_NOTIFIER_BRIDGE_* */
struct dsa_notifier_bridge_info {
- struct net_device *br;
- int tree_index;
- int sw_index;
- int port;
+ const struct dsa_port *dp;
+ struct dsa_bridge bridge;
+ bool tx_fwd_offload;
+ struct netlink_ext_ack *extack;
};
/* DSA_NOTIFIER_FDB_* */
struct dsa_notifier_fdb_info {
- int sw_index;
- int port;
+ const struct dsa_port *dp;
+ const unsigned char *addr;
+ u16 vid;
+ struct dsa_db db;
+};
+
+/* DSA_NOTIFIER_LAG_FDB_* */
+struct dsa_notifier_lag_fdb_info {
+ struct dsa_lag *lag;
const unsigned char *addr;
u16 vid;
+ struct dsa_db db;
};
/* DSA_NOTIFIER_MDB_* */
struct dsa_notifier_mdb_info {
+ const struct dsa_port *dp;
const struct switchdev_obj_port_mdb *mdb;
- int sw_index;
- int port;
+ struct dsa_db db;
};
/* DSA_NOTIFIER_LAG_* */
struct dsa_notifier_lag_info {
- struct net_device *lag;
- int sw_index;
- int port;
-
+ const struct dsa_port *dp;
+ struct dsa_lag lag;
struct netdev_lag_upper_info *info;
};
/* DSA_NOTIFIER_VLAN_* */
struct dsa_notifier_vlan_info {
+ const struct dsa_port *dp;
const struct switchdev_obj_port_vlan *vlan;
- int sw_index;
- int port;
struct netlink_ext_ack *extack;
};
/* DSA_NOTIFIER_MTU */
struct dsa_notifier_mtu_info {
- bool targeted_match;
- int sw_index;
- int port;
+ const struct dsa_port *dp;
int mtu;
};
@@ -103,32 +106,15 @@ struct dsa_notifier_tag_proto_info {
const struct dsa_device_ops *tag_ops;
};
-/* DSA_NOTIFIER_MRP_* */
-struct dsa_notifier_mrp_info {
- const struct switchdev_obj_mrp *mrp;
- int sw_index;
- int port;
-};
-
-/* DSA_NOTIFIER_MRP_* */
-struct dsa_notifier_mrp_ring_role_info {
- const struct switchdev_obj_ring_role_mrp *mrp;
- int sw_index;
- int port;
-};
-
/* DSA_NOTIFIER_TAG_8021Q_VLAN_* */
struct dsa_notifier_tag_8021q_vlan_info {
- int tree_index;
- int sw_index;
- int port;
+ const struct dsa_port *dp;
u16 vid;
};
struct dsa_switchdev_event_work {
- struct dsa_switch *ds;
- int port;
struct net_device *dev;
+ struct net_device *orig_dev;
struct work_struct work;
unsigned long event;
/* Specific for SWITCHDEV_FDB_ADD_TO_DEVICE and
@@ -139,11 +125,19 @@ struct dsa_switchdev_event_work {
bool host_addr;
};
-/* DSA_NOTIFIER_HSR_* */
-struct dsa_notifier_hsr_info {
- struct net_device *hsr;
- int sw_index;
- int port;
+enum dsa_standalone_event {
+ DSA_UC_ADD,
+ DSA_UC_DEL,
+ DSA_MC_ADD,
+ DSA_MC_DEL,
+};
+
+struct dsa_standalone_event_work {
+ struct work_struct work;
+ struct net_device *dev;
+ enum dsa_standalone_event event;
+ unsigned char addr[ETH_ALEN];
+ u16 vid;
};
struct dsa_slave_priv {
@@ -169,6 +163,8 @@ const struct dsa_device_ops *dsa_tag_driver_get(int tag_protocol);
void dsa_tag_driver_put(const struct dsa_device_ops *ops);
const struct dsa_device_ops *dsa_find_tagger_by_name(const char *buf);
+bool dsa_db_equal(const struct dsa_db *a, const struct dsa_db *b);
+
bool dsa_schedule_work(struct work_struct *work);
void dsa_flush_workqueue(void);
const char *dsa_tag_protocol_to_str(const struct dsa_device_ops *ops);
@@ -221,25 +217,36 @@ int dsa_port_vlan_filtering(struct dsa_port *dp, bool vlan_filtering,
struct netlink_ext_ack *extack);
bool dsa_port_skip_vlan_configuration(struct dsa_port *dp);
int dsa_port_ageing_time(struct dsa_port *dp, clock_t ageing_clock);
-int dsa_port_mtu_change(struct dsa_port *dp, int new_mtu,
- bool targeted_match);
+int dsa_port_mtu_change(struct dsa_port *dp, int new_mtu);
int dsa_port_fdb_add(struct dsa_port *dp, const unsigned char *addr,
u16 vid);
int dsa_port_fdb_del(struct dsa_port *dp, const unsigned char *addr,
u16 vid);
-int dsa_port_host_fdb_add(struct dsa_port *dp, const unsigned char *addr,
- u16 vid);
-int dsa_port_host_fdb_del(struct dsa_port *dp, const unsigned char *addr,
- u16 vid);
+int dsa_port_standalone_host_fdb_add(struct dsa_port *dp,
+ const unsigned char *addr, u16 vid);
+int dsa_port_standalone_host_fdb_del(struct dsa_port *dp,
+ const unsigned char *addr, u16 vid);
+int dsa_port_bridge_host_fdb_add(struct dsa_port *dp, const unsigned char *addr,
+ u16 vid);
+int dsa_port_bridge_host_fdb_del(struct dsa_port *dp, const unsigned char *addr,
+ u16 vid);
+int dsa_port_lag_fdb_add(struct dsa_port *dp, const unsigned char *addr,
+ u16 vid);
+int dsa_port_lag_fdb_del(struct dsa_port *dp, const unsigned char *addr,
+ u16 vid);
int dsa_port_fdb_dump(struct dsa_port *dp, dsa_fdb_dump_cb_t *cb, void *data);
int dsa_port_mdb_add(const struct dsa_port *dp,
const struct switchdev_obj_port_mdb *mdb);
int dsa_port_mdb_del(const struct dsa_port *dp,
const struct switchdev_obj_port_mdb *mdb);
-int dsa_port_host_mdb_add(const struct dsa_port *dp,
- const struct switchdev_obj_port_mdb *mdb);
-int dsa_port_host_mdb_del(const struct dsa_port *dp,
- const struct switchdev_obj_port_mdb *mdb);
+int dsa_port_standalone_host_mdb_add(const struct dsa_port *dp,
+ const struct switchdev_obj_port_mdb *mdb);
+int dsa_port_standalone_host_mdb_del(const struct dsa_port *dp,
+ const struct switchdev_obj_port_mdb *mdb);
+int dsa_port_bridge_host_mdb_add(const struct dsa_port *dp,
+ const struct switchdev_obj_port_mdb *mdb);
+int dsa_port_bridge_host_mdb_del(const struct dsa_port *dp,
+ const struct switchdev_obj_port_mdb *mdb);
int dsa_port_pre_bridge_flags(const struct dsa_port *dp,
struct switchdev_brport_flags flags,
struct netlink_ext_ack *extack);
@@ -251,6 +258,11 @@ int dsa_port_vlan_add(struct dsa_port *dp,
struct netlink_ext_ack *extack);
int dsa_port_vlan_del(struct dsa_port *dp,
const struct switchdev_obj_port_vlan *vlan);
+int dsa_port_host_vlan_add(struct dsa_port *dp,
+ const struct switchdev_obj_port_vlan *vlan,
+ struct netlink_ext_ack *extack);
+int dsa_port_host_vlan_del(struct dsa_port *dp,
+ const struct switchdev_obj_port_vlan *vlan);
int dsa_port_mrp_add(const struct dsa_port *dp,
const struct switchdev_obj_mrp *mrp);
int dsa_port_mrp_del(const struct dsa_port *dp,
@@ -259,54 +271,13 @@ int dsa_port_mrp_add_ring_role(const struct dsa_port *dp,
const struct switchdev_obj_ring_role_mrp *mrp);
int dsa_port_mrp_del_ring_role(const struct dsa_port *dp,
const struct switchdev_obj_ring_role_mrp *mrp);
+int dsa_port_phylink_create(struct dsa_port *dp);
int dsa_port_link_register_of(struct dsa_port *dp);
void dsa_port_link_unregister_of(struct dsa_port *dp);
int dsa_port_hsr_join(struct dsa_port *dp, struct net_device *hsr);
void dsa_port_hsr_leave(struct dsa_port *dp, struct net_device *hsr);
int dsa_port_tag_8021q_vlan_add(struct dsa_port *dp, u16 vid, bool broadcast);
void dsa_port_tag_8021q_vlan_del(struct dsa_port *dp, u16 vid, bool broadcast);
-extern const struct phylink_mac_ops dsa_port_phylink_mac_ops;
-
-static inline bool dsa_port_offloads_bridge_port(struct dsa_port *dp,
- const struct net_device *dev)
-{
- return dsa_port_to_bridge_port(dp) == dev;
-}
-
-static inline bool dsa_port_offloads_bridge(struct dsa_port *dp,
- const struct net_device *bridge_dev)
-{
- /* DSA ports connected to a bridge, and event was emitted
- * for the bridge.
- */
- return dp->bridge_dev == bridge_dev;
-}
-
-/* Returns true if any port of this tree offloads the given net_device */
-static inline bool dsa_tree_offloads_bridge_port(struct dsa_switch_tree *dst,
- const struct net_device *dev)
-{
- struct dsa_port *dp;
-
- list_for_each_entry(dp, &dst->ports, list)
- if (dsa_port_offloads_bridge_port(dp, dev))
- return true;
-
- return false;
-}
-
-/* Returns true if any port of this tree offloads the given bridge */
-static inline bool dsa_tree_offloads_bridge(struct dsa_switch_tree *dst,
- const struct net_device *bridge_dev)
-{
- struct dsa_port *dp;
-
- list_for_each_entry(dp, &dst->ports, list)
- if (dsa_port_offloads_bridge(dp, bridge_dev))
- return true;
-
- return false;
-}
/* slave.c */
extern const struct dsa_device_ops notag_netdev_ops;
@@ -346,7 +317,7 @@ dsa_slave_to_master(const struct net_device *dev)
static inline struct sk_buff *dsa_untag_bridge_pvid(struct sk_buff *skb)
{
struct dsa_port *dp = dsa_slave_to_port(skb->dev);
- struct net_device *br = dp->bridge_dev;
+ struct net_device *br = dsa_port_bridge_dev_get(dp);
struct net_device *dev = skb->dev;
struct net_device *upper_dev;
u16 vid, pvid, proto;
@@ -416,7 +387,7 @@ dsa_find_designated_bridge_port_by_vid(struct net_device *master, u16 vid)
if (dp->type != DSA_PORT_TYPE_USER)
continue;
- if (!dp->bridge_dev)
+ if (!dp->bridge)
continue;
if (dp->stp_state != BR_STATE_LEARNING &&
@@ -445,7 +416,7 @@ dsa_find_designated_bridge_port_by_vid(struct net_device *master, u16 vid)
/* If the ingress port offloads the bridge, we mark the frame as autonomously
 * forwarded by hardware, so the software bridge doesn't forward it twice, back
* to us, because we already did. However, if we're in fallback mode and we do
- * software bridging, we are not offloading it, therefore the dp->bridge_dev
+ * software bridging, we are not offloading it, therefore the dp->bridge
* pointer is not populated, and flooding needs to be done by software (we are
* effectively operating in standalone ports mode).
*/
@@ -453,7 +424,7 @@ static inline void dsa_default_offload_fwd_mark(struct sk_buff *skb)
{
struct dsa_port *dp = dsa_slave_to_port(skb->dev);
- skb->offload_fwd_mark = !!(dp->bridge_dev);
+ skb->offload_fwd_mark = !!(dp->bridge);
}
/* Helper for removing DSA header tags from packets in the RX path.
@@ -538,23 +509,38 @@ static inline void *dsa_etype_header_pos_tx(struct sk_buff *skb)
int dsa_switch_register_notifier(struct dsa_switch *ds);
void dsa_switch_unregister_notifier(struct dsa_switch *ds);
+static inline bool dsa_switch_supports_uc_filtering(struct dsa_switch *ds)
+{
+ return ds->ops->port_fdb_add && ds->ops->port_fdb_del &&
+ ds->fdb_isolation && !ds->vlan_filtering_is_global &&
+ !ds->needs_standalone_vlan_filtering;
+}
+
+static inline bool dsa_switch_supports_mc_filtering(struct dsa_switch *ds)
+{
+ return ds->ops->port_mdb_add && ds->ops->port_mdb_del &&
+ ds->fdb_isolation && !ds->vlan_filtering_is_global &&
+ !ds->needs_standalone_vlan_filtering;
+}
+
/* dsa2.c */
-void dsa_lag_map(struct dsa_switch_tree *dst, struct net_device *lag);
-void dsa_lag_unmap(struct dsa_switch_tree *dst, struct net_device *lag);
+void dsa_lag_map(struct dsa_switch_tree *dst, struct dsa_lag *lag);
+void dsa_lag_unmap(struct dsa_switch_tree *dst, struct dsa_lag *lag);
+struct dsa_lag *dsa_tree_lag_find(struct dsa_switch_tree *dst,
+ const struct net_device *lag_dev);
int dsa_tree_notify(struct dsa_switch_tree *dst, unsigned long e, void *v);
int dsa_broadcast(unsigned long e, void *v);
int dsa_tree_change_tag_proto(struct dsa_switch_tree *dst,
struct net_device *master,
const struct dsa_device_ops *tag_ops,
const struct dsa_device_ops *old_tag_ops);
-int dsa_bridge_num_get(const struct net_device *bridge_dev, int max);
-void dsa_bridge_num_put(const struct net_device *bridge_dev, int bridge_num);
+unsigned int dsa_bridge_num_get(const struct net_device *bridge_dev, int max);
+void dsa_bridge_num_put(const struct net_device *bridge_dev,
+ unsigned int bridge_num);
+struct dsa_bridge *dsa_tree_bridge_find(struct dsa_switch_tree *dst,
+ const struct net_device *br);
/* tag_8021q.c */
-int dsa_tag_8021q_bridge_join(struct dsa_switch *ds,
- struct dsa_notifier_bridge_info *info);
-int dsa_tag_8021q_bridge_leave(struct dsa_switch *ds,
- struct dsa_notifier_bridge_info *info);
int dsa_switch_tag_8021q_vlan_add(struct dsa_switch *ds,
struct dsa_notifier_tag_8021q_vlan_info *info);
int dsa_switch_tag_8021q_vlan_del(struct dsa_switch *ds,
diff --git a/net/dsa/master.c b/net/dsa/master.c
index 69ec510abe83..46b1f0455a7b 100644
--- a/net/dsa/master.c
+++ b/net/dsa/master.c
@@ -259,16 +259,21 @@ static void dsa_netdev_ops_set(struct net_device *dev,
dev->dsa_ptr->netdev_ops = ops;
}
+/* Keep the master always promiscuous if the tagging protocol requires that
+ * (garbles MAC DA) or if it doesn't support unicast filtering, case in which
+ * it would revert to promiscuous mode as soon as we call dev_uc_add() on it
+ * anyway.
+ */
static void dsa_master_set_promiscuity(struct net_device *dev, int inc)
{
const struct dsa_device_ops *ops = dev->dsa_ptr->tag_ops;
- if (!ops->promisc_on_master)
+ if ((dev->priv_flags & IFF_UNICAST_FLT) && !ops->promisc_on_master)
return;
- rtnl_lock();
+ ASSERT_RTNL();
+
dev_set_promiscuity(dev, inc);
- rtnl_unlock();
}
static ssize_t tagging_show(struct device *d, struct device_attribute *attr,
@@ -333,16 +338,12 @@ static void dsa_master_reset_mtu(struct net_device *dev)
{
int err;
- rtnl_lock();
err = dev_set_mtu(dev, ETH_DATA_LEN);
if (err)
netdev_dbg(dev,
"Unable to reset MTU to exclude DSA overheads\n");
- rtnl_unlock();
}
-static struct lock_class_key dsa_master_addr_list_lock_key;
-
int dsa_master_setup(struct net_device *dev, struct dsa_port *cpu_dp)
{
const struct dsa_device_ops *tag_ops = cpu_dp->tag_ops;
@@ -360,9 +361,11 @@ int dsa_master_setup(struct net_device *dev, struct dsa_port *cpu_dp)
"Failed to create a device link to DSA switch %s\n",
dev_name(ds->dev));
- rtnl_lock();
+ /* The switch driver may not implement ->port_change_mtu(), in which
+ * case dsa_slave_change_mtu() will not update the master MTU either,
+ * so we need to do that here.
+ */
ret = dev_set_mtu(dev, mtu);
- rtnl_unlock();
if (ret)
netdev_warn(dev, "error %d setting MTU to %d to include DSA overhead\n",
ret, mtu);
@@ -374,8 +377,6 @@ int dsa_master_setup(struct net_device *dev, struct dsa_port *cpu_dp)
wmb();
dev->dsa_ptr = cpu_dp;
- lockdep_set_class(&dev->addr_list_lock,
- &dsa_master_addr_list_lock_key);
dsa_master_set_promiscuity(dev, 1);
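The reworked dsa_master_set_promiscuity() keeps the DSA master promiscuous only when that is genuinely needed: either the tagging protocol garbles the MAC DA (promisc_on_master) or the master lacks IFF_UNICAST_FLT, in which case dev_uc_add() would force it promiscuous anyway. A small truth-table style sketch of that decision; the two booleans stand in for the tagger flag and the IFF_UNICAST_FLT bit.

/* Sketch only; booleans model IFF_UNICAST_FLT and promisc_on_master. */
#include <stdbool.h>
#include <stdio.h>

static bool master_needs_promisc(bool master_has_uc_filter,
				 bool tagger_garbles_da)
{
	/* A DA-garbling tagger defeats unicast filtering outright; a master
	 * without a unicast filter would go promiscuous on dev_uc_add()
	 * anyway, so be explicit about it.
	 */
	return tagger_garbles_da || !master_has_uc_filter;
}

int main(void)
{
	printf("garbling tagger, filtering master: %d\n",
	       master_needs_promisc(true, true));	/* 1 */
	printf("clean tagger, filtering master:    %d\n",
	       master_needs_promisc(true, false));	/* 0 */
	printf("clean tagger, no unicast filter:   %d\n",
	       master_needs_promisc(false, false));	/* 1 */
	return 0;
}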
diff --git a/net/dsa/port.c b/net/dsa/port.c
index 31e8a7a8c3e6..961546cc0252 100644
--- a/net/dsa/port.c
+++ b/net/dsa/port.c
@@ -149,7 +149,7 @@ int dsa_port_enable_rt(struct dsa_port *dp, struct phy_device *phy)
return err;
}
- if (!dp->bridge_dev)
+ if (!dp->bridge)
dsa_port_set_state_now(dp, BR_STATE_FORWARDING, false);
if (dp->pl)
@@ -177,7 +177,7 @@ void dsa_port_disable_rt(struct dsa_port *dp)
if (dp->pl)
phylink_stop(dp->pl);
- if (!dp->bridge_dev)
+ if (!dp->bridge)
dsa_port_set_state_now(dp, BR_STATE_DISABLED, false);
if (ds->ops->port_disable)
@@ -191,6 +191,59 @@ void dsa_port_disable(struct dsa_port *dp)
rtnl_unlock();
}
+static void dsa_port_reset_vlan_filtering(struct dsa_port *dp,
+ struct dsa_bridge bridge)
+{
+ struct netlink_ext_ack extack = {0};
+ bool change_vlan_filtering = false;
+ struct dsa_switch *ds = dp->ds;
+ struct dsa_port *other_dp;
+ bool vlan_filtering;
+ int err;
+
+ if (ds->needs_standalone_vlan_filtering &&
+ !br_vlan_enabled(bridge.dev)) {
+ change_vlan_filtering = true;
+ vlan_filtering = true;
+ } else if (!ds->needs_standalone_vlan_filtering &&
+ br_vlan_enabled(bridge.dev)) {
+ change_vlan_filtering = true;
+ vlan_filtering = false;
+ }
+
+ /* If the bridge was vlan_filtering, the bridge core doesn't trigger an
+ * event for changing vlan_filtering setting upon slave ports leaving
+ * it. That is a good thing, because that lets us handle it and also
+ * handle the case where the switch's vlan_filtering setting is global
+ * (not per port). When that happens, the correct moment to trigger the
+ * vlan_filtering callback is only when the last port leaves the last
+ * VLAN-aware bridge.
+ */
+ if (change_vlan_filtering && ds->vlan_filtering_is_global) {
+ dsa_switch_for_each_port(other_dp, ds) {
+ struct net_device *br = dsa_port_bridge_dev_get(other_dp);
+
+ if (br && br_vlan_enabled(br)) {
+ change_vlan_filtering = false;
+ break;
+ }
+ }
+ }
+
+ if (!change_vlan_filtering)
+ return;
+
+ err = dsa_port_vlan_filtering(dp, vlan_filtering, &extack);
+ if (extack._msg) {
+ dev_err(ds->dev, "port %d: %s\n", dp->index,
+ extack._msg);
+ }
+ if (err && err != -EOPNOTSUPP) {
+ dev_err(ds->dev,
+ "port %d failed to reset VLAN filtering to %d: %pe\n",
+ dp->index, vlan_filtering, ERR_PTR(err));
+ }
+}
+
static int dsa_port_inherit_brport_flags(struct dsa_port *dp,
struct netlink_ext_ack *extack)
{
@@ -240,7 +293,7 @@ static int dsa_port_switchdev_sync_attrs(struct dsa_port *dp,
struct netlink_ext_ack *extack)
{
struct net_device *brport_dev = dsa_port_to_bridge_port(dp);
- struct net_device *br = dp->bridge_dev;
+ struct net_device *br = dsa_port_bridge_dev_get(dp);
int err;
err = dsa_port_inherit_brport_flags(dp, extack);
@@ -262,7 +315,8 @@ static int dsa_port_switchdev_sync_attrs(struct dsa_port *dp,
return 0;
}
-static void dsa_port_switchdev_unsync_attrs(struct dsa_port *dp)
+static void dsa_port_switchdev_unsync_attrs(struct dsa_port *dp,
+ struct dsa_bridge bridge)
{
/* Configure the port for standalone mode (no address learning,
* flood everything).
@@ -282,92 +336,96 @@ static void dsa_port_switchdev_unsync_attrs(struct dsa_port *dp)
*/
dsa_port_set_state_now(dp, BR_STATE_FORWARDING, true);
- /* VLAN filtering is handled by dsa_switch_bridge_leave */
+ dsa_port_reset_vlan_filtering(dp, bridge);
/* Ageing time may be global to the switch chip, so don't change it
* here because we have no good reason (or value) to change it to.
*/
}
-static void dsa_port_bridge_tx_fwd_unoffload(struct dsa_port *dp,
- struct net_device *bridge_dev)
+static int dsa_port_bridge_create(struct dsa_port *dp,
+ struct net_device *br,
+ struct netlink_ext_ack *extack)
{
- int bridge_num = dp->bridge_num;
struct dsa_switch *ds = dp->ds;
+ struct dsa_bridge *bridge;
- /* No bridge TX forwarding offload => do nothing */
- if (!ds->ops->port_bridge_tx_fwd_unoffload || dp->bridge_num == -1)
- return;
+ bridge = dsa_tree_bridge_find(ds->dst, br);
+ if (bridge) {
+ refcount_inc(&bridge->refcount);
+ dp->bridge = bridge;
+ return 0;
+ }
- dp->bridge_num = -1;
+ bridge = kzalloc(sizeof(*bridge), GFP_KERNEL);
+ if (!bridge)
+ return -ENOMEM;
- dsa_bridge_num_put(bridge_dev, bridge_num);
+ refcount_set(&bridge->refcount, 1);
- /* Notify the chips only once the offload has been deactivated, so
- * that they can update their configuration accordingly.
- */
- ds->ops->port_bridge_tx_fwd_unoffload(ds, dp->index, bridge_dev,
- bridge_num);
+ bridge->dev = br;
+
+ bridge->num = dsa_bridge_num_get(br, ds->max_num_bridges);
+ if (ds->max_num_bridges && !bridge->num) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Range of offloadable bridges exceeded");
+ kfree(bridge);
+ return -EOPNOTSUPP;
+ }
+
+ dp->bridge = bridge;
+
+ return 0;
}
-static bool dsa_port_bridge_tx_fwd_offload(struct dsa_port *dp,
- struct net_device *bridge_dev)
+static void dsa_port_bridge_destroy(struct dsa_port *dp,
+ const struct net_device *br)
{
- struct dsa_switch *ds = dp->ds;
- int bridge_num, err;
+ struct dsa_bridge *bridge = dp->bridge;
- if (!ds->ops->port_bridge_tx_fwd_offload)
- return false;
+ dp->bridge = NULL;
- bridge_num = dsa_bridge_num_get(bridge_dev,
- ds->num_fwd_offloading_bridges);
- if (bridge_num < 0)
- return false;
-
- dp->bridge_num = bridge_num;
+ if (!refcount_dec_and_test(&bridge->refcount))
+ return;
- /* Notify the driver */
- err = ds->ops->port_bridge_tx_fwd_offload(ds, dp->index, bridge_dev,
- bridge_num);
- if (err) {
- dsa_port_bridge_tx_fwd_unoffload(dp, bridge_dev);
- return false;
- }
+ if (bridge->num)
+ dsa_bridge_num_put(br, bridge->num);
- return true;
+ kfree(bridge);
}
int dsa_port_bridge_join(struct dsa_port *dp, struct net_device *br,
struct netlink_ext_ack *extack)
{
struct dsa_notifier_bridge_info info = {
- .tree_index = dp->ds->dst->index,
- .sw_index = dp->ds->index,
- .port = dp->index,
- .br = br,
+ .dp = dp,
+ .extack = extack,
};
struct net_device *dev = dp->slave;
struct net_device *brport_dev;
- bool tx_fwd_offload;
int err;
/* Here the interface is already bridged. Reflect the current
* configuration so that drivers can program their chips accordingly.
*/
- dp->bridge_dev = br;
+ err = dsa_port_bridge_create(dp, br, extack);
+ if (err)
+ return err;
brport_dev = dsa_port_to_bridge_port(dp);
+ info.bridge = *dp->bridge;
err = dsa_broadcast(DSA_NOTIFIER_BRIDGE_JOIN, &info);
if (err)
goto out_rollback;
- tx_fwd_offload = dsa_port_bridge_tx_fwd_offload(dp, br);
+ /* Drivers which support bridge TX forwarding should set this */
+ dp->bridge->tx_fwd_offload = info.tx_fwd_offload;
err = switchdev_bridge_port_offload(brport_dev, dev, dp,
&dsa_slave_switchdev_notifier,
&dsa_slave_switchdev_blocking_notifier,
- tx_fwd_offload, extack);
+ dp->bridge->tx_fwd_offload, extack);
if (err)
goto out_rollback_unbridge;
@@ -384,7 +442,7 @@ out_rollback_unoffload:
out_rollback_unbridge:
dsa_broadcast(DSA_NOTIFIER_BRIDGE_LEAVE, &info);
out_rollback:
- dp->bridge_dev = NULL;
+ dsa_port_bridge_destroy(dp, br);
return err;
}
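dsa_port_bridge_create()/dsa_port_bridge_destroy() above replace the per-port bridge_dev pointer with a refcounted struct dsa_bridge shared by all ports under the same bridge: the first joining port allocates it, later ports only take a reference, and the last leaving port releases the bridge number and frees it. A simplified standalone model of that lifecycle; a single cached object stands in for dsa_tree_bridge_find() and error handling is omitted.

/* Simplified standalone model; allocation failures are not handled. */
#include <stdio.h>
#include <stdlib.h>

struct bridge_obj {
	const char *name;
	unsigned int num;
	unsigned int refcount;
};

static struct bridge_obj *cached;	/* stands in for dsa_tree_bridge_find() */

static struct bridge_obj *bridge_get(const char *name, unsigned int num)
{
	if (cached) {			/* another port already joined */
		cached->refcount++;
		return cached;
	}

	cached = calloc(1, sizeof(*cached));
	cached->name = name;
	cached->num = num;
	cached->refcount = 1;
	return cached;
}

static void bridge_put(struct bridge_obj *br)
{
	if (--br->refcount)
		return;

	printf("last port left %s, releasing bridge number %u\n",
	       br->name, br->num);
	free(br);
	cached = NULL;
}

int main(void)
{
	struct bridge_obj *p1 = bridge_get("br0", 1);	/* allocates */
	struct bridge_obj *p2 = bridge_get("br0", 1);	/* shares */

	printf("shared object %s, refcount %u\n", p1->name, p2->refcount);
	bridge_put(p1);
	bridge_put(p2);					/* frees here */
	return 0;
}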
@@ -399,24 +457,29 @@ void dsa_port_pre_bridge_leave(struct dsa_port *dp, struct net_device *br)
switchdev_bridge_port_unoffload(brport_dev, dp,
&dsa_slave_switchdev_notifier,
&dsa_slave_switchdev_blocking_notifier);
+
+ dsa_flush_workqueue();
}
void dsa_port_bridge_leave(struct dsa_port *dp, struct net_device *br)
{
struct dsa_notifier_bridge_info info = {
- .tree_index = dp->ds->dst->index,
- .sw_index = dp->ds->index,
- .port = dp->index,
- .br = br,
+ .dp = dp,
};
int err;
+ /* If the port could not be offloaded to begin with, then
+ * there is nothing to do.
+ */
+ if (!dp->bridge)
+ return;
+
+ info.bridge = *dp->bridge;
+
/* Here the port is already unbridged. Reflect the current configuration
* so that drivers can program their chips accordingly.
*/
- dp->bridge_dev = NULL;
-
- dsa_port_bridge_tx_fwd_unoffload(dp, br);
+ dsa_port_bridge_destroy(dp, br);
err = dsa_broadcast(DSA_NOTIFIER_BRIDGE_LEAVE, &info);
if (err)
@@ -424,19 +487,18 @@ void dsa_port_bridge_leave(struct dsa_port *dp, struct net_device *br)
"port %d failed to notify DSA_NOTIFIER_BRIDGE_LEAVE: %pe\n",
dp->index, ERR_PTR(err));
- dsa_port_switchdev_unsync_attrs(dp);
+ dsa_port_switchdev_unsync_attrs(dp, info.bridge);
}
int dsa_port_lag_change(struct dsa_port *dp,
struct netdev_lag_lower_state_info *linfo)
{
struct dsa_notifier_lag_info info = {
- .sw_index = dp->ds->index,
- .port = dp->index,
+ .dp = dp,
};
bool tx_enabled;
- if (!dp->lag_dev)
+ if (!dp->lag)
return 0;
/* On statically configured aggregates (e.g. loadbalance
@@ -454,27 +516,69 @@ int dsa_port_lag_change(struct dsa_port *dp,
return dsa_port_notify(dp, DSA_NOTIFIER_LAG_CHANGE, &info);
}
-int dsa_port_lag_join(struct dsa_port *dp, struct net_device *lag,
+static int dsa_port_lag_create(struct dsa_port *dp,
+ struct net_device *lag_dev)
+{
+ struct dsa_switch *ds = dp->ds;
+ struct dsa_lag *lag;
+
+ lag = dsa_tree_lag_find(ds->dst, lag_dev);
+ if (lag) {
+ refcount_inc(&lag->refcount);
+ dp->lag = lag;
+ return 0;
+ }
+
+ lag = kzalloc(sizeof(*lag), GFP_KERNEL);
+ if (!lag)
+ return -ENOMEM;
+
+ refcount_set(&lag->refcount, 1);
+ mutex_init(&lag->fdb_lock);
+ INIT_LIST_HEAD(&lag->fdbs);
+ lag->dev = lag_dev;
+ dsa_lag_map(ds->dst, lag);
+ dp->lag = lag;
+
+ return 0;
+}
+
+static void dsa_port_lag_destroy(struct dsa_port *dp)
+{
+ struct dsa_lag *lag = dp->lag;
+
+ dp->lag = NULL;
+ dp->lag_tx_enabled = false;
+
+ if (!refcount_dec_and_test(&lag->refcount))
+ return;
+
+ WARN_ON(!list_empty(&lag->fdbs));
+ dsa_lag_unmap(dp->ds->dst, lag);
+ kfree(lag);
+}
+
+int dsa_port_lag_join(struct dsa_port *dp, struct net_device *lag_dev,
struct netdev_lag_upper_info *uinfo,
struct netlink_ext_ack *extack)
{
struct dsa_notifier_lag_info info = {
- .sw_index = dp->ds->index,
- .port = dp->index,
- .lag = lag,
+ .dp = dp,
.info = uinfo,
};
struct net_device *bridge_dev;
int err;
- dsa_lag_map(dp->ds->dst, lag);
- dp->lag_dev = lag;
+ err = dsa_port_lag_create(dp, lag_dev);
+ if (err)
+ goto err_lag_create;
+ info.lag = *dp->lag;
err = dsa_port_notify(dp, DSA_NOTIFIER_LAG_JOIN, &info);
if (err)
goto err_lag_join;
- bridge_dev = netdev_master_upper_dev_get(lag);
+ bridge_dev = netdev_master_upper_dev_get(lag_dev);
if (!bridge_dev || !netif_is_bridge_master(bridge_dev))
return 0;
@@ -487,45 +591,45 @@ int dsa_port_lag_join(struct dsa_port *dp, struct net_device *lag,
err_bridge_join:
dsa_port_notify(dp, DSA_NOTIFIER_LAG_LEAVE, &info);
err_lag_join:
- dp->lag_dev = NULL;
- dsa_lag_unmap(dp->ds->dst, lag);
+ dsa_port_lag_destroy(dp);
+err_lag_create:
return err;
}
-void dsa_port_pre_lag_leave(struct dsa_port *dp, struct net_device *lag)
+void dsa_port_pre_lag_leave(struct dsa_port *dp, struct net_device *lag_dev)
{
- if (dp->bridge_dev)
- dsa_port_pre_bridge_leave(dp, dp->bridge_dev);
+ struct net_device *br = dsa_port_bridge_dev_get(dp);
+
+ if (br)
+ dsa_port_pre_bridge_leave(dp, br);
}
-void dsa_port_lag_leave(struct dsa_port *dp, struct net_device *lag)
+void dsa_port_lag_leave(struct dsa_port *dp, struct net_device *lag_dev)
{
+ struct net_device *br = dsa_port_bridge_dev_get(dp);
struct dsa_notifier_lag_info info = {
- .sw_index = dp->ds->index,
- .port = dp->index,
- .lag = lag,
+ .dp = dp,
};
int err;
- if (!dp->lag_dev)
+ if (!dp->lag)
return;
/* Port might have been part of a LAG that in turn was
* attached to a bridge.
*/
- if (dp->bridge_dev)
- dsa_port_bridge_leave(dp, dp->bridge_dev);
+ if (br)
+ dsa_port_bridge_leave(dp, br);
- dp->lag_tx_enabled = false;
- dp->lag_dev = NULL;
+ info.lag = *dp->lag;
+
+ dsa_port_lag_destroy(dp);
err = dsa_port_notify(dp, DSA_NOTIFIER_LAG_LEAVE, &info);
if (err)
dev_err(dp->ds->dev,
"port %d failed to notify DSA_NOTIFIER_LAG_LEAVE: %pe\n",
dp->index, ERR_PTR(err));
-
- dsa_lag_unmap(dp->ds->dst, lag);
}
/* Must be called under rcu_read_lock() */
@@ -534,16 +638,17 @@ static bool dsa_port_can_apply_vlan_filtering(struct dsa_port *dp,
struct netlink_ext_ack *extack)
{
struct dsa_switch *ds = dp->ds;
- int err, i;
+ struct dsa_port *other_dp;
+ int err;
/* VLAN awareness was off, so the question is "can we turn it on".
* We may have had 8021q uppers, those need to go. Make sure we don't
* enter an inconsistent state: deny changing the VLAN awareness state
* as long as we have 8021q uppers.
*/
- if (vlan_filtering && dsa_is_user_port(ds, dp->index)) {
+ if (vlan_filtering && dsa_port_is_user(dp)) {
+ struct net_device *br = dsa_port_bridge_dev_get(dp);
struct net_device *upper_dev, *slave = dp->slave;
- struct net_device *br = dp->bridge_dev;
struct list_head *iter;
netdev_for_each_upper_dev_rcu(slave, upper_dev, iter) {
@@ -576,18 +681,16 @@ static bool dsa_port_can_apply_vlan_filtering(struct dsa_port *dp,
* different ports of the same switch device and one of them has a
* different setting than what is being requested.
*/
- for (i = 0; i < ds->num_ports; i++) {
- struct net_device *other_bridge;
+ dsa_switch_for_each_port(other_dp, ds) {
+ struct net_device *other_br = dsa_port_bridge_dev_get(other_dp);
- other_bridge = dsa_to_port(ds, i)->bridge_dev;
- if (!other_bridge)
- continue;
/* If it's the same bridge, it also has same
* vlan_filtering setting => no need to check
*/
- if (other_bridge == dp->bridge_dev)
+ if (!other_br || other_br == dsa_port_bridge_dev_get(dp))
continue;
- if (br_vlan_enabled(other_bridge) != vlan_filtering) {
+
+ if (br_vlan_enabled(other_br) != vlan_filtering) {
NL_SET_ERR_MSG_MOD(extack,
"VLAN filtering is a global setting");
return false;
@@ -626,20 +729,16 @@ int dsa_port_vlan_filtering(struct dsa_port *dp, bool vlan_filtering,
return err;
if (ds->vlan_filtering_is_global) {
- int port;
+ struct dsa_port *other_dp;
ds->vlan_filtering = vlan_filtering;
- for (port = 0; port < ds->num_ports; port++) {
- struct net_device *slave;
-
- if (!dsa_is_user_port(ds, port))
- continue;
+ dsa_switch_for_each_user_port(other_dp, ds) {
+ struct net_device *slave = other_dp->slave;
/* We might be called in the unbind path, so not
* all slave devices might still be registered.
*/
- slave = dsa_to_port(ds, port)->slave;
if (!slave)
continue;
@@ -675,13 +774,13 @@ restore:
*/
bool dsa_port_skip_vlan_configuration(struct dsa_port *dp)
{
+ struct net_device *br = dsa_port_bridge_dev_get(dp);
struct dsa_switch *ds = dp->ds;
- if (!dp->bridge_dev)
+ if (!br)
return false;
- return (!ds->configure_vlan_while_not_filtering &&
- !br_vlan_enabled(dp->bridge_dev));
+ return !ds->configure_vlan_while_not_filtering && !br_vlan_enabled(br);
}
int dsa_port_ageing_time(struct dsa_port *dp, clock_t ageing_clock)
@@ -745,13 +844,10 @@ int dsa_port_bridge_flags(struct dsa_port *dp,
return 0;
}
-int dsa_port_mtu_change(struct dsa_port *dp, int new_mtu,
- bool targeted_match)
+int dsa_port_mtu_change(struct dsa_port *dp, int new_mtu)
{
struct dsa_notifier_mtu_info info = {
- .sw_index = dp->ds->index,
- .targeted_match = targeted_match,
- .port = dp->index,
+ .dp = dp,
.mtu = new_mtu,
};
@@ -762,12 +858,22 @@ int dsa_port_fdb_add(struct dsa_port *dp, const unsigned char *addr,
u16 vid)
{
struct dsa_notifier_fdb_info info = {
- .sw_index = dp->ds->index,
- .port = dp->index,
+ .dp = dp,
.addr = addr,
.vid = vid,
+ .db = {
+ .type = DSA_DB_BRIDGE,
+ .bridge = *dp->bridge,
+ },
};
+ /* Refcounting takes bridge.num as a key, and should be global for all
+ * bridges in the absence of FDB isolation, and per bridge otherwise.
+ * Force the bridge.num to zero here in the absence of FDB isolation.
+ */
+ if (!dp->ds->fdb_isolation)
+ info.db.bridge.num = 0;
+
return dsa_port_notify(dp, DSA_NOTIFIER_FDB_ADD, &info);
}
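The fdb_isolation handling above determines the key used for cross-bridge refcounting: without FDB isolation every bridge's entries are forced to bridge number 0, so the same {addr, vid} installed via two bridges collapses into a single refcounted hardware entry, while with isolation each bridge keeps its own database. A tiny sketch of that keying rule; the helper below is illustrative, not a kernel function.

/* Illustrative helper, not a kernel function */
#include <stdio.h>

static unsigned int refcount_key(unsigned int bridge_num, int fdb_isolation)
{
	return fdb_isolation ? bridge_num : 0;
}

int main(void)
{
	/* No isolation: both bridges map to key 0, one shared entry */
	printf("no isolation: br1 -> %u, br2 -> %u\n",
	       refcount_key(1, 0), refcount_key(2, 0));
	/* Isolation: each bridge keeps its own database */
	printf("isolation:    br1 -> %u, br2 -> %u\n",
	       refcount_key(1, 1), refcount_key(2, 1));
	return 0;
}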
@@ -775,52 +881,155 @@ int dsa_port_fdb_del(struct dsa_port *dp, const unsigned char *addr,
u16 vid)
{
struct dsa_notifier_fdb_info info = {
- .sw_index = dp->ds->index,
- .port = dp->index,
+ .dp = dp,
.addr = addr,
.vid = vid,
-
+ .db = {
+ .type = DSA_DB_BRIDGE,
+ .bridge = *dp->bridge,
+ },
};
+ if (!dp->ds->fdb_isolation)
+ info.db.bridge.num = 0;
+
return dsa_port_notify(dp, DSA_NOTIFIER_FDB_DEL, &info);
}
-int dsa_port_host_fdb_add(struct dsa_port *dp, const unsigned char *addr,
- u16 vid)
+static int dsa_port_host_fdb_add(struct dsa_port *dp,
+ const unsigned char *addr, u16 vid,
+ struct dsa_db db)
{
struct dsa_notifier_fdb_info info = {
- .sw_index = dp->ds->index,
- .port = dp->index,
+ .dp = dp,
.addr = addr,
.vid = vid,
+ .db = db,
+ };
+
+ if (!dp->ds->fdb_isolation)
+ info.db.bridge.num = 0;
+
+ return dsa_port_notify(dp, DSA_NOTIFIER_HOST_FDB_ADD, &info);
+}
+
+int dsa_port_standalone_host_fdb_add(struct dsa_port *dp,
+ const unsigned char *addr, u16 vid)
+{
+ struct dsa_db db = {
+ .type = DSA_DB_PORT,
+ .dp = dp,
};
+
+ return dsa_port_host_fdb_add(dp, addr, vid, db);
+}
+
+int dsa_port_bridge_host_fdb_add(struct dsa_port *dp,
+ const unsigned char *addr, u16 vid)
+{
struct dsa_port *cpu_dp = dp->cpu_dp;
+ struct dsa_db db = {
+ .type = DSA_DB_BRIDGE,
+ .bridge = *dp->bridge,
+ };
int err;
- err = dev_uc_add(cpu_dp->master, addr);
- if (err)
- return err;
+ /* Avoid a call to __dev_set_promiscuity() on the master, which
+ * requires rtnl_lock(), since we can't guarantee that is held here,
+ * and we can't take it either.
+ */
+ if (cpu_dp->master->priv_flags & IFF_UNICAST_FLT) {
+ err = dev_uc_add(cpu_dp->master, addr);
+ if (err)
+ return err;
+ }
- return dsa_port_notify(dp, DSA_NOTIFIER_HOST_FDB_ADD, &info);
+ return dsa_port_host_fdb_add(dp, addr, vid, db);
}
-int dsa_port_host_fdb_del(struct dsa_port *dp, const unsigned char *addr,
- u16 vid)
+static int dsa_port_host_fdb_del(struct dsa_port *dp,
+ const unsigned char *addr, u16 vid,
+ struct dsa_db db)
{
struct dsa_notifier_fdb_info info = {
- .sw_index = dp->ds->index,
- .port = dp->index,
+ .dp = dp,
.addr = addr,
.vid = vid,
+ .db = db,
};
+
+ if (!dp->ds->fdb_isolation)
+ info.db.bridge.num = 0;
+
+ return dsa_port_notify(dp, DSA_NOTIFIER_HOST_FDB_DEL, &info);
+}
+
+int dsa_port_standalone_host_fdb_del(struct dsa_port *dp,
+ const unsigned char *addr, u16 vid)
+{
+ struct dsa_db db = {
+ .type = DSA_DB_PORT,
+ .dp = dp,
+ };
+
+ return dsa_port_host_fdb_del(dp, addr, vid, db);
+}
+
+int dsa_port_bridge_host_fdb_del(struct dsa_port *dp,
+ const unsigned char *addr, u16 vid)
+{
struct dsa_port *cpu_dp = dp->cpu_dp;
+ struct dsa_db db = {
+ .type = DSA_DB_BRIDGE,
+ .bridge = *dp->bridge,
+ };
int err;
- err = dev_uc_del(cpu_dp->master, addr);
- if (err)
- return err;
+ if (cpu_dp->master->priv_flags & IFF_UNICAST_FLT) {
+ err = dev_uc_del(cpu_dp->master, addr);
+ if (err)
+ return err;
+ }
- return dsa_port_notify(dp, DSA_NOTIFIER_HOST_FDB_DEL, &info);
+ return dsa_port_host_fdb_del(dp, addr, vid, db);
+}
+
+int dsa_port_lag_fdb_add(struct dsa_port *dp, const unsigned char *addr,
+ u16 vid)
+{
+ struct dsa_notifier_lag_fdb_info info = {
+ .lag = dp->lag,
+ .addr = addr,
+ .vid = vid,
+ .db = {
+ .type = DSA_DB_BRIDGE,
+ .bridge = *dp->bridge,
+ },
+ };
+
+ if (!dp->ds->fdb_isolation)
+ info.db.bridge.num = 0;
+
+ return dsa_port_notify(dp, DSA_NOTIFIER_LAG_FDB_ADD, &info);
+}
+
+int dsa_port_lag_fdb_del(struct dsa_port *dp, const unsigned char *addr,
+ u16 vid)
+{
+ struct dsa_notifier_lag_fdb_info info = {
+ .lag = dp->lag,
+ .addr = addr,
+ .vid = vid,
+ .db = {
+ .type = DSA_DB_BRIDGE,
+ .bridge = *dp->bridge,
+ },
+ };
+
+ if (!dp->ds->fdb_isolation)
+ info.db.bridge.num = 0;
+
+ return dsa_port_notify(dp, DSA_NOTIFIER_LAG_FDB_DEL, &info);
}
int dsa_port_fdb_dump(struct dsa_port *dp, dsa_fdb_dump_cb_t *cb, void *data)
@@ -838,11 +1047,17 @@ int dsa_port_mdb_add(const struct dsa_port *dp,
const struct switchdev_obj_port_mdb *mdb)
{
struct dsa_notifier_mdb_info info = {
- .sw_index = dp->ds->index,
- .port = dp->index,
+ .dp = dp,
.mdb = mdb,
+ .db = {
+ .type = DSA_DB_BRIDGE,
+ .bridge = *dp->bridge,
+ },
};
+ if (!dp->ds->fdb_isolation)
+ info.db.bridge.num = 0;
+
return dsa_port_notify(dp, DSA_NOTIFIER_MDB_ADD, &info);
}
@@ -850,48 +1065,106 @@ int dsa_port_mdb_del(const struct dsa_port *dp,
const struct switchdev_obj_port_mdb *mdb)
{
struct dsa_notifier_mdb_info info = {
- .sw_index = dp->ds->index,
- .port = dp->index,
+ .dp = dp,
.mdb = mdb,
+ .db = {
+ .type = DSA_DB_BRIDGE,
+ .bridge = *dp->bridge,
+ },
};
+ if (!dp->ds->fdb_isolation)
+ info.db.bridge.num = 0;
+
return dsa_port_notify(dp, DSA_NOTIFIER_MDB_DEL, &info);
}
-int dsa_port_host_mdb_add(const struct dsa_port *dp,
- const struct switchdev_obj_port_mdb *mdb)
+static int dsa_port_host_mdb_add(const struct dsa_port *dp,
+ const struct switchdev_obj_port_mdb *mdb,
+ struct dsa_db db)
{
struct dsa_notifier_mdb_info info = {
- .sw_index = dp->ds->index,
- .port = dp->index,
+ .dp = dp,
.mdb = mdb,
+ .db = db,
+ };
+
+ if (!dp->ds->fdb_isolation)
+ info.db.bridge.num = 0;
+
+ return dsa_port_notify(dp, DSA_NOTIFIER_HOST_MDB_ADD, &info);
+}
+
+int dsa_port_standalone_host_mdb_add(const struct dsa_port *dp,
+ const struct switchdev_obj_port_mdb *mdb)
+{
+ struct dsa_db db = {
+ .type = DSA_DB_PORT,
+ .dp = dp,
};
+
+ return dsa_port_host_mdb_add(dp, mdb, db);
+}
+
+int dsa_port_bridge_host_mdb_add(const struct dsa_port *dp,
+ const struct switchdev_obj_port_mdb *mdb)
+{
struct dsa_port *cpu_dp = dp->cpu_dp;
+ struct dsa_db db = {
+ .type = DSA_DB_BRIDGE,
+ .bridge = *dp->bridge,
+ };
int err;
err = dev_mc_add(cpu_dp->master, mdb->addr);
if (err)
return err;
- return dsa_port_notify(dp, DSA_NOTIFIER_HOST_MDB_ADD, &info);
+ return dsa_port_host_mdb_add(dp, mdb, db);
}
-int dsa_port_host_mdb_del(const struct dsa_port *dp,
- const struct switchdev_obj_port_mdb *mdb)
+static int dsa_port_host_mdb_del(const struct dsa_port *dp,
+ const struct switchdev_obj_port_mdb *mdb,
+ struct dsa_db db)
{
struct dsa_notifier_mdb_info info = {
- .sw_index = dp->ds->index,
- .port = dp->index,
+ .dp = dp,
.mdb = mdb,
+ .db = db,
};
+
+ if (!dp->ds->fdb_isolation)
+ info.db.bridge.num = 0;
+
+ return dsa_port_notify(dp, DSA_NOTIFIER_HOST_MDB_DEL, &info);
+}
+
+int dsa_port_standalone_host_mdb_del(const struct dsa_port *dp,
+ const struct switchdev_obj_port_mdb *mdb)
+{
+ struct dsa_db db = {
+ .type = DSA_DB_PORT,
+ .dp = dp,
+ };
+
+ return dsa_port_host_mdb_del(dp, mdb, db);
+}
+
+int dsa_port_bridge_host_mdb_del(const struct dsa_port *dp,
+ const struct switchdev_obj_port_mdb *mdb)
+{
struct dsa_port *cpu_dp = dp->cpu_dp;
+ struct dsa_db db = {
+ .type = DSA_DB_BRIDGE,
+ .bridge = *dp->bridge,
+ };
int err;
err = dev_mc_del(cpu_dp->master, mdb->addr);
if (err)
return err;
- return dsa_port_notify(dp, DSA_NOTIFIER_HOST_MDB_DEL, &info);
+ return dsa_port_host_mdb_del(dp, mdb, db);
}
int dsa_port_vlan_add(struct dsa_port *dp,
@@ -899,8 +1172,7 @@ int dsa_port_vlan_add(struct dsa_port *dp,
struct netlink_ext_ack *extack)
{
struct dsa_notifier_vlan_info info = {
- .sw_index = dp->ds->index,
- .port = dp->index,
+ .dp = dp,
.vlan = vlan,
.extack = extack,
};
@@ -912,60 +1184,95 @@ int dsa_port_vlan_del(struct dsa_port *dp,
const struct switchdev_obj_port_vlan *vlan)
{
struct dsa_notifier_vlan_info info = {
- .sw_index = dp->ds->index,
- .port = dp->index,
+ .dp = dp,
.vlan = vlan,
};
return dsa_port_notify(dp, DSA_NOTIFIER_VLAN_DEL, &info);
}
+int dsa_port_host_vlan_add(struct dsa_port *dp,
+ const struct switchdev_obj_port_vlan *vlan,
+ struct netlink_ext_ack *extack)
+{
+ struct dsa_notifier_vlan_info info = {
+ .dp = dp,
+ .vlan = vlan,
+ .extack = extack,
+ };
+ struct dsa_port *cpu_dp = dp->cpu_dp;
+ int err;
+
+ err = dsa_port_notify(dp, DSA_NOTIFIER_HOST_VLAN_ADD, &info);
+ if (err && err != -EOPNOTSUPP)
+ return err;
+
+ vlan_vid_add(cpu_dp->master, htons(ETH_P_8021Q), vlan->vid);
+
+ return err;
+}
+
+int dsa_port_host_vlan_del(struct dsa_port *dp,
+ const struct switchdev_obj_port_vlan *vlan)
+{
+ struct dsa_notifier_vlan_info info = {
+ .dp = dp,
+ .vlan = vlan,
+ };
+ struct dsa_port *cpu_dp = dp->cpu_dp;
+ int err;
+
+ err = dsa_port_notify(dp, DSA_NOTIFIER_HOST_VLAN_DEL, &info);
+ if (err && err != -EOPNOTSUPP)
+ return err;
+
+ vlan_vid_del(cpu_dp->master, htons(ETH_P_8021Q), vlan->vid);
+
+ return err;
+}
+
int dsa_port_mrp_add(const struct dsa_port *dp,
const struct switchdev_obj_mrp *mrp)
{
- struct dsa_notifier_mrp_info info = {
- .sw_index = dp->ds->index,
- .port = dp->index,
- .mrp = mrp,
- };
+ struct dsa_switch *ds = dp->ds;
+
+ if (!ds->ops->port_mrp_add)
+ return -EOPNOTSUPP;
- return dsa_port_notify(dp, DSA_NOTIFIER_MRP_ADD, &info);
+ return ds->ops->port_mrp_add(ds, dp->index, mrp);
}
int dsa_port_mrp_del(const struct dsa_port *dp,
const struct switchdev_obj_mrp *mrp)
{
- struct dsa_notifier_mrp_info info = {
- .sw_index = dp->ds->index,
- .port = dp->index,
- .mrp = mrp,
- };
+ struct dsa_switch *ds = dp->ds;
+
+ if (!ds->ops->port_mrp_del)
+ return -EOPNOTSUPP;
- return dsa_port_notify(dp, DSA_NOTIFIER_MRP_DEL, &info);
+ return ds->ops->port_mrp_del(ds, dp->index, mrp);
}
int dsa_port_mrp_add_ring_role(const struct dsa_port *dp,
const struct switchdev_obj_ring_role_mrp *mrp)
{
- struct dsa_notifier_mrp_ring_role_info info = {
- .sw_index = dp->ds->index,
- .port = dp->index,
- .mrp = mrp,
- };
+ struct dsa_switch *ds = dp->ds;
+
+ if (!ds->ops->port_mrp_add_ring_role)
+ return -EOPNOTSUPP;
- return dsa_port_notify(dp, DSA_NOTIFIER_MRP_ADD_RING_ROLE, &info);
+ return ds->ops->port_mrp_add_ring_role(ds, dp->index, mrp);
}
int dsa_port_mrp_del_ring_role(const struct dsa_port *dp,
const struct switchdev_obj_ring_role_mrp *mrp)
{
- struct dsa_notifier_mrp_ring_role_info info = {
- .sw_index = dp->ds->index,
- .port = dp->index,
- .mrp = mrp,
- };
+ struct dsa_switch *ds = dp->ds;
- return dsa_port_notify(dp, DSA_NOTIFIER_MRP_DEL_RING_ROLE, &info);
+ if (!ds->ops->port_mrp_del_ring_role)
+ return -EOPNOTSUPP;
+
+ return ds->ops->port_mrp_del_ring_role(ds, dp->index, mrp);
}
void dsa_port_set_tag_protocol(struct dsa_port *cpu_dp,
@@ -1060,7 +1367,7 @@ static void dsa_port_phylink_mac_link_down(struct phylink_config *config,
struct phy_device *phydev = NULL;
struct dsa_switch *ds = dp->ds;
- if (dsa_is_user_port(ds, dp->index))
+ if (dsa_port_is_user(dp))
phydev = dp->slave->phydev;
if (!ds->ops->phylink_mac_link_down) {
@@ -1092,7 +1399,7 @@ static void dsa_port_phylink_mac_link_up(struct phylink_config *config,
speed, duplex, tx_pause, rx_pause);
}
-const struct phylink_mac_ops dsa_port_phylink_mac_ops = {
+static const struct phylink_mac_ops dsa_port_phylink_mac_ops = {
.validate = dsa_port_phylink_validate,
.mac_pcs_get_state = dsa_port_phylink_mac_pcs_get_state,
.mac_config = dsa_port_phylink_mac_config,
@@ -1101,6 +1408,36 @@ const struct phylink_mac_ops dsa_port_phylink_mac_ops = {
.mac_link_up = dsa_port_phylink_mac_link_up,
};
+int dsa_port_phylink_create(struct dsa_port *dp)
+{
+ struct dsa_switch *ds = dp->ds;
+ phy_interface_t mode;
+ int err;
+
+ err = of_get_phy_mode(dp->dn, &mode);
+ if (err)
+ mode = PHY_INTERFACE_MODE_NA;
+
+ /* Presence of phylink_mac_link_state or phylink_mac_an_restart is
+ * an indicator of a legacy phylink driver.
+ */
+ if (ds->ops->phylink_mac_link_state ||
+ ds->ops->phylink_mac_an_restart)
+ dp->pl_config.legacy_pre_march2020 = true;
+
+ if (ds->ops->phylink_get_caps)
+ ds->ops->phylink_get_caps(ds, dp->index, &dp->pl_config);
+
+ dp->pl = phylink_create(&dp->pl_config, of_fwnode_handle(dp->dn),
+ mode, &dsa_port_phylink_mac_ops);
+ if (IS_ERR(dp->pl)) {
+ pr_err("error creating PHYLINK: %ld\n", PTR_ERR(dp->pl));
+ return PTR_ERR(dp->pl);
+ }
+
+ return 0;
+}
+
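
[Illustration, not part of the patch above.] dsa_port_phylink_create() now gives drivers a chance to fill in dp->pl_config through a phylink_get_caps() hook before phylink_create() runs; only the ds->ops->phylink_get_caps(ds, dp->index, &dp->pl_config) call shape is taken from the hunk above. A hedged sketch of what such a driver hook might look like, assuming the phylink MAC-capability helpers of the same kernel generation:

/* Illustrative only -- an example driver hook matching the
 * ds->ops->phylink_get_caps(ds, dp->index, &dp->pl_config) call made by
 * dsa_port_phylink_create() above. The port numbering and the chosen
 * capabilities are assumptions, not taken from any real driver.
 */
static void example_phylink_get_caps(struct dsa_switch *ds, int port,
				     struct phylink_config *config)
{
	/* Pretend the CPU-facing port is RGMII and user ports are RMII */
	if (port == 5)
		__set_bit(PHY_INTERFACE_MODE_RGMII,
			  config->supported_interfaces);
	else
		__set_bit(PHY_INTERFACE_MODE_RMII,
			  config->supported_interfaces);

	config->mac_capabilities = MAC_ASYM_PAUSE | MAC_SYM_PAUSE |
				   MAC_10 | MAC_100 | MAC_1000FD;
}
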
static int dsa_port_setup_phy_of(struct dsa_port *dp, bool enable)
{
struct dsa_switch *ds = dp->ds;
@@ -1177,23 +1514,15 @@ static int dsa_port_phylink_register(struct dsa_port *dp)
{
struct dsa_switch *ds = dp->ds;
struct device_node *port_dn = dp->dn;
- phy_interface_t mode;
int err;
- err = of_get_phy_mode(port_dn, &mode);
- if (err)
- mode = PHY_INTERFACE_MODE_NA;
-
dp->pl_config.dev = ds->dev;
dp->pl_config.type = PHYLINK_DEV;
dp->pl_config.pcs_poll = ds->pcs_poll;
- dp->pl = phylink_create(&dp->pl_config, of_fwnode_handle(port_dn),
- mode, &dsa_port_phylink_mac_ops);
- if (IS_ERR(dp->pl)) {
- pr_err("error creating PHYLINK: %ld\n", PTR_ERR(dp->pl));
- return PTR_ERR(dp->pl);
- }
+ err = dsa_port_phylink_create(dp);
+ if (err)
+ return err;
err = phylink_of_phy_connect(dp->pl, port_dn, 0);
if (err && err != -ENODEV) {
@@ -1255,75 +1584,17 @@ void dsa_port_link_unregister_of(struct dsa_port *dp)
dsa_port_setup_phy_of(dp, false);
}
-int dsa_port_get_phy_strings(struct dsa_port *dp, uint8_t *data)
-{
- struct phy_device *phydev;
- int ret = -EOPNOTSUPP;
-
- if (of_phy_is_fixed_link(dp->dn))
- return ret;
-
- phydev = dsa_port_get_phy_device(dp);
- if (IS_ERR_OR_NULL(phydev))
- return ret;
-
- ret = phy_ethtool_get_strings(phydev, data);
- put_device(&phydev->mdio.dev);
-
- return ret;
-}
-EXPORT_SYMBOL_GPL(dsa_port_get_phy_strings);
-
-int dsa_port_get_ethtool_phy_stats(struct dsa_port *dp, uint64_t *data)
-{
- struct phy_device *phydev;
- int ret = -EOPNOTSUPP;
-
- if (of_phy_is_fixed_link(dp->dn))
- return ret;
-
- phydev = dsa_port_get_phy_device(dp);
- if (IS_ERR_OR_NULL(phydev))
- return ret;
-
- ret = phy_ethtool_get_stats(phydev, NULL, data);
- put_device(&phydev->mdio.dev);
-
- return ret;
-}
-EXPORT_SYMBOL_GPL(dsa_port_get_ethtool_phy_stats);
-
-int dsa_port_get_phy_sset_count(struct dsa_port *dp)
-{
- struct phy_device *phydev;
- int ret = -EOPNOTSUPP;
-
- if (of_phy_is_fixed_link(dp->dn))
- return ret;
-
- phydev = dsa_port_get_phy_device(dp);
- if (IS_ERR_OR_NULL(phydev))
- return ret;
-
- ret = phy_ethtool_get_sset_count(phydev);
- put_device(&phydev->mdio.dev);
-
- return ret;
-}
-EXPORT_SYMBOL_GPL(dsa_port_get_phy_sset_count);
-
int dsa_port_hsr_join(struct dsa_port *dp, struct net_device *hsr)
{
- struct dsa_notifier_hsr_info info = {
- .sw_index = dp->ds->index,
- .port = dp->index,
- .hsr = hsr,
- };
+ struct dsa_switch *ds = dp->ds;
int err;
+ if (!ds->ops->port_hsr_join)
+ return -EOPNOTSUPP;
+
dp->hsr_dev = hsr;
- err = dsa_port_notify(dp, DSA_NOTIFIER_HSR_JOIN, &info);
+ err = ds->ops->port_hsr_join(ds, dp->index, hsr);
if (err)
dp->hsr_dev = NULL;
@@ -1332,28 +1603,24 @@ int dsa_port_hsr_join(struct dsa_port *dp, struct net_device *hsr)
void dsa_port_hsr_leave(struct dsa_port *dp, struct net_device *hsr)
{
- struct dsa_notifier_hsr_info info = {
- .sw_index = dp->ds->index,
- .port = dp->index,
- .hsr = hsr,
- };
+ struct dsa_switch *ds = dp->ds;
int err;
dp->hsr_dev = NULL;
- err = dsa_port_notify(dp, DSA_NOTIFIER_HSR_LEAVE, &info);
- if (err)
- dev_err(dp->ds->dev,
- "port %d failed to notify DSA_NOTIFIER_HSR_LEAVE: %pe\n",
- dp->index, ERR_PTR(err));
+ if (ds->ops->port_hsr_leave) {
+ err = ds->ops->port_hsr_leave(ds, dp->index, hsr);
+ if (err)
+ dev_err(dp->ds->dev,
+ "port %d failed to leave HSR %s: %pe\n",
+ dp->index, hsr->name, ERR_PTR(err));
+ }
}
int dsa_port_tag_8021q_vlan_add(struct dsa_port *dp, u16 vid, bool broadcast)
{
struct dsa_notifier_tag_8021q_vlan_info info = {
- .tree_index = dp->ds->dst->index,
- .sw_index = dp->ds->index,
- .port = dp->index,
+ .dp = dp,
.vid = vid,
};
@@ -1366,9 +1633,7 @@ int dsa_port_tag_8021q_vlan_add(struct dsa_port *dp, u16 vid, bool broadcast)
void dsa_port_tag_8021q_vlan_del(struct dsa_port *dp, u16 vid, bool broadcast)
{
struct dsa_notifier_tag_8021q_vlan_info info = {
- .tree_index = dp->ds->dst->index,
- .sw_index = dp->ds->index,
- .port = dp->index,
+ .dp = dp,
.vid = vid,
};
int err;
diff --git a/net/dsa/slave.c b/net/dsa/slave.c
index 11ec9e689589..ae9119a06c74 100644
--- a/net/dsa/slave.c
+++ b/net/dsa/slave.c
@@ -19,10 +19,151 @@
#include <net/tc_act/tc_mirred.h>
#include <linux/if_bridge.h>
#include <linux/if_hsr.h>
+#include <net/dcbnl.h>
#include <linux/netpoll.h>
#include "dsa_priv.h"
+static void dsa_slave_standalone_event_work(struct work_struct *work)
+{
+ struct dsa_standalone_event_work *standalone_work =
+ container_of(work, struct dsa_standalone_event_work, work);
+ const unsigned char *addr = standalone_work->addr;
+ struct net_device *dev = standalone_work->dev;
+ struct dsa_port *dp = dsa_slave_to_port(dev);
+ struct switchdev_obj_port_mdb mdb;
+ struct dsa_switch *ds = dp->ds;
+ u16 vid = standalone_work->vid;
+ int err;
+
+ switch (standalone_work->event) {
+ case DSA_UC_ADD:
+ err = dsa_port_standalone_host_fdb_add(dp, addr, vid);
+ if (err) {
+ dev_err(ds->dev,
+ "port %d failed to add %pM vid %d to fdb: %d\n",
+ dp->index, addr, vid, err);
+ break;
+ }
+ break;
+
+ case DSA_UC_DEL:
+ err = dsa_port_standalone_host_fdb_del(dp, addr, vid);
+ if (err) {
+ dev_err(ds->dev,
+ "port %d failed to delete %pM vid %d from fdb: %d\n",
+ dp->index, addr, vid, err);
+ }
+
+ break;
+ case DSA_MC_ADD:
+ ether_addr_copy(mdb.addr, addr);
+ mdb.vid = vid;
+
+ err = dsa_port_standalone_host_mdb_add(dp, &mdb);
+ if (err) {
+ dev_err(ds->dev,
+ "port %d failed to add %pM vid %d to mdb: %d\n",
+ dp->index, addr, vid, err);
+ break;
+ }
+ break;
+ case DSA_MC_DEL:
+ ether_addr_copy(mdb.addr, addr);
+ mdb.vid = vid;
+
+ err = dsa_port_standalone_host_mdb_del(dp, &mdb);
+ if (err) {
+ dev_err(ds->dev,
+ "port %d failed to delete %pM vid %d from mdb: %d\n",
+ dp->index, addr, vid, err);
+ }
+
+ break;
+ }
+
+ kfree(standalone_work);
+}
+
+static int dsa_slave_schedule_standalone_work(struct net_device *dev,
+ enum dsa_standalone_event event,
+ const unsigned char *addr,
+ u16 vid)
+{
+ struct dsa_standalone_event_work *standalone_work;
+
+ standalone_work = kzalloc(sizeof(*standalone_work), GFP_ATOMIC);
+ if (!standalone_work)
+ return -ENOMEM;
+
+ INIT_WORK(&standalone_work->work, dsa_slave_standalone_event_work);
+ standalone_work->event = event;
+ standalone_work->dev = dev;
+
+ ether_addr_copy(standalone_work->addr, addr);
+ standalone_work->vid = vid;
+
+ dsa_schedule_work(&standalone_work->work);
+
+ return 0;
+}
+
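
[Illustration, not part of the patch above.] The GFP_ATOMIC allocation and the deferral to a work item exist because these helpers run from the address-list sync path, where the driver cannot sleep, while the actual FDB/MDB programming may need to (e.g. over MDIO). The context carried by each work item is declared in net/dsa/dsa_priv.h, outside the visible hunks; its shape, as implied by the fields used in dsa_slave_standalone_event_work() above, is roughly:

/* Sketch of the declarations implied by the handlers above; the exact
 * field order in dsa_priv.h is an assumption.
 */
enum dsa_standalone_event {
	DSA_UC_ADD,
	DSA_UC_DEL,
	DSA_MC_ADD,
	DSA_MC_DEL,
};

struct dsa_standalone_event_work {
	struct work_struct work;
	struct net_device *dev;
	enum dsa_standalone_event event;
	unsigned char addr[ETH_ALEN];
	u16 vid;
};
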
+static int dsa_slave_sync_uc(struct net_device *dev,
+ const unsigned char *addr)
+{
+ struct net_device *master = dsa_slave_to_master(dev);
+ struct dsa_port *dp = dsa_slave_to_port(dev);
+
+ dev_uc_add(master, addr);
+
+ if (!dsa_switch_supports_uc_filtering(dp->ds))
+ return 0;
+
+ return dsa_slave_schedule_standalone_work(dev, DSA_UC_ADD, addr, 0);
+}
+
+static int dsa_slave_unsync_uc(struct net_device *dev,
+ const unsigned char *addr)
+{
+ struct net_device *master = dsa_slave_to_master(dev);
+ struct dsa_port *dp = dsa_slave_to_port(dev);
+
+ dev_uc_del(master, addr);
+
+ if (!dsa_switch_supports_uc_filtering(dp->ds))
+ return 0;
+
+ return dsa_slave_schedule_standalone_work(dev, DSA_UC_DEL, addr, 0);
+}
+
+static int dsa_slave_sync_mc(struct net_device *dev,
+ const unsigned char *addr)
+{
+ struct net_device *master = dsa_slave_to_master(dev);
+ struct dsa_port *dp = dsa_slave_to_port(dev);
+
+ dev_mc_add(master, addr);
+
+ if (!dsa_switch_supports_mc_filtering(dp->ds))
+ return 0;
+
+ return dsa_slave_schedule_standalone_work(dev, DSA_MC_ADD, addr, 0);
+}
+
+static int dsa_slave_unsync_mc(struct net_device *dev,
+ const unsigned char *addr)
+{
+ struct net_device *master = dsa_slave_to_master(dev);
+ struct dsa_port *dp = dsa_slave_to_port(dev);
+
+ dev_mc_del(master, addr);
+
+ if (!dsa_switch_supports_mc_filtering(dp->ds))
+ return 0;
+
+ return dsa_slave_schedule_standalone_work(dev, DSA_MC_DEL, addr, 0);
+}
+
/* slave mii_bus handling ***************************************************/
static int dsa_slave_phy_read(struct mii_bus *bus, int addr, int reg)
{
@@ -67,6 +208,7 @@ static int dsa_slave_open(struct net_device *dev)
{
struct net_device *master = dsa_slave_to_master(dev);
struct dsa_port *dp = dsa_slave_to_port(dev);
+ struct dsa_switch *ds = dp->ds;
int err;
err = dev_open(master, NULL);
@@ -75,38 +217,30 @@ static int dsa_slave_open(struct net_device *dev)
goto out;
}
- if (!ether_addr_equal(dev->dev_addr, master->dev_addr)) {
- err = dev_uc_add(master, dev->dev_addr);
- if (err < 0)
+ if (dsa_switch_supports_uc_filtering(ds)) {
+ err = dsa_port_standalone_host_fdb_add(dp, dev->dev_addr, 0);
+ if (err)
goto out;
}
- if (dev->flags & IFF_ALLMULTI) {
- err = dev_set_allmulti(master, 1);
- if (err < 0)
- goto del_unicast;
- }
- if (dev->flags & IFF_PROMISC) {
- err = dev_set_promiscuity(master, 1);
+ if (!ether_addr_equal(dev->dev_addr, master->dev_addr)) {
+ err = dev_uc_add(master, dev->dev_addr);
if (err < 0)
- goto clear_allmulti;
+ goto del_host_addr;
}
err = dsa_port_enable_rt(dp, dev->phydev);
if (err)
- goto clear_promisc;
+ goto del_unicast;
return 0;
-clear_promisc:
- if (dev->flags & IFF_PROMISC)
- dev_set_promiscuity(master, -1);
-clear_allmulti:
- if (dev->flags & IFF_ALLMULTI)
- dev_set_allmulti(master, -1);
del_unicast:
if (!ether_addr_equal(dev->dev_addr, master->dev_addr))
dev_uc_del(master, dev->dev_addr);
+del_host_addr:
+ if (dsa_switch_supports_uc_filtering(ds))
+ dsa_port_standalone_host_fdb_del(dp, dev->dev_addr, 0);
out:
return err;
}
@@ -115,68 +249,121 @@ static int dsa_slave_close(struct net_device *dev)
{
struct net_device *master = dsa_slave_to_master(dev);
struct dsa_port *dp = dsa_slave_to_port(dev);
+ struct dsa_switch *ds = dp->ds;
dsa_port_disable_rt(dp);
- dev_mc_unsync(master, dev);
- dev_uc_unsync(master, dev);
- if (dev->flags & IFF_ALLMULTI)
- dev_set_allmulti(master, -1);
- if (dev->flags & IFF_PROMISC)
- dev_set_promiscuity(master, -1);
-
if (!ether_addr_equal(dev->dev_addr, master->dev_addr))
dev_uc_del(master, dev->dev_addr);
+ if (dsa_switch_supports_uc_filtering(ds))
+ dsa_port_standalone_host_fdb_del(dp, dev->dev_addr, 0);
+
return 0;
}
-static void dsa_slave_change_rx_flags(struct net_device *dev, int change)
+/* Keep flooding enabled towards this port's CPU port as long as it serves at
+ * least one port in the tree that requires it.
+ */
+static void dsa_port_manage_cpu_flood(struct dsa_port *dp)
{
- struct net_device *master = dsa_slave_to_master(dev);
- if (dev->flags & IFF_UP) {
- if (change & IFF_ALLMULTI)
- dev_set_allmulti(master,
- dev->flags & IFF_ALLMULTI ? 1 : -1);
- if (change & IFF_PROMISC)
- dev_set_promiscuity(master,
- dev->flags & IFF_PROMISC ? 1 : -1);
+ struct switchdev_brport_flags flags = {
+ .mask = BR_FLOOD | BR_MCAST_FLOOD,
+ };
+ struct dsa_switch_tree *dst = dp->ds->dst;
+ struct dsa_port *cpu_dp = dp->cpu_dp;
+ struct dsa_port *other_dp;
+ int err;
+
+ list_for_each_entry(other_dp, &dst->ports, list) {
+ if (!dsa_port_is_user(other_dp))
+ continue;
+
+ if (other_dp->cpu_dp != cpu_dp)
+ continue;
+
+ if (other_dp->slave->flags & IFF_ALLMULTI)
+ flags.val |= BR_MCAST_FLOOD;
+ if (other_dp->slave->flags & IFF_PROMISC)
+ flags.val |= BR_FLOOD | BR_MCAST_FLOOD;
}
+
+ err = dsa_port_pre_bridge_flags(dp, flags, NULL);
+ if (err)
+ return;
+
+ dsa_port_bridge_flags(cpu_dp, flags, NULL);
}
-static void dsa_slave_set_rx_mode(struct net_device *dev)
+static void dsa_slave_change_rx_flags(struct net_device *dev, int change)
{
struct net_device *master = dsa_slave_to_master(dev);
+ struct dsa_port *dp = dsa_slave_to_port(dev);
+ struct dsa_switch *ds = dp->ds;
+
+ if (change & IFF_ALLMULTI)
+ dev_set_allmulti(master,
+ dev->flags & IFF_ALLMULTI ? 1 : -1);
+ if (change & IFF_PROMISC)
+ dev_set_promiscuity(master,
+ dev->flags & IFF_PROMISC ? 1 : -1);
+
+ if (dsa_switch_supports_uc_filtering(ds) &&
+ dsa_switch_supports_mc_filtering(ds))
+ dsa_port_manage_cpu_flood(dp);
+}
- dev_mc_sync(master, dev);
- dev_uc_sync(master, dev);
+static void dsa_slave_set_rx_mode(struct net_device *dev)
+{
+ __dev_mc_sync(dev, dsa_slave_sync_mc, dsa_slave_unsync_mc);
+ __dev_uc_sync(dev, dsa_slave_sync_uc, dsa_slave_unsync_uc);
}
static int dsa_slave_set_mac_address(struct net_device *dev, void *a)
{
struct net_device *master = dsa_slave_to_master(dev);
+ struct dsa_port *dp = dsa_slave_to_port(dev);
+ struct dsa_switch *ds = dp->ds;
struct sockaddr *addr = a;
int err;
if (!is_valid_ether_addr(addr->sa_data))
return -EADDRNOTAVAIL;
+ /* If the port is down, the address isn't synced yet to hardware or
+ * to the DSA master, so there is nothing to change.
+ */
if (!(dev->flags & IFF_UP))
- goto out;
+ goto out_change_dev_addr;
+
+ if (dsa_switch_supports_uc_filtering(ds)) {
+ err = dsa_port_standalone_host_fdb_add(dp, addr->sa_data, 0);
+ if (err)
+ return err;
+ }
if (!ether_addr_equal(addr->sa_data, master->dev_addr)) {
err = dev_uc_add(master, addr->sa_data);
if (err < 0)
- return err;
+ goto del_unicast;
}
if (!ether_addr_equal(dev->dev_addr, master->dev_addr))
dev_uc_del(master, dev->dev_addr);
-out:
+ if (dsa_switch_supports_uc_filtering(ds))
+ dsa_port_standalone_host_fdb_del(dp, dev->dev_addr, 0);
+
+out_change_dev_addr:
eth_hw_addr_set(dev, addr->sa_data);
return 0;
+
+del_unicast:
+ if (dsa_switch_supports_uc_filtering(ds))
+ dsa_port_standalone_host_fdb_del(dp, addr->sa_data, 0);
+
+ return err;
}
struct dsa_slave_dump_ctx {
@@ -289,14 +476,14 @@ static int dsa_slave_port_attr_set(struct net_device *dev, const void *ctx,
ret = dsa_port_set_state(dp, attr->u.stp_state, true);
break;
case SWITCHDEV_ATTR_ID_BRIDGE_VLAN_FILTERING:
- if (!dsa_port_offloads_bridge(dp, attr->orig_dev))
+ if (!dsa_port_offloads_bridge_dev(dp, attr->orig_dev))
return -EOPNOTSUPP;
ret = dsa_port_vlan_filtering(dp, attr->u.vlan_filtering,
extack);
break;
case SWITCHDEV_ATTR_ID_BRIDGE_AGEING_TIME:
- if (!dsa_port_offloads_bridge(dp, attr->orig_dev))
+ if (!dsa_port_offloads_bridge_dev(dp, attr->orig_dev))
return -EOPNOTSUPP;
ret = dsa_port_ageing_time(dp, attr->u.ageing_time);
@@ -348,9 +535,8 @@ static int dsa_slave_vlan_add(struct net_device *dev,
const struct switchdev_obj *obj,
struct netlink_ext_ack *extack)
{
- struct net_device *master = dsa_slave_to_master(dev);
struct dsa_port *dp = dsa_slave_to_port(dev);
- struct switchdev_obj_port_vlan vlan;
+ struct switchdev_obj_port_vlan *vlan;
int err;
if (dsa_port_skip_vlan_configuration(dp)) {
@@ -358,14 +544,14 @@ static int dsa_slave_vlan_add(struct net_device *dev,
return 0;
}
- vlan = *SWITCHDEV_OBJ_PORT_VLAN(obj);
+ vlan = SWITCHDEV_OBJ_PORT_VLAN(obj);
/* Deny adding a bridge VLAN when there is already an 802.1Q upper with
* the same VID.
*/
- if (br_vlan_enabled(dp->bridge_dev)) {
+ if (br_vlan_enabled(dsa_port_bridge_dev_get(dp))) {
rcu_read_lock();
- err = dsa_slave_vlan_check_for_8021q_uppers(dev, &vlan);
+ err = dsa_slave_vlan_check_for_8021q_uppers(dev, vlan);
rcu_read_unlock();
if (err) {
NL_SET_ERR_MSG_MOD(extack,
@@ -374,21 +560,36 @@ static int dsa_slave_vlan_add(struct net_device *dev,
}
}
- err = dsa_port_vlan_add(dp, &vlan, extack);
- if (err)
- return err;
+ return dsa_port_vlan_add(dp, vlan, extack);
+}
+
+/* Offload a VLAN installed on the bridge or on a foreign interface by
+ * installing it as a VLAN towards the CPU port.
+ */
+static int dsa_slave_host_vlan_add(struct net_device *dev,
+ const struct switchdev_obj *obj,
+ struct netlink_ext_ack *extack)
+{
+ struct dsa_port *dp = dsa_slave_to_port(dev);
+ struct switchdev_obj_port_vlan vlan;
+
+ /* Do nothing if this is a software bridge */
+ if (!dp->bridge)
+ return -EOPNOTSUPP;
+
+ if (dsa_port_skip_vlan_configuration(dp)) {
+ NL_SET_ERR_MSG_MOD(extack, "skipping configuration of VLAN");
+ return 0;
+ }
- /* We need the dedicated CPU port to be a member of the VLAN as well.
- * Even though drivers often handle CPU membership in special ways,
+ vlan = *SWITCHDEV_OBJ_PORT_VLAN(obj);
+
+ /* Even though drivers often handle CPU membership in special ways,
* it doesn't make sense to program a PVID, so clear this flag.
*/
vlan.flags &= ~BRIDGE_VLAN_INFO_PVID;
- err = dsa_port_vlan_add(dp->cpu_dp, &vlan, extack);
- if (err)
- return err;
-
- return vlan_vid_add(master, htons(ETH_P_8021Q), vlan.vid);
+ return dsa_port_host_vlan_add(dp, &vlan, extack);
}
static int dsa_slave_port_obj_add(struct net_device *dev, const void *ctx,
@@ -409,25 +610,25 @@ static int dsa_slave_port_obj_add(struct net_device *dev, const void *ctx,
err = dsa_port_mdb_add(dp, SWITCHDEV_OBJ_PORT_MDB(obj));
break;
case SWITCHDEV_OBJ_ID_HOST_MDB:
- if (!dsa_port_offloads_bridge(dp, obj->orig_dev))
+ if (!dsa_port_offloads_bridge_dev(dp, obj->orig_dev))
return -EOPNOTSUPP;
- err = dsa_port_host_mdb_add(dp, SWITCHDEV_OBJ_PORT_MDB(obj));
+ err = dsa_port_bridge_host_mdb_add(dp, SWITCHDEV_OBJ_PORT_MDB(obj));
break;
case SWITCHDEV_OBJ_ID_PORT_VLAN:
- if (!dsa_port_offloads_bridge_port(dp, obj->orig_dev))
- return -EOPNOTSUPP;
-
- err = dsa_slave_vlan_add(dev, obj, extack);
+ if (dsa_port_offloads_bridge_port(dp, obj->orig_dev))
+ err = dsa_slave_vlan_add(dev, obj, extack);
+ else
+ err = dsa_slave_host_vlan_add(dev, obj, extack);
break;
case SWITCHDEV_OBJ_ID_MRP:
- if (!dsa_port_offloads_bridge(dp, obj->orig_dev))
+ if (!dsa_port_offloads_bridge_dev(dp, obj->orig_dev))
return -EOPNOTSUPP;
err = dsa_port_mrp_add(dp, SWITCHDEV_OBJ_MRP(obj));
break;
case SWITCHDEV_OBJ_ID_RING_ROLE_MRP:
- if (!dsa_port_offloads_bridge(dp, obj->orig_dev))
+ if (!dsa_port_offloads_bridge_dev(dp, obj->orig_dev))
return -EOPNOTSUPP;
err = dsa_port_mrp_add_ring_role(dp,
@@ -444,26 +645,33 @@ static int dsa_slave_port_obj_add(struct net_device *dev, const void *ctx,
static int dsa_slave_vlan_del(struct net_device *dev,
const struct switchdev_obj *obj)
{
- struct net_device *master = dsa_slave_to_master(dev);
struct dsa_port *dp = dsa_slave_to_port(dev);
struct switchdev_obj_port_vlan *vlan;
- int err;
if (dsa_port_skip_vlan_configuration(dp))
return 0;
vlan = SWITCHDEV_OBJ_PORT_VLAN(obj);
- /* Do not deprogram the CPU port as it may be shared with other user
- * ports which can be members of this VLAN as well.
- */
- err = dsa_port_vlan_del(dp, vlan);
- if (err)
- return err;
+ return dsa_port_vlan_del(dp, vlan);
+}
- vlan_vid_del(master, htons(ETH_P_8021Q), vlan->vid);
+static int dsa_slave_host_vlan_del(struct net_device *dev,
+ const struct switchdev_obj *obj)
+{
+ struct dsa_port *dp = dsa_slave_to_port(dev);
+ struct switchdev_obj_port_vlan *vlan;
- return 0;
+ /* Do nothing if this is a software bridge */
+ if (!dp->bridge)
+ return -EOPNOTSUPP;
+
+ if (dsa_port_skip_vlan_configuration(dp))
+ return 0;
+
+ vlan = SWITCHDEV_OBJ_PORT_VLAN(obj);
+
+ return dsa_port_host_vlan_del(dp, vlan);
}
static int dsa_slave_port_obj_del(struct net_device *dev, const void *ctx,
@@ -483,25 +691,25 @@ static int dsa_slave_port_obj_del(struct net_device *dev, const void *ctx,
err = dsa_port_mdb_del(dp, SWITCHDEV_OBJ_PORT_MDB(obj));
break;
case SWITCHDEV_OBJ_ID_HOST_MDB:
- if (!dsa_port_offloads_bridge(dp, obj->orig_dev))
+ if (!dsa_port_offloads_bridge_dev(dp, obj->orig_dev))
return -EOPNOTSUPP;
- err = dsa_port_host_mdb_del(dp, SWITCHDEV_OBJ_PORT_MDB(obj));
+ err = dsa_port_bridge_host_mdb_del(dp, SWITCHDEV_OBJ_PORT_MDB(obj));
break;
case SWITCHDEV_OBJ_ID_PORT_VLAN:
- if (!dsa_port_offloads_bridge_port(dp, obj->orig_dev))
- return -EOPNOTSUPP;
-
- err = dsa_slave_vlan_del(dev, obj);
+ if (dsa_port_offloads_bridge_port(dp, obj->orig_dev))
+ err = dsa_slave_vlan_del(dev, obj);
+ else
+ err = dsa_slave_host_vlan_del(dev, obj);
break;
case SWITCHDEV_OBJ_ID_MRP:
- if (!dsa_port_offloads_bridge(dp, obj->orig_dev))
+ if (!dsa_port_offloads_bridge_dev(dp, obj->orig_dev))
return -EOPNOTSUPP;
err = dsa_port_mrp_del(dp, SWITCHDEV_OBJ_MRP(obj));
break;
case SWITCHDEV_OBJ_ID_RING_ROLE_MRP:
- if (!dsa_port_offloads_bridge(dp, obj->orig_dev))
+ if (!dsa_port_offloads_bridge_dev(dp, obj->orig_dev))
return -EOPNOTSUPP;
err = dsa_port_mrp_del_ring_role(dp,
@@ -515,26 +723,6 @@ static int dsa_slave_port_obj_del(struct net_device *dev, const void *ctx,
return err;
}
-static int dsa_slave_get_port_parent_id(struct net_device *dev,
- struct netdev_phys_item_id *ppid)
-{
- struct dsa_port *dp = dsa_slave_to_port(dev);
- struct dsa_switch *ds = dp->ds;
- struct dsa_switch_tree *dst = ds->dst;
-
- /* For non-legacy ports, devlink is used and it takes
- * care of the name generation. This ndo implementation
- * should be removed with legacy support.
- */
- if (dp->ds->devlink)
- return -EOPNOTSUPP;
-
- ppid->id_len = sizeof(dst->index);
- memcpy(&ppid->id, &dst->index, ppid->id_len);
-
- return 0;
-}
-
static inline netdev_tx_t dsa_slave_netpoll_send_skb(struct net_device *dev,
struct sk_buff *skb)
{
@@ -789,6 +977,37 @@ static int dsa_slave_get_sset_count(struct net_device *dev, int sset)
return -EOPNOTSUPP;
}
+static void dsa_slave_get_eth_phy_stats(struct net_device *dev,
+ struct ethtool_eth_phy_stats *phy_stats)
+{
+ struct dsa_port *dp = dsa_slave_to_port(dev);
+ struct dsa_switch *ds = dp->ds;
+
+ if (ds->ops->get_eth_phy_stats)
+ ds->ops->get_eth_phy_stats(ds, dp->index, phy_stats);
+}
+
+static void dsa_slave_get_eth_mac_stats(struct net_device *dev,
+ struct ethtool_eth_mac_stats *mac_stats)
+{
+ struct dsa_port *dp = dsa_slave_to_port(dev);
+ struct dsa_switch *ds = dp->ds;
+
+ if (ds->ops->get_eth_mac_stats)
+ ds->ops->get_eth_mac_stats(ds, dp->index, mac_stats);
+}
+
+static void
+dsa_slave_get_eth_ctrl_stats(struct net_device *dev,
+ struct ethtool_eth_ctrl_stats *ctrl_stats)
+{
+ struct dsa_port *dp = dsa_slave_to_port(dev);
+ struct dsa_switch *ds = dp->ds;
+
+ if (ds->ops->get_eth_ctrl_stats)
+ ds->ops->get_eth_ctrl_stats(ds, dp->index, ctrl_stats);
+}
+
static void dsa_slave_net_selftest(struct net_device *ndev,
struct ethtool_test *etest, u64 *buf)
{
@@ -942,24 +1161,6 @@ static void dsa_slave_poll_controller(struct net_device *dev)
}
#endif
-static int dsa_slave_get_phys_port_name(struct net_device *dev,
- char *name, size_t len)
-{
- struct dsa_port *dp = dsa_slave_to_port(dev);
-
- /* For non-legacy ports, devlink is used and it takes
- * care of the name generation. This ndo implementation
- * should be removed with legacy support.
- */
- if (dp->ds->devlink)
- return -EOPNOTSUPP;
-
- if (snprintf(name, len, "p%d", dp->index) >= len)
- return -EINVAL;
-
- return 0;
-}
-
static struct dsa_mall_tc_entry *
dsa_slave_mall_tc_entry_find(struct net_device *dev, unsigned long cookie)
{
@@ -978,6 +1179,7 @@ dsa_slave_add_cls_matchall_mirred(struct net_device *dev,
struct tc_cls_matchall_offload *cls,
bool ingress)
{
+ struct netlink_ext_ack *extack = cls->common.extack;
struct dsa_port *dp = dsa_slave_to_port(dev);
struct dsa_slave_priv *p = netdev_priv(dev);
struct dsa_mall_mirror_tc_entry *mirror;
@@ -1015,7 +1217,7 @@ dsa_slave_add_cls_matchall_mirred(struct net_device *dev,
mirror->to_local_port = to_dp->index;
mirror->ingress = ingress;
- err = ds->ops->port_mirror_add(ds, dp->index, mirror, ingress);
+ err = ds->ops->port_mirror_add(ds, dp->index, mirror, ingress, extack);
if (err) {
kfree(mall_tc_entry);
return err;
@@ -1354,7 +1556,6 @@ static int dsa_slave_get_ts_info(struct net_device *dev,
static int dsa_slave_vlan_rx_add_vid(struct net_device *dev, __be16 proto,
u16 vid)
{
- struct net_device *master = dsa_slave_to_master(dev);
struct dsa_port *dp = dsa_slave_to_port(dev);
struct switchdev_obj_port_vlan vlan = {
.obj.id = SWITCHDEV_OBJ_ID_PORT_VLAN,
@@ -1374,7 +1575,7 @@ static int dsa_slave_vlan_rx_add_vid(struct net_device *dev, __be16 proto,
}
/* And CPU port... */
- ret = dsa_port_vlan_add(dp->cpu_dp, &vlan, &extack);
+ ret = dsa_port_host_vlan_add(dp, &vlan, &extack);
if (ret) {
if (extack._msg)
netdev_err(dev, "CPU port %d: %s\n", dp->cpu_dp->index,
@@ -1382,13 +1583,12 @@ static int dsa_slave_vlan_rx_add_vid(struct net_device *dev, __be16 proto,
return ret;
}
- return vlan_vid_add(master, proto, vid);
+ return 0;
}
static int dsa_slave_vlan_rx_kill_vid(struct net_device *dev, __be16 proto,
u16 vid)
{
- struct net_device *master = dsa_slave_to_master(dev);
struct dsa_port *dp = dsa_slave_to_port(dev);
struct switchdev_obj_port_vlan vlan = {
.vid = vid,
@@ -1397,16 +1597,11 @@ static int dsa_slave_vlan_rx_kill_vid(struct net_device *dev, __be16 proto,
};
int err;
- /* Do not deprogram the CPU port as it may be shared with other user
- * ports which can be members of this VLAN as well.
- */
err = dsa_port_vlan_del(dp, &vlan);
if (err)
return err;
- vlan_vid_del(master, proto, vid);
-
- return 0;
+ return dsa_port_host_vlan_del(dp, &vlan);
}
static int dsa_slave_restore_vlan(struct net_device *vdev, int vid, void *arg)
@@ -1533,7 +1728,7 @@ static void dsa_bridge_mtu_normalization(struct dsa_port *dp)
if (!dp->ds->mtu_enforcement_ingress)
return;
- if (!dp->bridge_dev)
+ if (!dp->bridge)
return;
INIT_LIST_HEAD(&hw_port_list);
@@ -1549,7 +1744,7 @@ static void dsa_bridge_mtu_normalization(struct dsa_port *dp)
if (other_dp->type != DSA_PORT_TYPE_USER)
continue;
- if (other_dp->bridge_dev != dp->bridge_dev)
+ if (!dsa_port_bridge_same(dp, other_dp))
continue;
if (!other_dp->ds->mtu_enforcement_ingress)
@@ -1593,11 +1788,9 @@ int dsa_slave_change_mtu(struct net_device *dev, int new_mtu)
{
struct net_device *master = dsa_slave_to_master(dev);
struct dsa_port *dp = dsa_slave_to_port(dev);
- struct dsa_slave_priv *p = netdev_priv(dev);
- struct dsa_switch *ds = p->dp->ds;
- struct dsa_port *dp_iter;
- struct dsa_port *cpu_dp;
- int port = p->dp->index;
+ struct dsa_port *cpu_dp = dp->cpu_dp;
+ struct dsa_switch *ds = dp->ds;
+ struct dsa_port *other_dp;
int largest_mtu = 0;
int new_master_mtu;
int old_master_mtu;
@@ -1608,33 +1801,28 @@ int dsa_slave_change_mtu(struct net_device *dev, int new_mtu)
if (!ds->ops->port_change_mtu)
return -EOPNOTSUPP;
- list_for_each_entry(dp_iter, &ds->dst->ports, list) {
+ dsa_tree_for_each_user_port(other_dp, ds->dst) {
int slave_mtu;
- if (!dsa_port_is_user(dp_iter))
- continue;
-
/* During probe, this function will be called for each slave
* device, while not all of them have been allocated. That's
* ok, it doesn't change what the maximum is, so ignore it.
*/
- if (!dp_iter->slave)
+ if (!other_dp->slave)
continue;
/* Pretend that we already applied the setting, which we
* actually haven't (still haven't done all integrity checks)
*/
- if (dp_iter == dp)
+ if (dp == other_dp)
slave_mtu = new_mtu;
else
- slave_mtu = dp_iter->slave->mtu;
+ slave_mtu = other_dp->slave->mtu;
if (largest_mtu < slave_mtu)
largest_mtu = slave_mtu;
}
- cpu_dp = dsa_to_port(ds, port)->cpu_dp;
-
mtu_limit = min_t(int, master->max_mtu, dev->max_mtu);
old_master_mtu = master->mtu;
new_master_mtu = largest_mtu + dsa_tag_protocol_overhead(cpu_dp->tag_ops);
@@ -1653,15 +1841,14 @@ int dsa_slave_change_mtu(struct net_device *dev, int new_mtu)
goto out_master_failed;
/* We only need to propagate the MTU of the CPU port to
- * upstream switches, so create a non-targeted notifier which
- * updates all switches.
+ * upstream switches, so emit a notifier which updates them.
*/
- err = dsa_port_mtu_change(cpu_dp, cpu_mtu, false);
+ err = dsa_port_mtu_change(cpu_dp, cpu_mtu);
if (err)
goto out_cpu_failed;
}
- err = dsa_port_mtu_change(dp, new_mtu, true);
+ err = ds->ops->port_change_mtu(ds, dp->index, new_mtu);
if (err)
goto out_port_failed;
@@ -1674,8 +1861,7 @@ int dsa_slave_change_mtu(struct net_device *dev, int new_mtu)
out_port_failed:
if (new_master_mtu != old_master_mtu)
dsa_port_mtu_change(cpu_dp, old_master_mtu -
- dsa_tag_protocol_overhead(cpu_dp->tag_ops),
- false);
+ dsa_tag_protocol_overhead(cpu_dp->tag_ops));
out_cpu_failed:
if (new_master_mtu != old_master_mtu)
dev_set_mtu(master, old_master_mtu);
@@ -1683,6 +1869,209 @@ out_master_failed:
return err;
}
+static int __maybe_unused
+dsa_slave_dcbnl_set_default_prio(struct net_device *dev, struct dcb_app *app)
+{
+ struct dsa_port *dp = dsa_slave_to_port(dev);
+ struct dsa_switch *ds = dp->ds;
+ unsigned long mask, new_prio;
+ int err, port = dp->index;
+
+ if (!ds->ops->port_set_default_prio)
+ return -EOPNOTSUPP;
+
+ err = dcb_ieee_setapp(dev, app);
+ if (err)
+ return err;
+
+ mask = dcb_ieee_getapp_mask(dev, app);
+ new_prio = __fls(mask);
+
+ err = ds->ops->port_set_default_prio(ds, port, new_prio);
+ if (err) {
+ dcb_ieee_delapp(dev, app);
+ return err;
+ }
+
+ return 0;
+}
+
+static int __maybe_unused
+dsa_slave_dcbnl_add_dscp_prio(struct net_device *dev, struct dcb_app *app)
+{
+ struct dsa_port *dp = dsa_slave_to_port(dev);
+ struct dsa_switch *ds = dp->ds;
+ unsigned long mask, new_prio;
+ int err, port = dp->index;
+ u8 dscp = app->protocol;
+
+ if (!ds->ops->port_add_dscp_prio)
+ return -EOPNOTSUPP;
+
+ if (dscp >= 64) {
+ netdev_err(dev, "DSCP APP entry with protocol value %u is invalid\n",
+ dscp);
+ return -EINVAL;
+ }
+
+ err = dcb_ieee_setapp(dev, app);
+ if (err)
+ return err;
+
+ mask = dcb_ieee_getapp_mask(dev, app);
+ new_prio = __fls(mask);
+
+ err = ds->ops->port_add_dscp_prio(ds, port, dscp, new_prio);
+ if (err) {
+ dcb_ieee_delapp(dev, app);
+ return err;
+ }
+
+ return 0;
+}
+
+static int __maybe_unused dsa_slave_dcbnl_ieee_setapp(struct net_device *dev,
+ struct dcb_app *app)
+{
+ switch (app->selector) {
+ case IEEE_8021QAZ_APP_SEL_ETHERTYPE:
+ switch (app->protocol) {
+ case 0:
+ return dsa_slave_dcbnl_set_default_prio(dev, app);
+ default:
+ return -EOPNOTSUPP;
+ }
+ break;
+ case IEEE_8021QAZ_APP_SEL_DSCP:
+ return dsa_slave_dcbnl_add_dscp_prio(dev, app);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static int __maybe_unused
+dsa_slave_dcbnl_del_default_prio(struct net_device *dev, struct dcb_app *app)
+{
+ struct dsa_port *dp = dsa_slave_to_port(dev);
+ struct dsa_switch *ds = dp->ds;
+ unsigned long mask, new_prio;
+ int err, port = dp->index;
+
+ if (!ds->ops->port_set_default_prio)
+ return -EOPNOTSUPP;
+
+ err = dcb_ieee_delapp(dev, app);
+ if (err)
+ return err;
+
+ mask = dcb_ieee_getapp_mask(dev, app);
+ new_prio = mask ? __fls(mask) : 0;
+
+ err = ds->ops->port_set_default_prio(ds, port, new_prio);
+ if (err) {
+ dcb_ieee_setapp(dev, app);
+ return err;
+ }
+
+ return 0;
+}
+
+static int __maybe_unused
+dsa_slave_dcbnl_del_dscp_prio(struct net_device *dev, struct dcb_app *app)
+{
+ struct dsa_port *dp = dsa_slave_to_port(dev);
+ struct dsa_switch *ds = dp->ds;
+ int err, port = dp->index;
+ u8 dscp = app->protocol;
+
+ if (!ds->ops->port_del_dscp_prio)
+ return -EOPNOTSUPP;
+
+ err = dcb_ieee_delapp(dev, app);
+ if (err)
+ return err;
+
+ err = ds->ops->port_del_dscp_prio(ds, port, dscp, app->priority);
+ if (err) {
+ dcb_ieee_setapp(dev, app);
+ return err;
+ }
+
+ return 0;
+}
+
+static int __maybe_unused dsa_slave_dcbnl_ieee_delapp(struct net_device *dev,
+ struct dcb_app *app)
+{
+ switch (app->selector) {
+ case IEEE_8021QAZ_APP_SEL_ETHERTYPE:
+ switch (app->protocol) {
+ case 0:
+ return dsa_slave_dcbnl_del_default_prio(dev, app);
+ default:
+ return -EOPNOTSUPP;
+ }
+ break;
+ case IEEE_8021QAZ_APP_SEL_DSCP:
+ return dsa_slave_dcbnl_del_dscp_prio(dev, app);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+/* Pre-populate the DCB application priority table with the priorities
+ * configured during switch setup, which we read from hardware here.
+ */
+static int dsa_slave_dcbnl_init(struct net_device *dev)
+{
+ struct dsa_port *dp = dsa_slave_to_port(dev);
+ struct dsa_switch *ds = dp->ds;
+ int port = dp->index;
+ int err;
+
+ if (ds->ops->port_get_default_prio) {
+ int prio = ds->ops->port_get_default_prio(ds, port);
+ struct dcb_app app = {
+ .selector = IEEE_8021QAZ_APP_SEL_ETHERTYPE,
+ .protocol = 0,
+ .priority = prio,
+ };
+
+ if (prio < 0)
+ return prio;
+
+ err = dcb_ieee_setapp(dev, &app);
+ if (err)
+ return err;
+ }
+
+ if (ds->ops->port_get_dscp_prio) {
+ int protocol;
+
+ for (protocol = 0; protocol < 64; protocol++) {
+ struct dcb_app app = {
+ .selector = IEEE_8021QAZ_APP_SEL_DSCP,
+ .protocol = protocol,
+ };
+ int prio;
+
+ prio = ds->ops->port_get_dscp_prio(ds, port, protocol);
+ if (prio == -EOPNOTSUPP)
+ continue;
+ if (prio < 0)
+ return prio;
+
+ app.priority = prio;
+
+ err = dcb_ieee_setapp(dev, &app);
+ if (err)
+ return err;
+ }
+ }
+
+ return 0;
+}
+
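
[Illustration, not part of the patch above.] The dcbnl code just added resolves the APP table to a single default priority or DSCP-to-priority mapping and hands it to the switch driver through new ops whose call shapes appear in the hunk: port_set_default_prio(ds, port, prio), port_get_default_prio(ds, port), port_add_dscp_prio(ds, port, dscp, prio), port_del_dscp_prio(ds, port, dscp, prio) and port_get_dscp_prio(ds, port, dscp). Userspace drives this through the standard dcbnl ieee_setapp/ieee_delapp interface. A hedged sketch of a driver-side pairing for the default-priority half:

/* Illustrative driver-side counterpart to the dcbnl code above. The
 * per-port shadow used here is invented for the example; only the op
 * signatures mirror the calls visible in this patch.
 */
static int example_port_set_default_prio(struct dsa_switch *ds, int port,
					 u8 prio)
{
	struct example_priv *priv = ds->priv;	/* hypothetical private data */

	if (prio >= 8)
		return -ERANGE;

	/* In a real driver this would program a per-port QoS register */
	priv->ports[port].default_prio = prio;

	return 0;
}

static int example_port_get_default_prio(struct dsa_switch *ds, int port)
{
	struct example_priv *priv = ds->priv;

	return priv->ports[port].default_prio;
}
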
static const struct ethtool_ops dsa_slave_ethtool_ops = {
.get_drvinfo = dsa_slave_get_drvinfo,
.get_regs_len = dsa_slave_get_regs_len,
@@ -1695,6 +2084,9 @@ static const struct ethtool_ops dsa_slave_ethtool_ops = {
.get_strings = dsa_slave_get_strings,
.get_ethtool_stats = dsa_slave_get_ethtool_stats,
.get_sset_count = dsa_slave_get_sset_count,
+ .get_eth_phy_stats = dsa_slave_get_eth_phy_stats,
+ .get_eth_mac_stats = dsa_slave_get_eth_mac_stats,
+ .get_eth_ctrl_stats = dsa_slave_get_eth_ctrl_stats,
.set_wol = dsa_slave_set_wol,
.get_wol = dsa_slave_get_wol,
.set_eee = dsa_slave_set_eee,
@@ -1709,11 +2101,16 @@ static const struct ethtool_ops dsa_slave_ethtool_ops = {
.self_test = dsa_slave_net_selftest,
};
+static const struct dcbnl_rtnl_ops __maybe_unused dsa_slave_dcbnl_ops = {
+ .ieee_setapp = dsa_slave_dcbnl_ieee_setapp,
+ .ieee_delapp = dsa_slave_dcbnl_ieee_delapp,
+};
+
static struct devlink_port *dsa_slave_get_devlink_port(struct net_device *dev)
{
struct dsa_port *dp = dsa_slave_to_port(dev);
- return dp->ds->devlink ? &dp->devlink_port : NULL;
+ return &dp->devlink_port;
}
static void dsa_slave_get_stats64(struct net_device *dev,
@@ -1758,10 +2155,8 @@ static const struct net_device_ops dsa_slave_netdev_ops = {
.ndo_netpoll_cleanup = dsa_slave_netpoll_cleanup,
.ndo_poll_controller = dsa_slave_poll_controller,
#endif
- .ndo_get_phys_port_name = dsa_slave_get_phys_port_name,
.ndo_setup_tc = dsa_slave_setup_tc,
.ndo_get_stats64 = dsa_slave_get_stats64,
- .ndo_get_port_parent_id = dsa_slave_get_port_parent_id,
.ndo_vlan_rx_add_vid = dsa_slave_vlan_rx_add_vid,
.ndo_vlan_rx_kill_vid = dsa_slave_vlan_rx_kill_vid,
.ndo_get_devlink_port = dsa_slave_get_devlink_port,
@@ -1817,14 +2212,9 @@ static int dsa_slave_phy_setup(struct net_device *slave_dev)
struct dsa_port *dp = dsa_slave_to_port(slave_dev);
struct device_node *port_dn = dp->dn;
struct dsa_switch *ds = dp->ds;
- phy_interface_t mode;
u32 phy_flags = 0;
int ret;
- ret = of_get_phy_mode(port_dn, &mode);
- if (ret)
- mode = PHY_INTERFACE_MODE_NA;
-
dp->pl_config.dev = &slave_dev->dev;
dp->pl_config.type = PHYLINK_NETDEV;
@@ -1837,13 +2227,9 @@ static int dsa_slave_phy_setup(struct net_device *slave_dev)
dp->pl_config.poll_fixed_state = true;
}
- dp->pl = phylink_create(&dp->pl_config, of_fwnode_handle(port_dn), mode,
- &dsa_port_phylink_mac_ops);
- if (IS_ERR(dp->pl)) {
- netdev_err(slave_dev,
- "error creating PHYLINK: %ld\n", PTR_ERR(dp->pl));
- return PTR_ERR(dp->pl);
- }
+ ret = dsa_port_phylink_create(dp);
+ if (ret)
+ return ret;
if (ds->ops->get_phy_flags)
phy_flags = ds->ops->get_phy_flags(ds, dp->index);
@@ -1892,15 +2278,6 @@ void dsa_slave_setup_tagger(struct net_device *slave)
slave->features |= NETIF_F_HW_VLAN_CTAG_FILTER;
}
-static struct lock_class_key dsa_slave_netdev_xmit_lock_key;
-static void dsa_slave_set_lockdep_class_one(struct net_device *dev,
- struct netdev_queue *txq,
- void *_unused)
-{
- lockdep_set_class(&txq->_xmit_lock,
- &dsa_slave_netdev_xmit_lock_key);
-}
-
int dsa_slave_suspend(struct net_device *slave_dev)
{
struct dsa_port *dp = dsa_slave_to_port(slave_dev);
@@ -1953,19 +2330,21 @@ int dsa_slave_create(struct dsa_port *port)
return -ENOMEM;
slave_dev->ethtool_ops = &dsa_slave_ethtool_ops;
+#if IS_ENABLED(CONFIG_DCB)
+ slave_dev->dcbnl_ops = &dsa_slave_dcbnl_ops;
+#endif
if (!is_zero_ether_addr(port->mac))
eth_hw_addr_set(slave_dev, port->mac);
else
eth_hw_addr_inherit(slave_dev, master);
slave_dev->priv_flags |= IFF_NO_QUEUE;
+ if (dsa_switch_supports_uc_filtering(ds))
+ slave_dev->priv_flags |= IFF_UNICAST_FLT;
slave_dev->netdev_ops = &dsa_slave_netdev_ops;
if (ds->ops->port_max_mtu)
slave_dev->max_mtu = ds->ops->port_max_mtu(ds, port->index);
SET_NETDEV_DEVTYPE(slave_dev, &dsa_type);
- netdev_for_each_tx_queue(slave_dev, dsa_slave_set_lockdep_class_one,
- NULL);
-
SET_NETDEV_DEV(slave_dev, port->ds->dev);
slave_dev->dev.of_node = port->dn;
slave_dev->vlan_features = master->vlan_features;
@@ -1986,13 +2365,6 @@ int dsa_slave_create(struct dsa_port *port)
port->slave = slave_dev;
dsa_slave_setup_tagger(slave_dev);
- rtnl_lock();
- ret = dsa_slave_change_mtu(slave_dev, ETH_DATA_LEN);
- rtnl_unlock();
- if (ret && ret != -EOPNOTSUPP)
- dev_warn(ds->dev, "nonfatal error %d setting MTU to %d on port %d\n",
- ret, ETH_DATA_LEN, port->index);
-
netif_carrier_off(slave_dev);
ret = dsa_slave_phy_setup(slave_dev);
@@ -2005,6 +2377,11 @@ int dsa_slave_create(struct dsa_port *port)
rtnl_lock();
+ ret = dsa_slave_change_mtu(slave_dev, ETH_DATA_LEN);
+ if (ret && ret != -EOPNOTSUPP)
+ dev_warn(ds->dev, "nonfatal error %d setting MTU to %d on port %d\n",
+ ret, ETH_DATA_LEN, port->index);
+
ret = register_netdevice(slave_dev);
if (ret) {
netdev_err(master, "error %d registering interface %s\n",
@@ -2013,6 +2390,17 @@ int dsa_slave_create(struct dsa_port *port)
goto out_phy;
}
+ if (IS_ENABLED(CONFIG_DCB)) {
+ ret = dsa_slave_dcbnl_init(slave_dev);
+ if (ret) {
+ netdev_err(slave_dev,
+ "failed to initialize DCB: %pe\n",
+ ERR_PTR(ret));
+ rtnl_unlock();
+ goto out_unregister;
+ }
+ }
+
ret = netdev_upper_dev_link(master, slave_dev, NULL);
rtnl_unlock();
@@ -2149,7 +2537,7 @@ dsa_slave_lag_changeupper(struct net_device *dev,
continue;
dp = dsa_slave_to_port(lower);
- if (!dp->lag_dev)
+ if (!dp->lag)
/* Software LAG */
continue;
@@ -2178,7 +2566,7 @@ dsa_slave_lag_prechangeupper(struct net_device *dev,
continue;
dp = dsa_slave_to_port(lower);
- if (!dp->lag_dev)
+ if (!dp->lag)
/* Software LAG */
continue;
@@ -2195,7 +2583,7 @@ dsa_prevent_bridging_8021q_upper(struct net_device *dev,
struct netdev_notifier_changeupper_info *info)
{
struct netlink_ext_ack *ext_ack;
- struct net_device *slave;
+ struct net_device *slave, *br;
struct dsa_port *dp;
ext_ack = netdev_notifier_info_to_extack(&info->info);
@@ -2208,11 +2596,12 @@ dsa_prevent_bridging_8021q_upper(struct net_device *dev,
return NOTIFY_DONE;
dp = dsa_slave_to_port(slave);
- if (!dp->bridge_dev)
+ br = dsa_port_bridge_dev_get(dp);
+ if (!br)
return NOTIFY_DONE;
/* Deny enslaving a VLAN device into a VLAN-aware bridge */
- if (br_vlan_enabled(dp->bridge_dev) &&
+ if (br_vlan_enabled(br) &&
netif_is_bridge_master(info->upper_dev) && info->linking) {
NL_SET_ERR_MSG_MOD(ext_ack,
"Cannot enslave VLAN device into VLAN aware bridge");
@@ -2227,7 +2616,7 @@ dsa_slave_check_8021q_upper(struct net_device *dev,
struct netdev_notifier_changeupper_info *info)
{
struct dsa_port *dp = dsa_slave_to_port(dev);
- struct net_device *br = dp->bridge_dev;
+ struct net_device *br = dsa_port_bridge_dev_get(dp);
struct bridge_vlan_info br_info;
struct netlink_ext_ack *extack;
int err = NOTIFY_DONE;
@@ -2334,7 +2723,7 @@ static int dsa_slave_netdevice_event(struct notifier_block *nb,
dst = cpu_dp->ds->dst;
list_for_each_entry(dp, &dst->ports, list) {
- if (!dsa_is_user_port(dp->ds, dp->index))
+ if (!dsa_port_is_user(dp))
continue;
list_add(&dp->slave->close_list, &close_list);
@@ -2355,44 +2744,40 @@ static void
dsa_fdb_offload_notify(struct dsa_switchdev_event_work *switchdev_work)
{
struct switchdev_notifier_fdb_info info = {};
- struct dsa_switch *ds = switchdev_work->ds;
- struct dsa_port *dp;
-
- if (!dsa_is_user_port(ds, switchdev_work->port))
- return;
info.addr = switchdev_work->addr;
info.vid = switchdev_work->vid;
info.offloaded = true;
- dp = dsa_to_port(ds, switchdev_work->port);
call_switchdev_notifiers(SWITCHDEV_FDB_OFFLOADED,
- dp->slave, &info.info, NULL);
+ switchdev_work->orig_dev, &info.info, NULL);
}
static void dsa_slave_switchdev_event_work(struct work_struct *work)
{
struct dsa_switchdev_event_work *switchdev_work =
container_of(work, struct dsa_switchdev_event_work, work);
- struct dsa_switch *ds = switchdev_work->ds;
+ const unsigned char *addr = switchdev_work->addr;
+ struct net_device *dev = switchdev_work->dev;
+ u16 vid = switchdev_work->vid;
+ struct dsa_switch *ds;
struct dsa_port *dp;
int err;
- dp = dsa_to_port(ds, switchdev_work->port);
+ dp = dsa_slave_to_port(dev);
+ ds = dp->ds;
- rtnl_lock();
switch (switchdev_work->event) {
case SWITCHDEV_FDB_ADD_TO_DEVICE:
if (switchdev_work->host_addr)
- err = dsa_port_host_fdb_add(dp, switchdev_work->addr,
- switchdev_work->vid);
+ err = dsa_port_bridge_host_fdb_add(dp, addr, vid);
+ else if (dp->lag)
+ err = dsa_port_lag_fdb_add(dp, addr, vid);
else
- err = dsa_port_fdb_add(dp, switchdev_work->addr,
- switchdev_work->vid);
+ err = dsa_port_fdb_add(dp, addr, vid);
if (err) {
dev_err(ds->dev,
"port %d failed to add %pM vid %d to fdb: %d\n",
- dp->index, switchdev_work->addr,
- switchdev_work->vid, err);
+ dp->index, addr, vid, err);
break;
}
dsa_fdb_offload_notify(switchdev_work);
@@ -2400,23 +2785,20 @@ static void dsa_slave_switchdev_event_work(struct work_struct *work)
case SWITCHDEV_FDB_DEL_TO_DEVICE:
if (switchdev_work->host_addr)
- err = dsa_port_host_fdb_del(dp, switchdev_work->addr,
- switchdev_work->vid);
+ err = dsa_port_bridge_host_fdb_del(dp, addr, vid);
+ else if (dp->lag)
+ err = dsa_port_lag_fdb_del(dp, addr, vid);
else
- err = dsa_port_fdb_del(dp, switchdev_work->addr,
- switchdev_work->vid);
+ err = dsa_port_fdb_del(dp, addr, vid);
if (err) {
dev_err(ds->dev,
"port %d failed to delete %pM vid %d from fdb: %d\n",
- dp->index, switchdev_work->addr,
- switchdev_work->vid, err);
+ dp->index, addr, vid, err);
}
break;
}
- rtnl_unlock();
- dev_put(switchdev_work->dev);
kfree(switchdev_work);
}
@@ -2427,7 +2809,7 @@ static bool dsa_foreign_dev_check(const struct net_device *dev,
struct dsa_switch_tree *dst = dp->ds->dst;
if (netif_is_bridge_master(foreign_dev))
- return !dsa_tree_offloads_bridge(dst, foreign_dev);
+ return !dsa_tree_offloads_bridge_dev(dst, foreign_dev);
if (netif_is_bridge_port(foreign_dev))
return !dsa_tree_offloads_bridge_port(dst, foreign_dev);
@@ -2437,10 +2819,9 @@ static bool dsa_foreign_dev_check(const struct net_device *dev,
}
static int dsa_slave_fdb_event(struct net_device *dev,
- const struct net_device *orig_dev,
- const void *ctx,
- const struct switchdev_notifier_fdb_info *fdb_info,
- unsigned long event)
+ struct net_device *orig_dev,
+ unsigned long event, const void *ctx,
+ const struct switchdev_notifier_fdb_info *fdb_info)
{
struct dsa_switchdev_event_work *switchdev_work;
struct dsa_port *dp = dsa_slave_to_port(dev);
@@ -2450,19 +2831,20 @@ static int dsa_slave_fdb_event(struct net_device *dev,
if (ctx && ctx != dp)
return 0;
- if (!ds->ops->port_fdb_add || !ds->ops->port_fdb_del)
- return -EOPNOTSUPP;
-
- if (dsa_slave_dev_check(orig_dev) &&
- switchdev_fdb_is_dynamically_learned(fdb_info))
+ if (!dp->bridge)
return 0;
- /* FDB entries learned by the software bridge should be installed as
- * host addresses only if the driver requests assisted learning.
- */
- if (switchdev_fdb_is_dynamically_learned(fdb_info) &&
- !ds->assisted_learning_on_cpu_port)
- return 0;
+ if (switchdev_fdb_is_dynamically_learned(fdb_info)) {
+ if (dsa_port_offloads_bridge_port(dp, orig_dev))
+ return 0;
+
+ /* FDB entries learned by the software bridge or by foreign
+ * bridge ports should be installed as host addresses only if
+ * the driver requests assisted learning.
+ */
+ if (!ds->assisted_learning_on_cpu_port)
+ return 0;
+ }
/* Also treat FDB entries on foreign interfaces bridged with us as host
* addresses.
@@ -2470,6 +2852,18 @@ static int dsa_slave_fdb_event(struct net_device *dev,
if (dsa_foreign_dev_check(dev, orig_dev))
host_addr = true;
+ /* Check early that we're not doing work in vain.
+ * Host addresses on LAG ports still require regular FDB ops,
+ * since the CPU port isn't in a LAG.
+ */
+ if (dp->lag && !host_addr) {
+ if (!ds->ops->lag_fdb_add || !ds->ops->lag_fdb_del)
+ return -EOPNOTSUPP;
+ } else {
+ if (!ds->ops->port_fdb_add || !ds->ops->port_fdb_del)
+ return -EOPNOTSUPP;
+ }
+
switchdev_work = kzalloc(sizeof(*switchdev_work), GFP_ATOMIC);
if (!switchdev_work)
return -ENOMEM;
@@ -2480,40 +2874,19 @@ static int dsa_slave_fdb_event(struct net_device *dev,
host_addr ? " as host address" : "");
INIT_WORK(&switchdev_work->work, dsa_slave_switchdev_event_work);
- switchdev_work->ds = ds;
- switchdev_work->port = dp->index;
switchdev_work->event = event;
switchdev_work->dev = dev;
+ switchdev_work->orig_dev = orig_dev;
ether_addr_copy(switchdev_work->addr, fdb_info->addr);
switchdev_work->vid = fdb_info->vid;
switchdev_work->host_addr = host_addr;
- /* Hold a reference for dsa_fdb_offload_notify */
- dev_hold(dev);
dsa_schedule_work(&switchdev_work->work);
return 0;
}
-static int
-dsa_slave_fdb_add_to_device(struct net_device *dev,
- const struct net_device *orig_dev, const void *ctx,
- const struct switchdev_notifier_fdb_info *fdb_info)
-{
- return dsa_slave_fdb_event(dev, orig_dev, ctx, fdb_info,
- SWITCHDEV_FDB_ADD_TO_DEVICE);
-}
-
-static int
-dsa_slave_fdb_del_to_device(struct net_device *dev,
- const struct net_device *orig_dev, const void *ctx,
- const struct switchdev_notifier_fdb_info *fdb_info)
-{
- return dsa_slave_fdb_event(dev, orig_dev, ctx, fdb_info,
- SWITCHDEV_FDB_DEL_TO_DEVICE);
-}
-
/* Called under rcu_read_lock() */
static int dsa_slave_switchdev_event(struct notifier_block *unused,
unsigned long event, void *ptr)
@@ -2528,18 +2901,11 @@ static int dsa_slave_switchdev_event(struct notifier_block *unused,
dsa_slave_port_attr_set);
return notifier_from_errno(err);
case SWITCHDEV_FDB_ADD_TO_DEVICE:
- err = switchdev_handle_fdb_add_to_device(dev, ptr,
- dsa_slave_dev_check,
- dsa_foreign_dev_check,
- dsa_slave_fdb_add_to_device,
- NULL);
- return notifier_from_errno(err);
case SWITCHDEV_FDB_DEL_TO_DEVICE:
- err = switchdev_handle_fdb_del_to_device(dev, ptr,
- dsa_slave_dev_check,
- dsa_foreign_dev_check,
- dsa_slave_fdb_del_to_device,
- NULL);
+ err = switchdev_handle_fdb_event_to_device(dev, event, ptr,
+ dsa_slave_dev_check,
+ dsa_foreign_dev_check,
+ dsa_slave_fdb_event);
return notifier_from_errno(err);
default:
return NOTIFY_DONE;
@@ -2556,14 +2922,16 @@ static int dsa_slave_switchdev_blocking_event(struct notifier_block *unused,
switch (event) {
case SWITCHDEV_PORT_OBJ_ADD:
- err = switchdev_handle_port_obj_add(dev, ptr,
- dsa_slave_dev_check,
- dsa_slave_port_obj_add);
+ err = switchdev_handle_port_obj_add_foreign(dev, ptr,
+ dsa_slave_dev_check,
+ dsa_foreign_dev_check,
+ dsa_slave_port_obj_add);
return notifier_from_errno(err);
case SWITCHDEV_PORT_OBJ_DEL:
- err = switchdev_handle_port_obj_del(dev, ptr,
- dsa_slave_dev_check,
- dsa_slave_port_obj_del);
+ err = switchdev_handle_port_obj_del_foreign(dev, ptr,
+ dsa_slave_dev_check,
+ dsa_foreign_dev_check,
+ dsa_slave_port_obj_del);
return notifier_from_errno(err);
case SWITCHDEV_PORT_ATTR_SET:
err = switchdev_handle_port_attr_set(dev, ptr,
diff --git a/net/dsa/switch.c b/net/dsa/switch.c
index fb69f2f14234..5bc3e8fa9e1b 100644
--- a/net/dsa/switch.c
+++ b/net/dsa/switch.c
@@ -17,14 +17,11 @@
static unsigned int dsa_switch_fastest_ageing_time(struct dsa_switch *ds,
unsigned int ageing_time)
{
- int i;
-
- for (i = 0; i < ds->num_ports; ++i) {
- struct dsa_port *dp = dsa_to_port(ds, i);
+ struct dsa_port *dp;
+ dsa_switch_for_each_port(dp, ds)
if (dp->ageing_time && dp->ageing_time < ageing_time)
ageing_time = dp->ageing_time;
- }
return ageing_time;
}
@@ -49,35 +46,25 @@ static int dsa_switch_ageing_time(struct dsa_switch *ds,
return 0;
}
-static bool dsa_switch_mtu_match(struct dsa_switch *ds, int port,
- struct dsa_notifier_mtu_info *info)
+static bool dsa_port_mtu_match(struct dsa_port *dp,
+ struct dsa_notifier_mtu_info *info)
{
- if (ds->index == info->sw_index && port == info->port)
- return true;
-
- /* Do not propagate to other switches in the tree if the notifier was
- * targeted for a single switch.
- */
- if (info->targeted_match)
- return false;
-
- if (dsa_is_dsa_port(ds, port) || dsa_is_cpu_port(ds, port))
- return true;
-
- return false;
+ return dp == info->dp || dsa_port_is_dsa(dp) || dsa_port_is_cpu(dp);
}
static int dsa_switch_mtu(struct dsa_switch *ds,
struct dsa_notifier_mtu_info *info)
{
- int port, ret;
+ struct dsa_port *dp;
+ int ret;
if (!ds->ops->port_change_mtu)
return -EOPNOTSUPP;
- for (port = 0; port < ds->num_ports; port++) {
- if (dsa_switch_mtu_match(ds, port, info)) {
- ret = ds->ops->port_change_mtu(ds, port, info->mtu);
+ dsa_switch_for_each_port(dp, ds) {
+ if (dsa_port_mtu_match(dp, info)) {
+ ret = ds->ops->port_change_mtu(ds, dp->index,
+ info->mtu);
if (ret)
return ret;
}
@@ -89,270 +76,329 @@ static int dsa_switch_mtu(struct dsa_switch *ds,
static int dsa_switch_bridge_join(struct dsa_switch *ds,
struct dsa_notifier_bridge_info *info)
{
- struct dsa_switch_tree *dst = ds->dst;
int err;
- if (dst->index == info->tree_index && ds->index == info->sw_index) {
+ if (info->dp->ds == ds) {
if (!ds->ops->port_bridge_join)
return -EOPNOTSUPP;
- err = ds->ops->port_bridge_join(ds, info->port, info->br);
+ err = ds->ops->port_bridge_join(ds, info->dp->index,
+ info->bridge,
+ &info->tx_fwd_offload,
+ info->extack);
if (err)
return err;
}
- if ((dst->index != info->tree_index || ds->index != info->sw_index) &&
- ds->ops->crosschip_bridge_join) {
- err = ds->ops->crosschip_bridge_join(ds, info->tree_index,
- info->sw_index,
- info->port, info->br);
+ if (info->dp->ds != ds && ds->ops->crosschip_bridge_join) {
+ err = ds->ops->crosschip_bridge_join(ds,
+ info->dp->ds->dst->index,
+ info->dp->ds->index,
+ info->dp->index,
+ info->bridge,
+ info->extack);
if (err)
return err;
}
- return dsa_tag_8021q_bridge_join(ds, info);
+ return 0;
}
static int dsa_switch_bridge_leave(struct dsa_switch *ds,
struct dsa_notifier_bridge_info *info)
{
- struct dsa_switch_tree *dst = ds->dst;
- struct netlink_ext_ack extack = {0};
- bool change_vlan_filtering = false;
- bool vlan_filtering;
- int err, port;
-
- if (dst->index == info->tree_index && ds->index == info->sw_index &&
- ds->ops->port_bridge_leave)
- ds->ops->port_bridge_leave(ds, info->port, info->br);
-
- if ((dst->index != info->tree_index || ds->index != info->sw_index) &&
- ds->ops->crosschip_bridge_leave)
- ds->ops->crosschip_bridge_leave(ds, info->tree_index,
- info->sw_index, info->port,
- info->br);
-
- if (ds->needs_standalone_vlan_filtering && !br_vlan_enabled(info->br)) {
- change_vlan_filtering = true;
- vlan_filtering = true;
- } else if (!ds->needs_standalone_vlan_filtering &&
- br_vlan_enabled(info->br)) {
- change_vlan_filtering = true;
- vlan_filtering = false;
- }
-
- /* If the bridge was vlan_filtering, the bridge core doesn't trigger an
- * event for changing vlan_filtering setting upon slave ports leaving
- * it. That is a good thing, because that lets us handle it and also
- * handle the case where the switch's vlan_filtering setting is global
- * (not per port). When that happens, the correct moment to trigger the
- * vlan_filtering callback is only when the last port leaves the last
- * VLAN-aware bridge.
- */
- if (change_vlan_filtering && ds->vlan_filtering_is_global) {
- for (port = 0; port < ds->num_ports; port++) {
- struct net_device *bridge_dev;
-
- bridge_dev = dsa_to_port(ds, port)->bridge_dev;
-
- if (bridge_dev && br_vlan_enabled(bridge_dev)) {
- change_vlan_filtering = false;
- break;
- }
- }
- }
+ if (info->dp->ds == ds && ds->ops->port_bridge_leave)
+ ds->ops->port_bridge_leave(ds, info->dp->index, info->bridge);
- if (change_vlan_filtering) {
- err = dsa_port_vlan_filtering(dsa_to_port(ds, info->port),
- vlan_filtering, &extack);
- if (extack._msg)
- dev_err(ds->dev, "port %d: %s\n", info->port,
- extack._msg);
- if (err && err != -EOPNOTSUPP)
- return err;
- }
+ if (info->dp->ds != ds && ds->ops->crosschip_bridge_leave)
+ ds->ops->crosschip_bridge_leave(ds, info->dp->ds->dst->index,
+ info->dp->ds->index,
+ info->dp->index,
+ info->bridge);
- return dsa_tag_8021q_bridge_leave(ds, info);
+ return 0;
}
/* Matches for all upstream-facing ports (the CPU port and all upstream-facing
* DSA links) that sit between the targeted port on which the notifier was
* emitted and its dedicated CPU port.
*/
-static bool dsa_switch_host_address_match(struct dsa_switch *ds, int port,
- int info_sw_index, int info_port)
+static bool dsa_port_host_address_match(struct dsa_port *dp,
+ const struct dsa_port *targeted_dp)
{
- struct dsa_port *targeted_dp, *cpu_dp;
- struct dsa_switch *targeted_ds;
-
- targeted_ds = dsa_switch_find(ds->dst->index, info_sw_index);
- targeted_dp = dsa_to_port(targeted_ds, info_port);
- cpu_dp = targeted_dp->cpu_dp;
+ struct dsa_port *cpu_dp = targeted_dp->cpu_dp;
- if (dsa_switch_is_upstream_of(ds, targeted_ds))
- return port == dsa_towards_port(ds, cpu_dp->ds->index,
- cpu_dp->index);
+ if (dsa_switch_is_upstream_of(dp->ds, targeted_dp->ds))
+ return dp->index == dsa_towards_port(dp->ds, cpu_dp->ds->index,
+ cpu_dp->index);
return false;
}
static struct dsa_mac_addr *dsa_mac_addr_find(struct list_head *addr_list,
- const unsigned char *addr,
- u16 vid)
+ const unsigned char *addr, u16 vid,
+ struct dsa_db db)
{
struct dsa_mac_addr *a;
list_for_each_entry(a, addr_list, list)
- if (ether_addr_equal(a->addr, addr) && a->vid == vid)
+ if (ether_addr_equal(a->addr, addr) && a->vid == vid &&
+ dsa_db_equal(&a->db, &db))
return a;
return NULL;
}
-static int dsa_switch_do_mdb_add(struct dsa_switch *ds, int port,
- const struct switchdev_obj_port_mdb *mdb)
+static int dsa_port_do_mdb_add(struct dsa_port *dp,
+ const struct switchdev_obj_port_mdb *mdb,
+ struct dsa_db db)
{
- struct dsa_port *dp = dsa_to_port(ds, port);
+ struct dsa_switch *ds = dp->ds;
struct dsa_mac_addr *a;
- int err;
+ int port = dp->index;
+ int err = 0;
/* No need to bother with refcounting for user ports */
if (!(dsa_port_is_cpu(dp) || dsa_port_is_dsa(dp)))
- return ds->ops->port_mdb_add(ds, port, mdb);
+ return ds->ops->port_mdb_add(ds, port, mdb, db);
+
+ mutex_lock(&dp->addr_lists_lock);
- a = dsa_mac_addr_find(&dp->mdbs, mdb->addr, mdb->vid);
+ a = dsa_mac_addr_find(&dp->mdbs, mdb->addr, mdb->vid, db);
if (a) {
refcount_inc(&a->refcount);
- return 0;
+ goto out;
}
a = kzalloc(sizeof(*a), GFP_KERNEL);
- if (!a)
- return -ENOMEM;
+ if (!a) {
+ err = -ENOMEM;
+ goto out;
+ }
- err = ds->ops->port_mdb_add(ds, port, mdb);
+ err = ds->ops->port_mdb_add(ds, port, mdb, db);
if (err) {
kfree(a);
- return err;
+ goto out;
}
ether_addr_copy(a->addr, mdb->addr);
a->vid = mdb->vid;
+ a->db = db;
refcount_set(&a->refcount, 1);
list_add_tail(&a->list, &dp->mdbs);
- return 0;
+out:
+ mutex_unlock(&dp->addr_lists_lock);
+
+ return err;
}
-static int dsa_switch_do_mdb_del(struct dsa_switch *ds, int port,
- const struct switchdev_obj_port_mdb *mdb)
+static int dsa_port_do_mdb_del(struct dsa_port *dp,
+ const struct switchdev_obj_port_mdb *mdb,
+ struct dsa_db db)
{
- struct dsa_port *dp = dsa_to_port(ds, port);
+ struct dsa_switch *ds = dp->ds;
struct dsa_mac_addr *a;
- int err;
+ int port = dp->index;
+ int err = 0;
/* No need to bother with refcounting for user ports */
if (!(dsa_port_is_cpu(dp) || dsa_port_is_dsa(dp)))
- return ds->ops->port_mdb_del(ds, port, mdb);
+ return ds->ops->port_mdb_del(ds, port, mdb, db);
- a = dsa_mac_addr_find(&dp->mdbs, mdb->addr, mdb->vid);
- if (!a)
- return -ENOENT;
+ mutex_lock(&dp->addr_lists_lock);
+
+ a = dsa_mac_addr_find(&dp->mdbs, mdb->addr, mdb->vid, db);
+ if (!a) {
+ err = -ENOENT;
+ goto out;
+ }
if (!refcount_dec_and_test(&a->refcount))
- return 0;
+ goto out;
- err = ds->ops->port_mdb_del(ds, port, mdb);
+ err = ds->ops->port_mdb_del(ds, port, mdb, db);
if (err) {
refcount_set(&a->refcount, 1);
- return err;
+ goto out;
}
list_del(&a->list);
kfree(a);
- return 0;
+out:
+ mutex_unlock(&dp->addr_lists_lock);
+
+ return err;
}
-static int dsa_switch_do_fdb_add(struct dsa_switch *ds, int port,
- const unsigned char *addr, u16 vid)
+static int dsa_port_do_fdb_add(struct dsa_port *dp, const unsigned char *addr,
+ u16 vid, struct dsa_db db)
{
- struct dsa_port *dp = dsa_to_port(ds, port);
+ struct dsa_switch *ds = dp->ds;
struct dsa_mac_addr *a;
- int err;
+ int port = dp->index;
+ int err = 0;
/* No need to bother with refcounting for user ports */
if (!(dsa_port_is_cpu(dp) || dsa_port_is_dsa(dp)))
- return ds->ops->port_fdb_add(ds, port, addr, vid);
+ return ds->ops->port_fdb_add(ds, port, addr, vid, db);
- a = dsa_mac_addr_find(&dp->fdbs, addr, vid);
+ mutex_lock(&dp->addr_lists_lock);
+
+ a = dsa_mac_addr_find(&dp->fdbs, addr, vid, db);
if (a) {
refcount_inc(&a->refcount);
- return 0;
+ goto out;
}
a = kzalloc(sizeof(*a), GFP_KERNEL);
- if (!a)
- return -ENOMEM;
+ if (!a) {
+ err = -ENOMEM;
+ goto out;
+ }
- err = ds->ops->port_fdb_add(ds, port, addr, vid);
+ err = ds->ops->port_fdb_add(ds, port, addr, vid, db);
if (err) {
kfree(a);
- return err;
+ goto out;
}
ether_addr_copy(a->addr, addr);
a->vid = vid;
+ a->db = db;
refcount_set(&a->refcount, 1);
list_add_tail(&a->list, &dp->fdbs);
- return 0;
+out:
+ mutex_unlock(&dp->addr_lists_lock);
+
+ return err;
}
-static int dsa_switch_do_fdb_del(struct dsa_switch *ds, int port,
- const unsigned char *addr, u16 vid)
+static int dsa_port_do_fdb_del(struct dsa_port *dp, const unsigned char *addr,
+ u16 vid, struct dsa_db db)
{
- struct dsa_port *dp = dsa_to_port(ds, port);
+ struct dsa_switch *ds = dp->ds;
struct dsa_mac_addr *a;
- int err;
+ int port = dp->index;
+ int err = 0;
/* No need to bother with refcounting for user ports */
if (!(dsa_port_is_cpu(dp) || dsa_port_is_dsa(dp)))
- return ds->ops->port_fdb_del(ds, port, addr, vid);
+ return ds->ops->port_fdb_del(ds, port, addr, vid, db);
- a = dsa_mac_addr_find(&dp->fdbs, addr, vid);
- if (!a)
- return -ENOENT;
+ mutex_lock(&dp->addr_lists_lock);
+
+ a = dsa_mac_addr_find(&dp->fdbs, addr, vid, db);
+ if (!a) {
+ err = -ENOENT;
+ goto out;
+ }
if (!refcount_dec_and_test(&a->refcount))
- return 0;
+ goto out;
- err = ds->ops->port_fdb_del(ds, port, addr, vid);
+ err = ds->ops->port_fdb_del(ds, port, addr, vid, db);
if (err) {
refcount_set(&a->refcount, 1);
- return err;
+ goto out;
}
list_del(&a->list);
kfree(a);
- return 0;
+out:
+ mutex_unlock(&dp->addr_lists_lock);
+
+ return err;
+}
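/* A minimal illustrative sketch of the driver-facing side of the change
 * above: .port_fdb_add/.port_fdb_del now also receive the database the
 * address belongs to. The callback name below is hypothetical; drivers
 * that cannot isolate databases may simply ignore @db.
 */
static int example_port_fdb_add(struct dsa_switch *ds, int port,
				const unsigned char *addr, u16 vid,
				struct dsa_db db)
{
	/* Program @addr/@vid on @port; @db identifies whether the entry
	 * belongs to the port's standalone database, a bridge or a LAG.
	 */
	return 0;
}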
+
+static int dsa_switch_do_lag_fdb_add(struct dsa_switch *ds, struct dsa_lag *lag,
+ const unsigned char *addr, u16 vid,
+ struct dsa_db db)
+{
+ struct dsa_mac_addr *a;
+ int err = 0;
+
+ mutex_lock(&lag->fdb_lock);
+
+ a = dsa_mac_addr_find(&lag->fdbs, addr, vid, db);
+ if (a) {
+ refcount_inc(&a->refcount);
+ goto out;
+ }
+
+ a = kzalloc(sizeof(*a), GFP_KERNEL);
+ if (!a) {
+ err = -ENOMEM;
+ goto out;
+ }
+
+ err = ds->ops->lag_fdb_add(ds, *lag, addr, vid, db);
+ if (err) {
+ kfree(a);
+ goto out;
+ }
+
+ ether_addr_copy(a->addr, addr);
+ a->vid = vid;
+ refcount_set(&a->refcount, 1);
+ list_add_tail(&a->list, &lag->fdbs);
+
+out:
+ mutex_unlock(&lag->fdb_lock);
+
+ return err;
+}
+
+static int dsa_switch_do_lag_fdb_del(struct dsa_switch *ds, struct dsa_lag *lag,
+ const unsigned char *addr, u16 vid,
+ struct dsa_db db)
+{
+ struct dsa_mac_addr *a;
+ int err = 0;
+
+ mutex_lock(&lag->fdb_lock);
+
+ a = dsa_mac_addr_find(&lag->fdbs, addr, vid, db);
+ if (!a) {
+ err = -ENOENT;
+ goto out;
+ }
+
+ if (!refcount_dec_and_test(&a->refcount))
+ goto out;
+
+ err = ds->ops->lag_fdb_del(ds, *lag, addr, vid, db);
+ if (err) {
+ refcount_set(&a->refcount, 1);
+ goto out;
+ }
+
+ list_del(&a->list);
+ kfree(a);
+
+out:
+ mutex_unlock(&lag->fdb_lock);
+
+ return err;
}
static int dsa_switch_host_fdb_add(struct dsa_switch *ds,
struct dsa_notifier_fdb_info *info)
{
+ struct dsa_port *dp;
int err = 0;
- int port;
if (!ds->ops->port_fdb_add)
return -EOPNOTSUPP;
- for (port = 0; port < ds->num_ports; port++) {
- if (dsa_switch_host_address_match(ds, port, info->sw_index,
- info->port)) {
- err = dsa_switch_do_fdb_add(ds, port, info->addr,
- info->vid);
+ dsa_switch_for_each_port(dp, ds) {
+ if (dsa_port_host_address_match(dp, info->dp)) {
+ err = dsa_port_do_fdb_add(dp, info->addr, info->vid,
+ info->db);
if (err)
break;
}
@@ -364,17 +410,16 @@ static int dsa_switch_host_fdb_add(struct dsa_switch *ds,
static int dsa_switch_host_fdb_del(struct dsa_switch *ds,
struct dsa_notifier_fdb_info *info)
{
+ struct dsa_port *dp;
int err = 0;
- int port;
if (!ds->ops->port_fdb_del)
return -EOPNOTSUPP;
- for (port = 0; port < ds->num_ports; port++) {
- if (dsa_switch_host_address_match(ds, port, info->sw_index,
- info->port)) {
- err = dsa_switch_do_fdb_del(ds, port, info->addr,
- info->vid);
+ dsa_switch_for_each_port(dp, ds) {
+ if (dsa_port_host_address_match(dp, info->dp)) {
+ err = dsa_port_do_fdb_del(dp, info->addr, info->vid,
+ info->db);
if (err)
break;
}
@@ -386,52 +431,72 @@ static int dsa_switch_host_fdb_del(struct dsa_switch *ds,
static int dsa_switch_fdb_add(struct dsa_switch *ds,
struct dsa_notifier_fdb_info *info)
{
- int port = dsa_towards_port(ds, info->sw_index, info->port);
+ int port = dsa_towards_port(ds, info->dp->ds->index, info->dp->index);
+ struct dsa_port *dp = dsa_to_port(ds, port);
if (!ds->ops->port_fdb_add)
return -EOPNOTSUPP;
- return dsa_switch_do_fdb_add(ds, port, info->addr, info->vid);
+ return dsa_port_do_fdb_add(dp, info->addr, info->vid, info->db);
}
static int dsa_switch_fdb_del(struct dsa_switch *ds,
struct dsa_notifier_fdb_info *info)
{
- int port = dsa_towards_port(ds, info->sw_index, info->port);
+ int port = dsa_towards_port(ds, info->dp->ds->index, info->dp->index);
+ struct dsa_port *dp = dsa_to_port(ds, port);
if (!ds->ops->port_fdb_del)
return -EOPNOTSUPP;
- return dsa_switch_do_fdb_del(ds, port, info->addr, info->vid);
+ return dsa_port_do_fdb_del(dp, info->addr, info->vid, info->db);
}
-static int dsa_switch_hsr_join(struct dsa_switch *ds,
- struct dsa_notifier_hsr_info *info)
+static int dsa_switch_lag_fdb_add(struct dsa_switch *ds,
+ struct dsa_notifier_lag_fdb_info *info)
{
- if (ds->index == info->sw_index && ds->ops->port_hsr_join)
- return ds->ops->port_hsr_join(ds, info->port, info->hsr);
+ struct dsa_port *dp;
- return -EOPNOTSUPP;
+ if (!ds->ops->lag_fdb_add)
+ return -EOPNOTSUPP;
+
+ /* Notify switch only if it has a port in this LAG */
+ dsa_switch_for_each_port(dp, ds)
+ if (dsa_port_offloads_lag(dp, info->lag))
+ return dsa_switch_do_lag_fdb_add(ds, info->lag,
+ info->addr, info->vid,
+ info->db);
+
+ return 0;
}
-static int dsa_switch_hsr_leave(struct dsa_switch *ds,
- struct dsa_notifier_hsr_info *info)
+static int dsa_switch_lag_fdb_del(struct dsa_switch *ds,
+ struct dsa_notifier_lag_fdb_info *info)
{
- if (ds->index == info->sw_index && ds->ops->port_hsr_leave)
- return ds->ops->port_hsr_leave(ds, info->port, info->hsr);
+ struct dsa_port *dp;
- return -EOPNOTSUPP;
+ if (!ds->ops->lag_fdb_del)
+ return -EOPNOTSUPP;
+
+ /* Notify switch only if it has a port in this LAG */
+ dsa_switch_for_each_port(dp, ds)
+ if (dsa_port_offloads_lag(dp, info->lag))
+ return dsa_switch_do_lag_fdb_del(ds, info->lag,
+ info->addr, info->vid,
+ info->db);
+
+ return 0;
}
static int dsa_switch_lag_change(struct dsa_switch *ds,
struct dsa_notifier_lag_info *info)
{
- if (ds->index == info->sw_index && ds->ops->port_lag_change)
- return ds->ops->port_lag_change(ds, info->port);
+ if (info->dp->ds == ds && ds->ops->port_lag_change)
+ return ds->ops->port_lag_change(ds, info->dp->index);
- if (ds->index != info->sw_index && ds->ops->crosschip_lag_change)
- return ds->ops->crosschip_lag_change(ds, info->sw_index,
- info->port);
+ if (info->dp->ds != ds && ds->ops->crosschip_lag_change)
+ return ds->ops->crosschip_lag_change(ds, info->dp->ds->index,
+ info->dp->index);
return 0;
}
@@ -439,13 +504,13 @@ static int dsa_switch_lag_change(struct dsa_switch *ds,
static int dsa_switch_lag_join(struct dsa_switch *ds,
struct dsa_notifier_lag_info *info)
{
- if (ds->index == info->sw_index && ds->ops->port_lag_join)
- return ds->ops->port_lag_join(ds, info->port, info->lag,
+ if (info->dp->ds == ds && ds->ops->port_lag_join)
+ return ds->ops->port_lag_join(ds, info->dp->index, info->lag,
info->info);
- if (ds->index != info->sw_index && ds->ops->crosschip_lag_join)
- return ds->ops->crosschip_lag_join(ds, info->sw_index,
- info->port, info->lag,
+ if (info->dp->ds != ds && ds->ops->crosschip_lag_join)
+ return ds->ops->crosschip_lag_join(ds, info->dp->ds->index,
+ info->dp->index, info->lag,
info->info);
return -EOPNOTSUPP;
@@ -454,12 +519,12 @@ static int dsa_switch_lag_join(struct dsa_switch *ds,
static int dsa_switch_lag_leave(struct dsa_switch *ds,
struct dsa_notifier_lag_info *info)
{
- if (ds->index == info->sw_index && ds->ops->port_lag_leave)
- return ds->ops->port_lag_leave(ds, info->port, info->lag);
+ if (info->dp->ds == ds && ds->ops->port_lag_leave)
+ return ds->ops->port_lag_leave(ds, info->dp->index, info->lag);
- if (ds->index != info->sw_index && ds->ops->crosschip_lag_leave)
- return ds->ops->crosschip_lag_leave(ds, info->sw_index,
- info->port, info->lag);
+ if (info->dp->ds != ds && ds->ops->crosschip_lag_leave)
+ return ds->ops->crosschip_lag_leave(ds, info->dp->ds->index,
+ info->dp->index, info->lag);
return -EOPNOTSUPP;
}
@@ -467,38 +532,39 @@ static int dsa_switch_lag_leave(struct dsa_switch *ds,
static int dsa_switch_mdb_add(struct dsa_switch *ds,
struct dsa_notifier_mdb_info *info)
{
- int port = dsa_towards_port(ds, info->sw_index, info->port);
+ int port = dsa_towards_port(ds, info->dp->ds->index, info->dp->index);
+ struct dsa_port *dp = dsa_to_port(ds, port);
if (!ds->ops->port_mdb_add)
return -EOPNOTSUPP;
- return dsa_switch_do_mdb_add(ds, port, info->mdb);
+ return dsa_port_do_mdb_add(dp, info->mdb, info->db);
}
static int dsa_switch_mdb_del(struct dsa_switch *ds,
struct dsa_notifier_mdb_info *info)
{
- int port = dsa_towards_port(ds, info->sw_index, info->port);
+ int port = dsa_towards_port(ds, info->dp->ds->index, info->dp->index);
+ struct dsa_port *dp = dsa_to_port(ds, port);
if (!ds->ops->port_mdb_del)
return -EOPNOTSUPP;
- return dsa_switch_do_mdb_del(ds, port, info->mdb);
+ return dsa_port_do_mdb_del(dp, info->mdb, info->db);
}
static int dsa_switch_host_mdb_add(struct dsa_switch *ds,
struct dsa_notifier_mdb_info *info)
{
+ struct dsa_port *dp;
int err = 0;
- int port;
if (!ds->ops->port_mdb_add)
return -EOPNOTSUPP;
- for (port = 0; port < ds->num_ports; port++) {
- if (dsa_switch_host_address_match(ds, port, info->sw_index,
- info->port)) {
- err = dsa_switch_do_mdb_add(ds, port, info->mdb);
+ dsa_switch_for_each_port(dp, ds) {
+ if (dsa_port_host_address_match(dp, info->dp)) {
+ err = dsa_port_do_mdb_add(dp, info->mdb, info->db);
if (err)
break;
}
@@ -510,16 +576,15 @@ static int dsa_switch_host_mdb_add(struct dsa_switch *ds,
static int dsa_switch_host_mdb_del(struct dsa_switch *ds,
struct dsa_notifier_mdb_info *info)
{
+ struct dsa_port *dp;
int err = 0;
- int port;
if (!ds->ops->port_mdb_del)
return -EOPNOTSUPP;
- for (port = 0; port < ds->num_ports; port++) {
- if (dsa_switch_host_address_match(ds, port, info->sw_index,
- info->port)) {
- err = dsa_switch_do_mdb_del(ds, port, info->mdb);
+ dsa_switch_for_each_port(dp, ds) {
+ if (dsa_port_host_address_match(dp, info->dp)) {
+ err = dsa_port_do_mdb_del(dp, info->mdb, info->db);
if (err)
break;
}
@@ -528,30 +593,141 @@ static int dsa_switch_host_mdb_del(struct dsa_switch *ds,
return err;
}
-static bool dsa_switch_vlan_match(struct dsa_switch *ds, int port,
- struct dsa_notifier_vlan_info *info)
+/* Port VLANs match on the targeted port and on all DSA ports */
+static bool dsa_port_vlan_match(struct dsa_port *dp,
+ struct dsa_notifier_vlan_info *info)
+{
+ return dsa_port_is_dsa(dp) || dp == info->dp;
+}
+
+/* Host VLANs match on the targeted port's CPU port, and on all DSA ports
+ * (upstream and downstream) of that switch and its upstream switches.
+ */
+static bool dsa_port_host_vlan_match(struct dsa_port *dp,
+ const struct dsa_port *targeted_dp)
{
- if (ds->index == info->sw_index && port == info->port)
- return true;
+ struct dsa_port *cpu_dp = targeted_dp->cpu_dp;
- if (dsa_is_dsa_port(ds, port))
- return true;
+ if (dsa_switch_is_upstream_of(dp->ds, targeted_dp->ds))
+ return dsa_port_is_dsa(dp) || dp == cpu_dp;
return false;
}
+static struct dsa_vlan *dsa_vlan_find(struct list_head *vlan_list,
+ const struct switchdev_obj_port_vlan *vlan)
+{
+ struct dsa_vlan *v;
+
+ list_for_each_entry(v, vlan_list, list)
+ if (v->vid == vlan->vid)
+ return v;
+
+ return NULL;
+}
+
+static int dsa_port_do_vlan_add(struct dsa_port *dp,
+ const struct switchdev_obj_port_vlan *vlan,
+ struct netlink_ext_ack *extack)
+{
+ struct dsa_switch *ds = dp->ds;
+ int port = dp->index;
+ struct dsa_vlan *v;
+ int err = 0;
+
+ /* No need to bother with refcounting for user ports. */
+ if (!(dsa_port_is_cpu(dp) || dsa_port_is_dsa(dp)))
+ return ds->ops->port_vlan_add(ds, port, vlan, extack);
+
+ /* No need to propagate on shared ports the existing VLANs that were
+ * re-notified after just the flags have changed. This would cause a
+ * refcount bump which we need to avoid, since it unbalances the
+ * additions with the deletions.
+ */
+ if (vlan->changed)
+ return 0;
+
+ mutex_lock(&dp->vlans_lock);
+
+ v = dsa_vlan_find(&dp->vlans, vlan);
+ if (v) {
+ refcount_inc(&v->refcount);
+ goto out;
+ }
+
+ v = kzalloc(sizeof(*v), GFP_KERNEL);
+ if (!v) {
+ err = -ENOMEM;
+ goto out;
+ }
+
+ err = ds->ops->port_vlan_add(ds, port, vlan, extack);
+ if (err) {
+ kfree(v);
+ goto out;
+ }
+
+ v->vid = vlan->vid;
+ refcount_set(&v->refcount, 1);
+ list_add_tail(&v->list, &dp->vlans);
+
+out:
+ mutex_unlock(&dp->vlans_lock);
+
+ return err;
+}
+
+static int dsa_port_do_vlan_del(struct dsa_port *dp,
+ const struct switchdev_obj_port_vlan *vlan)
+{
+ struct dsa_switch *ds = dp->ds;
+ int port = dp->index;
+ struct dsa_vlan *v;
+ int err = 0;
+
+ /* No need to bother with refcounting for user ports */
+ if (!(dsa_port_is_cpu(dp) || dsa_port_is_dsa(dp)))
+ return ds->ops->port_vlan_del(ds, port, vlan);
+
+ mutex_lock(&dp->vlans_lock);
+
+ v = dsa_vlan_find(&dp->vlans, vlan);
+ if (!v) {
+ err = -ENOENT;
+ goto out;
+ }
+
+ if (!refcount_dec_and_test(&v->refcount))
+ goto out;
+
+ err = ds->ops->port_vlan_del(ds, port, vlan);
+ if (err) {
+ refcount_set(&v->refcount, 1);
+ goto out;
+ }
+
+ list_del(&v->list);
+ kfree(v);
+
+out:
+ mutex_unlock(&dp->vlans_lock);
+
+ return err;
+}
+
static int dsa_switch_vlan_add(struct dsa_switch *ds,
struct dsa_notifier_vlan_info *info)
{
- int port, err;
+ struct dsa_port *dp;
+ int err;
if (!ds->ops->port_vlan_add)
return -EOPNOTSUPP;
- for (port = 0; port < ds->num_ports; port++) {
- if (dsa_switch_vlan_match(ds, port, info)) {
- err = ds->ops->port_vlan_add(ds, port, info->vlan,
- info->extack);
+ dsa_switch_for_each_port(dp, ds) {
+ if (dsa_port_vlan_match(dp, info)) {
+ err = dsa_port_do_vlan_add(dp, info->vlan,
+ info->extack);
if (err)
return err;
}
@@ -563,15 +739,61 @@ static int dsa_switch_vlan_add(struct dsa_switch *ds,
static int dsa_switch_vlan_del(struct dsa_switch *ds,
struct dsa_notifier_vlan_info *info)
{
+ struct dsa_port *dp;
+ int err;
+
if (!ds->ops->port_vlan_del)
return -EOPNOTSUPP;
- if (ds->index == info->sw_index)
- return ds->ops->port_vlan_del(ds, info->port, info->vlan);
+ dsa_switch_for_each_port(dp, ds) {
+ if (dsa_port_vlan_match(dp, info)) {
+ err = dsa_port_do_vlan_del(dp, info->vlan);
+ if (err)
+ return err;
+ }
+ }
+
+ return 0;
+}
+
+static int dsa_switch_host_vlan_add(struct dsa_switch *ds,
+ struct dsa_notifier_vlan_info *info)
+{
+ struct dsa_port *dp;
+ int err;
+
+ if (!ds->ops->port_vlan_add)
+ return -EOPNOTSUPP;
+
+ dsa_switch_for_each_port(dp, ds) {
+ if (dsa_port_host_vlan_match(dp, info->dp)) {
+ err = dsa_port_do_vlan_add(dp, info->vlan,
+ info->extack);
+ if (err)
+ return err;
+ }
+ }
+
+ return 0;
+}
+
+static int dsa_switch_host_vlan_del(struct dsa_switch *ds,
+ struct dsa_notifier_vlan_info *info)
+{
+ struct dsa_port *dp;
+ int err;
+
+ if (!ds->ops->port_vlan_del)
+ return -EOPNOTSUPP;
+
+ dsa_switch_for_each_port(dp, ds) {
+ if (dsa_port_host_vlan_match(dp, info->dp)) {
+ err = dsa_port_do_vlan_del(dp, info->vlan);
+ if (err)
+ return err;
+ }
+ }
- /* Do not deprogram the DSA links as they may be used as conduit
- * for other VLAN members in the fabric.
- */
return 0;
}
@@ -579,92 +801,90 @@ static int dsa_switch_change_tag_proto(struct dsa_switch *ds,
struct dsa_notifier_tag_proto_info *info)
{
const struct dsa_device_ops *tag_ops = info->tag_ops;
- int port, err;
+ struct dsa_port *dp, *cpu_dp;
+ int err;
if (!ds->ops->change_tag_protocol)
return -EOPNOTSUPP;
ASSERT_RTNL();
- for (port = 0; port < ds->num_ports; port++) {
- if (!dsa_is_cpu_port(ds, port))
- continue;
-
- err = ds->ops->change_tag_protocol(ds, port, tag_ops->proto);
+ dsa_switch_for_each_cpu_port(cpu_dp, ds) {
+ err = ds->ops->change_tag_protocol(ds, cpu_dp->index,
+ tag_ops->proto);
if (err)
return err;
- dsa_port_set_tag_protocol(dsa_to_port(ds, port), tag_ops);
+ dsa_port_set_tag_protocol(cpu_dp, tag_ops);
}
/* Now that changing the tag protocol can no longer fail, let's update
* the remaining bits which are "duplicated for faster access", and the
* bits that depend on the tagger, such as the MTU.
*/
- for (port = 0; port < ds->num_ports; port++) {
- if (dsa_is_user_port(ds, port)) {
- struct net_device *slave;
+ dsa_switch_for_each_user_port(dp, ds) {
+ struct net_device *slave = dp->slave;
- slave = dsa_to_port(ds, port)->slave;
- dsa_slave_setup_tagger(slave);
+ dsa_slave_setup_tagger(slave);
- /* rtnl_mutex is held in dsa_tree_change_tag_proto */
- dsa_slave_change_mtu(slave, slave->mtu);
- }
+ /* rtnl_mutex is held in dsa_tree_change_tag_proto */
+ dsa_slave_change_mtu(slave, slave->mtu);
}
return 0;
}
-static int dsa_switch_mrp_add(struct dsa_switch *ds,
- struct dsa_notifier_mrp_info *info)
-{
- if (!ds->ops->port_mrp_add)
- return -EOPNOTSUPP;
-
- if (ds->index == info->sw_index)
- return ds->ops->port_mrp_add(ds, info->port, info->mrp);
-
- return 0;
-}
-
-static int dsa_switch_mrp_del(struct dsa_switch *ds,
- struct dsa_notifier_mrp_info *info)
+/* We use the same cross-chip notifiers to inform both the tagger side and
+ * the switch side of connection and disconnection events.
+ * Since ds->tagger_data is owned by the tagger, it isn't a hard error if the
+ * switch side doesn't support connecting to this tagger, and therefore, the
+ * fact that we don't disconnect the tagger side doesn't constitute a memory
+ * leak: the tagger will still operate with persistent per-switch memory, just
+ * with the switch side unconnected to it. What does constitute a hard error is
+ * when the switch side supports connecting but fails.
+ */
+static int
+dsa_switch_connect_tag_proto(struct dsa_switch *ds,
+ struct dsa_notifier_tag_proto_info *info)
{
- if (!ds->ops->port_mrp_del)
- return -EOPNOTSUPP;
-
- if (ds->index == info->sw_index)
- return ds->ops->port_mrp_del(ds, info->port, info->mrp);
+ const struct dsa_device_ops *tag_ops = info->tag_ops;
+ int err;
- return 0;
-}
+ /* Notify the new tagger about the connection to this switch */
+ if (tag_ops->connect) {
+ err = tag_ops->connect(ds);
+ if (err)
+ return err;
+ }
-static int
-dsa_switch_mrp_add_ring_role(struct dsa_switch *ds,
- struct dsa_notifier_mrp_ring_role_info *info)
-{
- if (!ds->ops->port_mrp_add_ring_role)
+ if (!ds->ops->connect_tag_protocol)
return -EOPNOTSUPP;
- if (ds->index == info->sw_index)
- return ds->ops->port_mrp_add_ring_role(ds, info->port,
- info->mrp);
+ /* Notify the switch about the connection to the new tagger */
+ err = ds->ops->connect_tag_protocol(ds, tag_ops->proto);
+ if (err) {
+ /* Revert the new tagger's connection to this tree */
+ if (tag_ops->disconnect)
+ tag_ops->disconnect(ds);
+ return err;
+ }
return 0;
}
static int
-dsa_switch_mrp_del_ring_role(struct dsa_switch *ds,
- struct dsa_notifier_mrp_ring_role_info *info)
+dsa_switch_disconnect_tag_proto(struct dsa_switch *ds,
+ struct dsa_notifier_tag_proto_info *info)
{
- if (!ds->ops->port_mrp_del_ring_role)
- return -EOPNOTSUPP;
+ const struct dsa_device_ops *tag_ops = info->tag_ops;
- if (ds->index == info->sw_index)
- return ds->ops->port_mrp_del_ring_role(ds, info->port,
- info->mrp);
+ /* Notify the tagger about the disconnection from this switch */
+ if (tag_ops->disconnect && ds->tagger_data)
+ tag_ops->disconnect(ds);
+ /* No need to notify the switch, since it shouldn't have any
+ * resources to tear down
+ */
return 0;
}
@@ -696,11 +916,11 @@ static int dsa_switch_event(struct notifier_block *nb,
case DSA_NOTIFIER_HOST_FDB_DEL:
err = dsa_switch_host_fdb_del(ds, info);
break;
- case DSA_NOTIFIER_HSR_JOIN:
- err = dsa_switch_hsr_join(ds, info);
+ case DSA_NOTIFIER_LAG_FDB_ADD:
+ err = dsa_switch_lag_fdb_add(ds, info);
break;
- case DSA_NOTIFIER_HSR_LEAVE:
- err = dsa_switch_hsr_leave(ds, info);
+ case DSA_NOTIFIER_LAG_FDB_DEL:
+ err = dsa_switch_lag_fdb_del(ds, info);
break;
case DSA_NOTIFIER_LAG_CHANGE:
err = dsa_switch_lag_change(ds, info);
@@ -729,23 +949,23 @@ static int dsa_switch_event(struct notifier_block *nb,
case DSA_NOTIFIER_VLAN_DEL:
err = dsa_switch_vlan_del(ds, info);
break;
+ case DSA_NOTIFIER_HOST_VLAN_ADD:
+ err = dsa_switch_host_vlan_add(ds, info);
+ break;
+ case DSA_NOTIFIER_HOST_VLAN_DEL:
+ err = dsa_switch_host_vlan_del(ds, info);
+ break;
case DSA_NOTIFIER_MTU:
err = dsa_switch_mtu(ds, info);
break;
case DSA_NOTIFIER_TAG_PROTO:
err = dsa_switch_change_tag_proto(ds, info);
break;
- case DSA_NOTIFIER_MRP_ADD:
- err = dsa_switch_mrp_add(ds, info);
- break;
- case DSA_NOTIFIER_MRP_DEL:
- err = dsa_switch_mrp_del(ds, info);
- break;
- case DSA_NOTIFIER_MRP_ADD_RING_ROLE:
- err = dsa_switch_mrp_add_ring_role(ds, info);
+ case DSA_NOTIFIER_TAG_PROTO_CONNECT:
+ err = dsa_switch_connect_tag_proto(ds, info);
break;
- case DSA_NOTIFIER_MRP_DEL_RING_ROLE:
- err = dsa_switch_mrp_del_ring_role(ds, info);
+ case DSA_NOTIFIER_TAG_PROTO_DISCONNECT:
+ err = dsa_switch_disconnect_tag_proto(ds, info);
break;
case DSA_NOTIFIER_TAG_8021Q_VLAN_ADD:
err = dsa_switch_tag_8021q_vlan_add(ds, info);
diff --git a/net/dsa/tag_8021q.c b/net/dsa/tag_8021q.c
index e443088ab0f6..185046d4dcc2 100644
--- a/net/dsa/tag_8021q.c
+++ b/net/dsa/tag_8021q.c
@@ -17,15 +17,11 @@
*
* | 11 | 10 | 9 | 8 | 7 | 6 | 5 | 4 | 3 | 2 | 1 | 0 |
* +-----------+-----+-----------------+-----------+-----------------------+
- * | DIR | VBID| SWITCH_ID | VBID | PORT |
+ * | RSV | VBID| SWITCH_ID | VBID | PORT |
* +-----------+-----+-----------------+-----------+-----------------------+
*
- * DIR - VID[11:10]:
- * Direction flags.
- * * 1 (0b01) for RX VLAN,
- * * 2 (0b10) for TX VLAN.
- * These values make the special VIDs of 0, 1 and 4095 to be left
- * unused by this coding scheme.
+ * RSV - VID[11:10]:
+ * Reserved. Must be set to 3 (0b11).
*
* SWITCH_ID - VID[8:6]:
* Index of switch within DSA tree. Must be between 0 and 7.
@@ -33,18 +29,17 @@
* VBID - { VID[9], VID[5:4] }:
* Virtual bridge ID. If between 1 and 7, packet targets the broadcast
* domain of a bridge. If transmitted as zero, packet targets a single
- * port. Field only valid on transmit, must be ignored on receive.
+ * port.
*
* PORT - VID[3:0]:
* Index of switch port. Must be between 0 and 15.
*/
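/* A worked sketch of the packing above, assuming the DSA_8021Q_* macros
 * defined below: the standalone VID of port 5 on switch 2 is
 * DSA_8021Q_RSV | DSA_8021Q_SWITCH_ID(2) | DSA_8021Q_PORT(5) = 0xc85,
 * while the VID of bridge number 1 is
 * DSA_8021Q_RSV | DSA_8021Q_VBID(1) = 0xc10, with the VBID split across
 * VID[9] (high bit) and VID[5:4] (low bits).
 */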
-#define DSA_8021Q_DIR_SHIFT 10
-#define DSA_8021Q_DIR_MASK GENMASK(11, 10)
-#define DSA_8021Q_DIR(x) (((x) << DSA_8021Q_DIR_SHIFT) & \
- DSA_8021Q_DIR_MASK)
-#define DSA_8021Q_DIR_RX DSA_8021Q_DIR(1)
-#define DSA_8021Q_DIR_TX DSA_8021Q_DIR(2)
+#define DSA_8021Q_RSV_VAL 3
+#define DSA_8021Q_RSV_SHIFT 10
+#define DSA_8021Q_RSV_MASK GENMASK(11, 10)
+#define DSA_8021Q_RSV ((DSA_8021Q_RSV_VAL << DSA_8021Q_RSV_SHIFT) & \
+ DSA_8021Q_RSV_MASK)
#define DSA_8021Q_SWITCH_ID_SHIFT 6
#define DSA_8021Q_SWITCH_ID_MASK GENMASK(8, 6)
@@ -68,32 +63,24 @@
#define DSA_8021Q_PORT(x) (((x) << DSA_8021Q_PORT_SHIFT) & \
DSA_8021Q_PORT_MASK)
-u16 dsa_8021q_bridge_tx_fwd_offload_vid(int bridge_num)
+u16 dsa_tag_8021q_bridge_vid(unsigned int bridge_num)
{
- /* The VBID value of 0 is reserved for precise TX */
- return DSA_8021Q_DIR_TX | DSA_8021Q_VBID(bridge_num + 1);
-}
-EXPORT_SYMBOL_GPL(dsa_8021q_bridge_tx_fwd_offload_vid);
-
-/* Returns the VID to be inserted into the frame from xmit for switch steering
- * instructions on egress. Encodes switch ID and port ID.
- */
-u16 dsa_8021q_tx_vid(struct dsa_switch *ds, int port)
-{
- return DSA_8021Q_DIR_TX | DSA_8021Q_SWITCH_ID(ds->index) |
- DSA_8021Q_PORT(port);
+ /* The VBID value of 0 is reserved for precise TX, but it is also
+ * reserved/invalid for the bridge_num, so all is well.
+ */
+ return DSA_8021Q_RSV | DSA_8021Q_VBID(bridge_num);
}
-EXPORT_SYMBOL_GPL(dsa_8021q_tx_vid);
+EXPORT_SYMBOL_GPL(dsa_tag_8021q_bridge_vid);
/* Returns the VID that will be installed as pvid for this switch port, sent as
* tagged egress towards the CPU port and decoded by the rcv function.
*/
-u16 dsa_8021q_rx_vid(struct dsa_switch *ds, int port)
+u16 dsa_tag_8021q_standalone_vid(const struct dsa_port *dp)
{
- return DSA_8021Q_DIR_RX | DSA_8021Q_SWITCH_ID(ds->index) |
- DSA_8021Q_PORT(port);
+ return DSA_8021Q_RSV | DSA_8021Q_SWITCH_ID(dp->ds->index) |
+ DSA_8021Q_PORT(dp->index);
}
-EXPORT_SYMBOL_GPL(dsa_8021q_rx_vid);
+EXPORT_SYMBOL_GPL(dsa_tag_8021q_standalone_vid);
/* Returns the decoded switch ID from the RX VID. */
int dsa_8021q_rx_switch_id(u16 vid)
@@ -109,21 +96,20 @@ int dsa_8021q_rx_source_port(u16 vid)
}
EXPORT_SYMBOL_GPL(dsa_8021q_rx_source_port);
-bool vid_is_dsa_8021q_rxvlan(u16 vid)
+/* Returns the decoded VBID from the RX VID. */
+static int dsa_tag_8021q_rx_vbid(u16 vid)
{
- return (vid & DSA_8021Q_DIR_MASK) == DSA_8021Q_DIR_RX;
-}
-EXPORT_SYMBOL_GPL(vid_is_dsa_8021q_rxvlan);
+ u16 vbid_hi = (vid & DSA_8021Q_VBID_HI_MASK) >> DSA_8021Q_VBID_HI_SHIFT;
+ u16 vbid_lo = (vid & DSA_8021Q_VBID_LO_MASK) >> DSA_8021Q_VBID_LO_SHIFT;
-bool vid_is_dsa_8021q_txvlan(u16 vid)
-{
- return (vid & DSA_8021Q_DIR_MASK) == DSA_8021Q_DIR_TX;
+ return (vbid_hi << 2) | vbid_lo;
}
-EXPORT_SYMBOL_GPL(vid_is_dsa_8021q_txvlan);
bool vid_is_dsa_8021q(u16 vid)
{
- return vid_is_dsa_8021q_rxvlan(vid) || vid_is_dsa_8021q_txvlan(vid);
+ u16 rsv = (vid & DSA_8021Q_RSV_MASK) >> DSA_8021Q_RSV_SHIFT;
+
+ return rsv == DSA_8021Q_RSV_VAL;
}
EXPORT_SYMBOL_GPL(vid_is_dsa_8021q);
@@ -139,12 +125,13 @@ dsa_tag_8021q_vlan_find(struct dsa_8021q_context *ctx, int port, u16 vid)
return NULL;
}
-static int dsa_switch_do_tag_8021q_vlan_add(struct dsa_switch *ds, int port,
- u16 vid, u16 flags)
+static int dsa_port_do_tag_8021q_vlan_add(struct dsa_port *dp, u16 vid,
+ u16 flags)
{
- struct dsa_8021q_context *ctx = ds->tag_8021q_ctx;
- struct dsa_port *dp = dsa_to_port(ds, port);
+ struct dsa_8021q_context *ctx = dp->ds->tag_8021q_ctx;
+ struct dsa_switch *ds = dp->ds;
struct dsa_tag_8021q_vlan *v;
+ int port = dp->index;
int err;
/* No need to bother with refcounting for user ports */
@@ -175,12 +162,12 @@ static int dsa_switch_do_tag_8021q_vlan_add(struct dsa_switch *ds, int port,
return 0;
}
-static int dsa_switch_do_tag_8021q_vlan_del(struct dsa_switch *ds, int port,
- u16 vid)
+static int dsa_port_do_tag_8021q_vlan_del(struct dsa_port *dp, u16 vid)
{
- struct dsa_8021q_context *ctx = ds->tag_8021q_ctx;
- struct dsa_port *dp = dsa_to_port(ds, port);
+ struct dsa_8021q_context *ctx = dp->ds->tag_8021q_ctx;
+ struct dsa_switch *ds = dp->ds;
struct dsa_tag_8021q_vlan *v;
+ int port = dp->index;
int err;
/* No need to bother with refcounting for user ports */
@@ -207,22 +194,17 @@ static int dsa_switch_do_tag_8021q_vlan_del(struct dsa_switch *ds, int port,
}
static bool
-dsa_switch_tag_8021q_vlan_match(struct dsa_switch *ds, int port,
- struct dsa_notifier_tag_8021q_vlan_info *info)
+dsa_port_tag_8021q_vlan_match(struct dsa_port *dp,
+ struct dsa_notifier_tag_8021q_vlan_info *info)
{
- if (dsa_is_dsa_port(ds, port) || dsa_is_cpu_port(ds, port))
- return true;
-
- if (ds->dst->index == info->tree_index && ds->index == info->sw_index)
- return port == info->port;
-
- return false;
+ return dsa_port_is_dsa(dp) || dsa_port_is_cpu(dp) || dp == info->dp;
}
int dsa_switch_tag_8021q_vlan_add(struct dsa_switch *ds,
struct dsa_notifier_tag_8021q_vlan_info *info)
{
- int port, err;
+ struct dsa_port *dp;
+ int err;
/* Since we use dsa_broadcast(), there might be other switches in other
* trees which don't support tag_8021q, so don't return an error.
@@ -232,21 +214,16 @@ int dsa_switch_tag_8021q_vlan_add(struct dsa_switch *ds,
if (!ds->ops->tag_8021q_vlan_add || !ds->tag_8021q_ctx)
return 0;
- for (port = 0; port < ds->num_ports; port++) {
- if (dsa_switch_tag_8021q_vlan_match(ds, port, info)) {
+ dsa_switch_for_each_port(dp, ds) {
+ if (dsa_port_tag_8021q_vlan_match(dp, info)) {
u16 flags = 0;
- if (dsa_is_user_port(ds, port))
- flags |= BRIDGE_VLAN_INFO_UNTAGGED;
+ if (dsa_port_is_user(dp))
+ flags |= BRIDGE_VLAN_INFO_UNTAGGED |
+ BRIDGE_VLAN_INFO_PVID;
- if (vid_is_dsa_8021q_rxvlan(info->vid) &&
- dsa_8021q_rx_switch_id(info->vid) == ds->index &&
- dsa_8021q_rx_source_port(info->vid) == port)
- flags |= BRIDGE_VLAN_INFO_PVID;
-
- err = dsa_switch_do_tag_8021q_vlan_add(ds, port,
- info->vid,
- flags);
+ err = dsa_port_do_tag_8021q_vlan_add(dp, info->vid,
+ flags);
if (err)
return err;
}
@@ -258,15 +235,15 @@ int dsa_switch_tag_8021q_vlan_add(struct dsa_switch *ds,
int dsa_switch_tag_8021q_vlan_del(struct dsa_switch *ds,
struct dsa_notifier_tag_8021q_vlan_info *info)
{
- int port, err;
+ struct dsa_port *dp;
+ int err;
if (!ds->ops->tag_8021q_vlan_del || !ds->tag_8021q_ctx)
return 0;
- for (port = 0; port < ds->num_ports; port++) {
- if (dsa_switch_tag_8021q_vlan_match(ds, port, info)) {
- err = dsa_switch_do_tag_8021q_vlan_del(ds, port,
- info->vid);
+ dsa_switch_for_each_port(dp, ds) {
+ if (dsa_port_tag_8021q_vlan_match(dp, info)) {
+ err = dsa_port_do_tag_8021q_vlan_del(dp, info->vid);
if (err)
return err;
}
@@ -275,166 +252,78 @@ int dsa_switch_tag_8021q_vlan_del(struct dsa_switch *ds,
return 0;
}
-/* RX VLAN tagging (left) and TX VLAN tagging (right) setup shown for a single
- * front-panel switch port (here swp0).
+/* There are 2 ways of offloading tag_8021q VLANs.
*
- * Port identification through VLAN (802.1Q) tags has different requirements
- * for it to work effectively:
- * - On RX (ingress from network): each front-panel port must have a pvid
- * that uniquely identifies it, and the egress of this pvid must be tagged
- * towards the CPU port, so that software can recover the source port based
- * on the VID in the frame. But this would only work for standalone ports;
- * if bridged, this VLAN setup would break autonomous forwarding and would
- * force all switched traffic to pass through the CPU. So we must also make
- * the other front-panel ports members of this VID we're adding, albeit
- * we're not making it their PVID (they'll still have their own).
- * - On TX (ingress from CPU and towards network) we are faced with a problem.
- * If we were to tag traffic (from within DSA) with the port's pvid, all
- * would be well, assuming the switch ports were standalone. Frames would
- * have no choice but to be directed towards the correct front-panel port.
- * But because we also want the RX VLAN to not break bridging, then
- * inevitably that means that we have to give them a choice (of what
- * front-panel port to go out on), and therefore we cannot steer traffic
- * based on the RX VID. So what we do is simply install one more VID on the
- * front-panel and CPU ports, and profit off of the fact that steering will
- * work just by virtue of the fact that there is only one other port that's
- * a member of the VID we're tagging the traffic with - the desired one.
+ * One is to use a hardware TCAM to push the port's standalone VLAN into the
+ * frame when forwarding it to the CPU, as an egress modification rule on the
+ * CPU port. This is preferable because it has no side effects for the
+ * autonomous forwarding path, and accomplishes tag_8021q's primary goal of
+ * identifying the source port of each packet based on VLAN ID.
*
- * So at the end, each front-panel port will have one RX VID (also the PVID),
- * the RX VID of all other front-panel ports that are in the same bridge, and
- * one TX VID. Whereas the CPU port will have the RX and TX VIDs of all
- * front-panel ports, and on top of that, is also tagged-input and
- * tagged-output (VLAN trunk).
+ * The other is to commit the tag_8021q VLAN as a PVID to the VLAN table, and
+ * to configure the port as VLAN-unaware. This is less preferable because
+ * unique source port identification can only be done for standalone ports;
+ * under a VLAN-unaware bridge, all ports share the same tag_8021q VLAN as
+ * PVID, and under a VLAN-aware bridge, packets received by software will not
+ * have tag_8021q VLANs appended, just bridge VLANs.
*
- * CPU port CPU port
- * +-------------+-----+-------------+ +-------------+-----+-------------+
- * | RX VID | | | | TX VID | | |
- * | of swp0 | | | | of swp0 | | |
- * | +-----+ | | +-----+ |
- * | ^ T | | | Tagged |
- * | | | | | ingress |
- * | +-------+---+---+-------+ | | +-----------+ |
- * | | | | | | | | Untagged |
- * | | U v U v U v | | v egress |
- * | +-----+ +-----+ +-----+ +-----+ | | +-----+ +-----+ +-----+ +-----+ |
- * | | | | | | | | | | | | | | | | | | | |
- * | |PVID | | | | | | | | | | | | | | | | | |
- * +-+-----+-+-----+-+-----+-+-----+-+ +-+-----+-+-----+-+-----+-+-----+-+
- * swp0 swp1 swp2 swp3 swp0 swp1 swp2 swp3
+ * For tag_8021q implementations of the second type, this method is used to
+ * replace the standalone tag_8021q VLAN of a port with the tag_8021q VLAN to
+ * be used for VLAN-unaware bridging.
*/
-static bool dsa_tag_8021q_bridge_match(struct dsa_switch *ds, int port,
- struct dsa_notifier_bridge_info *info)
+int dsa_tag_8021q_bridge_join(struct dsa_switch *ds, int port,
+ struct dsa_bridge bridge)
{
struct dsa_port *dp = dsa_to_port(ds, port);
+ u16 standalone_vid, bridge_vid;
+ int err;
- /* Don't match on self */
- if (ds->dst->index == info->tree_index &&
- ds->index == info->sw_index &&
- port == info->port)
- return false;
-
- if (dsa_port_is_user(dp))
- return dp->bridge_dev == info->br;
-
- return false;
-}
-
-int dsa_tag_8021q_bridge_join(struct dsa_switch *ds,
- struct dsa_notifier_bridge_info *info)
-{
- struct dsa_switch *targeted_ds;
- struct dsa_port *targeted_dp;
- u16 targeted_rx_vid;
- int err, port;
-
- if (!ds->tag_8021q_ctx)
- return 0;
-
- targeted_ds = dsa_switch_find(info->tree_index, info->sw_index);
- targeted_dp = dsa_to_port(targeted_ds, info->port);
- targeted_rx_vid = dsa_8021q_rx_vid(targeted_ds, info->port);
-
- for (port = 0; port < ds->num_ports; port++) {
- struct dsa_port *dp = dsa_to_port(ds, port);
- u16 rx_vid = dsa_8021q_rx_vid(ds, port);
-
- if (!dsa_tag_8021q_bridge_match(ds, port, info))
- continue;
+ /* Delete the standalone VLAN of the port and replace it with a
+ * bridging VLAN
+ */
+ standalone_vid = dsa_tag_8021q_standalone_vid(dp);
+ bridge_vid = dsa_tag_8021q_bridge_vid(bridge.num);
- /* Install the RX VID of the targeted port in our VLAN table */
- err = dsa_port_tag_8021q_vlan_add(dp, targeted_rx_vid, true);
- if (err)
- return err;
+ err = dsa_port_tag_8021q_vlan_add(dp, bridge_vid, true);
+ if (err)
+ return err;
- /* Install our RX VID into the targeted port's VLAN table */
- err = dsa_port_tag_8021q_vlan_add(targeted_dp, rx_vid, true);
- if (err)
- return err;
- }
+ dsa_port_tag_8021q_vlan_del(dp, standalone_vid, false);
return 0;
}
+EXPORT_SYMBOL_GPL(dsa_tag_8021q_bridge_join);
-int dsa_tag_8021q_bridge_leave(struct dsa_switch *ds,
- struct dsa_notifier_bridge_info *info)
+void dsa_tag_8021q_bridge_leave(struct dsa_switch *ds, int port,
+ struct dsa_bridge bridge)
{
- struct dsa_switch *targeted_ds;
- struct dsa_port *targeted_dp;
- u16 targeted_rx_vid;
- int port;
-
- if (!ds->tag_8021q_ctx)
- return 0;
-
- targeted_ds = dsa_switch_find(info->tree_index, info->sw_index);
- targeted_dp = dsa_to_port(targeted_ds, info->port);
- targeted_rx_vid = dsa_8021q_rx_vid(targeted_ds, info->port);
-
- for (port = 0; port < ds->num_ports; port++) {
- struct dsa_port *dp = dsa_to_port(ds, port);
- u16 rx_vid = dsa_8021q_rx_vid(ds, port);
-
- if (!dsa_tag_8021q_bridge_match(ds, port, info))
- continue;
+ struct dsa_port *dp = dsa_to_port(ds, port);
+ u16 standalone_vid, bridge_vid;
+ int err;
- /* Remove the RX VID of the targeted port from our VLAN table */
- dsa_port_tag_8021q_vlan_del(dp, targeted_rx_vid, true);
+ /* Delete the bridging VLAN of the port and replace it with a
+ * standalone VLAN
+ */
+ standalone_vid = dsa_tag_8021q_standalone_vid(dp);
+ bridge_vid = dsa_tag_8021q_bridge_vid(bridge.num);
- /* Remove our RX VID from the targeted port's VLAN table */
- dsa_port_tag_8021q_vlan_del(targeted_dp, rx_vid, true);
+ err = dsa_port_tag_8021q_vlan_add(dp, standalone_vid, false);
+ if (err) {
+ dev_err(ds->dev,
+ "Failed to delete tag_8021q standalone VLAN %d from port %d: %pe\n",
+ standalone_vid, port, ERR_PTR(err));
}
- return 0;
+ dsa_port_tag_8021q_vlan_del(dp, bridge_vid, true);
}
+EXPORT_SYMBOL_GPL(dsa_tag_8021q_bridge_leave);
-int dsa_tag_8021q_bridge_tx_fwd_offload(struct dsa_switch *ds, int port,
- struct net_device *br,
- int bridge_num)
-{
- u16 tx_vid = dsa_8021q_bridge_tx_fwd_offload_vid(bridge_num);
-
- return dsa_port_tag_8021q_vlan_add(dsa_to_port(ds, port), tx_vid,
- true);
-}
-EXPORT_SYMBOL_GPL(dsa_tag_8021q_bridge_tx_fwd_offload);
-
-void dsa_tag_8021q_bridge_tx_fwd_unoffload(struct dsa_switch *ds, int port,
- struct net_device *br,
- int bridge_num)
-{
- u16 tx_vid = dsa_8021q_bridge_tx_fwd_offload_vid(bridge_num);
-
- dsa_port_tag_8021q_vlan_del(dsa_to_port(ds, port), tx_vid, true);
-}
-EXPORT_SYMBOL_GPL(dsa_tag_8021q_bridge_tx_fwd_unoffload);
-
-/* Set up a port's tag_8021q RX and TX VLAN for standalone mode operation */
+/* Set up a port's standalone tag_8021q VLAN */
static int dsa_tag_8021q_port_setup(struct dsa_switch *ds, int port)
{
struct dsa_8021q_context *ctx = ds->tag_8021q_ctx;
struct dsa_port *dp = dsa_to_port(ds, port);
- u16 rx_vid = dsa_8021q_rx_vid(ds, port);
- u16 tx_vid = dsa_8021q_tx_vid(ds, port);
+ u16 vid = dsa_tag_8021q_standalone_vid(dp);
struct net_device *master;
int err;
@@ -446,30 +335,16 @@ static int dsa_tag_8021q_port_setup(struct dsa_switch *ds, int port)
master = dp->cpu_dp->master;
- /* Add this user port's RX VID to the membership list of all others
- * (including itself). This is so that bridging will not be hindered.
- * L2 forwarding rules still take precedence when there are no VLAN
- * restrictions, so there are no concerns about leaking traffic.
- */
- err = dsa_port_tag_8021q_vlan_add(dp, rx_vid, false);
+ err = dsa_port_tag_8021q_vlan_add(dp, vid, false);
if (err) {
dev_err(ds->dev,
- "Failed to apply RX VID %d to port %d: %pe\n",
- rx_vid, port, ERR_PTR(err));
+ "Failed to apply standalone VID %d to port %d: %pe\n",
+ vid, port, ERR_PTR(err));
return err;
}
- /* Add @rx_vid to the master's RX filter. */
- vlan_vid_add(master, ctx->proto, rx_vid);
-
- /* Finally apply the TX VID on this port and on the CPU port */
- err = dsa_port_tag_8021q_vlan_add(dp, tx_vid, false);
- if (err) {
- dev_err(ds->dev,
- "Failed to apply TX VID %d on port %d: %pe\n",
- tx_vid, port, ERR_PTR(err));
- return err;
- }
+ /* Add the VLAN to the master's RX filter. */
+ vlan_vid_add(master, ctx->proto, vid);
return err;
}
@@ -478,8 +353,7 @@ static void dsa_tag_8021q_port_teardown(struct dsa_switch *ds, int port)
{
struct dsa_8021q_context *ctx = ds->tag_8021q_ctx;
struct dsa_port *dp = dsa_to_port(ds, port);
- u16 rx_vid = dsa_8021q_rx_vid(ds, port);
- u16 tx_vid = dsa_8021q_tx_vid(ds, port);
+ u16 vid = dsa_tag_8021q_standalone_vid(dp);
struct net_device *master;
/* The CPU port is implicitly configured by
@@ -490,11 +364,9 @@ static void dsa_tag_8021q_port_teardown(struct dsa_switch *ds, int port)
master = dp->cpu_dp->master;
- dsa_port_tag_8021q_vlan_del(dp, rx_vid, false);
-
- vlan_vid_del(master, ctx->proto, rx_vid);
+ dsa_port_tag_8021q_vlan_del(dp, vid, false);
- dsa_port_tag_8021q_vlan_del(dp, tx_vid, false);
+ vlan_vid_del(master, ctx->proto, vid);
}
static int dsa_tag_8021q_setup(struct dsa_switch *ds)
@@ -582,23 +454,57 @@ struct sk_buff *dsa_8021q_xmit(struct sk_buff *skb, struct net_device *netdev,
}
EXPORT_SYMBOL_GPL(dsa_8021q_xmit);
-void dsa_8021q_rcv(struct sk_buff *skb, int *source_port, int *switch_id)
+struct net_device *dsa_tag_8021q_find_port_by_vbid(struct net_device *master,
+ int vbid)
+{
+ struct dsa_port *cpu_dp = master->dsa_ptr;
+ struct dsa_switch_tree *dst = cpu_dp->dst;
+ struct dsa_port *dp;
+
+ if (WARN_ON(!vbid))
+ return NULL;
+
+ dsa_tree_for_each_user_port(dp, dst) {
+ if (!dp->bridge)
+ continue;
+
+ if (dp->stp_state != BR_STATE_LEARNING &&
+ dp->stp_state != BR_STATE_FORWARDING)
+ continue;
+
+ if (dp->cpu_dp != cpu_dp)
+ continue;
+
+ if (dsa_port_bridge_num_get(dp) == vbid)
+ return dp->slave;
+ }
+
+ return NULL;
+}
+EXPORT_SYMBOL_GPL(dsa_tag_8021q_find_port_by_vbid);
+
+void dsa_8021q_rcv(struct sk_buff *skb, int *source_port, int *switch_id,
+ int *vbid)
{
u16 vid, tci;
- skb_push_rcsum(skb, ETH_HLEN);
if (skb_vlan_tag_present(skb)) {
tci = skb_vlan_tag_get(skb);
__vlan_hwaccel_clear_tag(skb);
} else {
+ skb_push_rcsum(skb, ETH_HLEN);
__skb_vlan_pop(skb, &tci);
+ skb_pull_rcsum(skb, ETH_HLEN);
}
- skb_pull_rcsum(skb, ETH_HLEN);
vid = tci & VLAN_VID_MASK;
*source_port = dsa_8021q_rx_source_port(vid);
*switch_id = dsa_8021q_rx_switch_id(vid);
+
+ if (vbid)
+ *vbid = dsa_tag_8021q_rx_vbid(vid);
+
skb->priority = (tci & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
}
EXPORT_SYMBOL_GPL(dsa_8021q_rcv);
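/* A short sketch of how a tagger's .rcv callback might combine the two
 * helpers above on a switch capable of imprecise RX (the function and
 * variable names are illustrative only):
 */
static struct sk_buff *example_8021q_rcv(struct sk_buff *skb,
					 struct net_device *master)
{
	int src_port, switch_id, vbid;

	dsa_8021q_rcv(skb, &src_port, &switch_id, &vbid);

	if (vbid)
		/* Imprecise RX: any port of that bridge will do */
		skb->dev = dsa_tag_8021q_find_port_by_vbid(master, vbid);
	else
		skb->dev = dsa_master_find_slave(master, switch_id, src_port);

	return skb->dev ? skb : NULL;
}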
diff --git a/net/dsa/tag_dsa.c b/net/dsa/tag_dsa.c
index b3da4b2ea11c..e4b6e3f2a3db 100644
--- a/net/dsa/tag_dsa.c
+++ b/net/dsa/tag_dsa.c
@@ -127,11 +127,13 @@ static struct sk_buff *dsa_xmit_ll(struct sk_buff *skb, struct net_device *dev,
u8 extra)
{
struct dsa_port *dp = dsa_slave_to_port(dev);
+ struct net_device *br_dev;
u8 tag_dev, tag_port;
enum dsa_cmd cmd;
u8 *dsa_header;
if (skb->offload_fwd_mark) {
+ unsigned int bridge_num = dsa_port_bridge_num_get(dp);
struct dsa_switch_tree *dst = dp->ds->dst;
cmd = DSA_CMD_FORWARD;
@@ -140,7 +142,7 @@ static struct sk_buff *dsa_xmit_ll(struct sk_buff *skb, struct net_device *dev,
* packets on behalf of a virtual switch device with an index
* past the physical switches.
*/
- tag_dev = dst->last_switch + 1 + dp->bridge_num;
+ tag_dev = dst->last_switch + bridge_num;
tag_port = 0;
} else {
cmd = DSA_CMD_FROM_CPU;
@@ -148,7 +150,16 @@ static struct sk_buff *dsa_xmit_ll(struct sk_buff *skb, struct net_device *dev,
tag_port = dp->index;
}
- if (skb->protocol == htons(ETH_P_8021Q)) {
+ br_dev = dsa_port_bridge_dev_get(dp);
+
+ /* If frame is already 802.1Q tagged, we can convert it to a DSA
+ * tag (avoiding a memmove), but only if the port is standalone
+ * (in which case we always send FROM_CPU) or if the port's
+ * bridge has VLAN filtering enabled (in which case the CPU port
+ * will be a member of the VLAN).
+ */
+ if (skb->protocol == htons(ETH_P_8021Q) &&
+ (!br_dev || br_vlan_enabled(br_dev))) {
if (extra) {
skb_push(skb, extra);
dsa_alloc_etype_header(skb, extra);
@@ -165,10 +176,9 @@ static struct sk_buff *dsa_xmit_ll(struct sk_buff *skb, struct net_device *dev,
dsa_header[2] &= ~0x10;
}
} else {
- struct net_device *br = dp->bridge_dev;
u16 vid;
- vid = br ? MV88E6XXX_VID_BRIDGED : MV88E6XXX_VID_STANDALONE;
+ vid = br_dev ? MV88E6XXX_VID_BRIDGED : MV88E6XXX_VID_STANDALONE;
skb_push(skb, DSA_HLEN + extra);
dsa_alloc_etype_header(skb, DSA_HLEN + extra);
@@ -245,12 +255,14 @@ static struct sk_buff *dsa_rcv_ll(struct sk_buff *skb, struct net_device *dev,
if (trunk) {
struct dsa_port *cpu_dp = dev->dsa_ptr;
+ struct dsa_lag *lag;
/* The exact source port is not available in the tag,
* so we inject the frame directly on the upper
* team/bond.
*/
- skb->dev = dsa_lag_dev(cpu_dp->dst, source_port);
+ lag = dsa_lag_by_id(cpu_dp->dst, source_port + 1);
+ skb->dev = lag ? lag->dev : NULL;
} else {
skb->dev = dsa_master_find_slave(dev, source_device,
source_port);
diff --git a/net/dsa/tag_ocelot.c b/net/dsa/tag_ocelot.c
index 6e0518aa3a4d..0d81f172b7a6 100644
--- a/net/dsa/tag_ocelot.c
+++ b/net/dsa/tag_ocelot.c
@@ -4,14 +4,55 @@
#include <linux/dsa/ocelot.h>
#include "dsa_priv.h"
+/* If the port is under a VLAN-aware bridge, remove the VLAN header from the
+ * payload and move it into the DSA tag, which will make the switch classify
+ * the packet to the bridge VLAN. Otherwise, leave the classified VLAN at zero,
+ * which is the pvid of standalone and VLAN-unaware bridge ports.
+ */
+static void ocelot_xmit_get_vlan_info(struct sk_buff *skb, struct dsa_port *dp,
+ u64 *vlan_tci, u64 *tag_type)
+{
+ struct net_device *br = dsa_port_bridge_dev_get(dp);
+ struct vlan_ethhdr *hdr;
+ u16 proto, tci;
+
+ if (!br || !br_vlan_enabled(br)) {
+ *vlan_tci = 0;
+ *tag_type = IFH_TAG_TYPE_C;
+ return;
+ }
+
+ hdr = (struct vlan_ethhdr *)skb_mac_header(skb);
+ br_vlan_get_proto(br, &proto);
+
+ if (ntohs(hdr->h_vlan_proto) == proto) {
+ __skb_vlan_pop(skb, &tci);
+ *vlan_tci = tci;
+ } else {
+ rcu_read_lock();
+ br_vlan_get_pvid_rcu(br, &tci);
+ rcu_read_unlock();
+ *vlan_tci = tci;
+ }
+
+ *tag_type = (proto != ETH_P_8021Q) ? IFH_TAG_TYPE_S : IFH_TAG_TYPE_C;
+}
+
static void ocelot_xmit_common(struct sk_buff *skb, struct net_device *netdev,
__be32 ifh_prefix, void **ifh)
{
struct dsa_port *dp = dsa_slave_to_port(netdev);
struct dsa_switch *ds = dp->ds;
+ u64 vlan_tci, tag_type;
void *injection;
__be32 *prefix;
u32 rew_op = 0;
+ u64 qos_class;
+
+ ocelot_xmit_get_vlan_info(skb, dp, &vlan_tci, &tag_type);
+
+ qos_class = netdev_get_num_tc(netdev) ?
+ netdev_get_prio_tc_map(netdev, skb->priority) : skb->priority;
injection = skb_push(skb, OCELOT_TAG_LEN);
prefix = skb_push(skb, OCELOT_SHORT_PREFIX_LEN);
@@ -20,7 +61,9 @@ static void ocelot_xmit_common(struct sk_buff *skb, struct net_device *netdev,
memset(injection, 0, OCELOT_TAG_LEN);
ocelot_ifh_set_bypass(injection, 1);
ocelot_ifh_set_src(injection, ds->num_ports);
- ocelot_ifh_set_qos_class(injection, skb->priority);
+ ocelot_ifh_set_qos_class(injection, qos_class);
+ ocelot_ifh_set_vlan_tci(injection, vlan_tci);
+ ocelot_ifh_set_tag_type(injection, tag_type);
rew_op = ocelot_ptp_rew_op(skb);
if (rew_op)
diff --git a/net/dsa/tag_ocelot_8021q.c b/net/dsa/tag_ocelot_8021q.c
index 3412051981d7..37ccf00404ea 100644
--- a/net/dsa/tag_ocelot_8021q.c
+++ b/net/dsa/tag_ocelot_8021q.c
@@ -12,25 +12,46 @@
#include <linux/dsa/ocelot.h>
#include "dsa_priv.h"
+struct ocelot_8021q_tagger_private {
+ struct ocelot_8021q_tagger_data data; /* Must be first */
+ struct kthread_worker *xmit_worker;
+};
+
static struct sk_buff *ocelot_defer_xmit(struct dsa_port *dp,
struct sk_buff *skb)
{
+ struct ocelot_8021q_tagger_private *priv = dp->ds->tagger_data;
+ struct ocelot_8021q_tagger_data *data = &priv->data;
+ void (*xmit_work_fn)(struct kthread_work *work);
struct felix_deferred_xmit_work *xmit_work;
- struct felix_port *felix_port = dp->priv;
+ struct kthread_worker *xmit_worker;
+
+ xmit_work_fn = data->xmit_work_fn;
+ xmit_worker = priv->xmit_worker;
+
+ if (!xmit_work_fn || !xmit_worker)
+ return NULL;
+
+ /* PTP over IP packets need UDP checksumming. We may have inherited
+ * NETIF_F_HW_CSUM from the DSA master, but these packets are not sent
+ * through the DSA master, so calculate the checksum here.
+ */
+ if (skb->ip_summed == CHECKSUM_PARTIAL && skb_checksum_help(skb))
+ return NULL;
xmit_work = kzalloc(sizeof(*xmit_work), GFP_ATOMIC);
if (!xmit_work)
return NULL;
/* Calls felix_port_deferred_xmit in felix.c */
- kthread_init_work(&xmit_work->work, felix_port->xmit_work_fn);
+ kthread_init_work(&xmit_work->work, xmit_work_fn);
/* Increase refcount so the kfree_skb in dsa_slave_xmit
* won't really free the packet.
*/
xmit_work->dp = dp;
xmit_work->skb = skb_get(skb);
- kthread_queue_work(felix_port->xmit_worker, &xmit_work->work);
+ kthread_queue_work(xmit_worker, &xmit_work->work);
return NULL;
}
@@ -39,9 +60,9 @@ static struct sk_buff *ocelot_xmit(struct sk_buff *skb,
struct net_device *netdev)
{
struct dsa_port *dp = dsa_slave_to_port(netdev);
- u16 tx_vid = dsa_8021q_tx_vid(dp->ds, dp->index);
u16 queue_mapping = skb_get_queue_mapping(skb);
u8 pcp = netdev_txq_to_tc(netdev, queue_mapping);
+ u16 tx_vid = dsa_tag_8021q_standalone_vid(dp);
struct ethhdr *hdr = eth_hdr(skb);
if (ocelot_ptp_rew_op(skb) || is_link_local_ether_addr(hdr->h_dest))
@@ -56,7 +77,7 @@ static struct sk_buff *ocelot_rcv(struct sk_buff *skb,
{
int src_port, switch_id;
- dsa_8021q_rcv(skb, &src_port, &switch_id);
+ dsa_8021q_rcv(skb, &src_port, &switch_id, NULL);
skb->dev = dsa_master_find_slave(netdev, switch_id, src_port);
if (!skb->dev)
@@ -67,11 +88,43 @@ static struct sk_buff *ocelot_rcv(struct sk_buff *skb,
return skb;
}
+static void ocelot_disconnect(struct dsa_switch *ds)
+{
+ struct ocelot_8021q_tagger_private *priv = ds->tagger_data;
+
+ kthread_destroy_worker(priv->xmit_worker);
+ kfree(priv);
+ ds->tagger_data = NULL;
+}
+
+static int ocelot_connect(struct dsa_switch *ds)
+{
+ struct ocelot_8021q_tagger_private *priv;
+ int err;
+
+ priv = kzalloc(sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ priv->xmit_worker = kthread_create_worker(0, "felix_xmit");
+ if (IS_ERR(priv->xmit_worker)) {
+ err = PTR_ERR(priv->xmit_worker);
+ kfree(priv);
+ return err;
+ }
+
+ ds->tagger_data = priv;
+
+ return 0;
+}
+
static const struct dsa_device_ops ocelot_8021q_netdev_ops = {
.name = "ocelot-8021q",
.proto = DSA_TAG_PROTO_OCELOT_8021Q,
.xmit = ocelot_xmit,
.rcv = ocelot_rcv,
+ .connect = ocelot_connect,
+ .disconnect = ocelot_disconnect,
.needed_headroom = VLAN_HLEN,
.promisc_on_master = true,
};
diff --git a/net/dsa/tag_rtl4_a.c b/net/dsa/tag_rtl4_a.c
index f920487ae145..6d928ee3ef7a 100644
--- a/net/dsa/tag_rtl4_a.c
+++ b/net/dsa/tag_rtl4_a.c
@@ -54,7 +54,7 @@ static struct sk_buff *rtl4a_tag_xmit(struct sk_buff *skb,
p = (__be16 *)tag;
*p = htons(RTL4_A_ETHERTYPE);
- out = (RTL4_A_PROTOCOL_RTL8366RB << RTL4_A_PROTOCOL_SHIFT) | (2 << 8);
+ out = (RTL4_A_PROTOCOL_RTL8366RB << RTL4_A_PROTOCOL_SHIFT);
/* The lower bits indicate the port number */
out |= BIT(dp->index);
diff --git a/net/dsa/tag_rtl8_4.c b/net/dsa/tag_rtl8_4.c
new file mode 100644
index 000000000000..02686ad4045d
--- /dev/null
+++ b/net/dsa/tag_rtl8_4.c
@@ -0,0 +1,178 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Handler for Realtek 8 byte switch tags
+ *
+ * Copyright (C) 2021 Alvin Šipraga <alsi@bang-olufsen.dk>
+ *
+ * NOTE: Currently only supports protocol "4" found in the RTL8365MB, hence
+ * named tag_rtl8_4.
+ *
+ * This tag header has the following format:
+ *
+ * -------------------------------------------
+ * | MAC DA | MAC SA | 8 byte tag | Type | ...
+ * -------------------------------------------
+ * _______________/ \______________________________________
+ * / \
+ * 0 7|8 15
+ * |-----------------------------------+-----------------------------------|---
+ * | (16-bit) | ^
+ * | Realtek EtherType [0x8899] | |
+ * |-----------------------------------+-----------------------------------| 8
+ * | (8-bit) | (8-bit) |
+ * | Protocol [0x04] | REASON | b
+ * |-----------------------------------+-----------------------------------| y
+ * | (1) | (1) | (2) | (1) | (3) | (1) | (1) | (1) | (5) | t
+ * | FID_EN | X | FID | PRI_EN | PRI | KEEP | X | LEARN_DIS | X | e
+ * |-----------------------------------+-----------------------------------| s
+ * | (1) | (15-bit) | |
+ * | ALLOW | TX/RX | v
+ * |-----------------------------------+-----------------------------------|---
+ *
+ * With the following field descriptions:
+ *
+ * field | description
+ * ------------+-------------
+ * Realtek | 0x8899: indicates that this is a proprietary Realtek tag;
+ * EtherType | note that Realtek uses the same EtherType for
+ * | other incompatible tag formats (e.g. tag_rtl4_a.c)
+ * Protocol | 0x04: indicates that this tag conforms to this format
+ * X | reserved
+ * ------------+-------------
+ * REASON | reason for forwarding packet to CPU
+ * | 0: packet was forwarded or flooded to CPU
+ * | 80: packet was trapped to CPU
+ * FID_EN | 1: packet has an FID
+ * | 0: no FID
+ * FID | FID of packet (if FID_EN=1)
+ * PRI_EN | 1: force priority of packet
+ * | 0: don't force priority
+ * PRI | priority of packet (if PRI_EN=1)
+ * KEEP | preserve packet VLAN tag format
+ * LEARN_DIS | don't learn the source MAC address of the packet
+ * ALLOW | 1: treat TX/RX field as an allowance port mask, meaning the
+ * | packet may only be forwarded to ports specified in the
+ * | mask
+ * | 0: no allowance port mask, TX/RX field is the forwarding
+ * | port mask
+ * TX/RX | TX (switch->CPU): port number the packet was received on
+ * | RX (CPU->switch): forwarding port mask (if ALLOW=0)
+ * | allowance port mask (if ALLOW=1)
+ */
+
+#include <linux/bitfield.h>
+#include <linux/bits.h>
+#include <linux/etherdevice.h>
+
+#include "dsa_priv.h"
+
+/* Protocols supported:
+ *
+ * 0x04 = RTL8365MB DSA protocol
+ */
+
+#define RTL8_4_TAG_LEN 8
+
+#define RTL8_4_PROTOCOL GENMASK(15, 8)
+#define RTL8_4_PROTOCOL_RTL8365MB 0x04
+#define RTL8_4_REASON GENMASK(7, 0)
+#define RTL8_4_REASON_FORWARD 0
+#define RTL8_4_REASON_TRAP 80
+
+#define RTL8_4_LEARN_DIS BIT(5)
+
+#define RTL8_4_TX GENMASK(3, 0)
+#define RTL8_4_RX GENMASK(10, 0)
+
+static struct sk_buff *rtl8_4_tag_xmit(struct sk_buff *skb,
+ struct net_device *dev)
+{
+ struct dsa_port *dp = dsa_slave_to_port(dev);
+ __be16 *tag;
+
+ skb_push(skb, RTL8_4_TAG_LEN);
+
+ dsa_alloc_etype_header(skb, RTL8_4_TAG_LEN);
+ tag = dsa_etype_header_pos_tx(skb);
+
+ /* Set Realtek EtherType */
+ tag[0] = htons(ETH_P_REALTEK);
+
+ /* Set Protocol; zero REASON */
+ tag[1] = htons(FIELD_PREP(RTL8_4_PROTOCOL, RTL8_4_PROTOCOL_RTL8365MB));
+
+ /* Zero FID_EN, FID, PRI_EN, PRI, KEEP; set LEARN_DIS */
+ tag[2] = htons(FIELD_PREP(RTL8_4_LEARN_DIS, 1));
+
+ /* Zero ALLOW; set RX (CPU->switch) forwarding port mask */
+ tag[3] = htons(FIELD_PREP(RTL8_4_RX, BIT(dp->index)));
+
+ return skb;
+}
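/* For example, with dp->index == 3 the tag built above consists of the four
 * big-endian 16-bit words 0x8899, 0x0400, 0x0020 and 0x0008: the Realtek
 * EtherType, protocol 4 with a zero REASON, LEARN_DIS set, and a forwarding
 * port mask of BIT(3).
 */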
+
+static struct sk_buff *rtl8_4_tag_rcv(struct sk_buff *skb,
+ struct net_device *dev)
+{
+ __be16 *tag;
+ u16 etype;
+ u8 reason;
+ u8 proto;
+ u8 port;
+
+ if (unlikely(!pskb_may_pull(skb, RTL8_4_TAG_LEN)))
+ return NULL;
+
+ tag = dsa_etype_header_pos_rx(skb);
+
+ /* Parse Realtek EtherType */
+ etype = ntohs(tag[0]);
+ if (unlikely(etype != ETH_P_REALTEK)) {
+ dev_warn_ratelimited(&dev->dev,
+ "non-realtek ethertype 0x%04x\n", etype);
+ return NULL;
+ }
+
+ /* Parse Protocol */
+ proto = FIELD_GET(RTL8_4_PROTOCOL, ntohs(tag[1]));
+ if (unlikely(proto != RTL8_4_PROTOCOL_RTL8365MB)) {
+ dev_warn_ratelimited(&dev->dev,
+ "unknown realtek protocol 0x%02x\n",
+ proto);
+ return NULL;
+ }
+
+ /* Parse REASON */
+ reason = FIELD_GET(RTL8_4_REASON, ntohs(tag[1]));
+
+ /* Parse TX (switch->CPU) */
+ port = FIELD_GET(RTL8_4_TX, ntohs(tag[3]));
+ skb->dev = dsa_master_find_slave(dev, 0, port);
+ if (!skb->dev) {
+ dev_warn_ratelimited(&dev->dev,
+ "could not find slave for port %d\n",
+ port);
+ return NULL;
+ }
+
+ /* Remove tag and recalculate checksum */
+ skb_pull_rcsum(skb, RTL8_4_TAG_LEN);
+
+ dsa_strip_etype_header(skb, RTL8_4_TAG_LEN);
+
+ if (reason != RTL8_4_REASON_TRAP)
+ dsa_default_offload_fwd_mark(skb);
+
+ return skb;
+}
+
+static const struct dsa_device_ops rtl8_4_netdev_ops = {
+ .name = "rtl8_4",
+ .proto = DSA_TAG_PROTO_RTL8_4,
+ .xmit = rtl8_4_tag_xmit,
+ .rcv = rtl8_4_tag_rcv,
+ .needed_headroom = RTL8_4_TAG_LEN,
+};
+module_dsa_tag_driver(rtl8_4_netdev_ops);
+
+MODULE_LICENSE("GPL");
+MODULE_ALIAS_DSA_TAG_DRIVER(DSA_TAG_PROTO_RTL8_4);
diff --git a/net/dsa/tag_sja1105.c b/net/dsa/tag_sja1105.c
index d43feadd5fa6..1a85125bda6d 100644
--- a/net/dsa/tag_sja1105.c
+++ b/net/dsa/tag_sja1105.c
@@ -4,7 +4,6 @@
#include <linux/if_vlan.h>
#include <linux/dsa/sja1105.h>
#include <linux/dsa/8021q.h>
-#include <linux/skbuff.h>
#include <linux/packing.h>
#include "dsa_priv.h"
@@ -54,11 +53,25 @@
#define SJA1110_TX_TRAILER_LEN 4
#define SJA1110_MAX_PADDING_LEN 15
-enum sja1110_meta_tstamp {
- SJA1110_META_TSTAMP_TX = 0,
- SJA1110_META_TSTAMP_RX = 1,
+#define SJA1105_HWTS_RX_EN 0
+
+struct sja1105_tagger_private {
+ struct sja1105_tagger_data data; /* Must be first */
+ unsigned long state;
+ /* Protects concurrent access to the meta state machine
+ * from taggers running on multiple ports on SMP systems
+ */
+ spinlock_t meta_lock;
+ struct sk_buff *stampable_skb;
+ struct kthread_worker *xmit_worker;
};
+static struct sja1105_tagger_private *
+sja1105_tagger_private(struct dsa_switch *ds)
+{
+ return ds->tagger_data;
+}
+
/* Similar to is_link_local_ether_addr(hdr->h_dest) but also covers PTP */
static inline bool sja1105_is_link_local(const struct sk_buff *skb)
{
@@ -125,16 +138,30 @@ static inline bool sja1105_is_meta_frame(const struct sk_buff *skb)
static struct sk_buff *sja1105_defer_xmit(struct dsa_port *dp,
struct sk_buff *skb)
{
- struct sja1105_port *sp = dp->priv;
+ struct sja1105_tagger_data *tagger_data = sja1105_tagger_data(dp->ds);
+ struct sja1105_tagger_private *priv = sja1105_tagger_private(dp->ds);
+ void (*xmit_work_fn)(struct kthread_work *work);
+ struct sja1105_deferred_xmit_work *xmit_work;
+ struct kthread_worker *xmit_worker;
- if (!dsa_port_is_sja1105(dp))
- return skb;
+ xmit_work_fn = tagger_data->xmit_work_fn;
+ xmit_worker = priv->xmit_worker;
+
+ if (!xmit_work_fn || !xmit_worker)
+ return NULL;
+
+ xmit_work = kzalloc(sizeof(*xmit_work), GFP_ATOMIC);
+ if (!xmit_work)
+ return NULL;
+ kthread_init_work(&xmit_work->work, xmit_work_fn);
/* Increase refcount so the kfree_skb in dsa_slave_xmit
* won't really free the packet.
*/
- skb_queue_tail(&sp->xmit_queue, skb_get(skb));
- kthread_queue_work(sp->xmit_worker, &sp->xmit_work);
+ xmit_work->dp = dp;
+ xmit_work->skb = skb_get(skb);
+
+ kthread_queue_work(xmit_worker, &xmit_work->work);
return NULL;
}
@@ -158,18 +185,17 @@ static u16 sja1105_xmit_tpid(struct dsa_port *dp)
* we're sure about that). It may not be on this port though, so we
* need to find it.
*/
- list_for_each_entry(other_dp, &ds->dst->ports, list) {
- if (other_dp->ds != ds)
- continue;
+ dsa_switch_for_each_port(other_dp, ds) {
+ struct net_device *br = dsa_port_bridge_dev_get(other_dp);
- if (!other_dp->bridge_dev)
+ if (!br)
continue;
/* Error is returned only if CONFIG_BRIDGE_VLAN_FILTERING,
* which seems pointless to handle, as our port cannot become
* VLAN-aware in that case.
*/
- br_vlan_get_proto(other_dp->bridge_dev, &proto);
+ br_vlan_get_proto(br, &proto);
return proto;
}
@@ -183,7 +209,8 @@ static struct sk_buff *sja1105_imprecise_xmit(struct sk_buff *skb,
struct net_device *netdev)
{
struct dsa_port *dp = dsa_slave_to_port(netdev);
- struct net_device *br = dp->bridge_dev;
+ unsigned int bridge_num = dsa_port_bridge_num_get(dp);
+ struct net_device *br = dsa_port_bridge_dev_get(dp);
u16 tx_vid;
/* If the port is under a VLAN-aware bridge, just slide the
@@ -199,7 +226,7 @@ static struct sk_buff *sja1105_imprecise_xmit(struct sk_buff *skb,
* TX VLAN that targets the bridge's entire broadcast domain,
* instead of just the specific port.
*/
- tx_vid = dsa_8021q_bridge_tx_fwd_offload_vid(dp->bridge_num);
+ tx_vid = dsa_tag_8021q_bridge_vid(bridge_num);
return dsa_8021q_xmit(skb, netdev, sja1105_xmit_tpid(dp), tx_vid);
}
@@ -238,9 +265,9 @@ static struct sk_buff *sja1105_xmit(struct sk_buff *skb,
struct net_device *netdev)
{
struct dsa_port *dp = dsa_slave_to_port(netdev);
- u16 tx_vid = dsa_8021q_tx_vid(dp->ds, dp->index);
u16 queue_mapping = skb_get_queue_mapping(skb);
u8 pcp = netdev_txq_to_tc(netdev, queue_mapping);
+ u16 tx_vid = dsa_tag_8021q_standalone_vid(dp);
if (skb->offload_fwd_mark)
return sja1105_imprecise_xmit(skb, netdev);
@@ -266,9 +293,9 @@ static struct sk_buff *sja1110_xmit(struct sk_buff *skb,
{
struct sk_buff *clone = SJA1105_SKB_CB(skb)->clone;
struct dsa_port *dp = dsa_slave_to_port(netdev);
- u16 tx_vid = dsa_8021q_tx_vid(dp->ds, dp->index);
u16 queue_mapping = skb_get_queue_mapping(skb);
u8 pcp = netdev_txq_to_tc(netdev, queue_mapping);
+ u16 tx_vid = dsa_tag_8021q_standalone_vid(dp);
__be32 *tx_trailer;
__be16 *tx_header;
int trailer_pos;
@@ -355,32 +382,32 @@ static struct sk_buff
*/
if (is_link_local) {
struct dsa_port *dp = dsa_slave_to_port(skb->dev);
- struct sja1105_port *sp = dp->priv;
+ struct sja1105_tagger_private *priv;
+ struct dsa_switch *ds = dp->ds;
- if (unlikely(!dsa_port_is_sja1105(dp)))
- return skb;
+ priv = sja1105_tagger_private(ds);
- if (!test_bit(SJA1105_HWTS_RX_EN, &sp->data->state))
+ if (!test_bit(SJA1105_HWTS_RX_EN, &priv->state))
/* Do normal processing. */
return skb;
- spin_lock(&sp->data->meta_lock);
+ spin_lock(&priv->meta_lock);
/* Was this a link-local frame instead of the meta
* that we were expecting?
*/
- if (sp->data->stampable_skb) {
- dev_err_ratelimited(dp->ds->dev,
+ if (priv->stampable_skb) {
+ dev_err_ratelimited(ds->dev,
"Expected meta frame, is %12llx "
"in the DSA master multicast filter?\n",
SJA1105_META_DMAC);
- kfree_skb(sp->data->stampable_skb);
+ kfree_skb(priv->stampable_skb);
}
/* Hold a reference to avoid dsa_switch_rcv
* from freeing the skb.
*/
- sp->data->stampable_skb = skb_get(skb);
- spin_unlock(&sp->data->meta_lock);
+ priv->stampable_skb = skb_get(skb);
+ spin_unlock(&priv->meta_lock);
/* Tell DSA we got nothing */
return NULL;
@@ -393,37 +420,37 @@ static struct sk_buff
*/
} else if (is_meta) {
struct dsa_port *dp = dsa_slave_to_port(skb->dev);
- struct sja1105_port *sp = dp->priv;
+ struct sja1105_tagger_private *priv;
+ struct dsa_switch *ds = dp->ds;
struct sk_buff *stampable_skb;
- if (unlikely(!dsa_port_is_sja1105(dp)))
- return skb;
+ priv = sja1105_tagger_private(ds);
/* Drop the meta frame if we're not in the right state
* to process it.
*/
- if (!test_bit(SJA1105_HWTS_RX_EN, &sp->data->state))
+ if (!test_bit(SJA1105_HWTS_RX_EN, &priv->state))
return NULL;
- spin_lock(&sp->data->meta_lock);
+ spin_lock(&priv->meta_lock);
- stampable_skb = sp->data->stampable_skb;
- sp->data->stampable_skb = NULL;
+ stampable_skb = priv->stampable_skb;
+ priv->stampable_skb = NULL;
/* Was this a meta frame instead of the link-local
* that we were expecting?
*/
if (!stampable_skb) {
- dev_err_ratelimited(dp->ds->dev,
+ dev_err_ratelimited(ds->dev,
"Unexpected meta frame\n");
- spin_unlock(&sp->data->meta_lock);
+ spin_unlock(&priv->meta_lock);
return NULL;
}
if (stampable_skb->dev != skb->dev) {
- dev_err_ratelimited(dp->ds->dev,
+ dev_err_ratelimited(ds->dev,
"Meta frame on wrong port\n");
- spin_unlock(&sp->data->meta_lock);
+ spin_unlock(&priv->meta_lock);
return NULL;
}
@@ -434,12 +461,36 @@ static struct sk_buff
skb = stampable_skb;
sja1105_transfer_meta(skb, meta);
- spin_unlock(&sp->data->meta_lock);
+ spin_unlock(&priv->meta_lock);
}
return skb;
}
+static bool sja1105_rxtstamp_get_state(struct dsa_switch *ds)
+{
+ struct sja1105_tagger_private *priv = sja1105_tagger_private(ds);
+
+ return test_bit(SJA1105_HWTS_RX_EN, &priv->state);
+}
+
+static void sja1105_rxtstamp_set_state(struct dsa_switch *ds, bool on)
+{
+ struct sja1105_tagger_private *priv = sja1105_tagger_private(ds);
+
+ if (on)
+ set_bit(SJA1105_HWTS_RX_EN, &priv->state);
+ else
+ clear_bit(SJA1105_HWTS_RX_EN, &priv->state);
+
+ /* Initialize the meta state machine to a known state */
+ if (!priv->stampable_skb)
+ return;
+
+ kfree_skb(priv->stampable_skb);
+ priv->stampable_skb = NULL;
+}
+
static bool sja1105_skb_has_tag_8021q(const struct sk_buff *skb)
{
u16 tpid = ntohs(eth_hdr(skb)->h_proto);
@@ -458,7 +509,7 @@ static bool sja1110_skb_has_inband_control_extension(const struct sk_buff *skb)
* packet.
*/
static void sja1105_vlan_rcv(struct sk_buff *skb, int *source_port,
- int *switch_id, u16 *vid)
+ int *switch_id, int *vbid, u16 *vid)
{
struct vlan_ethhdr *hdr = (struct vlan_ethhdr *)skb_mac_header(skb);
u16 vlan_tci;
@@ -468,8 +519,8 @@ static void sja1105_vlan_rcv(struct sk_buff *skb, int *source_port,
else
vlan_tci = ntohs(hdr->h_vlan_TCI);
- if (vid_is_dsa_8021q_rxvlan(vlan_tci & VLAN_VID_MASK))
- return dsa_8021q_rcv(skb, source_port, switch_id);
+ if (vid_is_dsa_8021q(vlan_tci & VLAN_VID_MASK))
+ return dsa_8021q_rcv(skb, source_port, switch_id, vbid);
/* Try our best with imprecise RX */
*vid = vlan_tci & VLAN_VID_MASK;
@@ -478,7 +529,7 @@ static void sja1105_vlan_rcv(struct sk_buff *skb, int *source_port,
static struct sk_buff *sja1105_rcv(struct sk_buff *skb,
struct net_device *netdev)
{
- int source_port = -1, switch_id = -1;
+ int source_port = -1, switch_id = -1, vbid = -1;
struct sja1105_meta meta = {0};
struct ethhdr *hdr;
bool is_link_local;
@@ -491,7 +542,7 @@ static struct sk_buff *sja1105_rcv(struct sk_buff *skb,
if (sja1105_skb_has_tag_8021q(skb)) {
/* Normal traffic path. */
- sja1105_vlan_rcv(skb, &source_port, &switch_id, &vid);
+ sja1105_vlan_rcv(skb, &source_port, &switch_id, &vbid, &vid);
} else if (is_link_local) {
/* Management traffic path. Switch embeds the switch ID and
* port ID into bytes of the destination MAC, courtesy of
@@ -510,7 +561,9 @@ static struct sk_buff *sja1105_rcv(struct sk_buff *skb,
return NULL;
}
- if (source_port == -1 || switch_id == -1)
+ if (vbid >= 1)
+ skb->dev = dsa_tag_8021q_find_port_by_vbid(netdev, vbid);
+ else if (source_port == -1 || switch_id == -1)
skb->dev = dsa_find_designated_bridge_port_by_vid(netdev, vid);
else
skb->dev = dsa_master_find_slave(netdev, switch_id, source_port);
@@ -526,48 +579,12 @@ static struct sk_buff *sja1105_rcv(struct sk_buff *skb,
is_meta);
}
-static void sja1110_process_meta_tstamp(struct dsa_switch *ds, int port,
- u8 ts_id, enum sja1110_meta_tstamp dir,
- u64 tstamp)
-{
- struct sk_buff *skb, *skb_tmp, *skb_match = NULL;
- struct dsa_port *dp = dsa_to_port(ds, port);
- struct skb_shared_hwtstamps shwt = {0};
- struct sja1105_port *sp = dp->priv;
-
- if (!dsa_port_is_sja1105(dp))
- return;
-
- /* We don't care about RX timestamps on the CPU port */
- if (dir == SJA1110_META_TSTAMP_RX)
- return;
-
- spin_lock(&sp->data->skb_txtstamp_queue.lock);
-
- skb_queue_walk_safe(&sp->data->skb_txtstamp_queue, skb, skb_tmp) {
- if (SJA1105_SKB_CB(skb)->ts_id != ts_id)
- continue;
-
- __skb_unlink(skb, &sp->data->skb_txtstamp_queue);
- skb_match = skb;
-
- break;
- }
-
- spin_unlock(&sp->data->skb_txtstamp_queue.lock);
-
- if (WARN_ON(!skb_match))
- return;
-
- shwt.hwtstamp = ns_to_ktime(sja1105_ticks_to_ns(tstamp));
- skb_complete_tx_timestamp(skb_match, &shwt);
-}
-
static struct sk_buff *sja1110_rcv_meta(struct sk_buff *skb, u16 rx_header)
{
u8 *buf = dsa_etype_header_pos_rx(skb) + SJA1110_HEADER_LEN;
int switch_id = SJA1110_RX_HEADER_SWITCH_ID(rx_header);
int n_ts = SJA1110_RX_HEADER_N_TS(rx_header);
+ struct sja1105_tagger_data *tagger_data;
struct net_device *master = skb->dev;
struct dsa_port *cpu_dp;
struct dsa_switch *ds;
@@ -581,6 +598,10 @@ static struct sk_buff *sja1110_rcv_meta(struct sk_buff *skb, u16 rx_header)
return NULL;
}
+ tagger_data = sja1105_tagger_data(ds);
+ if (!tagger_data->meta_tstamp_handler)
+ return NULL;
+
for (i = 0; i <= n_ts; i++) {
u8 ts_id, source_port, dir;
u64 tstamp;
@@ -590,8 +611,8 @@ static struct sk_buff *sja1110_rcv_meta(struct sk_buff *skb, u16 rx_header)
dir = (buf[1] & BIT(3)) >> 3;
tstamp = be64_to_cpu(*(__be64 *)(buf + 2));
- sja1110_process_meta_tstamp(ds, source_port, ts_id, dir,
- tstamp);
+ tagger_data->meta_tstamp_handler(ds, source_port, ts_id, dir,
+ tstamp);
buf += SJA1110_META_TSTAMP_SIZE;
}
@@ -668,7 +689,7 @@ static struct sk_buff *sja1110_rcv_inband_control_extension(struct sk_buff *skb,
static struct sk_buff *sja1110_rcv(struct sk_buff *skb,
struct net_device *netdev)
{
- int source_port = -1, switch_id = -1;
+ int source_port = -1, switch_id = -1, vbid = -1;
bool host_only = false;
u16 vid = 0;
@@ -682,9 +703,11 @@ static struct sk_buff *sja1110_rcv(struct sk_buff *skb,
/* Packets with in-band control extensions might still have RX VLANs */
if (likely(sja1105_skb_has_tag_8021q(skb)))
- sja1105_vlan_rcv(skb, &source_port, &switch_id, &vid);
+ sja1105_vlan_rcv(skb, &source_port, &switch_id, &vbid, &vid);
- if (source_port == -1 || switch_id == -1)
+ if (vbid >= 1)
+ skb->dev = dsa_tag_8021q_find_port_by_vbid(netdev, vbid);
+ else if (source_port == -1 || switch_id == -1)
skb->dev = dsa_find_designated_bridge_port_by_vid(netdev, vid);
else
skb->dev = dsa_master_find_slave(netdev, switch_id, source_port);
@@ -723,11 +746,53 @@ static void sja1110_flow_dissect(const struct sk_buff *skb, __be16 *proto,
*proto = ((__be16 *)skb->data)[(VLAN_HLEN / 2) - 1];
}
+static void sja1105_disconnect(struct dsa_switch *ds)
+{
+ struct sja1105_tagger_private *priv = ds->tagger_data;
+
+ kthread_destroy_worker(priv->xmit_worker);
+ kfree(priv);
+ ds->tagger_data = NULL;
+}
+
+static int sja1105_connect(struct dsa_switch *ds)
+{
+ struct sja1105_tagger_data *tagger_data;
+ struct sja1105_tagger_private *priv;
+ struct kthread_worker *xmit_worker;
+ int err;
+
+ priv = kzalloc(sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ spin_lock_init(&priv->meta_lock);
+
+ xmit_worker = kthread_create_worker(0, "dsa%d:%d_xmit",
+ ds->dst->index, ds->index);
+ if (IS_ERR(xmit_worker)) {
+ err = PTR_ERR(xmit_worker);
+ kfree(priv);
+ return err;
+ }
+
+ priv->xmit_worker = xmit_worker;
+ /* Export functions for switch driver use */
+ tagger_data = &priv->data;
+ tagger_data->rxtstamp_get_state = sja1105_rxtstamp_get_state;
+ tagger_data->rxtstamp_set_state = sja1105_rxtstamp_set_state;
+ ds->tagger_data = priv;
+
+ return 0;
+}
+
static const struct dsa_device_ops sja1105_netdev_ops = {
.name = "sja1105",
.proto = DSA_TAG_PROTO_SJA1105,
.xmit = sja1105_xmit,
.rcv = sja1105_rcv,
+ .connect = sja1105_connect,
+ .disconnect = sja1105_disconnect,
.needed_headroom = VLAN_HLEN,
.flow_dissect = sja1105_flow_dissect,
.promisc_on_master = true,
@@ -741,6 +806,8 @@ static const struct dsa_device_ops sja1110_netdev_ops = {
.proto = DSA_TAG_PROTO_SJA1110,
.xmit = sja1110_xmit,
.rcv = sja1110_rcv,
+ .connect = sja1105_connect,
+ .disconnect = sja1105_disconnect,
.flow_dissect = sja1110_flow_dissect,
.needed_headroom = SJA1110_HEADER_LEN + VLAN_HLEN,
.needed_tailroom = SJA1110_RX_TRAILER_LEN + SJA1110_MAX_PADDING_LEN,
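The counterpart to the new connect()/disconnect() tagger hooks lives in the switch driver, which fills in the callbacks exported through struct sja1105_tagger_data once the tagger has allocated its private state. A hedged sketch of that driver-side wiring, assuming the driver provides deferred-xmit and meta-timestamp handlers under the names used here (those handlers are assumptions, not part of this tagger change):

/* Illustrative driver-side hookup via the DSA connect_tag_protocol
 * operation; sja1105_port_deferred_xmit() and
 * sja1110_process_meta_tstamp() are assumed to exist in the switch
 * driver.
 */
static int sja1105_connect_tag_protocol(struct dsa_switch *ds,
					enum dsa_tag_protocol proto)
{
	struct sja1105_tagger_data *tagger_data;

	tagger_data = sja1105_tagger_data(ds);
	tagger_data->xmit_work_fn = sja1105_port_deferred_xmit;
	tagger_data->meta_tstamp_handler = sja1110_process_meta_tstamp;

	return 0;
}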
diff --git a/net/ethernet/eth.c b/net/ethernet/eth.c
index 73fce9467467..3ae5f3eb0536 100644
--- a/net/ethernet/eth.c
+++ b/net/ethernet/eth.c
@@ -534,8 +534,10 @@ EXPORT_SYMBOL(eth_platform_get_mac_address);
int nvmem_get_mac_address(struct device *dev, void *addrbuf)
{
struct nvmem_cell *cell;
- const void *mac;
+ const unsigned char *mac;
+ unsigned char macaddr[ETH_ALEN];
size_t len;
+ int i = 0;
cell = nvmem_cell_get(dev, "mac-address");
if (IS_ERR(cell))
@@ -547,14 +549,27 @@ int nvmem_get_mac_address(struct device *dev, void *addrbuf)
if (IS_ERR(mac))
return PTR_ERR(mac);
- if (len != ETH_ALEN || !is_valid_ether_addr(mac)) {
- kfree(mac);
- return -EINVAL;
+ if (len != ETH_ALEN)
+ goto invalid_addr;
+
+ if (dev->of_node &&
+ of_property_read_bool(dev->of_node, "nvmem_macaddr_swap")) {
+ for (i = 0; i < ETH_ALEN; i++)
+ macaddr[i] = mac[ETH_ALEN - i - 1];
+ } else {
+ ether_addr_copy(macaddr, mac);
}
- ether_addr_copy(addrbuf, mac);
+ if (!is_valid_ether_addr(macaddr))
+ goto invalid_addr;
+
+ ether_addr_copy(addrbuf, macaddr);
kfree(mac);
return 0;
+
+invalid_addr:
+ kfree(mac);
+ return -EINVAL;
}
EXPORT_SYMBOL(nvmem_get_mac_address);
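Callers of nvmem_get_mac_address() are unaffected by this change; the byte reversal is applied internally when the vendor-specific "nvmem_macaddr_swap" property is present on the device node. A minimal usage sketch (driver and variable names are illustrative):

/* Illustrative probe-time use from an Ethernet driver: read the MAC
 * from the "mac-address" nvmem cell, falling back to a random address.
 */
static void foo_set_mac(struct platform_device *pdev,
			struct net_device *ndev)
{
	u8 addr[ETH_ALEN];

	if (nvmem_get_mac_address(&pdev->dev, addr))
		eth_hw_addr_random(ndev);
	else
		eth_hw_addr_set(ndev, addr);
}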
diff --git a/net/sched/act_api.c b/net/sched/act_api.c
index d775676956bf..10198cfc218e 100644
--- a/net/sched/act_api.c
+++ b/net/sched/act_api.c
@@ -19,8 +19,10 @@
#include <net/sock.h>
#include <net/sch_generic.h>
#include <net/pkt_cls.h>
+#include <net/tc_act/tc_pedit.h>
#include <net/act_api.h>
#include <net/netlink.h>
+#include <net/flow_offload.h>
#ifdef CONFIG_INET
DEFINE_STATIC_KEY_FALSE(tcf_frag_xmit_count);
@@ -129,8 +131,92 @@ static void free_tcf(struct tc_action *p)
kfree(p);
}
+static unsigned int tcf_offload_act_num_actions_single(struct tc_action *act)
+{
+ if (is_tcf_pedit(act))
+ return tcf_pedit_nkeys(act);
+ else
+ return 1;
+}
+
+static int offload_action_init(struct flow_offload_action *fl_action,
+ struct tc_action *act,
+ enum offload_act_command cmd,
+ struct netlink_ext_ack *extack)
+{
+ fl_action->extack = extack;
+ fl_action->command = cmd;
+ fl_action->index = act->tcfa_index;
+
+ if (act->ops->offload_act_setup)
+ return act->ops->offload_act_setup(act, fl_action, NULL, false);
+
+ return -EOPNOTSUPP;
+}
+
+static int tcf_action_offload_cmd(struct flow_offload_action *fl_act,
+ struct netlink_ext_ack *extack)
+{
+ int err;
+
+ err = flow_indr_dev_setup_offload(NULL, NULL, TC_SETUP_ACT,
+ fl_act, NULL, NULL);
+ if (err < 0)
+ return err;
+
+ return 0;
+}
+
+/* offload the tc action after it is inserted */
+static int tcf_action_offload_add(struct tc_action *action,
+ struct netlink_ext_ack *extack)
+{
+ struct tc_action *actions[TCA_ACT_MAX_PRIO] = {
+ [0] = action,
+ };
+ struct flow_offload_action *fl_action;
+ int num, err = 0;
+
+ num = tcf_offload_act_num_actions_single(action);
+ fl_action = offload_action_alloc(num);
+ if (!fl_action)
+ return -ENOMEM;
+
+ err = offload_action_init(fl_action, action, FLOW_ACT_REPLACE, extack);
+ if (err)
+ goto fl_err;
+
+ err = tc_setup_action(&fl_action->action, actions);
+ if (err) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Failed to setup tc actions for offload\n");
+ goto fl_err;
+ }
+
+ err = tcf_action_offload_cmd(fl_action, extack);
+ tc_cleanup_offload_action(&fl_action->action);
+
+fl_err:
+ kfree(fl_action);
+
+ return err;
+}
+
+static int tcf_action_offload_del(struct tc_action *action)
+{
+ struct flow_offload_action fl_act = {};
+ int err = 0;
+
+ err = offload_action_init(&fl_act, action, FLOW_ACT_DESTROY, NULL);
+ if (err)
+ return err;
+
+ return tcf_action_offload_cmd(&fl_act, NULL);
+}
+
static void tcf_action_cleanup(struct tc_action *p)
{
+ tcf_action_offload_del(p);
if (p->ops->cleanup)
p->ops->cleanup(p);
@@ -1074,6 +1160,11 @@ err_out:
return ERR_PTR(err);
}
+static bool tc_act_bind(u32 flags)
+{
+ return !!(flags & TCA_ACT_FLAGS_BIND);
+}
+
/* Returns numbers of initialized actions or negative error. */
int tcf_action_init(struct net *net, struct tcf_proto *tp, struct nlattr *nla,
@@ -1116,6 +1207,8 @@ int tcf_action_init(struct net *net, struct tcf_proto *tp, struct nlattr *nla,
sz += tcf_action_fill_size(act);
/* Start from index 0 */
actions[i - 1] = act;
+ if (!tc_act_bind(flags))
+ tcf_action_offload_add(act, extack);
}
/* We have to commit them all together, because if any error happened in
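On the driver side, the struct flow_offload_action built by tcf_action_offload_add()/tcf_action_offload_del() is delivered through the indirect offload callback with type TC_SETUP_ACT. A hedged sketch of how a driver might dispatch on it (the foo_* helpers are hypothetical):

/* Illustrative driver-side dispatch for TC_SETUP_ACT: REPLACE installs
 * or updates an offloaded action identified by index, DESTROY removes
 * it.
 */
static int foo_setup_tc_act(struct foo_priv *priv,
			    struct flow_offload_action *fl_act)
{
	switch (fl_act->command) {
	case FLOW_ACT_REPLACE:
		return foo_act_replace(priv, fl_act->index,
				       &fl_act->action);
	case FLOW_ACT_DESTROY:
		return foo_act_destroy(priv, fl_act->index);
	default:
		return -EOPNOTSUPP;
	}
}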
diff --git a/net/sched/act_csum.c b/net/sched/act_csum.c
index a15ec95e69c3..e0f515b774ca 100644
--- a/net/sched/act_csum.c
+++ b/net/sched/act_csum.c
@@ -695,6 +695,24 @@ static size_t tcf_csum_get_fill_size(const struct tc_action *act)
return nla_total_size(sizeof(struct tc_csum));
}
+static int tcf_csum_offload_act_setup(struct tc_action *act, void *entry_data,
+ u32 *index_inc, bool bind)
+{
+ if (bind) {
+ struct flow_action_entry *entry = entry_data;
+
+ entry->id = FLOW_ACTION_CSUM;
+ entry->csum_flags = tcf_csum_update_flags(act);
+ *index_inc = 1;
+ } else {
+ struct flow_offload_action *fl_action = entry_data;
+
+ fl_action->id = FLOW_ACTION_CSUM;
+ }
+
+ return 0;
+}
+
static struct tc_action_ops act_csum_ops = {
.kind = "csum",
.id = TCA_ID_CSUM,
@@ -706,6 +724,7 @@ static struct tc_action_ops act_csum_ops = {
.walk = tcf_csum_walker,
.lookup = tcf_csum_search,
.get_fill_size = tcf_csum_get_fill_size,
+ .offload_act_setup = tcf_csum_offload_act_setup,
.size = sizeof(struct tcf_csum),
};
diff --git a/net/sched/act_ct.c b/net/sched/act_ct.c
index 81a2d6cbfb44..b5e1a9aba38e 100644
--- a/net/sched/act_ct.c
+++ b/net/sched/act_ct.c
@@ -1499,6 +1499,26 @@ static void tcf_stats_update(struct tc_action *a, u64 bytes, u64 packets,
c->tcf_tm.lastuse = max_t(u64, c->tcf_tm.lastuse, lastuse);
}
+static int tcf_ct_offload_act_setup(struct tc_action *act, void *entry_data,
+ u32 *index_inc, bool bind)
+{
+ if (bind) {
+ struct flow_action_entry *entry = entry_data;
+
+ entry->id = FLOW_ACTION_CT;
+ entry->ct.action = tcf_ct_action(act);
+ entry->ct.zone = tcf_ct_zone(act);
+ entry->ct.flow_table = tcf_ct_ft(act);
+ *index_inc = 1;
+ } else {
+ struct flow_offload_action *fl_action = entry_data;
+
+ fl_action->id = FLOW_ACTION_CT;
+ }
+
+ return 0;
+}
+
static struct tc_action_ops act_ct_ops = {
.kind = "ct",
.id = TCA_ID_CT,
@@ -1510,6 +1530,7 @@ static struct tc_action_ops act_ct_ops = {
.walk = tcf_ct_walker,
.lookup = tcf_ct_search,
.stats_update = tcf_stats_update,
+ .offload_act_setup = tcf_ct_offload_act_setup,
.size = sizeof(struct tcf_ct),
};
diff --git a/net/sched/act_gact.c b/net/sched/act_gact.c
index d8dce173df37..bde6a6c01e64 100644
--- a/net/sched/act_gact.c
+++ b/net/sched/act_gact.c
@@ -252,6 +252,43 @@ static size_t tcf_gact_get_fill_size(const struct tc_action *act)
return sz;
}
+static int tcf_gact_offload_act_setup(struct tc_action *act, void *entry_data,
+ u32 *index_inc, bool bind)
+{
+ if (bind) {
+ struct flow_action_entry *entry = entry_data;
+
+ if (is_tcf_gact_ok(act)) {
+ entry->id = FLOW_ACTION_ACCEPT;
+ } else if (is_tcf_gact_shot(act)) {
+ entry->id = FLOW_ACTION_DROP;
+ } else if (is_tcf_gact_trap(act)) {
+ entry->id = FLOW_ACTION_TRAP;
+ } else if (is_tcf_gact_goto_chain(act)) {
+ entry->id = FLOW_ACTION_GOTO;
+ entry->chain_index = tcf_gact_goto_chain_index(act);
+ } else {
+ return -EOPNOTSUPP;
+ }
+ *index_inc = 1;
+ } else {
+ struct flow_offload_action *fl_action = entry_data;
+
+ if (is_tcf_gact_ok(act))
+ fl_action->id = FLOW_ACTION_ACCEPT;
+ else if (is_tcf_gact_shot(act))
+ fl_action->id = FLOW_ACTION_DROP;
+ else if (is_tcf_gact_trap(act))
+ fl_action->id = FLOW_ACTION_TRAP;
+ else if (is_tcf_gact_goto_chain(act))
+ fl_action->id = FLOW_ACTION_GOTO;
+ else
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
+
static struct tc_action_ops act_gact_ops = {
.kind = "gact",
.id = TCA_ID_GACT,
@@ -263,6 +300,7 @@ static struct tc_action_ops act_gact_ops = {
.walk = tcf_gact_walker,
.lookup = tcf_gact_search,
.get_fill_size = tcf_gact_get_fill_size,
+ .offload_act_setup = tcf_gact_offload_act_setup,
.size = sizeof(struct tcf_gact),
};
diff --git a/net/sched/act_gate.c b/net/sched/act_gate.c
index ac985c53ebaf..d56e73843a4b 100644
--- a/net/sched/act_gate.c
+++ b/net/sched/act_gate.c
@@ -597,6 +597,54 @@ static size_t tcf_gate_get_fill_size(const struct tc_action *act)
return nla_total_size(sizeof(struct tc_gate));
}
+static void tcf_gate_entry_destructor(void *priv)
+{
+ struct action_gate_entry *oe = priv;
+
+ kfree(oe);
+}
+
+static int tcf_gate_get_entries(struct flow_action_entry *entry,
+ const struct tc_action *act)
+{
+ entry->gate.entries = tcf_gate_get_list(act);
+
+ if (!entry->gate.entries)
+ return -EINVAL;
+
+ entry->destructor = tcf_gate_entry_destructor;
+ entry->destructor_priv = entry->gate.entries;
+
+ return 0;
+}
+
+static int tcf_gate_offload_act_setup(struct tc_action *act, void *entry_data,
+ u32 *index_inc, bool bind)
+{
+ int err;
+
+ if (bind) {
+ struct flow_action_entry *entry = entry_data;
+
+ entry->id = FLOW_ACTION_GATE;
+ entry->gate.prio = tcf_gate_prio(act);
+ entry->gate.basetime = tcf_gate_basetime(act);
+ entry->gate.cycletime = tcf_gate_cycletime(act);
+ entry->gate.cycletimeext = tcf_gate_cycletimeext(act);
+ entry->gate.num_entries = tcf_gate_num_entries(act);
+ err = tcf_gate_get_entries(entry, act);
+ if (err)
+ return err;
+ *index_inc = 1;
+ } else {
+ struct flow_offload_action *fl_action = entry_data;
+
+ fl_action->id = FLOW_ACTION_GATE;
+ }
+
+ return 0;
+}
+
static struct tc_action_ops act_gate_ops = {
.kind = "gate",
.id = TCA_ID_GATE,
@@ -609,6 +657,7 @@ static struct tc_action_ops act_gate_ops = {
.stats_update = tcf_gate_stats_update,
.get_fill_size = tcf_gate_get_fill_size,
.lookup = tcf_gate_search,
+ .offload_act_setup = tcf_gate_offload_act_setup,
.size = sizeof(struct tcf_gate),
};
diff --git a/net/sched/act_mirred.c b/net/sched/act_mirred.c
index 97cd4b2377d6..e738e9c5953a 100644
--- a/net/sched/act_mirred.c
+++ b/net/sched/act_mirred.c
@@ -455,6 +455,55 @@ static size_t tcf_mirred_get_fill_size(const struct tc_action *act)
return nla_total_size(sizeof(struct tc_mirred));
}
+static void tcf_offload_mirred_get_dev(struct flow_action_entry *entry,
+ const struct tc_action *act)
+{
+ entry->dev = act->ops->get_dev(act, &entry->destructor);
+ if (!entry->dev)
+ return;
+ entry->destructor_priv = entry->dev;
+}
+
+static int tcf_mirred_offload_act_setup(struct tc_action *act, void *entry_data,
+ u32 *index_inc, bool bind)
+{
+ if (bind) {
+ struct flow_action_entry *entry = entry_data;
+
+ if (is_tcf_mirred_egress_redirect(act)) {
+ entry->id = FLOW_ACTION_REDIRECT;
+ tcf_offload_mirred_get_dev(entry, act);
+ } else if (is_tcf_mirred_egress_mirror(act)) {
+ entry->id = FLOW_ACTION_MIRRED;
+ tcf_offload_mirred_get_dev(entry, act);
+ } else if (is_tcf_mirred_ingress_redirect(act)) {
+ entry->id = FLOW_ACTION_REDIRECT_INGRESS;
+ tcf_offload_mirred_get_dev(entry, act);
+ } else if (is_tcf_mirred_ingress_mirror(act)) {
+ entry->id = FLOW_ACTION_MIRRED_INGRESS;
+ tcf_offload_mirred_get_dev(entry, act);
+ } else {
+ return -EOPNOTSUPP;
+ }
+ *index_inc = 1;
+ } else {
+ struct flow_offload_action *fl_action = entry_data;
+
+ if (is_tcf_mirred_egress_redirect(act))
+ fl_action->id = FLOW_ACTION_REDIRECT;
+ else if (is_tcf_mirred_egress_mirror(act))
+ fl_action->id = FLOW_ACTION_MIRRED;
+ else if (is_tcf_mirred_ingress_redirect(act))
+ fl_action->id = FLOW_ACTION_REDIRECT_INGRESS;
+ else if (is_tcf_mirred_ingress_mirror(act))
+ fl_action->id = FLOW_ACTION_MIRRED_INGRESS;
+ else
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
+
static struct tc_action_ops act_mirred_ops = {
.kind = "mirred",
.id = TCA_ID_MIRRED,
@@ -467,6 +516,7 @@ static struct tc_action_ops act_mirred_ops = {
.walk = tcf_mirred_walker,
.lookup = tcf_mirred_search,
.get_fill_size = tcf_mirred_get_fill_size,
+ .offload_act_setup = tcf_mirred_offload_act_setup,
.size = sizeof(struct tcf_mirred),
.get_dev = tcf_mirred_get_dev,
};
diff --git a/net/sched/act_mpls.c b/net/sched/act_mpls.c
index d010c5b8e83b..c75ebb725079 100644
--- a/net/sched/act_mpls.c
+++ b/net/sched/act_mpls.c
@@ -398,6 +398,57 @@ static int tcf_mpls_search(struct net *net, struct tc_action **a, u32 index)
return tcf_idr_search(tn, a, index);
}
+static int tcf_mpls_offload_act_setup(struct tc_action *act, void *entry_data,
+ u32 *index_inc, bool bind)
+{
+ if (bind) {
+ struct flow_action_entry *entry = entry_data;
+
+ switch (tcf_mpls_action(act)) {
+ case TCA_MPLS_ACT_PUSH:
+ entry->id = FLOW_ACTION_MPLS_PUSH;
+ entry->mpls_push.proto = tcf_mpls_proto(act);
+ entry->mpls_push.label = tcf_mpls_label(act);
+ entry->mpls_push.tc = tcf_mpls_tc(act);
+ entry->mpls_push.bos = tcf_mpls_bos(act);
+ entry->mpls_push.ttl = tcf_mpls_ttl(act);
+ break;
+ case TCA_MPLS_ACT_POP:
+ entry->id = FLOW_ACTION_MPLS_POP;
+ entry->mpls_pop.proto = tcf_mpls_proto(act);
+ break;
+ case TCA_MPLS_ACT_MODIFY:
+ entry->id = FLOW_ACTION_MPLS_MANGLE;
+ entry->mpls_mangle.label = tcf_mpls_label(act);
+ entry->mpls_mangle.tc = tcf_mpls_tc(act);
+ entry->mpls_mangle.bos = tcf_mpls_bos(act);
+ entry->mpls_mangle.ttl = tcf_mpls_ttl(act);
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+ *index_inc = 1;
+ } else {
+ struct flow_offload_action *fl_action = entry_data;
+
+ switch (tcf_mpls_action(act)) {
+ case TCA_MPLS_ACT_PUSH:
+ fl_action->id = FLOW_ACTION_MPLS_PUSH;
+ break;
+ case TCA_MPLS_ACT_POP:
+ fl_action->id = FLOW_ACTION_MPLS_POP;
+ break;
+ case TCA_MPLS_ACT_MODIFY:
+ fl_action->id = FLOW_ACTION_MPLS_MANGLE;
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+ }
+
+ return 0;
+}
+
static struct tc_action_ops act_mpls_ops = {
.kind = "mpls",
.id = TCA_ID_MPLS,
@@ -408,6 +459,7 @@ static struct tc_action_ops act_mpls_ops = {
.cleanup = tcf_mpls_cleanup,
.walk = tcf_mpls_walker,
.lookup = tcf_mpls_search,
+ .offload_act_setup = tcf_mpls_offload_act_setup,
.size = sizeof(struct tcf_mpls),
};
diff --git a/net/sched/act_pedit.c b/net/sched/act_pedit.c
index 051cd2092859..e3cb1460b2b1 100644
--- a/net/sched/act_pedit.c
+++ b/net/sched/act_pedit.c
@@ -539,6 +539,39 @@ static int tcf_pedit_search(struct net *net, struct tc_action **a, u32 index)
return tcf_idr_search(tn, a, index);
}
+static int tcf_pedit_offload_act_setup(struct tc_action *act, void *entry_data,
+ u32 *index_inc, bool bind)
+{
+ if (bind) {
+ struct flow_action_entry *entry = entry_data;
+ int k;
+
+ for (k = 0; k < tcf_pedit_nkeys(act); k++) {
+ switch (tcf_pedit_cmd(act, k)) {
+ case TCA_PEDIT_KEY_EX_CMD_SET:
+ entry->id = FLOW_ACTION_MANGLE;
+ break;
+ case TCA_PEDIT_KEY_EX_CMD_ADD:
+ entry->id = FLOW_ACTION_ADD;
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+ entry->mangle.htype = tcf_pedit_htype(act, k);
+ entry->mangle.mask = tcf_pedit_mask(act, k);
+ entry->mangle.val = tcf_pedit_val(act, k);
+ entry->mangle.offset = tcf_pedit_offset(act, k);
+ entry->hw_stats = tc_act_hw_stats(act->hw_stats);
+ entry++;
+ }
+ *index_inc = k;
+ } else {
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
+
static struct tc_action_ops act_pedit_ops = {
.kind = "pedit",
.id = TCA_ID_PEDIT,
@@ -550,6 +583,7 @@ static struct tc_action_ops act_pedit_ops = {
.init = tcf_pedit_init,
.walk = tcf_pedit_walker,
.lookup = tcf_pedit_search,
+ .offload_act_setup = tcf_pedit_offload_act_setup,
.size = sizeof(struct tcf_pedit),
};
diff --git a/net/sched/act_police.c b/net/sched/act_police.c
index d44b933b821d..d2550c3744a4 100644
--- a/net/sched/act_police.c
+++ b/net/sched/act_police.c
@@ -419,6 +419,76 @@ static int tcf_police_search(struct net *net, struct tc_action **a, u32 index)
return tcf_idr_search(tn, a, index);
}
+static int tcf_police_act_to_flow_act(int tc_act, u32 *extval)
+{
+ int act_id = -EOPNOTSUPP;
+
+ if (!TC_ACT_EXT_OPCODE(tc_act)) {
+ if (tc_act == TC_ACT_OK)
+ act_id = FLOW_ACTION_ACCEPT;
+ else if (tc_act == TC_ACT_SHOT)
+ act_id = FLOW_ACTION_DROP;
+ else if (tc_act == TC_ACT_PIPE)
+ act_id = FLOW_ACTION_PIPE;
+ } else if (TC_ACT_EXT_CMP(tc_act, TC_ACT_GOTO_CHAIN)) {
+ act_id = FLOW_ACTION_GOTO;
+ *extval = tc_act & TC_ACT_EXT_VAL_MASK;
+ } else if (TC_ACT_EXT_CMP(tc_act, TC_ACT_JUMP)) {
+ act_id = FLOW_ACTION_JUMP;
+ *extval = tc_act & TC_ACT_EXT_VAL_MASK;
+ }
+
+ return act_id;
+}
+
+static int tcf_police_offload_act_setup(struct tc_action *act, void *entry_data,
+ u32 *index_inc, bool bind)
+{
+ if (bind) {
+ struct flow_action_entry *entry = entry_data;
+ struct tcf_police *police = to_police(act);
+ struct tcf_police_params *p;
+ int act_id;
+
+ p = rcu_dereference_protected(police->params,
+ lockdep_is_held(&police->tcf_lock));
+
+ entry->id = FLOW_ACTION_POLICE;
+ entry->police.burst = tcf_police_burst(act);
+ entry->police.rate_bytes_ps =
+ tcf_police_rate_bytes_ps(act);
+ entry->police.peakrate_bytes_ps = tcf_police_peakrate_bytes_ps(act);
+ entry->police.avrate = tcf_police_tcfp_ewma_rate(act);
+ entry->police.overhead = tcf_police_rate_overhead(act);
+ entry->police.burst_pkt = tcf_police_burst_pkt(act);
+ entry->police.rate_pkt_ps =
+ tcf_police_rate_pkt_ps(act);
+ entry->police.mtu = tcf_police_tcfp_mtu(act);
+
+ act_id = tcf_police_act_to_flow_act(police->tcf_action,
+ &entry->police.exceed.extval);
+ if (act_id < 0)
+ return act_id;
+
+ entry->police.exceed.act_id = act_id;
+
+ act_id = tcf_police_act_to_flow_act(p->tcfp_result,
+ &entry->police.notexceed.extval);
+ if (act_id < 0)
+ return act_id;
+
+ entry->police.notexceed.act_id = act_id;
+
+ *index_inc = 1;
+ } else {
+ struct flow_offload_action *fl_action = entry_data;
+
+ fl_action->id = FLOW_ACTION_POLICE;
+ }
+
+ return 0;
+}
+
MODULE_AUTHOR("Alexey Kuznetsov");
MODULE_DESCRIPTION("Policing actions");
MODULE_LICENSE("GPL");
@@ -434,6 +504,7 @@ static struct tc_action_ops act_police_ops = {
.walk = tcf_police_walker,
.lookup = tcf_police_search,
.cleanup = tcf_police_cleanup,
+ .offload_act_setup = tcf_police_offload_act_setup,
.size = sizeof(struct tcf_police),
};
diff --git a/net/sched/act_sample.c b/net/sched/act_sample.c
index ca67d9644917..0c0cb7fd69f2 100644
--- a/net/sched/act_sample.c
+++ b/net/sched/act_sample.c
@@ -289,6 +289,35 @@ tcf_sample_get_group(const struct tc_action *a,
return group;
}
+static void tcf_offload_sample_get_group(struct flow_action_entry *entry,
+ const struct tc_action *act)
+{
+ entry->sample.psample_group =
+ act->ops->get_psample_group(act, &entry->destructor);
+ entry->destructor_priv = entry->sample.psample_group;
+}
+
+static int tcf_sample_offload_act_setup(struct tc_action *act, void *entry_data,
+ u32 *index_inc, bool bind)
+{
+ if (bind) {
+ struct flow_action_entry *entry = entry_data;
+
+ entry->id = FLOW_ACTION_SAMPLE;
+ entry->sample.trunc_size = tcf_sample_trunc_size(act);
+ entry->sample.truncate = tcf_sample_truncate(act);
+ entry->sample.rate = tcf_sample_rate(act);
+ tcf_offload_sample_get_group(entry, act);
+ *index_inc = 1;
+ } else {
+ struct flow_offload_action *fl_action = entry_data;
+
+ fl_action->id = FLOW_ACTION_SAMPLE;
+ }
+
+ return 0;
+}
+
static struct tc_action_ops act_sample_ops = {
.kind = "sample",
.id = TCA_ID_SAMPLE,
@@ -301,6 +330,7 @@ static struct tc_action_ops act_sample_ops = {
.walk = tcf_sample_walker,
.lookup = tcf_sample_search,
.get_psample_group = tcf_sample_get_group,
+ .offload_act_setup = tcf_sample_offload_act_setup,
.size = sizeof(struct tcf_sample),
};
diff --git a/net/sched/act_skbedit.c b/net/sched/act_skbedit.c
index 6088ceaf582e..9c1da996f103 100644
--- a/net/sched/act_skbedit.c
+++ b/net/sched/act_skbedit.c
@@ -327,6 +327,41 @@ static size_t tcf_skbedit_get_fill_size(const struct tc_action *act)
+ nla_total_size_64bit(sizeof(u64)); /* TCA_SKBEDIT_FLAGS */
}
+static int tcf_skbedit_offload_act_setup(struct tc_action *act, void *entry_data,
+ u32 *index_inc, bool bind)
+{
+ if (bind) {
+ struct flow_action_entry *entry = entry_data;
+
+ if (is_tcf_skbedit_mark(act)) {
+ entry->id = FLOW_ACTION_MARK;
+ entry->mark = tcf_skbedit_mark(act);
+ } else if (is_tcf_skbedit_ptype(act)) {
+ entry->id = FLOW_ACTION_PTYPE;
+ entry->ptype = tcf_skbedit_ptype(act);
+ } else if (is_tcf_skbedit_priority(act)) {
+ entry->id = FLOW_ACTION_PRIORITY;
+ entry->priority = tcf_skbedit_priority(act);
+ } else {
+ return -EOPNOTSUPP;
+ }
+ *index_inc = 1;
+ } else {
+ struct flow_offload_action *fl_action = entry_data;
+
+ if (is_tcf_skbedit_mark(act))
+ fl_action->id = FLOW_ACTION_MARK;
+ else if (is_tcf_skbedit_ptype(act))
+ fl_action->id = FLOW_ACTION_PTYPE;
+ else if (is_tcf_skbedit_priority(act))
+ fl_action->id = FLOW_ACTION_PRIORITY;
+ else
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
+
static struct tc_action_ops act_skbedit_ops = {
.kind = "skbedit",
.id = TCA_ID_SKBEDIT,
@@ -339,6 +374,7 @@ static struct tc_action_ops act_skbedit_ops = {
.walk = tcf_skbedit_walker,
.get_fill_size = tcf_skbedit_get_fill_size,
.lookup = tcf_skbedit_search,
+ .offload_act_setup = tcf_skbedit_offload_act_setup,
.size = sizeof(struct tcf_skbedit),
};
diff --git a/net/sched/act_tunnel_key.c b/net/sched/act_tunnel_key.c
index d9cd174eecb7..23aba03d26a8 100644
--- a/net/sched/act_tunnel_key.c
+++ b/net/sched/act_tunnel_key.c
@@ -787,6 +787,59 @@ static int tunnel_key_search(struct net *net, struct tc_action **a, u32 index)
return tcf_idr_search(tn, a, index);
}
+static void tcf_tunnel_encap_put_tunnel(void *priv)
+{
+ struct ip_tunnel_info *tunnel = priv;
+
+ kfree(tunnel);
+}
+
+static int tcf_tunnel_encap_get_tunnel(struct flow_action_entry *entry,
+ const struct tc_action *act)
+{
+ entry->tunnel = tcf_tunnel_info_copy(act);
+ if (!entry->tunnel)
+ return -ENOMEM;
+ entry->destructor = tcf_tunnel_encap_put_tunnel;
+ entry->destructor_priv = entry->tunnel;
+ return 0;
+}
+
+static int tcf_tunnel_key_offload_act_setup(struct tc_action *act,
+ void *entry_data,
+ u32 *index_inc,
+ bool bind)
+{
+ int err;
+
+ if (bind) {
+ struct flow_action_entry *entry = entry_data;
+
+ if (is_tcf_tunnel_set(act)) {
+ entry->id = FLOW_ACTION_TUNNEL_ENCAP;
+ err = tcf_tunnel_encap_get_tunnel(entry, act);
+ if (err)
+ return err;
+ } else if (is_tcf_tunnel_release(act)) {
+ entry->id = FLOW_ACTION_TUNNEL_DECAP;
+ } else {
+ return -EOPNOTSUPP;
+ }
+ *index_inc = 1;
+ } else {
+ struct flow_offload_action *fl_action = entry_data;
+
+ if (is_tcf_tunnel_set(act))
+ fl_action->id = FLOW_ACTION_TUNNEL_ENCAP;
+ else if (is_tcf_tunnel_release(act))
+ fl_action->id = FLOW_ACTION_TUNNEL_DECAP;
+ else
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
+
static struct tc_action_ops act_tunnel_key_ops = {
.kind = "tunnel_key",
.id = TCA_ID_TUNNEL_KEY,
@@ -797,6 +850,7 @@ static struct tc_action_ops act_tunnel_key_ops = {
.cleanup = tunnel_key_release,
.walk = tunnel_key_walker,
.lookup = tunnel_key_search,
+ .offload_act_setup = tcf_tunnel_key_offload_act_setup,
.size = sizeof(struct tcf_tunnel_key),
};
diff --git a/net/sched/act_vlan.c b/net/sched/act_vlan.c
index e4dc5a555bd8..756e2dcde1cd 100644
--- a/net/sched/act_vlan.c
+++ b/net/sched/act_vlan.c
@@ -368,6 +368,53 @@ static size_t tcf_vlan_get_fill_size(const struct tc_action *act)
+ nla_total_size(sizeof(u8)); /* TCA_VLAN_PUSH_VLAN_PRIORITY */
}
+static int tcf_vlan_offload_act_setup(struct tc_action *act, void *entry_data,
+ u32 *index_inc, bool bind)
+{
+ if (bind) {
+ struct flow_action_entry *entry = entry_data;
+
+ switch (tcf_vlan_action(act)) {
+ case TCA_VLAN_ACT_PUSH:
+ entry->id = FLOW_ACTION_VLAN_PUSH;
+ entry->vlan.vid = tcf_vlan_push_vid(act);
+ entry->vlan.proto = tcf_vlan_push_proto(act);
+ entry->vlan.prio = tcf_vlan_push_prio(act);
+ break;
+ case TCA_VLAN_ACT_POP:
+ entry->id = FLOW_ACTION_VLAN_POP;
+ break;
+ case TCA_VLAN_ACT_MODIFY:
+ entry->id = FLOW_ACTION_VLAN_MANGLE;
+ entry->vlan.vid = tcf_vlan_push_vid(act);
+ entry->vlan.proto = tcf_vlan_push_proto(act);
+ entry->vlan.prio = tcf_vlan_push_prio(act);
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+ *index_inc = 1;
+ } else {
+ struct flow_offload_action *fl_action = entry_data;
+
+ switch (tcf_vlan_action(act)) {
+ case TCA_VLAN_ACT_PUSH:
+ fl_action->id = FLOW_ACTION_VLAN_PUSH;
+ break;
+ case TCA_VLAN_ACT_POP:
+ fl_action->id = FLOW_ACTION_VLAN_POP;
+ break;
+ case TCA_VLAN_ACT_MODIFY:
+ fl_action->id = FLOW_ACTION_VLAN_MANGLE;
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+ }
+
+ return 0;
+}
+
static struct tc_action_ops act_vlan_ops = {
.kind = "vlan",
.id = TCA_ID_VLAN,
@@ -380,6 +427,7 @@ static struct tc_action_ops act_vlan_ops = {
.stats_update = tcf_vlan_stats_update,
.get_fill_size = tcf_vlan_get_fill_size,
.lookup = tcf_vlan_search,
+ .offload_act_setup = tcf_vlan_offload_act_setup,
.size = sizeof(struct tcf_vlan),
};
diff --git a/net/sched/cls_api.c b/net/sched/cls_api.c
index 501e05943f02..46dc65e44b5d 100644
--- a/net/sched/cls_api.c
+++ b/net/sched/cls_api.c
@@ -3471,7 +3471,7 @@ static void tcf_act_put_cookie(struct flow_action_entry *entry)
flow_action_cookie_destroy(entry->cookie);
}
-void tc_cleanup_flow_action(struct flow_action *flow_action)
+void tc_cleanup_offload_action(struct flow_action *flow_action)
{
struct flow_action_entry *entry;
int i;
@@ -3482,93 +3482,37 @@ void tc_cleanup_flow_action(struct flow_action *flow_action)
entry->destructor(entry->destructor_priv);
}
}
-EXPORT_SYMBOL(tc_cleanup_flow_action);
+EXPORT_SYMBOL(tc_cleanup_offload_action);
-static void tcf_mirred_get_dev(struct flow_action_entry *entry,
- const struct tc_action *act)
+static int tc_setup_offload_act(struct tc_action *act,
+ struct flow_action_entry *entry,
+ u32 *index_inc)
{
#ifdef CONFIG_NET_CLS_ACT
- entry->dev = act->ops->get_dev(act, &entry->destructor);
- if (!entry->dev)
- return;
- entry->destructor_priv = entry->dev;
-#endif
-}
-
-static void tcf_tunnel_encap_put_tunnel(void *priv)
-{
- struct ip_tunnel_info *tunnel = priv;
-
- kfree(tunnel);
-}
-
-static int tcf_tunnel_encap_get_tunnel(struct flow_action_entry *entry,
- const struct tc_action *act)
-{
- entry->tunnel = tcf_tunnel_info_copy(act);
- if (!entry->tunnel)
- return -ENOMEM;
- entry->destructor = tcf_tunnel_encap_put_tunnel;
- entry->destructor_priv = entry->tunnel;
+ if (act->ops->offload_act_setup)
+ return act->ops->offload_act_setup(act, entry, index_inc, true);
+ else
+ return -EOPNOTSUPP;
+#else
return 0;
-}
-
-static void tcf_sample_get_group(struct flow_action_entry *entry,
- const struct tc_action *act)
-{
-#ifdef CONFIG_NET_CLS_ACT
- entry->sample.psample_group =
- act->ops->get_psample_group(act, &entry->destructor);
- entry->destructor_priv = entry->sample.psample_group;
#endif
}
-static void tcf_gate_entry_destructor(void *priv)
-{
- struct action_gate_entry *oe = priv;
-
- kfree(oe);
-}
-
-static int tcf_gate_get_entries(struct flow_action_entry *entry,
- const struct tc_action *act)
-{
- entry->gate.entries = tcf_gate_get_list(act);
-
- if (!entry->gate.entries)
- return -EINVAL;
-
- entry->destructor = tcf_gate_entry_destructor;
- entry->destructor_priv = entry->gate.entries;
-
- return 0;
-}
-
-static enum flow_action_hw_stats tc_act_hw_stats(u8 hw_stats)
-{
- if (WARN_ON_ONCE(hw_stats > TCA_ACT_HW_STATS_ANY))
- return FLOW_ACTION_HW_STATS_DONT_CARE;
- else if (!hw_stats)
- return FLOW_ACTION_HW_STATS_DISABLED;
-
- return hw_stats;
-}
-
-int tc_setup_flow_action(struct flow_action *flow_action,
- const struct tcf_exts *exts)
+int tc_setup_action(struct flow_action *flow_action,
+ struct tc_action *actions[])
{
+ int i, j, index, err = 0;
struct tc_action *act;
- int i, j, k, err = 0;
BUILD_BUG_ON(TCA_ACT_HW_STATS_ANY != FLOW_ACTION_HW_STATS_ANY);
BUILD_BUG_ON(TCA_ACT_HW_STATS_IMMEDIATE != FLOW_ACTION_HW_STATS_IMMEDIATE);
BUILD_BUG_ON(TCA_ACT_HW_STATS_DELAYED != FLOW_ACTION_HW_STATS_DELAYED);
- if (!exts)
+ if (!actions)
return 0;
j = 0;
- tcf_exts_for_each_action(i, act, exts) {
+ tcf_act_for_each_action(i, act, actions) {
struct flow_action_entry *entry;
entry = &flow_action->entries[j];
@@ -3578,165 +3522,39 @@ int tc_setup_flow_action(struct flow_action *flow_action,
goto err_out_locked;
entry->hw_stats = tc_act_hw_stats(act->hw_stats);
-
- if (is_tcf_gact_ok(act)) {
- entry->id = FLOW_ACTION_ACCEPT;
- } else if (is_tcf_gact_shot(act)) {
- entry->id = FLOW_ACTION_DROP;
- } else if (is_tcf_gact_trap(act)) {
- entry->id = FLOW_ACTION_TRAP;
- } else if (is_tcf_gact_goto_chain(act)) {
- entry->id = FLOW_ACTION_GOTO;
- entry->chain_index = tcf_gact_goto_chain_index(act);
- } else if (is_tcf_mirred_egress_redirect(act)) {
- entry->id = FLOW_ACTION_REDIRECT;
- tcf_mirred_get_dev(entry, act);
- } else if (is_tcf_mirred_egress_mirror(act)) {
- entry->id = FLOW_ACTION_MIRRED;
- tcf_mirred_get_dev(entry, act);
- } else if (is_tcf_mirred_ingress_redirect(act)) {
- entry->id = FLOW_ACTION_REDIRECT_INGRESS;
- tcf_mirred_get_dev(entry, act);
- } else if (is_tcf_mirred_ingress_mirror(act)) {
- entry->id = FLOW_ACTION_MIRRED_INGRESS;
- tcf_mirred_get_dev(entry, act);
- } else if (is_tcf_vlan(act)) {
- switch (tcf_vlan_action(act)) {
- case TCA_VLAN_ACT_PUSH:
- entry->id = FLOW_ACTION_VLAN_PUSH;
- entry->vlan.vid = tcf_vlan_push_vid(act);
- entry->vlan.proto = tcf_vlan_push_proto(act);
- entry->vlan.prio = tcf_vlan_push_prio(act);
- break;
- case TCA_VLAN_ACT_POP:
- entry->id = FLOW_ACTION_VLAN_POP;
- break;
- case TCA_VLAN_ACT_MODIFY:
- entry->id = FLOW_ACTION_VLAN_MANGLE;
- entry->vlan.vid = tcf_vlan_push_vid(act);
- entry->vlan.proto = tcf_vlan_push_proto(act);
- entry->vlan.prio = tcf_vlan_push_prio(act);
- break;
- default:
- err = -EOPNOTSUPP;
- goto err_out_locked;
- }
- } else if (is_tcf_tunnel_set(act)) {
- entry->id = FLOW_ACTION_TUNNEL_ENCAP;
- err = tcf_tunnel_encap_get_tunnel(entry, act);
- if (err)
- goto err_out_locked;
- } else if (is_tcf_tunnel_release(act)) {
- entry->id = FLOW_ACTION_TUNNEL_DECAP;
- } else if (is_tcf_pedit(act)) {
- for (k = 0; k < tcf_pedit_nkeys(act); k++) {
- switch (tcf_pedit_cmd(act, k)) {
- case TCA_PEDIT_KEY_EX_CMD_SET:
- entry->id = FLOW_ACTION_MANGLE;
- break;
- case TCA_PEDIT_KEY_EX_CMD_ADD:
- entry->id = FLOW_ACTION_ADD;
- break;
- default:
- err = -EOPNOTSUPP;
- goto err_out_locked;
- }
- entry->mangle.htype = tcf_pedit_htype(act, k);
- entry->mangle.mask = tcf_pedit_mask(act, k);
- entry->mangle.val = tcf_pedit_val(act, k);
- entry->mangle.offset = tcf_pedit_offset(act, k);
- entry->hw_stats = tc_act_hw_stats(act->hw_stats);
- entry = &flow_action->entries[++j];
- }
- } else if (is_tcf_csum(act)) {
- entry->id = FLOW_ACTION_CSUM;
- entry->csum_flags = tcf_csum_update_flags(act);
- } else if (is_tcf_skbedit_mark(act)) {
- entry->id = FLOW_ACTION_MARK;
- entry->mark = tcf_skbedit_mark(act);
- } else if (is_tcf_sample(act)) {
- entry->id = FLOW_ACTION_SAMPLE;
- entry->sample.trunc_size = tcf_sample_trunc_size(act);
- entry->sample.truncate = tcf_sample_truncate(act);
- entry->sample.rate = tcf_sample_rate(act);
- tcf_sample_get_group(entry, act);
- } else if (is_tcf_police(act)) {
- entry->id = FLOW_ACTION_POLICE;
- entry->police.burst = tcf_police_burst(act);
- entry->police.rate_bytes_ps =
- tcf_police_rate_bytes_ps(act);
- entry->police.burst_pkt = tcf_police_burst_pkt(act);
- entry->police.rate_pkt_ps =
- tcf_police_rate_pkt_ps(act);
- entry->police.mtu = tcf_police_tcfp_mtu(act);
- entry->police.index = act->tcfa_index;
- } else if (is_tcf_ct(act)) {
- entry->id = FLOW_ACTION_CT;
- entry->ct.action = tcf_ct_action(act);
- entry->ct.zone = tcf_ct_zone(act);
- entry->ct.flow_table = tcf_ct_ft(act);
- } else if (is_tcf_mpls(act)) {
- switch (tcf_mpls_action(act)) {
- case TCA_MPLS_ACT_PUSH:
- entry->id = FLOW_ACTION_MPLS_PUSH;
- entry->mpls_push.proto = tcf_mpls_proto(act);
- entry->mpls_push.label = tcf_mpls_label(act);
- entry->mpls_push.tc = tcf_mpls_tc(act);
- entry->mpls_push.bos = tcf_mpls_bos(act);
- entry->mpls_push.ttl = tcf_mpls_ttl(act);
- break;
- case TCA_MPLS_ACT_POP:
- entry->id = FLOW_ACTION_MPLS_POP;
- entry->mpls_pop.proto = tcf_mpls_proto(act);
- break;
- case TCA_MPLS_ACT_MODIFY:
- entry->id = FLOW_ACTION_MPLS_MANGLE;
- entry->mpls_mangle.label = tcf_mpls_label(act);
- entry->mpls_mangle.tc = tcf_mpls_tc(act);
- entry->mpls_mangle.bos = tcf_mpls_bos(act);
- entry->mpls_mangle.ttl = tcf_mpls_ttl(act);
- break;
- default:
- err = -EOPNOTSUPP;
- goto err_out_locked;
- }
- } else if (is_tcf_skbedit_ptype(act)) {
- entry->id = FLOW_ACTION_PTYPE;
- entry->ptype = tcf_skbedit_ptype(act);
- } else if (is_tcf_skbedit_priority(act)) {
- entry->id = FLOW_ACTION_PRIORITY;
- entry->priority = tcf_skbedit_priority(act);
- } else if (is_tcf_gate(act)) {
- entry->id = FLOW_ACTION_GATE;
- entry->gate.index = tcf_gate_index(act);
- entry->gate.prio = tcf_gate_prio(act);
- entry->gate.basetime = tcf_gate_basetime(act);
- entry->gate.cycletime = tcf_gate_cycletime(act);
- entry->gate.cycletimeext = tcf_gate_cycletimeext(act);
- entry->gate.num_entries = tcf_gate_num_entries(act);
- err = tcf_gate_get_entries(entry, act);
- if (err)
- goto err_out_locked;
- } else {
- err = -EOPNOTSUPP;
+ entry->hw_index = act->tcfa_index;
+ index = 0;
+ err = tc_setup_offload_act(act, entry, &index);
+ if (!err)
+ j += index;
+ else
goto err_out_locked;
- }
spin_unlock_bh(&act->tcfa_lock);
-
- if (!is_tcf_pedit(act))
- j++;
}
err_out:
if (err)
- tc_cleanup_flow_action(flow_action);
+ tc_cleanup_offload_action(flow_action);
return err;
err_out_locked:
spin_unlock_bh(&act->tcfa_lock);
goto err_out;
}
-EXPORT_SYMBOL(tc_setup_flow_action);
+
+int tc_setup_offload_action(struct flow_action *flow_action,
+ const struct tcf_exts *exts)
+{
+#ifdef CONFIG_NET_CLS_ACT
+ if (!exts)
+ return 0;
+
+ return tc_setup_action(flow_action, exts->actions);
+#else
+ return 0;
+#endif
+}
+EXPORT_SYMBOL(tc_setup_offload_action);
unsigned int tcf_exts_num_actions(struct tcf_exts *exts)
{
diff --git a/net/sched/cls_flower.c b/net/sched/cls_flower.c
index 32b03a13f9b2..808be600ec1d 100644
--- a/net/sched/cls_flower.c
+++ b/net/sched/cls_flower.c
@@ -462,7 +462,7 @@ static int fl_hw_replace_filter(struct tcf_proto *tp,
cls_flower.rule->match.key = &f->mkey;
cls_flower.classid = f->res.classid;
- err = tc_setup_flow_action(&cls_flower.rule->action, &f->exts);
+ err = tc_setup_offload_action(&cls_flower.rule->action, &f->exts);
if (err) {
kfree(cls_flower.rule);
if (skip_sw) {
@@ -474,7 +474,7 @@ static int fl_hw_replace_filter(struct tcf_proto *tp,
err = tc_setup_cb_add(block, tp, TC_SETUP_CLSFLOWER, &cls_flower,
skip_sw, &f->flags, &f->in_hw_count, rtnl_held);
- tc_cleanup_flow_action(&cls_flower.rule->action);
+ tc_cleanup_offload_action(&cls_flower.rule->action);
kfree(cls_flower.rule);
if (err) {
@@ -2275,7 +2275,7 @@ static int fl_reoffload(struct tcf_proto *tp, bool add, flow_setup_cb_t *cb,
cls_flower.rule->match.mask = &f->mask->key;
cls_flower.rule->match.key = &f->mkey;
- err = tc_setup_flow_action(&cls_flower.rule->action, &f->exts);
+ err = tc_setup_offload_action(&cls_flower.rule->action, &f->exts);
if (err) {
kfree(cls_flower.rule);
if (tc_skip_sw(f->flags)) {
@@ -2292,7 +2292,7 @@ static int fl_reoffload(struct tcf_proto *tp, bool add, flow_setup_cb_t *cb,
TC_SETUP_CLSFLOWER, &cls_flower,
cb_priv, &f->flags,
&f->in_hw_count);
- tc_cleanup_flow_action(&cls_flower.rule->action);
+ tc_cleanup_offload_action(&cls_flower.rule->action);
kfree(cls_flower.rule);
if (err) {
diff --git a/net/sched/cls_matchall.c b/net/sched/cls_matchall.c
index 24f0046ce0b3..2d2702915cfa 100644
--- a/net/sched/cls_matchall.c
+++ b/net/sched/cls_matchall.c
@@ -97,7 +97,7 @@ static int mall_replace_hw_filter(struct tcf_proto *tp,
cls_mall.command = TC_CLSMATCHALL_REPLACE;
cls_mall.cookie = cookie;
- err = tc_setup_flow_action(&cls_mall.rule->action, &head->exts);
+ err = tc_setup_offload_action(&cls_mall.rule->action, &head->exts);
if (err) {
kfree(cls_mall.rule);
mall_destroy_hw_filter(tp, head, cookie, NULL);
@@ -111,7 +111,7 @@ static int mall_replace_hw_filter(struct tcf_proto *tp,
err = tc_setup_cb_add(block, tp, TC_SETUP_CLSMATCHALL, &cls_mall,
skip_sw, &head->flags, &head->in_hw_count, true);
- tc_cleanup_flow_action(&cls_mall.rule->action);
+ tc_cleanup_offload_action(&cls_mall.rule->action);
kfree(cls_mall.rule);
if (err) {
@@ -301,7 +301,7 @@ static int mall_reoffload(struct tcf_proto *tp, bool add, flow_setup_cb_t *cb,
TC_CLSMATCHALL_REPLACE : TC_CLSMATCHALL_DESTROY;
cls_mall.cookie = (unsigned long)head;
- err = tc_setup_flow_action(&cls_mall.rule->action, &head->exts);
+ err = tc_setup_offload_action(&cls_mall.rule->action, &head->exts);
if (err) {
kfree(cls_mall.rule);
if (add && tc_skip_sw(head->flags)) {
@@ -314,7 +314,7 @@ static int mall_reoffload(struct tcf_proto *tp, bool add, flow_setup_cb_t *cb,
err = tc_setup_cb_reoffload(block, tp, add, cb, TC_SETUP_CLSMATCHALL,
&cls_mall, cb_priv, &head->flags,
&head->in_hw_count);
- tc_cleanup_flow_action(&cls_mall.rule->action);
+ tc_cleanup_offload_action(&cls_mall.rule->action);
kfree(cls_mall.rule);
if (err)
diff --git a/net/switchdev/switchdev.c b/net/switchdev/switchdev.c
index 0b2c18efc079..9f22d8de889b 100644
--- a/net/switchdev/switchdev.c
+++ b/net/switchdev/switchdev.c
@@ -408,10 +408,10 @@ static int switchdev_lower_dev_walk(struct net_device *lower_dev,
}
static struct net_device *
-switchdev_lower_dev_find(struct net_device *dev,
- bool (*check_cb)(const struct net_device *dev),
- bool (*foreign_dev_check_cb)(const struct net_device *dev,
- const struct net_device *foreign_dev))
+switchdev_lower_dev_find_rcu(struct net_device *dev,
+ bool (*check_cb)(const struct net_device *dev),
+ bool (*foreign_dev_check_cb)(const struct net_device *dev,
+ const struct net_device *foreign_dev))
{
struct switchdev_nested_priv switchdev_priv = {
.check_cb = check_cb,
@@ -428,176 +428,69 @@ switchdev_lower_dev_find(struct net_device *dev,
return switchdev_priv.lower_dev;
}
-static int __switchdev_handle_fdb_add_to_device(struct net_device *dev,
- const struct net_device *orig_dev,
- const struct switchdev_notifier_fdb_info *fdb_info,
- bool (*check_cb)(const struct net_device *dev),
- bool (*foreign_dev_check_cb)(const struct net_device *dev,
- const struct net_device *foreign_dev),
- int (*add_cb)(struct net_device *dev,
- const struct net_device *orig_dev, const void *ctx,
- const struct switchdev_notifier_fdb_info *fdb_info),
- int (*lag_add_cb)(struct net_device *dev,
- const struct net_device *orig_dev, const void *ctx,
- const struct switchdev_notifier_fdb_info *fdb_info))
-{
- const struct switchdev_notifier_info *info = &fdb_info->info;
- struct net_device *br, *lower_dev;
- struct list_head *iter;
- int err = -EOPNOTSUPP;
-
- if (check_cb(dev))
- return add_cb(dev, orig_dev, info->ctx, fdb_info);
-
- if (netif_is_lag_master(dev)) {
- if (!switchdev_lower_dev_find(dev, check_cb, foreign_dev_check_cb))
- goto maybe_bridged_with_us;
-
- /* This is a LAG interface that we offload */
- if (!lag_add_cb)
- return -EOPNOTSUPP;
-
- return lag_add_cb(dev, orig_dev, info->ctx, fdb_info);
- }
-
- /* Recurse through lower interfaces in case the FDB entry is pointing
- * towards a bridge device.
- */
- if (netif_is_bridge_master(dev)) {
- if (!switchdev_lower_dev_find(dev, check_cb, foreign_dev_check_cb))
- return 0;
-
- /* This is a bridge interface that we offload */
- netdev_for_each_lower_dev(dev, lower_dev, iter) {
- /* Do not propagate FDB entries across bridges */
- if (netif_is_bridge_master(lower_dev))
- continue;
-
- /* Bridge ports might be either us, or LAG interfaces
- * that we offload.
- */
- if (!check_cb(lower_dev) &&
- !switchdev_lower_dev_find(lower_dev, check_cb,
- foreign_dev_check_cb))
- continue;
-
- err = __switchdev_handle_fdb_add_to_device(lower_dev, orig_dev,
- fdb_info, check_cb,
- foreign_dev_check_cb,
- add_cb, lag_add_cb);
- if (err && err != -EOPNOTSUPP)
- return err;
- }
-
- return 0;
- }
-
-maybe_bridged_with_us:
- /* Event is neither on a bridge nor a LAG. Check whether it is on an
- * interface that is in a bridge with us.
- */
- br = netdev_master_upper_dev_get_rcu(dev);
- if (!br || !netif_is_bridge_master(br))
- return 0;
-
- if (!switchdev_lower_dev_find(br, check_cb, foreign_dev_check_cb))
- return 0;
-
- return __switchdev_handle_fdb_add_to_device(br, orig_dev, fdb_info,
- check_cb, foreign_dev_check_cb,
- add_cb, lag_add_cb);
-}
-
-int switchdev_handle_fdb_add_to_device(struct net_device *dev,
- const struct switchdev_notifier_fdb_info *fdb_info,
- bool (*check_cb)(const struct net_device *dev),
- bool (*foreign_dev_check_cb)(const struct net_device *dev,
- const struct net_device *foreign_dev),
- int (*add_cb)(struct net_device *dev,
- const struct net_device *orig_dev, const void *ctx,
- const struct switchdev_notifier_fdb_info *fdb_info),
- int (*lag_add_cb)(struct net_device *dev,
- const struct net_device *orig_dev, const void *ctx,
- const struct switchdev_notifier_fdb_info *fdb_info))
+static struct net_device *
+switchdev_lower_dev_find(struct net_device *dev,
+ bool (*check_cb)(const struct net_device *dev),
+ bool (*foreign_dev_check_cb)(const struct net_device *dev,
+ const struct net_device *foreign_dev))
{
- int err;
+ struct switchdev_nested_priv switchdev_priv = {
+ .check_cb = check_cb,
+ .foreign_dev_check_cb = foreign_dev_check_cb,
+ .dev = dev,
+ .lower_dev = NULL,
+ };
+ struct netdev_nested_priv priv = {
+ .data = &switchdev_priv,
+ };
- err = __switchdev_handle_fdb_add_to_device(dev, dev, fdb_info,
- check_cb,
- foreign_dev_check_cb,
- add_cb, lag_add_cb);
- if (err == -EOPNOTSUPP)
- err = 0;
+ netdev_walk_all_lower_dev(dev, switchdev_lower_dev_walk, &priv);
- return err;
+ return switchdev_priv.lower_dev;
}
-EXPORT_SYMBOL_GPL(switchdev_handle_fdb_add_to_device);
-static int __switchdev_handle_fdb_del_to_device(struct net_device *dev,
- const struct net_device *orig_dev,
+static int __switchdev_handle_fdb_event_to_device(struct net_device *dev,
+ struct net_device *orig_dev, unsigned long event,
const struct switchdev_notifier_fdb_info *fdb_info,
bool (*check_cb)(const struct net_device *dev),
bool (*foreign_dev_check_cb)(const struct net_device *dev,
const struct net_device *foreign_dev),
- int (*del_cb)(struct net_device *dev,
- const struct net_device *orig_dev, const void *ctx,
- const struct switchdev_notifier_fdb_info *fdb_info),
- int (*lag_del_cb)(struct net_device *dev,
- const struct net_device *orig_dev, const void *ctx,
- const struct switchdev_notifier_fdb_info *fdb_info))
+ int (*mod_cb)(struct net_device *dev, struct net_device *orig_dev,
+ unsigned long event, const void *ctx,
+ const struct switchdev_notifier_fdb_info *fdb_info))
{
const struct switchdev_notifier_info *info = &fdb_info->info;
- struct net_device *br, *lower_dev;
+ struct net_device *br, *lower_dev, *switchdev;
struct list_head *iter;
int err = -EOPNOTSUPP;
if (check_cb(dev))
- return del_cb(dev, orig_dev, info->ctx, fdb_info);
-
- if (netif_is_lag_master(dev)) {
- if (!switchdev_lower_dev_find(dev, check_cb, foreign_dev_check_cb))
- goto maybe_bridged_with_us;
-
- /* This is a LAG interface that we offload */
- if (!lag_del_cb)
- return -EOPNOTSUPP;
-
- return lag_del_cb(dev, orig_dev, info->ctx, fdb_info);
- }
+ return mod_cb(dev, orig_dev, event, info->ctx, fdb_info);
/* Recurse through lower interfaces in case the FDB entry is pointing
- * towards a bridge device.
+ * towards a bridge or a LAG device.
*/
- if (netif_is_bridge_master(dev)) {
- if (!switchdev_lower_dev_find(dev, check_cb, foreign_dev_check_cb))
- return 0;
-
- /* This is a bridge interface that we offload */
- netdev_for_each_lower_dev(dev, lower_dev, iter) {
- /* Do not propagate FDB entries across bridges */
- if (netif_is_bridge_master(lower_dev))
- continue;
-
- /* Bridge ports might be either us, or LAG interfaces
- * that we offload.
- */
- if (!check_cb(lower_dev) &&
- !switchdev_lower_dev_find(lower_dev, check_cb,
- foreign_dev_check_cb))
- continue;
-
- err = __switchdev_handle_fdb_del_to_device(lower_dev, orig_dev,
- fdb_info, check_cb,
- foreign_dev_check_cb,
- del_cb, lag_del_cb);
- if (err && err != -EOPNOTSUPP)
- return err;
- }
+ netdev_for_each_lower_dev(dev, lower_dev, iter) {
+ /* Do not propagate FDB entries across bridges */
+ if (netif_is_bridge_master(lower_dev))
+ continue;
- return 0;
+ /* Bridge ports might be either us, or LAG interfaces
+ * that we offload.
+ */
+ if (!check_cb(lower_dev) &&
+ !switchdev_lower_dev_find_rcu(lower_dev, check_cb,
+ foreign_dev_check_cb))
+ continue;
+
+ err = __switchdev_handle_fdb_event_to_device(lower_dev, orig_dev,
+ event, fdb_info, check_cb,
+ foreign_dev_check_cb,
+ mod_cb);
+ if (err && err != -EOPNOTSUPP)
+ return err;
}
-maybe_bridged_with_us:
/* Event is neither on a bridge nor a LAG. Check whether it is on an
* interface that is in a bridge with us.
*/
@@ -605,49 +498,51 @@ maybe_bridged_with_us:
if (!br || !netif_is_bridge_master(br))
return 0;
- if (!switchdev_lower_dev_find(br, check_cb, foreign_dev_check_cb))
+ switchdev = switchdev_lower_dev_find_rcu(br, check_cb, foreign_dev_check_cb);
+ if (!switchdev)
return 0;
- return __switchdev_handle_fdb_del_to_device(br, orig_dev, fdb_info,
- check_cb, foreign_dev_check_cb,
- del_cb, lag_del_cb);
+ if (!foreign_dev_check_cb(switchdev, dev))
+ return err;
+
+ return __switchdev_handle_fdb_event_to_device(br, orig_dev, event, fdb_info,
+ check_cb, foreign_dev_check_cb,
+ mod_cb);
}
-int switchdev_handle_fdb_del_to_device(struct net_device *dev,
+int switchdev_handle_fdb_event_to_device(struct net_device *dev, unsigned long event,
const struct switchdev_notifier_fdb_info *fdb_info,
bool (*check_cb)(const struct net_device *dev),
bool (*foreign_dev_check_cb)(const struct net_device *dev,
const struct net_device *foreign_dev),
- int (*del_cb)(struct net_device *dev,
- const struct net_device *orig_dev, const void *ctx,
- const struct switchdev_notifier_fdb_info *fdb_info),
- int (*lag_del_cb)(struct net_device *dev,
- const struct net_device *orig_dev, const void *ctx,
- const struct switchdev_notifier_fdb_info *fdb_info))
+ int (*mod_cb)(struct net_device *dev, struct net_device *orig_dev,
+ unsigned long event, const void *ctx,
+ const struct switchdev_notifier_fdb_info *fdb_info))
{
int err;
- err = __switchdev_handle_fdb_del_to_device(dev, dev, fdb_info,
- check_cb,
- foreign_dev_check_cb,
- del_cb, lag_del_cb);
+ err = __switchdev_handle_fdb_event_to_device(dev, dev, event, fdb_info,
+ check_cb, foreign_dev_check_cb,
+ mod_cb);
if (err == -EOPNOTSUPP)
err = 0;
return err;
}
-EXPORT_SYMBOL_GPL(switchdev_handle_fdb_del_to_device);
+EXPORT_SYMBOL_GPL(switchdev_handle_fdb_event_to_device);
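A minimal usage sketch (not part of this patch) of the consolidated helper above: a switchdev driver's atomic notifier can now route both FDB add and delete events through one callback keyed off @event. foo_port_dev_check(), foo_foreign_dev_check(), foo_port_fdb_add() and foo_port_fdb_del() are assumed driver functions used only for illustration.

#include <linux/netdevice.h>
#include <linux/notifier.h>
#include <net/switchdev.h>

/* Assumed driver helpers, declared here only so the sketch is complete */
bool foo_port_dev_check(const struct net_device *dev);
bool foo_foreign_dev_check(const struct net_device *dev,
			   const struct net_device *foreign_dev);
int foo_port_fdb_add(struct net_device *dev, struct net_device *orig_dev,
		     const struct switchdev_notifier_fdb_info *fdb_info);
int foo_port_fdb_del(struct net_device *dev, struct net_device *orig_dev,
		     const struct switchdev_notifier_fdb_info *fdb_info);

/* One mod_cb handles both events. The FDB notifiers run in atomic context,
 * so a real driver would typically defer the hardware programming.
 */
static int foo_port_fdb_event(struct net_device *dev,
			      struct net_device *orig_dev,
			      unsigned long event, const void *ctx,
			      const struct switchdev_notifier_fdb_info *fdb_info)
{
	switch (event) {
	case SWITCHDEV_FDB_ADD_TO_DEVICE:
		return foo_port_fdb_add(dev, orig_dev, fdb_info);
	case SWITCHDEV_FDB_DEL_TO_DEVICE:
		return foo_port_fdb_del(dev, orig_dev, fdb_info);
	default:
		return -EOPNOTSUPP;
	}
}

static int foo_switchdev_event(struct notifier_block *nb,
			       unsigned long event, void *ptr)
{
	struct net_device *dev = switchdev_notifier_info_to_dev(ptr);
	int err;

	switch (event) {
	case SWITCHDEV_FDB_ADD_TO_DEVICE:
	case SWITCHDEV_FDB_DEL_TO_DEVICE:
		err = switchdev_handle_fdb_event_to_device(dev, event, ptr,
							   foo_port_dev_check,
							   foo_foreign_dev_check,
							   foo_port_fdb_event);
		return notifier_from_errno(err);
	}

	return NOTIFY_DONE;
}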
static int __switchdev_handle_port_obj_add(struct net_device *dev,
struct switchdev_notifier_port_obj_info *port_obj_info,
bool (*check_cb)(const struct net_device *dev),
+ bool (*foreign_dev_check_cb)(const struct net_device *dev,
+ const struct net_device *foreign_dev),
int (*add_cb)(struct net_device *dev, const void *ctx,
const struct switchdev_obj *obj,
struct netlink_ext_ack *extack))
{
struct switchdev_notifier_info *info = &port_obj_info->info;
+ struct net_device *br, *lower_dev, *switchdev;
struct netlink_ext_ack *extack;
- struct net_device *lower_dev;
struct list_head *iter;
int err = -EOPNOTSUPP;
@@ -671,15 +566,46 @@ static int __switchdev_handle_port_obj_add(struct net_device *dev,
if (netif_is_bridge_master(lower_dev))
continue;
+ /* When searching for switchdev interfaces that are neighbors
+ * of foreign ones, and @dev is a bridge, do not recurse on the
+ * foreign interface again, it was already visited.
+ */
+ if (foreign_dev_check_cb && !check_cb(lower_dev) &&
+ !switchdev_lower_dev_find(lower_dev, check_cb, foreign_dev_check_cb))
+ continue;
+
err = __switchdev_handle_port_obj_add(lower_dev, port_obj_info,
- check_cb, add_cb);
+ check_cb, foreign_dev_check_cb,
+ add_cb);
if (err && err != -EOPNOTSUPP)
return err;
}
- return err;
+ /* Event is neither on a bridge nor a LAG. Check whether it is on an
+ * interface that is in a bridge with us.
+ */
+ if (!foreign_dev_check_cb)
+ return err;
+
+ br = netdev_master_upper_dev_get(dev);
+ if (!br || !netif_is_bridge_master(br))
+ return err;
+
+ switchdev = switchdev_lower_dev_find(br, check_cb, foreign_dev_check_cb);
+ if (!switchdev)
+ return err;
+
+ if (!foreign_dev_check_cb(switchdev, dev))
+ return err;
+
+ return __switchdev_handle_port_obj_add(br, port_obj_info, check_cb,
+ foreign_dev_check_cb, add_cb);
}
+/* Pass through a port object addition, if @dev passes @check_cb, or replicate
+ * it towards all lower interfaces of @dev that pass @check_cb, if @dev is a
+ * bridge or a LAG.
+ */
int switchdev_handle_port_obj_add(struct net_device *dev,
struct switchdev_notifier_port_obj_info *port_obj_info,
bool (*check_cb)(const struct net_device *dev),
@@ -690,21 +616,46 @@ int switchdev_handle_port_obj_add(struct net_device *dev,
int err;
err = __switchdev_handle_port_obj_add(dev, port_obj_info, check_cb,
- add_cb);
+ NULL, add_cb);
if (err == -EOPNOTSUPP)
err = 0;
return err;
}
EXPORT_SYMBOL_GPL(switchdev_handle_port_obj_add);
+/* Same as switchdev_handle_port_obj_add(), except if object is notified on a
+ * @dev that passes @foreign_dev_check_cb, it is replicated towards all devices
+ * that pass @check_cb and are in the same bridge as @dev.
+ */
+int switchdev_handle_port_obj_add_foreign(struct net_device *dev,
+ struct switchdev_notifier_port_obj_info *port_obj_info,
+ bool (*check_cb)(const struct net_device *dev),
+ bool (*foreign_dev_check_cb)(const struct net_device *dev,
+ const struct net_device *foreign_dev),
+ int (*add_cb)(struct net_device *dev, const void *ctx,
+ const struct switchdev_obj *obj,
+ struct netlink_ext_ack *extack))
+{
+ int err;
+
+ err = __switchdev_handle_port_obj_add(dev, port_obj_info, check_cb,
+ foreign_dev_check_cb, add_cb);
+ if (err == -EOPNOTSUPP)
+ err = 0;
+ return err;
+}
+EXPORT_SYMBOL_GPL(switchdev_handle_port_obj_add_foreign);
+
static int __switchdev_handle_port_obj_del(struct net_device *dev,
struct switchdev_notifier_port_obj_info *port_obj_info,
bool (*check_cb)(const struct net_device *dev),
+ bool (*foreign_dev_check_cb)(const struct net_device *dev,
+ const struct net_device *foreign_dev),
int (*del_cb)(struct net_device *dev, const void *ctx,
const struct switchdev_obj *obj))
{
struct switchdev_notifier_info *info = &port_obj_info->info;
- struct net_device *lower_dev;
+ struct net_device *br, *lower_dev, *switchdev;
struct list_head *iter;
int err = -EOPNOTSUPP;
@@ -726,15 +677,46 @@ static int __switchdev_handle_port_obj_del(struct net_device *dev,
if (netif_is_bridge_master(lower_dev))
continue;
+ /* When searching for switchdev interfaces that are neighbors
+ * of foreign ones, and @dev is a bridge, do not recurse on the
+ * foreign interface again, it was already visited.
+ */
+ if (foreign_dev_check_cb && !check_cb(lower_dev) &&
+ !switchdev_lower_dev_find(lower_dev, check_cb, foreign_dev_check_cb))
+ continue;
+
err = __switchdev_handle_port_obj_del(lower_dev, port_obj_info,
- check_cb, del_cb);
+ check_cb, foreign_dev_check_cb,
+ del_cb);
if (err && err != -EOPNOTSUPP)
return err;
}
- return err;
+ /* Event is neither on a bridge nor a LAG. Check whether it is on an
+ * interface that is in a bridge with us.
+ */
+ if (!foreign_dev_check_cb)
+ return err;
+
+ br = netdev_master_upper_dev_get(dev);
+ if (!br || !netif_is_bridge_master(br))
+ return err;
+
+ switchdev = switchdev_lower_dev_find(br, check_cb, foreign_dev_check_cb);
+ if (!switchdev)
+ return err;
+
+ if (!foreign_dev_check_cb(switchdev, dev))
+ return err;
+
+ return __switchdev_handle_port_obj_del(br, port_obj_info, check_cb,
+ foreign_dev_check_cb, del_cb);
}
+/* Pass through a port object deletion, if @dev passes @check_cb, or replicate
+ * it towards all lower interfaces of @dev that pass @check_cb, if @dev is a
+ * bridge or a LAG.
+ */
int switchdev_handle_port_obj_del(struct net_device *dev,
struct switchdev_notifier_port_obj_info *port_obj_info,
bool (*check_cb)(const struct net_device *dev),
@@ -744,13 +726,35 @@ int switchdev_handle_port_obj_del(struct net_device *dev,
int err;
err = __switchdev_handle_port_obj_del(dev, port_obj_info, check_cb,
- del_cb);
+ NULL, del_cb);
if (err == -EOPNOTSUPP)
err = 0;
return err;
}
EXPORT_SYMBOL_GPL(switchdev_handle_port_obj_del);
+/* Same as switchdev_handle_port_obj_del(), except if object is notified on a
+ * @dev that passes @foreign_dev_check_cb, it is replicated towards all devices
+ * that pass @check_cb and are in the same bridge as @dev.
+ */
+int switchdev_handle_port_obj_del_foreign(struct net_device *dev,
+ struct switchdev_notifier_port_obj_info *port_obj_info,
+ bool (*check_cb)(const struct net_device *dev),
+ bool (*foreign_dev_check_cb)(const struct net_device *dev,
+ const struct net_device *foreign_dev),
+ int (*del_cb)(struct net_device *dev, const void *ctx,
+ const struct switchdev_obj *obj))
+{
+ int err;
+
+ err = __switchdev_handle_port_obj_del(dev, port_obj_info, check_cb,
+ foreign_dev_check_cb, del_cb);
+ if (err == -EOPNOTSUPP)
+ err = 0;
+ return err;
+}
+EXPORT_SYMBOL_GPL(switchdev_handle_port_obj_del_foreign);
+
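A usage sketch for the two foreign-object helpers above (illustrative only): a driver's blocking switchdev notifier passes the notification straight through, and the helpers replicate objects notified on foreign bridge ports towards the driver's own ports in the same bridge. foo_port_obj_add() and foo_port_obj_del() are assumed driver callbacks, and foo_port_dev_check()/foo_foreign_dev_check() are the same assumed checks as in the FDB sketch earlier.

int foo_port_obj_add(struct net_device *dev, const void *ctx,
		     const struct switchdev_obj *obj,
		     struct netlink_ext_ack *extack);
int foo_port_obj_del(struct net_device *dev, const void *ctx,
		     const struct switchdev_obj *obj);

static int foo_switchdev_blocking_event(struct notifier_block *nb,
					unsigned long event, void *ptr)
{
	struct net_device *dev = switchdev_notifier_info_to_dev(ptr);
	int err;

	switch (event) {
	case SWITCHDEV_PORT_OBJ_ADD:
		err = switchdev_handle_port_obj_add_foreign(dev, ptr,
							    foo_port_dev_check,
							    foo_foreign_dev_check,
							    foo_port_obj_add);
		return notifier_from_errno(err);
	case SWITCHDEV_PORT_OBJ_DEL:
		err = switchdev_handle_port_obj_del_foreign(dev, ptr,
							    foo_port_dev_check,
							    foo_foreign_dev_check,
							    foo_port_obj_del);
		return notifier_from_errno(err);
	}

	return NOTIFY_DONE;
}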
static int __switchdev_handle_port_attr_set(struct net_device *dev,
struct switchdev_notifier_port_attr_info *port_attr_info,
bool (*check_cb)(const struct net_device *dev),
diff --git a/net/tsn/Kconfig b/net/tsn/Kconfig
new file mode 100644
index 000000000000..9f22807a74c9
--- /dev/null
+++ b/net/tsn/Kconfig
@@ -0,0 +1,15 @@
+config TSN
+ bool "802.1 Time-Sensitive Networking support"
+ default n
+ depends on VLAN_8021Q && PTP_1588_CLOCK
+ help
+	  This enables support for TSN (Time-Sensitive Networking).
+	  TSN features include:
+	  802.1Qav: credit-based shaper
+	  802.1Qbv: time-aware traffic scheduling
+	  802.1Qci: per-stream filtering and policing
+	  802.1Qbu: frame preemption
+	  802.1AS:  timing and synchronization
+	  802.1CB:  frame replication and elimination for reliability
+
+ If unsure, say N.
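As a rough illustration, a kernel config fragment enabling this feature would also need the stated dependencies (TSN is a bool symbol, so it cannot be built as a module):

CONFIG_VLAN_8021Q=y
CONFIG_PTP_1588_CLOCK=y
CONFIG_TSN=y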
diff --git a/net/tsn/Makefile b/net/tsn/Makefile
new file mode 100644
index 000000000000..ed46381bf24f
--- /dev/null
+++ b/net/tsn/Makefile
@@ -0,0 +1 @@
+obj-$(CONFIG_TSN) += genl_tsn.o
diff --git a/net/tsn/genl_tsn.c b/net/tsn/genl_tsn.c
new file mode 100644
index 000000000000..d0dc368de331
--- /dev/null
+++ b/net/tsn/genl_tsn.c
@@ -0,0 +1,3730 @@
+// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
+/* Copyright 2017-2019 NXP */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/netdevice.h>
+#include <linux/if_vlan.h>
+#include <net/genetlink.h>
+#include <net/netlink.h>
+#include <linux/version.h>
+#include <net/tsn.h>
+
+#define NLA_PARSE_NESTED(a, b, c, d) \
+ nla_parse_nested_deprecated(a, b, c, d, NULL)
+#define NLA_PUT_U64(a, b, c) nla_put_u64_64bit(a, b, c, NLA_U64)
+
+static struct genl_family tsn_family;
+
+LIST_HEAD(port_list);
+
+static const struct nla_policy tsn_cmd_policy[TSN_CMD_ATTR_MAX + 1] = {
+ [TSN_CMD_ATTR_MESG] = { .type = NLA_STRING },
+ [TSN_CMD_ATTR_DATA] = { .type = NLA_S32 },
+ [TSN_ATTR_IFNAME] = { .type = NLA_STRING },
+ [TSN_ATTR_PORT_NUMBER] = { .type = NLA_U8 },
+ [TSN_ATTR_CAP] = { .type = NLA_NESTED },
+ [TSN_ATTR_QBV] = { .type = NLA_NESTED },
+ [TSN_ATTR_STREAM_IDENTIFY] = { .type = NLA_NESTED },
+ [TSN_ATTR_QCI_SP] = { .type = NLA_NESTED },
+ [TSN_ATTR_QCI_SFI] = { .type = NLA_NESTED },
+ [TSN_ATTR_QCI_SGI] = { .type = NLA_NESTED },
+ [TSN_ATTR_QCI_FMI] = { .type = NLA_NESTED },
+ [TSN_ATTR_CBS] = { .type = NLA_NESTED },
+ [TSN_ATTR_TSD] = { .type = NLA_NESTED },
+ [TSN_ATTR_QBU] = { .type = NLA_NESTED },
+ [TSN_ATTR_CT] = { .type = NLA_NESTED },
+ [TSN_ATTR_CBGEN] = { .type = NLA_NESTED },
+ [TSN_ATTR_CBREC] = { .type = NLA_NESTED },
+ [TSN_ATTR_CBSTAT] = { .type = NLA_NESTED },
+ [TSN_ATTR_DSCP] = { .type = NLA_NESTED },
+};
+
+static const struct nla_policy tsn_cap_policy[TSN_CAP_ATTR_MAX + 1] = {
+ [TSN_CAP_ATTR_QBV] = { .type = NLA_FLAG },
+ [TSN_CAP_ATTR_QCI] = { .type = NLA_FLAG },
+ [TSN_CAP_ATTR_QBU] = { .type = NLA_FLAG },
+ [TSN_CAP_ATTR_CBS] = { .type = NLA_FLAG },
+ [TSN_CAP_ATTR_CB] = { .type = NLA_FLAG },
+ [TSN_CAP_ATTR_TBS] = { .type = NLA_FLAG },
+ [TSN_CAP_ATTR_CTH] = { .type = NLA_FLAG },
+};
+
+static const struct nla_policy qci_cap_policy[TSN_QCI_STREAM_ATTR_MAX + 1] = {
+ [TSN_QCI_STREAM_ATTR_MAX_SFI] = { .type = NLA_U32 },
+ [TSN_QCI_STREAM_ATTR_MAX_SGI] = { .type = NLA_U32 },
+ [TSN_QCI_STREAM_ATTR_MAX_FMI] = { .type = NLA_U32 },
+ [TSN_QCI_STREAM_ATTR_SLM] = { .type = NLA_U32 },
+};
+
+static const struct nla_policy ct_policy[TSN_CT_ATTR_MAX + 1] = {
+ [TSN_CT_ATTR_QUEUE_STATE] = { .type = NLA_U8 }
+};
+
+static const struct nla_policy cbgen_policy[TSN_CBGEN_ATTR_MAX + 1] = {
+ [TSN_CBGEN_ATTR_INDEX] = { .type = NLA_U32 },
+ [TSN_CBGEN_ATTR_PORT_MASK] = { .type = NLA_U8 },
+ [TSN_CBGEN_ATTR_SPLIT_MASK] = { .type = NLA_U8 },
+ [TSN_CBGEN_ATTR_SEQ_LEN] = { .type = NLA_U8 },
+ [TSN_CBGEN_ATTR_SEQ_NUM] = { .type = NLA_U32 },
+};
+
+static const struct nla_policy cbrec_policy[TSN_CBREC_ATTR_MAX + 1] = {
+ [TSN_CBREC_ATTR_INDEX] = { .type = NLA_U32 },
+ [TSN_CBREC_ATTR_SEQ_LEN] = { .type = NLA_U8 },
+ [TSN_CBREC_ATTR_HIS_LEN] = { .type = NLA_U8 },
+ [TSN_CBREC_ATTR_TAG_POP_EN] = { .type = NLA_FLAG },
+};
+
+static const struct nla_policy cbstat_policy[TSN_CBSTAT_ATTR_MAX + 1] = {
+ [TSN_CBSTAT_ATTR_INDEX] = { .type = NLA_U32 },
+ [TSN_CBSTAT_ATTR_GEN_REC] = { .type = NLA_U8 },
+ [TSN_CBSTAT_ATTR_ERR] = { .type = NLA_U8 },
+ [TSN_CBSTAT_ATTR_SEQ_NUM] = { .type = NLA_U32 },
+ [TSN_CBSTAT_ATTR_SEQ_LEN] = { .type = NLA_U8 },
+ [TSN_CBSTAT_ATTR_SPLIT_MASK] = { .type = NLA_U8 },
+ [TSN_CBSTAT_ATTR_PORT_MASK] = { .type = NLA_U8 },
+ [TSN_CBSTAT_ATTR_HIS_LEN] = { .type = NLA_U8 },
+ [TSN_CBSTAT_ATTR_SEQ_HIS] = { .type = NLA_U32 },
+};
+
+static const struct nla_policy qbu_policy[TSN_QBU_ATTR_MAX + 1] = {
+ [TSN_QBU_ATTR_ADMIN_STATE] = { .type = NLA_U8 },
+ [TSN_QBU_ATTR_HOLD_ADVANCE] = { .type = NLA_U32},
+ [TSN_QBU_ATTR_RELEASE_ADVANCE] = { .type = NLA_U32},
+ [TSN_QBU_ATTR_ACTIVE] = { .type = NLA_FLAG},
+ [TSN_QBU_ATTR_HOLD_REQUEST] = { .type = NLA_U8},
+};
+
+static const struct nla_policy cbs_policy[TSN_CBS_ATTR_MAX + 1] = {
+ [TSN_CBS_ATTR_TC_INDEX] = { .type = NLA_U8},
+ [TSN_CBS_ATTR_BW] = { .type = NLA_U8},
+};
+
+static const struct nla_policy tsd_policy[TSN_TSD_ATTR_MAX + 1] = {
+ [TSN_TSD_ATTR_ENABLE] = { .type = NLA_FLAG},
+ [TSN_TSD_ATTR_DISABLE] = { .type = NLA_FLAG},
+ [TSN_TSD_ATTR_PERIOD] = { .type = NLA_U32},
+ [TSN_TSD_ATTR_MAX_FRM_NUM] = { .type = NLA_U32},
+ [TSN_TSD_ATTR_CYCLE_NUM] = { .type = NLA_U32},
+ [TSN_TSD_ATTR_LOSS_STEPS] = { .type = NLA_U32},
+ [TSN_TSD_ATTR_SYN_IMME] = { .type = NLA_FLAG},
+};
+
+static const struct nla_policy qbv_policy[TSN_QBV_ATTR_MAX + 1] = {
+ [TSN_QBV_ATTR_ADMINENTRY] = { .type = NLA_NESTED},
+ [TSN_QBV_ATTR_OPERENTRY] = { .type = NLA_NESTED},
+ [TSN_QBV_ATTR_ENABLE] = { .type = NLA_FLAG},
+ [TSN_QBV_ATTR_DISABLE] = { .type = NLA_FLAG},
+ [TSN_QBV_ATTR_CONFIGCHANGE] = { .type = NLA_FLAG},
+ [TSN_QBV_ATTR_CONFIGCHANGETIME] = { .type = NLA_U64},
+ [TSN_QBV_ATTR_MAXSDU] = { .type = NLA_U32},
+ [TSN_QBV_ATTR_GRANULARITY] = { .type = NLA_U32},
+ [TSN_QBV_ATTR_CURRENTTIME] = { .type = NLA_U64},
+ [TSN_QBV_ATTR_CONFIGPENDING] = {.type = NLA_FLAG},
+ [TSN_QBV_ATTR_CONFIGCHANGEERROR] = { .type = NLA_U64},
+ [TSN_QBV_ATTR_LISTMAX] = { .type = NLA_U32},
+};
+
+static const struct nla_policy qbv_ctrl_policy[TSN_QBV_ATTR_CTRL_MAX + 1] = {
+ [TSN_QBV_ATTR_CTRL_LISTCOUNT] = { .type = NLA_U32},
+ [TSN_QBV_ATTR_CTRL_GATESTATE] = { .type = NLA_U8},
+ [TSN_QBV_ATTR_CTRL_CYCLETIME] = { .type = NLA_U32},
+ [TSN_QBV_ATTR_CTRL_CYCLETIMEEXT] = { .type = NLA_U32},
+ [TSN_QBV_ATTR_CTRL_BASETIME] = { .type = NLA_U64},
+ [TSN_QBV_ATTR_CTRL_LISTENTRY] = { .type = NLA_NESTED},
+};
+
+static const struct nla_policy qbv_entry_policy[TSN_QBV_ATTR_ENTRY_MAX + 1] = {
+ [TSN_QBV_ATTR_ENTRY_ID] = { .type = NLA_U32},
+ [TSN_QBV_ATTR_ENTRY_GC] = { .type = NLA_U8},
+ [TSN_QBV_ATTR_ENTRY_TM] = { .type = NLA_U32},
+};
+
+static const struct nla_policy cb_streamid_policy[TSN_STREAMID_ATTR_MAX + 1] = {
+ [TSN_STREAMID_ATTR_INDEX] = { .type = NLA_U32},
+ [TSN_STREAMID_ATTR_ENABLE] = { .type = NLA_FLAG},
+ [TSN_STREAMID_ATTR_DISABLE] = { .type = NLA_FLAG},
+ [TSN_STREAMID_ATTR_STREAM_HANDLE] = { .type = NLA_S32},
+ [TSN_STREAMID_ATTR_IFOP] = { .type = NLA_U32},
+ [TSN_STREAMID_ATTR_OFOP] = { .type = NLA_U32},
+ [TSN_STREAMID_ATTR_IFIP] = { .type = NLA_U32},
+ [TSN_STREAMID_ATTR_OFIP] = { .type = NLA_U32},
+ [TSN_STREAMID_ATTR_TYPE] = { .type = NLA_U8},
+ [TSN_STREAMID_ATTR_NDMAC] = { .type = NLA_U64},
+ [TSN_STREAMID_ATTR_NTAGGED] = { .type = NLA_U8},
+ [TSN_STREAMID_ATTR_NVID] = { .type = NLA_U16},
+ [TSN_STREAMID_ATTR_SMAC] = { .type = NLA_U64},
+ [TSN_STREAMID_ATTR_STAGGED] = { .type = NLA_U8},
+ [TSN_STREAMID_ATTR_SVID] = { .type = NLA_U16},
+ [TSN_STREAMID_ATTR_COUNTERS_PSI] = { .type = NLA_U64},
+ [TSN_STREAMID_ATTR_COUNTERS_PSO] = { .type = NLA_U64},
+ [TSN_STREAMID_ATTR_COUNTERS_PSPPI] = { .type = NLA_U64},
+ [TSN_STREAMID_ATTR_COUNTERS_PSPPO] = { .type = NLA_U64},
+};
+
+static const struct nla_policy qci_sfi_policy[TSN_QCI_SFI_ATTR_MAX + 1] = {
+ [TSN_QCI_SFI_ATTR_INDEX] = { .type = NLA_U32},
+ [TSN_QCI_SFI_ATTR_ENABLE] = { .type = NLA_FLAG},
+ [TSN_QCI_SFI_ATTR_DISABLE] = { .type = NLA_FLAG},
+ [TSN_QCI_SFI_ATTR_STREAM_HANDLE] = { .type = NLA_S32},
+ [TSN_QCI_SFI_ATTR_PRIO_SPEC] = { .type = NLA_S8},
+ [TSN_QCI_SFI_ATTR_GATE_ID] = { .type = NLA_U32},
+ [TSN_QCI_SFI_ATTR_FILTER_TYPE] = { .type = NLA_U8},
+ [TSN_QCI_SFI_ATTR_FLOW_ID] = { .type = NLA_S32},
+ [TSN_QCI_SFI_ATTR_MAXSDU] = { .type = NLA_U16},
+ [TSN_QCI_SFI_ATTR_COUNTERS] = {
+ .len = sizeof(struct tsn_qci_psfp_sfi_counters)},
+ [TSN_QCI_SFI_ATTR_OVERSIZE_ENABLE] = { .type = NLA_FLAG},
+ [TSN_QCI_SFI_ATTR_OVERSIZE] = { .type = NLA_FLAG},
+};
+
+static const struct nla_policy qci_sgi_policy[] = {
+ [TSN_QCI_SGI_ATTR_INDEX] = { .type = NLA_U32},
+ [TSN_QCI_SGI_ATTR_ENABLE] = { .type = NLA_FLAG},
+ [TSN_QCI_SGI_ATTR_DISABLE] = { .type = NLA_FLAG},
+ [TSN_QCI_SGI_ATTR_CONFCHANGE] = { .type = NLA_FLAG},
+ [TSN_QCI_SGI_ATTR_IRXEN] = { .type = NLA_FLAG},
+ [TSN_QCI_SGI_ATTR_IRX] = { .type = NLA_FLAG},
+ [TSN_QCI_SGI_ATTR_OEXEN] = { .type = NLA_FLAG},
+ [TSN_QCI_SGI_ATTR_OEX] = { .type = NLA_FLAG},
+ [TSN_QCI_SGI_ATTR_ADMINENTRY] = { .type = NLA_NESTED},
+ [TSN_QCI_SGI_ATTR_OPERENTRY] = { .type = NLA_NESTED},
+ [TSN_QCI_SGI_ATTR_CCTIME] = { .type = NLA_U64},
+ [TSN_QCI_SGI_ATTR_TICKG] = { .type = NLA_U32},
+ [TSN_QCI_SGI_ATTR_CUTIME] = { .type = NLA_U64},
+ [TSN_QCI_SGI_ATTR_CPENDING] = { .type = NLA_FLAG},
+ [TSN_QCI_SGI_ATTR_CCERROR] = { .type = NLA_U64},
+};
+
+static const struct nla_policy qci_sgi_ctrl_policy[] = {
+ [TSN_SGI_ATTR_CTRL_INITSTATE] = { .type = NLA_FLAG},
+ [TSN_SGI_ATTR_CTRL_LEN] = { .type = NLA_U8},
+ [TSN_SGI_ATTR_CTRL_CYTIME] = { .type = NLA_U32},
+ [TSN_SGI_ATTR_CTRL_CYTIMEEX] = { .type = NLA_U32},
+ [TSN_SGI_ATTR_CTRL_BTIME] = { .type = NLA_U64},
+ [TSN_SGI_ATTR_CTRL_INITIPV] = { .type = NLA_S8},
+ [TSN_SGI_ATTR_CTRL_GCLENTRY] = { .type = NLA_NESTED},
+};
+
+static const struct nla_policy qci_sgi_gcl_policy[] = {
+ [TSN_SGI_ATTR_GCL_GATESTATE] = { .type = NLA_FLAG},
+ [TSN_SGI_ATTR_GCL_IPV] = { .type = NLA_S8},
+ [TSN_SGI_ATTR_GCL_INTERVAL] = { .type = NLA_U32},
+ [TSN_SGI_ATTR_GCL_OCTMAX] = { .type = NLA_U32},
+};
+
+static const struct nla_policy qci_fmi_policy[] = {
+ [TSN_QCI_FMI_ATTR_INDEX] = { .type = NLA_U32},
+ [TSN_QCI_FMI_ATTR_ENABLE] = { .type = NLA_FLAG},
+ [TSN_QCI_FMI_ATTR_DISABLE] = { .type = NLA_FLAG},
+ [TSN_QCI_FMI_ATTR_CIR] = { .type = NLA_U32},
+ [TSN_QCI_FMI_ATTR_CBS] = { .type = NLA_U32},
+ [TSN_QCI_FMI_ATTR_EIR] = { .type = NLA_U32},
+ [TSN_QCI_FMI_ATTR_EBS] = { .type = NLA_U32},
+ [TSN_QCI_FMI_ATTR_CF] = { .type = NLA_FLAG},
+ [TSN_QCI_FMI_ATTR_CM] = { .type = NLA_FLAG},
+ [TSN_QCI_FMI_ATTR_DROPYL] = { .type = NLA_FLAG},
+ [TSN_QCI_FMI_ATTR_MAREDEN] = { .type = NLA_FLAG},
+ [TSN_QCI_FMI_ATTR_MARED] = { .type = NLA_FLAG},
+ [TSN_QCI_FMI_ATTR_COUNTERS] = {
+ .len = sizeof(struct tsn_qci_psfp_fmi_counters)},
+};
+
+static const struct nla_policy dscp_policy[] = {
+ [TSN_DSCP_ATTR_INDEX] = { .type = NLA_U32},
+ [TSN_DSCP_ATTR_DISABLE] = { .type = NLA_FLAG},
+ [TSN_DSCP_ATTR_COS] = { .type = NLA_U8},
+ [TSN_DSCP_ATTR_DPL] = { .type = NLA_U8},
+};
+
+static ATOMIC_NOTIFIER_HEAD(tsn_notif_chain);
+
+/**
+ * register_tsn_notifier - Register notifier
+ * @nb: notifier_block
+ *
+ * Register a TSN notifier block.
+ */
+int register_tsn_notifier(struct notifier_block *nb)
+{
+ return atomic_notifier_chain_register(&tsn_notif_chain, nb);
+}
+EXPORT_SYMBOL_GPL(register_tsn_notifier);
+
+/**
+ * unregister_tsn_notifier - Unregister notifier
+ * @nb: notifier_block
+ *
+ * Unregister a TSN notifier block.
+ */
+int unregister_tsn_notifier(struct notifier_block *nb)
+{
+ return atomic_notifier_chain_unregister(&tsn_notif_chain, nb);
+}
+EXPORT_SYMBOL_GPL(unregister_tsn_notifier);
+
+/**
+ * call_tsn_notifiers - Call notifiers
+ * @val: value passed unmodified to notifier function
+ * @dev: port device
+ * @info: notifier information data
+ *
+ * Call all TSN notifier blocks.
+ */
+int call_tsn_notifiers(unsigned long val, struct net_device *dev,
+ struct tsn_notifier_info *info)
+{
+ info->dev = dev;
+ return atomic_notifier_call_chain(&tsn_notif_chain, val, info);
+}
+EXPORT_SYMBOL_GPL(call_tsn_notifiers);
+
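A minimal consumer sketch for the notifier chain above (illustrative only; the event values and the full layout of struct tsn_notifier_info beyond ->dev come from include/net/tsn.h, which is not shown here):

#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/notifier.h>
#include <net/tsn.h>

static int foo_tsn_event(struct notifier_block *nb, unsigned long event,
			 void *ptr)
{
	struct tsn_notifier_info *info = ptr;

	/* Atomic notifier chain: called in atomic context, must not sleep */
	netdev_dbg(info->dev, "TSN event %lu\n", event);

	return NOTIFY_OK;
}

static struct notifier_block foo_tsn_nb = {
	.notifier_call = foo_tsn_event,
};

static int __init foo_tsn_init(void)
{
	return register_tsn_notifier(&foo_tsn_nb);
}
module_init(foo_tsn_init);

static void __exit foo_tsn_exit(void)
{
	unregister_tsn_notifier(&foo_tsn_nb);
}
module_exit(foo_tsn_exit);

MODULE_LICENSE("GPL");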
+struct tsn_port *tsn_get_port(struct net_device *ndev)
+{
+ struct tsn_port *port;
+ bool tsn_found = false;
+
+ list_for_each_entry(port, &port_list, list) {
+ if (port->netdev == ndev) {
+ tsn_found = true;
+ break;
+ }
+ }
+
+ if (!tsn_found)
+ return NULL;
+
+ return port;
+}
+EXPORT_SYMBOL_GPL(tsn_get_port);
+
+static int tsn_prepare_reply(struct genl_info *info, u8 cmd,
+ struct sk_buff **skbp, size_t size)
+{
+ struct sk_buff *skb;
+ void *reply;
+
+ /* If new attributes are added, please revisit this allocation
+ */
+ skb = genlmsg_new(size, GFP_KERNEL);
+ if (!skb)
+ return -ENOMEM;
+
+ if (!info) {
+ nlmsg_free(skb);
+ return -EINVAL;
+ }
+
+ reply = genlmsg_put_reply(skb, info, &tsn_family, 0, cmd);
+ if (!reply) {
+ nlmsg_free(skb);
+ return -EINVAL;
+ }
+
+ *skbp = skb;
+ return 0;
+}
+
+static int tsn_mk_reply(struct sk_buff *skb, int aggr, void *data, int len)
+{
+ /* add a netlink attribute to a socket buffer */
+ return nla_put(skb, aggr, len, data);
+}
+
+static int tsn_send_reply(struct sk_buff *skb, struct genl_info *info)
+{
+ struct genlmsghdr *genlhdr = nlmsg_data(nlmsg_hdr(skb));
+ void *reply = genlmsg_data(genlhdr);
+
+ genlmsg_end(skb, reply);
+
+ return genlmsg_reply(skb, info);
+}
+
+static int cmd_attr_echo_message(struct genl_info *info)
+{
+ struct nlattr *na;
+ char *msg;
+ struct sk_buff *rep_skb;
+ size_t size;
+ int ret;
+
+ na = info->attrs[TSN_CMD_ATTR_MESG];
+ if (!na)
+ return -EINVAL;
+
+ msg = (char *)nla_data(na);
+ pr_info("tsn generic netlink receive echo mesg %s\n", msg);
+
+ size = nla_total_size(strlen(msg) + 1);
+
+ ret = tsn_prepare_reply(info, TSN_CMD_REPLY, &rep_skb,
+ size + NLMSG_ALIGN(MAX_USER_SIZE));
+ if (ret < 0)
+ return ret;
+
+ ret = tsn_mk_reply(rep_skb, TSN_CMD_ATTR_MESG, msg, size);
+ if (ret < 0)
+ goto err;
+
+ return tsn_send_reply(rep_skb, info);
+
+err:
+ nlmsg_free(rep_skb);
+ return ret;
+}
+
+static int cmd_attr_echo_data(struct genl_info *info)
+{
+ struct nlattr *na;
+ s32 data;
+ struct sk_buff *rep_skb;
+ size_t size;
+ int ret;
+
+	/* read data */
+ na = info->attrs[TSN_CMD_ATTR_DATA];
+ if (!na)
+ return -EINVAL;
+
+ data = nla_get_s32(info->attrs[TSN_CMD_ATTR_DATA]);
+ pr_info("tsn generic netlink receive echo data %d\n", data);
+
+ /* send back */
+ size = nla_total_size(sizeof(s32));
+
+ ret = tsn_prepare_reply(info, TSN_CMD_REPLY, &rep_skb,
+ size + NLMSG_ALIGN(MAX_USER_SIZE));
+ if (ret < 0)
+ return ret;
+
+ /* netlink lib func */
+ ret = nla_put_s32(rep_skb, TSN_CMD_ATTR_DATA, data);
+ if (ret < 0)
+ goto err;
+
+ return tsn_send_reply(rep_skb, info);
+
+err:
+ nlmsg_free(rep_skb);
+ return ret;
+}
+
+static int tsn_echo_cmd(struct sk_buff *skb, struct genl_info *info)
+{
+ if (info->attrs[TSN_CMD_ATTR_MESG])
+ return cmd_attr_echo_message(info);
+ else if (info->attrs[TSN_CMD_ATTR_DATA])
+ return cmd_attr_echo_data(info);
+
+ return -EINVAL;
+}
+
+static int tsn_simple_reply(struct genl_info *info, u32 cmd,
+ char *portname, s32 retvalue)
+{
+ struct sk_buff *rep_skb;
+ size_t size;
+ int ret;
+
+ /* send back */
+ size = nla_total_size(strlen(portname) + 1);
+ size += nla_total_size(sizeof(s32));
+
+ ret = tsn_prepare_reply(info, cmd,
+ &rep_skb, size + NLMSG_ALIGN(MAX_USER_SIZE));
+ if (ret < 0)
+ return ret;
+
+ /* netlink lib func */
+ ret = nla_put_string(rep_skb, TSN_ATTR_IFNAME, portname);
+ if (ret < 0)
+ goto err;
+
+ ret = nla_put_s32(rep_skb, TSN_CMD_ATTR_DATA, retvalue);
+ if (ret < 0)
+ goto err;
+
+ return tsn_send_reply(rep_skb, info);
+
+err:
+ nlmsg_free(rep_skb);
+ return ret;
+}
+
+struct tsn_port *tsn_init_check(struct genl_info *info,
+ struct net_device **ndev)
+{
+ struct nlattr *na;
+ char *portname;
+ struct net_device *netdev;
+ struct tsn_port *port;
+ bool tsn_found = false;
+
+ if (!info->attrs[TSN_ATTR_IFNAME]) {
+ tsn_simple_reply(info, TSN_CMD_REPLY,
+ "no portname", -EINVAL);
+ return NULL;
+ }
+
+ na = info->attrs[TSN_ATTR_IFNAME];
+
+ portname = (char *)nla_data(na);
+
+ netdev = __dev_get_by_name(genl_info_net(info), portname);
+ if (!netdev) {
+ tsn_simple_reply(info, TSN_CMD_REPLY,
+ "error device", -ENODEV);
+ return NULL;
+ }
+
+ list_for_each_entry(port, &port_list, list) {
+ if (port->netdev == netdev) {
+ tsn_found = true;
+ break;
+ }
+ }
+
+ if (!tsn_found) {
+ tsn_simple_reply(info, TSN_CMD_REPLY,
+ netdev->name, -ENODEV);
+ return NULL;
+ }
+
+ *ndev = netdev;
+
+ return port;
+}
+
+static int tsn_cap_get(struct sk_buff *skb, struct genl_info *info)
+{
+ struct sk_buff *rep_skb;
+ struct nlattr *tsn_cap_attr;
+ int ret;
+ u32 cap = 0;
+ struct net_device *netdev;
+ struct genlmsghdr *genlhdr;
+ const struct tsn_ops *tsnops;
+ struct tsn_port *port;
+
+	port = tsn_init_check(info, &netdev);
+	/* tsn_init_check() has already sent an error reply on failure and
+	 * netdev is not valid here, so return instead of jumping to "out".
+	 */
+	if (!port)
+		return -ENODEV;
+
+ tsnops = port->tsnops;
+ genlhdr = info->genlhdr;
+ if (!tsnops->get_capability) {
+ ret = -EOPNOTSUPP;
+ goto out;
+ }
+
+ cap = tsnops->get_capability(netdev);
+ if (!cap) {
+ ret = -EOPNOTSUPP;
+ goto out;
+ }
+
+ /* Pad netlink reply data */
+ ret = tsn_prepare_reply(info, genlhdr->cmd,
+ &rep_skb, NLMSG_ALIGN(MAX_ATTR_SIZE));
+ if (ret < 0)
+ goto out;
+
+ if (nla_put_string(rep_skb, TSN_ATTR_IFNAME, netdev->name)) {
+ ret = -EMSGSIZE;
+ goto err;
+ }
+
+ tsn_cap_attr = nla_nest_start_noflag(rep_skb, TSN_ATTR_CAP);
+ if (!tsn_cap_attr) {
+ ret = -EMSGSIZE;
+ goto err;
+ }
+
+ if (cap & TSN_CAP_QBV) {
+ if (nla_put_flag(rep_skb, TSN_CAP_ATTR_QBV))
+ goto err;
+ }
+
+ if (cap & TSN_CAP_QCI) {
+ if (nla_put_flag(rep_skb, TSN_CAP_ATTR_QCI))
+ goto err;
+ }
+
+ if (cap & TSN_CAP_QBU) {
+ if (nla_put_flag(rep_skb, TSN_CAP_ATTR_QBU))
+ goto err;
+ }
+
+ if (cap & TSN_CAP_CBS) {
+ if (nla_put_flag(rep_skb, TSN_CAP_ATTR_CBS))
+ goto err;
+ }
+
+ if (cap & TSN_CAP_CB) {
+ if (nla_put_flag(rep_skb, TSN_CAP_ATTR_CB))
+ goto err;
+ }
+
+ if (cap & TSN_CAP_TBS) {
+ if (nla_put_flag(rep_skb, TSN_CAP_ATTR_TBS))
+ goto err;
+ }
+
+ if (cap & TSN_CAP_CTH) {
+ if (nla_put_flag(rep_skb, TSN_CAP_ATTR_CTH))
+ goto err;
+ }
+
+ nla_nest_end(rep_skb, tsn_cap_attr);
+
+ tsn_send_reply(rep_skb, info);
+ return 0;
+err:
+ nlmsg_free(rep_skb);
+out:
+ if (ret < 0)
+ tsn_simple_reply(info, TSN_CMD_REPLY, netdev->name, ret);
+ return ret;
+}
+
+static int cmd_cb_streamid_set(struct genl_info *info)
+{
+ struct nlattr *na, *sid[TSN_STREAMID_ATTR_MAX + 1];
+ u32 sid_index;
+ u8 iden_type = 1;
+ bool enable;
+ int ret;
+ struct net_device *netdev;
+ struct tsn_cb_streamid sidconf;
+ const struct tsn_ops *tsnops;
+ struct tsn_port *port;
+
+ port = tsn_init_check(info, &netdev);
+ if (!port)
+ return -ENODEV;
+
+ tsnops = port->tsnops;
+
+ memset(&sidconf, 0, sizeof(struct tsn_cb_streamid));
+
+ if (!info->attrs[TSN_ATTR_STREAM_IDENTIFY])
+ return -EINVAL;
+
+ na = info->attrs[TSN_ATTR_STREAM_IDENTIFY];
+
+ ret = NLA_PARSE_NESTED(sid, TSN_STREAMID_ATTR_MAX,
+ na, cb_streamid_policy);
+ if (ret)
+ return -EINVAL;
+
+ if (!sid[TSN_STREAMID_ATTR_INDEX])
+ return -EINVAL;
+
+ sid_index = nla_get_u32(sid[TSN_STREAMID_ATTR_INDEX]);
+
+ if (sid[TSN_STREAMID_ATTR_ENABLE])
+ enable = true;
+ else if (sid[TSN_STREAMID_ATTR_DISABLE])
+ enable = false;
+ else
+ return -EINVAL;
+
+ if (!enable)
+ goto loaddev;
+
+ if (sid[TSN_STREAMID_ATTR_TYPE])
+ iden_type = nla_get_u8(sid[TSN_STREAMID_ATTR_TYPE]);
+ else
+ return -EINVAL;
+
+ sidconf.type = iden_type;
+ switch (iden_type) {
+ case STREAMID_NULL:
+ if (!sid[TSN_STREAMID_ATTR_NDMAC] ||
+ !sid[TSN_STREAMID_ATTR_NTAGGED] ||
+ !sid[TSN_STREAMID_ATTR_NVID]) {
+ return -EINVAL;
+ }
+
+ sidconf.para.nid.dmac =
+ nla_get_u64(sid[TSN_STREAMID_ATTR_NDMAC]);
+ sidconf.para.nid.tagged =
+ nla_get_u8(sid[TSN_STREAMID_ATTR_NTAGGED]);
+ sidconf.para.nid.vid =
+ nla_get_u16(sid[TSN_STREAMID_ATTR_NVID]);
+ break;
+ case STREAMID_SMAC_VLAN:
+		/* TODO: not supported yet */
+ if (!sid[TSN_STREAMID_ATTR_SMAC] ||
+ !sid[TSN_STREAMID_ATTR_STAGGED] ||
+ !sid[TSN_STREAMID_ATTR_SVID]) {
+ return -EINVAL;
+ }
+
+ sidconf.para.sid.smac =
+ nla_get_u64(sid[TSN_STREAMID_ATTR_SMAC]);
+ sidconf.para.sid.tagged =
+ nla_get_u8(sid[TSN_STREAMID_ATTR_STAGGED]);
+ sidconf.para.sid.vid =
+ nla_get_u16(sid[TSN_STREAMID_ATTR_SVID]);
+ break;
+ case STREAMID_DMAC_VLAN:
+
+ case STREAMID_IP:
+
+ default:
+ tsn_simple_reply(info, TSN_CMD_REPLY,
+ netdev->name, -EINVAL);
+ return -EINVAL;
+ }
+
+ if (sid[TSN_STREAMID_ATTR_STREAM_HANDLE])
+ sidconf.handle =
+ nla_get_s32(sid[TSN_STREAMID_ATTR_STREAM_HANDLE]);
+
+ if (sid[TSN_STREAMID_ATTR_IFOP])
+ sidconf.ifac_oport = nla_get_u32(sid[TSN_STREAMID_ATTR_IFOP]);
+ if (sid[TSN_STREAMID_ATTR_OFOP])
+ sidconf.ofac_oport = nla_get_u32(sid[TSN_STREAMID_ATTR_OFOP]);
+ if (sid[TSN_STREAMID_ATTR_IFIP])
+ sidconf.ifac_iport = nla_get_u32(sid[TSN_STREAMID_ATTR_IFIP]);
+ if (sid[TSN_STREAMID_ATTR_OFIP])
+ sidconf.ofac_iport = nla_get_u32(sid[TSN_STREAMID_ATTR_OFIP]);
+
+loaddev:
+ if (!tsnops->cb_streamid_set) {
+ tsn_simple_reply(info, TSN_CMD_REPLY,
+ netdev->name, -EPERM);
+ return -EOPNOTSUPP;
+ }
+
+ ret = tsnops->cb_streamid_set(netdev, sid_index, enable, &sidconf);
+ if (ret < 0) {
+ tsn_simple_reply(info, TSN_CMD_REPLY, netdev->name, ret);
+ return ret;
+ }
+
+	/* Simple reply here. To be continued. */
+ if (tsn_simple_reply(info, TSN_CMD_REPLY, netdev->name, 0))
+ return -1;
+
+ return 0;
+}
+
+static int tsn_cb_streamid_set(struct sk_buff *skb, struct genl_info *info)
+{
+ if (info->attrs[TSN_ATTR_IFNAME]) {
+ cmd_cb_streamid_set(info);
+ return 0;
+ }
+
+ return -1;
+}
+
+static int cmd_cb_streamid_get(struct genl_info *info)
+{
+ struct nlattr *na, *sidattr, *sid[TSN_STREAMID_ATTR_MAX + 1];
+ u32 sid_index;
+ struct genlmsghdr *genlhdr;
+ struct sk_buff *rep_skb;
+ int ret, i;
+ int valid;
+ struct net_device *netdev;
+ struct tsn_cb_streamid sidconf;
+ struct tsn_cb_streamid_counters sidcounts;
+ const struct tsn_ops *tsnops;
+ struct tsn_port *port;
+
+ port = tsn_init_check(info, &netdev);
+ if (!port)
+ return -ENODEV;
+
+ tsnops = port->tsnops;
+
+ memset(&sidconf, 0, sizeof(struct tsn_cb_streamid));
+ memset(&sidcounts, 0, sizeof(struct tsn_cb_streamid_counters));
+
+ if (!info->attrs[TSN_ATTR_STREAM_IDENTIFY])
+ return -EINVAL;
+
+ na = info->attrs[TSN_ATTR_STREAM_IDENTIFY];
+
+ ret = NLA_PARSE_NESTED(sid, TSN_STREAMID_ATTR_MAX,
+ na, cb_streamid_policy);
+ if (ret)
+ return -EINVAL;
+
+ if (!sid[TSN_STREAMID_ATTR_INDEX])
+ return -EINVAL;
+
+ sid_index = nla_get_u32(sid[TSN_STREAMID_ATTR_INDEX]);
+
+ if (!tsnops->cb_streamid_get) {
+ tsn_simple_reply(info, TSN_CMD_REPLY,
+ netdev->name, -EPERM);
+ ret = -EINVAL;
+ goto exit;
+ } else {
+ valid = tsnops->cb_streamid_get(netdev, sid_index, &sidconf);
+ if (valid < 0) {
+ tsn_simple_reply(info, TSN_CMD_REPLY,
+ netdev->name, valid);
+ return valid;
+ }
+ }
+
+ /* send back */
+ genlhdr = info->genlhdr;
+ ret = tsn_prepare_reply(info, genlhdr->cmd, &rep_skb,
+ NLMSG_ALIGN(MAX_ATTR_SIZE));
+ if (ret < 0)
+ return ret;
+
+	/* Fill the netlink reply with the stream identification parameters */
+ sidattr = nla_nest_start_noflag(rep_skb, TSN_ATTR_STREAM_IDENTIFY);
+ if (!sidattr) {
+ ret = -EINVAL;
+ goto err;
+ }
+
+ if (nla_put_u32(rep_skb, TSN_STREAMID_ATTR_INDEX, sid_index))
+ goto err;
+
+ if (valid == 1) {
+ if (nla_put_flag(rep_skb, TSN_STREAMID_ATTR_ENABLE))
+ goto err;
+ } else if (valid == 0) {
+ if (nla_put_flag(rep_skb, TSN_STREAMID_ATTR_DISABLE))
+ goto err;
+ } else {
+ tsn_simple_reply(info, TSN_CMD_REPLY,
+ netdev->name, -EINVAL);
+ goto err;
+ }
+
+ if (nla_put_s32(rep_skb,
+ TSN_STREAMID_ATTR_STREAM_HANDLE, sidconf.handle) ||
+ nla_put_u32(rep_skb, TSN_STREAMID_ATTR_IFOP, sidconf.ifac_oport) ||
+ nla_put_u32(rep_skb, TSN_STREAMID_ATTR_OFOP, sidconf.ofac_oport) ||
+ nla_put_u32(rep_skb, TSN_STREAMID_ATTR_IFIP, sidconf.ifac_iport) ||
+ nla_put_u32(rep_skb, TSN_STREAMID_ATTR_OFIP, sidconf.ofac_iport) ||
+ nla_put_u8(rep_skb, TSN_STREAMID_ATTR_TYPE, sidconf.type))
+ goto err;
+
+ switch (sidconf.type) {
+ case STREAMID_NULL:
+ if (NLA_PUT_U64(rep_skb, TSN_STREAMID_ATTR_NDMAC,
+ sidconf.para.nid.dmac) ||
+ nla_put_u16(rep_skb, TSN_STREAMID_ATTR_NVID,
+ sidconf.para.nid.vid) ||
+ nla_put_u8(rep_skb, TSN_STREAMID_ATTR_NTAGGED,
+ sidconf.para.nid.tagged))
+ goto err;
+ break;
+ case STREAMID_SMAC_VLAN:
+ if (NLA_PUT_U64(rep_skb, TSN_STREAMID_ATTR_SMAC,
+ sidconf.para.sid.smac) ||
+ nla_put_u16(rep_skb, TSN_STREAMID_ATTR_SVID,
+ sidconf.para.sid.vid) ||
+ nla_put_u8(rep_skb, TSN_STREAMID_ATTR_STAGGED,
+ sidconf.para.sid.tagged))
+ goto err;
+ break;
+ case STREAMID_DMAC_VLAN:
+ case STREAMID_IP:
+ default:
+ tsn_simple_reply(info, TSN_CMD_REPLY,
+ netdev->name, -EINVAL);
+ goto err;
+ }
+
+ if (!tsnops->cb_streamid_counters_get) {
+ tsn_simple_reply(info, TSN_CMD_REPLY,
+ netdev->name, -EPERM);
+ goto err;
+ } else {
+ ret = tsnops->cb_streamid_counters_get(netdev,
+ sid_index,
+ &sidcounts);
+ if (ret < 0) {
+ tsn_simple_reply(info, TSN_CMD_REPLY,
+ netdev->name, ret);
+ goto err;
+ }
+ }
+
+ if (NLA_PUT_U64(rep_skb, TSN_STREAMID_ATTR_COUNTERS_PSI,
+ sidcounts.per_stream.input) ||
+ NLA_PUT_U64(rep_skb, TSN_STREAMID_ATTR_COUNTERS_PSO,
+ sidcounts.per_stream.output))
+ goto err;
+
+ for (i = 0; i < 32; i++) {
+ if (NLA_PUT_U64(rep_skb, TSN_STREAMID_ATTR_COUNTERS_PSPPI,
+ sidcounts.per_streamport[i].input) ||
+ NLA_PUT_U64(rep_skb, TSN_STREAMID_ATTR_COUNTERS_PSPPO,
+ sidcounts.per_streamport[i].output))
+ goto err;
+ }
+
+ nla_nest_end(rep_skb, sidattr);
+	/* End of stream identification parameters */
+
+ /* netlink lib func */
+ ret = nla_put_string(rep_skb, TSN_ATTR_IFNAME, netdev->name);
+ if (ret < 0)
+ goto err;
+
+ ret = nla_put_s32(rep_skb, TSN_CMD_ATTR_DATA, 0);
+ if (ret < 0)
+ goto err;
+
+ return tsn_send_reply(rep_skb, info);
+
+err:
+ nlmsg_free(rep_skb);
+exit:
+ return ret;
+}
+
+static int tsn_cb_streamid_get(struct sk_buff *skb, struct genl_info *info)
+{
+ if (info->attrs[TSN_ATTR_IFNAME]) {
+ cmd_cb_streamid_get(info);
+ return 0;
+ }
+
+ return -1;
+}
+
+static int cmd_cb_streamid_counters_get(struct genl_info *info)
+{
+ return 0;
+}
+
+static int tsn_cb_streamid_counters_get(struct sk_buff *skb,
+ struct genl_info *info)
+{
+ if (info->attrs[TSN_ATTR_IFNAME]) {
+		cmd_cb_streamid_counters_get(info);
+ return 0;
+ }
+
+ return -1;
+}
+
+static int tsn_qci_cap_get(struct sk_buff *skb, struct genl_info *info)
+{
+ struct nlattr *qci_cap;
+ struct sk_buff *rep_skb;
+ int ret;
+ struct net_device *netdev;
+ struct genlmsghdr *genlhdr;
+ struct tsn_qci_psfp_stream_param qci_cap_status;
+ const struct tsn_ops *tsnops;
+ struct tsn_port *port;
+
+	port = tsn_init_check(info, &netdev);
+	/* netdev is not valid when tsn_init_check() fails, so return
+	 * directly rather than jumping to "out".
+	 */
+	if (!port)
+		return -EINVAL;
+
+ tsnops = port->tsnops;
+
+ genlhdr = info->genlhdr;
+
+ memset(&qci_cap_status, 0, sizeof(qci_cap_status));
+
+ if (!tsnops->qci_get_maxcap) {
+ ret = -EOPNOTSUPP;
+ goto out;
+ }
+
+ ret = tsnops->qci_get_maxcap(netdev, &qci_cap_status);
+ if (ret < 0)
+ goto out;
+
+ /* Pad netlink reply data */
+ ret = tsn_prepare_reply(info, genlhdr->cmd,
+ &rep_skb, NLMSG_ALIGN(MAX_ATTR_SIZE));
+ if (ret < 0)
+ goto out;
+
+ if (nla_put_string(rep_skb, TSN_ATTR_IFNAME, netdev->name)) {
+ ret = -EMSGSIZE;
+ goto err;
+ }
+
+ qci_cap = nla_nest_start_noflag(rep_skb, TSN_ATTR_QCI_SP);
+ if (!qci_cap) {
+ ret = -EMSGSIZE;
+ goto err;
+ }
+
+ if (nla_put_u32(rep_skb, TSN_QCI_STREAM_ATTR_MAX_SFI,
+ qci_cap_status.max_sf_instance) ||
+ nla_put_u32(rep_skb, TSN_QCI_STREAM_ATTR_MAX_SGI,
+ qci_cap_status.max_sg_instance) ||
+ nla_put_u32(rep_skb, TSN_QCI_STREAM_ATTR_MAX_FMI,
+ qci_cap_status.max_fm_instance) ||
+ nla_put_u32(rep_skb, TSN_QCI_STREAM_ATTR_SLM,
+ qci_cap_status.supported_list_max)) {
+ ret = -EMSGSIZE;
+ goto err;
+ }
+
+ nla_nest_end(rep_skb, qci_cap);
+
+ tsn_send_reply(rep_skb, info);
+
+ return 0;
+err:
+ nlmsg_free(rep_skb);
+out:
+ if (ret < 0)
+ tsn_simple_reply(info, TSN_CMD_REPLY, netdev->name, ret);
+
+ return ret;
+}
+
+static int cmd_qci_sfi_set(struct genl_info *info)
+{
+ struct nlattr *na, *sfi[TSN_QCI_SFI_ATTR_MAX + 1];
+ u32 sfi_handle;
+ bool enable;
+ int ret;
+ struct net_device *netdev;
+ struct tsn_qci_psfp_sfi_conf sficonf;
+ const struct tsn_ops *tsnops;
+ struct tsn_port *port;
+
+ port = tsn_init_check(info, &netdev);
+ if (!port)
+ return -ENODEV;
+
+ tsnops = port->tsnops;
+
+ memset(&sficonf, 0, sizeof(struct tsn_qci_psfp_sfi_conf));
+
+ if (!info->attrs[TSN_ATTR_QCI_SFI])
+ return -EINVAL;
+
+ na = info->attrs[TSN_ATTR_QCI_SFI];
+
+ ret = NLA_PARSE_NESTED(sfi, TSN_QCI_SFI_ATTR_MAX, na, qci_sfi_policy);
+ if (ret) {
+		pr_err("tsn: parsing TSN_ATTR_QCI_SFI attributes failed\n");
+ return -EINVAL;
+ }
+
+ if (!sfi[TSN_QCI_SFI_ATTR_INDEX])
+ return -EINVAL;
+
+ sfi_handle = nla_get_u32(sfi[TSN_QCI_SFI_ATTR_INDEX]);
+
+ if (sfi[TSN_QCI_SFI_ATTR_ENABLE]) {
+ enable = true;
+ } else if (sfi[TSN_QCI_SFI_ATTR_DISABLE]) {
+ enable = false;
+ goto loaddrive;
+ } else {
+		pr_err("tsn: must provide ENABLE or DISABLE attribute.\n");
+ tsn_simple_reply(info, TSN_CMD_REPLY,
+ netdev->name, -EINVAL);
+ return -EINVAL;
+ }
+
+ if (!sfi[TSN_QCI_SFI_ATTR_GATE_ID]) {
+ pr_err("tsn: must provide stream gate index\n");
+ tsn_simple_reply(info, TSN_CMD_REPLY,
+ netdev->name, -EINVAL);
+ return -EINVAL;
+ }
+
+ if (!sfi[TSN_QCI_SFI_ATTR_STREAM_HANDLE])
+ sficonf.stream_handle_spec = -1;
+ else
+ sficonf.stream_handle_spec =
+ nla_get_s32(sfi[TSN_QCI_SFI_ATTR_STREAM_HANDLE]);
+
+ if (!sfi[TSN_QCI_SFI_ATTR_PRIO_SPEC])
+ sficonf.priority_spec = -1;
+ else
+ sficonf.priority_spec =
+ nla_get_s8(sfi[TSN_QCI_SFI_ATTR_PRIO_SPEC]);
+
+ sficonf.stream_gate_instance_id =
+ nla_get_u32(sfi[TSN_QCI_SFI_ATTR_GATE_ID]);
+
+ if (sfi[TSN_QCI_SFI_ATTR_MAXSDU])
+ sficonf.stream_filter.maximum_sdu_size =
+ nla_get_u16(sfi[TSN_QCI_SFI_ATTR_MAXSDU]);
+ else
+ sficonf.stream_filter.maximum_sdu_size = 0;
+
+ if (sfi[TSN_QCI_SFI_ATTR_FLOW_ID])
+ sficonf.stream_filter.flow_meter_instance_id =
+ nla_get_s32(sfi[TSN_QCI_SFI_ATTR_FLOW_ID]);
+ else
+ sficonf.stream_filter.flow_meter_instance_id = -1;
+
+ if (sfi[TSN_QCI_SFI_ATTR_OVERSIZE_ENABLE])
+ sficonf.block_oversize_enable = true;
+
+ if (sfi[TSN_QCI_SFI_ATTR_OVERSIZE])
+ sficonf.block_oversize = true;
+
+loaddrive:
+ if (!tsnops->qci_sfi_set) {
+ tsn_simple_reply(info, TSN_CMD_REPLY,
+ netdev->name, -EPERM);
+ return -EINVAL;
+ }
+
+ ret = tsnops->qci_sfi_set(netdev, sfi_handle, enable, &sficonf);
+ if (ret < 0) {
+ tsn_simple_reply(info, TSN_CMD_REPLY, netdev->name, ret);
+ return ret;
+ }
+
+ ret = tsn_simple_reply(info, TSN_CMD_REPLY, netdev->name, 0);
+
+ if (ret)
+ return ret;
+ return 0;
+}
+
+static int tsn_qci_sfi_set(struct sk_buff *skb, struct genl_info *info)
+{
+ if (info->attrs[TSN_ATTR_IFNAME]) {
+ cmd_qci_sfi_set(info);
+ return 0;
+ }
+
+ return -1;
+}
+
+static int cmd_qci_sfi_get(struct genl_info *info)
+{
+ struct nlattr *na, *sfiattr;
+ struct nlattr *sfi[TSN_QCI_SFI_ATTR_MAX + 1];
+ u32 sfi_handle;
+ struct sk_buff *rep_skb;
+ int ret, valid = 0;
+ struct net_device *netdev;
+ struct genlmsghdr *genlhdr;
+ struct tsn_qci_psfp_sfi_conf sficonf;
+ struct tsn_qci_psfp_sfi_counters sficount;
+ const struct tsn_ops *tsnops;
+ struct tsn_port *port;
+
+ port = tsn_init_check(info, &netdev);
+ if (!port)
+ return -ENODEV;
+
+ tsnops = port->tsnops;
+
+ genlhdr = info->genlhdr;
+
+ if (!info->attrs[TSN_ATTR_QCI_SFI])
+ return -EINVAL;
+
+ na = info->attrs[TSN_ATTR_QCI_SFI];
+
+ ret = NLA_PARSE_NESTED(sfi, TSN_QCI_SFI_ATTR_MAX,
+ na, qci_sfi_policy);
+ if (ret)
+ return -EINVAL;
+
+ if (!sfi[TSN_QCI_SFI_ATTR_INDEX])
+ return -EINVAL;
+
+ sfi_handle = nla_get_u32(sfi[TSN_QCI_SFI_ATTR_INDEX]);
+
+ memset(&sficonf, 0, sizeof(struct tsn_qci_psfp_sfi_conf));
+ memset(&sficount, 0, sizeof(struct tsn_qci_psfp_sfi_counters));
+
+ if (!tsnops->qci_sfi_get || !tsnops->qci_sfi_counters_get) {
+ tsn_simple_reply(info, TSN_CMD_REPLY,
+ netdev->name, -EPERM);
+ ret = -EINVAL;
+ goto exit;
+ } else {
+ valid = tsnops->qci_sfi_get(netdev, sfi_handle, &sficonf);
+ if (valid < 0) {
+ tsn_simple_reply(info, TSN_CMD_REPLY,
+ netdev->name, valid);
+ return valid;
+ }
+
+ valid = tsnops->qci_sfi_counters_get(netdev, sfi_handle,
+ &sficount);
+ if (valid < 0) {
+ tsn_simple_reply(info, TSN_CMD_REPLY,
+ netdev->name, valid);
+ return valid;
+ }
+ }
+
+ ret = tsn_prepare_reply(info, genlhdr->cmd,
+ &rep_skb, NLMSG_ALIGN(MAX_ATTR_SIZE));
+ if (ret < 0)
+ return ret;
+
+ if (nla_put_string(rep_skb, TSN_ATTR_IFNAME, netdev->name))
+ goto err;
+
+ sfiattr = nla_nest_start_noflag(rep_skb, TSN_ATTR_QCI_SFI);
+ if (!sfiattr) {
+ tsn_simple_reply(info, TSN_CMD_REPLY,
+ netdev->name, -EINVAL);
+ ret = -EINVAL;
+ goto err;
+ }
+
+ if (nla_put_u32(rep_skb, TSN_QCI_SFI_ATTR_INDEX, sfi_handle))
+ goto err;
+
+ if (valid) {
+ if (nla_put_flag(rep_skb, TSN_QCI_SFI_ATTR_ENABLE))
+ goto err;
+ } else {
+ if (nla_put_flag(rep_skb, TSN_QCI_SFI_ATTR_DISABLE))
+ goto err;
+ }
+
+ if (nla_put_s32(rep_skb, TSN_QCI_SFI_ATTR_STREAM_HANDLE,
+ sficonf.stream_handle_spec) ||
+ nla_put_s8(rep_skb, TSN_QCI_SFI_ATTR_PRIO_SPEC,
+ sficonf.priority_spec) ||
+ nla_put_u32(rep_skb, TSN_QCI_SFI_ATTR_GATE_ID,
+ sficonf.stream_gate_instance_id))
+ goto err;
+
+ if (sficonf.stream_filter.maximum_sdu_size)
+ if (nla_put_u16(rep_skb, TSN_QCI_SFI_ATTR_MAXSDU,
+ sficonf.stream_filter.maximum_sdu_size))
+ goto err;
+
+ if (sficonf.stream_filter.flow_meter_instance_id >= 0)
+ if (nla_put_s32(rep_skb, TSN_QCI_SFI_ATTR_FLOW_ID,
+ sficonf.stream_filter.flow_meter_instance_id))
+ goto err;
+
+ if (sficonf.block_oversize_enable)
+ if (nla_put_flag(rep_skb, TSN_QCI_SFI_ATTR_OVERSIZE_ENABLE))
+ goto err;
+ if (sficonf.block_oversize)
+ if (nla_put_flag(rep_skb, TSN_QCI_SFI_ATTR_OVERSIZE))
+ goto err;
+
+ if (nla_put(rep_skb, TSN_QCI_SFI_ATTR_COUNTERS,
+ sizeof(struct tsn_qci_psfp_sfi_counters), &sficount))
+ goto err;
+
+ nla_nest_end(rep_skb, sfiattr);
+
+ return tsn_send_reply(rep_skb, info);
+err:
+ nlmsg_free(rep_skb);
+ tsn_simple_reply(info, TSN_CMD_REPLY,
+ netdev->name, -EINVAL);
+exit:
+ return ret;
+}
+
+static int tsn_qci_sfi_get(struct sk_buff *skb, struct genl_info *info)
+{
+ if (info->attrs[TSN_ATTR_IFNAME]) {
+ cmd_qci_sfi_get(info);
+ return 0;
+ }
+
+ return -1;
+}
+
+static int cmd_qci_sfi_counters_get(struct genl_info *info)
+{
+ struct nlattr *na, *sfiattr;
+ struct nlattr *sfi[TSN_QCI_SFI_ATTR_MAX + 1];
+ u32 sfi_handle;
+ struct sk_buff *rep_skb;
+ int ret;
+ struct net_device *netdev;
+ struct genlmsghdr *genlhdr;
+ struct tsn_qci_psfp_sfi_counters sficount;
+ const struct tsn_ops *tsnops;
+ struct tsn_port *port;
+
+ port = tsn_init_check(info, &netdev);
+ if (!port)
+ return -ENODEV;
+
+ tsnops = port->tsnops;
+
+ genlhdr = info->genlhdr;
+
+ if (!info->attrs[TSN_ATTR_QCI_SFI])
+ return -EINVAL;
+
+ na = info->attrs[TSN_ATTR_QCI_SFI];
+
+ ret = NLA_PARSE_NESTED(sfi, TSN_QCI_SFI_ATTR_MAX,
+ na, qci_sfi_policy);
+ if (ret)
+ return -EINVAL;
+
+ if (!sfi[TSN_QCI_SFI_ATTR_INDEX])
+ return -EINVAL;
+
+ sfi_handle = nla_get_u32(sfi[TSN_QCI_SFI_ATTR_INDEX]);
+
+ memset(&sficount, 0, sizeof(struct tsn_qci_psfp_sfi_counters));
+ if (!tsnops->qci_sfi_counters_get) {
+ tsn_simple_reply(info, TSN_CMD_REPLY,
+ netdev->name, -EPERM);
+ return -1;
+ }
+
+ ret = tsnops->qci_sfi_counters_get(netdev, sfi_handle, &sficount);
+ if (ret < 0) {
+ tsn_simple_reply(info, TSN_CMD_REPLY,
+ netdev->name, ret);
+ return ret;
+ }
+
+ ret = tsn_prepare_reply(info, genlhdr->cmd, &rep_skb,
+ NLMSG_ALIGN(MAX_ATTR_SIZE));
+ if (ret < 0)
+ return ret;
+
+ if (nla_put_string(rep_skb, TSN_ATTR_IFNAME, netdev->name))
+ goto err;
+
+ sfiattr = nla_nest_start_noflag(rep_skb, TSN_ATTR_QCI_SFI);
+ if (!sfiattr) {
+ ret = -EINVAL;
+ goto err;
+ }
+
+ if (nla_put_u32(rep_skb, TSN_QCI_SFI_ATTR_INDEX, sfi_handle))
+ goto err;
+
+ ret = tsnops->qci_sfi_counters_get(netdev, sfi_handle, &sficount);
+ if (ret < 0)
+ goto err;
+
+ if (nla_put(rep_skb, TSN_QCI_SFI_ATTR_COUNTERS,
+ sizeof(struct tsn_qci_psfp_sfi_counters), &sficount))
+ goto err;
+
+ nla_nest_end(rep_skb, sfiattr);
+
+ return tsn_send_reply(rep_skb, info);
+err:
+ nlmsg_free(rep_skb);
+ tsn_simple_reply(info, TSN_CMD_REPLY, netdev->name, -EINVAL);
+ return ret;
+}
+
+static int tsn_qci_sfi_counters_get(struct sk_buff *skb, struct genl_info *info)
+{
+ if (info->attrs[TSN_ATTR_IFNAME]) {
+ cmd_qci_sfi_counters_get(info);
+ return 0;
+ }
+
+ return -1;
+}
+
+static int cmd_qci_sgi_set(struct genl_info *info)
+{
+ struct nlattr *na;
+ struct nlattr *sgia[TSN_QCI_SGI_ATTR_MAX + 1];
+ struct nlattr *admin[TSN_SGI_ATTR_CTRL_MAX + 1];
+ int ret = 0;
+ struct net_device *netdev;
+ const struct tsn_ops *tsnops;
+ struct tsn_qci_psfp_sgi_conf sgi;
+ struct tsn_qci_psfp_gcl *gcl = NULL;
+ u16 sgi_handle = 0;
+ u16 listcount = 0;
+ struct tsn_port *port;
+
+ port = tsn_init_check(info, &netdev);
+ if (!port)
+ return -ENODEV;
+
+ tsnops = port->tsnops;
+
+ memset(&sgi, 0, sizeof(struct tsn_qci_psfp_sgi_conf));
+
+ if (!info->attrs[TSN_ATTR_QCI_SGI]) {
+ tsn_simple_reply(info, TSN_CMD_REPLY,
+ netdev->name, -EINVAL);
+ return -EINVAL;
+ }
+
+ na = info->attrs[TSN_ATTR_QCI_SGI];
+
+ ret = NLA_PARSE_NESTED(sgia, TSN_QCI_SGI_ATTR_MAX,
+ na, qci_sgi_policy);
+ if (ret) {
+ tsn_simple_reply(info, TSN_CMD_REPLY,
+ netdev->name, -EINVAL);
+ return -EINVAL;
+ }
+
+ if (sgia[TSN_QCI_SGI_ATTR_ENABLE] && sgia[TSN_QCI_SGI_ATTR_DISABLE]) {
+ pr_err("tsn: enable or disable?\n");
+ tsn_simple_reply(info, TSN_CMD_REPLY,
+ netdev->name, -EINVAL);
+ return -1;
+ }
+
+ if (sgia[TSN_QCI_SGI_ATTR_INDEX])
+ sgi_handle = nla_get_u32(sgia[TSN_QCI_SGI_ATTR_INDEX]);
+
+ if (sgia[TSN_QCI_SGI_ATTR_DISABLE]) {
+ sgi.gate_enabled = 0;
+ goto loaddev;
+ } else {
+		/* default to enabled */
+ sgi.gate_enabled = 1;
+ }
+
+ if (sgia[TSN_QCI_SGI_ATTR_CONFCHANGE])
+ sgi.config_change = 1;
+
+ if (sgia[TSN_QCI_SGI_ATTR_IRXEN])
+ sgi.block_invalid_rx_enable = 1;
+
+ if (sgia[TSN_QCI_SGI_ATTR_IRX])
+ sgi.block_invalid_rx = 1;
+
+ if (sgia[TSN_QCI_SGI_ATTR_OEXEN])
+ sgi.block_octets_exceeded_enable = 1;
+
+ if (sgia[TSN_QCI_SGI_ATTR_OEX])
+ sgi.block_octets_exceeded = 1;
+
+ if (sgia[TSN_QCI_SGI_ATTR_ADMINENTRY]) {
+ struct nlattr *entry;
+ int rem;
+ int count = 0;
+
+ na = sgia[TSN_QCI_SGI_ATTR_ADMINENTRY];
+ ret = NLA_PARSE_NESTED(admin, TSN_SGI_ATTR_CTRL_MAX,
+ na, qci_sgi_ctrl_policy);
+
+ /* Other parameters in admin control */
+ if (admin[TSN_SGI_ATTR_CTRL_INITSTATE])
+ sgi.admin.gate_states = 1;
+
+ if (admin[TSN_SGI_ATTR_CTRL_CYTIME])
+ sgi.admin.cycle_time =
+ nla_get_u32(admin[TSN_SGI_ATTR_CTRL_CYTIME]);
+
+ if (admin[TSN_SGI_ATTR_CTRL_CYTIMEEX])
+ sgi.admin.cycle_time_extension =
+ nla_get_u32(admin[TSN_SGI_ATTR_CTRL_CYTIMEEX]);
+
+ if (admin[TSN_SGI_ATTR_CTRL_BTIME])
+ sgi.admin.base_time =
+ nla_get_u64(admin[TSN_SGI_ATTR_CTRL_BTIME]);
+
+ if (admin[TSN_SGI_ATTR_CTRL_INITIPV])
+ sgi.admin.init_ipv =
+ nla_get_s8(admin[TSN_SGI_ATTR_CTRL_INITIPV]);
+ else
+ sgi.admin.init_ipv = -1;
+
+ if (admin[TSN_SGI_ATTR_CTRL_LEN]) {
+ sgi.admin.control_list_length =
+ nla_get_u8(admin[TSN_SGI_ATTR_CTRL_LEN]);
+ listcount = sgi.admin.control_list_length;
+ }
+
+ if (!listcount)
+ goto loaddev;
+
+		gcl = kcalloc(listcount, sizeof(*gcl), GFP_KERNEL);
+		if (!gcl)
+			return -ENOMEM;
+
+		/* Walk all the admin attributes and pick out each
+		 * TSN_SGI_ATTR_CTRL_GCLENTRY attribute
+		 */
+ nla_for_each_nested(entry, na, rem) {
+ struct nlattr *gcl_entry[TSN_SGI_ATTR_GCL_MAX + 1];
+ struct nlattr *ti, *om;
+
+ if (nla_type(entry) != TSN_SGI_ATTR_CTRL_GCLENTRY)
+ continue;
+
+ /* parse each TSN_SGI_ATTR_CTRL_GCLENTRY */
+ ret = NLA_PARSE_NESTED(gcl_entry, TSN_SGI_ATTR_GCL_MAX,
+ entry, qci_sgi_gcl_policy);
+ /* Parse gate control list */
+ if (gcl_entry[TSN_SGI_ATTR_GCL_GATESTATE])
+ (gcl + count)->gate_state = 1;
+
+ if (gcl_entry[TSN_SGI_ATTR_GCL_IPV])
+ (gcl + count)->ipv =
+ nla_get_s8(gcl_entry[TSN_SGI_ATTR_GCL_IPV]);
+
+ if (gcl_entry[TSN_SGI_ATTR_GCL_INTERVAL]) {
+ ti = gcl_entry[TSN_SGI_ATTR_GCL_INTERVAL];
+ (gcl + count)->time_interval = nla_get_u32(ti);
+ }
+
+ if (gcl_entry[TSN_SGI_ATTR_GCL_OCTMAX]) {
+ om = gcl_entry[TSN_SGI_ATTR_GCL_OCTMAX];
+ (gcl + count)->octet_max = nla_get_u32(om);
+ }
+
+ count++;
+
+ if (count >= listcount)
+ break;
+ }
+
+ if (count < listcount) {
+ tsn_simple_reply(info, TSN_CMD_REPLY,
+ netdev->name, -EINVAL);
+ pr_err("tsn: count less than TSN_SGI_ATTR_CTRL_LEN\n");
+ kfree(gcl);
+ return -EINVAL;
+ }
+
+ } else {
+		pr_info("tsn: no admin control list parameters provided\n");
+ }
+
+loaddev:
+ if (!tsnops->qci_sgi_set) {
+ tsn_simple_reply(info, TSN_CMD_REPLY,
+ netdev->name, -EPERM);
+ kfree(gcl);
+ return -EINVAL;
+ }
+
+ sgi.admin.gcl = gcl;
+
+ ret = tsnops->qci_sgi_set(netdev, sgi_handle, &sgi);
+ kfree(gcl);
+ if (!ret)
+ return tsn_simple_reply(info, TSN_CMD_REPLY,
+ netdev->name, 0);
+
+ tsn_simple_reply(info, TSN_CMD_REPLY,
+ netdev->name, ret);
+ return ret;
+}
+
+static int tsn_qci_sgi_set(struct sk_buff *skb, struct genl_info *info)
+{
+ if (info->attrs[TSN_ATTR_IFNAME]) {
+ cmd_qci_sgi_set(info);
+ return 0;
+ }
+
+ return -1;
+}
+
+static int cmd_qci_sgi_get(struct genl_info *info)
+{
+ struct nlattr *na, *sgiattr, *adminattr, *sglattr;
+ struct nlattr *sgi[TSN_QCI_SGI_ATTR_MAX + 1];
+ struct sk_buff *rep_skb;
+ int ret;
+ struct net_device *netdev;
+ struct genlmsghdr *genlhdr;
+ struct tsn_qci_psfp_sgi_conf sgiadmin;
+ struct tsn_qci_psfp_gcl *gcl = NULL;
+ const struct tsn_ops *tsnops;
+ u16 sgi_handle;
+ u8 listcount, i;
+ struct tsn_port *port;
+
+ port = tsn_init_check(info, &netdev);
+ if (!port)
+ return -ENODEV;
+
+ tsnops = port->tsnops;
+
+ if (!info->attrs[TSN_ATTR_QCI_SGI]) {
+ tsn_simple_reply(info, TSN_CMD_REPLY,
+ netdev->name, -EINVAL);
+ pr_err("tsn: no sgi handle input\n");
+ return -EINVAL;
+ }
+
+ na = info->attrs[TSN_ATTR_QCI_SGI];
+
+ ret = NLA_PARSE_NESTED(sgi, TSN_QCI_SGI_ATTR_MAX,
+ na, qci_sgi_policy);
+ if (ret)
+ return -EINVAL;
+
+ if (!sgi[TSN_QCI_SGI_ATTR_INDEX]) {
+ tsn_simple_reply(info, TSN_CMD_REPLY,
+ netdev->name, -EINVAL);
+ pr_err("tsn: no sgi handle input\n");
+ return -EINVAL;
+ }
+
+ sgi_handle = nla_get_u32(sgi[TSN_QCI_SGI_ATTR_INDEX]);
+
+ /* Get config data from device */
+ genlhdr = info->genlhdr;
+
+ memset(&sgiadmin, 0, sizeof(struct tsn_qci_psfp_sgi_conf));
+
+ if (!tsnops->qci_sgi_get) {
+ tsn_simple_reply(info, TSN_CMD_REPLY,
+ netdev->name, -EPERM);
+ return -1;
+ }
+
+ ret = tsnops->qci_sgi_get(netdev, sgi_handle, &sgiadmin);
+ if (ret < 0) {
+ tsn_simple_reply(info, TSN_CMD_REPLY,
+ netdev->name, ret);
+ return ret;
+ }
+
+ /* Form netlink reply data */
+ ret = tsn_prepare_reply(info, genlhdr->cmd,
+ &rep_skb, NLMSG_ALIGN(MAX_ATTR_SIZE));
+ if (ret < 0)
+ return ret;
+
+ if (nla_put_string(rep_skb, TSN_ATTR_IFNAME, netdev->name))
+ goto err;
+
+ sgiattr = nla_nest_start_noflag(rep_skb, TSN_ATTR_QCI_SGI);
+ if (!sgiattr)
+ goto err;
+
+ if (nla_put_u32(rep_skb, TSN_QCI_SGI_ATTR_INDEX, sgi_handle))
+ goto err;
+
+ /* Gate enable? sgiadmin.gate_enabled */
+ if (sgiadmin.gate_enabled) {
+ if (nla_put_flag(rep_skb, TSN_QCI_SGI_ATTR_ENABLE))
+ goto err;
+ } else {
+ if (nla_put_flag(rep_skb, TSN_QCI_SGI_ATTR_DISABLE))
+ goto err;
+ }
+
+ if (sgiadmin.config_change)
+ if (nla_put_flag(rep_skb, TSN_QCI_SGI_ATTR_CONFCHANGE))
+ goto err;
+
+ if (sgiadmin.block_invalid_rx_enable)
+ if (nla_put_flag(rep_skb, TSN_QCI_SGI_ATTR_IRXEN))
+ goto err;
+
+ if (sgiadmin.block_invalid_rx)
+ if (nla_put_flag(rep_skb, TSN_QCI_SGI_ATTR_IRX))
+ goto err;
+
+ if (sgiadmin.block_octets_exceeded_enable)
+ if (nla_put_flag(rep_skb, TSN_QCI_SGI_ATTR_OEXEN))
+ goto err;
+
+ if (sgiadmin.block_octets_exceeded)
+ if (nla_put_flag(rep_skb, TSN_QCI_SGI_ATTR_OEX))
+ goto err;
+
+ /* Administration */
+ adminattr = nla_nest_start_noflag(rep_skb, TSN_QCI_SGI_ATTR_ADMINENTRY);
+ if (!adminattr)
+ goto err;
+
+ if (sgiadmin.admin.gate_states)
+ if (nla_put_flag(rep_skb, TSN_SGI_ATTR_CTRL_INITSTATE))
+ goto err;
+
+ if (nla_put_u32(rep_skb, TSN_SGI_ATTR_CTRL_CYTIME,
+ sgiadmin.admin.cycle_time) ||
+ nla_put_u32(rep_skb, TSN_SGI_ATTR_CTRL_CYTIMEEX,
+ sgiadmin.admin.cycle_time_extension) ||
+ NLA_PUT_U64(rep_skb, TSN_SGI_ATTR_CTRL_BTIME,
+ sgiadmin.admin.base_time) ||
+ nla_put_u8(rep_skb, TSN_SGI_ATTR_CTRL_INITIPV,
+ sgiadmin.admin.init_ipv))
+ goto err;
+
+ listcount = sgiadmin.admin.control_list_length;
+ if (!listcount)
+ goto out1;
+
+ if (!sgiadmin.admin.gcl) {
+ pr_err("error: no gate control list\n");
+ ret = -EINVAL;
+ goto err;
+ }
+
+ gcl = sgiadmin.admin.gcl;
+
+ /* loop list */
+ for (i = 0; i < listcount; i++) {
+ s8 ipv;
+ u32 ti, omax;
+
+ if (!(gcl + i)) {
+ pr_err("error: list count too big\n");
+ ret = -EINVAL;
+ kfree(sgiadmin.admin.gcl);
+ goto err;
+ }
+
+		/* Administration entry */
+ sglattr = nla_nest_start_noflag(rep_skb,
+ TSN_SGI_ATTR_CTRL_GCLENTRY);
+ if (!sglattr)
+ goto err;
+ ipv = (gcl + i)->ipv;
+ ti = (gcl + i)->time_interval;
+ omax = (gcl + i)->octet_max;
+
+ if ((gcl + i)->gate_state)
+ if (nla_put_flag(rep_skb, TSN_SGI_ATTR_GCL_GATESTATE))
+ goto err;
+
+ if (nla_put_s8(rep_skb, TSN_SGI_ATTR_GCL_IPV, ipv) ||
+ nla_put_u32(rep_skb, TSN_SGI_ATTR_GCL_INTERVAL, ti) ||
+ nla_put_u32(rep_skb, TSN_SGI_ATTR_GCL_OCTMAX, omax))
+ goto err;
+
+ /* End administration entry */
+ nla_nest_end(rep_skb, sglattr);
+ }
+
+ kfree(sgiadmin.admin.gcl);
+ if (nla_put_u8(rep_skb, TSN_SGI_ATTR_CTRL_LEN, listcount))
+ goto err;
+
+out1:
+	/* End administration */
+ nla_nest_end(rep_skb, adminattr);
+
+ nla_nest_end(rep_skb, sgiattr);
+
+ return tsn_send_reply(rep_skb, info);
+err:
+ nlmsg_free(rep_skb);
+ tsn_simple_reply(info, TSN_CMD_REPLY, netdev->name, ret);
+ return ret;
+}
+
+static int tsn_qci_sgi_get(struct sk_buff *skb, struct genl_info *info)
+{
+ if (info->attrs[TSN_ATTR_IFNAME]) {
+ cmd_qci_sgi_get(info);
+ return 0;
+ }
+
+ return -1;
+}
+
+static int cmd_qci_sgi_status_get(struct genl_info *info)
+{
+ struct nlattr *na, *sgiattr, *operattr, *sglattr;
+ struct nlattr *sgi[TSN_QCI_SGI_ATTR_MAX + 1];
+ struct sk_buff *rep_skb;
+ int ret;
+ struct net_device *netdev;
+ struct genlmsghdr *genlhdr;
+ struct tsn_psfp_sgi_status sgistat;
+ struct tsn_qci_psfp_gcl *gcl = NULL;
+ const struct tsn_ops *tsnops;
+ u16 sgi_handle;
+ u8 listcount;
+ int valid, i;
+ struct tsn_port *port;
+
+ port = tsn_init_check(info, &netdev);
+ if (!port)
+ return -ENODEV;
+
+ tsnops = port->tsnops;
+
+ if (!info->attrs[TSN_ATTR_QCI_SGI]) {
+ tsn_simple_reply(info, TSN_CMD_REPLY,
+ netdev->name, -EINVAL);
+ pr_err("tsn: no sgi handle input\n");
+ return -EINVAL;
+ }
+
+ na = info->attrs[TSN_ATTR_QCI_SGI];
+
+ ret = NLA_PARSE_NESTED(sgi, TSN_QCI_SGI_ATTR_MAX,
+ na, qci_sgi_policy);
+ if (ret)
+ return -EINVAL;
+
+ if (!sgi[TSN_QCI_SGI_ATTR_INDEX]) {
+ tsn_simple_reply(info, TSN_CMD_REPLY,
+ netdev->name, -EINVAL);
+ pr_err("tsn: no sgi handle input\n");
+ return -EINVAL;
+ }
+
+ sgi_handle = nla_get_u32(sgi[TSN_QCI_SGI_ATTR_INDEX]);
+
+ /* Get status data from device */
+ genlhdr = info->genlhdr;
+
+ memset(&sgistat, 0, sizeof(struct tsn_psfp_sgi_status));
+
+ if (!tsnops->qci_sgi_status_get) {
+ tsn_simple_reply(info, TSN_CMD_REPLY,
+ netdev->name, -EPERM);
+ return -1;
+ }
+
+ valid = tsnops->qci_sgi_status_get(netdev, sgi_handle, &sgistat);
+ if (valid < 0) {
+ tsn_simple_reply(info, TSN_CMD_REPLY,
+ netdev->name, valid);
+ return valid;
+ }
+
+ /* Form netlink reply data */
+ ret = tsn_prepare_reply(info, genlhdr->cmd,
+ &rep_skb, NLMSG_ALIGN(MAX_ATTR_SIZE));
+ if (ret < 0)
+ return ret;
+
+ if (nla_put_string(rep_skb, TSN_ATTR_IFNAME, netdev->name))
+ goto err;
+
+ /* Down one netlink attribute level */
+ sgiattr = nla_nest_start_noflag(rep_skb, TSN_ATTR_QCI_SGI);
+ if (!sgiattr)
+ goto err;
+
+ if (nla_put_u32(rep_skb, TSN_QCI_SGI_ATTR_INDEX, sgi_handle))
+ goto err;
+
+ /* Gate enable */
+ if (valid == 1) {
+ if (nla_put_flag(rep_skb, TSN_QCI_SGI_ATTR_ENABLE))
+ goto err;
+ } else {
+ if (nla_put_flag(rep_skb, TSN_QCI_SGI_ATTR_DISABLE))
+ goto err;
+ }
+
+ if (nla_put_u32(rep_skb, TSN_QCI_SGI_ATTR_TICKG,
+ sgistat.tick_granularity) ||
+ NLA_PUT_U64(rep_skb, TSN_QCI_SGI_ATTR_CCTIME,
+ sgistat.config_change_time) ||
+ NLA_PUT_U64(rep_skb, TSN_QCI_SGI_ATTR_CUTIME,
+ sgistat.current_time) ||
+ NLA_PUT_U64(rep_skb, TSN_QCI_SGI_ATTR_CCERROR,
+ sgistat.config_change_error))
+ goto err;
+
+ if (sgistat.config_pending)
+ if (nla_put_flag(rep_skb, TSN_QCI_SGI_ATTR_CPENDING))
+ goto err;
+
+ /* operation data */
+ operattr = nla_nest_start_noflag(rep_skb, TSN_QCI_SGI_ATTR_OPERENTRY);
+ if (!operattr)
+ goto err;
+
+ if (sgistat.oper.gate_states)
+ if (nla_put_flag(rep_skb, TSN_SGI_ATTR_CTRL_INITSTATE))
+ goto err;
+
+ if (nla_put_u32(rep_skb, TSN_SGI_ATTR_CTRL_CYTIME,
+ sgistat.oper.cycle_time) ||
+ nla_put_u32(rep_skb, TSN_SGI_ATTR_CTRL_CYTIMEEX,
+ sgistat.oper.cycle_time_extension) ||
+ NLA_PUT_U64(rep_skb, TSN_SGI_ATTR_CTRL_BTIME,
+ sgistat.oper.base_time) ||
+ nla_put_u8(rep_skb, TSN_SGI_ATTR_CTRL_INITIPV,
+ sgistat.oper.init_ipv))
+ goto err;
+
+ /* Loop list */
+ listcount = sgistat.oper.control_list_length;
+ if (!listcount)
+ goto out1;
+
+ if (!sgistat.oper.gcl) {
+ pr_err("error: list lenghth is not zero!\n");
+ ret = -EINVAL;
+ goto err;
+ }
+
+ gcl = sgistat.oper.gcl;
+
+ /* loop list */
+ for (i = 0; i < listcount; i++) {
+ s8 ipv;
+ u32 ti, omax;
+
+ if (!(gcl + i)) {
+ pr_err("error: list count too big\n");
+ ret = -EINVAL;
+ kfree(sgistat.oper.gcl);
+ goto err;
+ }
+
+ /* Operation entry */
+ sglattr = nla_nest_start_noflag(rep_skb,
+ TSN_SGI_ATTR_CTRL_GCLENTRY);
+ if (!sglattr)
+ goto err;
+ ipv = (gcl + i)->ipv;
+ ti = (gcl + i)->time_interval;
+ omax = (gcl + i)->octet_max;
+
+ if ((gcl + i)->gate_state)
+ if (nla_put_flag(rep_skb, TSN_SGI_ATTR_GCL_GATESTATE))
+ goto err;
+
+ if (nla_put_s8(rep_skb, TSN_SGI_ATTR_GCL_IPV, ipv) ||
+ nla_put_u32(rep_skb, TSN_SGI_ATTR_GCL_INTERVAL, ti) ||
+ nla_put_u32(rep_skb, TSN_SGI_ATTR_GCL_OCTMAX, omax))
+ goto err;
+
+ /* End operation entry */
+ nla_nest_end(rep_skb, sglattr);
+ }
+
+ kfree(sgistat.oper.gcl);
+ if (nla_put_u8(rep_skb, TSN_SGI_ATTR_CTRL_LEN, listcount))
+ goto err;
+out1:
+ /* End operation */
+ nla_nest_end(rep_skb, operattr);
+
+ nla_nest_end(rep_skb, sgiattr);
+
+ return tsn_send_reply(rep_skb, info);
+err:
+ nlmsg_free(rep_skb);
+ tsn_simple_reply(info, TSN_CMD_REPLY, netdev->name, ret);
+ return ret;
+}
+
+static int tsn_qci_sgi_status_get(struct sk_buff *skb, struct genl_info *info)
+{
+ if (info->attrs[TSN_ATTR_IFNAME]) {
+ cmd_qci_sgi_status_get(info);
+ return 0;
+ }
+
+ return -1;
+}
+
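+/* Program one PSFP flow meter instance (CIR/CBS/EIR/EBS, coupling flag,
+ * color mode, drop-on-yellow, mark-red) or disable it.
+ */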
+static int cmd_qci_fmi_set(struct genl_info *info)
+{
+ struct nlattr *na, *fmi[TSN_QCI_FMI_ATTR_MAX + 1];
+ u32 index;
+ int ret;
+ struct net_device *netdev;
+ struct tsn_qci_psfp_fmi fmiconf;
+ const struct tsn_ops *tsnops;
+ bool enable = 0;
+ struct tsn_port *port;
+
+ port = tsn_init_check(info, &netdev);
+ if (!port)
+ return -ENODEV;
+
+ tsnops = port->tsnops;
+
+ memset(&fmiconf, 0, sizeof(struct tsn_qci_psfp_fmi));
+
+ if (!info->attrs[TSN_ATTR_QCI_FMI])
+ return -EINVAL;
+
+ na = info->attrs[TSN_ATTR_QCI_FMI];
+
+ ret = NLA_PARSE_NESTED(fmi, TSN_QCI_FMI_ATTR_MAX, na, qci_fmi_policy);
+ if (ret) {
+ pr_info("tsn: parse value TSN_QCI_FMI_ATTR_MAX error.");
+ return -EINVAL;
+ }
+
+ if (!fmi[TSN_QCI_FMI_ATTR_INDEX])
+ return -EINVAL;
+
+ index = nla_get_u32(fmi[TSN_QCI_FMI_ATTR_INDEX]);
+
+ if (fmi[TSN_QCI_FMI_ATTR_DISABLE])
+ goto loaddev;
+
+ enable = 1;
+
+ if (fmi[TSN_QCI_FMI_ATTR_CIR])
+ fmiconf.cir = nla_get_u32(fmi[TSN_QCI_FMI_ATTR_CIR]);
+
+ if (fmi[TSN_QCI_FMI_ATTR_CBS])
+ fmiconf.cbs = nla_get_u32(fmi[TSN_QCI_FMI_ATTR_CBS]);
+
+ if (fmi[TSN_QCI_FMI_ATTR_EIR])
+ fmiconf.eir = nla_get_u32(fmi[TSN_QCI_FMI_ATTR_EIR]);
+
+ if (fmi[TSN_QCI_FMI_ATTR_EBS])
+ fmiconf.ebs = nla_get_u32(fmi[TSN_QCI_FMI_ATTR_EBS]);
+
+ if (fmi[TSN_QCI_FMI_ATTR_CF])
+ fmiconf.cf = 1;
+
+ if (fmi[TSN_QCI_FMI_ATTR_CM])
+ fmiconf.cm = 1;
+
+ if (fmi[TSN_QCI_FMI_ATTR_DROPYL])
+ fmiconf.drop_on_yellow = 1;
+
+ if (fmi[TSN_QCI_FMI_ATTR_MAREDEN])
+ fmiconf.mark_red_enable = 1;
+
+ if (fmi[TSN_QCI_FMI_ATTR_MARED])
+ fmiconf.mark_red = 1;
+
+loaddev:
+
+ if (!tsnops->qci_fmi_set) {
+ tsn_simple_reply(info, TSN_CMD_REPLY,
+ netdev->name, -EPERM);
+ return -EINVAL;
+ }
+
+ ret = tsnops->qci_fmi_set(netdev, index, enable, &fmiconf);
+ if (ret < 0) {
+ tsn_simple_reply(info, TSN_CMD_REPLY, netdev->name, ret);
+ return ret;
+ }
+
+ ret = tsn_simple_reply(info, TSN_CMD_REPLY, netdev->name, 0);
+
+ if (ret)
+ return ret;
+ return 0;
+}
+
+static int tsn_qci_fmi_set(struct sk_buff *skb, struct genl_info *info)
+{
+ if (info->attrs[TSN_ATTR_IFNAME]) {
+ cmd_qci_fmi_set(info);
+ return 0;
+ }
+
+ return -1;
+}
+
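+/* Read back one flow meter instance's configuration and counters */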
+static int cmd_qci_fmi_get(struct genl_info *info)
+{
+ struct nlattr *na, *fmi[TSN_QCI_FMI_ATTR_MAX + 1], *fmiattr;
+ u32 index;
+ struct sk_buff *rep_skb;
+ int ret;
+ struct net_device *netdev;
+ struct tsn_qci_psfp_fmi fmiconf;
+ struct tsn_qci_psfp_fmi_counters counters;
+ const struct tsn_ops *tsnops;
+ struct genlmsghdr *genlhdr;
+ struct tsn_port *port;
+
+ port = tsn_init_check(info, &netdev);
+ if (!port)
+ return -ENODEV;
+
+ tsnops = port->tsnops;
+
+ if (!info->attrs[TSN_ATTR_QCI_FMI])
+ return -EINVAL;
+
+ na = info->attrs[TSN_ATTR_QCI_FMI];
+
+ ret = NLA_PARSE_NESTED(fmi, TSN_QCI_FMI_ATTR_MAX,
+ na, qci_fmi_policy);
+ if (ret) {
+ pr_info("tsn: parse value TSN_QCI_FMI_ATTR_MAX error.");
+ return -EINVAL;
+ }
+
+ if (!fmi[TSN_QCI_FMI_ATTR_INDEX])
+ return -EINVAL;
+
+ index = nla_get_u32(fmi[TSN_QCI_FMI_ATTR_INDEX]);
+
+ /* Get data from device */
+ memset(&fmiconf, 0, sizeof(struct tsn_qci_psfp_fmi));
+ memset(&counters, 0, sizeof(struct tsn_qci_psfp_fmi_counters));
+
+ if (!tsnops->qci_fmi_get) {
+ tsn_simple_reply(info, TSN_CMD_REPLY,
+ netdev->name, -EPERM);
+ return -EINVAL;
+ }
+
+ ret = tsnops->qci_fmi_get(netdev, index, &fmiconf, &counters);
+ if (ret < 0) {
+ tsn_simple_reply(info, TSN_CMD_REPLY, netdev->name, ret);
+ return ret;
+ }
+
+ genlhdr = info->genlhdr;
+
+ /* Form netlink reply data */
+ ret = tsn_prepare_reply(info, genlhdr->cmd,
+ &rep_skb, NLMSG_ALIGN(MAX_ATTR_SIZE));
+ if (ret < 0)
+ return ret;
+
+ if (nla_put_string(rep_skb, TSN_ATTR_IFNAME, netdev->name))
+ goto err;
+
+ fmiattr = nla_nest_start_noflag(rep_skb, TSN_ATTR_QCI_FMI);
+ if (!fmiattr)
+ goto err;
+
+ if (nla_put_u32(rep_skb, TSN_QCI_FMI_ATTR_INDEX, index) ||
+ nla_put_u32(rep_skb, TSN_QCI_FMI_ATTR_CIR, fmiconf.cir) ||
+ nla_put_u32(rep_skb, TSN_QCI_FMI_ATTR_CBS, fmiconf.cbs) ||
+ nla_put_u32(rep_skb, TSN_QCI_FMI_ATTR_EIR, fmiconf.eir) ||
+ nla_put_u32(rep_skb, TSN_QCI_FMI_ATTR_EBS, fmiconf.ebs))
+ goto err;
+
+ if (fmiconf.cf)
+ if (nla_put_flag(rep_skb, TSN_QCI_FMI_ATTR_CF))
+ goto err;
+
+ if (fmiconf.cm)
+ if (nla_put_flag(rep_skb, TSN_QCI_FMI_ATTR_CM))
+ goto err;
+
+ if (fmiconf.drop_on_yellow)
+ if (nla_put_flag(rep_skb, TSN_QCI_FMI_ATTR_DROPYL))
+ goto err;
+
+ if (fmiconf.mark_red_enable)
+ if (nla_put_flag(rep_skb, TSN_QCI_FMI_ATTR_MAREDEN))
+ goto err;
+
+ if (fmiconf.mark_red)
+ if (nla_put_flag(rep_skb, TSN_QCI_FMI_ATTR_MARED))
+ goto err;
+
+ if (nla_put(rep_skb, TSN_QCI_FMI_ATTR_COUNTERS,
+ sizeof(struct tsn_qci_psfp_fmi_counters), &counters))
+ goto err;
+
+ nla_nest_end(rep_skb, fmiattr);
+
+ tsn_send_reply(rep_skb, info);
+
+ return 0;
+err:
+ nlmsg_free(rep_skb);
+ tsn_simple_reply(info, TSN_CMD_REPLY, netdev->name, ret);
+ return ret;
+}
+
+static int tsn_qci_fmi_get(struct sk_buff *skb, struct genl_info *info)
+{
+ if (info->attrs[TSN_ATTR_IFNAME]) {
+ cmd_qci_fmi_get(info);
+ return 0;
+ }
+
+ return -1;
+}
+
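+/* Apply an 802.1Qbv admin configuration: base time, cycle time and
+ * extension, initial gate states and the admin gate control list.
+ */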
+static int cmd_qbv_set(struct genl_info *info)
+{
+ struct nlattr *na, *na1;
+ struct nlattr *qbv_table;
+ struct nlattr *qbv[TSN_QBV_ATTR_MAX + 1];
+ struct nlattr *qbvctrl[TSN_QBV_ATTR_CTRL_MAX + 1];
+ int rem;
+ int ret = 0;
+ struct net_device *netdev;
+ struct tsn_qbv_conf qbvconfig;
+ const struct tsn_ops *tsnops;
+ struct tsn_qbv_entry *gatelist = NULL;
+ int count = 0;
+ struct tsn_port *port;
+
+ port = tsn_init_check(info, &netdev);
+ if (!port)
+ return -ENODEV;
+
+ tsnops = port->tsnops;
+
+ memset(&qbvconfig, 0, sizeof(struct tsn_qbv_conf));
+
+ if (!info->attrs[TSN_ATTR_QBV])
+ return -EINVAL;
+
+ na = info->attrs[TSN_ATTR_QBV];
+
+ ret = NLA_PARSE_NESTED(qbv, TSN_QBV_ATTR_MAX, na, qbv_policy);
+ if (ret)
+ return -EINVAL;
+
+ if (qbv[TSN_QBV_ATTR_ENABLE])
+ qbvconfig.gate_enabled = 1;
+ else
+ goto setdrive;
+
+ if (qbv[TSN_QBV_ATTR_CONFIGCHANGE])
+ qbvconfig.config_change = 1;
+
+ if (!qbv[TSN_QBV_ATTR_ADMINENTRY]) {
+ tsn_simple_reply(info, TSN_CMD_REPLY,
+ netdev->name, -EINVAL);
+ return -1;
+ }
+
+ na1 = qbv[TSN_QBV_ATTR_ADMINENTRY];
+ ret = NLA_PARSE_NESTED(qbvctrl, TSN_QBV_ATTR_CTRL_MAX,
+ na1, qbv_ctrl_policy);
+ if (ret)
+ return -EINVAL;
+
+ if (qbvctrl[TSN_QBV_ATTR_CTRL_CYCLETIME]) {
+ qbvconfig.admin.cycle_time =
+ nla_get_u32(qbvctrl[TSN_QBV_ATTR_CTRL_CYCLETIME]);
+ }
+
+ if (qbvctrl[TSN_QBV_ATTR_CTRL_CYCLETIMEEXT]) {
+ qbvconfig.admin.cycle_time_extension =
+ nla_get_u32(qbvctrl[TSN_QBV_ATTR_CTRL_CYCLETIMEEXT]);
+ }
+
+ if (qbvctrl[TSN_QBV_ATTR_CTRL_BASETIME]) {
+ qbvconfig.admin.base_time =
+ nla_get_u64(qbvctrl[TSN_QBV_ATTR_CTRL_BASETIME]);
+ }
+
+ if (qbvctrl[TSN_QBV_ATTR_CTRL_GATESTATE]) {
+ qbvconfig.admin.gate_states =
+ nla_get_u8(qbvctrl[TSN_QBV_ATTR_CTRL_GATESTATE]);
+ }
+
+ if (qbvctrl[TSN_QBV_ATTR_CTRL_LISTCOUNT]) {
+ int listcount;
+
+ listcount = nla_get_u32(qbvctrl[TSN_QBV_ATTR_CTRL_LISTCOUNT]);
+
+ qbvconfig.admin.control_list_length = listcount;
+
+ gatelist = kmalloc_array(listcount,
+ sizeof(*gatelist),
+ GFP_KERNEL);
+ if (!gatelist)
+ return -ENOMEM;
+
+ nla_for_each_nested(qbv_table, na1, rem) {
+ struct nlattr *qbv_entry[TSN_QBV_ATTR_ENTRY_MAX + 1];
+
+ if (nla_type(qbv_table) != TSN_QBV_ATTR_CTRL_LISTENTRY)
+ continue;
+
+ ret = NLA_PARSE_NESTED(qbv_entry,
+ TSN_QBV_ATTR_ENTRY_MAX,
+ qbv_table, qbv_entry_policy);
+ if (ret)
+ return -EINVAL;
+
+ (gatelist + count)->gate_state =
+ nla_get_u8(qbv_entry[TSN_QBV_ATTR_ENTRY_GC]);
+ (gatelist + count)->time_interval =
+ nla_get_u32(qbv_entry[TSN_QBV_ATTR_ENTRY_TM]);
+ count++;
+ if (count >= listcount)
+ break;
+ }
+ }
+
+ if (gatelist)
+ qbvconfig.admin.control_list = gatelist;
+
+setdrive:
+ if (!tsnops->qbv_set) {
+ tsn_simple_reply(info, TSN_CMD_REPLY,
+ netdev->name, -EPERM);
+ goto err;
+ }
+
+ ret = tsnops->qbv_set(netdev, &qbvconfig);
+
+ /* send back */
+ if (ret < 0)
+ tsn_simple_reply(info, TSN_CMD_REPLY,
+ netdev->name, ret);
+ else
+ tsn_simple_reply(info, TSN_CMD_REPLY,
+ netdev->name, 0);
+
+err:
+ kfree(gatelist);
+ return ret;
+}
+
+static int tsn_qbv_set(struct sk_buff *skb, struct genl_info *info)
+{
+ if (info->attrs[TSN_ATTR_IFNAME]) {
+ cmd_qbv_set(info);
+ return 0;
+ }
+
+ return -1;
+}
+
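+/* Return the current 802.1Qbv admin configuration and gate control list */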
+static int cmd_qbv_get(struct genl_info *info)
+{
+ struct nlattr *qbv, *qbvadminattr;
+ struct sk_buff *rep_skb;
+ int ret;
+ int len = 0, i = 0;
+ struct net_device *netdev;
+ struct genlmsghdr *genlhdr;
+ struct tsn_qbv_conf qbvconf;
+ const struct tsn_ops *tsnops;
+ struct tsn_port *port;
+
+ port = tsn_init_check(info, &netdev);
+ if (!port)
+ return -ENODEV;
+
+ tsnops = port->tsnops;
+
+ genlhdr = info->genlhdr;
+
+ memset(&qbvconf, 0, sizeof(struct tsn_qbv_conf));
+
+ if (!tsnops->qbv_get) {
+ tsn_simple_reply(info, TSN_CMD_REPLY,
+ netdev->name, -EPERM);
+ return -1;
+ }
+
+ ret = tsnops->qbv_get(netdev, &qbvconf);
+ if (ret < 0) {
+ tsn_simple_reply(info, TSN_CMD_REPLY,
+ netdev->name, ret);
+ return ret;
+ }
+
+ ret = tsn_prepare_reply(info, genlhdr->cmd,
+ &rep_skb, NLMSG_ALIGN(MAX_ATTR_SIZE));
+ if (ret < 0)
+ return ret;
+
+ if (nla_put_string(rep_skb, TSN_ATTR_IFNAME, netdev->name))
+ goto err;
+
+ qbv = nla_nest_start_noflag(rep_skb, TSN_ATTR_QBV);
+ if (!qbv)
+ goto err;
+
+ qbvadminattr = nla_nest_start_noflag(rep_skb, TSN_QBV_ATTR_ADMINENTRY);
+ if (!qbvadminattr)
+ goto err;
+
+ if (qbvconf.admin.control_list) {
+ len = qbvconf.admin.control_list_length;
+ if (nla_put_u32(rep_skb, TSN_QBV_ATTR_CTRL_LISTCOUNT, len))
+ goto err;
+
+ for (i = 0; i < len; i++) {
+ struct nlattr *qbv_table;
+ u8 gs;
+ u32 tp;
+ int glisttype = TSN_QBV_ATTR_CTRL_LISTENTRY;
+
+ gs = (qbvconf.admin.control_list + i)->gate_state;
+ tp = (qbvconf.admin.control_list + i)->time_interval;
+
+ qbv_table =
+ nla_nest_start_noflag(rep_skb, glisttype);
+ if (!qbv_table)
+ goto err;
+
+ if (nla_put_u32(rep_skb, TSN_QBV_ATTR_ENTRY_ID, i) ||
+ nla_put_u8(rep_skb, TSN_QBV_ATTR_ENTRY_GC, gs) ||
+ nla_put_u32(rep_skb, TSN_QBV_ATTR_ENTRY_TM, tp))
+ goto err;
+ nla_nest_end(rep_skb, qbv_table);
+ }
+
+ if (qbvconf.admin.gate_states)
+ if (nla_put_u8(rep_skb, TSN_QBV_ATTR_CTRL_GATESTATE,
+ qbvconf.admin.gate_states))
+ goto err;
+
+ if (qbvconf.admin.cycle_time)
+ if (nla_put_u32(rep_skb, TSN_QBV_ATTR_CTRL_CYCLETIME,
+ qbvconf.admin.cycle_time))
+ goto err;
+
+ if (qbvconf.admin.cycle_time_extension)
+ if (nla_put_u32(rep_skb, TSN_QBV_ATTR_CTRL_CYCLETIMEEXT,
+ qbvconf.admin.cycle_time_extension))
+ goto err;
+
+ if (qbvconf.admin.base_time)
+ if (NLA_PUT_U64(rep_skb, TSN_QBV_ATTR_CTRL_BASETIME,
+ qbvconf.admin.base_time))
+ goto err;
+
+ kfree(qbvconf.admin.control_list);
+
+ } else {
+ pr_info("tsn: error get administrator data.");
+ }
+
+ nla_nest_end(rep_skb, qbvadminattr);
+
+ if (qbvconf.gate_enabled) {
+ if (nla_put_flag(rep_skb, TSN_QBV_ATTR_ENABLE))
+ goto err;
+ } else {
+ if (nla_put_flag(rep_skb, TSN_QBV_ATTR_DISABLE))
+ goto err;
+ }
+
+ if (qbvconf.maxsdu)
+ if (nla_put_u32(rep_skb, TSN_QBV_ATTR_MAXSDU, qbvconf.maxsdu))
+ goto err;
+
+ if (qbvconf.config_change)
+ if (nla_put_flag(rep_skb, TSN_QBV_ATTR_CONFIGCHANGE))
+ goto err;
+
+ nla_nest_end(rep_skb, qbv);
+
+ tsn_send_reply(rep_skb, info);
+
+ return ret;
+err:
+ nlmsg_free(rep_skb);
+ tsn_simple_reply(info, TSN_CMD_REPLY, netdev->name, ret);
+ return ret;
+}
+
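+/* Return 802.1Qbv operational status: oper gate control list, config change
+ * time/error, current time and supported list length.
+ */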
+static int cmd_qbv_status_get(struct genl_info *info)
+{
+ struct nlattr *qbv, *qbvoperattr;
+ struct sk_buff *rep_skb;
+ int ret;
+ int len = 0, i = 0;
+ struct net_device *netdev;
+ struct genlmsghdr *genlhdr;
+ struct tsn_qbv_status qbvstatus;
+ const struct tsn_ops *tsnops;
+ struct tsn_port *port;
+
+ port = tsn_init_check(info, &netdev);
+ if (!port)
+ return -ENODEV;
+
+ tsnops = port->tsnops;
+
+ genlhdr = info->genlhdr;
+
+ memset(&qbvstatus, 0, sizeof(struct tsn_qbv_status));
+
+ if (!tsnops->qbv_get_status) {
+ tsn_simple_reply(info, TSN_CMD_REPLY,
+ netdev->name, -EPERM);
+ return -1;
+ }
+
+ ret = tsnops->qbv_get_status(netdev, &qbvstatus);
+ if (ret < 0) {
+ tsn_simple_reply(info, TSN_CMD_REPLY,
+ netdev->name, ret);
+ return ret;
+ }
+
+ ret = tsn_prepare_reply(info, genlhdr->cmd,
+ &rep_skb, NLMSG_ALIGN(MAX_ATTR_SIZE));
+ if (ret < 0)
+ return ret;
+
+ if (nla_put_string(rep_skb, TSN_ATTR_IFNAME, netdev->name))
+ goto err;
+
+ qbv = nla_nest_start_noflag(rep_skb, TSN_ATTR_QBV);
+ if (!qbv)
+ goto err;
+
+ qbvoperattr = nla_nest_start_noflag(rep_skb, TSN_QBV_ATTR_OPERENTRY);
+ if (!qbvoperattr)
+ goto err;
+
+ if (qbvstatus.oper.control_list) {
+ len = qbvstatus.oper.control_list_length;
+ if (nla_put_u32(rep_skb, TSN_QBV_ATTR_CTRL_LISTCOUNT, len)) {
+ nla_nest_cancel(rep_skb, qbvoperattr);
+ goto err;
+ }
+
+ for (i = 0; i < len; i++) {
+ struct nlattr *qbv_table;
+ u8 gs;
+ u32 tp;
+ int glisttype = TSN_QBV_ATTR_CTRL_LISTENTRY;
+
+ gs = (qbvstatus.oper.control_list + i)->gate_state;
+ tp = (qbvstatus.oper.control_list + i)->time_interval;
+
+ qbv_table = nla_nest_start_noflag(rep_skb, glisttype);
+ if (!qbv_table)
+ goto err;
+
+ if (nla_put_u32(rep_skb, TSN_QBV_ATTR_ENTRY_ID, i) ||
+ nla_put_u8(rep_skb, TSN_QBV_ATTR_ENTRY_GC, gs) ||
+ nla_put_u32(rep_skb, TSN_QBV_ATTR_ENTRY_TM, tp)) {
+ nla_nest_cancel(rep_skb, qbv_table);
+ goto err;
+ }
+
+ nla_nest_end(rep_skb, qbv_table);
+ }
+
+ if (qbvstatus.oper.gate_states) {
+ if (nla_put_u8(rep_skb, TSN_QBV_ATTR_CTRL_GATESTATE,
+ qbvstatus.oper.gate_states))
+ goto err;
+ }
+
+ if (qbvstatus.oper.cycle_time) {
+ if (nla_put_u32(rep_skb, TSN_QBV_ATTR_CTRL_CYCLETIME,
+ qbvstatus.oper.cycle_time))
+ goto err;
+ }
+
+ if (qbvstatus.oper.cycle_time_extension) {
+ if (nla_put_u32(rep_skb, TSN_QBV_ATTR_CTRL_CYCLETIMEEXT,
+ qbvstatus.oper.cycle_time_extension))
+ goto err;
+ }
+
+ if (qbvstatus.oper.base_time) {
+ if (NLA_PUT_U64(rep_skb, TSN_QBV_ATTR_CTRL_BASETIME,
+ qbvstatus.oper.base_time))
+ goto err;
+ }
+
+ kfree(qbvstatus.oper.control_list);
+ } else {
+ pr_info("tsn: error get operation list data.");
+ }
+
+ nla_nest_end(rep_skb, qbvoperattr);
+
+ if (qbvstatus.config_change_time) {
+ if (NLA_PUT_U64(rep_skb, TSN_QBV_ATTR_CONFIGCHANGETIME,
+ qbvstatus.config_change_time))
+ goto err;
+ }
+
+ if (qbvstatus.tick_granularity) {
+ if (nla_put_u32(rep_skb, TSN_QBV_ATTR_GRANULARITY,
+ qbvstatus.tick_granularity))
+ goto err;
+ }
+
+ if (qbvstatus.current_time) {
+ if (NLA_PUT_U64(rep_skb, TSN_QBV_ATTR_CURRENTTIME,
+ qbvstatus.current_time))
+ goto err;
+ }
+
+ if (qbvstatus.config_pending) {
+ if (nla_put_flag(rep_skb, TSN_QBV_ATTR_CONFIGPENDING))
+ goto err;
+ }
+
+ if (qbvstatus.config_change_error) {
+ if (NLA_PUT_U64(rep_skb, TSN_QBV_ATTR_CONFIGCHANGEERROR,
+ qbvstatus.config_change_error))
+ goto err;
+ }
+
+ if (qbvstatus.supported_list_max) {
+ if (nla_put_u32(rep_skb, TSN_QBV_ATTR_LISTMAX,
+ qbvstatus.supported_list_max))
+ goto err;
+ }
+
+ nla_nest_end(rep_skb, qbv);
+
+ tsn_send_reply(rep_skb, info);
+
+ return ret;
+err:
+ nlmsg_free(rep_skb);
+ tsn_simple_reply(info, TSN_CMD_REPLY, netdev->name, ret);
+ return ret;
+}
+
+static int tsn_qbv_status_get(struct sk_buff *skb, struct genl_info *info)
+{
+ if (info->attrs[TSN_ATTR_IFNAME])
+ cmd_qbv_status_get(info);
+
+ return 0;
+}
+
+static int tsn_qbv_get(struct sk_buff *skb, struct genl_info *info)
+{
+ if (info->attrs[TSN_ATTR_IFNAME])
+ cmd_qbv_get(info);
+
+ return 0;
+}
+
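+/* Set the credit-based shaper bandwidth percentage (0-100) for a traffic class */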
+static int tsn_cbs_set(struct sk_buff *skb, struct genl_info *info)
+{
+ struct nlattr *na;
+ struct nlattr *cbsa[TSN_CBS_ATTR_MAX + 1];
+ struct net_device *netdev;
+ const struct tsn_ops *tsnops;
+ int ret;
+ u8 tc, bw;
+ struct tsn_port *port;
+
+ port = tsn_init_check(info, &netdev);
+ if (!port)
+ return -ENODEV;
+
+ tsnops = port->tsnops;
+
+ if (!info->attrs[TSN_ATTR_CBS]) {
+ tsn_simple_reply(info, TSN_CMD_REPLY,
+ netdev->name, -EINVAL);
+ return -EINVAL;
+ }
+
+ na = info->attrs[TSN_ATTR_CBS];
+
+ if (!tsnops->cbs_set) {
+ tsn_simple_reply(info, TSN_CMD_REPLY,
+ netdev->name, -EPERM);
+ return -1;
+ }
+
+ ret = NLA_PARSE_NESTED(cbsa, TSN_CBS_ATTR_MAX, na, cbs_policy);
+ if (ret) {
+ tsn_simple_reply(info, TSN_CMD_REPLY,
+ netdev->name, -EINVAL);
+ return -EINVAL;
+ }
+
+ if (!cbsa[TSN_CBS_ATTR_TC_INDEX]) {
+ pr_err("tsn: no TSN_CBS_ATTR_TC_INDEX input\n");
+ tsn_simple_reply(info, TSN_CMD_REPLY,
+ netdev->name, -EINVAL);
+ return -EINVAL;
+ }
+ tc = nla_get_u8(cbsa[TSN_CBS_ATTR_TC_INDEX]);
+
+ if (!cbsa[TSN_CBS_ATTR_BW]) {
+ pr_err("tsn: no TSN_CBS_ATTR_BW input\n");
+ tsn_simple_reply(info, TSN_CMD_REPLY,
+ netdev->name, -EINVAL);
+ return -EINVAL;
+ }
+
+ bw = nla_get_u8(cbsa[TSN_CBS_ATTR_BW]);
+ if (bw > 100) {
+ pr_err("tsn: TSN_CBS_ATTR_BW isn't in the range of 0~100\n");
+ tsn_simple_reply(info, TSN_CMD_REPLY,
+ netdev->name, -EINVAL);
+ return -EINVAL;
+ }
+
+ ret = tsnops->cbs_set(netdev, tc, bw);
+ if (ret < 0) {
+ tsn_simple_reply(info, TSN_CMD_REPLY,
+ netdev->name, ret);
+ return ret;
+ }
+
+ tsn_simple_reply(info, TSN_CMD_REPLY,
+ netdev->name, 0);
+ return 0;
+}
+
+static int tsn_cbs_get(struct sk_buff *skb, struct genl_info *info)
+{
+ struct nlattr *na, *cbsattr;
+ struct nlattr *cbsa[TSN_CBS_ATTR_MAX + 1];
+ struct net_device *netdev;
+ const struct tsn_ops *tsnops;
+ struct sk_buff *rep_skb;
+ int ret;
+ struct genlmsghdr *genlhdr;
+ u8 tc;
+ struct tsn_port *port;
+
+ port = tsn_init_check(info, &netdev);
+ if (!port)
+ return -ENODEV;
+
+ tsnops = port->tsnops;
+
+ if (!info->attrs[TSN_ATTR_CBS]) {
+ tsn_simple_reply(info, TSN_CMD_REPLY,
+ netdev->name, -EINVAL);
+ return -EINVAL;
+ }
+
+ if (!tsnops->cbs_get) {
+ tsn_simple_reply(info, TSN_CMD_REPLY,
+ netdev->name, -EPERM);
+ return -1;
+ }
+
+ na = info->attrs[TSN_ATTR_CBS];
+ ret = NLA_PARSE_NESTED(cbsa, TSN_CBS_ATTR_MAX, na, cbs_policy);
+ if (ret) {
+ pr_err("tsn: parse value TSN_CBS_ATTR_MAX error.");
+ tsn_simple_reply(info, TSN_CMD_REPLY,
+ netdev->name, -EINVAL);
+ return -EINVAL;
+ }
+
+ /* Get status data from device */
+ genlhdr = info->genlhdr;
+
+ /* Form netlink reply data */
+ ret = tsn_prepare_reply(info, genlhdr->cmd, &rep_skb,
+ NLMSG_ALIGN(MAX_ATTR_SIZE));
+ if (ret < 0)
+ goto err;
+
+ if (nla_put_string(rep_skb, TSN_ATTR_IFNAME, netdev->name))
+ goto err;
+
+ cbsattr = nla_nest_start_noflag(rep_skb, TSN_ATTR_CBS);
+ if (!cbsattr)
+ goto err;
+
+ if (!cbsa[TSN_CBS_ATTR_TC_INDEX]) {
+ pr_err("tsn: must to specify the TSN_CBS_ATTR_TC_INDEX\n");
+ ret = -EINVAL;
+ goto err;
+ }
+ tc = nla_get_u8(cbsa[TSN_CBS_ATTR_TC_INDEX]);
+
+ ret = tsnops->cbs_get(netdev, tc);
+ if (ret < 0) {
+ pr_err("tsn: cbs_get return error\n");
+ goto err;
+ }
+
+ if (nla_put_u8(rep_skb, TSN_CBS_ATTR_BW, ret & 0xFF))
+ goto err;
+
+ nla_nest_end(rep_skb, cbsattr);
+ return tsn_send_reply(rep_skb, info);
+err:
+ nlmsg_free(rep_skb);
+ tsn_simple_reply(info, TSN_CMD_REPLY, netdev->name, ret);
+ return ret;
+}
+
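+/* Set the 802.1Qbu frame preemption admin state */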
+static int cmd_qbu_set(struct genl_info *info)
+{
+ struct nlattr *na;
+ struct nlattr *qbua[TSN_QBU_ATTR_MAX + 1];
+ struct net_device *netdev;
+ const struct tsn_ops *tsnops;
+ int ret;
+ u8 preemptible = 0;
+ struct tsn_port *port;
+
+ port = tsn_init_check(info, &netdev);
+ if (!port)
+ return -ENODEV;
+
+ tsnops = port->tsnops;
+
+ if (!info->attrs[TSN_ATTR_QBU]) {
+ tsn_simple_reply(info, TSN_CMD_REPLY,
+ netdev->name, -EINVAL);
+ return -EINVAL;
+ }
+
+ na = info->attrs[TSN_ATTR_QBU];
+
+ ret = NLA_PARSE_NESTED(qbua, TSN_QBU_ATTR_MAX, na, qbu_policy);
+ if (ret) {
+ tsn_simple_reply(info, TSN_CMD_REPLY,
+ netdev->name, -EINVAL);
+ return -EINVAL;
+ }
+
+ if (qbua[TSN_QBU_ATTR_ADMIN_STATE])
+ preemptible = nla_get_u8(qbua[TSN_QBU_ATTR_ADMIN_STATE]);
+ else
+ pr_info("No preemptible TSN_QBU_ATTR_ADMIN_STATE config!\n");
+
+ if (!tsnops->qbu_set) {
+ tsn_simple_reply(info, TSN_CMD_REPLY,
+ netdev->name, -EPERM);
+ return -EINVAL;
+ }
+
+ ret = tsnops->qbu_set(netdev, preemptible);
+ if (ret < 0) {
+ tsn_simple_reply(info, TSN_CMD_REPLY,
+ netdev->name, ret);
+ return ret;
+ }
+
+ tsn_simple_reply(info, TSN_CMD_REPLY,
+ netdev->name, 0);
+ return 0;
+}
+
+static int tsn_qbu_set(struct sk_buff *skb, struct genl_info *info)
+{
+ if (info->attrs[TSN_ATTR_IFNAME])
+ return cmd_qbu_set(info);
+
+ return -1;
+}
+
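+/* Report frame preemption status: admin state, hold/release advance,
+ * preemption active and hold request.
+ */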
+static int cmd_qbu_get_status(struct genl_info *info)
+{
+ struct nlattr *qbuattr;
+ struct net_device *netdev;
+ const struct tsn_ops *tsnops;
+ struct sk_buff *rep_skb;
+ int ret;
+ struct genlmsghdr *genlhdr;
+ struct tsn_preempt_status pps;
+ struct tsn_port *port;
+
+ port = tsn_init_check(info, &netdev);
+ if (!port)
+ return -ENODEV;
+
+ tsnops = port->tsnops;
+
+ /* Get status data from device */
+ genlhdr = info->genlhdr;
+
+ memset(&pps, 0, sizeof(struct tsn_preempt_status));
+
+ if (!tsnops->qbu_get) {
+ tsn_simple_reply(info, TSN_CMD_REPLY,
+ netdev->name, -EPERM);
+ return -1;
+ }
+
+ ret = tsnops->qbu_get(netdev, &pps);
+ if (ret < 0) {
+ tsn_simple_reply(info, TSN_CMD_REPLY,
+ netdev->name, ret);
+ return ret;
+ }
+
+ /* Form netlink reply data */
+ ret = tsn_prepare_reply(info, genlhdr->cmd,
+ &rep_skb, NLMSG_ALIGN(MAX_ATTR_SIZE));
+ if (ret < 0)
+ return ret;
+
+ if (nla_put_string(rep_skb, TSN_ATTR_IFNAME, netdev->name))
+ goto err;
+
+ qbuattr = nla_nest_start_noflag(rep_skb, TSN_ATTR_QBU);
+ if (!qbuattr)
+ goto err;
+
+ if (nla_put_u8(rep_skb, TSN_QBU_ATTR_ADMIN_STATE, pps.admin_state) ||
+ nla_put_u32(rep_skb,
+ TSN_QBU_ATTR_HOLD_ADVANCE, pps.hold_advance) ||
+ nla_put_u32(rep_skb,
+ TSN_QBU_ATTR_RELEASE_ADVANCE, pps.release_advance))
+ goto err;
+
+ if (pps.preemption_active) {
+ if (nla_put_flag(rep_skb, TSN_QBU_ATTR_ACTIVE))
+ goto err;
+ }
+
+ if (nla_put_u8(rep_skb, TSN_QBU_ATTR_HOLD_REQUEST, pps.hold_request))
+ goto err;
+
+ nla_nest_end(rep_skb, qbuattr);
+
+ return tsn_send_reply(rep_skb, info);
+err:
+ nlmsg_free(rep_skb);
+ tsn_simple_reply(info, TSN_CMD_REPLY, netdev->name, ret);
+ return ret;
+}
+
+static int tsn_qbu_get_status(struct sk_buff *skb, struct genl_info *info)
+{
+ if (info->attrs[TSN_ATTR_IFNAME])
+ return cmd_qbu_get_status(info);
+
+ return -1;
+}
+
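+/* Enable or disable TSD with the requested period, max frame number and
+ * sync mode.
+ */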
+static int tsn_tsd_set(struct sk_buff *skb, struct genl_info *info)
+{
+ struct nlattr *na;
+ struct nlattr *ntsd[TSN_TSD_ATTR_MAX + 1];
+ struct net_device *netdev;
+ const struct tsn_ops *tsnops;
+ struct tsn_tsd tsd;
+ int ret;
+ struct tsn_port *port;
+
+ port = tsn_init_check(info, &netdev);
+ if (!port)
+ return -ENODEV;
+
+ tsnops = port->tsnops;
+
+ memset(&tsd, 0, sizeof(struct tsn_tsd));
+
+ if (!info->attrs[TSN_ATTR_TSD]) {
+ tsn_simple_reply(info, TSN_CMD_REPLY,
+ netdev->name, -EINVAL);
+ return -EINVAL;
+ }
+
+ na = info->attrs[TSN_ATTR_TSD];
+
+ ret = NLA_PARSE_NESTED(ntsd, TSN_TSD_ATTR_MAX, na, tsd_policy);
+ if (ret) {
+ tsn_simple_reply(info, TSN_CMD_REPLY,
+ netdev->name, -EINVAL);
+ return -EINVAL;
+ }
+
+ if (!tsnops->tsd_set) {
+ tsn_simple_reply(info, TSN_CMD_REPLY,
+ netdev->name, -EPERM);
+ return -EINVAL;
+ }
+
+ if (nla_get_flag(ntsd[TSN_TSD_ATTR_DISABLE])) {
+ tsd.enable = false;
+ } else {
+ if (ntsd[TSN_TSD_ATTR_PERIOD])
+ tsd.period = nla_get_u32(ntsd[TSN_TSD_ATTR_PERIOD]);
+
+ if (!tsd.period) {
+ tsn_simple_reply(info, TSN_CMD_REPLY,
+ netdev->name, -EINVAL);
+ return -EINVAL;
+ }
+
+ if (ntsd[TSN_TSD_ATTR_MAX_FRM_NUM])
+ tsd.maxFrameNum =
+ nla_get_u32(ntsd[TSN_TSD_ATTR_MAX_FRM_NUM]);
+
+ if (ntsd[TSN_TSD_ATTR_SYN_IMME])
+ tsd.syn_flag = 2;
+ else
+ tsd.syn_flag = 1;
+
+ tsd.enable = true;
+ }
+
+ ret = tsnops->tsd_set(netdev, &tsd);
+ if (ret < 0) {
+ tsn_simple_reply(info, TSN_CMD_REPLY,
+ netdev->name, ret);
+ return ret;
+ }
+
+ tsn_simple_reply(info, TSN_CMD_REPLY,
+ netdev->name, 0);
+ return 0;
+}
+
+static int tsn_tsd_get(struct sk_buff *skb, struct genl_info *info)
+{
+ struct nlattr *na, *tsdattr;
+ struct nlattr *tsda[TSN_TSD_ATTR_MAX + 1];
+ struct net_device *netdev;
+ const struct tsn_ops *tsnops;
+ struct sk_buff *rep_skb;
+ int ret;
+ struct genlmsghdr *genlhdr;
+ struct tsn_tsd_status tts;
+ struct tsn_port *port;
+
+ port = tsn_init_check(info, &netdev);
+ if (!port)
+ return -ENODEV;
+
+ tsnops = port->tsnops;
+
+ if (!info->attrs[TSN_ATTR_TSD]) {
+ tsn_simple_reply(info, TSN_CMD_REPLY,
+ netdev->name, -EINVAL);
+ return -EINVAL;
+ }
+
+ if (!tsnops->tsd_get) {
+ tsn_simple_reply(info, TSN_CMD_REPLY,
+ netdev->name, -EPERM);
+ return -1;
+ }
+
+ ret = tsnops->tsd_get(netdev, &tts);
+ if (ret < 0) {
+ tsn_simple_reply(info, TSN_CMD_REPLY,
+ netdev->name, ret);
+ return ret;
+ }
+
+ na = info->attrs[TSN_ATTR_TSD];
+
+ ret = NLA_PARSE_NESTED(tsda, TSN_TSD_ATTR_MAX,
+ na, tsd_policy);
+ if (ret) {
+ pr_err("tsn: parse value TSN_TSD_ATTR_MAX error.");
+ tsn_simple_reply(info, TSN_CMD_REPLY,
+ netdev->name, -EINVAL);
+ return -EINVAL;
+ }
+
+ /* Get status data from device */
+ genlhdr = info->genlhdr;
+
+ /* Form netlink reply data */
+ ret = tsn_prepare_reply(info, genlhdr->cmd, &rep_skb,
+ NLMSG_ALIGN(MAX_ATTR_SIZE));
+ if (ret < 0)
+ return ret;
+
+ if (nla_put_string(rep_skb, TSN_ATTR_IFNAME, netdev->name))
+ goto err;
+
+ tsdattr = nla_nest_start_noflag(rep_skb, TSN_ATTR_TSD);
+ if (!tsdattr)
+ goto err;
+
+ if (nla_put_u32(rep_skb, TSN_TSD_ATTR_PERIOD, tts.period) ||
+ nla_put_u32(rep_skb, TSN_TSD_ATTR_MAX_FRM_NUM, tts.maxFrameNum) ||
+ nla_put_u32(rep_skb, TSN_TSD_ATTR_CYCLE_NUM, tts.cycleNum) ||
+ nla_put_u32(rep_skb, TSN_TSD_ATTR_LOSS_STEPS, tts.loss_steps))
+ goto err;
+
+ if (!tts.enable) {
+ if (nla_put_flag(rep_skb, TSN_TSD_ATTR_DISABLE))
+ goto err;
+ } else {
+ if (nla_put_flag(rep_skb, TSN_TSD_ATTR_ENABLE))
+ goto err;
+ }
+
+ if (tts.flag == 2)
+ if (nla_put_flag(rep_skb, TSN_TSD_ATTR_SYN_IMME))
+ goto err;
+
+ nla_nest_end(rep_skb, tsdattr);
+ return tsn_send_reply(rep_skb, info);
+err:
+ nlmsg_free(rep_skb);
+ tsn_simple_reply(info, TSN_CMD_REPLY, netdev->name, ret);
+ return ret;
+}
+
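+/* Pass the cut-through queue state (TSN_CT_ATTR_QUEUE_STATE) to the driver */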
+static int tsn_ct_set(struct sk_buff *skb, struct genl_info *info)
+{
+ struct nlattr *na;
+ struct nlattr *cta[TSN_CT_ATTR_MAX + 1];
+ struct net_device *netdev;
+ const struct tsn_ops *tsnops;
+ int ret;
+ u8 queue_stat;
+ struct tsn_port *port;
+
+ port = tsn_init_check(info, &netdev);
+ if (!port)
+ return -ENODEV;
+
+ tsnops = port->tsnops;
+
+ if (!info->attrs[TSN_ATTR_CT]) {
+ tsn_simple_reply(info, TSN_CMD_REPLY,
+ netdev->name, -EINVAL);
+ return -EINVAL;
+ }
+
+ na = info->attrs[TSN_ATTR_CT];
+
+ if (!tsnops->ct_set) {
+ tsn_simple_reply(info, TSN_CMD_REPLY,
+ netdev->name, -EPERM);
+ return -1;
+ }
+
+ ret = NLA_PARSE_NESTED(cta, TSN_CT_ATTR_MAX,
+ na, ct_policy);
+ if (ret) {
+ tsn_simple_reply(info, TSN_CMD_REPLY,
+ netdev->name, -EINVAL);
+ return -EINVAL;
+ }
+
+ queue_stat = nla_get_u8(cta[TSN_CT_ATTR_QUEUE_STATE]);
+
+ ret = tsnops->ct_set(netdev, queue_stat);
+ if (ret < 0) {
+ tsn_simple_reply(info, TSN_CMD_REPLY,
+ netdev->name, ret);
+ return ret;
+ }
+
+ tsn_simple_reply(info, TSN_CMD_REPLY,
+ netdev->name, 0);
+ return 0;
+}
+
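+/* Configure 802.1CB sequence generation for a stream: input port mask,
+ * split mask, sequence number length and initial sequence number.
+ */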
+static int tsn_cbgen_set(struct sk_buff *skb, struct genl_info *info)
+{
+ struct nlattr *na;
+ struct nlattr *cbgena[TSN_CBGEN_ATTR_MAX + 1];
+ struct net_device *netdev;
+ const struct tsn_ops *tsnops;
+ int ret;
+ u32 index;
+ struct tsn_seq_gen_conf sg_conf;
+ struct tsn_port *port;
+
+ port = tsn_init_check(info, &netdev);
+ if (!port)
+ return -ENODEV;
+
+ tsnops = port->tsnops;
+
+ if (!info->attrs[TSN_ATTR_CBGEN]) {
+ tsn_simple_reply(info, TSN_CMD_REPLY,
+ netdev->name, -EINVAL);
+ return -EINVAL;
+ }
+
+ na = info->attrs[TSN_ATTR_CBGEN];
+
+ if (!tsnops->cbgen_set) {
+ tsn_simple_reply(info, TSN_CMD_REPLY,
+ netdev->name, -EPERM);
+ return -1;
+ }
+
+ ret = NLA_PARSE_NESTED(cbgena, TSN_CBGEN_ATTR_MAX,
+ na, cbgen_policy);
+ if (ret) {
+ tsn_simple_reply(info, TSN_CMD_REPLY,
+ netdev->name, -EINVAL);
+ return -EINVAL;
+ }
+
+ index = nla_get_u32(cbgena[TSN_CBGEN_ATTR_INDEX]);
+
+ memset(&sg_conf, 0, sizeof(struct tsn_seq_gen_conf));
+ sg_conf.iport_mask = nla_get_u8(cbgena[TSN_CBGEN_ATTR_PORT_MASK]);
+ sg_conf.split_mask = nla_get_u8(cbgena[TSN_CBGEN_ATTR_SPLIT_MASK]);
+ sg_conf.seq_len = nla_get_u8(cbgena[TSN_CBGEN_ATTR_SEQ_LEN]);
+ sg_conf.seq_num = nla_get_u32(cbgena[TSN_CBGEN_ATTR_SEQ_NUM]);
+
+ ret = tsnops->cbgen_set(netdev, index, &sg_conf);
+ if (ret < 0) {
+ tsn_simple_reply(info, TSN_CMD_REPLY,
+ netdev->name, ret);
+ return ret;
+ }
+
+ tsn_simple_reply(info, TSN_CMD_REPLY,
+ netdev->name, 0);
+ return 0;
+}
+
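+/* Configure 802.1CB sequence recovery: sequence number length, history
+ * length and R-TAG pop enable.
+ */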
+static int tsn_cbrec_set(struct sk_buff *skb, struct genl_info *info)
+{
+ struct nlattr *na;
+ struct nlattr *cbreca[TSN_CBREC_ATTR_MAX + 1];
+ struct net_device *netdev;
+ const struct tsn_ops *tsnops;
+ int ret;
+ u32 index;
+ struct tsn_seq_rec_conf sr_conf;
+ struct tsn_port *port;
+
+ port = tsn_init_check(info, &netdev);
+ if (!port)
+ return -ENODEV;
+
+ tsnops = port->tsnops;
+
+ if (!info->attrs[TSN_ATTR_CBREC]) {
+ tsn_simple_reply(info, TSN_CMD_REPLY,
+ netdev->name, -EINVAL);
+ return -EINVAL;
+ }
+
+ na = info->attrs[TSN_ATTR_CBREC];
+
+ if (!tsnops->cbrec_set) {
+ tsn_simple_reply(info, TSN_CMD_REPLY,
+ netdev->name, -EPERM);
+ return -1;
+ }
+
+ ret = NLA_PARSE_NESTED(cbreca, TSN_CBREC_ATTR_MAX,
+ na, cbrec_policy);
+ if (ret) {
+ tsn_simple_reply(info, TSN_CMD_REPLY,
+ netdev->name, -EINVAL);
+ return -EINVAL;
+ }
+
+ index = nla_get_u32(cbreca[TSN_CBREC_ATTR_INDEX]);
+
+ memset(&sr_conf, 0, sizeof(struct tsn_seq_rec_conf));
+ sr_conf.seq_len = nla_get_u8(cbreca[TSN_CBREC_ATTR_SEQ_LEN]);
+ sr_conf.his_len = nla_get_u8(cbreca[TSN_CBREC_ATTR_HIS_LEN]);
+ sr_conf.rtag_pop_en = nla_get_flag(cbreca[TSN_CBREC_ATTR_TAG_POP_EN]);
+
+ ret = tsnops->cbrec_set(netdev, index, &sr_conf);
+ if (ret < 0) {
+ tsn_simple_reply(info, TSN_CMD_REPLY,
+ netdev->name, ret);
+ return ret;
+ }
+
+ tsn_simple_reply(info, TSN_CMD_REPLY,
+ netdev->name, 0);
+ return 0;
+}
+
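+/* Read back per-stream 802.1CB status: sequence numbers, history and port masks */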
+static int tsn_cbstatus_get(struct sk_buff *skb, struct genl_info *info)
+{
+ struct nlattr *na;
+ struct nlattr *cba[TSN_CBSTAT_ATTR_MAX + 1];
+ struct nlattr *cbattr;
+ struct net_device *netdev;
+ const struct tsn_ops *tsnops;
+ struct sk_buff *rep_skb;
+ int ret;
+ unsigned int index;
+ struct genlmsghdr *genlhdr;
+ struct tsn_cb_status cbstat;
+ struct tsn_port *port;
+
+ port = tsn_init_check(info, &netdev);
+ if (!port)
+ return -ENODEV;
+
+ tsnops = port->tsnops;
+
+ /* Get status data from device */
+ genlhdr = info->genlhdr;
+
+ memset(&cbstat, 0, sizeof(struct tsn_cb_status));
+
+ if (!tsnops->cb_get) {
+ tsn_simple_reply(info, TSN_CMD_REPLY,
+ netdev->name, -EPERM);
+ return -1;
+ }
+
+ na = info->attrs[TSN_ATTR_CBSTAT];
+ ret = NLA_PARSE_NESTED(cba, TSN_CBSTAT_ATTR_MAX,
+ na, cbstat_policy);
+ if (ret) {
+ tsn_simple_reply(info, TSN_CMD_REPLY,
+ netdev->name, -EINVAL);
+ return -EINVAL;
+ }
+
+ index = nla_get_u32(cba[TSN_CBSTAT_ATTR_INDEX]);
+
+ ret = tsnops->cb_get(netdev, index, &cbstat);
+ if (ret < 0) {
+ tsn_simple_reply(info, TSN_CMD_REPLY,
+ netdev->name, ret);
+ return ret;
+ }
+
+ /* Form netlink reply data */
+ ret = tsn_prepare_reply(info, genlhdr->cmd,
+ &rep_skb, NLMSG_ALIGN(MAX_ATTR_SIZE));
+ if (ret < 0)
+ return ret;
+
+ if (nla_put_string(rep_skb, TSN_ATTR_IFNAME, netdev->name))
+ goto err;
+
+ cbattr = nla_nest_start_noflag(rep_skb, TSN_ATTR_CBSTAT);
+ if (!cbattr)
+ goto err;
+
+ if (nla_put_u8(rep_skb, TSN_CBSTAT_ATTR_GEN_REC, cbstat.gen_rec) ||
+ nla_put_u8(rep_skb, TSN_CBSTAT_ATTR_ERR, cbstat.err) ||
+ nla_put_u32(rep_skb, TSN_CBSTAT_ATTR_SEQ_NUM,
+ cbstat.seq_num) ||
+ nla_put_u8(rep_skb, TSN_CBSTAT_ATTR_SEQ_LEN, cbstat.seq_len) ||
+ nla_put_u8(rep_skb, TSN_CBSTAT_ATTR_SPLIT_MASK,
+ cbstat.split_mask) ||
+ nla_put_u8(rep_skb, TSN_CBSTAT_ATTR_PORT_MASK,
+ cbstat.iport_mask) ||
+ nla_put_u8(rep_skb, TSN_CBSTAT_ATTR_HIS_LEN, cbstat.his_len) ||
+ nla_put_u32(rep_skb, TSN_CBSTAT_ATTR_SEQ_HIS,
+ cbstat.seq_his))
+ goto err;
+
+ nla_nest_end(rep_skb, cbattr);
+
+ return tsn_send_reply(rep_skb, info);
+err:
+ nlmsg_free(rep_skb);
+ tsn_simple_reply(info, TSN_CMD_REPLY, netdev->name, ret);
+ return ret;
+}
+
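+/* Map a DSCP value to an internal class of service and drop precedence,
+ * or disable the mapping.
+ */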
+static int tsn_dscp_set(struct sk_buff *skb, struct genl_info *info)
+{
+ struct nlattr *na;
+ struct nlattr *dscpa[TSN_DSCP_ATTR_MAX + 1];
+ struct net_device *netdev;
+ const struct tsn_ops *tsnops;
+ int ret;
+ bool enable = 0;
+ struct tsn_port *port;
+ int dscp_ix;
+ struct tsn_qos_switch_dscp_conf dscp_conf;
+
+ port = tsn_init_check(info, &netdev);
+ if (!port)
+ return -ENODEV;
+
+ tsnops = port->tsnops;
+
+ if (!info->attrs[TSN_ATTR_DSCP]) {
+ tsn_simple_reply(info, TSN_CMD_REPLY,
+ netdev->name, -EINVAL);
+ return -EINVAL;
+ }
+
+ na = info->attrs[TSN_ATTR_DSCP];
+
+ if (!tsnops->dscp_set) {
+ tsn_simple_reply(info, TSN_CMD_REPLY,
+ netdev->name, -EPERM);
+ return -1;
+ }
+
+ ret = NLA_PARSE_NESTED(dscpa, TSN_DSCP_ATTR_MAX,
+ na, dscp_policy);
+ if (ret) {
+ tsn_simple_reply(info, TSN_CMD_REPLY,
+ netdev->name, -EINVAL);
+ return -EINVAL;
+ }
+
+ enable = 1;
+ if (dscpa[TSN_DSCP_ATTR_DISABLE])
+ enable = 0;
+ dscp_ix = nla_get_u32(dscpa[TSN_DSCP_ATTR_INDEX]);
+ dscp_conf.cos = nla_get_u32(dscpa[TSN_DSCP_ATTR_COS]);
+ dscp_conf.dpl = nla_get_u32(dscpa[TSN_DSCP_ATTR_DPL]);
+ ret = tsnops->dscp_set(netdev, enable, dscp_ix, &dscp_conf);
+ if (ret < 0) {
+ tsn_simple_reply(info, TSN_CMD_REPLY,
+ netdev->name, ret);
+ return ret;
+ }
+
+ tsn_simple_reply(info, TSN_CMD_REPLY,
+ netdev->name, 0);
+
+ return 0;
+}
+
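+/* Generic netlink command table; every command requires CAP_NET_ADMIN */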
+static const struct genl_ops tsnnl_ops[] = {
+ {
+ .cmd = TSN_CMD_ECHO,
+ .doit = tsn_echo_cmd,
+ .flags = GENL_ADMIN_PERM,
+ },
+ {
+ .cmd = TSN_CMD_CAP_GET,
+ .doit = tsn_cap_get,
+ .flags = GENL_ADMIN_PERM,
+ },
+ {
+ .cmd = TSN_CMD_QBV_SET,
+ .doit = tsn_qbv_set,
+ .flags = GENL_ADMIN_PERM,
+ },
+ {
+ .cmd = TSN_CMD_QBV_GET,
+ .doit = tsn_qbv_get,
+ .flags = GENL_ADMIN_PERM,
+ },
+ {
+ .cmd = TSN_CMD_QBV_GET_STATUS,
+ .doit = tsn_qbv_status_get,
+ .flags = GENL_ADMIN_PERM,
+ },
+ {
+ .cmd = TSN_CMD_CB_STREAMID_SET,
+ .doit = tsn_cb_streamid_set,
+ .flags = GENL_ADMIN_PERM,
+ },
+ {
+ .cmd = TSN_CMD_CB_STREAMID_GET,
+ .doit = tsn_cb_streamid_get,
+ .flags = GENL_ADMIN_PERM,
+ },
+ {
+ .cmd = TSN_CMD_CB_STREAMID_GET_COUNTS,
+ .doit = tsn_cb_streamid_counters_get,
+ .flags = GENL_ADMIN_PERM,
+ },
+ {
+ .cmd = TSN_CMD_QCI_CAP_GET,
+ .doit = tsn_qci_cap_get,
+ .flags = GENL_ADMIN_PERM,
+ },
+ {
+ .cmd = TSN_CMD_QCI_SFI_SET,
+ .doit = tsn_qci_sfi_set,
+ .flags = GENL_ADMIN_PERM,
+ },
+ {
+ .cmd = TSN_CMD_QCI_SFI_GET,
+ .doit = tsn_qci_sfi_get,
+ .flags = GENL_ADMIN_PERM,
+ },
+ {
+ .cmd = TSN_CMD_QCI_SFI_GET_COUNTS,
+ .doit = tsn_qci_sfi_counters_get,
+ .flags = GENL_ADMIN_PERM,
+ },
+ {
+ .cmd = TSN_CMD_QCI_SGI_SET,
+ .doit = tsn_qci_sgi_set,
+ .flags = GENL_ADMIN_PERM,
+ },
+ {
+ .cmd = TSN_CMD_QCI_SGI_GET,
+ .doit = tsn_qci_sgi_get,
+ .flags = GENL_ADMIN_PERM,
+ },
+ {
+ .cmd = TSN_CMD_QCI_SGI_GET_STATUS,
+ .doit = tsn_qci_sgi_status_get,
+ .flags = GENL_ADMIN_PERM,
+ },
+ {
+ .cmd = TSN_CMD_QCI_FMI_SET,
+ .doit = tsn_qci_fmi_set,
+ .flags = GENL_ADMIN_PERM,
+ },
+ {
+ .cmd = TSN_CMD_QCI_FMI_GET,
+ .doit = tsn_qci_fmi_get,
+ .flags = GENL_ADMIN_PERM,
+ },
+ {
+ .cmd = TSN_CMD_CBS_SET,
+ .doit = tsn_cbs_set,
+ .flags = GENL_ADMIN_PERM,
+ },
+ {
+ .cmd = TSN_CMD_CBS_GET,
+ .doit = tsn_cbs_get,
+ .flags = GENL_ADMIN_PERM,
+ },
+ {
+ .cmd = TSN_CMD_QBU_SET,
+ .doit = tsn_qbu_set,
+ .flags = GENL_ADMIN_PERM,
+ },
+ {
+ .cmd = TSN_CMD_QBU_GET_STATUS,
+ .doit = tsn_qbu_get_status,
+ .flags = GENL_ADMIN_PERM,
+ },
+ {
+ .cmd = TSN_CMD_TSD_SET,
+ .doit = tsn_tsd_set,
+ .flags = GENL_ADMIN_PERM,
+ },
+ {
+ .cmd = TSN_CMD_TSD_GET,
+ .doit = tsn_tsd_get,
+ .flags = GENL_ADMIN_PERM,
+ },
+ {
+ .cmd = TSN_CMD_CT_SET,
+ .doit = tsn_ct_set,
+ .flags = GENL_ADMIN_PERM,
+ },
+ {
+ .cmd = TSN_CMD_CBGEN_SET,
+ .doit = tsn_cbgen_set,
+ .flags = GENL_ADMIN_PERM,
+ },
+ {
+ .cmd = TSN_CMD_CBREC_SET,
+ .doit = tsn_cbrec_set,
+ .flags = GENL_ADMIN_PERM,
+ },
+ {
+ .cmd = TSN_CMD_CBSTAT_GET,
+ .doit = tsn_cbstatus_get,
+ .flags = GENL_ADMIN_PERM,
+ },
+ {
+ .cmd = TSN_CMD_DSCP_SET,
+ .doit = tsn_dscp_set,
+ .flags = GENL_ADMIN_PERM,
+ },
+};
+
+static const struct genl_multicast_group tsn_mcgrps[] = {
+ [TSN_MCGRP_QBV] = { .name = TSN_MULTICAST_GROUP_QBV},
+ [TSN_MCGRP_QCI] = { .name = TSN_MULTICAST_GROUP_QCI},
+};
+
+static struct genl_family tsn_family = {
+ .name = TSN_GENL_NAME,
+ .version = TSN_GENL_VERSION,
+ .maxattr = TSN_CMD_ATTR_MAX,
+ .module = THIS_MODULE,
+ .netnsok = true,
+ .ops = tsnnl_ops,
+ .n_ops = ARRAY_SIZE(tsnnl_ops),
+ .mcgrps = tsn_mcgrps,
+ .n_mcgrps = ARRAY_SIZE(tsn_mcgrps),
+};
+
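+/* Called by drivers to register a net_device and its tsn_ops with the TSN
+ * framework; groupid selects endpoint vs. switch port handling.
+ */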
+int tsn_port_register(struct net_device *netdev,
+ struct tsn_ops *tsnops, u16 groupid)
+{
+ struct tsn_port *port;
+
+ if (list_empty(&port_list)) {
+ INIT_LIST_HEAD(&port_list);
+ } else {
+ list_for_each_entry(port, &port_list, list) {
+ if (port->netdev == netdev) {
+ pr_info("TSN device already registered!\n");
+ return -1;
+ }
+ }
+ }
+
+ port = kzalloc(sizeof(*port), GFP_KERNEL);
+ if (!port)
+ return -1;
+
+ port->netdev = netdev;
+ port->groupid = groupid;
+ port->tsnops = tsnops;
+ port->nd.dev = netdev;
+
+ if (groupid < GROUP_OFFSET_SWITCH)
+ port->type = TSN_ENDPOINT;
+ else
+ port->type = TSN_SWITCH;
+
+ list_add_tail(&port->list, &port_list);
+
+ if (tsnops && tsnops->device_init)
+ port->tsnops->device_init(netdev);
+
+ return 0;
+}
+EXPORT_SYMBOL(tsn_port_register);
+
+void tsn_port_unregister(struct net_device *netdev)
+{
+ struct tsn_port *p;
+
+ list_for_each_entry(p, &port_list, list) {
+ if (!p || !p->netdev)
+ continue;
+ if (p->netdev == netdev) {
+ if (p->tsnops->device_deinit)
+ p->tsnops->device_deinit(netdev);
+ list_del(&p->list);
+ kfree(p);
+ break;
+ }
+ }
+}
+EXPORT_SYMBOL(tsn_port_unregister);
+
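+/* Broadcast a Qbv config-change notification to the TSN_MCGRP_QBV group */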
+static int tsn_multicast_to_user(unsigned long event,
+ struct tsn_notifier_info *tsn_info)
+{
+ struct sk_buff *skb;
+ struct genlmsghdr *nlh = NULL;
+ int res = 0;
+ struct tsn_qbv_conf *qbvdata;
+
+ /* If new attributes are added, please revisit this allocation */
+ skb = genlmsg_new(sizeof(*tsn_info), GFP_KERNEL);
+ if (!skb) {
+ pr_err("Allocation failure.\n");
+ return -ENOMEM;
+ }
+
+ switch (event) {
+ case TSN_QBV_CONFIGCHANGETIME_ARRIVE:
+ nlh = genlmsg_put(skb, 0, 1, &tsn_family,
+ GFP_KERNEL, TSN_CMD_QBV_SET);
+ qbvdata = &tsn_info->ntdata.qbv_notify;
+ res = NLA_PUT_U64(skb, TSN_QBV_ATTR_CTRL_BASETIME,
+ qbvdata->admin.base_time);
+
+ if (res) {
+ pr_err("put data failure!\n");
+ goto done;
+ }
+
+ res = nla_put_u32(skb, TSN_QBV_ATTR_CTRL_CYCLETIME,
+ qbvdata->admin.cycle_time);
+ if (res) {
+ pr_err("put data failure!\n");
+ goto done;
+ }
+
+ if (qbvdata->gate_enabled)
+ res = nla_put_flag(skb, TSN_QBV_ATTR_ENABLE +
+ TSN_QBV_ATTR_CTRL_MAX);
+ else
+ res = nla_put_flag(skb, TSN_QBV_ATTR_DISABLE +
+ TSN_QBV_ATTR_CTRL_MAX);
+ if (res) {
+ pr_err("put data failure!\n");
+ goto done;
+ }
+
+ res = nla_put_u32(skb, TSN_QBV_ATTR_CTRL_UNSPEC,
+ tsn_info->dev->ifindex);
+ if (res) {
+ pr_err("put data failure!\n");
+ goto done;
+ }
+
+ break;
+ default:
+ pr_info("event not supportted!\n");
+ break;
+ }
+
+ if (!nlh)
+ goto done;
+
+ (void)genlmsg_end(skb, nlh);
+
+ res = genlmsg_multicast_allns(&tsn_family, skb, 0,
+ TSN_MCGRP_QBV, GFP_KERNEL);
+ skb = NULL;
+ if (res && res != -ESRCH) {
+ pr_err("genlmsg_multicast_allns error: %d\n", res);
+ goto done;
+ }
+
+ if (res == -ESRCH)
+ res = 0;
+
+done:
+ if (skb) {
+ nlmsg_free(skb);
+ skb = NULL;
+ }
+
+ return res;
+}
+
+/* called with RTNL or RCU */
+static int tsn_event(struct notifier_block *unused,
+ unsigned long event, void *ptr)
+{
+ struct tsn_notifier_info *tsn_info;
+ int err = NOTIFY_DONE;
+
+ switch (event) {
+ case TSN_QBV_CONFIGCHANGETIME_ARRIVE:
+ tsn_info = ptr;
+ err = tsn_multicast_to_user(event, tsn_info);
+ if (err) {
+ err = notifier_from_errno(err);
+ break;
+ }
+ break;
+ default:
+ pr_info("event not supportted!\n");
+ break;
+ }
+
+ return err;
+}
+
+static struct notifier_block tsn_notifier = {
+ .notifier_call = tsn_event,
+};
+
+static int __init tsn_genetlink_init(void)
+{
+ int ret;
+
+ pr_info("tsn generic netlink module v%d init...\n", TSN_GENL_VERSION);
+
+ ret = genl_register_family(&tsn_family);
+
+ if (ret != 0) {
+ pr_info("failed to init tsn generic netlink example module\n");
+ return ret;
+ }
+
+ register_tsn_notifier(&tsn_notifier);
+
+ return 0;
+}
+
+static void __exit tsn_genetlink_exit(void)
+{
+ int ret;
+
+ ret = genl_unregister_family(&tsn_family);
+ if (ret != 0)
+ pr_info("failed to unregister family:%i\n", ret);
+
+ unregister_tsn_notifier(&tsn_notifier);
+}
+
+module_init(tsn_genetlink_init);
+module_exit(tsn_genetlink_exit);
+MODULE_LICENSE("GPL");
diff --git a/net/xdp/xdp_umem.c b/net/xdp/xdp_umem.c
index 65f918d29531..036e03b22066 100644
--- a/net/xdp/xdp_umem.c
+++ b/net/xdp/xdp_umem.c
@@ -152,6 +152,7 @@ static int xdp_umem_account_pages(struct xdp_umem *umem)
static int xdp_umem_reg(struct xdp_umem *umem, struct xdp_umem_reg *mr)
{
+ u32 tx_headroom = mr->tx_headroom;
bool unaligned_chunks = mr->flags & XDP_UMEM_UNALIGNED_CHUNK_FLAG;
u32 chunk_size = mr->chunk_size, headroom = mr->headroom;
u64 addr = mr->addr, size = mr->len;
@@ -209,6 +210,7 @@ static int xdp_umem_reg(struct xdp_umem *umem, struct xdp_umem_reg *mr)
umem->pgs = NULL;
umem->user = NULL;
umem->flags = mr->flags;
+ umem->tx_headroom = tx_headroom;
INIT_LIST_HEAD(&umem->xsk_dma_list);
refcount_set(&umem->users, 1);
diff --git a/net/xdp/xsk.c b/net/xdp/xsk.c
index 330dd498fc61..716fd22bc3ce 100644
--- a/net/xdp/xsk.c
+++ b/net/xdp/xsk.c
@@ -1054,6 +1054,7 @@ struct xdp_umem_reg_v1 {
__u64 len; /* Length of packet data area */
__u32 chunk_size;
__u32 headroom;
+ __u32 tx_headroom;
};
static int xsk_setsockopt(struct socket *sock, int level, int optname,
diff --git a/net/xdp/xsk_buff_pool.c b/net/xdp/xsk_buff_pool.c
index 2aa559f1c185..0c05f869728f 100644
--- a/net/xdp/xsk_buff_pool.c
+++ b/net/xdp/xsk_buff_pool.c
@@ -77,6 +77,7 @@ struct xsk_buff_pool *xp_create_and_assign_umem(struct xdp_sock *xs,
pool->heads_cnt = umem->chunks;
pool->free_heads_cnt = umem->chunks;
pool->headroom = umem->headroom;
+ pool->tx_headroom = umem->tx_headroom;
pool->chunk_size = umem->chunk_size;
pool->unaligned = umem->flags & XDP_UMEM_UNALIGNED_CHUNK_FLAG;
pool->frame_len = umem->chunk_size - umem->headroom -
@@ -524,6 +525,93 @@ struct xdp_buff *xp_alloc(struct xsk_buff_pool *pool)
}
EXPORT_SYMBOL(xp_alloc);
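+/* Allocate up to @max new buffers by consuming addresses from the fill queue */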
+static u32 xp_alloc_new_from_fq(struct xsk_buff_pool *pool, struct xdp_buff **xdp, u32 max)
+{
+ u32 i, cached_cons, nb_entries;
+
+ if (max > pool->free_heads_cnt)
+ max = pool->free_heads_cnt;
+ max = xskq_cons_nb_entries(pool->fq, max);
+
+ cached_cons = pool->fq->cached_cons;
+ nb_entries = max;
+ i = max;
+ while (i--) {
+ struct xdp_buff_xsk *xskb;
+ u64 addr;
+ bool ok;
+
+ __xskq_cons_read_addr_unchecked(pool->fq, cached_cons++, &addr);
+
+ ok = pool->unaligned ? xp_check_unaligned(pool, &addr) :
+ xp_check_aligned(pool, &addr);
+ if (unlikely(!ok)) {
+ pool->fq->invalid_descs++;
+ nb_entries--;
+ continue;
+ }
+
+ xskb = pool->free_heads[--pool->free_heads_cnt];
+ *xdp = &xskb->xdp;
+ xskb->orig_addr = addr;
+ xskb->xdp.data_hard_start = pool->addrs + addr + pool->headroom;
+ xskb->frame_dma = (pool->dma_pages[addr >> PAGE_SHIFT] &
+ ~XSK_NEXT_PG_CONTIG_MASK) + (addr & ~PAGE_MASK);
+ xskb->dma = xskb->frame_dma + pool->headroom + XDP_PACKET_HEADROOM;
+ xdp++;
+ }
+
+ xskq_cons_release_n(pool->fq, max);
+ return nb_entries;
+}
+
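+/* Reuse up to @nb_entries buffers from the pool's free list */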
+static u32 xp_alloc_reused(struct xsk_buff_pool *pool, struct xdp_buff **xdp, u32 nb_entries)
+{
+ struct xdp_buff_xsk *xskb;
+ u32 i;
+
+ nb_entries = min_t(u32, nb_entries, pool->free_list_cnt);
+
+ i = nb_entries;
+ while (i--) {
+ xskb = list_first_entry(&pool->free_list, struct xdp_buff_xsk, free_list_node);
+ list_del(&xskb->free_list_node);
+
+ *xdp = &xskb->xdp;
+ xdp++;
+ }
+ pool->free_list_cnt -= nb_entries;
+
+ return nb_entries;
+}
+
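+/* Batched allocation: take buffers from the free list first, then refill
+ * from the fill queue; falls back to xp_alloc() when DMA sync is needed.
+ */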
+u32 xp_alloc_batch(struct xsk_buff_pool *pool, struct xdp_buff **xdp, u32 max)
+{
+ u32 nb_entries1 = 0, nb_entries2;
+
+ if (unlikely(pool->dma_need_sync)) {
+ /* Slow path */
+ *xdp = xp_alloc(pool);
+ return !!*xdp;
+ }
+
+ if (unlikely(pool->free_list_cnt)) {
+ nb_entries1 = xp_alloc_reused(pool, xdp, max);
+ if (nb_entries1 == max)
+ return nb_entries1;
+
+ max -= nb_entries1;
+ xdp += nb_entries1;
+ }
+
+ nb_entries2 = xp_alloc_new_from_fq(pool, xdp, max);
+ if (!nb_entries2)
+ pool->fq->queue_empty_descs++;
+
+ return nb_entries1 + nb_entries2;
+}
+EXPORT_SYMBOL(xp_alloc_batch);
+
bool xp_can_alloc(struct xsk_buff_pool *pool, u32 count)
{
if (pool->free_list_cnt >= count)
diff --git a/net/xdp/xsk_diag.c b/net/xdp/xsk_diag.c
index c014217f5fa7..ec314cd92542 100644
--- a/net/xdp/xsk_diag.c
+++ b/net/xdp/xsk_diag.c
@@ -65,6 +65,7 @@ static int xsk_diag_put_umem(const struct xdp_sock *xs, struct sk_buff *nlskb)
if (umem->zc)
du.flags |= XDP_DU_F_ZEROCOPY;
du.refs = refcount_read(&umem->users);
+ du.tx_headroom = umem->tx_headroom;
err = nla_put(nlskb, XDP_DIAG_UMEM, sizeof(du), &du);
if (!err && pool && pool->fq)
diff --git a/net/xdp/xsk_queue.h b/net/xdp/xsk_queue.h
index 6b4df83aa28f..e188fb03a97e 100644
--- a/net/xdp/xsk_queue.h
+++ b/net/xdp/xsk_queue.h
@@ -111,14 +111,18 @@ struct xsk_queue {
/* Functions that read and validate content from consumer rings. */
-static inline bool xskq_cons_read_addr_unchecked(struct xsk_queue *q, u64 *addr)
+static inline void __xskq_cons_read_addr_unchecked(struct xsk_queue *q, u32 cached_cons, u64 *addr)
{
struct xdp_umem_ring *ring = (struct xdp_umem_ring *)q->ring;
+ u32 idx = cached_cons & q->ring_mask;
- if (q->cached_cons != q->cached_prod) {
- u32 idx = q->cached_cons & q->ring_mask;
+ *addr = ring->desc[idx];
+}
- *addr = ring->desc[idx];
+static inline bool xskq_cons_read_addr_unchecked(struct xsk_queue *q, u64 *addr)
+{
+ if (q->cached_cons != q->cached_prod) {
+ __xskq_cons_read_addr_unchecked(q, q->cached_cons, addr);
return true;
}