author		Patrick McHardy <kaber@trash.net>	2008-07-14 22:49:30 -0700
committer	David S. Miller <davem@davemloft.net>	2008-07-14 22:49:30 -0700
commit		bc1d0411b804ad190cdadabac48a10067f17b9e6 (patch)
tree		0a38da614b6bb46f2ffa2378aca25043a2b33a7c /net
parent		6aa895b047720f71ec4eb11452f7c3ce8426941f (diff)
vlan: deliver packets received with VLAN acceleration to network taps
When VLAN header stripping is used, packets currently bypass packet
sockets (and other network taps) completely. For locally existing VLANs
they appear directly on the VLAN device; for unknown VLANs they are
silently dropped.

Add a new function netif_nit_deliver() to deliver incoming packets to
all network interface taps and use it in __vlan_hwaccel_rx() to make
VLAN packets visible on the underlying device.

Signed-off-by: Patrick McHardy <kaber@trash.net>
Signed-off-by: David S. Miller <davem@davemloft.net>
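For context only (not part of this patch): the "network taps" referred to
here are protocol handlers registered on the global ptype_all list via
dev_add_pack() with type ETH_P_ALL, which is how packet sockets (e.g. the
AF_PACKET socket used by tcpdump) get a copy of every frame;
netif_nit_deliver() simply walks that list. Below is a minimal, hypothetical
sketch of such an in-kernel tap module; names like example_tap_rcv are
illustrative, and the vlan_tci comment assumes this patch is applied.

/* Illustrative sketch of a ptype_all tap, not part of the patch. */
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/if_ether.h>

static int example_tap_rcv(struct sk_buff *skb, struct net_device *dev,
			   struct packet_type *pt, struct net_device *orig_dev)
{
	/* With this patch, VLAN-accelerated frames also reach taps like
	 * this one, with skb->vlan_tci still set and skb->dev still the
	 * underlying (non-VLAN) device. */
	pr_debug("tap: dev=%s len=%u\n", dev->name, skb->len);

	/* deliver_skb() took an extra reference for us; release it. */
	kfree_skb(skb);
	return 0;
}

static struct packet_type example_tap = {
	.type = __constant_htons(ETH_P_ALL),	/* match all protocols */
	.dev  = NULL,				/* NULL: all devices */
	.func = example_tap_rcv,
};

static int __init example_tap_init(void)
{
	dev_add_pack(&example_tap);	/* ETH_P_ALL handlers go on ptype_all */
	return 0;
}

static void __exit example_tap_exit(void)
{
	dev_remove_pack(&example_tap);
}

module_init(example_tap_init);
module_exit(example_tap_exit);
MODULE_LICENSE("GPL");

Note that handlers on ptype_all see a shared skb reference and must not
modify the buffer.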
Diffstat (limited to 'net')
-rw-r--r--	net/8021q/vlan_core.c	4
-rw-r--r--	net/core/dev.c	27
2 files changed, 31 insertions, 0 deletions
diff --git a/net/8021q/vlan_core.c b/net/8021q/vlan_core.c
index 68df12d3664b..916061f681b6 100644
--- a/net/8021q/vlan_core.c
+++ b/net/8021q/vlan_core.c
@@ -14,6 +14,9 @@ int __vlan_hwaccel_rx(struct sk_buff *skb, struct vlan_group *grp,
 		return NET_RX_DROP;
 	}
 
+	skb->vlan_tci = vlan_tci;
+	netif_nit_deliver(skb);
+
 	skb->dev = vlan_group_get_device(grp, vlan_tci & VLAN_VID_MASK);
 	if (skb->dev == NULL) {
 		dev_kfree_skb_any(skb);
@@ -22,6 +25,7 @@ int __vlan_hwaccel_rx(struct sk_buff *skb, struct vlan_group *grp,
 		return NET_RX_SUCCESS;
 	}
 	skb->dev->last_rx = jiffies;
+	skb->vlan_tci = 0;
 
 	stats = &skb->dev->stats;
 	stats->rx_packets++;
diff --git a/net/core/dev.c b/net/core/dev.c
index a29a359b15d1..feaab4898a5b 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -2068,6 +2068,33 @@ out:
 }
 #endif
 
+/*
+ *	netif_nit_deliver - deliver received packets to network taps
+ *	@skb: buffer
+ *
+ *	This function is used to deliver incoming packets to network
+ *	taps. It should be used when the normal netif_receive_skb path
+ *	is bypassed, for example because of VLAN acceleration.
+ */
+void netif_nit_deliver(struct sk_buff *skb)
+{
+	struct packet_type *ptype;
+
+	if (list_empty(&ptype_all))
+		return;
+
+	skb_reset_network_header(skb);
+	skb_reset_transport_header(skb);
+	skb->mac_len = skb->network_header - skb->mac_header;
+
+	rcu_read_lock();
+	list_for_each_entry_rcu(ptype, &ptype_all, list) {
+		if (!ptype->dev || ptype->dev == skb->dev)
+			deliver_skb(skb, ptype, skb->dev);
+	}
+	rcu_read_unlock();
+}
+
 /**
  *	netif_receive_skb - process receive buffer from network
  *	@skb: buffer to process