Diffstat (limited to 'net')
-rw-r--r--  net/802/Makefile | 1
-rw-r--r--  net/802/p8022.c | 3
-rw-r--r--  net/802/stp.c | 2
-rw-r--r--  net/802/tr.c | 670
-rw-r--r--  net/8021q/vlan.c | 10
-rw-r--r--  net/8021q/vlan_core.c | 3
-rw-r--r--  net/8021q/vlan_dev.c | 12
-rw-r--r--  net/Kconfig | 11
-rw-r--r--  net/Makefile | 2
-rw-r--r--  net/atm/ioctl.c | 8
-rw-r--r--  net/atm/lec.c | 144
-rw-r--r--  net/atm/lec.h | 1
-rw-r--r--  net/atm/mpc.c | 3
-rw-r--r--  net/batman-adv/bat_debugfs.c | 4
-rw-r--r--  net/batman-adv/bat_iv_ogm.c | 183
-rw-r--r--  net/batman-adv/bat_sysfs.c | 100
-rw-r--r--  net/batman-adv/bridge_loop_avoidance.c | 2
-rw-r--r--  net/batman-adv/bridge_loop_avoidance.h | 2
-rw-r--r--  net/batman-adv/gateway_client.c | 6
-rw-r--r--  net/batman-adv/hard-interface.c | 123
-rw-r--r--  net/batman-adv/main.c | 124
-rw-r--r--  net/batman-adv/main.h | 8
-rw-r--r--  net/batman-adv/originator.c | 52
-rw-r--r--  net/batman-adv/originator.h | 7
-rw-r--r--  net/batman-adv/packet.h | 1
-rw-r--r--  net/batman-adv/routing.c | 49
-rw-r--r--  net/batman-adv/routing.h | 4
-rw-r--r--  net/batman-adv/send.c | 6
-rw-r--r--  net/batman-adv/translation-table.c | 23
-rw-r--r--  net/batman-adv/translation-table.h | 4
-rw-r--r--  net/batman-adv/types.h | 19
-rw-r--r--  net/batman-adv/unicast.c | 8
-rw-r--r--  net/bluetooth/bnep/core.c | 6
-rw-r--r--  net/bridge/br_device.c | 2
-rw-r--r--  net/bridge/br_fdb.c | 14
-rw-r--r--  net/bridge/br_input.c | 2
-rw-r--r--  net/bridge/br_multicast.c | 4
-rw-r--r--  net/bridge/br_netfilter.c | 2
-rw-r--r--  net/bridge/br_stp_bpdu.c | 2
-rw-r--r--  net/bridge/br_stp_if.c | 11
-rw-r--r--  net/bridge/netfilter/ebt_stp.c | 4
-rw-r--r--  net/caif/caif_socket.c | 10
-rw-r--r--  net/core/dev.c | 83
-rw-r--r--  net/core/drop_monitor.c | 56
-rw-r--r--  net/core/ethtool.c | 71
-rw-r--r--  net/core/neighbour.c | 13
-rw-r--r--  net/core/net_namespace.c | 6
-rw-r--r--  net/core/netprio_cgroup.c | 6
-rw-r--r--  net/core/pktgen.c | 58
-rw-r--r--  net/core/rtnetlink.c | 8
-rw-r--r--  net/core/skbuff.c | 197
-rw-r--r--  net/core/sock.c | 39
-rw-r--r--  net/dccp/proto.c | 4
-rw-r--r--  net/decnet/dn_fib.c | 5
-rw-r--r--  net/decnet/dn_neigh.c | 22
-rw-r--r--  net/decnet/dn_nsp_in.c | 11
-rw-r--r--  net/decnet/dn_nsp_out.c | 5
-rw-r--r--  net/decnet/dn_route.c | 15
-rw-r--r--  net/decnet/dn_table.c | 4
-rw-r--r--  net/decnet/netfilter/dn_rtmsg.c | 3
-rw-r--r--  net/dsa/slave.c | 10
-rw-r--r--  net/econet/Kconfig | 36
-rw-r--r--  net/econet/Makefile | 7
-rw-r--r--  net/econet/af_econet.c | 1172
-rw-r--r--  net/ethernet/eth.c | 5
-rw-r--r--  net/ieee802154/nl-phy.c | 9
-rw-r--r--  net/ipv4/Kconfig | 8
-rw-r--r--  net/ipv4/ah4.c | 4
-rw-r--r--  net/ipv4/arp.c | 22
-rw-r--r--  net/ipv4/devinet.c | 5
-rw-r--r--  net/ipv4/fib_trie.c | 2
-rw-r--r--  net/ipv4/icmp.c | 20
-rw-r--r--  net/ipv4/inet_timewait_sock.c | 4
-rw-r--r--  net/ipv4/ip_fragment.c | 39
-rw-r--r--  net/ipv4/ip_input.c | 13
-rw-r--r--  net/ipv4/ip_options.c | 6
-rw-r--r--  net/ipv4/ip_output.c | 4
-rw-r--r--  net/ipv4/ipconfig.c | 18
-rw-r--r--  net/ipv4/ipmr.c | 3
-rw-r--r--  net/ipv4/netfilter/arp_tables.c | 5
-rw-r--r--  net/ipv4/netfilter/ip_tables.c | 3
-rw-r--r--  net/ipv4/netfilter/ipt_CLUSTERIP.c | 3
-rw-r--r--  net/ipv4/netfilter/nf_nat_h323.c | 26
-rw-r--r--  net/ipv4/netfilter/nf_nat_snmp_basic.c | 8
-rw-r--r--  net/ipv4/route.c | 57
-rw-r--r--  net/ipv4/tcp.c | 90
-rw-r--r--  net/ipv4/tcp_cong.c | 6
-rw-r--r--  net/ipv4/tcp_hybla.c | 10
-rw-r--r--  net/ipv4/tcp_input.c | 376
-rw-r--r--  net/ipv4/tcp_ipv4.c | 37
-rw-r--r--  net/ipv4/tcp_minisocks.c | 24
-rw-r--r--  net/ipv4/tcp_output.c | 82
-rw-r--r--  net/ipv4/udp.c | 8
-rw-r--r--  net/ipv6/Kconfig | 4
-rw-r--r--  net/ipv6/addrconf.c | 101
-rw-r--r--  net/ipv6/addrlabel.c | 26
-rw-r--r--  net/ipv6/af_inet6.c | 53
-rw-r--r--  net/ipv6/ah6.c | 30
-rw-r--r--  net/ipv6/anycast.c | 12
-rw-r--r--  net/ipv6/datagram.c | 4
-rw-r--r--  net/ipv6/esp6.c | 14
-rw-r--r--  net/ipv6/exthdrs.c | 73
-rw-r--r--  net/ipv6/exthdrs_core.c | 2
-rw-r--r--  net/ipv6/icmp.c | 22
-rw-r--r--  net/ipv6/ip6_fib.c | 25
-rw-r--r--  net/ipv6/ip6_flowlabel.c | 24
-rw-r--r--  net/ipv6/ip6_input.c | 9
-rw-r--r--  net/ipv6/ip6_output.c | 16
-rw-r--r--  net/ipv6/ip6_tunnel.c | 58
-rw-r--r--  net/ipv6/ip6mr.c | 5
-rw-r--r--  net/ipv6/ipcomp6.c | 15
-rw-r--r--  net/ipv6/mcast.c | 69
-rw-r--r--  net/ipv6/mip6.c | 32
-rw-r--r--  net/ipv6/ndisc.c | 243
-rw-r--r--  net/ipv6/netfilter/ip6_tables.c | 39
-rw-r--r--  net/ipv6/netfilter/ip6t_REJECT.c | 6
-rw-r--r--  net/ipv6/netfilter/ip6t_ah.c | 4
-rw-r--r--  net/ipv6/netfilter/ip6t_frag.c | 4
-rw-r--r--  net/ipv6/netfilter/ip6t_hbh.c | 4
-rw-r--r--  net/ipv6/netfilter/ip6t_rt.c | 4
-rw-r--r--  net/ipv6/netfilter/ip6table_mangle.c | 3
-rw-r--r--  net/ipv6/netfilter/nf_conntrack_l3proto_ipv6.c | 3
-rw-r--r--  net/ipv6/netfilter/nf_conntrack_reasm.c | 7
-rw-r--r--  net/ipv6/raw.c | 10
-rw-r--r--  net/ipv6/reassembly.c | 41
-rw-r--r--  net/ipv6/route.c | 24
-rw-r--r--  net/ipv6/sit.c | 12
-rw-r--r--  net/ipv6/tcp_ipv6.c | 14
-rw-r--r--  net/ipv6/tunnel6.c | 10
-rw-r--r--  net/ipv6/udp.c | 6
-rw-r--r--  net/ipx/af_ipx.c | 14
-rw-r--r--  net/l2tp/l2tp_core.c | 219
-rw-r--r--  net/l2tp/l2tp_core.h | 36
-rw-r--r--  net/l2tp/l2tp_debugfs.c | 6
-rw-r--r--  net/l2tp/l2tp_eth.c | 15
-rw-r--r--  net/l2tp/l2tp_ip.c | 15
-rw-r--r--  net/l2tp/l2tp_ip6.c | 15
-rw-r--r--  net/l2tp/l2tp_netlink.c | 4
-rw-r--r--  net/l2tp/l2tp_ppp.c | 128
-rw-r--r--  net/lapb/lapb_iface.c | 22
-rw-r--r--  net/lapb/lapb_in.c | 320
-rw-r--r--  net/lapb/lapb_out.c | 38
-rw-r--r--  net/lapb/lapb_subr.c | 28
-rw-r--r--  net/lapb/lapb_timer.c | 32
-rw-r--r--  net/llc/af_llc.c | 14
-rw-r--r--  net/llc/llc_output.c | 3
-rw-r--r--  net/llc/llc_sap.c | 4
-rw-r--r--  net/mac80211/agg-rx.c | 13
-rw-r--r--  net/mac80211/cfg.c | 2
-rw-r--r--  net/mac80211/ht.c | 8
-rw-r--r--  net/mac80211/ibss.c | 22
-rw-r--r--  net/mac80211/ieee80211_i.h | 2
-rw-r--r--  net/mac80211/iface.c | 2
-rw-r--r--  net/mac80211/mesh.c | 4
-rw-r--r--  net/mac80211/mesh_hwmp.c | 14
-rw-r--r--  net/mac80211/mesh_pathtbl.c | 12
-rw-r--r--  net/mac80211/mlme.c | 41
-rw-r--r--  net/mac80211/rx.c | 50
-rw-r--r--  net/mac80211/scan.c | 2
-rw-r--r--  net/mac80211/sta_info.c | 8
-rw-r--r--  net/mac80211/sta_info.h | 2
-rw-r--r--  net/mac80211/status.c | 2
-rw-r--r--  net/mac80211/tx.c | 28
-rw-r--r--  net/mac802154/Kconfig | 16
-rw-r--r--  net/mac802154/Makefile | 2
-rw-r--r--  net/mac802154/ieee802154_dev.c | 294
-rw-r--r--  net/mac802154/mac802154.h | 109
-rw-r--r--  net/mac802154/mac_cmd.c | 45
-rw-r--r--  net/mac802154/mib.c | 93
-rw-r--r--  net/mac802154/monitor.c | 116
-rw-r--r--  net/mac802154/rx.c | 114
-rw-r--r--  net/mac802154/tx.c | 116
-rw-r--r--  net/netfilter/Kconfig | 15
-rw-r--r--  net/netfilter/Makefile | 1
-rw-r--r--  net/netfilter/ipset/ip_set_bitmap_ipmac.c | 4
-rw-r--r--  net/netfilter/ipset/ip_set_hash_ip.c | 10
-rw-r--r--  net/netfilter/ipset/ip_set_hash_ipport.c | 10
-rw-r--r--  net/netfilter/ipset/ip_set_hash_ipportip.c | 10
-rw-r--r--  net/netfilter/ipset/ip_set_hash_ipportnet.c | 10
-rw-r--r--  net/netfilter/ipset/ip_set_hash_net.c | 10
-rw-r--r--  net/netfilter/ipset/ip_set_hash_netiface.c | 10
-rw-r--r--  net/netfilter/ipset/ip_set_hash_netport.c | 10
-rw-r--r--  net/netfilter/nf_conntrack_amanda.c | 3
-rw-r--r--  net/netfilter/nf_conntrack_core.c | 5
-rw-r--r--  net/netfilter/nf_conntrack_expect.c | 4
-rw-r--r--  net/netfilter/nf_conntrack_h323_main.c | 13
-rw-r--r--  net/netfilter/nf_conntrack_irc.c | 8
-rw-r--r--  net/netfilter/nf_conntrack_proto_tcp.c | 3
-rw-r--r--  net/netfilter/nfnetlink_queue.c | 9
-rw-r--r--  net/netfilter/xt_CT.c | 1
-rw-r--r--  net/netfilter/xt_HMARK.c | 362
-rw-r--r--  net/netfilter/xt_TCPMSS.c | 10
-rw-r--r--  net/netfilter/xt_TPROXY.c | 4
-rw-r--r--  net/netfilter/xt_hashlimit.c | 132
-rw-r--r--  net/netfilter/xt_limit.c | 5
-rw-r--r--  net/netfilter/xt_mac.c | 2
-rw-r--r--  net/netfilter/xt_set.c | 15
-rw-r--r--  net/netfilter/xt_socket.c | 4
-rw-r--r--  net/openvswitch/datapath.c | 29
-rw-r--r--  net/openvswitch/flow.c | 3
-rw-r--r--  net/openvswitch/vport-netdev.c | 6
-rw-r--r--  net/sched/Kconfig | 22
-rw-r--r--  net/sched/Makefile | 2
-rw-r--r--  net/sched/act_csum.c | 2
-rw-r--r--  net/sched/act_ipt.c | 7
-rw-r--r--  net/sched/act_mirred.c | 5
-rw-r--r--  net/sched/cls_u32.c | 3
-rw-r--r--  net/sched/ematch.c | 4
-rw-r--r--  net/sched/sch_api.c | 10
-rw-r--r--  net/sched/sch_atm.c | 4
-rw-r--r--  net/sched/sch_codel.c | 276
-rw-r--r--  net/sched/sch_drr.c | 4
-rw-r--r--  net/sched/sch_fq_codel.c | 626
-rw-r--r--  net/sched/sch_generic.c | 11
-rw-r--r--  net/sched/sch_gred.c | 12
-rw-r--r--  net/sched/sch_hfsc.c | 2
-rw-r--r--  net/sched/sch_htb.c | 2
-rw-r--r--  net/sctp/output.c | 4
-rw-r--r--  net/sctp/sm_sideeffect.c | 5
-rw-r--r--  net/sctp/sm_statefuns.c | 18
-rw-r--r--  net/sctp/socket.c | 6
-rw-r--r--  net/sctp/transport.c | 17
-rw-r--r--  net/socket.c | 3
-rw-r--r--  net/sunrpc/auth_gss/gss_mech_switch.c | 7
-rw-r--r--  net/sunrpc/clnt.c | 4
-rw-r--r--  net/sunrpc/rpc_pipe.c | 7
-rw-r--r--  net/sunrpc/svc.c | 18
-rw-r--r--  net/sunrpc/svc_xprt.c | 13
-rw-r--r--  net/sunrpc/svcsock.c | 28
-rw-r--r--  net/sysctl_net.c | 4
-rw-r--r--  net/wireless/ibss.c | 2
-rw-r--r--  net/wireless/lib80211_crypt_ccmp.c | 33
-rw-r--r--  net/wireless/lib80211_crypt_tkip.c | 50
-rw-r--r--  net/wireless/mlme.c | 31
-rw-r--r--  net/wireless/scan.c | 2
-rw-r--r--  net/wireless/util.c | 11
-rw-r--r--  net/wireless/wext-sme.c | 2
-rw-r--r--  net/wireless/wext-spy.c | 2
-rw-r--r--  net/xfrm/Kconfig | 13
-rw-r--r--  net/xfrm/Makefile | 3
-rw-r--r--  net/xfrm/xfrm_algo.c | 5
-rw-r--r--  net/xfrm/xfrm_policy.c | 27
242 files changed, 4852 insertions, 4601 deletions
diff --git a/net/802/Makefile b/net/802/Makefile
index 7893d679910c..a30d6e385aed 100644
--- a/net/802/Makefile
+++ b/net/802/Makefile
@@ -4,7 +4,6 @@
# Check the p8022 selections against net/core/Makefile.
obj-$(CONFIG_LLC) += p8022.o psnap.o
-obj-$(CONFIG_TR) += p8022.o psnap.o tr.o
obj-$(CONFIG_NET_FC) += fc.o
obj-$(CONFIG_FDDI) += fddi.o
obj-$(CONFIG_HIPPI) += hippi.o
diff --git a/net/802/p8022.c b/net/802/p8022.c
index 7f353c4f437a..0bda8de7df51 100644
--- a/net/802/p8022.c
+++ b/net/802/p8022.c
@@ -1,6 +1,5 @@
/*
- * NET3: Support for 802.2 demultiplexing off Ethernet (Token ring
- * is kept separate see p8022tr.c)
+ * NET3: Support for 802.2 demultiplexing off Ethernet
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
diff --git a/net/802/stp.c b/net/802/stp.c
index 15540b7323cd..2c40ba0ec116 100644
--- a/net/802/stp.c
+++ b/net/802/stp.c
@@ -46,7 +46,7 @@ static int stp_pdu_rcv(struct sk_buff *skb, struct net_device *dev,
proto = rcu_dereference(garp_protos[eh->h_dest[5] -
GARP_ADDR_MIN]);
if (proto &&
- compare_ether_addr(eh->h_dest, proto->group_address))
+ !ether_addr_equal(eh->h_dest, proto->group_address))
goto err;
} else
proto = rcu_dereference(stp_proto);
diff --git a/net/802/tr.c b/net/802/tr.c
deleted file mode 100644
index 30a352ed09b1..000000000000
--- a/net/802/tr.c
+++ /dev/null
@@ -1,670 +0,0 @@
-/*
- * NET3: Token ring device handling subroutines
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
- *
- * Fixes: 3 Feb 97 Paul Norton <pnorton@cts.com> Minor routing fixes.
- * Added rif table to /proc/net/tr_rif and rif timeout to
- * /proc/sys/net/token-ring/rif_timeout.
- * 22 Jun 98 Paul Norton <p.norton@computer.org> Rearranged
- * tr_header and tr_type_trans to handle passing IPX SNAP and
- * 802.2 through the correct layers. Eliminated tr_reformat.
- *
- */
-
-#include <asm/uaccess.h>
-#include <linux/module.h>
-#include <linux/types.h>
-#include <linux/kernel.h>
-#include <linux/jiffies.h>
-#include <linux/string.h>
-#include <linux/mm.h>
-#include <linux/socket.h>
-#include <linux/in.h>
-#include <linux/inet.h>
-#include <linux/netdevice.h>
-#include <linux/trdevice.h>
-#include <linux/skbuff.h>
-#include <linux/errno.h>
-#include <linux/timer.h>
-#include <linux/net.h>
-#include <linux/proc_fs.h>
-#include <linux/seq_file.h>
-#include <linux/init.h>
-#include <linux/sysctl.h>
-#include <linux/slab.h>
-#include <net/arp.h>
-#include <net/net_namespace.h>
-
-static void tr_add_rif_info(struct trh_hdr *trh, struct net_device *dev);
-static void rif_check_expire(unsigned long dummy);
-
-#define TR_SR_DEBUG 0
-
-/*
- * Each RIF entry we learn is kept this way
- */
-
-struct rif_cache {
- unsigned char addr[TR_ALEN];
- int iface;
- __be16 rcf;
- __be16 rseg[8];
- struct rif_cache *next;
- unsigned long last_used;
- unsigned char local_ring;
-};
-
-#define RIF_TABLE_SIZE 32
-
-/*
- * We hash the RIF cache 32 ways. We do after all have to look it
- * up a lot.
- */
-
-static struct rif_cache *rif_table[RIF_TABLE_SIZE];
-
-static DEFINE_SPINLOCK(rif_lock);
-
-
-/*
- * Garbage disposal timer.
- */
-
-static struct timer_list rif_timer;
-
-static int sysctl_tr_rif_timeout = 60*10*HZ;
-
-static inline unsigned long rif_hash(const unsigned char *addr)
-{
- unsigned long x;
-
- x = addr[0];
- x = (x << 2) ^ addr[1];
- x = (x << 2) ^ addr[2];
- x = (x << 2) ^ addr[3];
- x = (x << 2) ^ addr[4];
- x = (x << 2) ^ addr[5];
-
- x ^= x >> 8;
-
- return x & (RIF_TABLE_SIZE - 1);
-}
-
-/*
- * Put the headers on a token ring packet. Token ring source routing
- * makes this a little more exciting than on ethernet.
- */
-
-static int tr_header(struct sk_buff *skb, struct net_device *dev,
- unsigned short type,
- const void *daddr, const void *saddr, unsigned int len)
-{
- struct trh_hdr *trh;
- int hdr_len;
-
- /*
- * Add the 802.2 SNAP header if IP as the IPv4/IPv6 code calls
- * dev->hard_header directly.
- */
- if (type == ETH_P_IP || type == ETH_P_IPV6 || type == ETH_P_ARP)
- {
- struct trllc *trllc;
-
- hdr_len = sizeof(struct trh_hdr) + sizeof(struct trllc);
- trh = (struct trh_hdr *)skb_push(skb, hdr_len);
- trllc = (struct trllc *)(trh+1);
- trllc->dsap = trllc->ssap = EXTENDED_SAP;
- trllc->llc = UI_CMD;
- trllc->protid[0] = trllc->protid[1] = trllc->protid[2] = 0x00;
- trllc->ethertype = htons(type);
- }
- else
- {
- hdr_len = sizeof(struct trh_hdr);
- trh = (struct trh_hdr *)skb_push(skb, hdr_len);
- }
-
- trh->ac=AC;
- trh->fc=LLC_FRAME;
-
- if(saddr)
- memcpy(trh->saddr,saddr,dev->addr_len);
- else
- memcpy(trh->saddr,dev->dev_addr,dev->addr_len);
-
- /*
- * Build the destination and then source route the frame
- */
-
- if(daddr)
- {
- memcpy(trh->daddr,daddr,dev->addr_len);
- tr_source_route(skb, trh, dev);
- return hdr_len;
- }
-
- return -hdr_len;
-}
-
-/*
- * A neighbour discovery of some species (eg arp) has completed. We
- * can now send the packet.
- */
-
-static int tr_rebuild_header(struct sk_buff *skb)
-{
- struct trh_hdr *trh=(struct trh_hdr *)skb->data;
- struct trllc *trllc=(struct trllc *)(skb->data+sizeof(struct trh_hdr));
- struct net_device *dev = skb->dev;
-
- /*
- * FIXME: We don't yet support IPv6 over token rings
- */
-
- if(trllc->ethertype != htons(ETH_P_IP)) {
- printk("tr_rebuild_header: Don't know how to resolve type %04X addresses ?\n", ntohs(trllc->ethertype));
- return 0;
- }
-
-#ifdef CONFIG_INET
- if(arp_find(trh->daddr, skb)) {
- return 1;
- }
- else
-#endif
- {
- tr_source_route(skb,trh,dev);
- return 0;
- }
-}
-
-/*
- * Some of this is a bit hackish. We intercept RIF information
- * used for source routing. We also grab IP directly and don't feed
- * it via SNAP.
- */
-
-__be16 tr_type_trans(struct sk_buff *skb, struct net_device *dev)
-{
-
- struct trh_hdr *trh;
- struct trllc *trllc;
- unsigned int riflen=0;
-
- skb->dev = dev;
- skb_reset_mac_header(skb);
- trh = tr_hdr(skb);
-
- if(trh->saddr[0] & TR_RII)
- riflen = (ntohs(trh->rcf) & TR_RCF_LEN_MASK) >> 8;
-
- trllc = (struct trllc *)(skb->data+sizeof(struct trh_hdr)-TR_MAXRIFLEN+riflen);
-
- skb_pull(skb,sizeof(struct trh_hdr)-TR_MAXRIFLEN+riflen);
-
- if(*trh->daddr & 0x80)
- {
- if(!memcmp(trh->daddr,dev->broadcast,TR_ALEN))
- skb->pkt_type=PACKET_BROADCAST;
- else
- skb->pkt_type=PACKET_MULTICAST;
- }
- else if ( (trh->daddr[0] & 0x01) && (trh->daddr[1] & 0x00) && (trh->daddr[2] & 0x5E))
- {
- skb->pkt_type=PACKET_MULTICAST;
- }
- else if(dev->flags & IFF_PROMISC)
- {
- if(memcmp(trh->daddr, dev->dev_addr, TR_ALEN))
- skb->pkt_type=PACKET_OTHERHOST;
- }
-
- if ((skb->pkt_type != PACKET_BROADCAST) &&
- (skb->pkt_type != PACKET_MULTICAST))
- tr_add_rif_info(trh,dev) ;
-
- /*
- * Strip the SNAP header from ARP packets since we don't
- * pass them through to the 802.2/SNAP layers.
- */
-
- if (trllc->dsap == EXTENDED_SAP &&
- (trllc->ethertype == htons(ETH_P_IP) ||
- trllc->ethertype == htons(ETH_P_IPV6) ||
- trllc->ethertype == htons(ETH_P_ARP)))
- {
- skb_pull(skb, sizeof(struct trllc));
- return trllc->ethertype;
- }
-
- return htons(ETH_P_TR_802_2);
-}
-
-/*
- * We try to do source routing...
- */
-
-void tr_source_route(struct sk_buff *skb,struct trh_hdr *trh,
- struct net_device *dev)
-{
- int slack;
- unsigned int hash;
- struct rif_cache *entry;
- unsigned char *olddata;
- unsigned long flags;
- static const unsigned char mcast_func_addr[]
- = {0xC0,0x00,0x00,0x04,0x00,0x00};
-
- spin_lock_irqsave(&rif_lock, flags);
-
- /*
- * Broadcasts are single route as stated in RFC 1042
- */
- if( (!memcmp(&(trh->daddr[0]),&(dev->broadcast[0]),TR_ALEN)) ||
- (!memcmp(&(trh->daddr[0]),&(mcast_func_addr[0]), TR_ALEN)) )
- {
- trh->rcf=htons((((sizeof(trh->rcf)) << 8) & TR_RCF_LEN_MASK)
- | TR_RCF_FRAME2K | TR_RCF_LIMITED_BROADCAST);
- trh->saddr[0]|=TR_RII;
- }
- else
- {
- hash = rif_hash(trh->daddr);
- /*
- * Walk the hash table and look for an entry
- */
- for(entry=rif_table[hash];entry && memcmp(&(entry->addr[0]),&(trh->daddr[0]),TR_ALEN);entry=entry->next);
-
- /*
- * If we found an entry we can route the frame.
- */
- if(entry)
- {
-#if TR_SR_DEBUG
-printk("source routing for %pM\n", trh->daddr);
-#endif
- if(!entry->local_ring && (ntohs(entry->rcf) & TR_RCF_LEN_MASK) >> 8)
- {
- trh->rcf=entry->rcf;
- memcpy(&trh->rseg[0],&entry->rseg[0],8*sizeof(unsigned short));
- trh->rcf^=htons(TR_RCF_DIR_BIT);
- trh->rcf&=htons(0x1fff); /* Issam Chehab <ichehab@madge1.demon.co.uk> */
-
- trh->saddr[0]|=TR_RII;
-#if TR_SR_DEBUG
- printk("entry found with rcf %04x\n", entry->rcf);
- }
- else
- {
- printk("entry found but without rcf length, local=%02x\n", entry->local_ring);
-#endif
- }
- entry->last_used=jiffies;
- }
- else
- {
- /*
- * Without the information we simply have to shout
- * on the wire. The replies should rapidly clean this
- * situation up.
- */
- trh->rcf=htons((((sizeof(trh->rcf)) << 8) & TR_RCF_LEN_MASK)
- | TR_RCF_FRAME2K | TR_RCF_LIMITED_BROADCAST);
- trh->saddr[0]|=TR_RII;
-#if TR_SR_DEBUG
- printk("no entry in rif table found - broadcasting frame\n");
-#endif
- }
- }
-
- /* Compress the RIF here so we don't have to do it in the driver(s) */
- if (!(trh->saddr[0] & 0x80))
- slack = 18;
- else
- slack = 18 - ((ntohs(trh->rcf) & TR_RCF_LEN_MASK)>>8);
- olddata = skb->data;
- spin_unlock_irqrestore(&rif_lock, flags);
-
- skb_pull(skb, slack);
- memmove(skb->data, olddata, sizeof(struct trh_hdr) - slack);
-}
-
-/*
- * We have learned some new RIF information for our source
- * routing.
- */
-
-static void tr_add_rif_info(struct trh_hdr *trh, struct net_device *dev)
-{
- unsigned int hash, rii_p = 0;
- unsigned long flags;
- struct rif_cache *entry;
- unsigned char saddr0;
-
- spin_lock_irqsave(&rif_lock, flags);
- saddr0 = trh->saddr[0];
-
- /*
- * Firstly see if the entry exists
- */
-
- if(trh->saddr[0] & TR_RII)
- {
- trh->saddr[0]&=0x7f;
- if (((ntohs(trh->rcf) & TR_RCF_LEN_MASK) >> 8) > 2)
- {
- rii_p = 1;
- }
- }
-
- hash = rif_hash(trh->saddr);
- for(entry=rif_table[hash];entry && memcmp(&(entry->addr[0]),&(trh->saddr[0]),TR_ALEN);entry=entry->next);
-
- if(entry==NULL)
- {
-#if TR_SR_DEBUG
- printk("adding rif_entry: addr:%pM rcf:%04X\n",
- trh->saddr, ntohs(trh->rcf));
-#endif
- /*
- * Allocate our new entry. A failure to allocate loses
- * use the information. This is harmless.
- *
- * FIXME: We ought to keep some kind of cache size
- * limiting and adjust the timers to suit.
- */
- entry=kmalloc(sizeof(struct rif_cache),GFP_ATOMIC);
-
- if(!entry)
- {
- printk(KERN_DEBUG "tr.c: Couldn't malloc rif cache entry !\n");
- spin_unlock_irqrestore(&rif_lock, flags);
- return;
- }
-
- memcpy(&(entry->addr[0]),&(trh->saddr[0]),TR_ALEN);
- entry->iface = dev->ifindex;
- entry->next=rif_table[hash];
- entry->last_used=jiffies;
- rif_table[hash]=entry;
-
- if (rii_p)
- {
- entry->rcf = trh->rcf & htons((unsigned short)~TR_RCF_BROADCAST_MASK);
- memcpy(&(entry->rseg[0]),&(trh->rseg[0]),8*sizeof(unsigned short));
- entry->local_ring = 0;
- }
- else
- {
- entry->local_ring = 1;
- }
- }
- else /* Y. Tahara added */
- {
- /*
- * Update existing entries
- */
- if (!entry->local_ring)
- if (entry->rcf != (trh->rcf & htons((unsigned short)~TR_RCF_BROADCAST_MASK)) &&
- !(trh->rcf & htons(TR_RCF_BROADCAST_MASK)))
- {
-#if TR_SR_DEBUG
-printk("updating rif_entry: addr:%pM rcf:%04X\n",
- trh->saddr, ntohs(trh->rcf));
-#endif
- entry->rcf = trh->rcf & htons((unsigned short)~TR_RCF_BROADCAST_MASK);
- memcpy(&(entry->rseg[0]),&(trh->rseg[0]),8*sizeof(unsigned short));
- }
- entry->last_used=jiffies;
- }
- trh->saddr[0]=saddr0; /* put the routing indicator back for tcpdump */
- spin_unlock_irqrestore(&rif_lock, flags);
-}
-
-/*
- * Scan the cache with a timer and see what we need to throw out.
- */
-
-static void rif_check_expire(unsigned long dummy)
-{
- int i;
- unsigned long flags, next_interval = jiffies + sysctl_tr_rif_timeout/2;
-
- spin_lock_irqsave(&rif_lock, flags);
-
- for(i =0; i < RIF_TABLE_SIZE; i++) {
- struct rif_cache *entry, **pentry;
-
- pentry = rif_table+i;
- while((entry=*pentry) != NULL) {
- unsigned long expires
- = entry->last_used + sysctl_tr_rif_timeout;
-
- if (time_before_eq(expires, jiffies)) {
- *pentry = entry->next;
- kfree(entry);
- } else {
- pentry = &entry->next;
-
- if (time_before(expires, next_interval))
- next_interval = expires;
- }
- }
- }
-
- spin_unlock_irqrestore(&rif_lock, flags);
-
- mod_timer(&rif_timer, next_interval);
-
-}
-
-/*
- * Generate the /proc/net information for the token ring RIF
- * routing.
- */
-
-#ifdef CONFIG_PROC_FS
-
-static struct rif_cache *rif_get_idx(loff_t pos)
-{
- int i;
- struct rif_cache *entry;
- loff_t off = 0;
-
- for(i = 0; i < RIF_TABLE_SIZE; i++)
- for(entry = rif_table[i]; entry; entry = entry->next) {
- if (off == pos)
- return entry;
- ++off;
- }
-
- return NULL;
-}
-
-static void *rif_seq_start(struct seq_file *seq, loff_t *pos)
- __acquires(&rif_lock)
-{
- spin_lock_irq(&rif_lock);
-
- return *pos ? rif_get_idx(*pos - 1) : SEQ_START_TOKEN;
-}
-
-static void *rif_seq_next(struct seq_file *seq, void *v, loff_t *pos)
-{
- int i;
- struct rif_cache *ent = v;
-
- ++*pos;
-
- if (v == SEQ_START_TOKEN) {
- i = -1;
- goto scan;
- }
-
- if (ent->next)
- return ent->next;
-
- i = rif_hash(ent->addr);
- scan:
- while (++i < RIF_TABLE_SIZE) {
- if ((ent = rif_table[i]) != NULL)
- return ent;
- }
- return NULL;
-}
-
-static void rif_seq_stop(struct seq_file *seq, void *v)
- __releases(&rif_lock)
-{
- spin_unlock_irq(&rif_lock);
-}
-
-static int rif_seq_show(struct seq_file *seq, void *v)
-{
- int j, rcf_len, segment, brdgnmb;
- struct rif_cache *entry = v;
-
- if (v == SEQ_START_TOKEN)
- seq_puts(seq,
- "if TR address TTL rcf routing segments\n");
- else {
- struct net_device *dev = dev_get_by_index(&init_net, entry->iface);
- long ttl = (long) (entry->last_used + sysctl_tr_rif_timeout)
- - (long) jiffies;
-
- seq_printf(seq, "%s %pM %7li ",
- dev?dev->name:"?",
- entry->addr,
- ttl/HZ);
-
- if (entry->local_ring)
- seq_puts(seq, "local\n");
- else {
-
- seq_printf(seq, "%04X", ntohs(entry->rcf));
- rcf_len = ((ntohs(entry->rcf) & TR_RCF_LEN_MASK)>>8)-2;
- if (rcf_len)
- rcf_len >>= 1;
- for(j = 1; j < rcf_len; j++) {
- if(j==1) {
- segment=ntohs(entry->rseg[j-1])>>4;
- seq_printf(seq," %03X",segment);
- }
-
- segment=ntohs(entry->rseg[j])>>4;
- brdgnmb=ntohs(entry->rseg[j-1])&0x00f;
- seq_printf(seq,"-%01X-%03X",brdgnmb,segment);
- }
- seq_putc(seq, '\n');
- }
-
- if (dev)
- dev_put(dev);
- }
- return 0;
-}
-
-
-static const struct seq_operations rif_seq_ops = {
- .start = rif_seq_start,
- .next = rif_seq_next,
- .stop = rif_seq_stop,
- .show = rif_seq_show,
-};
-
-static int rif_seq_open(struct inode *inode, struct file *file)
-{
- return seq_open(file, &rif_seq_ops);
-}
-
-static const struct file_operations rif_seq_fops = {
- .owner = THIS_MODULE,
- .open = rif_seq_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = seq_release,
-};
-
-#endif
-
-static const struct header_ops tr_header_ops = {
- .create = tr_header,
- .rebuild= tr_rebuild_header,
-};
-
-static void tr_setup(struct net_device *dev)
-{
- /*
- * Configure and register
- */
-
- dev->header_ops = &tr_header_ops;
-
- dev->type = ARPHRD_IEEE802_TR;
- dev->hard_header_len = TR_HLEN;
- dev->mtu = 2000;
- dev->addr_len = TR_ALEN;
- dev->tx_queue_len = 100; /* Long queues on tr */
-
- memset(dev->broadcast,0xFF, TR_ALEN);
-
- /* New-style flags. */
- dev->flags = IFF_BROADCAST | IFF_MULTICAST ;
-}
-
-/**
- * alloc_trdev - Register token ring device
- * @sizeof_priv: Size of additional driver-private structure to be allocated
- * for this token ring device
- *
- * Fill in the fields of the device structure with token ring-generic values.
- *
- * Constructs a new net device, complete with a private data area of
- * size @sizeof_priv. A 32-byte (not bit) alignment is enforced for
- * this private data area.
- */
-struct net_device *alloc_trdev(int sizeof_priv)
-{
- return alloc_netdev(sizeof_priv, "tr%d", tr_setup);
-}
-
-#ifdef CONFIG_SYSCTL
-static struct ctl_table tr_table[] = {
- {
- .procname = "rif_timeout",
- .data = &sysctl_tr_rif_timeout,
- .maxlen = sizeof(int),
- .mode = 0644,
- .proc_handler = proc_dointvec
- },
- { },
-};
-#endif
-
-/*
- * Called during bootup. We don't actually have to initialise
- * too much for this.
- */
-
-static int __init rif_init(void)
-{
- rif_timer.expires = jiffies + sysctl_tr_rif_timeout;
- setup_timer(&rif_timer, rif_check_expire, 0);
- add_timer(&rif_timer);
-#ifdef CONFIG_SYSCTL
- register_net_sysctl(&init_net, "net/token-ring", tr_table);
-#endif
- proc_net_fops_create(&init_net, "tr_rif", S_IRUGO, &rif_seq_fops);
- return 0;
-}
-
-module_init(rif_init);
-
-EXPORT_SYMBOL(tr_type_trans);
-EXPORT_SYMBOL(alloc_trdev);
-
-MODULE_LICENSE("GPL");
diff --git a/net/8021q/vlan.c b/net/8021q/vlan.c
index efea35b02e7f..6089f0cf23b4 100644
--- a/net/8021q/vlan.c
+++ b/net/8021q/vlan.c
@@ -266,19 +266,19 @@ static void vlan_sync_address(struct net_device *dev,
struct vlan_dev_priv *vlan = vlan_dev_priv(vlandev);
/* May be called without an actual change */
- if (!compare_ether_addr(vlan->real_dev_addr, dev->dev_addr))
+ if (ether_addr_equal(vlan->real_dev_addr, dev->dev_addr))
return;
/* vlan address was different from the old address and is equal to
* the new address */
- if (compare_ether_addr(vlandev->dev_addr, vlan->real_dev_addr) &&
- !compare_ether_addr(vlandev->dev_addr, dev->dev_addr))
+ if (!ether_addr_equal(vlandev->dev_addr, vlan->real_dev_addr) &&
+ ether_addr_equal(vlandev->dev_addr, dev->dev_addr))
dev_uc_del(dev, vlandev->dev_addr);
/* vlan address was equal to the old address and is different from
* the new address */
- if (!compare_ether_addr(vlandev->dev_addr, vlan->real_dev_addr) &&
- compare_ether_addr(vlandev->dev_addr, dev->dev_addr))
+ if (ether_addr_equal(vlandev->dev_addr, vlan->real_dev_addr) &&
+ !ether_addr_equal(vlandev->dev_addr, dev->dev_addr))
dev_uc_add(dev, vlandev->dev_addr);
memcpy(vlan->real_dev_addr, dev->dev_addr, ETH_ALEN);
diff --git a/net/8021q/vlan_core.c b/net/8021q/vlan_core.c
index 4d39d802be2c..8ca533c95de0 100644
--- a/net/8021q/vlan_core.c
+++ b/net/8021q/vlan_core.c
@@ -31,8 +31,7 @@ bool vlan_do_receive(struct sk_buff **skbp, bool last_handler)
/* Our lower layer thinks this is not local, let's make sure.
* This allows the VLAN to have a different MAC than the
* underlying device, and still route correctly. */
- if (!compare_ether_addr(eth_hdr(skb)->h_dest,
- vlan_dev->dev_addr))
+ if (ether_addr_equal(eth_hdr(skb)->h_dest, vlan_dev->dev_addr))
skb->pkt_type = PACKET_HOST;
}
diff --git a/net/8021q/vlan_dev.c b/net/8021q/vlan_dev.c
index 9988d4abb372..da1bc9c3cf38 100644
--- a/net/8021q/vlan_dev.c
+++ b/net/8021q/vlan_dev.c
@@ -157,7 +157,7 @@ static netdev_tx_t vlan_dev_hard_start_xmit(struct sk_buff *skb,
skb = __vlan_hwaccel_put_tag(skb, vlan_tci);
}
- skb_set_dev(skb, vlan_dev_priv(dev)->real_dev);
+ skb->dev = vlan_dev_priv(dev)->real_dev;
len = skb->len;
if (netpoll_tx_running(dev))
return skb->dev->netdev_ops->ndo_start_xmit(skb, skb->dev);
@@ -277,7 +277,7 @@ static int vlan_dev_open(struct net_device *dev)
!(vlan->flags & VLAN_FLAG_LOOSE_BINDING))
return -ENETDOWN;
- if (compare_ether_addr(dev->dev_addr, real_dev->dev_addr)) {
+ if (!ether_addr_equal(dev->dev_addr, real_dev->dev_addr)) {
err = dev_uc_add(real_dev, dev->dev_addr);
if (err < 0)
goto out;
@@ -307,7 +307,7 @@ clear_allmulti:
if (dev->flags & IFF_ALLMULTI)
dev_set_allmulti(real_dev, -1);
del_unicast:
- if (compare_ether_addr(dev->dev_addr, real_dev->dev_addr))
+ if (!ether_addr_equal(dev->dev_addr, real_dev->dev_addr))
dev_uc_del(real_dev, dev->dev_addr);
out:
netif_carrier_off(dev);
@@ -326,7 +326,7 @@ static int vlan_dev_stop(struct net_device *dev)
if (dev->flags & IFF_PROMISC)
dev_set_promiscuity(real_dev, -1);
- if (compare_ether_addr(dev->dev_addr, real_dev->dev_addr))
+ if (!ether_addr_equal(dev->dev_addr, real_dev->dev_addr))
dev_uc_del(real_dev, dev->dev_addr);
netif_carrier_off(dev);
@@ -345,13 +345,13 @@ static int vlan_dev_set_mac_address(struct net_device *dev, void *p)
if (!(dev->flags & IFF_UP))
goto out;
- if (compare_ether_addr(addr->sa_data, real_dev->dev_addr)) {
+ if (!ether_addr_equal(addr->sa_data, real_dev->dev_addr)) {
err = dev_uc_add(real_dev, addr->sa_data);
if (err < 0)
return err;
}
- if (compare_ether_addr(dev->dev_addr, real_dev->dev_addr))
+ if (!ether_addr_equal(dev->dev_addr, real_dev->dev_addr))
dev_uc_del(real_dev, dev->dev_addr);
out:
diff --git a/net/Kconfig b/net/Kconfig
index e07272d0bb2d..245831bec09a 100644
--- a/net/Kconfig
+++ b/net/Kconfig
@@ -207,10 +207,10 @@ source "net/ipx/Kconfig"
source "drivers/net/appletalk/Kconfig"
source "net/x25/Kconfig"
source "net/lapb/Kconfig"
-source "net/econet/Kconfig"
source "net/wanrouter/Kconfig"
source "net/phonet/Kconfig"
source "net/ieee802154/Kconfig"
+source "net/mac802154/Kconfig"
source "net/sched/Kconfig"
source "net/dcb/Kconfig"
source "net/dns_resolver/Kconfig"
@@ -246,9 +246,6 @@ config BQL
select DQL
default y
-config HAVE_BPF_JIT
- bool
-
config BPF_JIT
bool "enable BPF Just In Time compiler"
depends on HAVE_BPF_JIT
@@ -295,7 +292,7 @@ config NET_TCPPROBE
module will be called tcp_probe.
config NET_DROP_MONITOR
- boolean "Network packet drop alerting service"
+ tristate "Network packet drop alerting service"
depends on INET && EXPERIMENTAL && TRACEPOINTS
---help---
This feature provides an alerting service to userspace in the
@@ -340,3 +337,7 @@ source "net/nfc/Kconfig"
endif # if NET
+
+# Used by archs to tell that they support BPF_JIT
+config HAVE_BPF_JIT
+ bool
diff --git a/net/Makefile b/net/Makefile
index ad432fa4d934..4f4ee083064c 100644
--- a/net/Makefile
+++ b/net/Makefile
@@ -40,7 +40,6 @@ obj-$(CONFIG_AF_RXRPC) += rxrpc/
obj-$(CONFIG_ATM) += atm/
obj-$(CONFIG_L2TP) += l2tp/
obj-$(CONFIG_DECNET) += decnet/
-obj-$(CONFIG_ECONET) += econet/
obj-$(CONFIG_PHONET) += phonet/
ifneq ($(CONFIG_VLAN_8021Q),)
obj-y += 8021q/
@@ -60,6 +59,7 @@ ifneq ($(CONFIG_DCB),)
obj-y += dcb/
endif
obj-$(CONFIG_IEEE802154) += ieee802154/
+obj-$(CONFIG_MAC802154) += mac802154/
ifeq ($(CONFIG_NET),y)
obj-$(CONFIG_SYSCTL) += sysctl_net.o
diff --git a/net/atm/ioctl.c b/net/atm/ioctl.c
index 62dc8bfe6fe7..bbd3b639992e 100644
--- a/net/atm/ioctl.c
+++ b/net/atm/ioctl.c
@@ -97,9 +97,8 @@ static int do_vcc_ioctl(struct socket *sock, unsigned int cmd,
error = sock_get_timestampns(sk, argp);
goto done;
case ATM_SETSC:
- if (net_ratelimit())
- pr_warning("ATM_SETSC is obsolete; used by %s:%d\n",
- current->comm, task_pid_nr(current));
+ net_warn_ratelimited("ATM_SETSC is obsolete; used by %s:%d\n",
+ current->comm, task_pid_nr(current));
error = 0;
goto done;
case ATMSIGD_CTRL:
@@ -123,8 +122,7 @@ static int do_vcc_ioctl(struct socket *sock, unsigned int cmd,
work for 32-bit userspace. TBH I don't really want
to think about it at all. dwmw2. */
if (compat) {
- if (net_ratelimit())
- pr_warning("32-bit task cannot be atmsigd\n");
+ net_warn_ratelimited("32-bit task cannot be atmsigd\n");
error = -EINVAL;
goto done;
}
diff --git a/net/atm/lec.c b/net/atm/lec.c
index f1964caa0f83..a7d172105c99 100644
--- a/net/atm/lec.c
+++ b/net/atm/lec.c
@@ -26,11 +26,6 @@
#include <linux/spinlock.h>
#include <linux/seq_file.h>
-/* TokenRing if needed */
-#ifdef CONFIG_TR
-#include <linux/trdevice.h>
-#endif
-
/* And atm device */
#include <linux/atmdev.h>
#include <linux/atmlec.h>
@@ -163,50 +158,6 @@ static void lec_handle_bridge(struct sk_buff *skb, struct net_device *dev)
#endif /* defined(CONFIG_BRIDGE) || defined(CONFIG_BRIDGE_MODULE) */
/*
- * Modelled after tr_type_trans
- * All multicast and ARE or STE frames go to BUS.
- * Non source routed frames go by destination address.
- * Last hop source routed frames go by destination address.
- * Not last hop source routed frames go by _next_ route descriptor.
- * Returns pointer to destination MAC address or fills in rdesc
- * and returns NULL.
- */
-#ifdef CONFIG_TR
-static unsigned char *get_tr_dst(unsigned char *packet, unsigned char *rdesc)
-{
- struct trh_hdr *trh;
- unsigned int riflen, num_rdsc;
-
- trh = (struct trh_hdr *)packet;
- if (trh->daddr[0] & (uint8_t) 0x80)
- return bus_mac; /* multicast */
-
- if (trh->saddr[0] & TR_RII) {
- riflen = (ntohs(trh->rcf) & TR_RCF_LEN_MASK) >> 8;
- if ((ntohs(trh->rcf) >> 13) != 0)
- return bus_mac; /* ARE or STE */
- } else
- return trh->daddr; /* not source routed */
-
- if (riflen < 6)
- return trh->daddr; /* last hop, source routed */
-
- /* riflen is 6 or more, packet has more than one route descriptor */
- num_rdsc = (riflen / 2) - 1;
- memset(rdesc, 0, ETH_ALEN);
- /* offset 4 comes from LAN destination field in LE control frames */
- if (trh->rcf & htons((uint16_t) TR_RCF_DIR_BIT))
- memcpy(&rdesc[4], &trh->rseg[num_rdsc - 2], sizeof(__be16));
- else {
- memcpy(&rdesc[4], &trh->rseg[1], sizeof(__be16));
- rdesc[5] = ((ntohs(trh->rseg[0]) & 0x000f) | (rdesc[5] & 0xf0));
- }
-
- return NULL;
-}
-#endif /* CONFIG_TR */
-
-/*
* Open/initialize the netdevice. This is called (in the current kernel)
* sometime after booting when the 'ifconfig' program is run.
*
@@ -257,9 +208,6 @@ static netdev_tx_t lec_start_xmit(struct sk_buff *skb,
struct lec_arp_table *entry;
unsigned char *dst;
int min_frame_size;
-#ifdef CONFIG_TR
- unsigned char rdesc[ETH_ALEN]; /* Token Ring route descriptor */
-#endif
int is_rdesc;
pr_debug("called\n");
@@ -290,24 +238,10 @@ static netdev_tx_t lec_start_xmit(struct sk_buff *skb,
}
skb_push(skb, 2);
- /* Put le header to place, works for TokenRing too */
+ /* Put le header to place */
lec_h = (struct lecdatahdr_8023 *)skb->data;
lec_h->le_header = htons(priv->lecid);
-#ifdef CONFIG_TR
- /*
- * Ugly. Use this to realign Token Ring packets for
- * e.g. PCA-200E driver.
- */
- if (priv->is_trdev) {
- skb2 = skb_realloc_headroom(skb, LEC_HEADER_LEN);
- kfree_skb(skb);
- if (skb2 == NULL)
- return NETDEV_TX_OK;
- skb = skb2;
- }
-#endif
-
#if DUMP_PACKETS >= 2
#define MAX_DUMP_SKB 99
#elif DUMP_PACKETS >= 1
@@ -321,12 +255,7 @@ static netdev_tx_t lec_start_xmit(struct sk_buff *skb,
#endif /* DUMP_PACKETS >= 1 */
/* Minimum ethernet-frame size */
-#ifdef CONFIG_TR
- if (priv->is_trdev)
- min_frame_size = LEC_MINIMUM_8025_SIZE;
- else
-#endif
- min_frame_size = LEC_MINIMUM_8023_SIZE;
+ min_frame_size = LEC_MINIMUM_8023_SIZE;
if (skb->len < min_frame_size) {
if ((skb->len + skb_tailroom(skb)) < min_frame_size) {
skb2 = skb_copy_expand(skb, 0,
@@ -345,15 +274,6 @@ static netdev_tx_t lec_start_xmit(struct sk_buff *skb,
/* Send to right vcc */
is_rdesc = 0;
dst = lec_h->h_dest;
-#ifdef CONFIG_TR
- if (priv->is_trdev) {
- dst = get_tr_dst(skb->data + 2, rdesc);
- if (dst == NULL) {
- dst = rdesc;
- is_rdesc = 1;
- }
- }
-#endif
entry = NULL;
vcc = lec_arp_resolve(priv, dst, is_rdesc, &entry);
pr_debug("%s:vcc:%p vcc_flags:%lx, entry:%p\n",
@@ -710,12 +630,7 @@ static void lec_push(struct atm_vcc *vcc, struct sk_buff *skb)
dev_kfree_skb(skb);
return;
}
-#ifdef CONFIG_TR
- if (priv->is_trdev)
- dst = ((struct lecdatahdr_8025 *)skb->data)->h_dest;
- else
-#endif
- dst = ((struct lecdatahdr_8023 *)skb->data)->h_dest;
+ dst = ((struct lecdatahdr_8023 *)skb->data)->h_dest;
/*
* If this is a Data Direct VCC, and the VCC does not match
@@ -723,16 +638,7 @@ static void lec_push(struct atm_vcc *vcc, struct sk_buff *skb)
*/
spin_lock_irqsave(&priv->lec_arp_lock, flags);
if (lec_is_data_direct(vcc)) {
-#ifdef CONFIG_TR
- if (priv->is_trdev)
- src =
- ((struct lecdatahdr_8025 *)skb->data)->
- h_source;
- else
-#endif
- src =
- ((struct lecdatahdr_8023 *)skb->data)->
- h_source;
+ src = ((struct lecdatahdr_8023 *)skb->data)->h_source;
entry = lec_arp_find(priv, src);
if (entry && entry->vcc != vcc) {
lec_arp_remove(priv, entry);
@@ -750,12 +656,7 @@ static void lec_push(struct atm_vcc *vcc, struct sk_buff *skb)
if (!hlist_empty(&priv->lec_arp_empty_ones))
lec_arp_check_empties(priv, vcc, skb);
skb_pull(skb, 2); /* skip lec_id */
-#ifdef CONFIG_TR
- if (priv->is_trdev)
- skb->protocol = tr_type_trans(skb, dev);
- else
-#endif
- skb->protocol = eth_type_trans(skb, dev);
+ skb->protocol = eth_type_trans(skb, dev);
dev->stats.rx_packets++;
dev->stats.rx_bytes += skb->len;
memset(ATM_SKB(skb), 0, sizeof(struct atm_skb_data));
@@ -827,27 +728,13 @@ static int lecd_attach(struct atm_vcc *vcc, int arg)
i = 0;
else
i = arg;
-#ifdef CONFIG_TR
if (arg >= MAX_LEC_ITF)
return -EINVAL;
-#else /* Reserve the top NUM_TR_DEVS for TR */
- if (arg >= (MAX_LEC_ITF - NUM_TR_DEVS))
- return -EINVAL;
-#endif
if (!dev_lec[i]) {
- int is_trdev, size;
-
- is_trdev = 0;
- if (i >= (MAX_LEC_ITF - NUM_TR_DEVS))
- is_trdev = 1;
+ int size;
size = sizeof(struct lec_priv);
-#ifdef CONFIG_TR
- if (is_trdev)
- dev_lec[i] = alloc_trdev(size);
- else
-#endif
- dev_lec[i] = alloc_etherdev(size);
+ dev_lec[i] = alloc_etherdev(size);
if (!dev_lec[i])
return -ENOMEM;
dev_lec[i]->netdev_ops = &lec_netdev_ops;
@@ -858,7 +745,6 @@ static int lecd_attach(struct atm_vcc *vcc, int arg)
}
priv = netdev_priv(dev_lec[i]);
- priv->is_trdev = is_trdev;
} else {
priv = netdev_priv(dev_lec[i]);
if (priv->lecd)
@@ -1255,7 +1141,7 @@ static int lane2_associate_req(struct net_device *dev, const u8 *lan_dst,
struct sk_buff *skb;
struct lec_priv *priv = netdev_priv(dev);
- if (compare_ether_addr(lan_dst, dev->dev_addr))
+ if (!ether_addr_equal(lan_dst, dev->dev_addr))
return 0; /* not our mac address */
kfree(priv->tlvs); /* NULL if there was no previous association */
@@ -1662,7 +1548,7 @@ static struct lec_arp_table *lec_arp_find(struct lec_priv *priv,
head = &priv->lec_arp_tables[HASH(mac_addr[ETH_ALEN - 1])];
hlist_for_each_entry(entry, node, head, next) {
- if (!compare_ether_addr(mac_addr, entry->mac_addr))
+ if (ether_addr_equal(mac_addr, entry->mac_addr))
return entry;
}
return NULL;
@@ -1849,7 +1735,7 @@ static struct atm_vcc *lec_arp_resolve(struct lec_priv *priv,
case 1:
return priv->mcast_vcc;
case 2: /* LANE2 wants arp for multicast addresses */
- if (!compare_ether_addr(mac_to_find, bus_mac))
+ if (ether_addr_equal(mac_to_find, bus_mac))
return priv->mcast_vcc;
break;
default:
@@ -2372,15 +2258,7 @@ lec_arp_check_empties(struct lec_priv *priv,
struct hlist_node *node, *next;
struct lec_arp_table *entry, *tmp;
struct lecdatahdr_8023 *hdr = (struct lecdatahdr_8023 *)skb->data;
- unsigned char *src;
-#ifdef CONFIG_TR
- struct lecdatahdr_8025 *tr_hdr = (struct lecdatahdr_8025 *)skb->data;
-
- if (priv->is_trdev)
- src = tr_hdr->h_source;
- else
-#endif
- src = hdr->h_source;
+ unsigned char *src = hdr->h_source;
spin_lock_irqsave(&priv->lec_arp_lock, flags);
hlist_for_each_entry_safe(entry, node, next,
diff --git a/net/atm/lec.h b/net/atm/lec.h
index dfc071966463..c730e57de199 100644
--- a/net/atm/lec.h
+++ b/net/atm/lec.h
@@ -142,7 +142,6 @@ struct lec_priv {
int itfnum; /* e.g. 2 for lec2, 5 for lec5 */
struct lane2_ops *lane2_ops; /* can be NULL for LANE v1 */
int is_proxy; /* bridge between ATM and Ethernet */
- int is_trdev; /* Device type, 0 = Ethernet, 1 = TokenRing */
};
struct lec_vcc_priv {
diff --git a/net/atm/mpc.c b/net/atm/mpc.c
index aa972409f093..d4cc1be5c364 100644
--- a/net/atm/mpc.c
+++ b/net/atm/mpc.c
@@ -592,8 +592,7 @@ static netdev_tx_t mpc_send_packet(struct sk_buff *skb,
goto non_ip;
while (i < mpc->number_of_mps_macs) {
- if (!compare_ether_addr(eth->h_dest,
- (mpc->mps_macs + i*ETH_ALEN)))
+ if (ether_addr_equal(eth->h_dest, mpc->mps_macs + i * ETH_ALEN))
if (send_via_shortcut(skb, mpc) == 0) /* try shortcut */
return NETDEV_TX_OK;
i++;
diff --git a/net/batman-adv/bat_debugfs.c b/net/batman-adv/bat_debugfs.c
index 916380c73ab7..3b588f86d770 100644
--- a/net/batman-adv/bat_debugfs.c
+++ b/net/batman-adv/bat_debugfs.c
@@ -83,8 +83,8 @@ int debug_log(struct bat_priv *bat_priv, const char *fmt, ...)
va_start(args, fmt);
vscnprintf(tmp_log_buf, sizeof(tmp_log_buf), fmt, args);
- fdebug_log(bat_priv->debug_log, "[%10lu] %s",
- (jiffies / HZ), tmp_log_buf);
+ fdebug_log(bat_priv->debug_log, "[%10u] %s",
+ jiffies_to_msecs(jiffies), tmp_log_buf);
va_end(args);
return 0;
diff --git a/net/batman-adv/bat_iv_ogm.c b/net/batman-adv/bat_iv_ogm.c
index 8b2db2e76c7e..dc53798ebb47 100644
--- a/net/batman-adv/bat_iv_ogm.c
+++ b/net/batman-adv/bat_iv_ogm.c
@@ -30,6 +30,31 @@
#include "send.h"
#include "bat_algo.h"
+static struct neigh_node *bat_iv_ogm_neigh_new(struct hard_iface *hard_iface,
+ const uint8_t *neigh_addr,
+ struct orig_node *orig_node,
+ struct orig_node *orig_neigh,
+ uint32_t seqno)
+{
+ struct neigh_node *neigh_node;
+
+ neigh_node = batadv_neigh_node_new(hard_iface, neigh_addr, seqno);
+ if (!neigh_node)
+ goto out;
+
+ INIT_LIST_HEAD(&neigh_node->bonding_list);
+
+ neigh_node->orig_node = orig_neigh;
+ neigh_node->if_incoming = hard_iface;
+
+ spin_lock_bh(&orig_node->neigh_list_lock);
+ hlist_add_head_rcu(&neigh_node->list, &orig_node->neigh_list);
+ spin_unlock_bh(&orig_node->neigh_list_lock);
+
+out:
+ return neigh_node;
+}
+
static int bat_iv_ogm_iface_enable(struct hard_iface *hard_iface)
{
struct batman_ogm_packet *batman_ogm_packet;
@@ -67,24 +92,24 @@ static void bat_iv_ogm_iface_disable(struct hard_iface *hard_iface)
hard_iface->packet_buff = NULL;
}
-static void bat_iv_ogm_primary_iface_set(struct hard_iface *hard_iface)
+static void bat_iv_ogm_iface_update_mac(struct hard_iface *hard_iface)
{
struct batman_ogm_packet *batman_ogm_packet;
batman_ogm_packet = (struct batman_ogm_packet *)hard_iface->packet_buff;
- batman_ogm_packet->flags = PRIMARIES_FIRST_HOP;
- batman_ogm_packet->header.ttl = TTL;
+ memcpy(batman_ogm_packet->orig,
+ hard_iface->net_dev->dev_addr, ETH_ALEN);
+ memcpy(batman_ogm_packet->prev_sender,
+ hard_iface->net_dev->dev_addr, ETH_ALEN);
}
-static void bat_iv_ogm_update_mac(struct hard_iface *hard_iface)
+static void bat_iv_ogm_primary_iface_set(struct hard_iface *hard_iface)
{
struct batman_ogm_packet *batman_ogm_packet;
batman_ogm_packet = (struct batman_ogm_packet *)hard_iface->packet_buff;
- memcpy(batman_ogm_packet->orig,
- hard_iface->net_dev->dev_addr, ETH_ALEN);
- memcpy(batman_ogm_packet->prev_sender,
- hard_iface->net_dev->dev_addr, ETH_ALEN);
+ batman_ogm_packet->flags = PRIMARIES_FIRST_HOP;
+ batman_ogm_packet->header.ttl = TTL;
}
/* when do we schedule our own ogm to be sent */
@@ -480,11 +505,11 @@ static void bat_iv_ogm_queue_add(struct bat_priv *bat_priv,
static void bat_iv_ogm_forward(struct orig_node *orig_node,
const struct ethhdr *ethhdr,
struct batman_ogm_packet *batman_ogm_packet,
- int directlink, struct hard_iface *if_incoming)
+ bool is_single_hop_neigh,
+ bool is_from_best_next_hop,
+ struct hard_iface *if_incoming)
{
struct bat_priv *bat_priv = netdev_priv(if_incoming->soft_iface);
- struct neigh_node *router;
- uint8_t in_tq, in_ttl, tq_avg = 0;
uint8_t tt_num_changes;
if (batman_ogm_packet->header.ttl <= 1) {
@@ -492,48 +517,37 @@ static void bat_iv_ogm_forward(struct orig_node *orig_node,
return;
}
- router = orig_node_get_router(orig_node);
+ if (!is_from_best_next_hop) {
+ /* Mark the forwarded packet when it is not coming from our
+ * best next hop. We still need to forward the packet for our
+ * neighbor link quality detection to work in case the packet
+ * originated from a single hop neighbor. Otherwise we can
+ * simply drop the ogm.
+ */
+ if (is_single_hop_neigh)
+ batman_ogm_packet->flags |= NOT_BEST_NEXT_HOP;
+ else
+ return;
+ }
- in_tq = batman_ogm_packet->tq;
- in_ttl = batman_ogm_packet->header.ttl;
tt_num_changes = batman_ogm_packet->tt_num_changes;
batman_ogm_packet->header.ttl--;
memcpy(batman_ogm_packet->prev_sender, ethhdr->h_source, ETH_ALEN);
- /* rebroadcast tq of our best ranking neighbor to ensure the rebroadcast
- * of our best tq value */
- if (router && router->tq_avg != 0) {
-
- /* rebroadcast ogm of best ranking neighbor as is */
- if (!compare_eth(router->addr, ethhdr->h_source)) {
- batman_ogm_packet->tq = router->tq_avg;
-
- if (router->last_ttl)
- batman_ogm_packet->header.ttl =
- router->last_ttl - 1;
- }
-
- tq_avg = router->tq_avg;
- }
-
- if (router)
- neigh_node_free_ref(router);
-
/* apply hop penalty */
batman_ogm_packet->tq = hop_penalty(batman_ogm_packet->tq, bat_priv);
bat_dbg(DBG_BATMAN, bat_priv,
- "Forwarding packet: tq_orig: %i, tq_avg: %i, tq_forw: %i, ttl_orig: %i, ttl_forw: %i\n",
- in_tq, tq_avg, batman_ogm_packet->tq, in_ttl - 1,
- batman_ogm_packet->header.ttl);
+ "Forwarding packet: tq: %i, ttl: %i\n",
+ batman_ogm_packet->tq, batman_ogm_packet->header.ttl);
batman_ogm_packet->seqno = htonl(batman_ogm_packet->seqno);
batman_ogm_packet->tt_crc = htons(batman_ogm_packet->tt_crc);
/* switch of primaries first hop flag when forwarding */
batman_ogm_packet->flags &= ~PRIMARIES_FIRST_HOP;
- if (directlink)
+ if (is_single_hop_neigh)
batman_ogm_packet->flags |= DIRECTLINK;
else
batman_ogm_packet->flags &= ~DIRECTLINK;
@@ -622,12 +636,12 @@ static void bat_iv_ogm_orig_update(struct bat_priv *bat_priv,
if (is_duplicate)
continue;
- spin_lock_bh(&tmp_neigh_node->tq_lock);
+ spin_lock_bh(&tmp_neigh_node->lq_update_lock);
ring_buffer_set(tmp_neigh_node->tq_recv,
&tmp_neigh_node->tq_index, 0);
tmp_neigh_node->tq_avg =
ring_buffer_avg(tmp_neigh_node->tq_recv);
- spin_unlock_bh(&tmp_neigh_node->tq_lock);
+ spin_unlock_bh(&tmp_neigh_node->lq_update_lock);
}
if (!neigh_node) {
@@ -637,8 +651,9 @@ static void bat_iv_ogm_orig_update(struct bat_priv *bat_priv,
if (!orig_tmp)
goto unlock;
- neigh_node = create_neighbor(orig_node, orig_tmp,
- ethhdr->h_source, if_incoming);
+ neigh_node = bat_iv_ogm_neigh_new(if_incoming, ethhdr->h_source,
+ orig_node, orig_tmp,
+ batman_ogm_packet->seqno);
orig_node_free_ref(orig_tmp);
if (!neigh_node)
@@ -650,14 +665,14 @@ static void bat_iv_ogm_orig_update(struct bat_priv *bat_priv,
rcu_read_unlock();
orig_node->flags = batman_ogm_packet->flags;
- neigh_node->last_valid = jiffies;
+ neigh_node->last_seen = jiffies;
- spin_lock_bh(&neigh_node->tq_lock);
+ spin_lock_bh(&neigh_node->lq_update_lock);
ring_buffer_set(neigh_node->tq_recv,
&neigh_node->tq_index,
batman_ogm_packet->tq);
neigh_node->tq_avg = ring_buffer_avg(neigh_node->tq_recv);
- spin_unlock_bh(&neigh_node->tq_lock);
+ spin_unlock_bh(&neigh_node->lq_update_lock);
if (!is_duplicate) {
orig_node->last_ttl = batman_ogm_packet->header.ttl;
@@ -763,19 +778,20 @@ static int bat_iv_ogm_calc_tq(struct orig_node *orig_node,
rcu_read_unlock();
if (!neigh_node)
- neigh_node = create_neighbor(orig_neigh_node,
- orig_neigh_node,
- orig_neigh_node->orig,
- if_incoming);
+ neigh_node = bat_iv_ogm_neigh_new(if_incoming,
+ orig_neigh_node->orig,
+ orig_neigh_node,
+ orig_neigh_node,
+ batman_ogm_packet->seqno);
if (!neigh_node)
goto out;
- /* if orig_node is direct neighbor update neigh_node last_valid */
+ /* if orig_node is direct neighbor update neigh_node last_seen */
if (orig_node == orig_neigh_node)
- neigh_node->last_valid = jiffies;
+ neigh_node->last_seen = jiffies;
- orig_node->last_valid = jiffies;
+ orig_node->last_seen = jiffies;
/* find packet count of corresponding one hop neighbor */
spin_lock_bh(&orig_node->ogm_cnt_lock);
@@ -918,7 +934,9 @@ static void bat_iv_ogm_process(const struct ethhdr *ethhdr,
struct neigh_node *orig_neigh_router = NULL;
int has_directlink_flag;
int is_my_addr = 0, is_my_orig = 0, is_my_oldorig = 0;
- int is_broadcast = 0, is_bidirectional, is_single_hop_neigh;
+ int is_broadcast = 0, is_bidirectional;
+ bool is_single_hop_neigh = false;
+ bool is_from_best_next_hop = false;
int is_duplicate;
uint32_t if_incoming_seqno;
@@ -942,8 +960,8 @@ static void bat_iv_ogm_process(const struct ethhdr *ethhdr,
has_directlink_flag = (batman_ogm_packet->flags & DIRECTLINK ? 1 : 0);
- is_single_hop_neigh = (compare_eth(ethhdr->h_source,
- batman_ogm_packet->orig) ? 1 : 0);
+ if (compare_eth(ethhdr->h_source, batman_ogm_packet->orig))
+ is_single_hop_neigh = true;
bat_dbg(DBG_BATMAN, bat_priv,
"Received BATMAN packet via NB: %pM, IF: %s [%pM] (from OG: %pM, via prev OG: %pM, seqno %u, ttvn %u, crc %u, changes %u, td %d, TTL %d, V %d, IDF %d)\n",
@@ -1040,6 +1058,13 @@ static void bat_iv_ogm_process(const struct ethhdr *ethhdr,
return;
}
+ if (batman_ogm_packet->flags & NOT_BEST_NEXT_HOP) {
+ bat_dbg(DBG_BATMAN, bat_priv,
+ "Drop packet: ignoring all packets not forwarded from the best next hop (sender: %pM)\n",
+ ethhdr->h_source);
+ return;
+ }
+
orig_node = get_orig_node(bat_priv, batman_ogm_packet->orig);
if (!orig_node)
return;
@@ -1064,6 +1089,10 @@ static void bat_iv_ogm_process(const struct ethhdr *ethhdr,
if (router)
router_router = orig_node_get_router(router->orig_node);
+ if ((router && router->tq_avg != 0) &&
+ (compare_eth(router->addr, ethhdr->h_source)))
+ is_from_best_next_hop = true;
+
/* avoid temporary routing loops */
if (router && router_router &&
(compare_eth(router->addr, batman_ogm_packet->prev_sender)) &&
@@ -1114,7 +1143,8 @@ static void bat_iv_ogm_process(const struct ethhdr *ethhdr,
/* mark direct link on incoming interface */
bat_iv_ogm_forward(orig_node, ethhdr, batman_ogm_packet,
- 1, if_incoming);
+ is_single_hop_neigh, is_from_best_next_hop,
+ if_incoming);
bat_dbg(DBG_BATMAN, bat_priv,
"Forwarding packet: rebroadcast neighbor packet with direct link flag\n");
@@ -1137,7 +1167,8 @@ static void bat_iv_ogm_process(const struct ethhdr *ethhdr,
bat_dbg(DBG_BATMAN, bat_priv,
"Forwarding packet: rebroadcast originator packet\n");
bat_iv_ogm_forward(orig_node, ethhdr, batman_ogm_packet,
- 0, if_incoming);
+ is_single_hop_neigh, is_from_best_next_hop,
+ if_incoming);
out_neigh:
if ((orig_neigh_node) && (!is_single_hop_neigh))
@@ -1153,13 +1184,25 @@ out:
orig_node_free_ref(orig_node);
}
-static void bat_iv_ogm_receive(struct hard_iface *if_incoming,
- struct sk_buff *skb)
+static int bat_iv_ogm_receive(struct sk_buff *skb,
+ struct hard_iface *if_incoming)
{
+ struct bat_priv *bat_priv = netdev_priv(if_incoming->soft_iface);
struct batman_ogm_packet *batman_ogm_packet;
struct ethhdr *ethhdr;
int buff_pos = 0, packet_len;
unsigned char *tt_buff, *packet_buff;
+ bool ret;
+
+ ret = check_management_packet(skb, if_incoming, BATMAN_OGM_HLEN);
+ if (!ret)
+ return NET_RX_DROP;
+
+ /* did we receive a B.A.T.M.A.N. IV OGM packet on an interface
+ * that does not have B.A.T.M.A.N. IV enabled ?
+ */
+ if (bat_priv->bat_algo_ops->bat_ogm_emit != bat_iv_ogm_emit)
+ return NET_RX_DROP;
packet_len = skb_headlen(skb);
ethhdr = (struct ethhdr *)skb_mac_header(skb);
@@ -1185,20 +1228,38 @@ static void bat_iv_ogm_receive(struct hard_iface *if_incoming,
(packet_buff + buff_pos);
} while (bat_iv_ogm_aggr_packet(buff_pos, packet_len,
batman_ogm_packet->tt_num_changes));
+
+ kfree_skb(skb);
+ return NET_RX_SUCCESS;
}
static struct bat_algo_ops batman_iv __read_mostly = {
.name = "BATMAN IV",
.bat_iface_enable = bat_iv_ogm_iface_enable,
.bat_iface_disable = bat_iv_ogm_iface_disable,
+ .bat_iface_update_mac = bat_iv_ogm_iface_update_mac,
.bat_primary_iface_set = bat_iv_ogm_primary_iface_set,
- .bat_ogm_update_mac = bat_iv_ogm_update_mac,
.bat_ogm_schedule = bat_iv_ogm_schedule,
.bat_ogm_emit = bat_iv_ogm_emit,
- .bat_ogm_receive = bat_iv_ogm_receive,
};
int __init bat_iv_init(void)
{
- return bat_algo_register(&batman_iv);
+ int ret;
+
+ /* batman originator packet */
+ ret = recv_handler_register(BAT_IV_OGM, bat_iv_ogm_receive);
+ if (ret < 0)
+ goto out;
+
+ ret = bat_algo_register(&batman_iv);
+ if (ret < 0)
+ goto handler_unregister;
+
+ goto out;
+
+handler_unregister:
+ recv_handler_unregister(BAT_IV_OGM);
+out:
+ return ret;
}
diff --git a/net/batman-adv/bat_sysfs.c b/net/batman-adv/bat_sysfs.c
index 2c816883ca13..5bc7b66d32dc 100644
--- a/net/batman-adv/bat_sysfs.c
+++ b/net/batman-adv/bat_sysfs.c
@@ -63,7 +63,7 @@ struct bat_attribute bat_attr_##_name = { \
.store = _store, \
};
-#define BAT_ATTR_STORE_BOOL(_name, _post_func) \
+#define BAT_ATTR_SIF_STORE_BOOL(_name, _post_func) \
ssize_t store_##_name(struct kobject *kobj, struct attribute *attr, \
char *buff, size_t count) \
{ \
@@ -73,9 +73,9 @@ ssize_t store_##_name(struct kobject *kobj, struct attribute *attr, \
&bat_priv->_name, net_dev); \
}
-#define BAT_ATTR_SHOW_BOOL(_name) \
-ssize_t show_##_name(struct kobject *kobj, struct attribute *attr, \
- char *buff) \
+#define BAT_ATTR_SIF_SHOW_BOOL(_name) \
+ssize_t show_##_name(struct kobject *kobj, \
+ struct attribute *attr, char *buff) \
{ \
struct bat_priv *bat_priv = kobj_to_batpriv(kobj); \
return sprintf(buff, "%s\n", \
@@ -83,16 +83,17 @@ ssize_t show_##_name(struct kobject *kobj, struct attribute *attr, \
"disabled" : "enabled"); \
} \
-/* Use this, if you are going to turn a [name] in bat_priv on or off */
-#define BAT_ATTR_BOOL(_name, _mode, _post_func) \
- static BAT_ATTR_STORE_BOOL(_name, _post_func) \
- static BAT_ATTR_SHOW_BOOL(_name) \
+/* Use this, if you are going to turn a [name] in the soft-interface
+ * (bat_priv) on or off */
+#define BAT_ATTR_SIF_BOOL(_name, _mode, _post_func) \
+ static BAT_ATTR_SIF_STORE_BOOL(_name, _post_func) \
+ static BAT_ATTR_SIF_SHOW_BOOL(_name) \
static BAT_ATTR(_name, _mode, show_##_name, store_##_name)
-#define BAT_ATTR_STORE_UINT(_name, _min, _max, _post_func) \
+#define BAT_ATTR_SIF_STORE_UINT(_name, _min, _max, _post_func) \
ssize_t store_##_name(struct kobject *kobj, struct attribute *attr, \
- char *buff, size_t count) \
+ char *buff, size_t count) \
{ \
struct net_device *net_dev = kobj_to_netdev(kobj); \
struct bat_priv *bat_priv = netdev_priv(net_dev); \
@@ -100,19 +101,62 @@ ssize_t store_##_name(struct kobject *kobj, struct attribute *attr, \
attr, &bat_priv->_name, net_dev); \
}
-#define BAT_ATTR_SHOW_UINT(_name) \
-ssize_t show_##_name(struct kobject *kobj, struct attribute *attr, \
- char *buff) \
+#define BAT_ATTR_SIF_SHOW_UINT(_name) \
+ssize_t show_##_name(struct kobject *kobj, \
+ struct attribute *attr, char *buff) \
{ \
struct bat_priv *bat_priv = kobj_to_batpriv(kobj); \
return sprintf(buff, "%i\n", atomic_read(&bat_priv->_name)); \
} \
-/* Use this, if you are going to set [name] in bat_priv to unsigned integer
- * values only */
-#define BAT_ATTR_UINT(_name, _mode, _min, _max, _post_func) \
- static BAT_ATTR_STORE_UINT(_name, _min, _max, _post_func) \
- static BAT_ATTR_SHOW_UINT(_name) \
+/* Use this, if you are going to set [name] in the soft-interface
+ * (bat_priv) to an unsigned integer value */
+#define BAT_ATTR_SIF_UINT(_name, _mode, _min, _max, _post_func) \
+ static BAT_ATTR_SIF_STORE_UINT(_name, _min, _max, _post_func) \
+ static BAT_ATTR_SIF_SHOW_UINT(_name) \
+ static BAT_ATTR(_name, _mode, show_##_name, store_##_name)
+
+
+#define BAT_ATTR_HIF_STORE_UINT(_name, _min, _max, _post_func) \
+ssize_t store_##_name(struct kobject *kobj, struct attribute *attr, \
+ char *buff, size_t count) \
+{ \
+ struct net_device *net_dev = kobj_to_netdev(kobj); \
+ struct hard_iface *hard_iface = hardif_get_by_netdev(net_dev); \
+ ssize_t length; \
+ \
+ if (!hard_iface) \
+ return 0; \
+ \
+ length = __store_uint_attr(buff, count, _min, _max, _post_func, \
+ attr, &hard_iface->_name, net_dev); \
+ \
+ hardif_free_ref(hard_iface); \
+ return length; \
+}
+
+#define BAT_ATTR_HIF_SHOW_UINT(_name) \
+ssize_t show_##_name(struct kobject *kobj, \
+ struct attribute *attr, char *buff) \
+{ \
+ struct net_device *net_dev = kobj_to_netdev(kobj); \
+ struct hard_iface *hard_iface = hardif_get_by_netdev(net_dev); \
+ ssize_t length; \
+ \
+ if (!hard_iface) \
+ return 0; \
+ \
+ length = sprintf(buff, "%i\n", atomic_read(&hard_iface->_name));\
+ \
+ hardif_free_ref(hard_iface); \
+ return length; \
+}
+
+/* Use this, if you are going to set [name] in hard_iface to an
+ * unsigned integer value*/
+#define BAT_ATTR_HIF_UINT(_name, _mode, _min, _max, _post_func) \
+ static BAT_ATTR_HIF_STORE_UINT(_name, _min, _max, _post_func) \
+ static BAT_ATTR_HIF_SHOW_UINT(_name) \
static BAT_ATTR(_name, _mode, show_##_name, store_##_name)
@@ -384,24 +428,24 @@ static ssize_t store_gw_bwidth(struct kobject *kobj, struct attribute *attr,
return gw_bandwidth_set(net_dev, buff, count);
}
-BAT_ATTR_BOOL(aggregated_ogms, S_IRUGO | S_IWUSR, NULL);
-BAT_ATTR_BOOL(bonding, S_IRUGO | S_IWUSR, NULL);
+BAT_ATTR_SIF_BOOL(aggregated_ogms, S_IRUGO | S_IWUSR, NULL);
+BAT_ATTR_SIF_BOOL(bonding, S_IRUGO | S_IWUSR, NULL);
#ifdef CONFIG_BATMAN_ADV_BLA
-BAT_ATTR_BOOL(bridge_loop_avoidance, S_IRUGO | S_IWUSR, NULL);
+BAT_ATTR_SIF_BOOL(bridge_loop_avoidance, S_IRUGO | S_IWUSR, NULL);
#endif
-BAT_ATTR_BOOL(fragmentation, S_IRUGO | S_IWUSR, update_min_mtu);
-BAT_ATTR_BOOL(ap_isolation, S_IRUGO | S_IWUSR, NULL);
+BAT_ATTR_SIF_BOOL(fragmentation, S_IRUGO | S_IWUSR, update_min_mtu);
+BAT_ATTR_SIF_BOOL(ap_isolation, S_IRUGO | S_IWUSR, NULL);
static BAT_ATTR(vis_mode, S_IRUGO | S_IWUSR, show_vis_mode, store_vis_mode);
static BAT_ATTR(routing_algo, S_IRUGO, show_bat_algo, NULL);
static BAT_ATTR(gw_mode, S_IRUGO | S_IWUSR, show_gw_mode, store_gw_mode);
-BAT_ATTR_UINT(orig_interval, S_IRUGO | S_IWUSR, 2 * JITTER, INT_MAX, NULL);
-BAT_ATTR_UINT(hop_penalty, S_IRUGO | S_IWUSR, 0, TQ_MAX_VALUE, NULL);
-BAT_ATTR_UINT(gw_sel_class, S_IRUGO | S_IWUSR, 1, TQ_MAX_VALUE,
- post_gw_deselect);
+BAT_ATTR_SIF_UINT(orig_interval, S_IRUGO | S_IWUSR, 2 * JITTER, INT_MAX, NULL);
+BAT_ATTR_SIF_UINT(hop_penalty, S_IRUGO | S_IWUSR, 0, TQ_MAX_VALUE, NULL);
+BAT_ATTR_SIF_UINT(gw_sel_class, S_IRUGO | S_IWUSR, 1, TQ_MAX_VALUE,
+ post_gw_deselect);
static BAT_ATTR(gw_bandwidth, S_IRUGO | S_IWUSR, show_gw_bwidth,
store_gw_bwidth);
#ifdef CONFIG_BATMAN_ADV_DEBUG
-BAT_ATTR_UINT(log_level, S_IRUGO | S_IWUSR, 0, 15, NULL);
+BAT_ATTR_SIF_UINT(log_level, S_IRUGO | S_IWUSR, 0, 15, NULL);
#endif
static struct bat_attribute *mesh_attrs[] = {
diff --git a/net/batman-adv/bridge_loop_avoidance.c b/net/batman-adv/bridge_loop_avoidance.c
index ad394c6496cc..8bf97515a77d 100644
--- a/net/batman-adv/bridge_loop_avoidance.c
+++ b/net/batman-adv/bridge_loop_avoidance.c
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2011 B.A.T.M.A.N. contributors:
+ * Copyright (C) 2011-2012 B.A.T.M.A.N. contributors:
*
* Simon Wunderlich
*
diff --git a/net/batman-adv/bridge_loop_avoidance.h b/net/batman-adv/bridge_loop_avoidance.h
index 4a8e4fc766bc..e39f93acc28f 100644
--- a/net/batman-adv/bridge_loop_avoidance.h
+++ b/net/batman-adv/bridge_loop_avoidance.h
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2011 B.A.T.M.A.N. contributors:
+ * Copyright (C) 2011-2012 B.A.T.M.A.N. contributors:
*
* Simon Wunderlich
*
diff --git a/net/batman-adv/gateway_client.c b/net/batman-adv/gateway_client.c
index 6f9b9b78f77d..47f7186dcefc 100644
--- a/net/batman-adv/gateway_client.c
+++ b/net/batman-adv/gateway_client.c
@@ -558,10 +558,10 @@ static bool is_type_dhcprequest(struct sk_buff *skb, int header_len)
p++;
/* ...and then we jump over the data */
- if (pkt_len < *p)
+ if (pkt_len < 1 + (*p))
goto out;
- pkt_len -= *p;
- p += (*p);
+ pkt_len -= 1 + (*p);
+ p += 1 + (*p);
}
}
out:
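
The hunk above tightens the DHCP option walk in is_type_dhcprequest(): each option is a type byte, a length byte (*p) and *p data bytes, so both the remaining-length check and the cursor advance must account for the length byte itself (hence the extra "1 +"). A minimal stand-alone sketch of the corrected arithmetic (hypothetical helper for illustration, not the in-tree function):

	/* Sketch of the corrected TLV walk; layout assumed: type, len, data[len].
	 * Hypothetical helper, not the in-tree function. */
	static int tlv_has_option(const unsigned char *p, int pkt_len,
				  unsigned char wanted)
	{
		while (pkt_len > 0) {
			unsigned char type = *p++;
			pkt_len--;

			if (type == wanted)
				return 1;

			if (pkt_len < 1)	/* need the length byte itself */
				break;
			if (pkt_len < 1 + p[0])	/* length byte + data must fit */
				break;
			pkt_len -= 1 + p[0];	/* skip the length byte ... */
			p += 1 + p[0];		/* ... and the option data */
		}
		return 0;
	}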
diff --git a/net/batman-adv/hard-interface.c b/net/batman-adv/hard-interface.c
index 47c79d724ba3..dc334fa89847 100644
--- a/net/batman-adv/hard-interface.c
+++ b/net/batman-adv/hard-interface.c
@@ -32,12 +32,6 @@
#include <linux/if_arp.h>
-
-static int batman_skb_recv(struct sk_buff *skb,
- struct net_device *dev,
- struct packet_type *ptype,
- struct net_device *orig_dev);
-
void hardif_free_rcu(struct rcu_head *rcu)
{
struct hard_iface *hard_iface;
@@ -179,9 +173,9 @@ static void check_known_mac_addr(const struct net_device *net_dev)
net_dev->dev_addr))
continue;
- pr_warning("The newly added mac address (%pM) already exists on: %s\n",
- net_dev->dev_addr, hard_iface->net_dev->name);
- pr_warning("It is strongly recommended to keep mac addresses unique to avoid problems!\n");
+ pr_warn("The newly added mac address (%pM) already exists on: %s\n",
+ net_dev->dev_addr, hard_iface->net_dev->name);
+ pr_warn("It is strongly recommended to keep mac addresses unique to avoid problems!\n");
}
rcu_read_unlock();
}
@@ -234,7 +228,7 @@ static void hardif_activate_interface(struct hard_iface *hard_iface)
bat_priv = netdev_priv(hard_iface->soft_iface);
- bat_priv->bat_algo_ops->bat_ogm_update_mac(hard_iface);
+ bat_priv->bat_algo_ops->bat_iface_update_mac(hard_iface);
hard_iface->if_status = IF_TO_BE_ACTIVATED;
/**
@@ -530,7 +524,7 @@ static int hard_if_event(struct notifier_block *this,
check_known_mac_addr(hard_iface->net_dev);
bat_priv = netdev_priv(hard_iface->soft_iface);
- bat_priv->bat_algo_ops->bat_ogm_update_mac(hard_iface);
+ bat_priv->bat_algo_ops->bat_iface_update_mac(hard_iface);
primary_if = primary_if_get_selected(bat_priv);
if (!primary_if)
@@ -551,113 +545,6 @@ out:
return NOTIFY_DONE;
}
-/* incoming packets with the batman ethertype received on any active hard
- * interface */
-static int batman_skb_recv(struct sk_buff *skb, struct net_device *dev,
- struct packet_type *ptype,
- struct net_device *orig_dev)
-{
- struct bat_priv *bat_priv;
- struct batman_ogm_packet *batman_ogm_packet;
- struct hard_iface *hard_iface;
- int ret;
-
- hard_iface = container_of(ptype, struct hard_iface, batman_adv_ptype);
- skb = skb_share_check(skb, GFP_ATOMIC);
-
- /* skb was released by skb_share_check() */
- if (!skb)
- goto err_out;
-
- /* packet should hold at least type and version */
- if (unlikely(!pskb_may_pull(skb, 2)))
- goto err_free;
-
- /* expect a valid ethernet header here. */
- if (unlikely(skb->mac_len != ETH_HLEN || !skb_mac_header(skb)))
- goto err_free;
-
- if (!hard_iface->soft_iface)
- goto err_free;
-
- bat_priv = netdev_priv(hard_iface->soft_iface);
-
- if (atomic_read(&bat_priv->mesh_state) != MESH_ACTIVE)
- goto err_free;
-
- /* discard frames on not active interfaces */
- if (hard_iface->if_status != IF_ACTIVE)
- goto err_free;
-
- batman_ogm_packet = (struct batman_ogm_packet *)skb->data;
-
- if (batman_ogm_packet->header.version != COMPAT_VERSION) {
- bat_dbg(DBG_BATMAN, bat_priv,
- "Drop packet: incompatible batman version (%i)\n",
- batman_ogm_packet->header.version);
- goto err_free;
- }
-
- /* all receive handlers return whether they received or reused
- * the supplied skb. if not, we have to free the skb. */
-
- switch (batman_ogm_packet->header.packet_type) {
- /* batman originator packet */
- case BAT_IV_OGM:
- ret = recv_bat_ogm_packet(skb, hard_iface);
- break;
-
- /* batman icmp packet */
- case BAT_ICMP:
- ret = recv_icmp_packet(skb, hard_iface);
- break;
-
- /* unicast packet */
- case BAT_UNICAST:
- ret = recv_unicast_packet(skb, hard_iface);
- break;
-
- /* fragmented unicast packet */
- case BAT_UNICAST_FRAG:
- ret = recv_ucast_frag_packet(skb, hard_iface);
- break;
-
- /* broadcast packet */
- case BAT_BCAST:
- ret = recv_bcast_packet(skb, hard_iface);
- break;
-
- /* vis packet */
- case BAT_VIS:
- ret = recv_vis_packet(skb, hard_iface);
- break;
- /* Translation table query (request or response) */
- case BAT_TT_QUERY:
- ret = recv_tt_query(skb, hard_iface);
- break;
- /* Roaming advertisement */
- case BAT_ROAM_ADV:
- ret = recv_roam_adv(skb, hard_iface);
- break;
- default:
- ret = NET_RX_DROP;
- }
-
- if (ret == NET_RX_DROP)
- kfree_skb(skb);
-
- /* return NET_RX_SUCCESS in any case as we
- * most probably dropped the packet for
- * routing-logical reasons. */
-
- return NET_RX_SUCCESS;
-
-err_free:
- kfree_skb(skb);
-err_out:
- return NET_RX_DROP;
-}
-
/* This function returns true if the interface represented by ifindex is a
* 802.11 wireless device */
bool is_wifi_iface(int ifindex)
diff --git a/net/batman-adv/main.c b/net/batman-adv/main.c
index 791327219531..083a2993efe4 100644
--- a/net/batman-adv/main.c
+++ b/net/batman-adv/main.c
@@ -39,6 +39,7 @@
/* List manipulations on hardif_list have to be rtnl_lock()'ed,
* list traversals just rcu-locked */
struct list_head hardif_list;
+static int (*recv_packet_handler[256])(struct sk_buff *, struct hard_iface *);
char bat_routing_algo[20] = "BATMAN IV";
static struct hlist_head bat_algo_list;
@@ -46,11 +47,15 @@ unsigned char broadcast_addr[] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
struct workqueue_struct *bat_event_workqueue;
+static void recv_handler_init(void);
+
static int __init batman_init(void)
{
INIT_LIST_HEAD(&hardif_list);
INIT_HLIST_HEAD(&bat_algo_list);
+ recv_handler_init();
+
bat_iv_init();
/* the name should not be longer than 10 chars - see
@@ -179,6 +184,120 @@ int is_my_mac(const uint8_t *addr)
return 0;
}
+static int recv_unhandled_packet(struct sk_buff *skb,
+ struct hard_iface *recv_if)
+{
+ return NET_RX_DROP;
+}
+
+/* incoming packets with the batman ethertype received on any active hard
+ * interface
+ */
+int batman_skb_recv(struct sk_buff *skb, struct net_device *dev,
+ struct packet_type *ptype, struct net_device *orig_dev)
+{
+ struct bat_priv *bat_priv;
+ struct batman_ogm_packet *batman_ogm_packet;
+ struct hard_iface *hard_iface;
+ uint8_t idx;
+ int ret;
+
+ hard_iface = container_of(ptype, struct hard_iface, batman_adv_ptype);
+ skb = skb_share_check(skb, GFP_ATOMIC);
+
+ /* skb was released by skb_share_check() */
+ if (!skb)
+ goto err_out;
+
+ /* packet should hold at least type and version */
+ if (unlikely(!pskb_may_pull(skb, 2)))
+ goto err_free;
+
+ /* expect a valid ethernet header here. */
+ if (unlikely(skb->mac_len != ETH_HLEN || !skb_mac_header(skb)))
+ goto err_free;
+
+ if (!hard_iface->soft_iface)
+ goto err_free;
+
+ bat_priv = netdev_priv(hard_iface->soft_iface);
+
+ if (atomic_read(&bat_priv->mesh_state) != MESH_ACTIVE)
+ goto err_free;
+
+ /* discard frames on not active interfaces */
+ if (hard_iface->if_status != IF_ACTIVE)
+ goto err_free;
+
+ batman_ogm_packet = (struct batman_ogm_packet *)skb->data;
+
+ if (batman_ogm_packet->header.version != COMPAT_VERSION) {
+ bat_dbg(DBG_BATMAN, bat_priv,
+ "Drop packet: incompatible batman version (%i)\n",
+ batman_ogm_packet->header.version);
+ goto err_free;
+ }
+
+ /* all receive handlers return whether they received or reused
+ * the supplied skb. if not, we have to free the skb.
+ */
+ idx = batman_ogm_packet->header.packet_type;
+ ret = (*recv_packet_handler[idx])(skb, hard_iface);
+
+ if (ret == NET_RX_DROP)
+ kfree_skb(skb);
+
+ /* return NET_RX_SUCCESS in any case as we
+ * most probably dropped the packet for
+ * routing-logical reasons.
+ */
+ return NET_RX_SUCCESS;
+
+err_free:
+ kfree_skb(skb);
+err_out:
+ return NET_RX_DROP;
+}
+
+static void recv_handler_init(void)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(recv_packet_handler); i++)
+ recv_packet_handler[i] = recv_unhandled_packet;
+
+ /* batman icmp packet */
+ recv_packet_handler[BAT_ICMP] = recv_icmp_packet;
+ /* unicast packet */
+ recv_packet_handler[BAT_UNICAST] = recv_unicast_packet;
+ /* fragmented unicast packet */
+ recv_packet_handler[BAT_UNICAST_FRAG] = recv_ucast_frag_packet;
+ /* broadcast packet */
+ recv_packet_handler[BAT_BCAST] = recv_bcast_packet;
+ /* vis packet */
+ recv_packet_handler[BAT_VIS] = recv_vis_packet;
+ /* Translation table query (request or response) */
+ recv_packet_handler[BAT_TT_QUERY] = recv_tt_query;
+ /* Roaming advertisement */
+ recv_packet_handler[BAT_ROAM_ADV] = recv_roam_adv;
+}
+
+int recv_handler_register(uint8_t packet_type,
+ int (*recv_handler)(struct sk_buff *,
+ struct hard_iface *))
+{
+ if (recv_packet_handler[packet_type] != &recv_unhandled_packet)
+ return -EBUSY;
+
+ recv_packet_handler[packet_type] = recv_handler;
+ return 0;
+}
+
+void recv_handler_unregister(uint8_t packet_type)
+{
+ recv_packet_handler[packet_type] = recv_unhandled_packet;
+}
+
static struct bat_algo_ops *bat_algo_get(char *name)
{
struct bat_algo_ops *bat_algo_ops = NULL, *bat_algo_ops_tmp;
@@ -210,11 +329,10 @@ int bat_algo_register(struct bat_algo_ops *bat_algo_ops)
/* all algorithms must implement all ops (for now) */
if (!bat_algo_ops->bat_iface_enable ||
!bat_algo_ops->bat_iface_disable ||
+ !bat_algo_ops->bat_iface_update_mac ||
!bat_algo_ops->bat_primary_iface_set ||
- !bat_algo_ops->bat_ogm_update_mac ||
!bat_algo_ops->bat_ogm_schedule ||
- !bat_algo_ops->bat_ogm_emit ||
- !bat_algo_ops->bat_ogm_receive) {
+ !bat_algo_ops->bat_ogm_emit) {
pr_info("Routing algo '%s' does not implement required ops\n",
bat_algo_ops->name);
goto out;
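
With these main.c changes the per-type switch in batman_skb_recv() is gone: incoming frames are dispatched through the 256-entry recv_packet_handler[] table indexed by header.packet_type, and unclaimed slots fall through to recv_unhandled_packet(), which drops the frame. A routing algorithm (or any other packet owner) now claims a type with recv_handler_register() and releases it with recv_handler_unregister(). A hedged sketch of a caller, with the handler body left illustrative:

	/* Illustrative only: claiming the BAT_IV_OGM slot; the handler body is
	 * a placeholder, not the real OGM receive path. */
	static int my_ogm_recv(struct sk_buff *skb, struct hard_iface *recv_if)
	{
		/* parse and consume the OGM here ... */
		return NET_RX_SUCCESS;	/* NET_RX_DROP tells the caller to free the skb */
	}

	static int __init my_proto_init(void)
	{
		/* returns -EBUSY if another handler already owns this type */
		return recv_handler_register(BAT_IV_OGM, my_ogm_recv);
	}

	static void __exit my_proto_exit(void)
	{
		recv_handler_unregister(BAT_IV_OGM);
	}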
diff --git a/net/batman-adv/main.h b/net/batman-adv/main.h
index d9832acf558d..f4a3ec003479 100644
--- a/net/batman-adv/main.h
+++ b/net/batman-adv/main.h
@@ -28,7 +28,7 @@
#define DRIVER_DEVICE "batman-adv"
#ifndef SOURCE_VERSION
-#define SOURCE_VERSION "2012.1.0"
+#define SOURCE_VERSION "2012.2.0"
#endif
/* B.A.T.M.A.N. parameters */
@@ -155,6 +155,12 @@ void mesh_free(struct net_device *soft_iface);
void inc_module_count(void);
void dec_module_count(void);
int is_my_mac(const uint8_t *addr);
+int batman_skb_recv(struct sk_buff *skb, struct net_device *dev,
+ struct packet_type *ptype, struct net_device *orig_dev);
+int recv_handler_register(uint8_t packet_type,
+ int (*recv_handler)(struct sk_buff *,
+ struct hard_iface *));
+void recv_handler_unregister(uint8_t packet_type);
int bat_algo_register(struct bat_algo_ops *bat_algo_ops);
int bat_algo_select(struct bat_priv *bat_priv, char *name);
int bat_algo_seq_print_text(struct seq_file *seq, void *offset);
diff --git a/net/batman-adv/originator.c b/net/batman-adv/originator.c
index ce4969885894..41147942ba53 100644
--- a/net/batman-adv/originator.c
+++ b/net/batman-adv/originator.c
@@ -35,7 +35,8 @@ static void purge_orig(struct work_struct *work);
static void start_purge_timer(struct bat_priv *bat_priv)
{
INIT_DELAYED_WORK(&bat_priv->orig_work, purge_orig);
- queue_delayed_work(bat_event_workqueue, &bat_priv->orig_work, 1 * HZ);
+ queue_delayed_work(bat_event_workqueue,
+ &bat_priv->orig_work, msecs_to_jiffies(1000));
}
/* returns 1 if they are the same originator */
@@ -84,35 +85,30 @@ struct neigh_node *orig_node_get_router(struct orig_node *orig_node)
return router;
}
-struct neigh_node *create_neighbor(struct orig_node *orig_node,
- struct orig_node *orig_neigh_node,
- const uint8_t *neigh,
- struct hard_iface *if_incoming)
+struct neigh_node *batadv_neigh_node_new(struct hard_iface *hard_iface,
+ const uint8_t *neigh_addr,
+ uint32_t seqno)
{
- struct bat_priv *bat_priv = netdev_priv(if_incoming->soft_iface);
+ struct bat_priv *bat_priv = netdev_priv(hard_iface->soft_iface);
struct neigh_node *neigh_node;
- bat_dbg(DBG_BATMAN, bat_priv,
- "Creating new last-hop neighbor of originator\n");
-
neigh_node = kzalloc(sizeof(*neigh_node), GFP_ATOMIC);
if (!neigh_node)
- return NULL;
+ goto out;
INIT_HLIST_NODE(&neigh_node->list);
- INIT_LIST_HEAD(&neigh_node->bonding_list);
- spin_lock_init(&neigh_node->tq_lock);
- memcpy(neigh_node->addr, neigh, ETH_ALEN);
- neigh_node->orig_node = orig_neigh_node;
- neigh_node->if_incoming = if_incoming;
+ memcpy(neigh_node->addr, neigh_addr, ETH_ALEN);
+ spin_lock_init(&neigh_node->lq_update_lock);
/* extra reference for return */
atomic_set(&neigh_node->refcount, 2);
- spin_lock_bh(&orig_node->neigh_list_lock);
- hlist_add_head_rcu(&neigh_node->list, &orig_node->neigh_list);
- spin_unlock_bh(&orig_node->neigh_list_lock);
+ bat_dbg(DBG_BATMAN, bat_priv,
+ "Creating new neighbor %pM, initial seqno %d\n",
+ neigh_addr, seqno);
+
+out:
return neigh_node;
}
@@ -274,6 +270,7 @@ static bool purge_orig_neighbors(struct bat_priv *bat_priv,
struct hlist_node *node, *node_tmp;
struct neigh_node *neigh_node;
bool neigh_purged = false;
+ unsigned long last_seen;
*best_neigh_node = NULL;
@@ -283,11 +280,13 @@ static bool purge_orig_neighbors(struct bat_priv *bat_priv,
hlist_for_each_entry_safe(neigh_node, node, node_tmp,
&orig_node->neigh_list, list) {
- if ((has_timed_out(neigh_node->last_valid, PURGE_TIMEOUT)) ||
+ if ((has_timed_out(neigh_node->last_seen, PURGE_TIMEOUT)) ||
(neigh_node->if_incoming->if_status == IF_INACTIVE) ||
(neigh_node->if_incoming->if_status == IF_NOT_IN_USE) ||
(neigh_node->if_incoming->if_status == IF_TO_BE_REMOVED)) {
+ last_seen = neigh_node->last_seen;
+
if ((neigh_node->if_incoming->if_status ==
IF_INACTIVE) ||
(neigh_node->if_incoming->if_status ==
@@ -300,9 +299,9 @@ static bool purge_orig_neighbors(struct bat_priv *bat_priv,
neigh_node->if_incoming->net_dev->name);
else
bat_dbg(DBG_BATMAN, bat_priv,
- "neighbor timeout: originator %pM, neighbor: %pM, last_valid: %lu\n",
+ "neighbor timeout: originator %pM, neighbor: %pM, last_seen: %u\n",
orig_node->orig, neigh_node->addr,
- (neigh_node->last_valid / HZ));
+ jiffies_to_msecs(last_seen));
neigh_purged = true;
@@ -325,10 +324,11 @@ static bool purge_orig_node(struct bat_priv *bat_priv,
{
struct neigh_node *best_neigh_node;
- if (has_timed_out(orig_node->last_valid, 2 * PURGE_TIMEOUT)) {
+ if (has_timed_out(orig_node->last_seen, 2 * PURGE_TIMEOUT)) {
bat_dbg(DBG_BATMAN, bat_priv,
- "Originator timeout: originator %pM, last_valid %lu\n",
- orig_node->orig, (orig_node->last_valid / HZ));
+ "Originator timeout: originator %pM, last_seen %u\n",
+ orig_node->orig,
+ jiffies_to_msecs(orig_node->last_seen));
return true;
} else {
if (purge_orig_neighbors(bat_priv, orig_node,
@@ -446,9 +446,9 @@ int orig_seq_print_text(struct seq_file *seq, void *offset)
goto next;
last_seen_secs = jiffies_to_msecs(jiffies -
- orig_node->last_valid) / 1000;
+ orig_node->last_seen) / 1000;
last_seen_msecs = jiffies_to_msecs(jiffies -
- orig_node->last_valid) % 1000;
+ orig_node->last_seen) % 1000;
seq_printf(seq, "%pM %4i.%03is (%3i) %pM [%10s]:",
orig_node->orig, last_seen_secs,
diff --git a/net/batman-adv/originator.h b/net/batman-adv/originator.h
index 3fe2eda85652..f74d0d693359 100644
--- a/net/batman-adv/originator.h
+++ b/net/batman-adv/originator.h
@@ -29,10 +29,9 @@ void originator_free(struct bat_priv *bat_priv);
void purge_orig_ref(struct bat_priv *bat_priv);
void orig_node_free_ref(struct orig_node *orig_node);
struct orig_node *get_orig_node(struct bat_priv *bat_priv, const uint8_t *addr);
-struct neigh_node *create_neighbor(struct orig_node *orig_node,
- struct orig_node *orig_neigh_node,
- const uint8_t *neigh,
- struct hard_iface *if_incoming);
+struct neigh_node *batadv_neigh_node_new(struct hard_iface *hard_iface,
+ const uint8_t *neigh_addr,
+ uint32_t seqno);
void neigh_node_free_ref(struct neigh_node *neigh_node);
struct neigh_node *orig_node_get_router(struct orig_node *orig_node);
int orig_seq_print_text(struct seq_file *seq, void *offset);
diff --git a/net/batman-adv/packet.h b/net/batman-adv/packet.h
index f54969c61a1e..0ee1af770798 100644
--- a/net/batman-adv/packet.h
+++ b/net/batman-adv/packet.h
@@ -39,6 +39,7 @@ enum bat_packettype {
#define COMPAT_VERSION 14
enum batman_iv_flags {
+ NOT_BEST_NEXT_HOP = 1 << 3,
PRIMARIES_FIRST_HOP = 1 << 4,
VIS_SERVER = 1 << 5,
DIRECTLINK = 1 << 6
diff --git a/net/batman-adv/routing.c b/net/batman-adv/routing.c
index ff560863bc74..840e2c64a301 100644
--- a/net/batman-adv/routing.c
+++ b/net/batman-adv/routing.c
@@ -234,51 +234,46 @@ int window_protected(struct bat_priv *bat_priv, int32_t seq_num_diff,
{
if ((seq_num_diff <= -TQ_LOCAL_WINDOW_SIZE) ||
(seq_num_diff >= EXPECTED_SEQNO_RANGE)) {
- if (has_timed_out(*last_reset, RESET_PROTECTION_MS)) {
-
- *last_reset = jiffies;
- bat_dbg(DBG_BATMAN, bat_priv,
- "old packet received, start protection\n");
-
- return 0;
- } else {
+ if (!has_timed_out(*last_reset, RESET_PROTECTION_MS))
return 1;
- }
+
+ *last_reset = jiffies;
+ bat_dbg(DBG_BATMAN, bat_priv,
+ "old packet received, start protection\n");
}
+
return 0;
}
-int recv_bat_ogm_packet(struct sk_buff *skb, struct hard_iface *hard_iface)
+bool check_management_packet(struct sk_buff *skb,
+ struct hard_iface *hard_iface,
+ int header_len)
{
- struct bat_priv *bat_priv = netdev_priv(hard_iface->soft_iface);
struct ethhdr *ethhdr;
/* drop packet if it has not necessary minimum size */
- if (unlikely(!pskb_may_pull(skb, BATMAN_OGM_HLEN)))
- return NET_RX_DROP;
+ if (unlikely(!pskb_may_pull(skb, header_len)))
+ return false;
ethhdr = (struct ethhdr *)skb_mac_header(skb);
/* packet with broadcast indication but unicast recipient */
if (!is_broadcast_ether_addr(ethhdr->h_dest))
- return NET_RX_DROP;
+ return false;
/* packet with broadcast sender address */
if (is_broadcast_ether_addr(ethhdr->h_source))
- return NET_RX_DROP;
+ return false;
/* create a copy of the skb, if needed, to modify it. */
if (skb_cow(skb, 0) < 0)
- return NET_RX_DROP;
+ return false;
/* keep skb linear */
if (skb_linearize(skb) < 0)
- return NET_RX_DROP;
+ return false;
- bat_priv->bat_algo_ops->bat_ogm_receive(hard_iface, skb);
-
- kfree_skb(skb);
- return NET_RX_SUCCESS;
+ return true;
}
static int recv_my_icmp_packet(struct bat_priv *bat_priv,
@@ -918,12 +913,20 @@ static int check_unicast_ttvn(struct bat_priv *bat_priv,
/* Check whether I have to reroute the packet */
if (seq_before(unicast_packet->ttvn, curr_ttvn) || tt_poss_change) {
- /* Linearize the skb before accessing it */
- if (skb_linearize(skb) < 0)
+ /* check if there is enough data before accessing it */
+ if (!pskb_may_pull(skb, sizeof(struct unicast_packet) +
+ ETH_HLEN))
return 0;
ethhdr = (struct ethhdr *)(skb->data +
sizeof(struct unicast_packet));
+
+ /* we don't have an updated route for this client, so we should
+ * not try to reroute the packet!!
+ */
+ if (tt_global_client_is_roaming(bat_priv, ethhdr->h_dest))
+ return 1;
+
orig_node = transtable_search(bat_priv, NULL, ethhdr->h_dest);
if (!orig_node) {
diff --git a/net/batman-adv/routing.h b/net/batman-adv/routing.h
index 3d729cb17113..d6bbbebb6567 100644
--- a/net/batman-adv/routing.h
+++ b/net/batman-adv/routing.h
@@ -23,6 +23,9 @@
#define _NET_BATMAN_ADV_ROUTING_H_
void slide_own_bcast_window(struct hard_iface *hard_iface);
+bool check_management_packet(struct sk_buff *skb,
+ struct hard_iface *hard_iface,
+ int header_len);
void update_route(struct bat_priv *bat_priv, struct orig_node *orig_node,
struct neigh_node *neigh_node);
int recv_icmp_packet(struct sk_buff *skb, struct hard_iface *recv_if);
@@ -30,7 +33,6 @@ int recv_unicast_packet(struct sk_buff *skb, struct hard_iface *recv_if);
int recv_ucast_frag_packet(struct sk_buff *skb, struct hard_iface *recv_if);
int recv_bcast_packet(struct sk_buff *skb, struct hard_iface *recv_if);
int recv_vis_packet(struct sk_buff *skb, struct hard_iface *recv_if);
-int recv_bat_ogm_packet(struct sk_buff *skb, struct hard_iface *recv_if);
int recv_tt_query(struct sk_buff *skb, struct hard_iface *recv_if);
int recv_roam_adv(struct sk_buff *skb, struct hard_iface *recv_if);
struct neigh_node *find_router(struct bat_priv *bat_priv,
diff --git a/net/batman-adv/send.c b/net/batman-adv/send.c
index 7c66b6121fa6..f47299f22c68 100644
--- a/net/batman-adv/send.c
+++ b/net/batman-adv/send.c
@@ -45,8 +45,8 @@ int send_skb_packet(struct sk_buff *skb, struct hard_iface *hard_iface,
goto send_skb_err;
if (!(hard_iface->net_dev->flags & IFF_UP)) {
- pr_warning("Interface %s is not up - can't send packet via that interface!\n",
- hard_iface->net_dev->name);
+ pr_warn("Interface %s is not up - can't send packet via that interface!\n",
+ hard_iface->net_dev->name);
goto send_skb_err;
}
@@ -292,7 +292,7 @@ static void send_outstanding_bcast_packet(struct work_struct *work)
/* if we still have some more bcasts to send */
if (forw_packet->num_packets < 3) {
_add_bcast_packet_to_list(bat_priv, forw_packet,
- ((5 * HZ) / 1000));
+ msecs_to_jiffies(5));
return;
}
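
The timer changes above (and the purge timer in originator.c) swap open-coded HZ arithmetic for msecs_to_jiffies(). Besides reading better, this keeps the delay from collapsing to zero jiffies on low-HZ kernels; a worked example, assuming HZ=100:

	/*
	 * (5 * HZ) / 1000      = 500 / 1000 = 0 jiffies  -> requeue immediately
	 * msecs_to_jiffies(5)  = 1 jiffy (rounded up)    -> ~10 ms delay
	 */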
diff --git a/net/batman-adv/translation-table.c b/net/batman-adv/translation-table.c
index a38d315d3cd6..a66c2dcd1088 100644
--- a/net/batman-adv/translation-table.c
+++ b/net/batman-adv/translation-table.c
@@ -1,7 +1,7 @@
/*
* Copyright (C) 2007-2012 B.A.T.M.A.N. contributors:
*
- * Marek Lindner, Simon Wunderlich
+ * Marek Lindner, Simon Wunderlich, Antonio Quartulli
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of version 2 of the GNU General Public
@@ -206,6 +206,8 @@ void tt_local_add(struct net_device *soft_iface, const uint8_t *addr,
if (tt_local_entry) {
tt_local_entry->last_seen = jiffies;
+ /* possibly unset the TT_CLIENT_PENDING flag */
+ tt_local_entry->common.flags &= ~TT_CLIENT_PENDING;
goto out;
}
@@ -2117,3 +2119,22 @@ request_table:
}
}
}
+
+/* returns true if we know that the client has moved from its old
+ * originator to another one. The entry is still kept for consistency
+ * purposes
+ */
+bool tt_global_client_is_roaming(struct bat_priv *bat_priv, uint8_t *addr)
+{
+ struct tt_global_entry *tt_global_entry;
+ bool ret = false;
+
+ tt_global_entry = tt_global_hash_find(bat_priv, addr);
+ if (!tt_global_entry)
+ goto out;
+
+ ret = tt_global_entry->common.flags & TT_CLIENT_ROAM;
+ tt_global_entry_free_ref(tt_global_entry);
+out:
+ return ret;
+}
diff --git a/net/batman-adv/translation-table.h b/net/batman-adv/translation-table.h
index bfebe26edd8e..c43374dc364d 100644
--- a/net/batman-adv/translation-table.h
+++ b/net/batman-adv/translation-table.h
@@ -1,7 +1,7 @@
/*
* Copyright (C) 2007-2012 B.A.T.M.A.N. contributors:
*
- * Marek Lindner, Simon Wunderlich
+ * Marek Lindner, Simon Wunderlich, Antonio Quartulli
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of version 2 of the GNU General Public
@@ -53,5 +53,7 @@ bool is_ap_isolated(struct bat_priv *bat_priv, uint8_t *src, uint8_t *dst);
void tt_update_orig(struct bat_priv *bat_priv, struct orig_node *orig_node,
const unsigned char *tt_buff, uint8_t tt_num_changes,
uint8_t ttvn, uint16_t tt_crc);
+bool tt_global_client_is_roaming(struct bat_priv *bat_priv, uint8_t *addr);
+
#endif /* _NET_BATMAN_ADV_TRANSLATION_TABLE_H_ */
diff --git a/net/batman-adv/types.h b/net/batman-adv/types.h
index 2f4848b776a7..61308e8016ff 100644
--- a/net/batman-adv/types.h
+++ b/net/batman-adv/types.h
@@ -52,7 +52,7 @@ struct hard_iface {
/**
* orig_node - structure for orig_list maintaining nodes of mesh
* @primary_addr: hosts primary interface address
- * @last_valid: when last packet from this node was received
+ * @last_seen: when last packet from this node was received
* @bcast_seqno_reset: time when the broadcast seqno window was reset
* @batman_seqno_reset: time when the batman seqno window was reset
* @gw_flags: flags related to gateway class
@@ -70,7 +70,7 @@ struct orig_node {
struct neigh_node __rcu *router; /* rcu protected pointer */
unsigned long *bcast_own;
uint8_t *bcast_own_sum;
- unsigned long last_valid;
+ unsigned long last_seen;
unsigned long bcast_seqno_reset;
unsigned long batman_seqno_reset;
uint8_t gw_flags;
@@ -120,7 +120,7 @@ struct gw_node {
/**
* neigh_node
- * @last_valid: when last packet via this neighbor was received
+ * @last_seen: when last packet via this neighbor was received
*/
struct neigh_node {
struct hlist_node list;
@@ -131,13 +131,13 @@ struct neigh_node {
uint8_t tq_avg;
uint8_t last_ttl;
struct list_head bonding_list;
- unsigned long last_valid;
+ unsigned long last_seen;
DECLARE_BITMAP(real_bits, TQ_LOCAL_WINDOW_SIZE);
atomic_t refcount;
struct rcu_head rcu;
struct orig_node *orig_node;
struct hard_iface *if_incoming;
- spinlock_t tq_lock; /* protects: tq_recv, tq_index */
+ spinlock_t lq_update_lock; /* protects: tq_recv, tq_index */
};
#ifdef CONFIG_BATMAN_ADV_BLA
@@ -381,18 +381,17 @@ struct bat_algo_ops {
int (*bat_iface_enable)(struct hard_iface *hard_iface);
/* de-init routing info when hard-interface is disabled */
void (*bat_iface_disable)(struct hard_iface *hard_iface);
+ /* (re-)init mac addresses of the protocol information
+ * belonging to this hard-interface
+ */
+ void (*bat_iface_update_mac)(struct hard_iface *hard_iface);
/* called when primary interface is selected / changed */
void (*bat_primary_iface_set)(struct hard_iface *hard_iface);
- /* init mac addresses of the OGM belonging to this hard-interface */
- void (*bat_ogm_update_mac)(struct hard_iface *hard_iface);
/* prepare a new outgoing OGM for the send queue */
void (*bat_ogm_schedule)(struct hard_iface *hard_iface,
int tt_num_changes);
/* send scheduled OGM */
void (*bat_ogm_emit)(struct forw_packet *forw_packet);
- /* receive incoming OGM */
- void (*bat_ogm_receive)(struct hard_iface *if_incoming,
- struct sk_buff *skb);
};
#endif /* _NET_BATMAN_ADV_TYPES_H_ */
diff --git a/net/batman-adv/unicast.c b/net/batman-adv/unicast.c
index 676f6a626b2c..74175c210858 100644
--- a/net/batman-adv/unicast.c
+++ b/net/batman-adv/unicast.c
@@ -331,6 +331,14 @@ find_router:
unicast_packet->ttvn =
(uint8_t)atomic_read(&orig_node->last_ttvn);
+ /* inform the destination node that we are still missing a correct route
+ * for this client. The destination will receive this packet and will
+ * try to reroute it because the ttvn contained in the header is less
+ * than the current one
+ */
+ if (tt_global_client_is_roaming(bat_priv, ethhdr->h_dest))
+ unicast_packet->ttvn = unicast_packet->ttvn - 1;
+
if (atomic_read(&bat_priv->fragmentation) &&
data_len + sizeof(*unicast_packet) >
neigh_node->if_incoming->net_dev->mtu) {
diff --git a/net/bluetooth/bnep/core.c b/net/bluetooth/bnep/core.c
index 4fab4362b5aa..031d7d656754 100644
--- a/net/bluetooth/bnep/core.c
+++ b/net/bluetooth/bnep/core.c
@@ -69,7 +69,7 @@ static struct bnep_session *__bnep_get_session(u8 *dst)
BT_DBG("");
list_for_each_entry(s, &bnep_session_list, list)
- if (!compare_ether_addr(dst, s->eh.h_source))
+ if (ether_addr_equal(dst, s->eh.h_source))
return s;
return NULL;
@@ -422,10 +422,10 @@ static inline int bnep_tx_frame(struct bnep_session *s, struct sk_buff *skb)
iv[il++] = (struct kvec) { &type, 1 };
len++;
- if (compress_src && !compare_ether_addr(eh->h_dest, s->eh.h_source))
+ if (compress_src && ether_addr_equal(eh->h_dest, s->eh.h_source))
type |= 0x01;
- if (compress_dst && !compare_ether_addr(eh->h_source, s->eh.h_dest))
+ if (compress_dst && ether_addr_equal(eh->h_source, s->eh.h_dest))
type |= 0x02;
if (type)
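
This hunk (and the bridge hunks that follow) convert compare_ether_addr() callers to ether_addr_equal(), which flips the sense of the test: compare_ether_addr() returns 0 on a match, while ether_addr_equal() returns true. So !compare_ether_addr(a, b) becomes ether_addr_equal(a, b), and a bare compare_ether_addr(a, b) becomes !ether_addr_equal(a, b). A one-line sketch of the equivalence being relied on:

	/* Sketch: ether_addr_equal(a, b) is equivalent to !compare_ether_addr(a, b). */
	static bool addrs_match(const u8 *a, const u8 *b)
	{
		return ether_addr_equal(a, b);	/* old spelling: !compare_ether_addr(a, b) */
	}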
diff --git a/net/bridge/br_device.c b/net/bridge/br_device.c
index d6e5929458b1..929e48aed444 100644
--- a/net/bridge/br_device.c
+++ b/net/bridge/br_device.c
@@ -170,7 +170,7 @@ static int br_set_mac_address(struct net_device *dev, void *p)
return -EADDRNOTAVAIL;
spin_lock_bh(&br->lock);
- if (compare_ether_addr(dev->dev_addr, addr->sa_data)) {
+ if (!ether_addr_equal(dev->dev_addr, addr->sa_data)) {
dev->addr_assign_type &= ~NET_ADDR_RANDOM;
memcpy(dev->dev_addr, addr->sa_data, ETH_ALEN);
br_fdb_change_mac_address(br, addr->sa_data);
diff --git a/net/bridge/br_fdb.c b/net/bridge/br_fdb.c
index 5945c54bc2de..d21f32383517 100644
--- a/net/bridge/br_fdb.c
+++ b/net/bridge/br_fdb.c
@@ -107,8 +107,8 @@ void br_fdb_changeaddr(struct net_bridge_port *p, const unsigned char *newaddr)
struct net_bridge_port *op;
list_for_each_entry(op, &br->port_list, list) {
if (op != p &&
- !compare_ether_addr(op->dev->dev_addr,
- f->addr.addr)) {
+ ether_addr_equal(op->dev->dev_addr,
+ f->addr.addr)) {
f->dst = op;
goto insert;
}
@@ -214,8 +214,8 @@ void br_fdb_delete_by_port(struct net_bridge *br,
struct net_bridge_port *op;
list_for_each_entry(op, &br->port_list, list) {
if (op != p &&
- !compare_ether_addr(op->dev->dev_addr,
- f->addr.addr)) {
+ ether_addr_equal(op->dev->dev_addr,
+ f->addr.addr)) {
f->dst = op;
goto skip_delete;
}
@@ -237,7 +237,7 @@ struct net_bridge_fdb_entry *__br_fdb_get(struct net_bridge *br,
struct net_bridge_fdb_entry *fdb;
hlist_for_each_entry_rcu(fdb, h, &br->hash[br_mac_hash(addr)], hlist) {
- if (!compare_ether_addr(fdb->addr.addr, addr)) {
+ if (ether_addr_equal(fdb->addr.addr, addr)) {
if (unlikely(has_expired(br, fdb)))
break;
return fdb;
@@ -331,7 +331,7 @@ static struct net_bridge_fdb_entry *fdb_find(struct hlist_head *head,
struct net_bridge_fdb_entry *fdb;
hlist_for_each_entry(fdb, h, head, hlist) {
- if (!compare_ether_addr(fdb->addr.addr, addr))
+ if (ether_addr_equal(fdb->addr.addr, addr))
return fdb;
}
return NULL;
@@ -344,7 +344,7 @@ static struct net_bridge_fdb_entry *fdb_find_rcu(struct hlist_head *head,
struct net_bridge_fdb_entry *fdb;
hlist_for_each_entry_rcu(fdb, h, head, hlist) {
- if (!compare_ether_addr(fdb->addr.addr, addr))
+ if (ether_addr_equal(fdb->addr.addr, addr))
return fdb;
}
return NULL;
diff --git a/net/bridge/br_input.c b/net/bridge/br_input.c
index 5a31731be4d0..76f15fda0212 100644
--- a/net/bridge/br_input.c
+++ b/net/bridge/br_input.c
@@ -216,7 +216,7 @@ forward:
}
/* fall through */
case BR_STATE_LEARNING:
- if (!compare_ether_addr(p->br->dev->dev_addr, dest))
+ if (ether_addr_equal(p->br->dev->dev_addr, dest))
skb->pkt_type = PACKET_HOST;
NF_HOOK(NFPROTO_BRIDGE, NF_BR_PRE_ROUTING, skb, skb->dev, NULL,
diff --git a/net/bridge/br_multicast.c b/net/bridge/br_multicast.c
index 5ca4c50ea233..b66581208cb2 100644
--- a/net/bridge/br_multicast.c
+++ b/net/bridge/br_multicast.c
@@ -460,8 +460,8 @@ static struct sk_buff *br_ip6_multicast_alloc_query(struct net_bridge *br,
hopopt[3] = 2; /* Length of RA Option */
hopopt[4] = 0; /* Type = 0x0000 (MLD) */
hopopt[5] = 0;
- hopopt[6] = IPV6_TLV_PAD0; /* Pad0 */
- hopopt[7] = IPV6_TLV_PAD0; /* Pad0 */
+ hopopt[6] = IPV6_TLV_PAD1; /* Pad1 */
+ hopopt[7] = IPV6_TLV_PAD1; /* Pad1 */
skb_put(skb, sizeof(*ip6h) + 8);
diff --git a/net/bridge/br_netfilter.c b/net/bridge/br_netfilter.c
index dce55d4ee83b..e41456bd3cc6 100644
--- a/net/bridge/br_netfilter.c
+++ b/net/bridge/br_netfilter.c
@@ -558,7 +558,7 @@ static int check_hbh_len(struct sk_buff *skb)
int optlen = nh[off + 1] + 2;
switch (nh[off]) {
- case IPV6_TLV_PAD0:
+ case IPV6_TLV_PAD1:
optlen = 1;
break;
diff --git a/net/bridge/br_stp_bpdu.c b/net/bridge/br_stp_bpdu.c
index e16aade51ae0..fd30a6022dea 100644
--- a/net/bridge/br_stp_bpdu.c
+++ b/net/bridge/br_stp_bpdu.c
@@ -167,7 +167,7 @@ void br_stp_rcv(const struct stp_proto *proto, struct sk_buff *skb,
if (p->state == BR_STATE_DISABLED)
goto out;
- if (compare_ether_addr(dest, br->group_addr) != 0)
+ if (!ether_addr_equal(dest, br->group_addr))
goto out;
buf = skb_pull(skb, 3);
diff --git a/net/bridge/br_stp_if.c b/net/bridge/br_stp_if.c
index f494496373d6..9d5a414a3943 100644
--- a/net/bridge/br_stp_if.c
+++ b/net/bridge/br_stp_if.c
@@ -178,7 +178,7 @@ void br_stp_set_enabled(struct net_bridge *br, unsigned long val)
/* called under bridge lock */
void br_stp_change_bridge_id(struct net_bridge *br, const unsigned char *addr)
{
- /* should be aligned on 2 bytes for compare_ether_addr() */
+ /* should be aligned on 2 bytes for ether_addr_equal() */
unsigned short oldaddr_aligned[ETH_ALEN >> 1];
unsigned char *oldaddr = (unsigned char *)oldaddr_aligned;
struct net_bridge_port *p;
@@ -191,12 +191,11 @@ void br_stp_change_bridge_id(struct net_bridge *br, const unsigned char *addr)
memcpy(br->dev->dev_addr, addr, ETH_ALEN);
list_for_each_entry(p, &br->port_list, list) {
- if (!compare_ether_addr(p->designated_bridge.addr, oldaddr))
+ if (ether_addr_equal(p->designated_bridge.addr, oldaddr))
memcpy(p->designated_bridge.addr, addr, ETH_ALEN);
- if (!compare_ether_addr(p->designated_root.addr, oldaddr))
+ if (ether_addr_equal(p->designated_root.addr, oldaddr))
memcpy(p->designated_root.addr, addr, ETH_ALEN);
-
}
br_configuration_update(br);
@@ -205,7 +204,7 @@ void br_stp_change_bridge_id(struct net_bridge *br, const unsigned char *addr)
br_become_root_bridge(br);
}
-/* should be aligned on 2 bytes for compare_ether_addr() */
+/* should be aligned on 2 bytes for ether_addr_equal() */
static const unsigned short br_mac_zero_aligned[ETH_ALEN >> 1];
/* called under bridge lock */
@@ -227,7 +226,7 @@ bool br_stp_recalculate_bridge_id(struct net_bridge *br)
}
- if (compare_ether_addr(br->bridge_id.addr, addr) == 0)
+ if (ether_addr_equal(br->bridge_id.addr, addr))
return false; /* no change */
br_stp_change_bridge_id(br, addr);
diff --git a/net/bridge/netfilter/ebt_stp.c b/net/bridge/netfilter/ebt_stp.c
index 5b33a2e634a6..071d87214dde 100644
--- a/net/bridge/netfilter/ebt_stp.c
+++ b/net/bridge/netfilter/ebt_stp.c
@@ -164,8 +164,8 @@ static int ebt_stp_mt_check(const struct xt_mtchk_param *par)
!(info->bitmask & EBT_STP_MASK))
return -EINVAL;
/* Make sure the match only receives stp frames */
- if (compare_ether_addr(e->destmac, bridge_ula) ||
- compare_ether_addr(e->destmsk, msk) || !(e->bitmask & EBT_DESTMAC))
+ if (!ether_addr_equal(e->destmac, bridge_ula) ||
+ !ether_addr_equal(e->destmsk, msk) || !(e->bitmask & EBT_DESTMAC))
return -EINVAL;
return 0;
diff --git a/net/caif/caif_socket.c b/net/caif/caif_socket.c
index 0dccdb3c7d26..fb8944355264 100644
--- a/net/caif/caif_socket.c
+++ b/net/caif/caif_socket.c
@@ -131,10 +131,9 @@ static int caif_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
if (atomic_read(&sk->sk_rmem_alloc) + skb->truesize >=
(unsigned int)sk->sk_rcvbuf && rx_flow_is_on(cf_sk)) {
- if (net_ratelimit())
- pr_debug("sending flow OFF (queue len = %d %d)\n",
- atomic_read(&cf_sk->sk.sk_rmem_alloc),
- sk_rcvbuf_lowwater(cf_sk));
+ net_dbg_ratelimited("sending flow OFF (queue len = %d %d)\n",
+ atomic_read(&cf_sk->sk.sk_rmem_alloc),
+ sk_rcvbuf_lowwater(cf_sk));
set_rx_flow_off(cf_sk);
caif_flow_ctrl(sk, CAIF_MODEMCMD_FLOW_OFF_REQ);
}
@@ -144,8 +143,7 @@ static int caif_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
return err;
if (!sk_rmem_schedule(sk, skb->truesize) && rx_flow_is_on(cf_sk)) {
set_rx_flow_off(cf_sk);
- if (net_ratelimit())
- pr_debug("sending flow OFF due to rmem_schedule\n");
+ net_dbg_ratelimited("sending flow OFF due to rmem_schedule\n");
caif_flow_ctrl(sk, CAIF_MODEMCMD_FLOW_OFF_REQ);
}
skb->dev = NULL;
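
The caif hunks (and similar ones in net/core below) fold the common "if (net_ratelimit()) pr_xxx(...)" pattern into the net_xxx_ratelimited() helpers; the rate limiting itself is unchanged, only the boilerplate goes away. A before/after sketch with placeholder arguments:

	/* Sketch only; a and b stand in for the real queue counters. */
	static void flow_off_dbg(int a, int b)
	{
		if (net_ratelimit())	/* old, open-coded form */
			pr_debug("sending flow OFF (queue len = %d %d)\n", a, b);

		/* new form, same rate limiting */
		net_dbg_ratelimited("sending flow OFF (queue len = %d %d)\n", a, b);
	}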
diff --git a/net/core/dev.c b/net/core/dev.c
index a2be59fe6ab8..cd0981977f5c 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -300,10 +300,9 @@ static const unsigned short netdev_lock_type[] =
ARPHRD_BIF, ARPHRD_SIT, ARPHRD_IPDDP, ARPHRD_IPGRE,
ARPHRD_PIMREG, ARPHRD_HIPPI, ARPHRD_ASH, ARPHRD_ECONET,
ARPHRD_IRDA, ARPHRD_FCPP, ARPHRD_FCAL, ARPHRD_FCPL,
- ARPHRD_FCFABRIC, ARPHRD_IEEE802_TR, ARPHRD_IEEE80211,
- ARPHRD_IEEE80211_PRISM, ARPHRD_IEEE80211_RADIOTAP, ARPHRD_PHONET,
- ARPHRD_PHONET_PIPE, ARPHRD_IEEE802154,
- ARPHRD_VOID, ARPHRD_NONE};
+ ARPHRD_FCFABRIC, ARPHRD_IEEE80211, ARPHRD_IEEE80211_PRISM,
+ ARPHRD_IEEE80211_RADIOTAP, ARPHRD_PHONET, ARPHRD_PHONET_PIPE,
+ ARPHRD_IEEE802154, ARPHRD_VOID, ARPHRD_NONE};
static const char *const netdev_lock_name[] =
{"_xmit_NETROM", "_xmit_ETHER", "_xmit_EETHER", "_xmit_AX25",
@@ -318,10 +317,9 @@ static const char *const netdev_lock_name[] =
"_xmit_BIF", "_xmit_SIT", "_xmit_IPDDP", "_xmit_IPGRE",
"_xmit_PIMREG", "_xmit_HIPPI", "_xmit_ASH", "_xmit_ECONET",
"_xmit_IRDA", "_xmit_FCPP", "_xmit_FCAL", "_xmit_FCPL",
- "_xmit_FCFABRIC", "_xmit_IEEE802_TR", "_xmit_IEEE80211",
- "_xmit_IEEE80211_PRISM", "_xmit_IEEE80211_RADIOTAP", "_xmit_PHONET",
- "_xmit_PHONET_PIPE", "_xmit_IEEE802154",
- "_xmit_VOID", "_xmit_NONE"};
+ "_xmit_FCFABRIC", "_xmit_IEEE80211", "_xmit_IEEE80211_PRISM",
+ "_xmit_IEEE80211_RADIOTAP", "_xmit_PHONET", "_xmit_PHONET_PIPE",
+ "_xmit_IEEE802154", "_xmit_VOID", "_xmit_NONE"};
static struct lock_class_key netdev_xmit_lock_key[ARRAY_SIZE(netdev_lock_type)];
static struct lock_class_key netdev_addr_lock_key[ARRAY_SIZE(netdev_lock_type)];
@@ -1618,10 +1616,14 @@ int dev_forward_skb(struct net_device *dev, struct sk_buff *skb)
return NET_RX_DROP;
}
skb->skb_iif = 0;
- skb_set_dev(skb, dev);
+ skb->dev = dev;
+ skb_dst_drop(skb);
skb->tstamp.tv64 = 0;
skb->pkt_type = PACKET_HOST;
skb->protocol = eth_type_trans(skb, dev);
+ skb->mark = 0;
+ secpath_reset(skb);
+ nf_reset(skb);
return netif_rx(skb);
}
EXPORT_SYMBOL_GPL(dev_forward_skb);
@@ -1673,10 +1675,9 @@ static void dev_queue_xmit_nit(struct sk_buff *skb, struct net_device *dev)
if (skb_network_header(skb2) < skb2->data ||
skb2->network_header > skb2->tail) {
- if (net_ratelimit())
- pr_crit("protocol %04x is buggy, dev %s\n",
- ntohs(skb2->protocol),
- dev->name);
+ net_crit_ratelimited("protocol %04x is buggy, dev %s\n",
+ ntohs(skb2->protocol),
+ dev->name);
skb_reset_network_header(skb2);
}
@@ -1870,36 +1871,6 @@ void netif_device_attach(struct net_device *dev)
}
EXPORT_SYMBOL(netif_device_attach);
-/**
- * skb_dev_set -- assign a new device to a buffer
- * @skb: buffer for the new device
- * @dev: network device
- *
- * If an skb is owned by a device already, we have to reset
- * all data private to the namespace a device belongs to
- * before assigning it a new device.
- */
-#ifdef CONFIG_NET_NS
-void skb_set_dev(struct sk_buff *skb, struct net_device *dev)
-{
- skb_dst_drop(skb);
- if (skb->dev && !net_eq(dev_net(skb->dev), dev_net(dev))) {
- secpath_reset(skb);
- nf_reset(skb);
- skb_init_secmark(skb);
- skb->mark = 0;
- skb->priority = 0;
- skb->nf_trace = 0;
- skb->ipvs_property = 0;
-#ifdef CONFIG_NET_SCHED
- skb->tc_index = 0;
-#endif
- }
- skb->dev = dev;
-}
-EXPORT_SYMBOL(skb_set_dev);
-#endif /* CONFIG_NET_NS */
-
static void skb_warn_bad_offload(const struct sk_buff *skb)
{
static const netdev_features_t null_features = 0;
@@ -2343,11 +2314,9 @@ EXPORT_SYMBOL(__skb_tx_hash);
static inline u16 dev_cap_txqueue(struct net_device *dev, u16 queue_index)
{
if (unlikely(queue_index >= dev->real_num_tx_queues)) {
- if (net_ratelimit()) {
- pr_warn("%s selects TX queue %d, but real number of TX queues is %d\n",
- dev->name, queue_index,
- dev->real_num_tx_queues);
- }
+ net_warn_ratelimited("%s selects TX queue %d, but real number of TX queues is %d\n",
+ dev->name, queue_index,
+ dev->real_num_tx_queues);
return 0;
}
return queue_index;
@@ -2589,17 +2558,15 @@ int dev_queue_xmit(struct sk_buff *skb)
}
}
HARD_TX_UNLOCK(dev, txq);
- if (net_ratelimit())
- pr_crit("Virtual device %s asks to queue packet!\n",
- dev->name);
+ net_crit_ratelimited("Virtual device %s asks to queue packet!\n",
+ dev->name);
} else {
/* Recursion is detected! It is possible,
* unfortunately
*/
recursion_alert:
- if (net_ratelimit())
- pr_crit("Dead loop on virtual device %s, fix it urgently!\n",
- dev->name);
+ net_crit_ratelimited("Dead loop on virtual device %s, fix it urgently!\n",
+ dev->name);
}
}
@@ -3080,9 +3047,8 @@ static int ing_filter(struct sk_buff *skb, struct netdev_queue *rxq)
struct Qdisc *q;
if (unlikely(MAX_RED_LOOP < ttl++)) {
- if (net_ratelimit())
- pr_warn("Redir loop detected Dropping packet (%d->%d)\n",
- skb->skb_iif, dev->ifindex);
+ net_warn_ratelimited("Redir loop detected Dropping packet (%d->%d)\n",
+ skb->skb_iif, dev->ifindex);
return TC_ACT_SHOT;
}
@@ -3636,7 +3602,7 @@ gro_result_t napi_frags_finish(struct napi_struct *napi, struct sk_buff *skb,
}
EXPORT_SYMBOL(napi_frags_finish);
-struct sk_buff *napi_frags_skb(struct napi_struct *napi)
+static struct sk_buff *napi_frags_skb(struct napi_struct *napi)
{
struct sk_buff *skb = napi->skb;
struct ethhdr *eth;
@@ -3671,7 +3637,6 @@ struct sk_buff *napi_frags_skb(struct napi_struct *napi)
out:
return skb;
}
-EXPORT_SYMBOL(napi_frags_skb);
gro_result_t napi_gro_frags(struct napi_struct *napi)
{
diff --git a/net/core/drop_monitor.c b/net/core/drop_monitor.c
index a7cad741df01..3252e7e0a005 100644
--- a/net/core/drop_monitor.c
+++ b/net/core/drop_monitor.c
@@ -4,6 +4,8 @@
* Copyright (C) 2009 Neil Horman <nhorman@tuxdriver.com>
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/string.h>
@@ -22,6 +24,7 @@
#include <linux/timer.h>
#include <linux/bitops.h>
#include <linux/slab.h>
+#include <linux/module.h>
#include <net/genetlink.h>
#include <net/netevent.h>
@@ -261,9 +264,15 @@ static int set_all_monitor_traces(int state)
switch (state) {
case TRACE_ON:
+ if (!try_module_get(THIS_MODULE)) {
+ rc = -ENODEV;
+ break;
+ }
+
rc |= register_trace_kfree_skb(trace_kfree_skb_hit, NULL);
rc |= register_trace_napi_poll(trace_napi_poll_hit, NULL);
break;
+
case TRACE_OFF:
rc |= unregister_trace_kfree_skb(trace_kfree_skb_hit, NULL);
rc |= unregister_trace_napi_poll(trace_napi_poll_hit, NULL);
@@ -279,6 +288,9 @@ static int set_all_monitor_traces(int state)
kfree_rcu(new_stat, rcu);
}
}
+
+ module_put(THIS_MODULE);
+
break;
default:
rc = 1;
@@ -381,10 +393,10 @@ static int __init init_net_drop_monitor(void)
struct per_cpu_dm_data *data;
int cpu, rc;
- printk(KERN_INFO "Initializing network drop monitor service\n");
+ pr_info("Initializing network drop monitor service\n");
if (sizeof(void *) > 8) {
- printk(KERN_ERR "Unable to store program counters on this arch, Drop monitor failed\n");
+ pr_err("Unable to store program counters on this arch, Drop monitor failed\n");
return -ENOSPC;
}
@@ -392,19 +404,19 @@ static int __init init_net_drop_monitor(void)
dropmon_ops,
ARRAY_SIZE(dropmon_ops));
if (rc) {
- printk(KERN_ERR "Could not create drop monitor netlink family\n");
+ pr_err("Could not create drop monitor netlink family\n");
return rc;
}
rc = register_netdevice_notifier(&dropmon_net_notifier);
if (rc < 0) {
- printk(KERN_CRIT "Failed to register netdevice notifier\n");
+ pr_crit("Failed to register netdevice notifier\n");
goto out_unreg;
}
rc = 0;
- for_each_present_cpu(cpu) {
+ for_each_possible_cpu(cpu) {
data = &per_cpu(dm_cpu_data, cpu);
data->cpu = cpu;
INIT_WORK(&data->dm_alert_work, send_dm_alert);
@@ -423,4 +435,36 @@ out:
return rc;
}
-late_initcall(init_net_drop_monitor);
+static void exit_net_drop_monitor(void)
+{
+ struct per_cpu_dm_data *data;
+ int cpu;
+
+ BUG_ON(unregister_netdevice_notifier(&dropmon_net_notifier));
+
+ /*
+ * Because of the module_get/put we do in the trace state change path,
+ * we are guaranteed not to have any current users when we get here;
+ * all we need to do is make sure that we don't have any running timers
+ * or pending schedule calls
+ */
+
+ for_each_possible_cpu(cpu) {
+ data = &per_cpu(dm_cpu_data, cpu);
+ del_timer_sync(&data->send_timer);
+ cancel_work_sync(&data->dm_alert_work);
+ /*
+ * At this point, we should have exclusive access
+ * to this struct and can free the skb inside it
+ */
+ kfree_skb(data->skb);
+ }
+
+ BUG_ON(genl_unregister_family(&net_drop_monitor_family));
+}
+
+module_init(init_net_drop_monitor);
+module_exit(exit_net_drop_monitor);
+
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Neil Horman <nhorman@tuxdriver.com>");
diff --git a/net/core/ethtool.c b/net/core/ethtool.c
index beacdd93cd8f..9c2afb480270 100644
--- a/net/core/ethtool.c
+++ b/net/core/ethtool.c
@@ -751,18 +751,17 @@ static int ethtool_get_link(struct net_device *dev, char __user *useraddr)
return 0;
}
-static int ethtool_get_eeprom(struct net_device *dev, void __user *useraddr)
+static int ethtool_get_any_eeprom(struct net_device *dev, void __user *useraddr,
+ int (*getter)(struct net_device *,
+ struct ethtool_eeprom *, u8 *),
+ u32 total_len)
{
struct ethtool_eeprom eeprom;
- const struct ethtool_ops *ops = dev->ethtool_ops;
void __user *userbuf = useraddr + sizeof(eeprom);
u32 bytes_remaining;
u8 *data;
int ret = 0;
- if (!ops->get_eeprom || !ops->get_eeprom_len)
- return -EOPNOTSUPP;
-
if (copy_from_user(&eeprom, useraddr, sizeof(eeprom)))
return -EFAULT;
@@ -771,7 +770,7 @@ static int ethtool_get_eeprom(struct net_device *dev, void __user *useraddr)
return -EINVAL;
/* Check for exceeding total eeprom len */
- if (eeprom.offset + eeprom.len > ops->get_eeprom_len(dev))
+ if (eeprom.offset + eeprom.len > total_len)
return -EINVAL;
data = kmalloc(PAGE_SIZE, GFP_USER);
@@ -782,7 +781,7 @@ static int ethtool_get_eeprom(struct net_device *dev, void __user *useraddr)
while (bytes_remaining > 0) {
eeprom.len = min(bytes_remaining, (u32)PAGE_SIZE);
- ret = ops->get_eeprom(dev, &eeprom, data);
+ ret = getter(dev, &eeprom, data);
if (ret)
break;
if (copy_to_user(userbuf, data, eeprom.len)) {
@@ -803,6 +802,17 @@ static int ethtool_get_eeprom(struct net_device *dev, void __user *useraddr)
return ret;
}
+static int ethtool_get_eeprom(struct net_device *dev, void __user *useraddr)
+{
+ const struct ethtool_ops *ops = dev->ethtool_ops;
+
+ if (!ops->get_eeprom || !ops->get_eeprom_len)
+ return -EOPNOTSUPP;
+
+ return ethtool_get_any_eeprom(dev, useraddr, ops->get_eeprom,
+ ops->get_eeprom_len(dev));
+}
+
static int ethtool_set_eeprom(struct net_device *dev, void __user *useraddr)
{
struct ethtool_eeprom eeprom;
@@ -1325,6 +1335,47 @@ static int ethtool_get_ts_info(struct net_device *dev, void __user *useraddr)
return err;
}
+static int ethtool_get_module_info(struct net_device *dev,
+ void __user *useraddr)
+{
+ int ret;
+ struct ethtool_modinfo modinfo;
+ const struct ethtool_ops *ops = dev->ethtool_ops;
+
+ if (!ops->get_module_info)
+ return -EOPNOTSUPP;
+
+ if (copy_from_user(&modinfo, useraddr, sizeof(modinfo)))
+ return -EFAULT;
+
+ ret = ops->get_module_info(dev, &modinfo);
+ if (ret)
+ return ret;
+
+ if (copy_to_user(useraddr, &modinfo, sizeof(modinfo)))
+ return -EFAULT;
+
+ return 0;
+}
+
+static int ethtool_get_module_eeprom(struct net_device *dev,
+ void __user *useraddr)
+{
+ int ret;
+ struct ethtool_modinfo modinfo;
+ const struct ethtool_ops *ops = dev->ethtool_ops;
+
+ if (!ops->get_module_info || !ops->get_module_eeprom)
+ return -EOPNOTSUPP;
+
+ ret = ops->get_module_info(dev, &modinfo);
+ if (ret)
+ return ret;
+
+ return ethtool_get_any_eeprom(dev, useraddr, ops->get_module_eeprom,
+ modinfo.eeprom_len);
+}
+
/* The main entry point in this file. Called from net/core/dev.c */
int dev_ethtool(struct net *net, struct ifreq *ifr)
@@ -1549,6 +1600,12 @@ int dev_ethtool(struct net *net, struct ifreq *ifr)
case ETHTOOL_GET_TS_INFO:
rc = ethtool_get_ts_info(dev, useraddr);
break;
+ case ETHTOOL_GMODULEINFO:
+ rc = ethtool_get_module_info(dev, useraddr);
+ break;
+ case ETHTOOL_GMODULEEEPROM:
+ rc = ethtool_get_module_eeprom(dev, useraddr);
+ break;
default:
rc = -EOPNOTSUPP;
}
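
The two new ETHTOOL_GMODULEINFO / ETHTOOL_GMODULEEEPROM commands expose a plug-in module's (e.g. SFP+) EEPROM through the shared ethtool_get_any_eeprom() helper; a driver opts in by filling get_module_info() and get_module_eeprom() in its ethtool_ops. A hedged driver-side sketch (the my_* names are hypothetical, and SFF-8079 is only an example module type):

	/* Hypothetical driver hook-up for the new module EEPROM ioctls. */
	static int my_get_module_info(struct net_device *dev,
				      struct ethtool_modinfo *modinfo)
	{
		modinfo->type = ETH_MODULE_SFF_8079;		/* assumed module type */
		modinfo->eeprom_len = ETH_MODULE_SFF_8079_LEN;
		return 0;
	}

	static int my_get_module_eeprom(struct net_device *dev,
					struct ethtool_eeprom *ee, u8 *data)
	{
		/* copy ee->len bytes starting at ee->offset into data ... */
		return 0;
	}

	static const struct ethtool_ops my_ethtool_ops = {
		.get_module_info	= my_get_module_info,
		.get_module_eeprom	= my_get_module_eeprom,
	};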
diff --git a/net/core/neighbour.c b/net/core/neighbour.c
index fadaa819b854..eb09f8bbbf07 100644
--- a/net/core/neighbour.c
+++ b/net/core/neighbour.c
@@ -15,6 +15,8 @@
* Harald Welte Add neighbour cache statistics like rtstat
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/kernel.h>
@@ -712,14 +714,13 @@ void neigh_destroy(struct neighbour *neigh)
NEIGH_CACHE_STAT_INC(neigh->tbl, destroys);
if (!neigh->dead) {
- printk(KERN_WARNING
- "Destroying alive neighbour %p\n", neigh);
+ pr_warn("Destroying alive neighbour %p\n", neigh);
dump_stack();
return;
}
if (neigh_del_timer(neigh))
- printk(KERN_WARNING "Impossible event.\n");
+ pr_warn("Impossible event\n");
skb_queue_purge(&neigh->arp_queue);
neigh->arp_queue_len_bytes = 0;
@@ -1554,8 +1555,8 @@ void neigh_table_init(struct neigh_table *tbl)
write_unlock(&neigh_tbl_lock);
if (unlikely(tmp)) {
- printk(KERN_ERR "NEIGH: Registering multiple tables for "
- "family %d\n", tbl->family);
+ pr_err("Registering multiple tables for family %d\n",
+ tbl->family);
dump_stack();
}
}
@@ -1571,7 +1572,7 @@ int neigh_table_clear(struct neigh_table *tbl)
pneigh_queue_purge(&tbl->proxy_queue);
neigh_ifdown(tbl, NULL);
if (atomic_read(&tbl->entries))
- printk(KERN_CRIT "neighbour leakage\n");
+ pr_crit("neighbour leakage\n");
write_lock(&neigh_tbl_lock);
for (tp = &neigh_tables; *tp; tp = &(*tp)->next) {
if (*tp == tbl) {
diff --git a/net/core/net_namespace.c b/net/core/net_namespace.c
index 31a5ae51a45c..dddbacb8f28c 100644
--- a/net/core/net_namespace.c
+++ b/net/core/net_namespace.c
@@ -1,3 +1,5 @@
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/workqueue.h>
#include <linux/rtnetlink.h>
#include <linux/cache.h>
@@ -212,8 +214,8 @@ static void net_free(struct net *net)
{
#ifdef NETNS_REFCNT_DEBUG
if (unlikely(atomic_read(&net->use_count) != 0)) {
- printk(KERN_EMERG "network namespace not free! Usage: %d\n",
- atomic_read(&net->use_count));
+ pr_emerg("network namespace not free! Usage: %d\n",
+ atomic_read(&net->use_count));
return;
}
#endif
diff --git a/net/core/netprio_cgroup.c b/net/core/netprio_cgroup.c
index ba6900f73900..09eda68b6763 100644
--- a/net/core/netprio_cgroup.c
+++ b/net/core/netprio_cgroup.c
@@ -9,6 +9,8 @@
* Authors: Neil Horman <nhorman@tuxdriver.com>
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/types.h>
@@ -88,7 +90,7 @@ static void extend_netdev_table(struct net_device *dev, u32 new_len)
old_priomap = rtnl_dereference(dev->priomap);
if (!new_priomap) {
- printk(KERN_WARNING "Unable to alloc new priomap!\n");
+ pr_warn("Unable to alloc new priomap!\n");
return;
}
@@ -136,7 +138,7 @@ static struct cgroup_subsys_state *cgrp_create(struct cgroup *cgrp)
ret = get_prioidx(&cs->prioidx);
if (ret != 0) {
- printk(KERN_WARNING "No space in priority index array\n");
+ pr_warn("No space in priority index array\n");
kfree(cs);
return ERR_PTR(ret);
}
diff --git a/net/core/pktgen.c b/net/core/pktgen.c
index ffb5d382f241..cce9e53528b1 100644
--- a/net/core/pktgen.c
+++ b/net/core/pktgen.c
@@ -891,8 +891,8 @@ static ssize_t pktgen_if_write(struct file *file,
if (copy_from_user(tb, user_buffer, copy))
return -EFAULT;
tb[copy] = 0;
- printk(KERN_DEBUG "pktgen: %s,%lu buffer -:%s:-\n", name,
- (unsigned long)count, tb);
+ pr_debug("%s,%lu buffer -:%s:-\n",
+ name, (unsigned long)count, tb);
}
if (!strcmp(name, "min_pkt_size")) {
@@ -1261,8 +1261,7 @@ static ssize_t pktgen_if_write(struct file *file,
pkt_dev->cur_daddr = pkt_dev->daddr_min;
}
if (debug)
- printk(KERN_DEBUG "pktgen: dst_min set to: %s\n",
- pkt_dev->dst_min);
+ pr_debug("dst_min set to: %s\n", pkt_dev->dst_min);
i += len;
sprintf(pg_result, "OK: dst_min=%s", pkt_dev->dst_min);
return count;
@@ -1284,8 +1283,7 @@ static ssize_t pktgen_if_write(struct file *file,
pkt_dev->cur_daddr = pkt_dev->daddr_max;
}
if (debug)
- printk(KERN_DEBUG "pktgen: dst_max set to: %s\n",
- pkt_dev->dst_max);
+ pr_debug("dst_max set to: %s\n", pkt_dev->dst_max);
i += len;
sprintf(pg_result, "OK: dst_max=%s", pkt_dev->dst_max);
return count;
@@ -1307,7 +1305,7 @@ static ssize_t pktgen_if_write(struct file *file,
pkt_dev->cur_in6_daddr = pkt_dev->in6_daddr;
if (debug)
- printk(KERN_DEBUG "pktgen: dst6 set to: %s\n", buf);
+ pr_debug("dst6 set to: %s\n", buf);
i += len;
sprintf(pg_result, "OK: dst6=%s", buf);
@@ -1329,7 +1327,7 @@ static ssize_t pktgen_if_write(struct file *file,
pkt_dev->cur_in6_daddr = pkt_dev->min_in6_daddr;
if (debug)
- printk(KERN_DEBUG "pktgen: dst6_min set to: %s\n", buf);
+ pr_debug("dst6_min set to: %s\n", buf);
i += len;
sprintf(pg_result, "OK: dst6_min=%s", buf);
@@ -1350,7 +1348,7 @@ static ssize_t pktgen_if_write(struct file *file,
snprintf(buf, sizeof(buf), "%pI6c", &pkt_dev->max_in6_daddr);
if (debug)
- printk(KERN_DEBUG "pktgen: dst6_max set to: %s\n", buf);
+ pr_debug("dst6_max set to: %s\n", buf);
i += len;
sprintf(pg_result, "OK: dst6_max=%s", buf);
@@ -1373,7 +1371,7 @@ static ssize_t pktgen_if_write(struct file *file,
pkt_dev->cur_in6_saddr = pkt_dev->in6_saddr;
if (debug)
- printk(KERN_DEBUG "pktgen: src6 set to: %s\n", buf);
+ pr_debug("src6 set to: %s\n", buf);
i += len;
sprintf(pg_result, "OK: src6=%s", buf);
@@ -1394,8 +1392,7 @@ static ssize_t pktgen_if_write(struct file *file,
pkt_dev->cur_saddr = pkt_dev->saddr_min;
}
if (debug)
- printk(KERN_DEBUG "pktgen: src_min set to: %s\n",
- pkt_dev->src_min);
+ pr_debug("src_min set to: %s\n", pkt_dev->src_min);
i += len;
sprintf(pg_result, "OK: src_min=%s", pkt_dev->src_min);
return count;
@@ -1415,8 +1412,7 @@ static ssize_t pktgen_if_write(struct file *file,
pkt_dev->cur_saddr = pkt_dev->saddr_max;
}
if (debug)
- printk(KERN_DEBUG "pktgen: src_max set to: %s\n",
- pkt_dev->src_max);
+ pr_debug("src_max set to: %s\n", pkt_dev->src_max);
i += len;
sprintf(pg_result, "OK: src_max=%s", pkt_dev->src_max);
return count;
@@ -1527,7 +1523,7 @@ static ssize_t pktgen_if_write(struct file *file,
pkt_dev->svlan_id = 0xffff;
if (debug)
- printk(KERN_DEBUG "pktgen: VLAN/SVLAN auto turned off\n");
+ pr_debug("VLAN/SVLAN auto turned off\n");
}
return count;
}
@@ -1542,10 +1538,10 @@ static ssize_t pktgen_if_write(struct file *file,
pkt_dev->vlan_id = value; /* turn on VLAN */
if (debug)
- printk(KERN_DEBUG "pktgen: VLAN turned on\n");
+ pr_debug("VLAN turned on\n");
if (debug && pkt_dev->nr_labels)
- printk(KERN_DEBUG "pktgen: MPLS auto turned off\n");
+ pr_debug("MPLS auto turned off\n");
pkt_dev->nr_labels = 0; /* turn off MPLS */
sprintf(pg_result, "OK: vlan_id=%u", pkt_dev->vlan_id);
@@ -1554,7 +1550,7 @@ static ssize_t pktgen_if_write(struct file *file,
pkt_dev->svlan_id = 0xffff;
if (debug)
- printk(KERN_DEBUG "pktgen: VLAN/SVLAN turned off\n");
+ pr_debug("VLAN/SVLAN turned off\n");
}
return count;
}
@@ -1599,10 +1595,10 @@ static ssize_t pktgen_if_write(struct file *file,
pkt_dev->svlan_id = value; /* turn on SVLAN */
if (debug)
- printk(KERN_DEBUG "pktgen: SVLAN turned on\n");
+ pr_debug("SVLAN turned on\n");
if (debug && pkt_dev->nr_labels)
- printk(KERN_DEBUG "pktgen: MPLS auto turned off\n");
+ pr_debug("MPLS auto turned off\n");
pkt_dev->nr_labels = 0; /* turn off MPLS */
sprintf(pg_result, "OK: svlan_id=%u", pkt_dev->svlan_id);
@@ -1611,7 +1607,7 @@ static ssize_t pktgen_if_write(struct file *file,
pkt_dev->svlan_id = 0xffff;
if (debug)
- printk(KERN_DEBUG "pktgen: VLAN/SVLAN turned off\n");
+ pr_debug("VLAN/SVLAN turned off\n");
}
return count;
}
@@ -1779,8 +1775,7 @@ static ssize_t pktgen_thread_write(struct file *file,
i += len;
if (debug)
- printk(KERN_DEBUG "pktgen: t=%s, count=%lu\n",
- name, (unsigned long)count);
+ pr_debug("t=%s, count=%lu\n", name, (unsigned long)count);
if (!t) {
pr_err("ERROR: No thread\n");
@@ -1931,7 +1926,7 @@ static int pktgen_device_event(struct notifier_block *unused,
{
struct net_device *dev = ptr;
- if (!net_eq(dev_net(dev), &init_net))
+ if (!net_eq(dev_net(dev), &init_net) || pktgen_exiting)
return NOTIFY_DONE;
/* It is OK that we do not hold the group lock right now,
@@ -2934,8 +2929,7 @@ static struct sk_buff *fill_packet_ipv6(struct net_device *odev,
if (datalen < sizeof(struct pktgen_hdr)) {
datalen = sizeof(struct pktgen_hdr);
- if (net_ratelimit())
- pr_info("increased datalen to %d\n", datalen);
+ net_info_ratelimited("increased datalen to %d\n", datalen);
}
udph->source = htons(pkt_dev->cur_udp_src);
@@ -3365,8 +3359,8 @@ static void pktgen_xmit(struct pktgen_dev *pkt_dev)
pkt_dev->errors++;
break;
default: /* Drivers are not supposed to return other values! */
- if (net_ratelimit())
- pr_info("%s xmit error: %d\n", pkt_dev->odevname, ret);
+ net_info_ratelimited("%s xmit error: %d\n",
+ pkt_dev->odevname, ret);
pkt_dev->errors++;
/* fallthru */
case NETDEV_TX_LOCKED:
@@ -3755,12 +3749,18 @@ static void __exit pg_cleanup(void)
{
struct pktgen_thread *t;
struct list_head *q, *n;
+ LIST_HEAD(list);
/* Stop all interfaces & threads */
pktgen_exiting = true;
- list_for_each_safe(q, n, &pktgen_threads) {
+ mutex_lock(&pktgen_thread_lock);
+ list_splice_init(&pktgen_threads, &list);
+ mutex_unlock(&pktgen_thread_lock);
+
+ list_for_each_safe(q, n, &list) {
t = list_entry(q, struct pktgen_thread, th_list);
+ list_del(&t->th_list);
kthread_stop(t->tsk);
kfree(t);
}
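The reworked pg_cleanup() no longer walks pktgen_threads while other paths could still touch it: it detaches the whole list under pktgen_thread_lock and only then stops and frees each thread. A minimal sketch of that splice-then-teardown pattern, with hypothetical names (my_lock, my_items, struct item) standing in for the pktgen structures:

#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/slab.h>

struct item {
        struct list_head node;
};

static LIST_HEAD(my_items);
static DEFINE_MUTEX(my_lock);

static void teardown_all(void)
{
        struct item *it, *tmp;
        LIST_HEAD(list);

        /* Detach everything while holding the lock... */
        mutex_lock(&my_lock);
        list_splice_init(&my_items, &list);
        mutex_unlock(&my_lock);

        /* ...then do the slow per-item work (kthread_stop() in pktgen,
         * plain kfree() here) without blocking other lock holders. */
        list_for_each_entry_safe(it, tmp, &list, node) {
                list_del(&it->node);
                kfree(it);
        }
}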
diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
index b442d35bbc8b..21318d15bbc3 100644
--- a/net/core/rtnetlink.c
+++ b/net/core/rtnetlink.c
@@ -1524,11 +1524,9 @@ static int do_setlink(struct net_device *dev, struct ifinfomsg *ifm,
err = 0;
errout:
- if (err < 0 && modified && net_ratelimit())
- printk(KERN_WARNING "A link change request failed with "
- "some changes committed already. Interface %s may "
- "have been left with an inconsistent configuration, "
- "please check.\n", dev->name);
+ if (err < 0 && modified)
+ net_warn_ratelimited("A link change request failed with some changes committed already. Interface %s may have been left with an inconsistent configuration, please check.\n",
+ dev->name);
if (send_addr_notify)
call_netdevice_notifiers(NETDEV_CHANGEADDR, dev);
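Most conversions in this series follow the same shape: an open-coded net_ratelimit() test wrapped around printk(KERN_<LEVEL> ...) collapses into a single net_<level>_ratelimited() call, which carries both the level and the rate limiting. A before/after sketch inside a hypothetical helper:

#include <linux/net.h>          /* net_ratelimit(), net_warn_ratelimited() */
#include <linux/printk.h>

static void report_bad_frame(const char *ifname)
{
        /* Old style: explicit rate-limit check plus a level-prefixed printk. */
        if (net_ratelimit())
                printk(KERN_WARNING "bad frame on %s\n", ifname);

        /* New style: one macro, same rate limiting, same KERN_WARNING level. */
        net_warn_ratelimited("bad frame on %s\n", ifname);
}

The same applies to the net_info_ratelimited(), net_dbg_ratelimited(), net_err_ratelimited() and net_crit_ratelimited() calls introduced throughout the rest of the diff.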
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index 2c35da818ef9..016694d62484 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -36,6 +36,8 @@
* The functions in this file will not compile correctly with gcc 2.4.x
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
@@ -118,11 +120,10 @@ static const struct pipe_buf_operations sock_pipe_buf_ops = {
*/
static void skb_over_panic(struct sk_buff *skb, int sz, void *here)
{
- printk(KERN_EMERG "skb_over_panic: text:%p len:%d put:%d head:%p "
- "data:%p tail:%#lx end:%#lx dev:%s\n",
- here, skb->len, sz, skb->head, skb->data,
- (unsigned long)skb->tail, (unsigned long)skb->end,
- skb->dev ? skb->dev->name : "<NULL>");
+ pr_emerg("%s: text:%p len:%d put:%d head:%p data:%p tail:%#lx end:%#lx dev:%s\n",
+ __func__, here, skb->len, sz, skb->head, skb->data,
+ (unsigned long)skb->tail, (unsigned long)skb->end,
+ skb->dev ? skb->dev->name : "<NULL>");
BUG();
}
@@ -137,11 +138,10 @@ static void skb_over_panic(struct sk_buff *skb, int sz, void *here)
static void skb_under_panic(struct sk_buff *skb, int sz, void *here)
{
- printk(KERN_EMERG "skb_under_panic: text:%p len:%d put:%d head:%p "
- "data:%p tail:%#lx end:%#lx dev:%s\n",
- here, skb->len, sz, skb->head, skb->data,
- (unsigned long)skb->tail, (unsigned long)skb->end,
- skb->dev ? skb->dev->name : "<NULL>");
+ pr_emerg("%s: text:%p len:%d put:%d head:%p data:%p tail:%#lx end:%#lx dev:%s\n",
+ __func__, here, skb->len, sz, skb->head, skb->data,
+ (unsigned long)skb->tail, (unsigned long)skb->end,
+ skb->dev ? skb->dev->name : "<NULL>");
BUG();
}
@@ -293,6 +293,46 @@ struct sk_buff *build_skb(void *data, unsigned int frag_size)
}
EXPORT_SYMBOL(build_skb);
+struct netdev_alloc_cache {
+ struct page *page;
+ unsigned int offset;
+};
+static DEFINE_PER_CPU(struct netdev_alloc_cache, netdev_alloc_cache);
+
+/**
+ * netdev_alloc_frag - allocate a page fragment
+ * @fragsz: fragment size
+ *
+ * Allocates a frag from a page for receive buffer.
+ * Uses GFP_ATOMIC allocations.
+ */
+void *netdev_alloc_frag(unsigned int fragsz)
+{
+ struct netdev_alloc_cache *nc;
+ void *data = NULL;
+ unsigned long flags;
+
+ local_irq_save(flags);
+ nc = &__get_cpu_var(netdev_alloc_cache);
+ if (unlikely(!nc->page)) {
+refill:
+ nc->page = alloc_page(GFP_ATOMIC | __GFP_COLD);
+ nc->offset = 0;
+ }
+ if (likely(nc->page)) {
+ if (nc->offset + fragsz > PAGE_SIZE) {
+ put_page(nc->page);
+ goto refill;
+ }
+ data = page_address(nc->page) + nc->offset;
+ nc->offset += fragsz;
+ get_page(nc->page);
+ }
+ local_irq_restore(flags);
+ return data;
+}
+EXPORT_SYMBOL(netdev_alloc_frag);
+
/**
* __netdev_alloc_skb - allocate an skbuff for rx on a specific device
* @dev: network device to receive on
@@ -307,11 +347,23 @@ EXPORT_SYMBOL(build_skb);
* %NULL is returned if there is no free memory.
*/
struct sk_buff *__netdev_alloc_skb(struct net_device *dev,
- unsigned int length, gfp_t gfp_mask)
+ unsigned int length, gfp_t gfp_mask)
{
- struct sk_buff *skb;
+ struct sk_buff *skb = NULL;
+ unsigned int fragsz = SKB_DATA_ALIGN(length + NET_SKB_PAD) +
+ SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
- skb = __alloc_skb(length + NET_SKB_PAD, gfp_mask, 0, NUMA_NO_NODE);
+ if (fragsz <= PAGE_SIZE && !(gfp_mask & __GFP_WAIT)) {
+ void *data = netdev_alloc_frag(fragsz);
+
+ if (likely(data)) {
+ skb = build_skb(data, fragsz);
+ if (unlikely(!skb))
+ put_page(virt_to_head_page(data));
+ }
+ } else {
+ skb = __alloc_skb(length + NET_SKB_PAD, gfp_mask, 0, NUMA_NO_NODE);
+ }
if (likely(skb)) {
skb_reserve(skb, NET_SKB_PAD);
skb->dev = dev;
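With this change __netdev_alloc_skb() serves small, non-sleeping allocations from the per-CPU netdev_alloc_cache instead of a full __alloc_skb() call. A driver-style caller could use the same two helpers directly; a hedged sketch (the length handling is illustrative, not a copy of any in-tree driver):

#include <linux/skbuff.h>
#include <linux/mm.h>

static struct sk_buff *rx_alloc_example(unsigned int len)
{
        /* The fragment must also cover the skb_shared_info that
         * build_skb() places at the end of the buffer. */
        unsigned int fragsz = SKB_DATA_ALIGN(len + NET_SKB_PAD) +
                              SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
        struct sk_buff *skb;
        void *data;

        data = netdev_alloc_frag(fragsz);
        if (!data)
                return NULL;

        skb = build_skb(data, fragsz);
        if (!skb) {
                /* Drop the page reference netdev_alloc_frag() took. */
                put_page(virt_to_head_page(data));
                return NULL;
        }
        skb_reserve(skb, NET_SKB_PAD);
        return skb;
}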
@@ -330,28 +382,6 @@ void skb_add_rx_frag(struct sk_buff *skb, int i, struct page *page, int off,
}
EXPORT_SYMBOL(skb_add_rx_frag);
-/**
- * dev_alloc_skb - allocate an skbuff for receiving
- * @length: length to allocate
- *
- * Allocate a new &sk_buff and assign it a usage count of one. The
- * buffer has unspecified headroom built in. Users should allocate
- * the headroom they think they need without accounting for the
- * built in space. The built in space is used for optimisations.
- *
- * %NULL is returned if there is no free memory. Although this function
- * allocates memory it can be called from an interrupt.
- */
-struct sk_buff *dev_alloc_skb(unsigned int length)
-{
- /*
- * There is more code here than it seems:
- * __dev_alloc_skb is an inline
- */
- return __dev_alloc_skb(length, GFP_ATOMIC);
-}
-EXPORT_SYMBOL(dev_alloc_skb);
-
static void skb_drop_list(struct sk_buff **listp)
{
struct sk_buff *list = *listp;
@@ -3299,10 +3329,8 @@ bool skb_partial_csum_set(struct sk_buff *skb, u16 start, u16 off)
{
if (unlikely(start > skb_headlen(skb)) ||
unlikely((int)start + off > skb_headlen(skb) - 2)) {
- if (net_ratelimit())
- printk(KERN_WARNING
- "bad partial csum: csum=%u/%u len=%u\n",
- start, off, skb_headlen(skb));
+ net_warn_ratelimited("bad partial csum: csum=%u/%u len=%u\n",
+ start, off, skb_headlen(skb));
return false;
}
skb->ip_summed = CHECKSUM_PARTIAL;
@@ -3314,8 +3342,93 @@ EXPORT_SYMBOL_GPL(skb_partial_csum_set);
void __skb_warn_lro_forwarding(const struct sk_buff *skb)
{
- if (net_ratelimit())
- pr_warning("%s: received packets cannot be forwarded"
- " while LRO is enabled\n", skb->dev->name);
+ net_warn_ratelimited("%s: received packets cannot be forwarded while LRO is enabled\n",
+ skb->dev->name);
}
EXPORT_SYMBOL(__skb_warn_lro_forwarding);
+
+void kfree_skb_partial(struct sk_buff *skb, bool head_stolen)
+{
+ if (head_stolen)
+ kmem_cache_free(skbuff_head_cache, skb);
+ else
+ __kfree_skb(skb);
+}
+EXPORT_SYMBOL(kfree_skb_partial);
+
+/**
+ * skb_try_coalesce - try to merge skb to prior one
+ * @to: prior buffer
+ * @from: buffer to add
+ * @fragstolen: pointer to boolean
+ *
+ */
+bool skb_try_coalesce(struct sk_buff *to, struct sk_buff *from,
+ bool *fragstolen, int *delta_truesize)
+{
+ int i, delta, len = from->len;
+
+ *fragstolen = false;
+
+ if (skb_cloned(to))
+ return false;
+
+ if (len <= skb_tailroom(to)) {
+ BUG_ON(skb_copy_bits(from, 0, skb_put(to, len), len));
+ *delta_truesize = 0;
+ return true;
+ }
+
+ if (skb_has_frag_list(to) || skb_has_frag_list(from))
+ return false;
+
+ if (skb_headlen(from) != 0) {
+ struct page *page;
+ unsigned int offset;
+
+ if (skb_shinfo(to)->nr_frags +
+ skb_shinfo(from)->nr_frags >= MAX_SKB_FRAGS)
+ return false;
+
+ if (skb_head_is_locked(from))
+ return false;
+
+ delta = from->truesize - SKB_DATA_ALIGN(sizeof(struct sk_buff));
+
+ page = virt_to_head_page(from->head);
+ offset = from->data - (unsigned char *)page_address(page);
+
+ skb_fill_page_desc(to, skb_shinfo(to)->nr_frags,
+ page, offset, skb_headlen(from));
+ *fragstolen = true;
+ } else {
+ if (skb_shinfo(to)->nr_frags +
+ skb_shinfo(from)->nr_frags > MAX_SKB_FRAGS)
+ return false;
+
+ delta = from->truesize -
+ SKB_TRUESIZE(skb_end_pointer(from) - from->head);
+ }
+
+ WARN_ON_ONCE(delta < len);
+
+ memcpy(skb_shinfo(to)->frags + skb_shinfo(to)->nr_frags,
+ skb_shinfo(from)->frags,
+ skb_shinfo(from)->nr_frags * sizeof(skb_frag_t));
+ skb_shinfo(to)->nr_frags += skb_shinfo(from)->nr_frags;
+
+ if (!skb_cloned(from))
+ skb_shinfo(from)->nr_frags = 0;
+
+ /* if the skb is cloned this does nothing since we set nr_frags to 0 */
+ for (i = 0; i < skb_shinfo(from)->nr_frags; i++)
+ skb_frag_ref(from, i);
+
+ to->truesize += delta;
+ to->len += len;
+ to->data_len += len;
+
+ *delta_truesize = delta;
+ return true;
+}
+EXPORT_SYMBOL(skb_try_coalesce);
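skb_try_coalesce() and kfree_skb_partial() are designed to be used as a pair by receive paths that append a freshly arrived buffer to the one already at the tail of a queue. A hedged sketch of the calling convention (the queue and accounting details are hypothetical):

#include <linux/skbuff.h>

/* Returns true if 'skb' was folded into 'tail' and consumed. */
static bool queue_coalesce_example(struct sk_buff *tail, struct sk_buff *skb,
                                   unsigned int *rmem)
{
        bool fragstolen;
        int delta;

        if (!tail || !skb_try_coalesce(tail, skb, &fragstolen, &delta))
                return false;

        /* Only the extra truesize needs to be accounted against the queue. */
        *rmem += delta;

        /* Release the donor skb; if its head was stolen as a page fragment,
         * only the sk_buff metadata is freed. */
        kfree_skb_partial(skb, fragstolen);
        return true;
}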
diff --git a/net/core/sock.c b/net/core/sock.c
index 26ed27fb2bfb..5efcd6307fa7 100644
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -89,6 +89,8 @@
* 2 of the License, or (at your option) any later version.
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/capability.h>
#include <linux/errno.h>
#include <linux/types.h>
@@ -297,9 +299,8 @@ static int sock_set_timeout(long *timeo_p, char __user *optval, int optlen)
*timeo_p = 0;
if (warned < 10 && net_ratelimit()) {
warned++;
- printk(KERN_INFO "sock_set_timeout: `%s' (pid %d) "
- "tries to set negative timeout\n",
- current->comm, task_pid_nr(current));
+ pr_info("%s: `%s' (pid %d) tries to set negative timeout\n",
+ __func__, current->comm, task_pid_nr(current));
}
return 0;
}
@@ -317,8 +318,8 @@ static void sock_warn_obsolete_bsdism(const char *name)
static char warncomm[TASK_COMM_LEN];
if (strcmp(warncomm, current->comm) && warned < 5) {
strcpy(warncomm, current->comm);
- printk(KERN_WARNING "process `%s' is using obsolete "
- "%s SO_BSDCOMPAT\n", warncomm, name);
+ pr_warn("process `%s' is using obsolete %s SO_BSDCOMPAT\n",
+ warncomm, name);
warned++;
}
}
@@ -849,7 +850,7 @@ int sock_getsockopt(struct socket *sock, int level, int optname,
break;
case SO_BROADCAST:
- v.val = !!sock_flag(sk, SOCK_BROADCAST);
+ v.val = sock_flag(sk, SOCK_BROADCAST);
break;
case SO_SNDBUF:
@@ -865,7 +866,7 @@ int sock_getsockopt(struct socket *sock, int level, int optname,
break;
case SO_KEEPALIVE:
- v.val = !!sock_flag(sk, SOCK_KEEPOPEN);
+ v.val = sock_flag(sk, SOCK_KEEPOPEN);
break;
case SO_TYPE:
@@ -887,7 +888,7 @@ int sock_getsockopt(struct socket *sock, int level, int optname,
break;
case SO_OOBINLINE:
- v.val = !!sock_flag(sk, SOCK_URGINLINE);
+ v.val = sock_flag(sk, SOCK_URGINLINE);
break;
case SO_NO_CHECK:
@@ -900,7 +901,7 @@ int sock_getsockopt(struct socket *sock, int level, int optname,
case SO_LINGER:
lv = sizeof(v.ling);
- v.ling.l_onoff = !!sock_flag(sk, SOCK_LINGER);
+ v.ling.l_onoff = sock_flag(sk, SOCK_LINGER);
v.ling.l_linger = sk->sk_lingertime / HZ;
break;
@@ -1012,11 +1013,11 @@ int sock_getsockopt(struct socket *sock, int level, int optname,
break;
case SO_RXQ_OVFL:
- v.val = !!sock_flag(sk, SOCK_RXQ_OVFL);
+ v.val = sock_flag(sk, SOCK_RXQ_OVFL);
break;
case SO_WIFI_STATUS:
- v.val = !!sock_flag(sk, SOCK_WIFI_STATUS);
+ v.val = sock_flag(sk, SOCK_WIFI_STATUS);
break;
case SO_PEEK_OFF:
@@ -1026,7 +1027,7 @@ int sock_getsockopt(struct socket *sock, int level, int optname,
v.val = sk->sk_peek_off;
break;
case SO_NOFCS:
- v.val = !!sock_flag(sk, SOCK_NOFCS);
+ v.val = sock_flag(sk, SOCK_NOFCS);
break;
default:
return -ENOPROTOOPT;
@@ -1238,8 +1239,8 @@ static void __sk_free(struct sock *sk)
sock_disable_timestamp(sk, SK_FLAGS_TIMESTAMP);
if (atomic_read(&sk->sk_omem_alloc))
- printk(KERN_DEBUG "%s: optmem leakage (%d bytes) detected.\n",
- __func__, atomic_read(&sk->sk_omem_alloc));
+ pr_debug("%s: optmem leakage (%d bytes) detected\n",
+ __func__, atomic_read(&sk->sk_omem_alloc));
if (sk->sk_peer_cred)
put_cred(sk->sk_peer_cred);
@@ -2424,7 +2425,7 @@ static void assign_proto_idx(struct proto *prot)
prot->inuse_idx = find_first_zero_bit(proto_inuse_idx, PROTO_INUSE_NR);
if (unlikely(prot->inuse_idx == PROTO_INUSE_NR - 1)) {
- printk(KERN_ERR "PROTO_INUSE_NR exhausted\n");
+ pr_err("PROTO_INUSE_NR exhausted\n");
return;
}
@@ -2454,8 +2455,8 @@ int proto_register(struct proto *prot, int alloc_slab)
NULL);
if (prot->slab == NULL) {
- printk(KERN_CRIT "%s: Can't create sock SLAB cache!\n",
- prot->name);
+ pr_crit("%s: Can't create sock SLAB cache!\n",
+ prot->name);
goto out;
}
@@ -2469,8 +2470,8 @@ int proto_register(struct proto *prot, int alloc_slab)
SLAB_HWCACHE_ALIGN, NULL);
if (prot->rsk_prot->slab == NULL) {
- printk(KERN_CRIT "%s: Can't create request sock SLAB cache!\n",
- prot->name);
+ pr_crit("%s: Can't create request sock SLAB cache!\n",
+ prot->name);
goto out_free_request_sock_slab_name;
}
}
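Two small points behind the sock.c hunks: the pr_fmt() define is what lets the converted pr_*() calls drop hand-written module prefixes, and the '!!' removed around sock_flag() is redundant because sock_flag() returns bool in this series. A minimal pr_fmt sketch for a hypothetical module:

/* Must appear before any header that pulls in <linux/printk.h>. */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/init.h>
#include <linux/printk.h>

static int __init demo_init(void)
{
        pr_info("loaded\n");    /* printed as "<modname>: loaded" */
        return 0;
}
module_init(demo_init);

static void __exit demo_exit(void)
{
        pr_info("unloaded\n");
}
module_exit(demo_exit);

MODULE_LICENSE("GPL");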
diff --git a/net/dccp/proto.c b/net/dccp/proto.c
index 7065c0ae1e7b..6c7c78b83940 100644
--- a/net/dccp/proto.c
+++ b/net/dccp/proto.c
@@ -848,7 +848,7 @@ int dccp_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
default:
dccp_pr_debug("packet_type=%s\n",
dccp_packet_name(dh->dccph_type));
- sk_eat_skb(sk, skb, 0);
+ sk_eat_skb(sk, skb, false);
}
verify_sock_status:
if (sock_flag(sk, SOCK_DONE)) {
@@ -905,7 +905,7 @@ verify_sock_status:
len = skb->len;
found_fin_ok:
if (!(flags & MSG_PEEK))
- sk_eat_skb(sk, skb, 0);
+ sk_eat_skb(sk, skb, false);
break;
} while (1);
out:
diff --git a/net/decnet/dn_fib.c b/net/decnet/dn_fib.c
index 65a8cd7891fe..7eaf98799729 100644
--- a/net/decnet/dn_fib.c
+++ b/net/decnet/dn_fib.c
@@ -438,9 +438,8 @@ int dn_fib_semantic_match(int type, struct dn_fib_info *fi, const struct flowidn
res->fi = NULL;
return 1;
default:
- if (net_ratelimit())
- printk("DECnet: impossible routing event : dn_fib_semantic_match type=%d\n",
- type);
+ net_err_ratelimited("DECnet: impossible routing event : dn_fib_semantic_match type=%d\n",
+ type);
res->fi = NULL;
return -EINVAL;
}
diff --git a/net/decnet/dn_neigh.c b/net/decnet/dn_neigh.c
index ee7013f24fca..ac90f658586c 100644
--- a/net/decnet/dn_neigh.c
+++ b/net/decnet/dn_neigh.c
@@ -162,8 +162,8 @@ static int dn_neigh_construct(struct neighbour *neigh)
else if ((dev->type == ARPHRD_ETHER) || (dev->type == ARPHRD_LOOPBACK))
dn_dn2eth(neigh->ha, dn->addr);
else {
- if (net_ratelimit())
- printk(KERN_DEBUG "Trying to create neigh for hw %d\n", dev->type);
+ net_dbg_ratelimited("Trying to create neigh for hw %d\n",
+ dev->type);
return -EINVAL;
}
@@ -236,15 +236,13 @@ static int dn_long_output(struct neighbour *neigh, struct sk_buff *skb)
if (skb_headroom(skb) < headroom) {
struct sk_buff *skb2 = skb_realloc_headroom(skb, headroom);
if (skb2 == NULL) {
- if (net_ratelimit())
- printk(KERN_CRIT "dn_long_output: no memory\n");
+ net_crit_ratelimited("dn_long_output: no memory\n");
kfree_skb(skb);
return -ENOBUFS;
}
kfree_skb(skb);
skb = skb2;
- if (net_ratelimit())
- printk(KERN_INFO "dn_long_output: Increasing headroom\n");
+ net_info_ratelimited("dn_long_output: Increasing headroom\n");
}
data = skb_push(skb, sizeof(struct dn_long_packet) + 3);
@@ -281,15 +279,13 @@ static int dn_short_output(struct neighbour *neigh, struct sk_buff *skb)
if (skb_headroom(skb) < headroom) {
struct sk_buff *skb2 = skb_realloc_headroom(skb, headroom);
if (skb2 == NULL) {
- if (net_ratelimit())
- printk(KERN_CRIT "dn_short_output: no memory\n");
+ net_crit_ratelimited("dn_short_output: no memory\n");
kfree_skb(skb);
return -ENOBUFS;
}
kfree_skb(skb);
skb = skb2;
- if (net_ratelimit())
- printk(KERN_INFO "dn_short_output: Increasing headroom\n");
+ net_info_ratelimited("dn_short_output: Increasing headroom\n");
}
data = skb_push(skb, sizeof(struct dn_short_packet) + 2);
@@ -322,15 +318,13 @@ static int dn_phase3_output(struct neighbour *neigh, struct sk_buff *skb)
if (skb_headroom(skb) < headroom) {
struct sk_buff *skb2 = skb_realloc_headroom(skb, headroom);
if (skb2 == NULL) {
- if (net_ratelimit())
- printk(KERN_CRIT "dn_phase3_output: no memory\n");
+ net_crit_ratelimited("dn_phase3_output: no memory\n");
kfree_skb(skb);
return -ENOBUFS;
}
kfree_skb(skb);
skb = skb2;
- if (net_ratelimit())
- printk(KERN_INFO "dn_phase3_output: Increasing headroom\n");
+ net_info_ratelimited("dn_phase3_output: Increasing headroom\n");
}
data = skb_push(skb, sizeof(struct dn_short_packet) + 2);
diff --git a/net/decnet/dn_nsp_in.c b/net/decnet/dn_nsp_in.c
index 58084f37151e..c344163e6ac0 100644
--- a/net/decnet/dn_nsp_in.c
+++ b/net/decnet/dn_nsp_in.c
@@ -80,12 +80,15 @@ extern int decnet_log_martians;
static void dn_log_martian(struct sk_buff *skb, const char *msg)
{
- if (decnet_log_martians && net_ratelimit()) {
+ if (decnet_log_martians) {
char *devname = skb->dev ? skb->dev->name : "???";
struct dn_skb_cb *cb = DN_SKB_CB(skb);
- printk(KERN_INFO "DECnet: Martian packet (%s) dev=%s src=0x%04hx dst=0x%04hx srcport=0x%04hx dstport=0x%04hx\n",
- msg, devname, le16_to_cpu(cb->src), le16_to_cpu(cb->dst),
- le16_to_cpu(cb->src_port), le16_to_cpu(cb->dst_port));
+ net_info_ratelimited("DECnet: Martian packet (%s) dev=%s src=0x%04hx dst=0x%04hx srcport=0x%04hx dstport=0x%04hx\n",
+ msg, devname,
+ le16_to_cpu(cb->src),
+ le16_to_cpu(cb->dst),
+ le16_to_cpu(cb->src_port),
+ le16_to_cpu(cb->dst_port));
}
}
diff --git a/net/decnet/dn_nsp_out.c b/net/decnet/dn_nsp_out.c
index b952f88d9c1f..564a6ad13ce7 100644
--- a/net/decnet/dn_nsp_out.c
+++ b/net/decnet/dn_nsp_out.c
@@ -1,4 +1,3 @@
-
/*
* DECnet An implementation of the DECnet protocol suite for the LINUX
* operating system. DECnet is implemented using the BSD Socket
@@ -554,8 +553,8 @@ static __inline__ void dn_nsp_do_disc(struct sock *sk, unsigned char msgflg,
unsigned char *msg;
if ((dst == NULL) || (rem == 0)) {
- if (net_ratelimit())
- printk(KERN_DEBUG "DECnet: dn_nsp_do_disc: BUG! Please report this to SteveW@ACM.org rem=%u dst=%p\n", le16_to_cpu(rem), dst);
+ net_dbg_ratelimited("DECnet: dn_nsp_do_disc: BUG! Please report this to SteveW@ACM.org rem=%u dst=%p\n",
+ le16_to_cpu(rem), dst);
return;
}
diff --git a/net/decnet/dn_route.c b/net/decnet/dn_route.c
index 7e1f8788da19..586302e557ad 100644
--- a/net/decnet/dn_route.c
+++ b/net/decnet/dn_route.c
@@ -748,8 +748,7 @@ static int dn_output(struct sk_buff *skb)
dn_to_neigh_output);
error:
- if (net_ratelimit())
- printk(KERN_DEBUG "dn_output: This should not happen\n");
+ net_dbg_ratelimited("dn_output: This should not happen\n");
kfree_skb(skb);
@@ -807,12 +806,10 @@ drop:
*/
static int dn_rt_bug(struct sk_buff *skb)
{
- if (net_ratelimit()) {
- struct dn_skb_cb *cb = DN_SKB_CB(skb);
+ struct dn_skb_cb *cb = DN_SKB_CB(skb);
- printk(KERN_DEBUG "dn_rt_bug: skb from:%04x to:%04x\n",
- le16_to_cpu(cb->src), le16_to_cpu(cb->dst));
- }
+ net_dbg_ratelimited("dn_rt_bug: skb from:%04x to:%04x\n",
+ le16_to_cpu(cb->src), le16_to_cpu(cb->dst));
kfree_skb(skb);
@@ -1327,9 +1324,7 @@ static int dn_route_input_slow(struct sk_buff *skb)
out_dev = DN_FIB_RES_DEV(res);
if (out_dev == NULL) {
- if (net_ratelimit())
- printk(KERN_CRIT "Bug in dn_route_input_slow() "
- "No output device\n");
+ net_crit_ratelimited("Bug in dn_route_input_slow() No output device\n");
goto e_inval;
}
dev_hold(out_dev);
diff --git a/net/decnet/dn_table.c b/net/decnet/dn_table.c
index a9a62f225a6b..650f3380c98a 100644
--- a/net/decnet/dn_table.c
+++ b/net/decnet/dn_table.c
@@ -836,8 +836,8 @@ struct dn_fib_table *dn_fib_get_table(u32 n, int create)
if (!create)
return NULL;
- if (in_interrupt() && net_ratelimit()) {
- printk(KERN_DEBUG "DECnet: BUG! Attempt to create routing table from interrupt\n");
+ if (in_interrupt()) {
+ net_dbg_ratelimited("DECnet: BUG! Attempt to create routing table from interrupt\n");
return NULL;
}
diff --git a/net/decnet/netfilter/dn_rtmsg.c b/net/decnet/netfilter/dn_rtmsg.c
index 1531135130db..44b890936fc0 100644
--- a/net/decnet/netfilter/dn_rtmsg.c
+++ b/net/decnet/netfilter/dn_rtmsg.c
@@ -57,8 +57,7 @@ nlmsg_failure:
if (skb)
kfree_skb(skb);
*errp = -ENOMEM;
- if (net_ratelimit())
- printk(KERN_ERR "dn_rtmsg: error creating netlink message\n");
+ net_err_ratelimited("dn_rtmsg: error creating netlink message\n");
return NULL;
}
diff --git a/net/dsa/slave.c b/net/dsa/slave.c
index 56cf9b8e1c7c..e32083d5d8f8 100644
--- a/net/dsa/slave.c
+++ b/net/dsa/slave.c
@@ -66,7 +66,7 @@ static int dsa_slave_open(struct net_device *dev)
if (!(master->flags & IFF_UP))
return -ENETDOWN;
- if (compare_ether_addr(dev->dev_addr, master->dev_addr)) {
+ if (!ether_addr_equal(dev->dev_addr, master->dev_addr)) {
err = dev_uc_add(master, dev->dev_addr);
if (err < 0)
goto out;
@@ -89,7 +89,7 @@ clear_allmulti:
if (dev->flags & IFF_ALLMULTI)
dev_set_allmulti(master, -1);
del_unicast:
- if (compare_ether_addr(dev->dev_addr, master->dev_addr))
+ if (!ether_addr_equal(dev->dev_addr, master->dev_addr))
dev_uc_del(master, dev->dev_addr);
out:
return err;
@@ -107,7 +107,7 @@ static int dsa_slave_close(struct net_device *dev)
if (dev->flags & IFF_PROMISC)
dev_set_promiscuity(master, -1);
- if (compare_ether_addr(dev->dev_addr, master->dev_addr))
+ if (!ether_addr_equal(dev->dev_addr, master->dev_addr))
dev_uc_del(master, dev->dev_addr);
return 0;
@@ -146,13 +146,13 @@ static int dsa_slave_set_mac_address(struct net_device *dev, void *a)
if (!(dev->flags & IFF_UP))
goto out;
- if (compare_ether_addr(addr->sa_data, master->dev_addr)) {
+ if (!ether_addr_equal(addr->sa_data, master->dev_addr)) {
err = dev_uc_add(master, addr->sa_data);
if (err < 0)
return err;
}
- if (compare_ether_addr(dev->dev_addr, master->dev_addr))
+ if (!ether_addr_equal(dev->dev_addr, master->dev_addr))
dev_uc_del(master, dev->dev_addr);
out:
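The compare_ether_addr() to ether_addr_equal() conversions here (and the _64bits variant in eth.c below) flip the sense of every test: the old helper was memcmp-like and returned 0 on a match, while the new one returns true when the two 6-byte addresses are equal. A small sketch of the equivalent check:

#include <linux/etherdevice.h>

static bool same_station(const u8 *a, const u8 *b)
{
        /* Old:  compare_ether_addr(a, b) == 0   meant "equal".
         * New:  ether_addr_equal(a, b)          is true on a match,
         * so the '!' moves to the call sites that want "different". */
        return ether_addr_equal(a, b);
}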
diff --git a/net/econet/Kconfig b/net/econet/Kconfig
deleted file mode 100644
index 39a2d2975e0e..000000000000
--- a/net/econet/Kconfig
+++ /dev/null
@@ -1,36 +0,0 @@
-#
-# Acorn Econet/AUN protocols
-#
-
-config ECONET
- tristate "Acorn Econet/AUN protocols (EXPERIMENTAL)"
- depends on EXPERIMENTAL && INET
- ---help---
- Econet is a fairly old and slow networking protocol mainly used by
- Acorn computers to access file and print servers. It uses native
- Econet network cards. AUN is an implementation of the higher level
- parts of Econet that runs over ordinary Ethernet connections, on
- top of the UDP packet protocol, which in turn runs on top of the
- Internet protocol IP.
-
- If you say Y here, you can choose with the next two options whether
- to send Econet/AUN traffic over a UDP Ethernet connection or over
- a native Econet network card.
-
- To compile this driver as a module, choose M here: the module
- will be called econet.
-
-config ECONET_AUNUDP
- bool "AUN over UDP"
- depends on ECONET
- help
- Say Y here if you want to send Econet/AUN traffic over a UDP
- connection (UDP is a packet based protocol that runs on top of the
- Internet protocol IP) using an ordinary Ethernet network card.
-
-config ECONET_NATIVE
- bool "Native Econet"
- depends on ECONET
- help
- Say Y here if you have a native Econet network card installed in
- your computer.
diff --git a/net/econet/Makefile b/net/econet/Makefile
deleted file mode 100644
index 05fae8be2fed..000000000000
--- a/net/econet/Makefile
+++ /dev/null
@@ -1,7 +0,0 @@
-#
-# Makefile for Econet support code.
-#
-
-obj-$(CONFIG_ECONET) += econet.o
-
-econet-y := af_econet.o
diff --git a/net/econet/af_econet.c b/net/econet/af_econet.c
deleted file mode 100644
index fa14ca76b77b..000000000000
--- a/net/econet/af_econet.c
+++ /dev/null
@@ -1,1172 +0,0 @@
-/*
- * An implementation of the Acorn Econet and AUN protocols.
- * Philip Blundell <philb@gnu.org>
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
- *
- */
-
-#define pr_fmt(fmt) fmt
-
-#include <linux/module.h>
-
-#include <linux/types.h>
-#include <linux/kernel.h>
-#include <linux/string.h>
-#include <linux/mm.h>
-#include <linux/socket.h>
-#include <linux/sockios.h>
-#include <linux/in.h>
-#include <linux/errno.h>
-#include <linux/interrupt.h>
-#include <linux/if_ether.h>
-#include <linux/netdevice.h>
-#include <linux/inetdevice.h>
-#include <linux/route.h>
-#include <linux/inet.h>
-#include <linux/etherdevice.h>
-#include <linux/if_arp.h>
-#include <linux/wireless.h>
-#include <linux/skbuff.h>
-#include <linux/udp.h>
-#include <linux/slab.h>
-#include <linux/vmalloc.h>
-#include <net/sock.h>
-#include <net/inet_common.h>
-#include <linux/stat.h>
-#include <linux/init.h>
-#include <linux/if_ec.h>
-#include <net/udp.h>
-#include <net/ip.h>
-#include <linux/spinlock.h>
-#include <linux/rcupdate.h>
-#include <linux/bitops.h>
-#include <linux/mutex.h>
-
-#include <linux/uaccess.h>
-
-static const struct proto_ops econet_ops;
-static struct hlist_head econet_sklist;
-static DEFINE_SPINLOCK(econet_lock);
-static DEFINE_MUTEX(econet_mutex);
-
-/* Since there are only 256 possible network numbers (or fewer, depends
- how you count) it makes sense to use a simple lookup table. */
-static struct net_device *net2dev_map[256];
-
-#define EC_PORT_IP 0xd2
-
-#ifdef CONFIG_ECONET_AUNUDP
-static DEFINE_SPINLOCK(aun_queue_lock);
-static struct socket *udpsock;
-#define AUN_PORT 0x8000
-
-struct aunhdr {
- unsigned char code; /* AUN magic protocol byte */
- unsigned char port;
- unsigned char cb;
- unsigned char pad;
- unsigned long handle;
-};
-
-static unsigned long aun_seq;
-
-/* Queue of packets waiting to be transmitted. */
-static struct sk_buff_head aun_queue;
-static struct timer_list ab_cleanup_timer;
-
-#endif /* CONFIG_ECONET_AUNUDP */
-
-/* Per-packet information */
-struct ec_cb {
- struct sockaddr_ec sec;
- unsigned long cookie; /* Supplied by user. */
-#ifdef CONFIG_ECONET_AUNUDP
- int done;
- unsigned long seq; /* Sequencing */
- unsigned long timeout; /* Timeout */
- unsigned long start; /* jiffies */
-#endif
-#ifdef CONFIG_ECONET_NATIVE
- void (*sent)(struct sk_buff *, int result);
-#endif
-};
-
-static void econet_remove_socket(struct hlist_head *list, struct sock *sk)
-{
- spin_lock_bh(&econet_lock);
- sk_del_node_init(sk);
- spin_unlock_bh(&econet_lock);
-}
-
-static void econet_insert_socket(struct hlist_head *list, struct sock *sk)
-{
- spin_lock_bh(&econet_lock);
- sk_add_node(sk, list);
- spin_unlock_bh(&econet_lock);
-}
-
-/*
- * Pull a packet from our receive queue and hand it to the user.
- * If necessary we block.
- */
-
-static int econet_recvmsg(struct kiocb *iocb, struct socket *sock,
- struct msghdr *msg, size_t len, int flags)
-{
- struct sock *sk = sock->sk;
- struct sk_buff *skb;
- size_t copied;
- int err;
-
- msg->msg_namelen = sizeof(struct sockaddr_ec);
-
- mutex_lock(&econet_mutex);
-
- /*
- * Call the generic datagram receiver. This handles all sorts
- * of horrible races and re-entrancy so we can forget about it
- * in the protocol layers.
- *
- * Now it will return ENETDOWN, if device have just gone down,
- * but then it will block.
- */
-
- skb = skb_recv_datagram(sk, flags, flags & MSG_DONTWAIT, &err);
-
- /*
- * An error occurred so return it. Because skb_recv_datagram()
- * handles the blocking we don't see and worry about blocking
- * retries.
- */
-
- if (skb == NULL)
- goto out;
-
- /*
- * You lose any data beyond the buffer you gave. If it worries a
- * user program they can ask the device for its MTU anyway.
- */
-
- copied = skb->len;
- if (copied > len) {
- copied = len;
- msg->msg_flags |= MSG_TRUNC;
- }
-
- /* We can't use skb_copy_datagram here */
- err = memcpy_toiovec(msg->msg_iov, skb->data, copied);
- if (err)
- goto out_free;
- sk->sk_stamp = skb->tstamp;
-
- if (msg->msg_name)
- memcpy(msg->msg_name, skb->cb, msg->msg_namelen);
-
- /*
- * Free or return the buffer as appropriate. Again this
- * hides all the races and re-entrancy issues from us.
- */
- err = copied;
-
-out_free:
- skb_free_datagram(sk, skb);
-out:
- mutex_unlock(&econet_mutex);
- return err;
-}
-
-/*
- * Bind an Econet socket.
- */
-
-static int econet_bind(struct socket *sock, struct sockaddr *uaddr,
- int addr_len)
-{
- struct sockaddr_ec *sec = (struct sockaddr_ec *)uaddr;
- struct sock *sk;
- struct econet_sock *eo;
-
- /*
- * Check legality
- */
-
- if (addr_len < sizeof(struct sockaddr_ec) ||
- sec->sec_family != AF_ECONET)
- return -EINVAL;
-
- mutex_lock(&econet_mutex);
-
- sk = sock->sk;
- eo = ec_sk(sk);
-
- eo->cb = sec->cb;
- eo->port = sec->port;
- eo->station = sec->addr.station;
- eo->net = sec->addr.net;
-
- mutex_unlock(&econet_mutex);
-
- return 0;
-}
-
-#if defined(CONFIG_ECONET_AUNUDP) || defined(CONFIG_ECONET_NATIVE)
-/*
- * Queue a transmit result for the user to be told about.
- */
-
-static void tx_result(struct sock *sk, unsigned long cookie, int result)
-{
- struct sk_buff *skb = alloc_skb(0, GFP_ATOMIC);
- struct ec_cb *eb;
- struct sockaddr_ec *sec;
-
- if (skb == NULL) {
- pr_debug("econet: memory squeeze, transmit result dropped\n");
- return;
- }
-
- eb = (struct ec_cb *)&skb->cb;
- sec = (struct sockaddr_ec *)&eb->sec;
- memset(sec, 0, sizeof(struct sockaddr_ec));
- sec->cookie = cookie;
- sec->type = ECTYPE_TRANSMIT_STATUS | result;
- sec->sec_family = AF_ECONET;
-
- if (sock_queue_rcv_skb(sk, skb) < 0)
- kfree_skb(skb);
-}
-#endif
-
-#ifdef CONFIG_ECONET_NATIVE
-/*
- * Called by the Econet hardware driver when a packet transmit
- * has completed. Tell the user.
- */
-
-static void ec_tx_done(struct sk_buff *skb, int result)
-{
- struct ec_cb *eb = (struct ec_cb *)&skb->cb;
- tx_result(skb->sk, eb->cookie, result);
-}
-#endif
-
-/*
- * Send a packet. We have to work out which device it's going out on
- * and hence whether to use real Econet or the UDP emulation.
- */
-
-static int econet_sendmsg(struct kiocb *iocb, struct socket *sock,
- struct msghdr *msg, size_t len)
-{
- struct sockaddr_ec *saddr = (struct sockaddr_ec *)msg->msg_name;
- struct net_device *dev;
- struct ec_addr addr;
- int err;
- unsigned char port, cb;
-#if defined(CONFIG_ECONET_AUNUDP) || defined(CONFIG_ECONET_NATIVE)
- struct sock *sk = sock->sk;
- struct sk_buff *skb;
- struct ec_cb *eb;
-#endif
-#ifdef CONFIG_ECONET_AUNUDP
- struct msghdr udpmsg;
- struct iovec iov[2];
- struct aunhdr ah;
- struct sockaddr_in udpdest;
- __kernel_size_t size;
- mm_segment_t oldfs;
- char *userbuf;
-#endif
-
- /*
- * Check the flags.
- */
-
- if (msg->msg_flags & ~(MSG_DONTWAIT|MSG_CMSG_COMPAT))
- return -EINVAL;
-
- /*
- * Get and verify the address.
- */
-
- mutex_lock(&econet_mutex);
-
- if (saddr == NULL || msg->msg_namelen < sizeof(struct sockaddr_ec)) {
- mutex_unlock(&econet_mutex);
- return -EINVAL;
- }
- addr.station = saddr->addr.station;
- addr.net = saddr->addr.net;
- port = saddr->port;
- cb = saddr->cb;
-
- /* Look for a device with the right network number. */
- dev = net2dev_map[addr.net];
-
- /* If not directly reachable, use some default */
- if (dev == NULL) {
- dev = net2dev_map[0];
- /* No interfaces at all? */
- if (dev == NULL) {
- mutex_unlock(&econet_mutex);
- return -ENETDOWN;
- }
- }
-
- if (dev->type == ARPHRD_ECONET) {
- /* Real hardware Econet. We're not worthy etc. */
-#ifdef CONFIG_ECONET_NATIVE
- unsigned short proto = 0;
- int hlen, tlen;
- int res;
-
- if (len + 15 > dev->mtu) {
- mutex_unlock(&econet_mutex);
- return -EMSGSIZE;
- }
-
- dev_hold(dev);
-
- hlen = LL_RESERVED_SPACE(dev);
- tlen = dev->needed_tailroom;
- skb = sock_alloc_send_skb(sk, len + hlen + tlen,
- msg->msg_flags & MSG_DONTWAIT, &err);
- if (skb == NULL)
- goto out_unlock;
-
- skb_reserve(skb, hlen);
- skb_reset_network_header(skb);
-
- eb = (struct ec_cb *)&skb->cb;
-
- eb->cookie = saddr->cookie;
- eb->sec = *saddr;
- eb->sent = ec_tx_done;
-
- err = -EINVAL;
- res = dev_hard_header(skb, dev, ntohs(proto), &addr, NULL, len);
- if (res < 0)
- goto out_free;
- if (res > 0) {
- struct ec_framehdr *fh;
- /* Poke in our control byte and
- port number. Hack, hack. */
- fh = (struct ec_framehdr *)skb->data;
- fh->cb = cb;
- fh->port = port;
- if (sock->type != SOCK_DGRAM) {
- skb_reset_tail_pointer(skb);
- skb->len = 0;
- }
- }
-
- /* Copy the data. Returns -EFAULT on error */
- err = memcpy_fromiovec(skb_put(skb, len), msg->msg_iov, len);
- skb->protocol = proto;
- skb->dev = dev;
- skb->priority = sk->sk_priority;
- if (err)
- goto out_free;
-
- err = -ENETDOWN;
- if (!(dev->flags & IFF_UP))
- goto out_free;
-
- /*
- * Now send it
- */
-
- dev_queue_xmit(skb);
- dev_put(dev);
- mutex_unlock(&econet_mutex);
- return len;
-
-out_free:
- kfree_skb(skb);
-out_unlock:
- if (dev)
- dev_put(dev);
-#else
- err = -EPROTOTYPE;
-#endif
- mutex_unlock(&econet_mutex);
-
- return err;
- }
-
-#ifdef CONFIG_ECONET_AUNUDP
- /* AUN virtual Econet. */
-
- if (udpsock == NULL) {
- mutex_unlock(&econet_mutex);
- return -ENETDOWN; /* No socket - can't send */
- }
-
- if (len > 32768) {
- err = -E2BIG;
- goto error;
- }
-
- /* Make up a UDP datagram and hand it off to some higher intellect. */
-
- memset(&udpdest, 0, sizeof(udpdest));
- udpdest.sin_family = AF_INET;
- udpdest.sin_port = htons(AUN_PORT);
-
- /* At the moment we use the stupid Acorn scheme of Econet address
- y.x maps to IP a.b.c.x. This should be replaced with something
- more flexible and more aware of subnet masks. */
- {
- struct in_device *idev;
- unsigned long network = 0;
-
- rcu_read_lock();
- idev = __in_dev_get_rcu(dev);
- if (idev) {
- if (idev->ifa_list)
- network = ntohl(idev->ifa_list->ifa_address) &
- 0xffffff00; /* !!! */
- }
- rcu_read_unlock();
- udpdest.sin_addr.s_addr = htonl(network | addr.station);
- }
-
- memset(&ah, 0, sizeof(ah));
- ah.port = port;
- ah.cb = cb & 0x7f;
- ah.code = 2; /* magic */
-
- /* tack our header on the front of the iovec */
- size = sizeof(struct aunhdr);
- iov[0].iov_base = (void *)&ah;
- iov[0].iov_len = size;
-
- userbuf = vmalloc(len);
- if (userbuf == NULL) {
- err = -ENOMEM;
- goto error;
- }
-
- iov[1].iov_base = userbuf;
- iov[1].iov_len = len;
- err = memcpy_fromiovec(userbuf, msg->msg_iov, len);
- if (err)
- goto error_free_buf;
-
- /* Get a skbuff (no data, just holds our cb information) */
- skb = sock_alloc_send_skb(sk, 0, msg->msg_flags & MSG_DONTWAIT, &err);
- if (skb == NULL)
- goto error_free_buf;
-
- eb = (struct ec_cb *)&skb->cb;
-
- eb->cookie = saddr->cookie;
- eb->timeout = 5 * HZ;
- eb->start = jiffies;
- ah.handle = aun_seq;
- eb->seq = (aun_seq++);
- eb->sec = *saddr;
-
- skb_queue_tail(&aun_queue, skb);
-
- udpmsg.msg_name = (void *)&udpdest;
- udpmsg.msg_namelen = sizeof(udpdest);
- udpmsg.msg_iov = &iov[0];
- udpmsg.msg_iovlen = 2;
- udpmsg.msg_control = NULL;
- udpmsg.msg_controllen = 0;
- udpmsg.msg_flags = 0;
-
- oldfs = get_fs();
- set_fs(KERNEL_DS); /* More privs :-) */
- err = sock_sendmsg(udpsock, &udpmsg, size);
- set_fs(oldfs);
-
-error_free_buf:
- vfree(userbuf);
-error:
-#else
- err = -EPROTOTYPE;
-#endif
- mutex_unlock(&econet_mutex);
-
- return err;
-}
-
-/*
- * Look up the address of a socket.
- */
-
-static int econet_getname(struct socket *sock, struct sockaddr *uaddr,
- int *uaddr_len, int peer)
-{
- struct sock *sk;
- struct econet_sock *eo;
- struct sockaddr_ec *sec = (struct sockaddr_ec *)uaddr;
-
- if (peer)
- return -EOPNOTSUPP;
-
- memset(sec, 0, sizeof(*sec));
- mutex_lock(&econet_mutex);
-
- sk = sock->sk;
- eo = ec_sk(sk);
-
- sec->sec_family = AF_ECONET;
- sec->port = eo->port;
- sec->addr.station = eo->station;
- sec->addr.net = eo->net;
-
- mutex_unlock(&econet_mutex);
-
- *uaddr_len = sizeof(*sec);
- return 0;
-}
-
-static void econet_destroy_timer(unsigned long data)
-{
- struct sock *sk = (struct sock *)data;
-
- if (!sk_has_allocations(sk)) {
- sk_free(sk);
- return;
- }
-
- sk->sk_timer.expires = jiffies + 10 * HZ;
- add_timer(&sk->sk_timer);
- pr_debug("econet: socket destroy delayed\n");
-}
-
-/*
- * Close an econet socket.
- */
-
-static int econet_release(struct socket *sock)
-{
- struct sock *sk;
-
- mutex_lock(&econet_mutex);
-
- sk = sock->sk;
- if (!sk)
- goto out_unlock;
-
- econet_remove_socket(&econet_sklist, sk);
-
- /*
- * Now the socket is dead. No more input will appear.
- */
-
- sk->sk_state_change(sk); /* It is useless. Just for sanity. */
-
- sock_orphan(sk);
-
- /* Purge queues */
-
- skb_queue_purge(&sk->sk_receive_queue);
-
- if (sk_has_allocations(sk)) {
- sk->sk_timer.data = (unsigned long)sk;
- sk->sk_timer.expires = jiffies + HZ;
- sk->sk_timer.function = econet_destroy_timer;
- add_timer(&sk->sk_timer);
-
- goto out_unlock;
- }
-
- sk_free(sk);
-
-out_unlock:
- mutex_unlock(&econet_mutex);
- return 0;
-}
-
-static struct proto econet_proto = {
- .name = "ECONET",
- .owner = THIS_MODULE,
- .obj_size = sizeof(struct econet_sock),
-};
-
-/*
- * Create an Econet socket
- */
-
-static int econet_create(struct net *net, struct socket *sock, int protocol,
- int kern)
-{
- struct sock *sk;
- struct econet_sock *eo;
- int err;
-
- if (!net_eq(net, &init_net))
- return -EAFNOSUPPORT;
-
- /* Econet only provides datagram services. */
- if (sock->type != SOCK_DGRAM)
- return -ESOCKTNOSUPPORT;
-
- sock->state = SS_UNCONNECTED;
-
- err = -ENOBUFS;
- sk = sk_alloc(net, PF_ECONET, GFP_KERNEL, &econet_proto);
- if (sk == NULL)
- goto out;
-
- sk->sk_reuse = SK_CAN_REUSE;
- sock->ops = &econet_ops;
- sock_init_data(sock, sk);
-
- eo = ec_sk(sk);
- sock_reset_flag(sk, SOCK_ZAPPED);
- sk->sk_family = PF_ECONET;
- eo->num = protocol;
-
- econet_insert_socket(&econet_sklist, sk);
- return 0;
-out:
- return err;
-}
-
-/*
- * Handle Econet specific ioctls
- */
-
-static int ec_dev_ioctl(struct socket *sock, unsigned int cmd, void __user *arg)
-{
- struct ifreq ifr;
- struct ec_device *edev;
- struct net_device *dev;
- struct sockaddr_ec *sec;
- int err;
-
- /*
- * Fetch the caller's info block into kernel space
- */
-
- if (copy_from_user(&ifr, arg, sizeof(struct ifreq)))
- return -EFAULT;
-
- dev = dev_get_by_name(&init_net, ifr.ifr_name);
- if (dev == NULL)
- return -ENODEV;
-
- sec = (struct sockaddr_ec *)&ifr.ifr_addr;
-
- mutex_lock(&econet_mutex);
-
- err = 0;
- switch (cmd) {
- case SIOCSIFADDR:
- if (!capable(CAP_NET_ADMIN)) {
- err = -EPERM;
- break;
- }
-
- edev = dev->ec_ptr;
- if (edev == NULL) {
- /* Magic up a new one. */
- edev = kzalloc(sizeof(struct ec_device), GFP_KERNEL);
- if (edev == NULL) {
- err = -ENOMEM;
- break;
- }
- dev->ec_ptr = edev;
- } else
- net2dev_map[edev->net] = NULL;
- edev->station = sec->addr.station;
- edev->net = sec->addr.net;
- net2dev_map[sec->addr.net] = dev;
- if (!net2dev_map[0])
- net2dev_map[0] = dev;
- break;
-
- case SIOCGIFADDR:
- edev = dev->ec_ptr;
- if (edev == NULL) {
- err = -ENODEV;
- break;
- }
- memset(sec, 0, sizeof(struct sockaddr_ec));
- sec->addr.station = edev->station;
- sec->addr.net = edev->net;
- sec->sec_family = AF_ECONET;
- dev_put(dev);
- if (copy_to_user(arg, &ifr, sizeof(struct ifreq)))
- err = -EFAULT;
- break;
-
- default:
- err = -EINVAL;
- break;
- }
-
- mutex_unlock(&econet_mutex);
-
- dev_put(dev);
-
- return err;
-}
-
-/*
- * Handle generic ioctls
- */
-
-static int econet_ioctl(struct socket *sock, unsigned int cmd,
- unsigned long arg)
-{
- struct sock *sk = sock->sk;
- void __user *argp = (void __user *)arg;
-
- switch (cmd) {
- case SIOCGSTAMP:
- return sock_get_timestamp(sk, argp);
-
- case SIOCGSTAMPNS:
- return sock_get_timestampns(sk, argp);
-
- case SIOCSIFADDR:
- case SIOCGIFADDR:
- return ec_dev_ioctl(sock, cmd, argp);
-
- }
-
- return -ENOIOCTLCMD;
-}
-
-static const struct net_proto_family econet_family_ops = {
- .family = PF_ECONET,
- .create = econet_create,
- .owner = THIS_MODULE,
-};
-
-static const struct proto_ops econet_ops = {
- .family = PF_ECONET,
- .owner = THIS_MODULE,
- .release = econet_release,
- .bind = econet_bind,
- .connect = sock_no_connect,
- .socketpair = sock_no_socketpair,
- .accept = sock_no_accept,
- .getname = econet_getname,
- .poll = datagram_poll,
- .ioctl = econet_ioctl,
- .listen = sock_no_listen,
- .shutdown = sock_no_shutdown,
- .setsockopt = sock_no_setsockopt,
- .getsockopt = sock_no_getsockopt,
- .sendmsg = econet_sendmsg,
- .recvmsg = econet_recvmsg,
- .mmap = sock_no_mmap,
- .sendpage = sock_no_sendpage,
-};
-
-#if defined(CONFIG_ECONET_AUNUDP) || defined(CONFIG_ECONET_NATIVE)
-/*
- * Find the listening socket, if any, for the given data.
- */
-
-static struct sock *ec_listening_socket(unsigned char port, unsigned char
- station, unsigned char net)
-{
- struct sock *sk;
- struct hlist_node *node;
-
- spin_lock(&econet_lock);
- sk_for_each(sk, node, &econet_sklist) {
- struct econet_sock *opt = ec_sk(sk);
- if ((opt->port == port || opt->port == 0) &&
- (opt->station == station || opt->station == 0) &&
- (opt->net == net || opt->net == 0)) {
- sock_hold(sk);
- goto found;
- }
- }
- sk = NULL;
-found:
- spin_unlock(&econet_lock);
- return sk;
-}
-
-/*
- * Queue a received packet for a socket.
- */
-
-static int ec_queue_packet(struct sock *sk, struct sk_buff *skb,
- unsigned char stn, unsigned char net,
- unsigned char cb, unsigned char port)
-{
- struct ec_cb *eb = (struct ec_cb *)&skb->cb;
- struct sockaddr_ec *sec = (struct sockaddr_ec *)&eb->sec;
-
- memset(sec, 0, sizeof(struct sockaddr_ec));
- sec->sec_family = AF_ECONET;
- sec->type = ECTYPE_PACKET_RECEIVED;
- sec->port = port;
- sec->cb = cb;
- sec->addr.net = net;
- sec->addr.station = stn;
-
- return sock_queue_rcv_skb(sk, skb);
-}
-#endif
-
-#ifdef CONFIG_ECONET_AUNUDP
-/*
- * Send an AUN protocol response.
- */
-
-static void aun_send_response(__u32 addr, unsigned long seq, int code, int cb)
-{
- struct sockaddr_in sin = {
- .sin_family = AF_INET,
- .sin_port = htons(AUN_PORT),
- .sin_addr = {.s_addr = addr}
- };
- struct aunhdr ah = {.code = code, .cb = cb, .handle = seq};
- struct kvec iov = {.iov_base = (void *)&ah, .iov_len = sizeof(ah)};
- struct msghdr udpmsg;
-
- udpmsg.msg_name = (void *)&sin;
- udpmsg.msg_namelen = sizeof(sin);
- udpmsg.msg_control = NULL;
- udpmsg.msg_controllen = 0;
- udpmsg.msg_flags = 0;
-
- kernel_sendmsg(udpsock, &udpmsg, &iov, 1, sizeof(ah));
-}
-
-
-/*
- * Handle incoming AUN packets. Work out if anybody wants them,
- * and send positive or negative acknowledgements as appropriate.
- */
-
-static void aun_incoming(struct sk_buff *skb, struct aunhdr *ah, size_t len)
-{
- struct iphdr *ip = ip_hdr(skb);
- unsigned char stn = ntohl(ip->saddr) & 0xff;
- struct dst_entry *dst = skb_dst(skb);
- struct ec_device *edev = NULL;
- struct sock *sk = NULL;
- struct sk_buff *newskb;
-
- if (dst)
- edev = dst->dev->ec_ptr;
-
- if (!edev)
- goto bad;
-
- sk = ec_listening_socket(ah->port, stn, edev->net);
- if (sk == NULL)
- goto bad; /* Nobody wants it */
-
- newskb = alloc_skb((len - sizeof(struct aunhdr) + 15) & ~15,
- GFP_ATOMIC);
- if (newskb == NULL) {
- pr_debug("AUN: memory squeeze, dropping packet\n");
- /* Send nack and hope sender tries again */
- goto bad;
- }
-
- memcpy(skb_put(newskb, len - sizeof(struct aunhdr)), (void *)(ah + 1),
- len - sizeof(struct aunhdr));
-
- if (ec_queue_packet(sk, newskb, stn, edev->net, ah->cb, ah->port)) {
- /* Socket is bankrupt. */
- kfree_skb(newskb);
- goto bad;
- }
-
- aun_send_response(ip->saddr, ah->handle, 3, 0);
- sock_put(sk);
- return;
-
-bad:
- aun_send_response(ip->saddr, ah->handle, 4, 0);
- if (sk)
- sock_put(sk);
-}
-
-/*
- * Handle incoming AUN transmit acknowledgements. If the sequence
- * number matches something in our backlog then kill it and tell
- * the user. If the remote took too long to reply then we may have
- * dropped the packet already.
- */
-
-static void aun_tx_ack(unsigned long seq, int result)
-{
- struct sk_buff *skb;
- unsigned long flags;
- struct ec_cb *eb;
-
- spin_lock_irqsave(&aun_queue_lock, flags);
- skb_queue_walk(&aun_queue, skb) {
- eb = (struct ec_cb *)&skb->cb;
- if (eb->seq == seq)
- goto foundit;
- }
- spin_unlock_irqrestore(&aun_queue_lock, flags);
- pr_debug("AUN: unknown sequence %ld\n", seq);
- return;
-
-foundit:
- tx_result(skb->sk, eb->cookie, result);
- skb_unlink(skb, &aun_queue);
- spin_unlock_irqrestore(&aun_queue_lock, flags);
- kfree_skb(skb);
-}
-
-/*
- * Deal with received AUN frames - sort out what type of thing it is
- * and hand it to the right function.
- */
-
-static void aun_data_available(struct sock *sk, int slen)
-{
- int err;
- struct sk_buff *skb;
- unsigned char *data;
- struct aunhdr *ah;
- size_t len;
-
- while ((skb = skb_recv_datagram(sk, 0, 1, &err)) == NULL) {
- if (err == -EAGAIN) {
- pr_err("AUN: no data available?!\n");
- return;
- }
- pr_debug("AUN: recvfrom() error %d\n", -err);
- }
-
- data = skb_transport_header(skb) + sizeof(struct udphdr);
- ah = (struct aunhdr *)data;
- len = skb->len - sizeof(struct udphdr);
-
- switch (ah->code) {
- case 2:
- aun_incoming(skb, ah, len);
- break;
- case 3:
- aun_tx_ack(ah->handle, ECTYPE_TRANSMIT_OK);
- break;
- case 4:
- aun_tx_ack(ah->handle, ECTYPE_TRANSMIT_NOT_LISTENING);
- break;
- default:
- pr_debug("AUN: unknown packet type: %d\n", data[0]);
- }
-
- skb_free_datagram(sk, skb);
-}
-
-/*
- * Called by the timer to manage the AUN transmit queue. If a packet
- * was sent to a dead or nonexistent host then we will never get an
- * acknowledgement back. After a few seconds we need to spot this and
- * drop the packet.
- */
-
-static void ab_cleanup(unsigned long h)
-{
- struct sk_buff *skb, *n;
- unsigned long flags;
-
- spin_lock_irqsave(&aun_queue_lock, flags);
- skb_queue_walk_safe(&aun_queue, skb, n) {
- struct ec_cb *eb = (struct ec_cb *)&skb->cb;
- if ((jiffies - eb->start) > eb->timeout) {
- tx_result(skb->sk, eb->cookie,
- ECTYPE_TRANSMIT_NOT_PRESENT);
- skb_unlink(skb, &aun_queue);
- kfree_skb(skb);
- }
- }
- spin_unlock_irqrestore(&aun_queue_lock, flags);
-
- mod_timer(&ab_cleanup_timer, jiffies + (HZ * 2));
-}
-
-static int __init aun_udp_initialise(void)
-{
- int error;
- struct sockaddr_in sin;
-
- skb_queue_head_init(&aun_queue);
- setup_timer(&ab_cleanup_timer, ab_cleanup, 0);
- ab_cleanup_timer.expires = jiffies + (HZ * 2);
- add_timer(&ab_cleanup_timer);
-
- memset(&sin, 0, sizeof(sin));
- sin.sin_port = htons(AUN_PORT);
-
- /* We can count ourselves lucky Acorn machines are too dim to
- speak IPv6. :-) */
- error = sock_create_kern(PF_INET, SOCK_DGRAM, 0, &udpsock);
- if (error < 0) {
- pr_err("AUN: socket error %d\n", -error);
- return error;
- }
-
- udpsock->sk->sk_reuse = SK_CAN_REUSE;
- udpsock->sk->sk_allocation = GFP_ATOMIC; /* we're going to call it
- from interrupts */
-
- error = udpsock->ops->bind(udpsock, (struct sockaddr *)&sin,
- sizeof(sin));
- if (error < 0) {
- pr_err("AUN: bind error %d\n", -error);
- goto release;
- }
-
- udpsock->sk->sk_data_ready = aun_data_available;
-
- return 0;
-
-release:
- sock_release(udpsock);
- udpsock = NULL;
- return error;
-}
-#endif
-
-#ifdef CONFIG_ECONET_NATIVE
-
-/*
- * Receive an Econet frame from a device.
- */
-
-static int econet_rcv(struct sk_buff *skb, struct net_device *dev,
- struct packet_type *pt, struct net_device *orig_dev)
-{
- struct ec_framehdr *hdr;
- struct sock *sk = NULL;
- struct ec_device *edev = dev->ec_ptr;
-
- if (!net_eq(dev_net(dev), &init_net))
- goto drop;
-
- if (skb->pkt_type == PACKET_OTHERHOST)
- goto drop;
-
- if (!edev)
- goto drop;
-
- skb = skb_share_check(skb, GFP_ATOMIC);
- if (skb == NULL)
- return NET_RX_DROP;
-
- if (!pskb_may_pull(skb, sizeof(struct ec_framehdr)))
- goto drop;
-
- hdr = (struct ec_framehdr *)skb->data;
-
- /* First check for encapsulated IP */
- if (hdr->port == EC_PORT_IP) {
- skb->protocol = htons(ETH_P_IP);
- skb_pull(skb, sizeof(struct ec_framehdr));
- netif_rx(skb);
- return NET_RX_SUCCESS;
- }
-
- sk = ec_listening_socket(hdr->port, hdr->src_stn, hdr->src_net);
- if (!sk)
- goto drop;
-
- if (ec_queue_packet(sk, skb, edev->net, hdr->src_stn, hdr->cb,
- hdr->port))
- goto drop;
- sock_put(sk);
- return NET_RX_SUCCESS;
-
-drop:
- if (sk)
- sock_put(sk);
- kfree_skb(skb);
- return NET_RX_DROP;
-}
-
-static struct packet_type econet_packet_type __read_mostly = {
- .type = cpu_to_be16(ETH_P_ECONET),
- .func = econet_rcv,
-};
-
-static void econet_hw_initialise(void)
-{
- dev_add_pack(&econet_packet_type);
-}
-
-#endif
-
-static int econet_notifier(struct notifier_block *this, unsigned long msg,
- void *data)
-{
- struct net_device *dev = data;
- struct ec_device *edev;
-
- if (!net_eq(dev_net(dev), &init_net))
- return NOTIFY_DONE;
-
- switch (msg) {
- case NETDEV_UNREGISTER:
- /* A device has gone down - kill any data we hold for it. */
- edev = dev->ec_ptr;
- if (edev) {
- if (net2dev_map[0] == dev)
- net2dev_map[0] = NULL;
- net2dev_map[edev->net] = NULL;
- kfree(edev);
- dev->ec_ptr = NULL;
- }
- break;
- }
-
- return NOTIFY_DONE;
-}
-
-static struct notifier_block econet_netdev_notifier = {
- .notifier_call = econet_notifier,
-};
-
-static void __exit econet_proto_exit(void)
-{
-#ifdef CONFIG_ECONET_AUNUDP
- del_timer(&ab_cleanup_timer);
- if (udpsock)
- sock_release(udpsock);
-#endif
- unregister_netdevice_notifier(&econet_netdev_notifier);
-#ifdef CONFIG_ECONET_NATIVE
- dev_remove_pack(&econet_packet_type);
-#endif
- sock_unregister(econet_family_ops.family);
- proto_unregister(&econet_proto);
-}
-
-static int __init econet_proto_init(void)
-{
- int err = proto_register(&econet_proto, 0);
-
- if (err != 0)
- goto out;
- sock_register(&econet_family_ops);
-#ifdef CONFIG_ECONET_AUNUDP
- aun_udp_initialise();
-#endif
-#ifdef CONFIG_ECONET_NATIVE
- econet_hw_initialise();
-#endif
- register_netdevice_notifier(&econet_netdev_notifier);
-out:
- return err;
-}
-
-module_init(econet_proto_init);
-module_exit(econet_proto_exit);
-
-MODULE_LICENSE("GPL");
-MODULE_ALIAS_NETPROTO(PF_ECONET);
diff --git a/net/ethernet/eth.c b/net/ethernet/eth.c
index 5889a6c38a10..36e58800a9e3 100644
--- a/net/ethernet/eth.c
+++ b/net/ethernet/eth.c
@@ -164,7 +164,7 @@ __be16 eth_type_trans(struct sk_buff *skb, struct net_device *dev)
eth = eth_hdr(skb);
if (unlikely(is_multicast_ether_addr(eth->h_dest))) {
- if (!compare_ether_addr_64bits(eth->h_dest, dev->broadcast))
+ if (ether_addr_equal_64bits(eth->h_dest, dev->broadcast))
skb->pkt_type = PACKET_BROADCAST;
else
skb->pkt_type = PACKET_MULTICAST;
@@ -179,7 +179,8 @@ __be16 eth_type_trans(struct sk_buff *skb, struct net_device *dev)
*/
else if (1 /*dev->flags&IFF_PROMISC */ ) {
- if (unlikely(compare_ether_addr_64bits(eth->h_dest, dev->dev_addr)))
+ if (unlikely(!ether_addr_equal_64bits(eth->h_dest,
+ dev->dev_addr)))
skb->pkt_type = PACKET_OTHERHOST;
}
diff --git a/net/ieee802154/nl-phy.c b/net/ieee802154/nl-phy.c
index 3bdc4303c339..eed291626da6 100644
--- a/net/ieee802154/nl-phy.c
+++ b/net/ieee802154/nl-phy.c
@@ -179,6 +179,7 @@ static int ieee802154_add_iface(struct sk_buff *skb,
const char *devname;
int rc = -ENOBUFS;
struct net_device *dev;
+ int type = __IEEE802154_DEV_INVALID;
pr_debug("%s\n", __func__);
@@ -221,7 +222,13 @@ static int ieee802154_add_iface(struct sk_buff *skb,
goto nla_put_failure;
}
- dev = phy->add_iface(phy, devname);
+ if (info->attrs[IEEE802154_ATTR_DEV_TYPE]) {
+ type = nla_get_u8(info->attrs[IEEE802154_ATTR_DEV_TYPE]);
+ if (type >= __IEEE802154_DEV_MAX)
+ return -EINVAL;
+ }
+
+ dev = phy->add_iface(phy, devname, type);
if (IS_ERR(dev)) {
rc = PTR_ERR(dev);
goto nla_put_failure;
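ieee802154_add_iface() now honours an optional IEEE802154_ATTR_DEV_TYPE attribute, keeping __IEEE802154_DEV_INVALID as the default (which presumably lets the driver pick a type) and rejecting out-of-range values. The same optional-netlink-attribute pattern in isolation, with made-up attribute and enum names:

#include <net/netlink.h>

enum { MY_TYPE_A, MY_TYPE_B, __MY_TYPE_MAX };
#define MY_TYPE_DEFAULT MY_TYPE_A

/* Return the requested type, the default when the attribute is absent,
 * or -EINVAL when the value is out of range. */
static int parse_dev_type(struct nlattr *tb[], int attr_id)
{
        int type = MY_TYPE_DEFAULT;

        if (tb[attr_id]) {
                type = nla_get_u8(tb[attr_id]);
                if (type >= __MY_TYPE_MAX)
                        return -EINVAL;
        }
        return type;
}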
diff --git a/net/ipv4/Kconfig b/net/ipv4/Kconfig
index d183262943d9..20f1cb5c8aba 100644
--- a/net/ipv4/Kconfig
+++ b/net/ipv4/Kconfig
@@ -262,8 +262,8 @@ config ARPD
bool "IP: ARP daemon support"
---help---
The kernel maintains an internal cache which maps IP addresses to
- hardware addresses on the local network, so that Ethernet/Token Ring/
- etc. frames are sent to the proper address on the physical networking
+ hardware addresses on the local network, so that Ethernet
+ frames are sent to the proper address on the physical networking
layer. Normally, kernel uses the ARP protocol to resolve these
mappings.
@@ -312,7 +312,7 @@ config SYN_COOKIES
config INET_AH
tristate "IP: AH transformation"
- select XFRM
+ select XFRM_ALGO
select CRYPTO
select CRYPTO_HMAC
select CRYPTO_MD5
@@ -324,7 +324,7 @@ config INET_AH
config INET_ESP
tristate "IP: ESP transformation"
- select XFRM
+ select XFRM_ALGO
select CRYPTO
select CRYPTO_AUTHENC
select CRYPTO_HMAC
diff --git a/net/ipv4/ah4.c b/net/ipv4/ah4.c
index 3a280756dd73..e8f2617ecd47 100644
--- a/net/ipv4/ah4.c
+++ b/net/ipv4/ah4.c
@@ -406,8 +406,8 @@ static void ah4_err(struct sk_buff *skb, u32 info)
ah->spi, IPPROTO_AH, AF_INET);
if (!x)
return;
- printk(KERN_DEBUG "pmtu discovery on SA AH/%08x/%08x\n",
- ntohl(ah->spi), ntohl(iph->daddr));
+ pr_debug("pmtu discovery on SA AH/%08x/%08x\n",
+ ntohl(ah->spi), ntohl(iph->daddr));
xfrm_state_put(x);
}
diff --git a/net/ipv4/arp.c b/net/ipv4/arp.c
index 373b56bf8f49..cda37be02f8d 100644
--- a/net/ipv4/arp.c
+++ b/net/ipv4/arp.c
@@ -73,6 +73,8 @@
* Jesper D. Brouer: Proxy ARP PVLAN RFC 3069 support.
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/module.h>
#include <linux/types.h>
#include <linux/string.h>
@@ -89,7 +91,6 @@
#include <linux/etherdevice.h>
#include <linux/fddidevice.h>
#include <linux/if_arp.h>
-#include <linux/trdevice.h>
#include <linux/skbuff.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
@@ -193,9 +194,6 @@ int arp_mc_map(__be32 addr, u8 *haddr, struct net_device *dev, int dir)
case ARPHRD_IEEE802:
ip_eth_mc_map(addr, haddr);
return 0;
- case ARPHRD_IEEE802_TR:
- ip_tr_mc_map(addr, haddr);
- return 0;
case ARPHRD_INFINIBAND:
ip_ib_mc_map(addr, dev->broadcast, haddr);
return 0;
@@ -364,8 +362,7 @@ static void arp_solicit(struct neighbour *neigh, struct sk_buff *skb)
probes -= neigh->parms->ucast_probes;
if (probes < 0) {
if (!(neigh->nud_state & NUD_VALID))
- printk(KERN_DEBUG
- "trying to ucast probe in NUD_INVALID\n");
+ pr_debug("trying to ucast probe in NUD_INVALID\n");
dst_ha = neigh->ha;
read_lock_bh(&neigh->lock);
} else {
@@ -452,7 +449,7 @@ static int arp_set_predefined(int addr_hint, unsigned char *haddr,
{
switch (addr_hint) {
case RTN_LOCAL:
- printk(KERN_DEBUG "ARP: arp called for own IP address\n");
+ pr_debug("arp called for own IP address\n");
memcpy(haddr, dev->dev_addr, dev->addr_len);
return 1;
case RTN_MULTICAST:
@@ -473,7 +470,7 @@ int arp_find(unsigned char *haddr, struct sk_buff *skb)
struct neighbour *n;
if (!skb_dst(skb)) {
- printk(KERN_DEBUG "arp_find is called with dst==NULL\n");
+ pr_debug("arp_find is called with dst==NULL\n");
kfree_skb(skb);
return 1;
}
@@ -648,12 +645,6 @@ struct sk_buff *arp_create(int type, int ptype, __be32 dest_ip,
arp->ar_pro = htons(ETH_P_IP);
break;
#endif
-#if IS_ENABLED(CONFIG_TR)
- case ARPHRD_IEEE802_TR:
- arp->ar_hrd = htons(ARPHRD_IEEE802);
- arp->ar_pro = htons(ETH_P_IP);
- break;
-#endif
}
arp->ar_hln = dev->addr_len;
@@ -751,11 +742,10 @@ static int arp_process(struct sk_buff *skb)
goto out;
break;
case ARPHRD_ETHER:
- case ARPHRD_IEEE802_TR:
case ARPHRD_FDDI:
case ARPHRD_IEEE802:
/*
- * ETHERNET, Token Ring and Fibre Channel (which are IEEE 802
+ * ETHERNET, and Fibre Channel (which are IEEE 802
* devices, according to RFC 2625) devices will accept ARP
* hardware types of either 1 (Ethernet) or 6 (IEEE 802.2).
* This is the case also of FDDI, where the RFC 1390 says that
diff --git a/net/ipv4/devinet.c b/net/ipv4/devinet.c
index 88c9e3f68c78..10e15a144e95 100644
--- a/net/ipv4/devinet.c
+++ b/net/ipv4/devinet.c
@@ -217,8 +217,7 @@ void in_dev_finish_destroy(struct in_device *idev)
WARN_ON(idev->ifa_list);
WARN_ON(idev->mc_list);
#ifdef NET_REFCNT_DEBUG
- printk(KERN_DEBUG "in_dev_finish_destroy: %p=%s\n",
- idev, dev ? dev->name : "NIL");
+ pr_debug("%s: %p=%s\n", __func__, idev, dev ? dev->name : "NIL");
#endif
dev_put(dev);
if (!idev->dead)
@@ -1174,7 +1173,7 @@ static int inetdev_event(struct notifier_block *this, unsigned long event,
switch (event) {
case NETDEV_REGISTER:
- printk(KERN_DEBUG "inetdev_event: bug\n");
+ pr_debug("%s: bug\n", __func__);
RCU_INIT_POINTER(dev->ip_ptr, NULL);
break;
case NETDEV_UP:
diff --git a/net/ipv4/fib_trie.c b/net/ipv4/fib_trie.c
index bce36f1a37b4..30b88d7b4bd6 100644
--- a/net/ipv4/fib_trie.c
+++ b/net/ipv4/fib_trie.c
@@ -1370,6 +1370,8 @@ static int check_leaf(struct fib_table *tb, struct trie *t, struct leaf *l,
if (fa->fa_tos && fa->fa_tos != flp->flowi4_tos)
continue;
+ if (fi->fib_dead)
+ continue;
if (fa->fa_info->fib_scope < flp->flowi4_scope)
continue;
fib_alias_accessed(fa);
diff --git a/net/ipv4/icmp.c b/net/ipv4/icmp.c
index 2cb2bf845641..c75efbdc71cb 100644
--- a/net/ipv4/icmp.c
+++ b/net/ipv4/icmp.c
@@ -713,11 +713,10 @@ static void icmp_unreach(struct sk_buff *skb)
if (!net->ipv4.sysctl_icmp_ignore_bogus_error_responses &&
inet_addr_type(net, iph->daddr) == RTN_BROADCAST) {
- if (net_ratelimit())
- pr_warn("%pI4 sent an invalid ICMP type %u, code %u error to a broadcast: %pI4 on %s\n",
- &ip_hdr(skb)->saddr,
- icmph->type, icmph->code,
- &iph->daddr, skb->dev->name);
+ net_warn_ratelimited("%pI4 sent an invalid ICMP type %u, code %u error to a broadcast: %pI4 on %s\n",
+ &ip_hdr(skb)->saddr,
+ icmph->type, icmph->code,
+ &iph->daddr, skb->dev->name);
goto out;
}
@@ -906,8 +905,7 @@ out_err:
static void icmp_address(struct sk_buff *skb)
{
#if 0
- if (net_ratelimit())
- printk(KERN_DEBUG "a guy asks for address mask. Who is it?\n");
+ net_dbg_ratelimited("a guy asks for address mask. Who is it?\n");
#endif
}
@@ -943,10 +941,10 @@ static void icmp_address_reply(struct sk_buff *skb)
inet_ifa_match(ip_hdr(skb)->saddr, ifa))
break;
}
- if (!ifa && net_ratelimit()) {
- pr_info("Wrong address mask %pI4 from %s/%pI4\n",
- mp, dev->name, &ip_hdr(skb)->saddr);
- }
+ if (!ifa)
+ net_info_ratelimited("Wrong address mask %pI4 from %s/%pI4\n",
+ mp,
+ dev->name, &ip_hdr(skb)->saddr);
}
}
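
The icmp.c hunks above, like most of the conversions that follow, replace open-coded "if (net_ratelimit()) pr_*(...)" pairs with the net_*_ratelimited() helpers, which keep the rate check in one place. A self-contained toy of the pattern those helpers encapsulate — the token-bucket stand-in and the wrapper name are invented for the example, not the kernel implementation:

    #include <stdarg.h>
    #include <stdbool.h>
    #include <stdio.h>
    #include <time.h>

    /* Toy stand-in for net_ratelimit(): allow at most `burst` messages
     * per `interval` seconds.  The kernel keeps this state in a shared
     * ratelimit_state; the point is only that the check lives here. */
    static bool toy_ratelimit(void)
    {
        static time_t window_start;
        static int used;
        const int burst = 10;
        const time_t interval = 5;
        time_t now = time(NULL);

        if (now - window_start >= interval) {
            window_start = now;
            used = 0;
        }
        return used++ < burst;
    }

    /* Shape of a net_warn_ratelimited()-style wrapper: callers no longer
     * open-code the ratelimit test around every message. */
    static void warn_ratelimited(const char *fmt, ...)
    {
        va_list ap;

        if (!toy_ratelimit())
            return;
        va_start(ap, fmt);
        vfprintf(stderr, fmt, ap);
        va_end(ap);
    }

    int main(void)
    {
        for (int i = 0; i < 100; i++)
            warn_ratelimited("event %d\n", i);  /* only ~10 get printed */
        return 0;
    }
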
diff --git a/net/ipv4/inet_timewait_sock.c b/net/ipv4/inet_timewait_sock.c
index 543ef6225458..2784db3155fb 100644
--- a/net/ipv4/inet_timewait_sock.c
+++ b/net/ipv4/inet_timewait_sock.c
@@ -89,8 +89,8 @@ static void __inet_twsk_kill(struct inet_timewait_sock *tw,
#ifdef SOCK_REFCNT_DEBUG
if (atomic_read(&tw->tw_refcnt) != 1) {
- printk(KERN_DEBUG "%s timewait_sock %p refcnt=%d\n",
- tw->tw_prot->name, tw, atomic_read(&tw->tw_refcnt));
+ pr_debug("%s timewait_sock %p refcnt=%d\n",
+ tw->tw_prot->name, tw, atomic_read(&tw->tw_refcnt));
}
#endif
while (refcnt) {
diff --git a/net/ipv4/ip_fragment.c b/net/ipv4/ip_fragment.c
index 71e5c328176c..9dbd3dd6022d 100644
--- a/net/ipv4/ip_fragment.c
+++ b/net/ipv4/ip_fragment.c
@@ -148,17 +148,17 @@ static unsigned int ip4_hashfn(struct inet_frag_queue *q)
return ipqhashfn(ipq->id, ipq->saddr, ipq->daddr, ipq->protocol);
}
-static int ip4_frag_match(struct inet_frag_queue *q, void *a)
+static bool ip4_frag_match(struct inet_frag_queue *q, void *a)
{
struct ipq *qp;
struct ip4_create_arg *arg = a;
qp = container_of(q, struct ipq, q);
return qp->id == arg->iph->id &&
- qp->saddr == arg->iph->saddr &&
- qp->daddr == arg->iph->daddr &&
- qp->protocol == arg->iph->protocol &&
- qp->user == arg->user;
+ qp->saddr == arg->iph->saddr &&
+ qp->daddr == arg->iph->daddr &&
+ qp->protocol == arg->iph->protocol &&
+ qp->user == arg->user;
}
/* Memory Tracking Functions. */
@@ -545,6 +545,7 @@ static int ip_frag_reasm(struct ipq *qp, struct sk_buff *prev,
int len;
int ihlen;
int err;
+ int sum_truesize;
u8 ecn;
ipq_kill(qp);
@@ -611,19 +612,32 @@ static int ip_frag_reasm(struct ipq *qp, struct sk_buff *prev,
atomic_add(clone->truesize, &qp->q.net->mem);
}
- skb_shinfo(head)->frag_list = head->next;
skb_push(head, head->data - skb_network_header(head));
- for (fp=head->next; fp; fp = fp->next) {
- head->data_len += fp->len;
- head->len += fp->len;
+ sum_truesize = head->truesize;
+ for (fp = head->next; fp;) {
+ bool headstolen;
+ int delta;
+ struct sk_buff *next = fp->next;
+
+ sum_truesize += fp->truesize;
if (head->ip_summed != fp->ip_summed)
head->ip_summed = CHECKSUM_NONE;
else if (head->ip_summed == CHECKSUM_COMPLETE)
head->csum = csum_add(head->csum, fp->csum);
- head->truesize += fp->truesize;
+
+ if (skb_try_coalesce(head, fp, &headstolen, &delta)) {
+ kfree_skb_partial(fp, headstolen);
+ } else {
+ if (!skb_shinfo(head)->frag_list)
+ skb_shinfo(head)->frag_list = fp;
+ head->data_len += fp->len;
+ head->len += fp->len;
+ head->truesize += fp->truesize;
+ }
+ fp = next;
}
- atomic_sub(head->truesize, &qp->q.net->mem);
+ atomic_sub(sum_truesize, &qp->q.net->mem);
head->next = NULL;
head->dev = dev;
@@ -644,8 +658,7 @@ out_nomem:
err = -ENOMEM;
goto out_fail;
out_oversize:
- if (net_ratelimit())
- pr_info("Oversized IP packet from %pI4\n", &qp->saddr);
+ net_info_ratelimited("Oversized IP packet from %pI4\n", &qp->saddr);
out_fail:
IP_INC_STATS_BH(net, IPSTATS_MIB_REASMFAILS);
return err;
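
The rewritten reassembly loop above walks the fragment chain once, lets skb_try_coalesce() absorb fragments into the head where it can, and accumulates every fragment's truesize so the per-netns memory counter is decremented by a single atomic_sub() at the end. A toy model of that single-pass accounting (list layout and names invented for the example):

    #include <stdio.h>
    #include <stdlib.h>

    /* Toy fragment chain: walk the list once, add every node's truesize
     * to a running sum, and charge the memory counter a single time. */
    struct frag {
        int len;
        int truesize;
        struct frag *next;
    };

    static int reassemble_len(struct frag *head, long *mem_accounted)
    {
        long sum_truesize = head->truesize;
        int total_len = head->len;

        for (struct frag *fp = head->next; fp; ) {
            struct frag *next = fp->next;

            sum_truesize += fp->truesize;
            total_len += fp->len;
            /* The kernel either merges fp into head via skb_try_coalesce()
             * or chains it on head's frag_list; this toy only tracks the
             * byte counts, so it simply frees the node. */
            free(fp);
            fp = next;
        }
        head->next = NULL;
        *mem_accounted -= sum_truesize;   /* one atomic_sub() in the real code */
        return total_len;
    }

    int main(void)
    {
        long mem = 0;
        int len;
        struct frag *head = calloc(1, sizeof(*head));
        struct frag *f1 = calloc(1, sizeof(*f1));

        head->len = 1480; head->truesize = 2048; mem += head->truesize;
        f1->len = 520;    f1->truesize = 1024;   mem += f1->truesize;
        head->next = f1;

        len = reassemble_len(head, &mem);
        printf("len=%d mem=%ld\n", len, mem);
        free(head);
        return 0;
    }
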
diff --git a/net/ipv4/ip_input.c b/net/ipv4/ip_input.c
index 26eccc5bab1c..8590144ca330 100644
--- a/net/ipv4/ip_input.c
+++ b/net/ipv4/ip_input.c
@@ -210,9 +210,8 @@ static int ip_local_deliver_finish(struct sk_buff *skb)
int ret;
if (!net_eq(net, &init_net) && !ipprot->netns_ok) {
- if (net_ratelimit())
- printk("%s: proto %d isn't netns-ready\n",
- __func__, protocol);
+ net_info_ratelimited("%s: proto %d isn't netns-ready\n",
+ __func__, protocol);
kfree_skb(skb);
goto out;
}
@@ -298,10 +297,10 @@ static inline bool ip_rcv_options(struct sk_buff *skb)
if (in_dev) {
if (!IN_DEV_SOURCE_ROUTE(in_dev)) {
- if (IN_DEV_LOG_MARTIANS(in_dev) &&
- net_ratelimit())
- pr_info("source route option %pI4 -> %pI4\n",
- &iph->saddr, &iph->daddr);
+ if (IN_DEV_LOG_MARTIANS(in_dev))
+ net_info_ratelimited("source route option %pI4 -> %pI4\n",
+ &iph->saddr,
+ &iph->daddr);
goto drop;
}
}
diff --git a/net/ipv4/ip_options.c b/net/ipv4/ip_options.c
index 95722ed0e5bb..708b99494e23 100644
--- a/net/ipv4/ip_options.c
+++ b/net/ipv4/ip_options.c
@@ -578,8 +578,10 @@ void ip_forward_options(struct sk_buff *skb)
ip_hdr(skb)->daddr = opt->nexthop;
ip_rt_get_source(&optptr[srrptr-1], skb, rt);
optptr[2] = srrptr+4;
- } else if (net_ratelimit())
- pr_crit("%s(): Argh! Destination lost!\n", __func__);
+ } else {
+ net_crit_ratelimited("%s(): Argh! Destination lost!\n",
+ __func__);
+ }
if (opt->ts_needaddr) {
optptr = raw + opt->ts;
ip_rt_get_source(&optptr[optptr[2]-9], skb, rt);
diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c
index 4910176d24ed..451f97c42eb4 100644
--- a/net/ipv4/ip_output.c
+++ b/net/ipv4/ip_output.c
@@ -214,8 +214,8 @@ static inline int ip_finish_output2(struct sk_buff *skb)
}
rcu_read_unlock();
- if (net_ratelimit())
- printk(KERN_DEBUG "ip_finish_output2: No header cache and no neighbour!\n");
+ net_dbg_ratelimited("%s: No header cache and no neighbour!\n",
+ __func__);
kfree_skb(skb);
return -EINVAL;
}
diff --git a/net/ipv4/ipconfig.c b/net/ipv4/ipconfig.c
index f267280d8709..67e8a6b086ea 100644
--- a/net/ipv4/ipconfig.c
+++ b/net/ipv4/ipconfig.c
@@ -808,8 +808,6 @@ static void __init ic_bootp_send_if(struct ic_device *d, unsigned long jiffies_d
b->op = BOOTP_REQUEST;
if (dev->type < 256) /* check for false types */
b->htype = dev->type;
- else if (dev->type == ARPHRD_IEEE802_TR) /* fix for token ring */
- b->htype = ARPHRD_IEEE802;
else if (dev->type == ARPHRD_FDDI)
b->htype = ARPHRD_ETHER;
else {
@@ -955,8 +953,7 @@ static int __init ic_bootp_recv(struct sk_buff *skb, struct net_device *dev, str
/* Fragments are not supported */
if (ip_is_fragment(h)) {
- if (net_ratelimit())
- pr_err("DHCP/BOOTP: Ignoring fragmented reply\n");
+ net_err_ratelimited("DHCP/BOOTP: Ignoring fragmented reply\n");
goto drop;
}
@@ -1004,16 +1001,14 @@ static int __init ic_bootp_recv(struct sk_buff *skb, struct net_device *dev, str
/* Is it a reply to our BOOTP request? */
if (b->op != BOOTP_REPLY ||
b->xid != d->xid) {
- if (net_ratelimit())
- pr_err("DHCP/BOOTP: Reply not for us, op[%x] xid[%x]\n",
- b->op, b->xid);
+ net_err_ratelimited("DHCP/BOOTP: Reply not for us, op[%x] xid[%x]\n",
+ b->op, b->xid);
goto drop_unlock;
}
/* Is it a reply for the device we are configuring? */
if (b->xid != ic_dev_xid) {
- if (net_ratelimit())
- pr_err("DHCP/BOOTP: Ignoring delayed packet\n");
+ net_err_ratelimited("DHCP/BOOTP: Ignoring delayed packet\n");
goto drop_unlock;
}
@@ -1626,11 +1621,13 @@ static int __init ip_auto_config_setup(char *addrs)
return 1;
}
+__setup("ip=", ip_auto_config_setup);
static int __init nfsaddrs_config_setup(char *addrs)
{
return ip_auto_config_setup(addrs);
}
+__setup("nfsaddrs=", nfsaddrs_config_setup);
static int __init vendor_class_identifier_setup(char *addrs)
{
@@ -1641,7 +1638,4 @@ static int __init vendor_class_identifier_setup(char *addrs)
vendor_class_identifier);
return 1;
}
-
-__setup("ip=", ip_auto_config_setup);
-__setup("nfsaddrs=", nfsaddrs_config_setup);
__setup("dhcpclass=", vendor_class_identifier_setup);
diff --git a/net/ipv4/ipmr.c b/net/ipv4/ipmr.c
index 5bef604ac0fa..a9e519ad6db5 100644
--- a/net/ipv4/ipmr.c
+++ b/net/ipv4/ipmr.c
@@ -949,8 +949,7 @@ static int ipmr_cache_report(struct mr_table *mrt,
ret = sock_queue_rcv_skb(mroute_sk, skb);
rcu_read_unlock();
if (ret < 0) {
- if (net_ratelimit())
- pr_warn("mroute: pending queue full, dropping entries\n");
+ net_warn_ratelimited("mroute: pending queue full, dropping entries\n");
kfree_skb(skb);
}
diff --git a/net/ipv4/netfilter/arp_tables.c b/net/ipv4/netfilter/arp_tables.c
index a3935273869f..97e61eadf580 100644
--- a/net/ipv4/netfilter/arp_tables.c
+++ b/net/ipv4/netfilter/arp_tables.c
@@ -221,9 +221,8 @@ static inline int arp_checkentry(const struct arpt_arp *arp)
static unsigned int
arpt_error(struct sk_buff *skb, const struct xt_action_param *par)
{
- if (net_ratelimit())
- pr_err("arp_tables: error: '%s'\n",
- (const char *)par->targinfo);
+ net_err_ratelimited("arp_tables: error: '%s'\n",
+ (const char *)par->targinfo);
return NF_DROP;
}
diff --git a/net/ipv4/netfilter/ip_tables.c b/net/ipv4/netfilter/ip_tables.c
index 585b80f3cc68..170b1fdd6b72 100644
--- a/net/ipv4/netfilter/ip_tables.c
+++ b/net/ipv4/netfilter/ip_tables.c
@@ -153,8 +153,7 @@ ip_checkentry(const struct ipt_ip *ip)
static unsigned int
ipt_error(struct sk_buff *skb, const struct xt_action_param *par)
{
- if (net_ratelimit())
- pr_info("error: `%s'\n", (const char *)par->targinfo);
+ net_info_ratelimited("error: `%s'\n", (const char *)par->targinfo);
return NF_DROP;
}
diff --git a/net/ipv4/netfilter/ipt_CLUSTERIP.c b/net/ipv4/netfilter/ipt_CLUSTERIP.c
index a639967eb727..fe5daea5214d 100644
--- a/net/ipv4/netfilter/ipt_CLUSTERIP.c
+++ b/net/ipv4/netfilter/ipt_CLUSTERIP.c
@@ -246,8 +246,7 @@ clusterip_hashfn(const struct sk_buff *skb,
dport = ports[1];
}
} else {
- if (net_ratelimit())
- pr_info("unknown protocol %u\n", iph->protocol);
+ net_info_ratelimited("unknown protocol %u\n", iph->protocol);
}
switch (config->hash_mode) {
diff --git a/net/ipv4/netfilter/nf_nat_h323.c b/net/ipv4/netfilter/nf_nat_h323.c
index 82536701e3a3..cad29c121318 100644
--- a/net/ipv4/netfilter/nf_nat_h323.c
+++ b/net/ipv4/netfilter/nf_nat_h323.c
@@ -42,9 +42,7 @@ static int set_addr(struct sk_buff *skb,
if (!nf_nat_mangle_tcp_packet(skb, ct, ctinfo,
addroff, sizeof(buf),
(char *) &buf, sizeof(buf))) {
- if (net_ratelimit())
- pr_notice("nf_nat_h323: nf_nat_mangle_tcp_packet"
- " error\n");
+ net_notice_ratelimited("nf_nat_h323: nf_nat_mangle_tcp_packet error\n");
return -1;
}
@@ -58,9 +56,7 @@ static int set_addr(struct sk_buff *skb,
if (!nf_nat_mangle_udp_packet(skb, ct, ctinfo,
addroff, sizeof(buf),
(char *) &buf, sizeof(buf))) {
- if (net_ratelimit())
- pr_notice("nf_nat_h323: nf_nat_mangle_udp_packet"
- " error\n");
+ net_notice_ratelimited("nf_nat_h323: nf_nat_mangle_udp_packet error\n");
return -1;
}
/* nf_nat_mangle_udp_packet uses skb_make_writable() to copy
@@ -214,8 +210,7 @@ static int nat_rtp_rtcp(struct sk_buff *skb, struct nf_conn *ct,
/* Run out of expectations */
if (i >= H323_RTP_CHANNEL_MAX) {
- if (net_ratelimit())
- pr_notice("nf_nat_h323: out of expectations\n");
+ net_notice_ratelimited("nf_nat_h323: out of expectations\n");
return 0;
}
@@ -244,8 +239,7 @@ static int nat_rtp_rtcp(struct sk_buff *skb, struct nf_conn *ct,
}
if (nated_port == 0) { /* No port available */
- if (net_ratelimit())
- pr_notice("nf_nat_h323: out of RTP ports\n");
+ net_notice_ratelimited("nf_nat_h323: out of RTP ports\n");
return 0;
}
@@ -308,8 +302,7 @@ static int nat_t120(struct sk_buff *skb, struct nf_conn *ct,
}
if (nated_port == 0) { /* No port available */
- if (net_ratelimit())
- pr_notice("nf_nat_h323: out of TCP ports\n");
+ net_notice_ratelimited("nf_nat_h323: out of TCP ports\n");
return 0;
}
@@ -365,8 +358,7 @@ static int nat_h245(struct sk_buff *skb, struct nf_conn *ct,
}
if (nated_port == 0) { /* No port available */
- if (net_ratelimit())
- pr_notice("nf_nat_q931: out of TCP ports\n");
+ net_notice_ratelimited("nf_nat_q931: out of TCP ports\n");
return 0;
}
@@ -456,8 +448,7 @@ static int nat_q931(struct sk_buff *skb, struct nf_conn *ct,
}
if (nated_port == 0) { /* No port available */
- if (net_ratelimit())
- pr_notice("nf_nat_ras: out of TCP ports\n");
+ net_notice_ratelimited("nf_nat_ras: out of TCP ports\n");
return 0;
}
@@ -545,8 +536,7 @@ static int nat_callforwarding(struct sk_buff *skb, struct nf_conn *ct,
}
if (nated_port == 0) { /* No port available */
- if (net_ratelimit())
- pr_notice("nf_nat_q931: out of TCP ports\n");
+ net_notice_ratelimited("nf_nat_q931: out of TCP ports\n");
return 0;
}
diff --git a/net/ipv4/netfilter/nf_nat_snmp_basic.c b/net/ipv4/netfilter/nf_nat_snmp_basic.c
index 2133c30a4a5f..746edec8b86e 100644
--- a/net/ipv4/netfilter/nf_nat_snmp_basic.c
+++ b/net/ipv4/netfilter/nf_nat_snmp_basic.c
@@ -1206,8 +1206,7 @@ static int snmp_translate(struct nf_conn *ct,
if (!snmp_parse_mangle((unsigned char *)udph + sizeof(struct udphdr),
paylen, &map, &udph->check)) {
- if (net_ratelimit())
- printk(KERN_WARNING "bsalg: parser failed\n");
+ net_warn_ratelimited("bsalg: parser failed\n");
return NF_DROP;
}
return NF_ACCEPT;
@@ -1241,9 +1240,8 @@ static int help(struct sk_buff *skb, unsigned int protoff,
* can mess around with the payload.
*/
if (ntohs(udph->len) != skb->len - (iph->ihl << 2)) {
- if (net_ratelimit())
- printk(KERN_WARNING "SNMP: dropping malformed packet src=%pI4 dst=%pI4\n",
- &iph->saddr, &iph->daddr);
+ net_warn_ratelimited("SNMP: dropping malformed packet src=%pI4 dst=%pI4\n",
+ &iph->saddr, &iph->daddr);
return NF_DROP;
}
diff --git a/net/ipv4/route.c b/net/ipv4/route.c
index 5773f5d9e213..ffcb3b016843 100644
--- a/net/ipv4/route.c
+++ b/net/ipv4/route.c
@@ -960,8 +960,7 @@ void rt_cache_flush_batch(struct net *net)
static void rt_emergency_hash_rebuild(struct net *net)
{
- if (net_ratelimit())
- pr_warn("Route hash chain too long!\n");
+ net_warn_ratelimited("Route hash chain too long!\n");
rt_cache_invalidate(net);
}
@@ -1084,8 +1083,7 @@ static int rt_garbage_collect(struct dst_ops *ops)
goto out;
if (dst_entries_get_slow(&ipv4_dst_ops) < ip_rt_max_size)
goto out;
- if (net_ratelimit())
- pr_warn("dst cache overflow\n");
+ net_warn_ratelimited("dst cache overflow\n");
RT_CACHE_STAT_INC(gc_dst_overflow);
return 1;
@@ -1182,8 +1180,7 @@ restart:
if (rt->rt_type == RTN_UNICAST || rt_is_output_route(rt)) {
int err = rt_bind_neighbour(rt);
if (err) {
- if (net_ratelimit())
- pr_warn("Neighbour table failure & not caching routes\n");
+ net_warn_ratelimited("Neighbour table failure & not caching routes\n");
ip_rt_put(rt);
return ERR_PTR(err);
}
@@ -1299,8 +1296,7 @@ restart:
goto restart;
}
- if (net_ratelimit())
- pr_warn("Neighbour table overflow\n");
+ net_warn_ratelimited("Neighbour table overflow\n");
rt_drop(rt);
return ERR_PTR(-ENOBUFS);
}
@@ -1378,8 +1374,7 @@ void __ip_select_ident(struct iphdr *iph, struct dst_entry *dst, int more)
return;
}
} else if (!rt)
- printk(KERN_DEBUG "rt_bind_peer(0) @%p\n",
- __builtin_return_address(0));
+ pr_debug("rt_bind_peer(0) @%p\n", __builtin_return_address(0));
ip_select_fb_ident(iph);
}
@@ -1503,11 +1498,11 @@ void ip_rt_redirect(__be32 old_gw, __be32 daddr, __be32 new_gw,
reject_redirect:
#ifdef CONFIG_IP_ROUTE_VERBOSE
- if (IN_DEV_LOG_MARTIANS(in_dev) && net_ratelimit())
- pr_info("Redirect from %pI4 on %s about %pI4 ignored\n"
- " Advised path = %pI4 -> %pI4\n",
- &old_gw, dev->name, &new_gw,
- &saddr, &daddr);
+ if (IN_DEV_LOG_MARTIANS(in_dev))
+ net_info_ratelimited("Redirect from %pI4 on %s about %pI4 ignored\n"
+ " Advised path = %pI4 -> %pI4\n",
+ &old_gw, dev->name, &new_gw,
+ &saddr, &daddr);
#endif
;
}
@@ -1617,11 +1612,10 @@ void ip_rt_send_redirect(struct sk_buff *skb)
++peer->rate_tokens;
#ifdef CONFIG_IP_ROUTE_VERBOSE
if (log_martians &&
- peer->rate_tokens == ip_rt_redirect_number &&
- net_ratelimit())
- pr_warn("host %pI4/if%d ignores redirects for %pI4 to %pI4\n",
- &ip_hdr(skb)->saddr, rt->rt_iif,
- &rt->rt_dst, &rt->rt_gateway);
+ peer->rate_tokens == ip_rt_redirect_number)
+ net_warn_ratelimited("host %pI4/if%d ignores redirects for %pI4 to %pI4\n",
+ &ip_hdr(skb)->saddr, rt->rt_iif,
+ &rt->rt_dst, &rt->rt_gateway);
#endif
}
}
@@ -1844,9 +1838,9 @@ static void ipv4_link_failure(struct sk_buff *skb)
static int ip_rt_bug(struct sk_buff *skb)
{
- printk(KERN_DEBUG "ip_rt_bug: %pI4 -> %pI4, %s\n",
- &ip_hdr(skb)->saddr, &ip_hdr(skb)->daddr,
- skb->dev ? skb->dev->name : "?");
+ pr_debug("%s: %pI4 -> %pI4, %s\n",
+ __func__, &ip_hdr(skb)->saddr, &ip_hdr(skb)->daddr,
+ skb->dev ? skb->dev->name : "?");
kfree_skb(skb);
WARN_ON(1);
return 0;
@@ -2135,8 +2129,7 @@ static int __mkroute_input(struct sk_buff *skb,
/* get a working reference to the output device */
out_dev = __in_dev_get_rcu(FIB_RES_DEV(*res));
if (out_dev == NULL) {
- if (net_ratelimit())
- pr_crit("Bug in ip_route_input_slow(). Please report.\n");
+ net_crit_ratelimited("Bug in ip_route_input_slow(). Please report.\n");
return -EINVAL;
}
@@ -2407,9 +2400,9 @@ no_route:
martian_destination:
RT_CACHE_STAT_INC(in_martian_dst);
#ifdef CONFIG_IP_ROUTE_VERBOSE
- if (IN_DEV_LOG_MARTIANS(in_dev) && net_ratelimit())
- pr_warn("martian destination %pI4 from %pI4, dev %s\n",
- &daddr, &saddr, dev->name);
+ if (IN_DEV_LOG_MARTIANS(in_dev))
+ net_warn_ratelimited("martian destination %pI4 from %pI4, dev %s\n",
+ &daddr, &saddr, dev->name);
#endif
e_hostunreach:
@@ -3415,9 +3408,15 @@ struct ip_rt_acct __percpu *ip_rt_acct __read_mostly;
static __initdata unsigned long rhash_entries;
static int __init set_rhash_entries(char *str)
{
+ ssize_t ret;
+
if (!str)
return 0;
- rhash_entries = simple_strtoul(str, &str, 0);
+
+ ret = kstrtoul(str, 0, &rhash_entries);
+ if (ret)
+ return 0;
+
return 1;
}
__setup("rhash_entries=", set_rhash_entries);
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index 565406287f6f..bb485fcb077e 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -593,7 +593,7 @@ static inline void tcp_mark_push(struct tcp_sock *tp, struct sk_buff *skb)
tp->pushed_seq = tp->write_seq;
}
-static inline int forced_push(const struct tcp_sock *tp)
+static inline bool forced_push(const struct tcp_sock *tp)
{
return after(tp->write_seq, tp->pushed_seq + (tp->max_window >> 1));
}
@@ -917,8 +917,7 @@ new_segment:
wait_for_sndbuf:
set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
wait_for_memory:
- if (copied)
- tcp_push(sk, flags & ~MSG_MORE, mss_now, TCP_NAGLE_PUSH);
+ tcp_push(sk, flags & ~MSG_MORE, mss_now, TCP_NAGLE_PUSH);
if ((err = sk_stream_wait_memory(sk, &timeo)) != 0)
goto do_error;
@@ -978,39 +977,6 @@ static inline int select_size(const struct sock *sk, bool sg)
return tmp;
}
-static int tcp_send_rcvq(struct sock *sk, struct msghdr *msg, size_t size)
-{
- struct sk_buff *skb;
- struct tcphdr *th;
- bool fragstolen;
-
- skb = alloc_skb(size + sizeof(*th), sk->sk_allocation);
- if (!skb)
- goto err;
-
- th = (struct tcphdr *)skb_put(skb, sizeof(*th));
- skb_reset_transport_header(skb);
- memset(th, 0, sizeof(*th));
-
- if (memcpy_fromiovec(skb_put(skb, size), msg->msg_iov, size))
- goto err_free;
-
- TCP_SKB_CB(skb)->seq = tcp_sk(sk)->rcv_nxt;
- TCP_SKB_CB(skb)->end_seq = TCP_SKB_CB(skb)->seq + size;
- TCP_SKB_CB(skb)->ack_seq = tcp_sk(sk)->snd_una - 1;
-
- if (tcp_queue_rcv(sk, skb, sizeof(*th), &fragstolen)) {
- WARN_ON_ONCE(fragstolen); /* should not happen */
- __kfree_skb(skb);
- }
- return size;
-
-err_free:
- kfree_skb(skb);
-err:
- return -ENOMEM;
-}
-
int tcp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
size_t size)
{
@@ -1115,7 +1081,7 @@ new_segment:
if (err)
goto do_fault;
} else {
- int merge = 0;
+ bool merge = false;
int i = skb_shinfo(skb)->nr_frags;
struct page *page = sk->sk_sndmsg_page;
int off;
@@ -1129,7 +1095,7 @@ new_segment:
off != PAGE_SIZE) {
/* We can extend the last page
* fragment. */
- merge = 1;
+ merge = true;
} else if (i == MAX_SKB_FRAGS || !sg) {
/* Need to add new fragment and cannot
* do this because interface is non-SG,
@@ -1326,7 +1292,7 @@ static int tcp_peek_sndq(struct sock *sk, struct msghdr *msg, int len)
void tcp_cleanup_rbuf(struct sock *sk, int copied)
{
struct tcp_sock *tp = tcp_sk(sk);
- int time_to_ack = 0;
+ bool time_to_ack = false;
struct sk_buff *skb = skb_peek(&sk->sk_receive_queue);
@@ -1352,7 +1318,7 @@ void tcp_cleanup_rbuf(struct sock *sk, int copied)
((icsk->icsk_ack.pending & ICSK_ACK_PUSHED) &&
!icsk->icsk_ack.pingpong)) &&
!atomic_read(&sk->sk_rmem_alloc)))
- time_to_ack = 1;
+ time_to_ack = true;
}
/* We send an ACK if we can now advertise a non-zero window
@@ -1374,7 +1340,7 @@ void tcp_cleanup_rbuf(struct sock *sk, int copied)
* "Lots" means "at least twice" here.
*/
if (new_window && new_window >= 2 * rcv_window_now)
- time_to_ack = 1;
+ time_to_ack = true;
}
}
if (time_to_ack)
@@ -1506,11 +1472,11 @@ int tcp_read_sock(struct sock *sk, read_descriptor_t *desc,
break;
}
if (tcp_hdr(skb)->fin) {
- sk_eat_skb(sk, skb, 0);
+ sk_eat_skb(sk, skb, false);
++seq;
break;
}
- sk_eat_skb(sk, skb, 0);
+ sk_eat_skb(sk, skb, false);
if (!desc->count)
break;
tp->copied_seq = seq;
@@ -1546,7 +1512,7 @@ int tcp_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
int target; /* Read at least this many bytes */
long timeo;
struct task_struct *user_recv = NULL;
- int copied_early = 0;
+ bool copied_early = false;
struct sk_buff *skb;
u32 urg_hole = 0;
@@ -1778,9 +1744,9 @@ do_prequeue:
}
if ((flags & MSG_PEEK) &&
(peek_seq - copied - urg_hole != tp->copied_seq)) {
- if (net_ratelimit())
- printk(KERN_DEBUG "TCP(%s:%d): Application bug, race in MSG_PEEK.\n",
- current->comm, task_pid_nr(current));
+ net_dbg_ratelimited("TCP(%s:%d): Application bug, race in MSG_PEEK\n",
+ current->comm,
+ task_pid_nr(current));
peek_seq = tp->copied_seq;
}
continue;
@@ -1834,7 +1800,7 @@ do_prequeue:
dma_async_memcpy_issue_pending(tp->ucopy.dma_chan);
if ((offset + used) == skb->len)
- copied_early = 1;
+ copied_early = true;
} else
#endif
@@ -1868,7 +1834,7 @@ skip_copy:
goto found_fin_ok;
if (!(flags & MSG_PEEK)) {
sk_eat_skb(sk, skb, copied_early);
- copied_early = 0;
+ copied_early = false;
}
continue;
@@ -1877,7 +1843,7 @@ skip_copy:
++*seq;
if (!(flags & MSG_PEEK)) {
sk_eat_skb(sk, skb, copied_early);
- copied_early = 0;
+ copied_early = false;
}
break;
} while (len > 0);
@@ -2035,10 +2001,10 @@ bool tcp_check_oom(struct sock *sk, int shift)
too_many_orphans = tcp_too_many_orphans(sk, shift);
out_of_socket_memory = tcp_out_of_memory(sk);
- if (too_many_orphans && net_ratelimit())
- pr_info("too many orphaned sockets\n");
- if (out_of_socket_memory && net_ratelimit())
- pr_info("out of memory -- consider tuning tcp_mem\n");
+ if (too_many_orphans)
+ net_info_ratelimited("too many orphaned sockets\n");
+ if (out_of_socket_memory)
+ net_info_ratelimited("out of memory -- consider tuning tcp_mem\n");
return too_many_orphans || out_of_socket_memory;
}
@@ -2204,7 +2170,7 @@ EXPORT_SYMBOL(tcp_close);
/* These states need RST on ABORT according to RFC793 */
-static inline int tcp_need_reset(int state)
+static inline bool tcp_need_reset(int state)
{
return (1 << state) &
(TCPF_ESTABLISHED | TCPF_CLOSE_WAIT | TCPF_FIN_WAIT1 |
@@ -2278,7 +2244,7 @@ int tcp_disconnect(struct sock *sk, int flags)
}
EXPORT_SYMBOL(tcp_disconnect);
-static inline int tcp_can_repair_sock(struct sock *sk)
+static inline bool tcp_can_repair_sock(const struct sock *sk)
{
return capable(CAP_NET_ADMIN) &&
((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_ESTABLISHED));
@@ -3205,13 +3171,13 @@ out_free:
struct tcp_md5sig_pool __percpu *tcp_alloc_md5sig_pool(struct sock *sk)
{
struct tcp_md5sig_pool __percpu *pool;
- int alloc = 0;
+ bool alloc = false;
retry:
spin_lock_bh(&tcp_md5sig_pool_lock);
pool = tcp_md5sig_pool;
if (tcp_md5sig_users++ == 0) {
- alloc = 1;
+ alloc = true;
spin_unlock_bh(&tcp_md5sig_pool_lock);
} else if (!pool) {
tcp_md5sig_users--;
@@ -3495,9 +3461,15 @@ extern struct tcp_congestion_ops tcp_reno;
static __initdata unsigned long thash_entries;
static int __init set_thash_entries(char *str)
{
+ ssize_t ret;
+
if (!str)
return 0;
- thash_entries = simple_strtoul(str, &str, 0);
+
+ ret = kstrtoul(str, 0, &thash_entries);
+ if (ret)
+ return 0;
+
return 1;
}
__setup("thash_entries=", set_thash_entries);
diff --git a/net/ipv4/tcp_cong.c b/net/ipv4/tcp_cong.c
index 272a84593c85..04dbd7ae7c62 100644
--- a/net/ipv4/tcp_cong.c
+++ b/net/ipv4/tcp_cong.c
@@ -280,19 +280,19 @@ int tcp_set_congestion_control(struct sock *sk, const char *name)
/* RFC2861 Check whether we are limited by application or congestion window
* This is the inverse of cwnd check in tcp_tso_should_defer
*/
-int tcp_is_cwnd_limited(const struct sock *sk, u32 in_flight)
+bool tcp_is_cwnd_limited(const struct sock *sk, u32 in_flight)
{
const struct tcp_sock *tp = tcp_sk(sk);
u32 left;
if (in_flight >= tp->snd_cwnd)
- return 1;
+ return true;
left = tp->snd_cwnd - in_flight;
if (sk_can_gso(sk) &&
left * sysctl_tcp_tso_win_divisor < tp->snd_cwnd &&
left * tp->mss_cache < sk->sk_gso_max_size)
- return 1;
+ return true;
return left <= tcp_max_tso_deferred_mss(tp);
}
EXPORT_SYMBOL_GPL(tcp_is_cwnd_limited);
diff --git a/net/ipv4/tcp_hybla.c b/net/ipv4/tcp_hybla.c
index fe3ecf484b44..57bdd17dff4d 100644
--- a/net/ipv4/tcp_hybla.c
+++ b/net/ipv4/tcp_hybla.c
@@ -15,7 +15,7 @@
/* Tcp Hybla structure. */
struct hybla {
- u8 hybla_en;
+ bool hybla_en;
u32 snd_cwnd_cents; /* Keeps increment values when it is <1, <<7 */
u32 rho; /* Rho parameter, integer part */
u32 rho2; /* Rho * Rho, integer part */
@@ -24,8 +24,7 @@ struct hybla {
u32 minrtt; /* Minimum smoothed round trip time value seen */
};
-/* Hybla reference round trip time (default= 1/40 sec = 25 ms),
- expressed in jiffies */
+/* Hybla reference round trip time (default= 1/40 sec = 25 ms), in ms */
static int rtt0 = 25;
module_param(rtt0, int, 0644);
MODULE_PARM_DESC(rtt0, "reference rout trip time (ms)");
@@ -39,7 +38,7 @@ static inline void hybla_recalc_param (struct sock *sk)
ca->rho_3ls = max_t(u32, tcp_sk(sk)->srtt / msecs_to_jiffies(rtt0), 8);
ca->rho = ca->rho_3ls >> 3;
ca->rho2_7ls = (ca->rho_3ls * ca->rho_3ls) << 1;
- ca->rho2 = ca->rho2_7ls >>7;
+ ca->rho2 = ca->rho2_7ls >> 7;
}
static void hybla_init(struct sock *sk)
@@ -52,7 +51,7 @@ static void hybla_init(struct sock *sk)
ca->rho_3ls = 0;
ca->rho2_7ls = 0;
ca->snd_cwnd_cents = 0;
- ca->hybla_en = 1;
+ ca->hybla_en = true;
tp->snd_cwnd = 2;
tp->snd_cwnd_clamp = 65535;
@@ -67,6 +66,7 @@ static void hybla_init(struct sock *sk)
static void hybla_state(struct sock *sk, u8 ca_state)
{
struct hybla *ca = inet_csk_ca(sk);
+
ca->hybla_en = (ca_state == TCP_CA_Open);
}
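
hybla_recalc_param() above keeps rho in fixed point: rho_3ls carries three fractional bits (srtt itself is stored left-shifted by 3), and rho2_7ls carries seven, since squaring a <<3 value gives <<6 and the extra <<1 brings it to <<7. A standalone sketch of that arithmetic, simplified to work in milliseconds rather than jiffies:

    #include <stdint.h>
    #include <stdio.h>

    /* Simplified stand-in for hybla_recalc_param(): srtt_q3 plays the role
     * of tp->srtt (RTT << 3), rtt0 is the 25 ms reference round trip time. */
    static void hybla_recalc(uint32_t srtt_q3, uint32_t rtt0,
                             uint32_t *rho, uint32_t *rho2)
    {
        uint32_t rho_3ls = srtt_q3 / rtt0;          /* rho << 3 */
        uint32_t rho2_7ls;

        if (rho_3ls < 8)
            rho_3ls = 8;                            /* clamp rho to >= 1 */
        rho2_7ls = (rho_3ls * rho_3ls) << 1;        /* (rho<<3)^2 * 2 = rho^2 << 7 */

        *rho  = rho_3ls >> 3;
        *rho2 = rho2_7ls >> 7;
    }

    int main(void)
    {
        uint32_t rho, rho2;

        /* e.g. a 200 ms smoothed RTT against the 25 ms reference */
        hybla_recalc(200 << 3, 25, &rho, &rho2);
        printf("rho=%u rho2=%u\n", rho, rho2);      /* expect 8 and 64 */
        return 0;
    }
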
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index eb58b94301ec..cfa2aa128342 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -196,9 +196,10 @@ static void tcp_enter_quickack_mode(struct sock *sk)
* and the session is not interactive.
*/
-static inline int tcp_in_quickack_mode(const struct sock *sk)
+static inline bool tcp_in_quickack_mode(const struct sock *sk)
{
const struct inet_connection_sock *icsk = inet_csk(sk);
+
return icsk->icsk_ack.quick && !icsk->icsk_ack.pingpong;
}
@@ -253,11 +254,11 @@ static inline void TCP_ECN_rcv_syn(struct tcp_sock *tp, const struct tcphdr *th)
tp->ecn_flags &= ~TCP_ECN_OK;
}
-static inline int TCP_ECN_rcv_ecn_echo(const struct tcp_sock *tp, const struct tcphdr *th)
+static bool TCP_ECN_rcv_ecn_echo(const struct tcp_sock *tp, const struct tcphdr *th)
{
if (th->ece && !th->syn && (tp->ecn_flags & TCP_ECN_OK))
- return 1;
- return 0;
+ return true;
+ return false;
}
/* Buffer size and advertised window tuning.
@@ -981,12 +982,12 @@ static void tcp_update_reordering(struct sock *sk, const int metric,
NET_INC_STATS_BH(sock_net(sk), mib_idx);
#if FASTRETRANS_DEBUG > 1
- printk(KERN_DEBUG "Disorder%d %d %u f%u s%u rr%d\n",
- tp->rx_opt.sack_ok, inet_csk(sk)->icsk_ca_state,
- tp->reordering,
- tp->fackets_out,
- tp->sacked_out,
- tp->undo_marker ? tp->undo_retrans : 0);
+ pr_debug("Disorder%d %d %u f%u s%u rr%d\n",
+ tp->rx_opt.sack_ok, inet_csk(sk)->icsk_ca_state,
+ tp->reordering,
+ tp->fackets_out,
+ tp->sacked_out,
+ tp->undo_marker ? tp->undo_retrans : 0);
#endif
tcp_disable_fack(tp);
}
@@ -1123,36 +1124,36 @@ static void tcp_skb_mark_lost_uncond_verify(struct tcp_sock *tp,
* the exact amount is rather hard to quantify. However, tp->max_window can
* be used as an exaggerated estimate.
*/
-static int tcp_is_sackblock_valid(struct tcp_sock *tp, int is_dsack,
- u32 start_seq, u32 end_seq)
+static bool tcp_is_sackblock_valid(struct tcp_sock *tp, bool is_dsack,
+ u32 start_seq, u32 end_seq)
{
/* Too far in future, or reversed (interpretation is ambiguous) */
if (after(end_seq, tp->snd_nxt) || !before(start_seq, end_seq))
- return 0;
+ return false;
/* Nasty start_seq wrap-around check (see comments above) */
if (!before(start_seq, tp->snd_nxt))
- return 0;
+ return false;
/* In outstanding window? ...This is valid exit for D-SACKs too.
* start_seq == snd_una is non-sensical (see comments above)
*/
if (after(start_seq, tp->snd_una))
- return 1;
+ return true;
if (!is_dsack || !tp->undo_marker)
- return 0;
+ return false;
/* ...Then it's D-SACK, and must reside below snd_una completely */
if (after(end_seq, tp->snd_una))
- return 0;
+ return false;
if (!before(start_seq, tp->undo_marker))
- return 1;
+ return true;
/* Too old */
if (!after(end_seq, tp->undo_marker))
- return 0;
+ return false;
/* Undo_marker boundary crossing (overestimates a lot). Known already:
* start_seq < undo_marker and end_seq >= undo_marker.
@@ -1224,17 +1225,17 @@ static void tcp_mark_lost_retrans(struct sock *sk)
tp->lost_retrans_low = new_low_seq;
}
-static int tcp_check_dsack(struct sock *sk, const struct sk_buff *ack_skb,
- struct tcp_sack_block_wire *sp, int num_sacks,
- u32 prior_snd_una)
+static bool tcp_check_dsack(struct sock *sk, const struct sk_buff *ack_skb,
+ struct tcp_sack_block_wire *sp, int num_sacks,
+ u32 prior_snd_una)
{
struct tcp_sock *tp = tcp_sk(sk);
u32 start_seq_0 = get_unaligned_be32(&sp[0].start_seq);
u32 end_seq_0 = get_unaligned_be32(&sp[0].end_seq);
- int dup_sack = 0;
+ bool dup_sack = false;
if (before(start_seq_0, TCP_SKB_CB(ack_skb)->ack_seq)) {
- dup_sack = 1;
+ dup_sack = true;
tcp_dsack_seen(tp);
NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPDSACKRECV);
} else if (num_sacks > 1) {
@@ -1243,7 +1244,7 @@ static int tcp_check_dsack(struct sock *sk, const struct sk_buff *ack_skb,
if (!after(end_seq_0, end_seq_1) &&
!before(start_seq_0, start_seq_1)) {
- dup_sack = 1;
+ dup_sack = true;
tcp_dsack_seen(tp);
NET_INC_STATS_BH(sock_net(sk),
LINUX_MIB_TCPDSACKOFORECV);
@@ -1274,9 +1275,10 @@ struct tcp_sacktag_state {
* FIXME: this could be merged to shift decision code
*/
static int tcp_match_skb_to_sack(struct sock *sk, struct sk_buff *skb,
- u32 start_seq, u32 end_seq)
+ u32 start_seq, u32 end_seq)
{
- int in_sack, err;
+ int err;
+ bool in_sack;
unsigned int pkt_len;
unsigned int mss;
@@ -1322,7 +1324,7 @@ static int tcp_match_skb_to_sack(struct sock *sk, struct sk_buff *skb,
static u8 tcp_sacktag_one(struct sock *sk,
struct tcp_sacktag_state *state, u8 sacked,
u32 start_seq, u32 end_seq,
- int dup_sack, int pcount)
+ bool dup_sack, int pcount)
{
struct tcp_sock *tp = tcp_sk(sk);
int fack_count = state->fack_count;
@@ -1402,10 +1404,10 @@ static u8 tcp_sacktag_one(struct sock *sk,
/* Shift newly-SACKed bytes from this skb to the immediately previous
* already-SACKed sk_buff. Mark the newly-SACKed bytes as such.
*/
-static int tcp_shifted_skb(struct sock *sk, struct sk_buff *skb,
- struct tcp_sacktag_state *state,
- unsigned int pcount, int shifted, int mss,
- int dup_sack)
+static bool tcp_shifted_skb(struct sock *sk, struct sk_buff *skb,
+ struct tcp_sacktag_state *state,
+ unsigned int pcount, int shifted, int mss,
+ bool dup_sack)
{
struct tcp_sock *tp = tcp_sk(sk);
struct sk_buff *prev = tcp_write_queue_prev(sk, skb);
@@ -1455,7 +1457,7 @@ static int tcp_shifted_skb(struct sock *sk, struct sk_buff *skb,
if (skb->len > 0) {
BUG_ON(!tcp_skb_pcount(skb));
NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_SACKSHIFTED);
- return 0;
+ return false;
}
/* Whole SKB was eaten :-) */
@@ -1478,7 +1480,7 @@ static int tcp_shifted_skb(struct sock *sk, struct sk_buff *skb,
NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_SACKMERGED);
- return 1;
+ return true;
}
/* I wish gso_size would have a bit more sane initialization than
@@ -1501,7 +1503,7 @@ static int skb_can_shift(const struct sk_buff *skb)
static struct sk_buff *tcp_shift_skb_data(struct sock *sk, struct sk_buff *skb,
struct tcp_sacktag_state *state,
u32 start_seq, u32 end_seq,
- int dup_sack)
+ bool dup_sack)
{
struct tcp_sock *tp = tcp_sk(sk);
struct sk_buff *prev;
@@ -1640,14 +1642,14 @@ static struct sk_buff *tcp_sacktag_walk(struct sk_buff *skb, struct sock *sk,
struct tcp_sack_block *next_dup,
struct tcp_sacktag_state *state,
u32 start_seq, u32 end_seq,
- int dup_sack_in)
+ bool dup_sack_in)
{
struct tcp_sock *tp = tcp_sk(sk);
struct sk_buff *tmp;
tcp_for_write_queue_from(skb, sk) {
int in_sack = 0;
- int dup_sack = dup_sack_in;
+ bool dup_sack = dup_sack_in;
if (skb == tcp_send_head(sk))
break;
@@ -1662,7 +1664,7 @@ static struct sk_buff *tcp_sacktag_walk(struct sk_buff *skb, struct sock *sk,
next_dup->start_seq,
next_dup->end_seq);
if (in_sack > 0)
- dup_sack = 1;
+ dup_sack = true;
}
/* skb reference here is a bit tricky to get right, since
@@ -1767,7 +1769,7 @@ tcp_sacktag_write_queue(struct sock *sk, const struct sk_buff *ack_skb,
struct sk_buff *skb;
int num_sacks = min(TCP_NUM_SACKS, (ptr[1] - TCPOLEN_SACK_BASE) >> 3);
int used_sacks;
- int found_dup_sack = 0;
+ bool found_dup_sack = false;
int i, j;
int first_sack_index;
@@ -1798,7 +1800,7 @@ tcp_sacktag_write_queue(struct sock *sk, const struct sk_buff *ack_skb,
used_sacks = 0;
first_sack_index = 0;
for (i = 0; i < num_sacks; i++) {
- int dup_sack = !i && found_dup_sack;
+ bool dup_sack = !i && found_dup_sack;
sp[used_sacks].start_seq = get_unaligned_be32(&sp_wire[i].start_seq);
sp[used_sacks].end_seq = get_unaligned_be32(&sp_wire[i].end_seq);
@@ -1865,7 +1867,7 @@ tcp_sacktag_write_queue(struct sock *sk, const struct sk_buff *ack_skb,
while (i < used_sacks) {
u32 start_seq = sp[i].start_seq;
u32 end_seq = sp[i].end_seq;
- int dup_sack = (found_dup_sack && (i == first_sack_index));
+ bool dup_sack = (found_dup_sack && (i == first_sack_index));
struct tcp_sack_block *next_dup = NULL;
if (found_dup_sack && ((i + 1) == first_sack_index))
@@ -1967,9 +1969,9 @@ out:
}
/* Limits sacked_out so that sum with lost_out isn't ever larger than
- * packets_out. Returns zero if sacked_out adjustement wasn't necessary.
+ * packets_out. Returns false if sacked_out adjustement wasn't necessary.
*/
-static int tcp_limit_reno_sacked(struct tcp_sock *tp)
+static bool tcp_limit_reno_sacked(struct tcp_sock *tp)
{
u32 holes;
@@ -1978,9 +1980,9 @@ static int tcp_limit_reno_sacked(struct tcp_sock *tp)
if ((tp->sacked_out + holes) > tp->packets_out) {
tp->sacked_out = tp->packets_out - holes;
- return 1;
+ return true;
}
- return 0;
+ return false;
}
/* If we receive more dupacks than we expected counting segments
@@ -2034,40 +2036,40 @@ static int tcp_is_sackfrto(const struct tcp_sock *tp)
/* F-RTO can only be used if TCP has never retransmitted anything other than
* head (SACK enhanced variant from Appendix B of RFC4138 is more robust here)
*/
-int tcp_use_frto(struct sock *sk)
+bool tcp_use_frto(struct sock *sk)
{
const struct tcp_sock *tp = tcp_sk(sk);
const struct inet_connection_sock *icsk = inet_csk(sk);
struct sk_buff *skb;
if (!sysctl_tcp_frto)
- return 0;
+ return false;
/* MTU probe and F-RTO won't really play nicely along currently */
if (icsk->icsk_mtup.probe_size)
- return 0;
+ return false;
if (tcp_is_sackfrto(tp))
- return 1;
+ return true;
/* Avoid expensive walking of rexmit queue if possible */
if (tp->retrans_out > 1)
- return 0;
+ return false;
skb = tcp_write_queue_head(sk);
if (tcp_skb_is_last(sk, skb))
- return 1;
+ return true;
skb = tcp_write_queue_next(sk, skb); /* Skips head */
tcp_for_write_queue_from(skb, sk) {
if (skb == tcp_send_head(sk))
break;
if (TCP_SKB_CB(skb)->sacked & TCPCB_RETRANS)
- return 0;
+ return false;
/* Short-circuit when first non-SACKed skb has been checked */
if (!(TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_ACKED))
break;
}
- return 1;
+ return true;
}
/* RTO occurred, but do not yet enter Loss state. Instead, defer RTO
@@ -2303,7 +2305,7 @@ void tcp_enter_loss(struct sock *sk, int how)
*
* Do processing similar to RTO timeout.
*/
-static int tcp_check_sack_reneging(struct sock *sk, int flag)
+static bool tcp_check_sack_reneging(struct sock *sk, int flag)
{
if (flag & FLAG_SACK_RENEGING) {
struct inet_connection_sock *icsk = inet_csk(sk);
@@ -2314,9 +2316,9 @@ static int tcp_check_sack_reneging(struct sock *sk, int flag)
tcp_retransmit_skb(sk, tcp_write_queue_head(sk));
inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
icsk->icsk_rto, TCP_RTO_MAX);
- return 1;
+ return true;
}
- return 0;
+ return false;
}
static inline int tcp_fackets_out(const struct tcp_sock *tp)
@@ -2472,28 +2474,28 @@ static inline int tcp_head_timedout(const struct sock *sk)
* Main question: may we further continue forward transmission
* with the same cwnd?
*/
-static int tcp_time_to_recover(struct sock *sk, int flag)
+static bool tcp_time_to_recover(struct sock *sk, int flag)
{
struct tcp_sock *tp = tcp_sk(sk);
__u32 packets_out;
/* Do not perform any recovery during F-RTO algorithm */
if (tp->frto_counter)
- return 0;
+ return false;
/* Trick#1: The loss is proven. */
if (tp->lost_out)
- return 1;
+ return true;
/* Not-A-Trick#2 : Classic rule... */
if (tcp_dupack_heuristics(tp) > tp->reordering)
- return 1;
+ return true;
/* Trick#3 : when we use RFC2988 timer restart, fast
* retransmit can be triggered by timeout of queue head.
*/
if (tcp_is_fack(tp) && tcp_head_timedout(sk))
- return 1;
+ return true;
/* Trick#4: It is still not OK... But will it be useful to delay
* recovery more?
@@ -2505,7 +2507,7 @@ static int tcp_time_to_recover(struct sock *sk, int flag)
/* We have nothing to send. This connection is limited
* either by receiver window or by application.
*/
- return 1;
+ return true;
}
/* If a thin stream is detected, retransmit after first
@@ -2516,7 +2518,7 @@ static int tcp_time_to_recover(struct sock *sk, int flag)
if ((tp->thin_dupack || sysctl_tcp_thin_dupack) &&
tcp_stream_is_thin(tp) && tcp_dupack_heuristics(tp) > 1 &&
tcp_is_sack(tp) && !tcp_send_head(sk))
- return 1;
+ return true;
/* Trick#6: TCP early retransmit, per RFC5827. To avoid spurious
* retransmissions due to small network reorderings, we implement
@@ -2528,7 +2530,7 @@ static int tcp_time_to_recover(struct sock *sk, int flag)
!tcp_may_send_now(sk))
return !tcp_pause_early_retransmit(sk, flag);
- return 0;
+ return false;
}
/* New heuristics: it is possible only after we switched to restart timer
@@ -2716,22 +2718,22 @@ static void DBGUNDO(struct sock *sk, const char *msg)
struct inet_sock *inet = inet_sk(sk);
if (sk->sk_family == AF_INET) {
- printk(KERN_DEBUG "Undo %s %pI4/%u c%u l%u ss%u/%u p%u\n",
- msg,
- &inet->inet_daddr, ntohs(inet->inet_dport),
- tp->snd_cwnd, tcp_left_out(tp),
- tp->snd_ssthresh, tp->prior_ssthresh,
- tp->packets_out);
+ pr_debug("Undo %s %pI4/%u c%u l%u ss%u/%u p%u\n",
+ msg,
+ &inet->inet_daddr, ntohs(inet->inet_dport),
+ tp->snd_cwnd, tcp_left_out(tp),
+ tp->snd_ssthresh, tp->prior_ssthresh,
+ tp->packets_out);
}
#if IS_ENABLED(CONFIG_IPV6)
else if (sk->sk_family == AF_INET6) {
struct ipv6_pinfo *np = inet6_sk(sk);
- printk(KERN_DEBUG "Undo %s %pI6/%u c%u l%u ss%u/%u p%u\n",
- msg,
- &np->daddr, ntohs(inet->inet_dport),
- tp->snd_cwnd, tcp_left_out(tp),
- tp->snd_ssthresh, tp->prior_ssthresh,
- tp->packets_out);
+ pr_debug("Undo %s %pI6/%u c%u l%u ss%u/%u p%u\n",
+ msg,
+ &np->daddr, ntohs(inet->inet_dport),
+ tp->snd_cwnd, tcp_left_out(tp),
+ tp->snd_ssthresh, tp->prior_ssthresh,
+ tp->packets_out);
}
#endif
}
@@ -2767,7 +2769,7 @@ static inline int tcp_may_undo(const struct tcp_sock *tp)
}
/* People celebrate: "We love our President!" */
-static int tcp_try_undo_recovery(struct sock *sk)
+static bool tcp_try_undo_recovery(struct sock *sk)
{
struct tcp_sock *tp = tcp_sk(sk);
@@ -2792,10 +2794,10 @@ static int tcp_try_undo_recovery(struct sock *sk)
* is ACKed. For Reno it is MUST to prevent false
* fast retransmits (RFC2582). SACK TCP is safe. */
tcp_moderate_cwnd(tp);
- return 1;
+ return true;
}
tcp_set_ca_state(sk, TCP_CA_Open);
- return 0;
+ return false;
}
/* Try to undo cwnd reduction, because D-SACKs acked all retransmitted data */
@@ -2825,19 +2827,19 @@ static void tcp_try_undo_dsack(struct sock *sk)
* that successive retransmissions of a segment must not advance
* retrans_stamp under any conditions.
*/
-static int tcp_any_retrans_done(const struct sock *sk)
+static bool tcp_any_retrans_done(const struct sock *sk)
{
const struct tcp_sock *tp = tcp_sk(sk);
struct sk_buff *skb;
if (tp->retrans_out)
- return 1;
+ return true;
skb = tcp_write_queue_head(sk);
if (unlikely(skb && TCP_SKB_CB(skb)->sacked & TCPCB_EVER_RETRANS))
- return 1;
+ return true;
- return 0;
+ return false;
}
/* Undo during fast recovery after partial ACK. */
@@ -2871,7 +2873,7 @@ static int tcp_try_undo_partial(struct sock *sk, int acked)
}
/* Undo during loss recovery after partial ACK. */
-static int tcp_try_undo_loss(struct sock *sk)
+static bool tcp_try_undo_loss(struct sock *sk)
{
struct tcp_sock *tp = tcp_sk(sk);
@@ -2893,9 +2895,9 @@ static int tcp_try_undo_loss(struct sock *sk)
tp->undo_marker = 0;
if (tcp_is_sack(tp))
tcp_set_ca_state(sk, TCP_CA_Open);
- return 1;
+ return true;
}
- return 0;
+ return false;
}
static inline void tcp_complete_cwr(struct sock *sk)
@@ -3370,7 +3372,7 @@ static int tcp_clean_rtx_queue(struct sock *sk, int prior_fackets,
const struct inet_connection_sock *icsk = inet_csk(sk);
struct sk_buff *skb;
u32 now = tcp_time_stamp;
- int fully_acked = 1;
+ int fully_acked = true;
int flag = 0;
u32 pkts_acked = 0;
u32 reord = tp->packets_out;
@@ -3394,7 +3396,7 @@ static int tcp_clean_rtx_queue(struct sock *sk, int prior_fackets,
if (!acked_pcount)
break;
- fully_acked = 0;
+ fully_acked = false;
} else {
acked_pcount = tcp_skb_pcount(skb);
}
@@ -3511,18 +3513,18 @@ static int tcp_clean_rtx_queue(struct sock *sk, int prior_fackets,
if (!tp->packets_out && tcp_is_sack(tp)) {
icsk = inet_csk(sk);
if (tp->lost_out) {
- printk(KERN_DEBUG "Leak l=%u %d\n",
- tp->lost_out, icsk->icsk_ca_state);
+ pr_debug("Leak l=%u %d\n",
+ tp->lost_out, icsk->icsk_ca_state);
tp->lost_out = 0;
}
if (tp->sacked_out) {
- printk(KERN_DEBUG "Leak s=%u %d\n",
- tp->sacked_out, icsk->icsk_ca_state);
+ pr_debug("Leak s=%u %d\n",
+ tp->sacked_out, icsk->icsk_ca_state);
tp->sacked_out = 0;
}
if (tp->retrans_out) {
- printk(KERN_DEBUG "Leak r=%u %d\n",
- tp->retrans_out, icsk->icsk_ca_state);
+ pr_debug("Leak r=%u %d\n",
+ tp->retrans_out, icsk->icsk_ca_state);
tp->retrans_out = 0;
}
}
@@ -3673,7 +3675,7 @@ static void tcp_undo_spur_to_response(struct sock *sk, int flag)
* to prove that the RTO is indeed spurious. It transfers the control
* from F-RTO to the conventional RTO recovery
*/
-static int tcp_process_frto(struct sock *sk, int flag)
+static bool tcp_process_frto(struct sock *sk, int flag)
{
struct tcp_sock *tp = tcp_sk(sk);
@@ -3689,7 +3691,7 @@ static int tcp_process_frto(struct sock *sk, int flag)
if (!before(tp->snd_una, tp->frto_highmark)) {
tcp_enter_frto_loss(sk, (tp->frto_counter == 1 ? 2 : 3), flag);
- return 1;
+ return true;
}
if (!tcp_is_sackfrto(tp)) {
@@ -3698,19 +3700,19 @@ static int tcp_process_frto(struct sock *sk, int flag)
* data, winupdate
*/
if (!(flag & FLAG_ANY_PROGRESS) && (flag & FLAG_NOT_DUP))
- return 1;
+ return true;
if (!(flag & FLAG_DATA_ACKED)) {
tcp_enter_frto_loss(sk, (tp->frto_counter == 1 ? 0 : 3),
flag);
- return 1;
+ return true;
}
} else {
if (!(flag & FLAG_DATA_ACKED) && (tp->frto_counter == 1)) {
/* Prevent sending of new data. */
tp->snd_cwnd = min(tp->snd_cwnd,
tcp_packets_in_flight(tp));
- return 1;
+ return true;
}
if ((tp->frto_counter >= 2) &&
@@ -3720,10 +3722,10 @@ static int tcp_process_frto(struct sock *sk, int flag)
/* RFC4138 shortcoming (see comment above) */
if (!(flag & FLAG_FORWARD_PROGRESS) &&
(flag & FLAG_NOT_DUP))
- return 1;
+ return true;
tcp_enter_frto_loss(sk, 3, flag);
- return 1;
+ return true;
}
}
@@ -3735,7 +3737,7 @@ static int tcp_process_frto(struct sock *sk, int flag)
if (!tcp_may_send_now(sk))
tcp_enter_frto_loss(sk, 2, flag);
- return 1;
+ return true;
} else {
switch (sysctl_tcp_frto_response) {
case 2:
@@ -3752,7 +3754,7 @@ static int tcp_process_frto(struct sock *sk, int flag)
tp->undo_marker = 0;
NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPSPURIOUSRTOS);
}
- return 0;
+ return false;
}
/* This routine deals with incoming acks, but not outgoing ones. */
@@ -3770,7 +3772,7 @@ static int tcp_ack(struct sock *sk, const struct sk_buff *skb, int flag)
int prior_sacked = tp->sacked_out;
int pkts_acked = 0;
int newly_acked_sacked = 0;
- int frto_cwnd = 0;
+ bool frto_cwnd = false;
/* If the ack is older than previous acks
* then we can probably ignore it.
@@ -3952,10 +3954,9 @@ void tcp_parse_options(const struct sk_buff *skb, struct tcp_options_received *o
__u8 snd_wscale = *(__u8 *)ptr;
opt_rx->wscale_ok = 1;
if (snd_wscale > 14) {
- if (net_ratelimit())
- pr_info("%s: Illegal window scaling value %d >14 received\n",
- __func__,
- snd_wscale);
+ net_info_ratelimited("%s: Illegal window scaling value %d >14 received\n",
+ __func__,
+ snd_wscale);
snd_wscale = 14;
}
opt_rx->snd_wscale = snd_wscale;
@@ -4026,7 +4027,7 @@ void tcp_parse_options(const struct sk_buff *skb, struct tcp_options_received *o
}
EXPORT_SYMBOL(tcp_parse_options);
-static int tcp_parse_aligned_timestamp(struct tcp_sock *tp, const struct tcphdr *th)
+static bool tcp_parse_aligned_timestamp(struct tcp_sock *tp, const struct tcphdr *th)
{
const __be32 *ptr = (const __be32 *)(th + 1);
@@ -4037,31 +4038,31 @@ static int tcp_parse_aligned_timestamp(struct tcp_sock *tp, const struct tcphdr
tp->rx_opt.rcv_tsval = ntohl(*ptr);
++ptr;
tp->rx_opt.rcv_tsecr = ntohl(*ptr);
- return 1;
+ return true;
}
- return 0;
+ return false;
}
/* Fast parse options. This hopes to only see timestamps.
* If it is wrong it falls back on tcp_parse_options().
*/
-static int tcp_fast_parse_options(const struct sk_buff *skb,
- const struct tcphdr *th,
- struct tcp_sock *tp, const u8 **hvpp)
+static bool tcp_fast_parse_options(const struct sk_buff *skb,
+ const struct tcphdr *th,
+ struct tcp_sock *tp, const u8 **hvpp)
{
/* In the spirit of fast parsing, compare doff directly to constant
* values. Because equality is used, short doff can be ignored here.
*/
if (th->doff == (sizeof(*th) / 4)) {
tp->rx_opt.saw_tstamp = 0;
- return 0;
+ return false;
} else if (tp->rx_opt.tstamp_ok &&
th->doff == ((sizeof(*th) + TCPOLEN_TSTAMP_ALIGNED) / 4)) {
if (tcp_parse_aligned_timestamp(tp, th))
- return 1;
+ return true;
}
tcp_parse_options(skb, &tp->rx_opt, hvpp, 1);
- return 1;
+ return true;
}
#ifdef CONFIG_TCP_MD5SIG
@@ -4302,7 +4303,7 @@ static void tcp_fin(struct sock *sk)
}
}
-static inline int tcp_sack_extend(struct tcp_sack_block *sp, u32 seq,
+static inline bool tcp_sack_extend(struct tcp_sack_block *sp, u32 seq,
u32 end_seq)
{
if (!after(seq, sp->end_seq) && !after(sp->start_seq, end_seq)) {
@@ -4310,9 +4311,9 @@ static inline int tcp_sack_extend(struct tcp_sack_block *sp, u32 seq,
sp->start_seq = seq;
if (after(end_seq, sp->end_seq))
sp->end_seq = end_seq;
- return 1;
+ return true;
}
- return 0;
+ return false;
}
static void tcp_dsack_set(struct sock *sk, u32 seq, u32 end_seq)
@@ -4508,10 +4509,10 @@ static void tcp_ofo_queue(struct sock *sk)
}
}
-static int tcp_prune_ofo_queue(struct sock *sk);
+static bool tcp_prune_ofo_queue(struct sock *sk);
static int tcp_prune_queue(struct sock *sk);
-static inline int tcp_try_rmem_schedule(struct sock *sk, unsigned int size)
+static int tcp_try_rmem_schedule(struct sock *sk, unsigned int size)
{
if (atomic_read(&sk->sk_rmem_alloc) > sk->sk_rcvbuf ||
!sk_rmem_schedule(sk, size)) {
@@ -4548,84 +4549,23 @@ static bool tcp_try_coalesce(struct sock *sk,
struct sk_buff *from,
bool *fragstolen)
{
- int i, delta, len = from->len;
+ int delta;
*fragstolen = false;
- if (tcp_hdr(from)->fin || skb_cloned(to))
+ if (tcp_hdr(from)->fin)
return false;
-
- if (len <= skb_tailroom(to)) {
- BUG_ON(skb_copy_bits(from, 0, skb_put(to, len), len));
- goto merge;
- }
-
- if (skb_has_frag_list(to) || skb_has_frag_list(from))
+ if (!skb_try_coalesce(to, from, fragstolen, &delta))
return false;
- if (skb_headlen(from) != 0) {
- struct page *page;
- unsigned int offset;
-
- if (skb_shinfo(to)->nr_frags +
- skb_shinfo(from)->nr_frags >= MAX_SKB_FRAGS)
- return false;
-
- if (skb_head_is_locked(from))
- return false;
-
- delta = from->truesize - SKB_DATA_ALIGN(sizeof(struct sk_buff));
-
- page = virt_to_head_page(from->head);
- offset = from->data - (unsigned char *)page_address(page);
-
- skb_fill_page_desc(to, skb_shinfo(to)->nr_frags,
- page, offset, skb_headlen(from));
- *fragstolen = true;
- } else {
- if (skb_shinfo(to)->nr_frags +
- skb_shinfo(from)->nr_frags > MAX_SKB_FRAGS)
- return false;
-
- delta = from->truesize -
- SKB_TRUESIZE(skb_end_pointer(from) - from->head);
- }
-
- WARN_ON_ONCE(delta < len);
-
- memcpy(skb_shinfo(to)->frags + skb_shinfo(to)->nr_frags,
- skb_shinfo(from)->frags,
- skb_shinfo(from)->nr_frags * sizeof(skb_frag_t));
- skb_shinfo(to)->nr_frags += skb_shinfo(from)->nr_frags;
-
- if (!skb_cloned(from))
- skb_shinfo(from)->nr_frags = 0;
-
- /* if the skb is cloned this does nothing since we set nr_frags to 0 */
- for (i = 0; i < skb_shinfo(from)->nr_frags; i++)
- skb_frag_ref(from, i);
-
- to->truesize += delta;
atomic_add(delta, &sk->sk_rmem_alloc);
sk_mem_charge(sk, delta);
- to->len += len;
- to->data_len += len;
-
-merge:
NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPRCVCOALESCE);
TCP_SKB_CB(to)->end_seq = TCP_SKB_CB(from)->end_seq;
TCP_SKB_CB(to)->ack_seq = TCP_SKB_CB(from)->ack_seq;
return true;
}
-static void kfree_skb_partial(struct sk_buff *skb, bool head_stolen)
-{
- if (head_stolen)
- kmem_cache_free(skbuff_head_cache, skb);
- else
- __kfree_skb(skb);
-}
-
static void tcp_data_queue_ofo(struct sock *sk, struct sk_buff *skb)
{
struct tcp_sock *tp = tcp_sk(sk);
@@ -4746,7 +4686,7 @@ end:
skb_set_owner_r(skb, sk);
}
-int tcp_queue_rcv(struct sock *sk, struct sk_buff *skb, int hdrlen,
+static int __must_check tcp_queue_rcv(struct sock *sk, struct sk_buff *skb, int hdrlen,
bool *fragstolen)
{
int eaten;
@@ -4763,6 +4703,42 @@ int tcp_queue_rcv(struct sock *sk, struct sk_buff *skb, int hdrlen,
return eaten;
}
+int tcp_send_rcvq(struct sock *sk, struct msghdr *msg, size_t size)
+{
+ struct sk_buff *skb;
+ struct tcphdr *th;
+ bool fragstolen;
+
+ if (tcp_try_rmem_schedule(sk, size + sizeof(*th)))
+ goto err;
+
+ skb = alloc_skb(size + sizeof(*th), sk->sk_allocation);
+ if (!skb)
+ goto err;
+
+ th = (struct tcphdr *)skb_put(skb, sizeof(*th));
+ skb_reset_transport_header(skb);
+ memset(th, 0, sizeof(*th));
+
+ if (memcpy_fromiovec(skb_put(skb, size), msg->msg_iov, size))
+ goto err_free;
+
+ TCP_SKB_CB(skb)->seq = tcp_sk(sk)->rcv_nxt;
+ TCP_SKB_CB(skb)->end_seq = TCP_SKB_CB(skb)->seq + size;
+ TCP_SKB_CB(skb)->ack_seq = tcp_sk(sk)->snd_una - 1;
+
+ if (tcp_queue_rcv(sk, skb, sizeof(*th), &fragstolen)) {
+ WARN_ON_ONCE(fragstolen); /* should not happen */
+ __kfree_skb(skb);
+ }
+ return size;
+
+err_free:
+ kfree_skb(skb);
+err:
+ return -ENOMEM;
+}
+
static void tcp_data_queue(struct sock *sk, struct sk_buff *skb)
{
const struct tcphdr *th = tcp_hdr(skb);
@@ -5057,10 +5033,10 @@ static void tcp_collapse_ofo_queue(struct sock *sk)
* Purge the out-of-order queue.
* Return true if queue was pruned.
*/
-static int tcp_prune_ofo_queue(struct sock *sk)
+static bool tcp_prune_ofo_queue(struct sock *sk)
{
struct tcp_sock *tp = tcp_sk(sk);
- int res = 0;
+ bool res = false;
if (!skb_queue_empty(&tp->out_of_order_queue)) {
NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_OFOPRUNED);
@@ -5074,7 +5050,7 @@ static int tcp_prune_ofo_queue(struct sock *sk)
if (tp->rx_opt.sack_ok)
tcp_sack_reset(&tp->rx_opt);
sk_mem_reclaim(sk);
- res = 1;
+ res = true;
}
return res;
}
@@ -5151,7 +5127,7 @@ void tcp_cwnd_application_limited(struct sock *sk)
tp->snd_cwnd_stamp = tcp_time_stamp;
}
-static int tcp_should_expand_sndbuf(const struct sock *sk)
+static bool tcp_should_expand_sndbuf(const struct sock *sk)
{
const struct tcp_sock *tp = tcp_sk(sk);
@@ -5159,21 +5135,21 @@ static int tcp_should_expand_sndbuf(const struct sock *sk)
* not modify it.
*/
if (sk->sk_userlocks & SOCK_SNDBUF_LOCK)
- return 0;
+ return false;
/* If we are under global TCP memory pressure, do not expand. */
if (sk_under_memory_pressure(sk))
- return 0;
+ return false;
/* If we are under soft global TCP memory pressure, do not expand. */
if (sk_memory_allocated(sk) >= sk_prot_mem_limits(sk, 0))
- return 0;
+ return false;
/* If we filled the congestion window, do not expand. */
if (tp->packets_out >= tp->snd_cwnd)
- return 0;
+ return false;
- return 1;
+ return true;
}
/* When incoming ACK allowed to free some skb from write_queue,
@@ -5399,16 +5375,16 @@ static inline int tcp_checksum_complete_user(struct sock *sk,
}
#ifdef CONFIG_NET_DMA
-static int tcp_dma_try_early_copy(struct sock *sk, struct sk_buff *skb,
+static bool tcp_dma_try_early_copy(struct sock *sk, struct sk_buff *skb,
int hlen)
{
struct tcp_sock *tp = tcp_sk(sk);
int chunk = skb->len - hlen;
int dma_cookie;
- int copied_early = 0;
+ bool copied_early = false;
if (tp->ucopy.wakeup)
- return 0;
+ return false;
if (!tp->ucopy.dma_chan && tp->ucopy.pinned_list)
tp->ucopy.dma_chan = net_dma_find_channel();
@@ -5424,7 +5400,7 @@ static int tcp_dma_try_early_copy(struct sock *sk, struct sk_buff *skb,
goto out;
tp->ucopy.dma_cookie = dma_cookie;
- copied_early = 1;
+ copied_early = true;
tp->ucopy.len -= chunk;
tp->copied_seq += chunk;
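
tcp_try_coalesce() above now hands the heavy lifting to the generic skb_try_coalesce() and keeps only the TCP-specific bookkeeping: charging the truesize delta to sk_rmem_alloc and extending end_seq/ack_seq of the buffer it merged into. A toy illustration of the append-if-room-else-queue idea; the real helper also handles page fragments, cloned heads and head stealing:

    #include <stdbool.h>
    #include <stdio.h>
    #include <string.h>

    struct buf {
        char data[2048];
        int len;
    };

    /* Toy coalesce: if the previous buffer has room, append the new payload
     * and report only the extra bytes as newly accounted memory; otherwise
     * the caller queues the new buffer as-is. */
    static bool try_coalesce(struct buf *to, const struct buf *from, int *delta)
    {
        if (to->len + from->len > (int)sizeof(to->data))
            return false;

        memcpy(to->data + to->len, from->data, from->len);
        to->len += from->len;
        *delta = from->len;
        return true;
    }

    int main(void)
    {
        struct buf head = { .len = 100 };
        struct buf frag = { .len = 200 };
        int delta = 0;

        printf("coalesced=%d delta=%d len=%d\n",
               try_coalesce(&head, &frag, &delta), delta, head.len);
        return 0;
    }
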
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
index 4ff5e1f70d16..a43b87dfe800 100644
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -866,14 +866,14 @@ static void tcp_v4_reqsk_destructor(struct request_sock *req)
}
/*
- * Return 1 if a syncookie should be sent
+ * Return true if a syncookie should be sent
*/
-int tcp_syn_flood_action(struct sock *sk,
+bool tcp_syn_flood_action(struct sock *sk,
const struct sk_buff *skb,
const char *proto)
{
const char *msg = "Dropping request";
- int want_cookie = 0;
+ bool want_cookie = false;
struct listen_sock *lopt;
@@ -881,7 +881,7 @@ int tcp_syn_flood_action(struct sock *sk,
#ifdef CONFIG_SYN_COOKIES
if (sysctl_tcp_syncookies) {
msg = "Sending cookies";
- want_cookie = 1;
+ want_cookie = true;
NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPREQQFULLDOCOOKIES);
} else
#endif
@@ -1196,7 +1196,7 @@ clear_hash_noput:
}
EXPORT_SYMBOL(tcp_v4_md5_hash_skb);
-static int tcp_v4_inbound_md5_hash(struct sock *sk, const struct sk_buff *skb)
+static bool tcp_v4_inbound_md5_hash(struct sock *sk, const struct sk_buff *skb)
{
/*
* This gets called for each TCP segment that arrives
@@ -1219,16 +1219,16 @@ static int tcp_v4_inbound_md5_hash(struct sock *sk, const struct sk_buff *skb)
/* We've parsed the options - do we have a hash? */
if (!hash_expected && !hash_location)
- return 0;
+ return false;
if (hash_expected && !hash_location) {
NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPMD5NOTFOUND);
- return 1;
+ return true;
}
if (!hash_expected && hash_location) {
NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPMD5UNEXPECTED);
- return 1;
+ return true;
}
/* Okay, so this is hash_expected and hash_location -
@@ -1239,15 +1239,14 @@ static int tcp_v4_inbound_md5_hash(struct sock *sk, const struct sk_buff *skb)
NULL, NULL, skb);
if (genhash || memcmp(hash_location, newhash, 16) != 0) {
- if (net_ratelimit()) {
- pr_info("MD5 Hash failed for (%pI4, %d)->(%pI4, %d)%s\n",
- &iph->saddr, ntohs(th->source),
- &iph->daddr, ntohs(th->dest),
- genhash ? " tcp_v4_calc_md5_hash failed" : "");
- }
- return 1;
+ net_info_ratelimited("MD5 Hash failed for (%pI4, %d)->(%pI4, %d)%s\n",
+ &iph->saddr, ntohs(th->source),
+ &iph->daddr, ntohs(th->dest),
+ genhash ? " tcp_v4_calc_md5_hash failed"
+ : "");
+ return true;
}
- return 0;
+ return false;
}
#endif
@@ -1281,7 +1280,7 @@ int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
__be32 saddr = ip_hdr(skb)->saddr;
__be32 daddr = ip_hdr(skb)->daddr;
__u32 isn = TCP_SKB_CB(skb)->when;
- int want_cookie = 0;
+ bool want_cookie = false;
/* Never answer SYNs sent to broadcast or multicast */
if (skb_rtable(skb)->rt_flags & (RTCF_BROADCAST | RTCF_MULTICAST))
@@ -1340,7 +1339,7 @@ int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
while (l-- > 0)
*c++ ^= *hash_location++;
- want_cookie = 0; /* not our kind of cookie */
+ want_cookie = false; /* not our kind of cookie */
tmp_ext.cookie_out_never = 0; /* false */
tmp_ext.cookie_plus = tmp_opt.cookie_plus;
} else if (!tp->rx_opt.cookie_in_always) {
@@ -2074,7 +2073,7 @@ static void *listening_get_idx(struct seq_file *seq, loff_t *pos)
return rc;
}
-static inline int empty_bucket(struct tcp_iter_state *st)
+static inline bool empty_bucket(struct tcp_iter_state *st)
{
return hlist_nulls_empty(&tcp_hashinfo.ehash[st->bucket].chain) &&
hlist_nulls_empty(&tcp_hashinfo.ehash[st->bucket].twchain);
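The tcp_v4_inbound_md5_hash() conversion above reads most clearly as a three-way decision table. The following userspace sketch restates that decision only; the function name and parameters are illustrative, not the kernel's:

#include <stdbool.h>

/* Sketch of the decision made by tcp_v4_inbound_md5_hash() above:
 * returning true means the segment is dropped. */
static bool tcp_md5_reject(bool hash_expected, bool hash_present,
                           bool digests_match)
{
    if (!hash_expected && !hash_present)
        return false;            /* neither peer uses TCP-MD5: accept */
    if (hash_expected != hash_present)
        return true;             /* one-sided MD5 option: drop */
    return !digests_match;       /* both present: drop on digest mismatch */
}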
diff --git a/net/ipv4/tcp_minisocks.c b/net/ipv4/tcp_minisocks.c
index 6f6a91832826..b85d9fe7d663 100644
--- a/net/ipv4/tcp_minisocks.c
+++ b/net/ipv4/tcp_minisocks.c
@@ -55,7 +55,7 @@ EXPORT_SYMBOL_GPL(tcp_death_row);
* state.
*/
-static int tcp_remember_stamp(struct sock *sk)
+static bool tcp_remember_stamp(struct sock *sk)
{
const struct inet_connection_sock *icsk = inet_csk(sk);
struct tcp_sock *tp = tcp_sk(sk);
@@ -72,13 +72,13 @@ static int tcp_remember_stamp(struct sock *sk)
}
if (release_it)
inet_putpeer(peer);
- return 1;
+ return true;
}
- return 0;
+ return false;
}
-static int tcp_tw_remember_stamp(struct inet_timewait_sock *tw)
+static bool tcp_tw_remember_stamp(struct inet_timewait_sock *tw)
{
struct sock *sk = (struct sock *) tw;
struct inet_peer *peer;
@@ -94,17 +94,17 @@ static int tcp_tw_remember_stamp(struct inet_timewait_sock *tw)
peer->tcp_ts = tcptw->tw_ts_recent;
}
inet_putpeer(peer);
- return 1;
+ return true;
}
- return 0;
+ return false;
}
-static __inline__ int tcp_in_window(u32 seq, u32 end_seq, u32 s_win, u32 e_win)
+static bool tcp_in_window(u32 seq, u32 end_seq, u32 s_win, u32 e_win)
{
if (seq == s_win)
- return 1;
+ return true;
if (after(end_seq, s_win) && before(seq, e_win))
- return 1;
+ return true;
return seq == e_win && seq == end_seq;
}
@@ -143,7 +143,7 @@ tcp_timewait_state_process(struct inet_timewait_sock *tw, struct sk_buff *skb,
struct tcp_options_received tmp_opt;
const u8 *hash_location;
struct tcp_timewait_sock *tcptw = tcp_twsk((struct sock *)tw);
- int paws_reject = 0;
+ bool paws_reject = false;
tmp_opt.saw_tstamp = 0;
if (th->doff > (sizeof(*th) >> 2) && tcptw->tw_ts_recent_stamp) {
@@ -316,7 +316,7 @@ void tcp_time_wait(struct sock *sk, int state, int timeo)
struct inet_timewait_sock *tw = NULL;
const struct inet_connection_sock *icsk = inet_csk(sk);
const struct tcp_sock *tp = tcp_sk(sk);
- int recycle_ok = 0;
+ bool recycle_ok = false;
if (tcp_death_row.sysctl_tw_recycle && tp->rx_opt.ts_recent_stamp)
recycle_ok = tcp_remember_stamp(sk);
@@ -575,7 +575,7 @@ struct sock *tcp_check_req(struct sock *sk, struct sk_buff *skb,
struct sock *child;
const struct tcphdr *th = tcp_hdr(skb);
__be32 flg = tcp_flag_word(th) & (TCP_FLAG_RST|TCP_FLAG_SYN|TCP_FLAG_ACK);
- int paws_reject = 0;
+ bool paws_reject = false;
tmp_opt.saw_tstamp = 0;
if (th->doff > (sizeof(struct tcphdr)>>2)) {
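tcp_in_window(), converted to bool above, is simply an overlap test between the segment [seq, end_seq] and the receive window [s_win, e_win]. A minimal standalone restatement of the same test (the sequence helpers are written out here; none of this is the kernel's code):

#include <stdbool.h>
#include <stdint.h>

/* 32-bit wrap-safe sequence comparisons */
static bool seq_after(uint32_t a, uint32_t b)  { return (int32_t)(a - b) > 0; }
static bool seq_before(uint32_t a, uint32_t b) { return seq_after(b, a); }

/* true if any part of [seq, end_seq] falls inside the window [s_win, e_win] */
static bool in_window(uint32_t seq, uint32_t end_seq,
                      uint32_t s_win, uint32_t e_win)
{
    if (seq == s_win)
        return true;
    if (seq_after(end_seq, s_win) && seq_before(seq, e_win))
        return true;
    /* zero-length segment sitting exactly at the right window edge */
    return seq == e_win && seq == end_seq;
}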
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index d94733009923..803cbfe82fbc 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -34,6 +34,8 @@
*
*/
+#define pr_fmt(fmt) "TCP: " fmt
+
#include <net/tcp.h>
#include <linux/compiler.h>
@@ -368,7 +370,7 @@ static void tcp_init_nondata_skb(struct sk_buff *skb, u32 seq, u8 flags)
TCP_SKB_CB(skb)->end_seq = seq;
}
-static inline int tcp_urg_mode(const struct tcp_sock *tp)
+static inline bool tcp_urg_mode(const struct tcp_sock *tp)
{
return tp->snd_una != tp->snd_up;
}
@@ -1389,20 +1391,20 @@ static int tcp_init_tso_segs(const struct sock *sk, struct sk_buff *skb,
}
/* Minshall's variant of the Nagle send check. */
-static inline int tcp_minshall_check(const struct tcp_sock *tp)
+static inline bool tcp_minshall_check(const struct tcp_sock *tp)
{
return after(tp->snd_sml, tp->snd_una) &&
!after(tp->snd_sml, tp->snd_nxt);
}
-/* Return 0, if packet can be sent now without violation Nagle's rules:
+/* Return false if the packet can be sent now without violating Nagle's rules:
* 1. It is full sized.
* 2. Or it contains FIN. (already checked by caller)
* 3. Or TCP_CORK is not set, and TCP_NODELAY is set.
* 4. Or TCP_CORK is not set, and all sent packets are ACKed.
* With Minshall's modification: all sent small packets are ACKed.
*/
-static inline int tcp_nagle_check(const struct tcp_sock *tp,
+static inline bool tcp_nagle_check(const struct tcp_sock *tp,
const struct sk_buff *skb,
unsigned int mss_now, int nonagle)
{
@@ -1411,11 +1413,11 @@ static inline int tcp_nagle_check(const struct tcp_sock *tp,
(!nonagle && tp->packets_out && tcp_minshall_check(tp)));
}
-/* Return non-zero if the Nagle test allows this packet to be
+/* Return true if the Nagle test allows this packet to be
* sent now.
*/
-static inline int tcp_nagle_test(const struct tcp_sock *tp, const struct sk_buff *skb,
- unsigned int cur_mss, int nonagle)
+static inline bool tcp_nagle_test(const struct tcp_sock *tp, const struct sk_buff *skb,
+ unsigned int cur_mss, int nonagle)
{
/* Nagle rule does not apply to frames, which sit in the middle of the
* write_queue (they have no chances to get new data).
@@ -1424,24 +1426,25 @@ static inline int tcp_nagle_test(const struct tcp_sock *tp, const struct sk_buff
* argument based upon the location of SKB in the send queue.
*/
if (nonagle & TCP_NAGLE_PUSH)
- return 1;
+ return true;
/* Don't use the nagle rule for urgent data (or for the final FIN).
* Nagle can be ignored during F-RTO too (see RFC4138).
*/
if (tcp_urg_mode(tp) || (tp->frto_counter == 2) ||
(TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN))
- return 1;
+ return true;
if (!tcp_nagle_check(tp, skb, cur_mss, nonagle))
- return 1;
+ return true;
- return 0;
+ return false;
}
/* Does at least the first segment of SKB fit into the send window? */
-static inline int tcp_snd_wnd_test(const struct tcp_sock *tp, const struct sk_buff *skb,
- unsigned int cur_mss)
+static bool tcp_snd_wnd_test(const struct tcp_sock *tp,
+ const struct sk_buff *skb,
+ unsigned int cur_mss)
{
u32 end_seq = TCP_SKB_CB(skb)->end_seq;
@@ -1474,7 +1477,7 @@ static unsigned int tcp_snd_test(const struct sock *sk, struct sk_buff *skb,
}
/* Test if sending is allowed right now. */
-int tcp_may_send_now(struct sock *sk)
+bool tcp_may_send_now(struct sock *sk)
{
const struct tcp_sock *tp = tcp_sk(sk);
struct sk_buff *skb = tcp_send_head(sk);
@@ -1544,7 +1547,7 @@ static int tso_fragment(struct sock *sk, struct sk_buff *skb, unsigned int len,
*
* This algorithm is from John Heffner.
*/
-static int tcp_tso_should_defer(struct sock *sk, struct sk_buff *skb)
+static bool tcp_tso_should_defer(struct sock *sk, struct sk_buff *skb)
{
struct tcp_sock *tp = tcp_sk(sk);
const struct inet_connection_sock *icsk = inet_csk(sk);
@@ -1604,11 +1607,11 @@ static int tcp_tso_should_defer(struct sock *sk, struct sk_buff *skb)
/* Ok, it looks like it is advisable to defer. */
tp->tso_deferred = 1 | (jiffies << 1);
- return 1;
+ return true;
send_now:
tp->tso_deferred = 0;
- return 0;
+ return false;
}
/* Create a new MTU probe if we are ready.
@@ -1750,11 +1753,11 @@ static int tcp_mtu_probe(struct sock *sk)
* snd_up-64k-mss .. snd_up cannot be large. However, taking into
* account rare use of URG, this is not a big flaw.
*
- * Returns 1, if no segments are in flight and we have queued segments, but
- * cannot send anything now because of SWS or another problem.
+ * Returns true if no segments are in flight and we have queued segments,
+ * but cannot send anything now because of SWS or another problem.
*/
-static int tcp_write_xmit(struct sock *sk, unsigned int mss_now, int nonagle,
- int push_one, gfp_t gfp)
+static bool tcp_write_xmit(struct sock *sk, unsigned int mss_now, int nonagle,
+ int push_one, gfp_t gfp)
{
struct tcp_sock *tp = tcp_sk(sk);
struct sk_buff *skb;
@@ -1768,7 +1771,7 @@ static int tcp_write_xmit(struct sock *sk, unsigned int mss_now, int nonagle,
/* Do MTU probing. */
result = tcp_mtu_probe(sk);
if (!result) {
- return 0;
+ return false;
} else if (result > 0) {
sent_pkts = 1;
}
@@ -1827,7 +1830,7 @@ static int tcp_write_xmit(struct sock *sk, unsigned int mss_now, int nonagle,
if (likely(sent_pkts)) {
tcp_cwnd_validate(sk);
- return 0;
+ return false;
}
return !tp->packets_out && tcp_send_head(sk);
}
@@ -2026,22 +2029,22 @@ static void tcp_collapse_retrans(struct sock *sk, struct sk_buff *skb)
}
/* Check if coalescing SKBs is legal. */
-static int tcp_can_collapse(const struct sock *sk, const struct sk_buff *skb)
+static bool tcp_can_collapse(const struct sock *sk, const struct sk_buff *skb)
{
if (tcp_skb_pcount(skb) > 1)
- return 0;
+ return false;
/* TODO: SACK collapsing could be used to remove this condition */
if (skb_shinfo(skb)->nr_frags != 0)
- return 0;
+ return false;
if (skb_cloned(skb))
- return 0;
+ return false;
if (skb == tcp_send_head(sk))
- return 0;
+ return false;
/* Some heuristics for collapsing over SACK'd could be invented */
if (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_ACKED)
- return 0;
+ return false;
- return 1;
+ return true;
}
/* Collapse packets in the retransmit queue to create
@@ -2052,7 +2055,7 @@ static void tcp_retrans_try_collapse(struct sock *sk, struct sk_buff *to,
{
struct tcp_sock *tp = tcp_sk(sk);
struct sk_buff *skb = to, *tmp;
- int first = 1;
+ bool first = true;
if (!sysctl_tcp_retrans_collapse)
return;
@@ -2066,7 +2069,7 @@ static void tcp_retrans_try_collapse(struct sock *sk, struct sk_buff *to,
space -= skb->len;
if (first) {
- first = 0;
+ first = false;
continue;
}
@@ -2181,8 +2184,7 @@ int tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb)
#if FASTRETRANS_DEBUG > 0
if (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_RETRANS) {
- if (net_ratelimit())
- printk(KERN_DEBUG "retrans_out leaked.\n");
+ net_dbg_ratelimited("retrans_out leaked\n");
}
#endif
if (!tp->retrans_out)
@@ -2207,18 +2209,18 @@ int tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb)
/* Check if forward retransmits are possible in the current
* window/congestion state.
*/
-static int tcp_can_forward_retransmit(struct sock *sk)
+static bool tcp_can_forward_retransmit(struct sock *sk)
{
const struct inet_connection_sock *icsk = inet_csk(sk);
const struct tcp_sock *tp = tcp_sk(sk);
/* Forward retransmissions are possible only during Recovery. */
if (icsk->icsk_ca_state != TCP_CA_Recovery)
- return 0;
+ return false;
/* No forward retransmissions in Reno are possible. */
if (tcp_is_reno(tp))
- return 0;
+ return false;
/* Yeah, we have to make difficult choice between forward transmission
* and retransmission... Both ways have their merits...
@@ -2229,9 +2231,9 @@ static int tcp_can_forward_retransmit(struct sock *sk)
*/
if (tcp_may_send_now(sk))
- return 0;
+ return false;
- return 1;
+ return true;
}
/* This gets called after a retransmit timeout, and the initially
@@ -2416,7 +2418,7 @@ int tcp_send_synack(struct sock *sk)
skb = tcp_write_queue_head(sk);
if (skb == NULL || !(TCP_SKB_CB(skb)->tcp_flags & TCPHDR_SYN)) {
- printk(KERN_DEBUG "tcp_send_synack: wrong queue state\n");
+ pr_debug("%s: wrong queue state\n", __func__);
return -EFAULT;
}
if (!(TCP_SKB_CB(skb)->tcp_flags & TCPHDR_ACK)) {
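The Nagle-related predicates converted to bool in the tcp_output.c hunks above (tcp_minshall_check(), tcp_nagle_check(), tcp_nagle_test()) encode the classic Nagle rule with Minshall's modification. The userspace sketch below restates the send/hold decision under illustrative names and fields; the TCP_NAGLE_PUSH and F-RTO special cases are omitted, and nothing here is the kernel implementation:

#include <stdbool.h>
#include <stdint.h>

/* Illustrative state; these are not the kernel's struct tcp_sock fields. */
struct toy_tcp {
    uint32_t snd_una, snd_nxt, snd_sml;  /* una = oldest unacked,
                                            sml = end of last small segment */
    uint32_t packets_out;
    bool nodelay, cork, urg_mode, fin;
};

static bool seq_after(uint32_t a, uint32_t b) { return (int32_t)(a - b) > 0; }

/* Minshall's check: a not-yet-acknowledged small segment is still out. */
static bool minshall_check(const struct toy_tcp *tp)
{
    return seq_after(tp->snd_sml, tp->snd_una) &&
           !seq_after(tp->snd_sml, tp->snd_nxt);
}

/* true => the (possibly sub-MSS) segment may be sent now */
static bool nagle_allows_send(const struct toy_tcp *tp,
                              uint32_t skb_len, uint32_t mss_now)
{
    if (tp->urg_mode || tp->fin)
        return true;                 /* urgent data and FIN bypass Nagle */
    if (skb_len >= mss_now)
        return true;                 /* full-sized segments always go */
    if (tp->cork)
        return false;                /* TCP_CORK holds partial segments */
    if (tp->nodelay)
        return true;                 /* TCP_NODELAY disables Nagle */
    /* classic Nagle + Minshall: hold only while a small segment is unacked */
    return !(tp->packets_out && minshall_check(tp));
}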
diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
index 279fd0846302..609397ee78fb 100644
--- a/net/ipv4/udp.c
+++ b/net/ipv4/udp.c
@@ -2173,9 +2173,15 @@ void udp4_proc_exit(void)
static __initdata unsigned long uhash_entries;
static int __init set_uhash_entries(char *str)
{
+ ssize_t ret;
+
if (!str)
return 0;
- uhash_entries = simple_strtoul(str, &str, 0);
+
+ ret = kstrtoul(str, 0, &uhash_entries);
+ if (ret)
+ return 0;
+
if (uhash_entries && uhash_entries < UDP_HTABLE_SIZE_MIN)
uhash_entries = UDP_HTABLE_SIZE_MIN;
return 1;
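The udp.c hunk swaps simple_strtoul() for kstrtoul(), which fails on overflow and on trailing garbage instead of silently stopping at the first non-digit. A userspace analogue of the stricter behaviour, purely for illustration (parse_ulong is a hypothetical helper, not a kernel API):

#include <errno.h>
#include <stdbool.h>
#include <stdlib.h>

static bool parse_ulong(const char *s, unsigned long *out)
{
    char *end;

    errno = 0;
    *out = strtoul(s, &end, 0);      /* base 0: accept decimal, octal, hex */
    /* reject overflow, empty input and any trailing characters */
    return errno == 0 && end != s && *end == '\0';
}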
diff --git a/net/ipv6/Kconfig b/net/ipv6/Kconfig
index 36d7437ac054..5728695b5449 100644
--- a/net/ipv6/Kconfig
+++ b/net/ipv6/Kconfig
@@ -69,7 +69,7 @@ config IPV6_OPTIMISTIC_DAD
config INET6_AH
tristate "IPv6: AH transformation"
- select XFRM
+ select XFRM_ALGO
select CRYPTO
select CRYPTO_HMAC
select CRYPTO_MD5
@@ -81,7 +81,7 @@ config INET6_AH
config INET6_ESP
tristate "IPv6: ESP transformation"
- select XFRM
+ select XFRM_ALGO
select CRYPTO
select CRYPTO_AUTHENC
select CRYPTO_HMAC
diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
index e3b3421f8dad..8f6411c97189 100644
--- a/net/ipv6/addrconf.c
+++ b/net/ipv6/addrconf.c
@@ -38,6 +38,8 @@
* status etc.
*/
+#define pr_fmt(fmt) "IPv6: " fmt
+
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/kernel.h>
@@ -66,6 +68,7 @@
#include <net/sock.h>
#include <net/snmp.h>
+#include <net/af_ieee802154.h>
#include <net/ipv6.h>
#include <net/protocol.h>
#include <net/ndisc.h>
@@ -326,11 +329,11 @@ void in6_dev_finish_destroy(struct inet6_dev *idev)
WARN_ON(idev->mc_list != NULL);
#ifdef NET_REFCNT_DEBUG
- printk(KERN_DEBUG "in6_dev_finish_destroy: %s\n", dev ? dev->name : "NIL");
+ pr_debug("%s: %s\n", __func__, dev ? dev->name : "NIL");
#endif
dev_put(dev);
if (!idev->dead) {
- pr_warning("Freeing alive inet6 device %p\n", idev);
+ pr_warn("Freeing alive inet6 device %p\n", idev);
return;
}
snmp6_free_dev(idev);
@@ -371,7 +374,7 @@ static struct inet6_dev *ipv6_add_dev(struct net_device *dev)
if (snmp6_alloc_dev(ndev) < 0) {
ADBG((KERN_WARNING
- "%s(): cannot allocate memory for statistics; dev=%s.\n",
+ "%s: cannot allocate memory for statistics; dev=%s.\n",
__func__, dev->name));
neigh_parms_release(&nd_tbl, ndev->nd_parms);
dev_put(dev);
@@ -381,7 +384,7 @@ static struct inet6_dev *ipv6_add_dev(struct net_device *dev)
if (snmp6_register_dev(ndev) < 0) {
ADBG((KERN_WARNING
- "%s(): cannot create /proc/net/dev_snmp6/%s\n",
+ "%s: cannot create /proc/net/dev_snmp6/%s\n",
__func__, dev->name));
neigh_parms_release(&nd_tbl, ndev->nd_parms);
ndev->dead = 1;
@@ -399,9 +402,7 @@ static struct inet6_dev *ipv6_add_dev(struct net_device *dev)
#if defined(CONFIG_IPV6_SIT) || defined(CONFIG_IPV6_SIT_MODULE)
if (dev->type == ARPHRD_SIT && (dev->priv_flags & IFF_ISATAP)) {
- printk(KERN_INFO
- "%s: Disabled Multicast RS\n",
- dev->name);
+ pr_info("%s: Disabled Multicast RS\n", dev->name);
ndev->cnf.rtr_solicits = 0;
}
#endif
@@ -541,7 +542,7 @@ void inet6_ifa_finish_destroy(struct inet6_ifaddr *ifp)
WARN_ON(!hlist_unhashed(&ifp->addr_lst));
#ifdef NET_REFCNT_DEBUG
- printk(KERN_DEBUG "inet6_ifa_finish_destroy\n");
+ pr_debug("%s\n", __func__);
#endif
in6_dev_put(ifp->idev);
@@ -550,7 +551,7 @@ void inet6_ifa_finish_destroy(struct inet6_ifaddr *ifp)
pr_notice("Timer is still running, when freeing ifa=%p\n", ifp);
if (ifp->state != INET6_IFADDR_STATE_DEAD) {
- pr_warning("Freeing alive inet6 address %p\n", ifp);
+ pr_warn("Freeing alive inet6 address %p\n", ifp);
return;
}
dst_release(&ifp->rt->dst);
@@ -840,8 +841,7 @@ retry:
in6_dev_hold(idev);
if (idev->cnf.use_tempaddr <= 0) {
write_unlock(&idev->lock);
- printk(KERN_INFO
- "ipv6_create_tempaddr(): use_tempaddr is disabled.\n");
+ pr_info("%s: use_tempaddr is disabled\n", __func__);
in6_dev_put(idev);
ret = -1;
goto out;
@@ -851,8 +851,8 @@ retry:
idev->cnf.use_tempaddr = -1; /*XXX*/
spin_unlock_bh(&ifp->lock);
write_unlock(&idev->lock);
- printk(KERN_WARNING
- "ipv6_create_tempaddr(): regeneration time exceeded. disabled temporary address support.\n");
+ pr_warn("%s: regeneration time exceeded - disabled temporary address support\n",
+ __func__);
in6_dev_put(idev);
ret = -1;
goto out;
@@ -862,8 +862,8 @@ retry:
if (__ipv6_try_regen_rndid(idev, tmpaddr) < 0) {
spin_unlock_bh(&ifp->lock);
write_unlock(&idev->lock);
- printk(KERN_WARNING
- "ipv6_create_tempaddr(): regeneration of randomized interface id failed.\n");
+ pr_warn("%s: regeneration of randomized interface id failed\n",
+ __func__);
in6_ifa_put(ifp);
in6_dev_put(idev);
ret = -1;
@@ -913,8 +913,7 @@ retry:
if (!ift || IS_ERR(ift)) {
in6_ifa_put(ifp);
in6_dev_put(idev);
- printk(KERN_INFO
- "ipv6_create_tempaddr(): retry temporary address regeneration.\n");
+ pr_info("%s: retry temporary address regeneration\n", __func__);
tmpaddr = &addr;
write_lock(&idev->lock);
goto retry;
@@ -1414,9 +1413,8 @@ void addrconf_dad_failure(struct inet6_ifaddr *ifp)
return;
}
- if (net_ratelimit())
- printk(KERN_INFO "%s: IPv6 duplicate address %pI6c detected!\n",
- ifp->idev->dev->name, &ifp->addr);
+ net_info_ratelimited("%s: IPv6 duplicate address %pI6c detected!\n",
+ ifp->idev->dev->name, &ifp->addr);
if (idev->cnf.accept_dad > 1 && !idev->cnf.disable_ipv6) {
struct in6_addr addr;
@@ -1429,7 +1427,7 @@ void addrconf_dad_failure(struct inet6_ifaddr *ifp)
/* DAD failed for link-local based on MAC address */
idev->cnf.disable_ipv6 = 1;
- printk(KERN_INFO "%s: IPv6 being disabled!\n",
+ pr_info("%s: IPv6 being disabled!\n",
ifp->idev->dev->name);
}
}
@@ -1514,6 +1512,14 @@ static int addrconf_ifid_eui48(u8 *eui, struct net_device *dev)
return 0;
}
+static int addrconf_ifid_eui64(u8 *eui, struct net_device *dev)
+{
+ if (dev->addr_len != IEEE802154_ADDR_LEN)
+ return -1;
+ memcpy(eui, dev->dev_addr, 8);
+ return 0;
+}
+
static int addrconf_ifid_arcnet(u8 *eui, struct net_device *dev)
{
/* XXX: inherit EUI-64 from other interface -- yoshfuji */
@@ -1567,7 +1573,6 @@ static int ipv6_generate_eui64(u8 *eui, struct net_device *dev)
switch (dev->type) {
case ARPHRD_ETHER:
case ARPHRD_FDDI:
- case ARPHRD_IEEE802_TR:
return addrconf_ifid_eui48(eui, dev);
case ARPHRD_ARCNET:
return addrconf_ifid_arcnet(eui, dev);
@@ -1577,6 +1582,8 @@ static int ipv6_generate_eui64(u8 *eui, struct net_device *dev)
return addrconf_ifid_sit(eui, dev);
case ARPHRD_IPGRE:
return addrconf_ifid_gre(eui, dev);
+ case ARPHRD_IEEE802154:
+ return addrconf_ifid_eui64(eui, dev);
}
return -1;
}
@@ -1650,9 +1657,8 @@ static void ipv6_regen_rndid(unsigned long data)
idev->cnf.regen_max_retry * idev->cnf.dad_transmits * idev->nd_parms->retrans_time -
idev->cnf.max_desync_factor * HZ;
if (time_before(expires, jiffies)) {
- printk(KERN_WARNING
- "ipv6_regen_rndid(): too short regeneration interval; timer disabled for %s.\n",
- idev->dev->name);
+ pr_warn("%s: too short regeneration interval; timer disabled for %s\n",
+ __func__, idev->dev->name);
goto out;
}
@@ -1836,16 +1842,15 @@ void addrconf_prefix_rcv(struct net_device *dev, u8 *opt, int len, bool sllao)
prefered_lft = ntohl(pinfo->prefered);
if (prefered_lft > valid_lft) {
- if (net_ratelimit())
- printk(KERN_WARNING "addrconf: prefix option has invalid lifetime\n");
+ net_warn_ratelimited("addrconf: prefix option has invalid lifetime\n");
return;
}
in6_dev = in6_dev_get(dev);
if (in6_dev == NULL) {
- if (net_ratelimit())
- printk(KERN_DEBUG "addrconf: device %s not configured\n", dev->name);
+ net_dbg_ratelimited("addrconf: device %s not configured\n",
+ dev->name);
return;
}
@@ -1920,9 +1925,8 @@ void addrconf_prefix_rcv(struct net_device *dev, u8 *opt, int len, bool sllao)
}
goto ok;
}
- if (net_ratelimit())
- printk(KERN_DEBUG "IPv6 addrconf: prefix with wrong length %d\n",
- pinfo->prefix_len);
+ net_dbg_ratelimited("IPv6 addrconf: prefix with wrong length %d\n",
+ pinfo->prefix_len);
in6_dev_put(in6_dev);
return;
@@ -2400,7 +2404,7 @@ static void init_loopback(struct net_device *dev)
ASSERT_RTNL();
if ((idev = ipv6_find_idev(dev)) == NULL) {
- printk(KERN_DEBUG "init loopback: add_dev failed\n");
+ pr_debug("%s: add_dev failed\n", __func__);
return;
}
@@ -2436,9 +2440,9 @@ static void addrconf_dev_config(struct net_device *dev)
if ((dev->type != ARPHRD_ETHER) &&
(dev->type != ARPHRD_FDDI) &&
- (dev->type != ARPHRD_IEEE802_TR) &&
(dev->type != ARPHRD_ARCNET) &&
- (dev->type != ARPHRD_INFINIBAND)) {
+ (dev->type != ARPHRD_INFINIBAND) &&
+ (dev->type != ARPHRD_IEEE802154)) {
/* Alas, we support only Ethernet autoconfiguration. */
return;
}
@@ -2468,7 +2472,7 @@ static void addrconf_sit_config(struct net_device *dev)
*/
if ((idev = ipv6_find_idev(dev)) == NULL) {
- printk(KERN_DEBUG "init sit: add_dev failed\n");
+ pr_debug("%s: add_dev failed\n", __func__);
return;
}
@@ -2498,12 +2502,12 @@ static void addrconf_gre_config(struct net_device *dev)
struct inet6_dev *idev;
struct in6_addr addr;
- pr_info("ipv6: addrconf_gre_config(%s)\n", dev->name);
+ pr_info("%s(%s)\n", __func__, dev->name);
ASSERT_RTNL();
if ((idev = ipv6_find_idev(dev)) == NULL) {
- printk(KERN_DEBUG "init gre: add_dev failed\n");
+ pr_debug("%s: add_dev failed\n", __func__);
return;
}
@@ -2543,7 +2547,7 @@ static void ip6_tnl_add_linklocal(struct inet6_dev *idev)
if (!ipv6_inherit_linklocal(idev, link_dev))
return;
}
- printk(KERN_DEBUG "init ip6-ip6: add_linklocal failed\n");
+ pr_debug("init ip6-ip6: add_linklocal failed\n");
}
/*
@@ -2559,7 +2563,7 @@ static void addrconf_ip6_tnl_config(struct net_device *dev)
idev = addrconf_add_dev(dev);
if (IS_ERR(idev)) {
- printk(KERN_DEBUG "init ip6-ip6: add_dev failed\n");
+ pr_debug("init ip6-ip6: add_dev failed\n");
return;
}
ip6_tnl_add_linklocal(idev);
@@ -2590,9 +2594,7 @@ static int addrconf_notify(struct notifier_block *this, unsigned long event,
if (event == NETDEV_UP) {
if (!addrconf_qdisc_ok(dev)) {
/* device is not ready yet. */
- printk(KERN_INFO
- "ADDRCONF(NETDEV_UP): %s: "
- "link is not ready\n",
+ pr_info("ADDRCONF(NETDEV_UP): %s: link is not ready\n",
dev->name);
break;
}
@@ -2617,10 +2619,8 @@ static int addrconf_notify(struct notifier_block *this, unsigned long event,
idev->if_flags |= IF_READY;
}
- printk(KERN_INFO
- "ADDRCONF(NETDEV_CHANGE): %s: "
- "link becomes ready\n",
- dev->name);
+ pr_info("ADDRCONF(NETDEV_CHANGE): %s: link becomes ready\n",
+ dev->name);
run_pending = 1;
}
@@ -2891,8 +2891,7 @@ static void addrconf_rs_timer(unsigned long data)
* Note: we do not support deprecated "all on-link"
* assumption any longer.
*/
- printk(KERN_DEBUG "%s: no IPv6 routers present\n",
- idev->dev->name);
+ pr_debug("%s: no IPv6 routers present\n", idev->dev->name);
}
out:
@@ -4748,8 +4747,8 @@ int __init addrconf_init(void)
err = ipv6_addr_label_init();
if (err < 0) {
- printk(KERN_CRIT "IPv6 Addrconf:"
- " cannot initialize default policy table: %d.\n", err);
+ pr_crit("%s: cannot initialize default policy table: %d\n",
+ __func__, err);
goto out;
}
diff --git a/net/ipv6/addrlabel.c b/net/ipv6/addrlabel.c
index 2d8ddba9ee58..eb6a63632d3c 100644
--- a/net/ipv6/addrlabel.c
+++ b/net/ipv6/addrlabel.c
@@ -129,7 +129,7 @@ static void ip6addrlbl_free_rcu(struct rcu_head *h)
ip6addrlbl_free(container_of(h, struct ip6addrlbl_entry, rcu));
}
-static inline int ip6addrlbl_hold(struct ip6addrlbl_entry *p)
+static bool ip6addrlbl_hold(struct ip6addrlbl_entry *p)
{
return atomic_inc_not_zero(&p->refcnt);
}
@@ -141,20 +141,20 @@ static inline void ip6addrlbl_put(struct ip6addrlbl_entry *p)
}
/* Find label */
-static int __ip6addrlbl_match(struct net *net,
- struct ip6addrlbl_entry *p,
- const struct in6_addr *addr,
- int addrtype, int ifindex)
+static bool __ip6addrlbl_match(struct net *net,
+ const struct ip6addrlbl_entry *p,
+ const struct in6_addr *addr,
+ int addrtype, int ifindex)
{
if (!net_eq(ip6addrlbl_net(p), net))
- return 0;
+ return false;
if (p->ifindex && p->ifindex != ifindex)
- return 0;
+ return false;
if (p->addrtype && p->addrtype != addrtype)
- return 0;
+ return false;
if (!ipv6_prefix_equal(addr, &p->prefix, p->prefixlen))
- return 0;
- return 1;
+ return false;
+ return true;
}
static struct ip6addrlbl_entry *__ipv6_addr_label(struct net *net,
@@ -350,7 +350,7 @@ static int __net_init ip6addrlbl_net_init(struct net *net)
int err = 0;
int i;
- ADDRLABEL(KERN_DEBUG "%s()\n", __func__);
+ ADDRLABEL(KERN_DEBUG "%s\n", __func__);
for (i = 0; i < ARRAY_SIZE(ip6addrlbl_init_table); i++) {
int ret = ip6addrlbl_add(net,
@@ -456,8 +456,8 @@ static int ip6addrlbl_newdel(struct sk_buff *skb, struct nlmsghdr *nlh,
return err;
}
-static inline void ip6addrlbl_putmsg(struct nlmsghdr *nlh,
- int prefixlen, int ifindex, u32 lseq)
+static void ip6addrlbl_putmsg(struct nlmsghdr *nlh,
+ int prefixlen, int ifindex, u32 lseq)
{
struct ifaddrlblmsg *ifal = nlmsg_data(nlh);
ifal->ifal_family = AF_INET6;
diff --git a/net/ipv6/af_inet6.c b/net/ipv6/af_inet6.c
index 0ad046c7ae95..e22e6d88bac6 100644
--- a/net/ipv6/af_inet6.c
+++ b/net/ipv6/af_inet6.c
@@ -18,6 +18,7 @@
* 2 of the License, or (at your option) any later version.
*/
+#define pr_fmt(fmt) "IPv6: " fmt
#include <linux/module.h>
#include <linux/capability.h>
@@ -77,7 +78,7 @@ struct ipv6_params ipv6_defaults = {
.autoconf = 1,
};
-static int disable_ipv6_mod = 0;
+static int disable_ipv6_mod;
module_param_named(disable, disable_ipv6_mod, int, 0444);
MODULE_PARM_DESC(disable, "Disable IPv6 module such that it is non-functional");
@@ -256,7 +257,7 @@ out_rcu_unlock:
/* bind for INET6 API */
int inet6_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
{
- struct sockaddr_in6 *addr=(struct sockaddr_in6 *)uaddr;
+ struct sockaddr_in6 *addr = (struct sockaddr_in6 *)uaddr;
struct sock *sk = sock->sk;
struct inet_sock *inet = inet_sk(sk);
struct ipv6_pinfo *np = inet6_sk(sk);
@@ -390,7 +391,6 @@ out_unlock:
rcu_read_unlock();
goto out;
}
-
EXPORT_SYMBOL(inet6_bind);
int inet6_release(struct socket *sock)
@@ -408,7 +408,6 @@ int inet6_release(struct socket *sock)
return inet_release(sock);
}
-
EXPORT_SYMBOL(inet6_release);
void inet6_destroy_sock(struct sock *sk)
@@ -419,10 +418,12 @@ void inet6_destroy_sock(struct sock *sk)
/* Release rx options */
- if ((skb = xchg(&np->pktoptions, NULL)) != NULL)
+ skb = xchg(&np->pktoptions, NULL);
+ if (skb != NULL)
kfree_skb(skb);
- if ((skb = xchg(&np->rxpmtu, NULL)) != NULL)
+ skb = xchg(&np->rxpmtu, NULL);
+ if (skb != NULL)
kfree_skb(skb);
/* Free flowlabels */
@@ -430,10 +431,10 @@ void inet6_destroy_sock(struct sock *sk)
/* Free tx options */
- if ((opt = xchg(&np->opt, NULL)) != NULL)
+ opt = xchg(&np->opt, NULL);
+ if (opt != NULL)
sock_kfree_s(sk, opt, opt->tot_len);
}
-
EXPORT_SYMBOL_GPL(inet6_destroy_sock);
/*
@@ -443,7 +444,7 @@ EXPORT_SYMBOL_GPL(inet6_destroy_sock);
int inet6_getname(struct socket *sock, struct sockaddr *uaddr,
int *uaddr_len, int peer)
{
- struct sockaddr_in6 *sin=(struct sockaddr_in6 *)uaddr;
+ struct sockaddr_in6 *sin = (struct sockaddr_in6 *)uaddr;
struct sock *sk = sock->sk;
struct inet_sock *inet = inet_sk(sk);
struct ipv6_pinfo *np = inet6_sk(sk);
@@ -474,7 +475,6 @@ int inet6_getname(struct socket *sock, struct sockaddr *uaddr,
*uaddr_len = sizeof(*sin);
return 0;
}
-
EXPORT_SYMBOL(inet6_getname);
int inet6_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
@@ -482,8 +482,7 @@ int inet6_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
struct sock *sk = sock->sk;
struct net *net = sock_net(sk);
- switch(cmd)
- {
+ switch (cmd) {
case SIOCGSTAMP:
return sock_get_timestamp(sk, (struct timeval __user *)arg);
@@ -509,7 +508,6 @@ int inet6_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
/*NOTREACHED*/
return 0;
}
-
EXPORT_SYMBOL(inet6_ioctl);
const struct proto_ops inet6_stream_ops = {
@@ -615,25 +613,21 @@ out:
return ret;
out_permanent:
- printk(KERN_ERR "Attempt to override permanent protocol %d.\n",
- protocol);
+ pr_err("Attempt to override permanent protocol %d\n", protocol);
goto out;
out_illegal:
- printk(KERN_ERR
- "Ignoring attempt to register invalid socket type %d.\n",
+ pr_err("Ignoring attempt to register invalid socket type %d\n",
p->type);
goto out;
}
-
EXPORT_SYMBOL(inet6_register_protosw);
void
inet6_unregister_protosw(struct inet_protosw *p)
{
if (INET_PROTOSW_PERMANENT & p->flags) {
- printk(KERN_ERR
- "Attempt to unregister permanent protocol %d.\n",
+ pr_err("Attempt to unregister permanent protocol %d\n",
p->protocol);
} else {
spin_lock_bh(&inetsw6_lock);
@@ -643,7 +637,6 @@ inet6_unregister_protosw(struct inet_protosw *p)
synchronize_net();
}
}
-
EXPORT_SYMBOL(inet6_unregister_protosw);
int inet6_sk_rebuild_header(struct sock *sk)
@@ -683,13 +676,12 @@ int inet6_sk_rebuild_header(struct sock *sk)
return 0;
}
-
EXPORT_SYMBOL_GPL(inet6_sk_rebuild_header);
-int ipv6_opt_accepted(struct sock *sk, struct sk_buff *skb)
+bool ipv6_opt_accepted(const struct sock *sk, const struct sk_buff *skb)
{
- struct ipv6_pinfo *np = inet6_sk(sk);
- struct inet6_skb_parm *opt = IP6CB(skb);
+ const struct ipv6_pinfo *np = inet6_sk(sk);
+ const struct inet6_skb_parm *opt = IP6CB(skb);
if (np->rxopt.all) {
if ((opt->hop && (np->rxopt.bits.hopopts ||
@@ -701,11 +693,10 @@ int ipv6_opt_accepted(struct sock *sk, struct sk_buff *skb)
np->rxopt.bits.osrcrt)) ||
((opt->dst1 || opt->dst0) &&
(np->rxopt.bits.dstopts || np->rxopt.bits.odstopts)))
- return 1;
+ return true;
}
- return 0;
+ return false;
}
-
EXPORT_SYMBOL_GPL(ipv6_opt_accepted);
static int ipv6_gso_pull_exthdrs(struct sk_buff *skb, int proto)
@@ -1070,13 +1061,11 @@ static int __init inet6_init(void)
BUILD_BUG_ON(sizeof(struct inet6_skb_parm) > sizeof(dummy_skb->cb));
/* Register the socket-side information for inet6_create. */
- for(r = &inetsw6[0]; r < &inetsw6[SOCK_MAX]; ++r)
+ for (r = &inetsw6[0]; r < &inetsw6[SOCK_MAX]; ++r)
INIT_LIST_HEAD(r);
if (disable_ipv6_mod) {
- printk(KERN_INFO
- "IPv6: Loaded, but administratively disabled, "
- "reboot required to enable\n");
+ pr_info("Loaded, but administratively disabled, reboot required to enable\n");
goto out;
}
diff --git a/net/ipv6/ah6.c b/net/ipv6/ah6.c
index 2ae79dbeec2f..f1a4a2c28ed3 100644
--- a/net/ipv6/ah6.c
+++ b/net/ipv6/ah6.c
@@ -24,6 +24,8 @@
* This file is derived from net/ipv4/ah.c.
*/
+#define pr_fmt(fmt) "IPv6: " fmt
+
#include <crypto/hash.h>
#include <linux/module.h>
#include <linux/slab.h>
@@ -111,7 +113,7 @@ static inline struct scatterlist *ah_req_sg(struct crypto_ahash *ahash,
__alignof__(struct scatterlist));
}
-static int zero_out_mutable_opts(struct ipv6_opt_hdr *opthdr)
+static bool zero_out_mutable_opts(struct ipv6_opt_hdr *opthdr)
{
u8 *opt = (u8 *)opthdr;
int len = ipv6_optlen(opthdr);
@@ -125,7 +127,7 @@ static int zero_out_mutable_opts(struct ipv6_opt_hdr *opthdr)
switch (opt[off]) {
- case IPV6_TLV_PAD0:
+ case IPV6_TLV_PAD1:
optlen = 1;
break;
default:
@@ -143,10 +145,10 @@ static int zero_out_mutable_opts(struct ipv6_opt_hdr *opthdr)
len -= optlen;
}
if (len == 0)
- return 1;
+ return true;
bad:
- return 0;
+ return false;
}
#if defined(CONFIG_IPV6_MIP6) || defined(CONFIG_IPV6_MIP6_MODULE)
@@ -169,7 +171,7 @@ static void ipv6_rearrange_destopt(struct ipv6hdr *iph, struct ipv6_opt_hdr *des
switch (opt[off]) {
- case IPV6_TLV_PAD0:
+ case IPV6_TLV_PAD1:
optlen = 1;
break;
default:
@@ -189,8 +191,8 @@ static void ipv6_rearrange_destopt(struct ipv6hdr *iph, struct ipv6_opt_hdr *des
hao = (struct ipv6_destopt_hao *)&opt[off];
if (hao->length != sizeof(hao->addr)) {
- if (net_ratelimit())
- printk(KERN_WARNING "destopt hao: invalid header length: %u\n", hao->length);
+ net_warn_ratelimited("destopt hao: invalid header length: %u\n",
+ hao->length);
goto bad;
}
final_addr = hao->addr;
@@ -659,9 +661,9 @@ static int ah6_init_state(struct xfrm_state *x)
if (aalg_desc->uinfo.auth.icv_fullbits/8 !=
crypto_ahash_digestsize(ahash)) {
- printk(KERN_INFO "AH: %s digestsize %u != %hu\n",
- x->aalg->alg_name, crypto_ahash_digestsize(ahash),
- aalg_desc->uinfo.auth.icv_fullbits/8);
+ pr_info("AH: %s digestsize %u != %hu\n",
+ x->aalg->alg_name, crypto_ahash_digestsize(ahash),
+ aalg_desc->uinfo.auth.icv_fullbits/8);
goto error;
}
@@ -727,12 +729,12 @@ static const struct inet6_protocol ah6_protocol = {
static int __init ah6_init(void)
{
if (xfrm_register_type(&ah6_type, AF_INET6) < 0) {
- printk(KERN_INFO "ipv6 ah init: can't add xfrm type\n");
+ pr_info("%s: can't add xfrm type\n", __func__);
return -EAGAIN;
}
if (inet6_add_protocol(&ah6_protocol, IPPROTO_AH) < 0) {
- printk(KERN_INFO "ipv6 ah init: can't add protocol\n");
+ pr_info("%s: can't add protocol\n", __func__);
xfrm_unregister_type(&ah6_type, AF_INET6);
return -EAGAIN;
}
@@ -743,10 +745,10 @@ static int __init ah6_init(void)
static void __exit ah6_fini(void)
{
if (inet6_del_protocol(&ah6_protocol, IPPROTO_AH) < 0)
- printk(KERN_INFO "ipv6 ah close: can't remove protocol\n");
+ pr_info("%s: can't remove protocol\n", __func__);
if (xfrm_unregister_type(&ah6_type, AF_INET6) < 0)
- printk(KERN_INFO "ipv6 ah close: can't remove xfrm type\n");
+ pr_info("%s: can't remove xfrm type\n", __func__);
}
diff --git a/net/ipv6/anycast.c b/net/ipv6/anycast.c
index db00d27ffb16..cdf02be5f191 100644
--- a/net/ipv6/anycast.c
+++ b/net/ipv6/anycast.c
@@ -342,7 +342,7 @@ static int ipv6_dev_ac_dec(struct net_device *dev, const struct in6_addr *addr)
* check if the interface has this anycast address
* called with rcu_read_lock()
*/
-static int ipv6_chk_acast_dev(struct net_device *dev, const struct in6_addr *addr)
+static bool ipv6_chk_acast_dev(struct net_device *dev, const struct in6_addr *addr)
{
struct inet6_dev *idev;
struct ifacaddr6 *aca;
@@ -356,16 +356,16 @@ static int ipv6_chk_acast_dev(struct net_device *dev, const struct in6_addr *add
read_unlock_bh(&idev->lock);
return aca != NULL;
}
- return 0;
+ return false;
}
/*
* check if given interface (or any, if dev==0) has this anycast address
*/
-int ipv6_chk_acast_addr(struct net *net, struct net_device *dev,
- const struct in6_addr *addr)
+bool ipv6_chk_acast_addr(struct net *net, struct net_device *dev,
+ const struct in6_addr *addr)
{
- int found = 0;
+ bool found = false;
rcu_read_lock();
if (dev)
@@ -373,7 +373,7 @@ int ipv6_chk_acast_addr(struct net *net, struct net_device *dev,
else
for_each_netdev_rcu(net, dev)
if (ipv6_chk_acast_dev(dev, addr)) {
- found = 1;
+ found = true;
break;
}
rcu_read_unlock();
diff --git a/net/ipv6/datagram.c b/net/ipv6/datagram.c
index b8b61ac88bc2..be2b67d631e5 100644
--- a/net/ipv6/datagram.c
+++ b/net/ipv6/datagram.c
@@ -34,9 +34,9 @@
#include <linux/errqueue.h>
#include <asm/uaccess.h>
-static inline int ipv6_mapped_addr_any(const struct in6_addr *a)
+static bool ipv6_mapped_addr_any(const struct in6_addr *a)
{
- return (ipv6_addr_v4mapped(a) && (a->s6_addr32[3] == 0));
+ return ipv6_addr_v4mapped(a) && (a->s6_addr32[3] == 0);
}
int ip6_datagram_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
diff --git a/net/ipv6/esp6.c b/net/ipv6/esp6.c
index 1ac7938dd9ec..1e62b7557b00 100644
--- a/net/ipv6/esp6.c
+++ b/net/ipv6/esp6.c
@@ -24,6 +24,8 @@
* This file is derived from net/ipv4/esp.c
*/
+#define pr_fmt(fmt) "IPv6: " fmt
+
#include <crypto/aead.h>
#include <crypto/authenc.h>
#include <linux/err.h>
@@ -442,8 +444,8 @@ static void esp6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
esph->spi, IPPROTO_ESP, AF_INET6);
if (!x)
return;
- printk(KERN_DEBUG "pmtu discovery on SA ESP/%08x/%pI6\n",
- ntohl(esph->spi), &iph->daddr);
+ pr_debug("pmtu discovery on SA ESP/%08x/%pI6\n",
+ ntohl(esph->spi), &iph->daddr);
xfrm_state_put(x);
}
@@ -651,11 +653,11 @@ static const struct inet6_protocol esp6_protocol = {
static int __init esp6_init(void)
{
if (xfrm_register_type(&esp6_type, AF_INET6) < 0) {
- printk(KERN_INFO "ipv6 esp init: can't add xfrm type\n");
+ pr_info("%s: can't add xfrm type\n", __func__);
return -EAGAIN;
}
if (inet6_add_protocol(&esp6_protocol, IPPROTO_ESP) < 0) {
- printk(KERN_INFO "ipv6 esp init: can't add protocol\n");
+ pr_info("%s: can't add protocol\n", __func__);
xfrm_unregister_type(&esp6_type, AF_INET6);
return -EAGAIN;
}
@@ -666,9 +668,9 @@ static int __init esp6_init(void)
static void __exit esp6_fini(void)
{
if (inet6_del_protocol(&esp6_protocol, IPPROTO_ESP) < 0)
- printk(KERN_INFO "ipv6 esp close: can't remove protocol\n");
+ pr_info("%s: can't remove protocol\n", __func__);
if (xfrm_unregister_type(&esp6_type, AF_INET6) < 0)
- printk(KERN_INFO "ipv6 esp close: can't remove xfrm type\n");
+ pr_info("%s: can't remove xfrm type\n", __func__);
}
module_init(esp6_init);
diff --git a/net/ipv6/exthdrs.c b/net/ipv6/exthdrs.c
index a93bd231eca1..6447dc49429f 100644
--- a/net/ipv6/exthdrs.c
+++ b/net/ipv6/exthdrs.c
@@ -75,7 +75,7 @@ int ipv6_find_tlv(struct sk_buff *skb, int offset, int type)
return offset;
switch (opttype) {
- case IPV6_TLV_PAD0:
+ case IPV6_TLV_PAD1:
optlen = 1;
break;
default:
@@ -96,14 +96,14 @@ EXPORT_SYMBOL_GPL(ipv6_find_tlv);
/*
* Parsing tlv encoded headers.
*
- * Parsing function "func" returns 1, if parsing succeed
- * and 0, if it failed.
+ * Parsing function "func" returns true if parsing succeeded
+ * and false if it failed.
* It MUST NOT touch skb->h.
*/
struct tlvtype_proc {
int type;
- int (*func)(struct sk_buff *skb, int offset);
+ bool (*func)(struct sk_buff *skb, int offset);
};
/*********************
@@ -112,11 +112,11 @@ struct tlvtype_proc {
/* An unknown option is detected, decide what to do */
-static int ip6_tlvopt_unknown(struct sk_buff *skb, int optoff)
+static bool ip6_tlvopt_unknown(struct sk_buff *skb, int optoff)
{
switch ((skb_network_header(skb)[optoff] & 0xC0) >> 6) {
case 0: /* ignore */
- return 1;
+ return true;
case 1: /* drop packet */
break;
@@ -129,21 +129,22 @@ static int ip6_tlvopt_unknown(struct sk_buff *skb, int optoff)
break;
case 2: /* send ICMP PARM PROB regardless and drop packet */
icmpv6_param_prob(skb, ICMPV6_UNK_OPTION, optoff);
- return 0;
+ return false;
}
kfree_skb(skb);
- return 0;
+ return false;
}
/* Parse tlv encoded option header (hop-by-hop or destination) */
-static int ip6_parse_tlv(struct tlvtype_proc *procs, struct sk_buff *skb)
+static bool ip6_parse_tlv(const struct tlvtype_proc *procs, struct sk_buff *skb)
{
- struct tlvtype_proc *curr;
+ const struct tlvtype_proc *curr;
const unsigned char *nh = skb_network_header(skb);
int off = skb_network_header_len(skb);
int len = (skb_transport_header(skb)[1] + 1) << 3;
+ int padlen = 0;
if (skb_transport_offset(skb) + len > skb_headlen(skb))
goto bad;
@@ -156,8 +157,11 @@ static int ip6_parse_tlv(struct tlvtype_proc *procs, struct sk_buff *skb)
int i;
switch (nh[off]) {
- case IPV6_TLV_PAD0:
+ case IPV6_TLV_PAD1:
optlen = 1;
+ padlen++;
+ if (padlen > 7)
+ goto bad;
break;
case IPV6_TLV_PADN:
@@ -166,7 +170,8 @@ static int ip6_parse_tlv(struct tlvtype_proc *procs, struct sk_buff *skb)
* of 8. 7 is therefore the highest valid value.
* See also RFC 4942, Section 2.1.9.5.
*/
- if (optlen > 7)
+ padlen += optlen;
+ if (padlen > 7)
goto bad;
/* RFC 4942 recommends that receiving hosts
* actively check that the PadN payload contains
@@ -186,25 +191,33 @@ static int ip6_parse_tlv(struct tlvtype_proc *procs, struct sk_buff *skb)
/* type specific length/alignment
checks will be performed in the
func(). */
- if (curr->func(skb, off) == 0)
- return 0;
+ if (curr->func(skb, off) == false)
+ return false;
break;
}
}
if (curr->type < 0) {
if (ip6_tlvopt_unknown(skb, off) == 0)
- return 0;
+ return false;
}
+ padlen = 0;
break;
}
off += optlen;
len -= optlen;
}
+ /* This case will not be caught by the above check since its padding
+ * length is smaller than 7:
+ * 1 byte NH + 1 byte Length + 6 bytes Padding
+ */
+ if ((padlen == 6) && ((off - skb_network_header_len(skb)) == 8))
+ goto bad;
+
if (len == 0)
- return 1;
+ return true;
bad:
kfree_skb(skb);
- return 0;
+ return false;
}
/*****************************
@@ -212,7 +225,7 @@ bad:
*****************************/
#if defined(CONFIG_IPV6_MIP6) || defined(CONFIG_IPV6_MIP6_MODULE)
-static int ipv6_dest_hao(struct sk_buff *skb, int optoff)
+static bool ipv6_dest_hao(struct sk_buff *skb, int optoff)
{
struct ipv6_destopt_hao *hao;
struct inet6_skb_parm *opt = IP6CB(skb);
@@ -266,15 +279,15 @@ static int ipv6_dest_hao(struct sk_buff *skb, int optoff)
if (skb->tstamp.tv64 == 0)
__net_timestamp(skb);
- return 1;
+ return true;
discard:
kfree_skb(skb);
- return 0;
+ return false;
}
#endif
-static struct tlvtype_proc tlvprocdestopt_lst[] = {
+static const struct tlvtype_proc tlvprocdestopt_lst[] = {
#if defined(CONFIG_IPV6_MIP6) || defined(CONFIG_IPV6_MIP6_MODULE)
{
.type = IPV6_TLV_HAO,
@@ -579,23 +592,23 @@ static inline struct net *ipv6_skb_net(struct sk_buff *skb)
/* Router Alert as of RFC 2711 */
-static int ipv6_hop_ra(struct sk_buff *skb, int optoff)
+static bool ipv6_hop_ra(struct sk_buff *skb, int optoff)
{
const unsigned char *nh = skb_network_header(skb);
if (nh[optoff + 1] == 2) {
IP6CB(skb)->ra = optoff;
- return 1;
+ return true;
}
LIMIT_NETDEBUG(KERN_DEBUG "ipv6_hop_ra: wrong RA length %d\n",
nh[optoff + 1]);
kfree_skb(skb);
- return 0;
+ return false;
}
/* Jumbo payload */
-static int ipv6_hop_jumbo(struct sk_buff *skb, int optoff)
+static bool ipv6_hop_jumbo(struct sk_buff *skb, int optoff)
{
const unsigned char *nh = skb_network_header(skb);
struct net *net = ipv6_skb_net(skb);
@@ -614,13 +627,13 @@ static int ipv6_hop_jumbo(struct sk_buff *skb, int optoff)
IP6_INC_STATS_BH(net, ipv6_skb_idev(skb),
IPSTATS_MIB_INHDRERRORS);
icmpv6_param_prob(skb, ICMPV6_HDR_FIELD, optoff+2);
- return 0;
+ return false;
}
if (ipv6_hdr(skb)->payload_len) {
IP6_INC_STATS_BH(net, ipv6_skb_idev(skb),
IPSTATS_MIB_INHDRERRORS);
icmpv6_param_prob(skb, ICMPV6_HDR_FIELD, optoff);
- return 0;
+ return false;
}
if (pkt_len > skb->len - sizeof(struct ipv6hdr)) {
@@ -632,14 +645,14 @@ static int ipv6_hop_jumbo(struct sk_buff *skb, int optoff)
if (pskb_trim_rcsum(skb, pkt_len + sizeof(struct ipv6hdr)))
goto drop;
- return 1;
+ return true;
drop:
kfree_skb(skb);
- return 0;
+ return false;
}
-static struct tlvtype_proc tlvprochopopt_lst[] = {
+static const struct tlvtype_proc tlvprochopopt_lst[] = {
{
.type = IPV6_TLV_ROUTERALERT,
.func = ipv6_hop_ra,
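The ip6_parse_tlv() changes above accumulate a padlen counter so that consecutive PAD1/PADN options between real options never add up to more than 7 bytes, since no 8-byte alignment can require more padding than that. A standalone sketch of such a walk, with simplified bounds checks, no zero-byte PadN payload check, and no kernel types, might look like:

#include <stdbool.h>
#include <stddef.h>

#define TLV_PAD1 0
#define TLV_PADN 1

static bool walk_tlv_options(const unsigned char *opt, size_t len)
{
    size_t off = 0, padlen = 0;

    while (off < len) {
        size_t optlen;

        if (opt[off] == TLV_PAD1) {
            optlen = 1;
            if (++padlen > 7)
                return false;   /* more padding than any alignment needs */
        } else {
            if (off + 1 >= len)
                return false;   /* truncated option header */
            optlen = opt[off + 1] + 2;
            if (off + optlen > len)
                return false;   /* option overruns the header */
            if (opt[off] == TLV_PADN) {
                padlen += optlen;
                if (padlen > 7)
                    return false;
            } else {
                padlen = 0;     /* a real option ends the padding run */
            }
        }
        off += optlen;
    }
    return true;
}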
diff --git a/net/ipv6/exthdrs_core.c b/net/ipv6/exthdrs_core.c
index 7b1a884634d5..f73d59a14131 100644
--- a/net/ipv6/exthdrs_core.c
+++ b/net/ipv6/exthdrs_core.c
@@ -9,7 +9,7 @@
* find out if nexthdr is a well-known extension header or a protocol
*/
-int ipv6_ext_hdr(u8 nexthdr)
+bool ipv6_ext_hdr(u8 nexthdr)
{
/*
* find out if nexthdr is an extension header or a protocol
diff --git a/net/ipv6/icmp.c b/net/ipv6/icmp.c
index cc079d8d4681..091a2971c7b7 100644
--- a/net/ipv6/icmp.c
+++ b/net/ipv6/icmp.c
@@ -29,6 +29,8 @@
* Kazunori MIYAZAWA @USAGI: change output process to use ip6_append_data
*/
+#define pr_fmt(fmt) "IPv6: " fmt
+
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/types.h>
@@ -129,7 +131,7 @@ void icmpv6_param_prob(struct sk_buff *skb, u8 code, int pos)
* --ANK (980726)
*/
-static int is_ineligible(struct sk_buff *skb)
+static bool is_ineligible(const struct sk_buff *skb)
{
int ptr = (u8 *)(ipv6_hdr(skb) + 1) - skb->data;
int len = skb->len - ptr;
@@ -137,11 +139,11 @@ static int is_ineligible(struct sk_buff *skb)
__be16 frag_off;
if (len < 0)
- return 1;
+ return true;
ptr = ipv6_skip_exthdr(skb, ptr, &nexthdr, &frag_off);
if (ptr < 0)
- return 0;
+ return false;
if (nexthdr == IPPROTO_ICMPV6) {
u8 _type, *tp;
tp = skb_header_pointer(skb,
@@ -149,9 +151,9 @@ static int is_ineligible(struct sk_buff *skb)
sizeof(_type), &_type);
if (tp == NULL ||
!(*tp & ICMPV6_INFOMSG_MASK))
- return 1;
+ return true;
}
- return 0;
+ return false;
}
/*
@@ -206,14 +208,14 @@ static inline bool icmpv6_xrlim_allow(struct sock *sk, u8 type,
* highest-order two bits set to 10
*/
-static __inline__ int opt_unrec(struct sk_buff *skb, __u32 offset)
+static bool opt_unrec(struct sk_buff *skb, __u32 offset)
{
u8 _optval, *op;
offset += skb_network_offset(skb);
op = skb_header_pointer(skb, offset, sizeof(_optval), &_optval);
if (op == NULL)
- return 1;
+ return true;
return (*op & 0xC0) == 0x80;
}
@@ -820,9 +822,7 @@ static int __net_init icmpv6_sk_init(struct net *net)
err = inet_ctl_sock_create(&sk, PF_INET6,
SOCK_RAW, IPPROTO_ICMPV6, net);
if (err < 0) {
- printk(KERN_ERR
- "Failed to initialize the ICMP6 control socket "
- "(err %d).\n",
+ pr_err("Failed to initialize the ICMP6 control socket (err %d)\n",
err);
goto fail;
}
@@ -881,7 +881,7 @@ int __init icmpv6_init(void)
return 0;
fail:
- printk(KERN_ERR "Failed to register ICMP6 protocol\n");
+ pr_err("Failed to register ICMP6 protocol\n");
unregister_pernet_subsys(&icmpv6_sk_ops);
return err;
}
diff --git a/net/ipv6/ip6_fib.c b/net/ipv6/ip6_fib.c
index 93717435013e..0c220a416626 100644
--- a/net/ipv6/ip6_fib.c
+++ b/net/ipv6/ip6_fib.c
@@ -18,6 +18,9 @@
* routing table.
* Ville Nuorvala: Fixed routing subtrees.
*/
+
+#define pr_fmt(fmt) "IPv6: " fmt
+
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/net.h>
@@ -38,7 +41,7 @@
#define RT6_DEBUG 2
#if RT6_DEBUG >= 3
-#define RT6_TRACE(x...) printk(KERN_DEBUG x)
+#define RT6_TRACE(x...) pr_debug(x)
#else
#define RT6_TRACE(x...) do { ; } while (0)
#endif
@@ -451,12 +454,10 @@ static struct fib6_node * fib6_add_1(struct fib6_node *root, void *addr,
!ipv6_prefix_equal(&key->addr, addr, fn->fn_bit)) {
if (!allow_create) {
if (replace_required) {
- pr_warn("IPv6: Can't replace route, "
- "no match found\n");
+ pr_warn("Can't replace route, no match found\n");
return ERR_PTR(-ENOENT);
}
- pr_warn("IPv6: NLM_F_CREATE should be set "
- "when creating new route\n");
+ pr_warn("NLM_F_CREATE should be set when creating new route\n");
}
goto insert_above;
}
@@ -499,11 +500,10 @@ static struct fib6_node * fib6_add_1(struct fib6_node *root, void *addr,
* That would keep IPv6 consistent with IPv4
*/
if (replace_required) {
- pr_warn("IPv6: Can't replace route, no match found\n");
+ pr_warn("Can't replace route, no match found\n");
return ERR_PTR(-ENOENT);
}
- pr_warn("IPv6: NLM_F_CREATE should be set "
- "when creating new route\n");
+ pr_warn("NLM_F_CREATE should be set when creating new route\n");
}
/*
* We walked to the bottom of tree.
@@ -696,7 +696,7 @@ static int fib6_add_rt2node(struct fib6_node *fn, struct rt6_info *rt,
*/
if (!replace) {
if (!add)
- pr_warn("IPv6: NLM_F_CREATE should be set when creating new route\n");
+ pr_warn("NLM_F_CREATE should be set when creating new route\n");
add:
rt->dst.rt6_next = iter;
@@ -715,7 +715,7 @@ add:
if (!found) {
if (add)
goto add;
- pr_warn("IPv6: NLM_F_REPLACE set, but no existing node found!\n");
+ pr_warn("NLM_F_REPLACE set, but no existing node found!\n");
return -ENOENT;
}
*ins = rt;
@@ -768,7 +768,7 @@ int fib6_add(struct fib6_node *root, struct rt6_info *rt, struct nl_info *info)
replace_required = 1;
}
if (!allow_create && !replace_required)
- pr_warn("IPv6: RTM_NEWROUTE with no NLM_F_CREATE or NLM_F_REPLACE\n");
+ pr_warn("RTM_NEWROUTE with no NLM_F_CREATE or NLM_F_REPLACE\n");
fn = fib6_add_1(root, &rt->rt6i_dst.addr, sizeof(struct in6_addr),
rt->rt6i_dst.plen, offsetof(struct rt6_info, rt6i_dst),
@@ -1420,7 +1420,8 @@ static int fib6_clean_node(struct fib6_walker_t *w)
res = fib6_del(rt, &info);
if (res) {
#if RT6_DEBUG >= 2
- printk(KERN_DEBUG "fib6_clean_node: del failed: rt=%p@%p err=%d\n", rt, rt->rt6i_node, res);
+ pr_debug("%s: del failed: rt=%p@%p err=%d\n",
+ __func__, rt, rt->rt6i_node, res);
#endif
continue;
}
diff --git a/net/ipv6/ip6_flowlabel.c b/net/ipv6/ip6_flowlabel.c
index cb43df690210..9772fbd8a3f5 100644
--- a/net/ipv6/ip6_flowlabel.c
+++ b/net/ipv6/ip6_flowlabel.c
@@ -433,32 +433,32 @@ static int mem_check(struct sock *sk)
return 0;
}
-static int ipv6_hdr_cmp(struct ipv6_opt_hdr *h1, struct ipv6_opt_hdr *h2)
+static bool ipv6_hdr_cmp(struct ipv6_opt_hdr *h1, struct ipv6_opt_hdr *h2)
{
if (h1 == h2)
- return 0;
+ return false;
if (h1 == NULL || h2 == NULL)
- return 1;
+ return true;
if (h1->hdrlen != h2->hdrlen)
- return 1;
+ return true;
return memcmp(h1+1, h2+1, ((h1->hdrlen+1)<<3) - sizeof(*h1));
}
-static int ipv6_opt_cmp(struct ipv6_txoptions *o1, struct ipv6_txoptions *o2)
+static bool ipv6_opt_cmp(struct ipv6_txoptions *o1, struct ipv6_txoptions *o2)
{
if (o1 == o2)
- return 0;
+ return false;
if (o1 == NULL || o2 == NULL)
- return 1;
+ return true;
if (o1->opt_nflen != o2->opt_nflen)
- return 1;
+ return true;
if (ipv6_hdr_cmp(o1->hopopt, o2->hopopt))
- return 1;
+ return true;
if (ipv6_hdr_cmp(o1->dst0opt, o2->dst0opt))
- return 1;
+ return true;
if (ipv6_hdr_cmp((struct ipv6_opt_hdr *)o1->srcrt, (struct ipv6_opt_hdr *)o2->srcrt))
- return 1;
- return 0;
+ return true;
+ return false;
}
static inline void fl_link(struct ipv6_pinfo *np, struct ipv6_fl_socklist *sfl,
diff --git a/net/ipv6/ip6_input.c b/net/ipv6/ip6_input.c
index 1ca5d45a12e8..21a15dfe4a9e 100644
--- a/net/ipv6/ip6_input.c
+++ b/net/ipv6/ip6_input.c
@@ -170,7 +170,8 @@ static int ip6_input_finish(struct sk_buff *skb)
{
const struct inet6_protocol *ipprot;
unsigned int nhoff;
- int nexthdr, raw;
+ int nexthdr;
+ bool raw;
u8 hash;
struct inet6_dev *idev;
struct net *net = dev_net(skb_dst(skb)->dev);
@@ -251,7 +252,7 @@ int ip6_input(struct sk_buff *skb)
int ip6_mc_input(struct sk_buff *skb)
{
const struct ipv6hdr *hdr;
- int deliver;
+ bool deliver;
IP6_UPD_PO_STATS_BH(dev_net(skb_dst(skb)->dev),
ip6_dst_idev(skb_dst(skb)), IPSTATS_MIB_INMCAST,
@@ -287,7 +288,7 @@ int ip6_mc_input(struct sk_buff *skb)
* is for MLD (0x0000).
*/
if ((ptr[2] | ptr[3]) == 0) {
- deliver = 0;
+ deliver = false;
if (!ipv6_ext_hdr(nexthdr)) {
/* BUG */
@@ -312,7 +313,7 @@ int ip6_mc_input(struct sk_buff *skb)
case ICMPV6_MGM_REPORT:
case ICMPV6_MGM_REDUCTION:
case ICMPV6_MLD2_REPORT:
- deliver = 1;
+ deliver = true;
break;
}
goto out;
diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c
index d8e05af2c4bb..d99fdc699625 100644
--- a/net/ipv6/ip6_output.c
+++ b/net/ipv6/ip6_output.c
@@ -252,8 +252,7 @@ int ip6_xmit(struct sock *sk, struct sk_buff *skb, struct flowi6 *fl6,
dst->dev, dst_output);
}
- if (net_ratelimit())
- printk(KERN_DEBUG "IPv6: sending pkt_too_big to self\n");
+ net_dbg_ratelimited("IPv6: sending pkt_too_big to self\n");
skb->dev = dst->dev;
icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu);
IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)), IPSTATS_MIB_FRAGFAILS);
@@ -644,7 +643,10 @@ int ip6_fragment(struct sk_buff *skb, int (*output)(struct sk_buff *))
/* We must not fragment if the socket is set to force MTU discovery
* or if the skb is not generated by a local socket.
*/
- if (!skb->local_df && skb->len > mtu) {
+ if (unlikely(!skb->local_df && skb->len > mtu)) {
+ if (skb->sk && dst_allfrag(skb_dst(skb)))
+ sk_nocaps_add(skb->sk, NETIF_F_GSO_MASK);
+
skb->dev = skb_dst(skb)->dev;
icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu);
IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)),
@@ -789,6 +791,10 @@ slow_path_clean:
}
slow_path:
+ if ((skb->ip_summed == CHECKSUM_PARTIAL) &&
+ skb_checksum_help(skb))
+ goto fail;
+
left = skb->len - hlen; /* Space per frame */
ptr = hlen; /* Where to start from */
@@ -1199,7 +1205,6 @@ int ip6_append_data(struct sock *sk, int getfrag(void *from, char *to,
int copy;
int err;
int offset = 0;
- int csummode = CHECKSUM_NONE;
__u8 tx_flags = 0;
if (flags&MSG_PROBE)
@@ -1412,7 +1417,7 @@ alloc_new_skb:
/*
* Fill in the control structures
*/
- skb->ip_summed = csummode;
+ skb->ip_summed = CHECKSUM_NONE;
skb->csum = 0;
/* reserve for fragmentation and ipsec header */
skb_reserve(skb, hh_len + sizeof(struct frag_hdr) +
@@ -1455,7 +1460,6 @@ alloc_new_skb:
transhdrlen = 0;
exthdrlen = 0;
dst_exthdrlen = 0;
- csummode = CHECKSUM_NONE;
/*
* Put the packet on the pending queue
diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c
index 5df487c81ed9..c9015fad8d65 100644
--- a/net/ipv6/ip6_tunnel.c
+++ b/net/ipv6/ip6_tunnel.c
@@ -18,6 +18,8 @@
*
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/module.h>
#include <linux/capability.h>
#include <linux/errno.h>
@@ -60,7 +62,7 @@ MODULE_LICENSE("GPL");
MODULE_ALIAS_NETDEV("ip6tnl0");
#ifdef IP6_TNL_DEBUG
-#define IP6_TNL_TRACE(x...) printk(KERN_DEBUG "%s:" x "\n", __func__)
+#define IP6_TNL_TRACE(x...) pr_debug("%s:" x "\n", __func__)
#else
#define IP6_TNL_TRACE(x...) do {;} while(0)
#endif
@@ -460,19 +462,14 @@ ip6_tnl_err(struct sk_buff *skb, __u8 ipproto, struct inet6_skb_parm *opt,
struct ipv6_tlv_tnl_enc_lim *tel;
__u32 mtu;
case ICMPV6_DEST_UNREACH:
- if (net_ratelimit())
- printk(KERN_WARNING
- "%s: Path to destination invalid "
- "or inactive!\n", t->parms.name);
+ net_warn_ratelimited("%s: Path to destination invalid or inactive!\n",
+ t->parms.name);
rel_msg = 1;
break;
case ICMPV6_TIME_EXCEED:
if ((*code) == ICMPV6_EXC_HOPLIMIT) {
- if (net_ratelimit())
- printk(KERN_WARNING
- "%s: Too small hop limit or "
- "routing loop in tunnel!\n",
- t->parms.name);
+ net_warn_ratelimited("%s: Too small hop limit or routing loop in tunnel!\n",
+ t->parms.name);
rel_msg = 1;
}
break;
@@ -484,17 +481,13 @@ ip6_tnl_err(struct sk_buff *skb, __u8 ipproto, struct inet6_skb_parm *opt,
if (teli && teli == *info - 2) {
tel = (struct ipv6_tlv_tnl_enc_lim *) &skb->data[teli];
if (tel->encap_limit == 0) {
- if (net_ratelimit())
- printk(KERN_WARNING
- "%s: Too small encapsulation "
- "limit or routing loop in "
- "tunnel!\n", t->parms.name);
+ net_warn_ratelimited("%s: Too small encapsulation limit or routing loop in tunnel!\n",
+ t->parms.name);
rel_msg = 1;
}
- } else if (net_ratelimit()) {
- printk(KERN_WARNING
- "%s: Recipient unable to parse tunneled "
- "packet!\n ", t->parms.name);
+ } else {
+ net_warn_ratelimited("%s: Recipient unable to parse tunneled packet!\n",
+ t->parms.name);
}
break;
case ICMPV6_PKT_TOOBIG:
@@ -825,7 +818,7 @@ static void init_tel_txopt(struct ipv6_tel_txoption *opt, __u8 encap_limit)
* 0 else
**/
-static inline int
+static inline bool
ip6_tnl_addr_conflict(const struct ip6_tnl *t, const struct ipv6hdr *hdr)
{
return ipv6_addr_equal(&t->parms.raddr, &hdr->saddr);
@@ -845,15 +838,12 @@ static inline int ip6_tnl_xmit_ctl(struct ip6_tnl *t)
ldev = dev_get_by_index_rcu(net, p->link);
if (unlikely(!ipv6_chk_addr(net, &p->laddr, ldev, 0)))
- printk(KERN_WARNING
- "%s xmit: Local address not yet configured!\n",
- p->name);
+ pr_warn("%s xmit: Local address not yet configured!\n",
+ p->name);
else if (!ipv6_addr_is_multicast(&p->raddr) &&
unlikely(ipv6_chk_addr(net, &p->raddr, NULL, 0)))
- printk(KERN_WARNING
- "%s xmit: Routing loop! "
- "Remote address found on this node!\n",
- p->name);
+ pr_warn("%s xmit: Routing loop! Remote address found on this node!\n",
+ p->name);
else
ret = 1;
rcu_read_unlock();
@@ -919,10 +909,8 @@ static int ip6_tnl_xmit2(struct sk_buff *skb,
if (tdev == dev) {
stats->collisions++;
- if (net_ratelimit())
- printk(KERN_WARNING
- "%s: Local routing loop detected!\n",
- t->parms.name);
+ net_warn_ratelimited("%s: Local routing loop detected!\n",
+ t->parms.name);
goto tx_err_dst_release;
}
mtu = dst_mtu(dst) - sizeof (*ipv6h);
@@ -1553,13 +1541,13 @@ static int __init ip6_tunnel_init(void)
err = xfrm6_tunnel_register(&ip4ip6_handler, AF_INET);
if (err < 0) {
- printk(KERN_ERR "ip6_tunnel init: can't register ip4ip6\n");
+ pr_err("%s: can't register ip4ip6\n", __func__);
goto out_ip4ip6;
}
err = xfrm6_tunnel_register(&ip6ip6_handler, AF_INET6);
if (err < 0) {
- printk(KERN_ERR "ip6_tunnel init: can't register ip6ip6\n");
+ pr_err("%s: can't register ip6ip6\n", __func__);
goto out_ip6ip6;
}
@@ -1580,10 +1568,10 @@ out_pernet:
static void __exit ip6_tunnel_cleanup(void)
{
if (xfrm6_tunnel_deregister(&ip4ip6_handler, AF_INET))
- printk(KERN_INFO "ip6_tunnel close: can't deregister ip4ip6\n");
+ pr_info("%s: can't deregister ip4ip6\n", __func__);
if (xfrm6_tunnel_deregister(&ip6ip6_handler, AF_INET6))
- printk(KERN_INFO "ip6_tunnel close: can't deregister ip6ip6\n");
+ pr_info("%s: can't deregister ip6ip6\n", __func__);
unregister_pernet_device(&ip6_tnl_net_ops);
}
diff --git a/net/ipv6/ip6mr.c b/net/ipv6/ip6mr.c
index efc0098b59dd..b15dc08643a4 100644
--- a/net/ipv6/ip6mr.c
+++ b/net/ipv6/ip6mr.c
@@ -1147,8 +1147,7 @@ static int ip6mr_cache_report(struct mr6_table *mrt, struct sk_buff *pkt,
*/
ret = sock_queue_rcv_skb(mrt->mroute6_sk, skb);
if (ret < 0) {
- if (net_ratelimit())
- printk(KERN_WARNING "mroute6: pending queue full, dropping entries.\n");
+ net_warn_ratelimited("mroute6: pending queue full, dropping entries\n");
kfree_skb(skb);
}
@@ -1351,7 +1350,7 @@ int __init ip6_mr_init(void)
goto reg_notif_fail;
#ifdef CONFIG_IPV6_PIMSM_V2
if (inet6_add_protocol(&pim6_protocol, IPPROTO_PIM) < 0) {
- printk(KERN_ERR "ip6_mr_init: can't add PIM protocol\n");
+ pr_err("%s: can't add PIM protocol\n", __func__);
err = -EAGAIN;
goto add_proto_fail;
}
diff --git a/net/ipv6/ipcomp6.c b/net/ipv6/ipcomp6.c
index bba658d9a03c..5cb75bfe45b1 100644
--- a/net/ipv6/ipcomp6.c
+++ b/net/ipv6/ipcomp6.c
@@ -30,6 +30,9 @@
* The decompression of IP datagram MUST be done after the reassembly,
* AH/ESP processing.
*/
+
+#define pr_fmt(fmt) "IPv6: " fmt
+
#include <linux/module.h>
#include <net/ip.h>
#include <net/xfrm.h>
@@ -69,8 +72,8 @@ static void ipcomp6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
if (!x)
return;
- printk(KERN_DEBUG "pmtu discovery on SA IPCOMP/%08x/%pI6\n",
- spi, &iph->daddr);
+ pr_debug("pmtu discovery on SA IPCOMP/%08x/%pI6\n",
+ spi, &iph->daddr);
xfrm_state_put(x);
}
@@ -190,11 +193,11 @@ static const struct inet6_protocol ipcomp6_protocol =
static int __init ipcomp6_init(void)
{
if (xfrm_register_type(&ipcomp6_type, AF_INET6) < 0) {
- printk(KERN_INFO "ipcomp6 init: can't add xfrm type\n");
+ pr_info("%s: can't add xfrm type\n", __func__);
return -EAGAIN;
}
if (inet6_add_protocol(&ipcomp6_protocol, IPPROTO_COMP) < 0) {
- printk(KERN_INFO "ipcomp6 init: can't add protocol\n");
+ pr_info("%s: can't add protocol\n", __func__);
xfrm_unregister_type(&ipcomp6_type, AF_INET6);
return -EAGAIN;
}
@@ -204,9 +207,9 @@ static int __init ipcomp6_init(void)
static void __exit ipcomp6_fini(void)
{
if (inet6_del_protocol(&ipcomp6_protocol, IPPROTO_COMP) < 0)
- printk(KERN_INFO "ipv6 ipcomp close: can't remove protocol\n");
+ pr_info("%s: can't remove protocol\n", __func__);
if (xfrm_unregister_type(&ipcomp6_type, AF_INET6) < 0)
- printk(KERN_INFO "ipv6 ipcomp close: can't remove xfrm type\n");
+ pr_info("%s: can't remove xfrm type\n", __func__);
}
module_init(ipcomp6_init);
diff --git a/net/ipv6/mcast.c b/net/ipv6/mcast.c
index 7dfb89f2bae5..6d0f5dc8e3a6 100644
--- a/net/ipv6/mcast.c
+++ b/net/ipv6/mcast.c
@@ -606,13 +606,13 @@ done:
return err;
}
-int inet6_mc_check(struct sock *sk, const struct in6_addr *mc_addr,
- const struct in6_addr *src_addr)
+bool inet6_mc_check(struct sock *sk, const struct in6_addr *mc_addr,
+ const struct in6_addr *src_addr)
{
struct ipv6_pinfo *np = inet6_sk(sk);
struct ipv6_mc_socklist *mc;
struct ip6_sf_socklist *psl;
- int rv = 1;
+ bool rv = true;
rcu_read_lock();
for_each_pmc_rcu(np, mc) {
@@ -621,7 +621,7 @@ int inet6_mc_check(struct sock *sk, const struct in6_addr *mc_addr,
}
if (!mc) {
rcu_read_unlock();
- return 1;
+ return true;
}
read_lock(&mc->sflock);
psl = mc->sflist;
@@ -635,9 +635,9 @@ int inet6_mc_check(struct sock *sk, const struct in6_addr *mc_addr,
break;
}
if (mc->sfmode == MCAST_INCLUDE && i >= psl->sl_count)
- rv = 0;
+ rv = false;
if (mc->sfmode == MCAST_EXCLUDE && i < psl->sl_count)
- rv = 0;
+ rv = false;
}
read_unlock(&mc->sflock);
rcu_read_unlock();
@@ -931,15 +931,15 @@ int ipv6_dev_mc_dec(struct net_device *dev, const struct in6_addr *addr)
/*
* identify MLD packets for MLD filter exceptions
*/
-int ipv6_is_mld(struct sk_buff *skb, int nexthdr)
+bool ipv6_is_mld(struct sk_buff *skb, int nexthdr)
{
struct icmp6hdr *pic;
if (nexthdr != IPPROTO_ICMPV6)
- return 0;
+ return false;
if (!pskb_may_pull(skb, sizeof(struct icmp6hdr)))
- return 0;
+ return false;
pic = icmp6_hdr(skb);
@@ -948,22 +948,22 @@ int ipv6_is_mld(struct sk_buff *skb, int nexthdr)
case ICMPV6_MGM_REPORT:
case ICMPV6_MGM_REDUCTION:
case ICMPV6_MLD2_REPORT:
- return 1;
+ return true;
default:
break;
}
- return 0;
+ return false;
}
/*
* check if the interface/address pair is valid
*/
-int ipv6_chk_mcast_addr(struct net_device *dev, const struct in6_addr *group,
- const struct in6_addr *src_addr)
+bool ipv6_chk_mcast_addr(struct net_device *dev, const struct in6_addr *group,
+ const struct in6_addr *src_addr)
{
struct inet6_dev *idev;
struct ifmcaddr6 *mc;
- int rv = 0;
+ bool rv = false;
rcu_read_lock();
idev = __in6_dev_get(dev);
@@ -990,7 +990,7 @@ int ipv6_chk_mcast_addr(struct net_device *dev, const struct in6_addr *group,
rv = mc->mca_sfcount[MCAST_EXCLUDE] !=0;
spin_unlock_bh(&mc->mca_lock);
} else
- rv = 1; /* don't filter unspecified source */
+ rv = true; /* don't filter unspecified source */
}
read_unlock_bh(&idev->lock);
}
@@ -1046,8 +1046,8 @@ static void igmp6_group_queried(struct ifmcaddr6 *ma, unsigned long resptime)
}
/* mark EXCLUDE-mode sources */
-static int mld_xmarksources(struct ifmcaddr6 *pmc, int nsrcs,
- const struct in6_addr *srcs)
+static bool mld_xmarksources(struct ifmcaddr6 *pmc, int nsrcs,
+ const struct in6_addr *srcs)
{
struct ip6_sf_list *psf;
int i, scount;
@@ -1070,12 +1070,12 @@ static int mld_xmarksources(struct ifmcaddr6 *pmc, int nsrcs,
}
pmc->mca_flags &= ~MAF_GSQUERY;
if (scount == nsrcs) /* all sources excluded */
- return 0;
- return 1;
+ return false;
+ return true;
}
-static int mld_marksources(struct ifmcaddr6 *pmc, int nsrcs,
- const struct in6_addr *srcs)
+static bool mld_marksources(struct ifmcaddr6 *pmc, int nsrcs,
+ const struct in6_addr *srcs)
{
struct ip6_sf_list *psf;
int i, scount;
@@ -1099,10 +1099,10 @@ static int mld_marksources(struct ifmcaddr6 *pmc, int nsrcs,
}
if (!scount) {
pmc->mca_flags &= ~MAF_GSQUERY;
- return 0;
+ return false;
}
pmc->mca_flags |= MAF_GSQUERY;
- return 1;
+ return true;
}
/* called with rcu_read_lock() */
@@ -1276,17 +1276,17 @@ int igmp6_event_report(struct sk_buff *skb)
return 0;
}
-static int is_in(struct ifmcaddr6 *pmc, struct ip6_sf_list *psf, int type,
- int gdeleted, int sdeleted)
+static bool is_in(struct ifmcaddr6 *pmc, struct ip6_sf_list *psf, int type,
+ int gdeleted, int sdeleted)
{
switch (type) {
case MLD2_MODE_IS_INCLUDE:
case MLD2_MODE_IS_EXCLUDE:
if (gdeleted || sdeleted)
- return 0;
+ return false;
if (!((pmc->mca_flags & MAF_GSQUERY) && !psf->sf_gsresp)) {
if (pmc->mca_sfmode == MCAST_INCLUDE)
- return 1;
+ return true;
/* don't include if this source is excluded
* in all filters
*/
@@ -1295,29 +1295,29 @@ static int is_in(struct ifmcaddr6 *pmc, struct ip6_sf_list *psf, int type,
return pmc->mca_sfcount[MCAST_EXCLUDE] ==
psf->sf_count[MCAST_EXCLUDE];
}
- return 0;
+ return false;
case MLD2_CHANGE_TO_INCLUDE:
if (gdeleted || sdeleted)
- return 0;
+ return false;
return psf->sf_count[MCAST_INCLUDE] != 0;
case MLD2_CHANGE_TO_EXCLUDE:
if (gdeleted || sdeleted)
- return 0;
+ return false;
if (pmc->mca_sfcount[MCAST_EXCLUDE] == 0 ||
psf->sf_count[MCAST_INCLUDE])
- return 0;
+ return false;
return pmc->mca_sfcount[MCAST_EXCLUDE] ==
psf->sf_count[MCAST_EXCLUDE];
case MLD2_ALLOW_NEW_SOURCES:
if (gdeleted || !psf->sf_crcount)
- return 0;
+ return false;
return (pmc->mca_sfmode == MCAST_INCLUDE) ^ sdeleted;
case MLD2_BLOCK_OLD_SOURCES:
if (pmc->mca_sfmode == MCAST_INCLUDE)
return gdeleted || (psf->sf_crcount && sdeleted);
return psf->sf_crcount && !gdeleted && !sdeleted;
}
- return 0;
+ return false;
}
static int
@@ -2627,8 +2627,7 @@ static int __net_init igmp6_net_init(struct net *net)
err = inet_ctl_sock_create(&net->ipv6.igmp_sk, PF_INET6,
SOCK_RAW, IPPROTO_ICMPV6, net);
if (err < 0) {
- printk(KERN_ERR
- "Failed to initialize the IGMP6 control socket (err %d).\n",
+ pr_err("Failed to initialize the IGMP6 control socket (err %d)\n",
err);
goto out;
}
diff --git a/net/ipv6/mip6.c b/net/ipv6/mip6.c
index 7e1e0fbfef21..5b087c31d87b 100644
--- a/net/ipv6/mip6.c
+++ b/net/ipv6/mip6.c
@@ -22,6 +22,8 @@
* Masahide NAKAMURA @USAGI
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/module.h>
#include <linux/skbuff.h>
#include <linux/time.h>
@@ -44,7 +46,7 @@ static inline void *mip6_padn(__u8 *data, __u8 padlen)
if (!data)
return NULL;
if (padlen == 1) {
- data[0] = IPV6_TLV_PAD0;
+ data[0] = IPV6_TLV_PAD1;
} else if (padlen > 1) {
data[0] = IPV6_TLV_PADN;
data[1] = padlen - 2;
@@ -307,13 +309,12 @@ static int mip6_destopt_offset(struct xfrm_state *x, struct sk_buff *skb,
static int mip6_destopt_init_state(struct xfrm_state *x)
{
if (x->id.spi) {
- printk(KERN_INFO "%s: spi is not 0: %u\n", __func__,
- x->id.spi);
+ pr_info("%s: spi is not 0: %u\n", __func__, x->id.spi);
return -EINVAL;
}
if (x->props.mode != XFRM_MODE_ROUTEOPTIMIZATION) {
- printk(KERN_INFO "%s: state's mode is not %u: %u\n",
- __func__, XFRM_MODE_ROUTEOPTIMIZATION, x->props.mode);
+ pr_info("%s: state's mode is not %u: %u\n",
+ __func__, XFRM_MODE_ROUTEOPTIMIZATION, x->props.mode);
return -EINVAL;
}
@@ -443,13 +444,12 @@ static int mip6_rthdr_offset(struct xfrm_state *x, struct sk_buff *skb,
static int mip6_rthdr_init_state(struct xfrm_state *x)
{
if (x->id.spi) {
- printk(KERN_INFO "%s: spi is not 0: %u\n", __func__,
- x->id.spi);
+ pr_info("%s: spi is not 0: %u\n", __func__, x->id.spi);
return -EINVAL;
}
if (x->props.mode != XFRM_MODE_ROUTEOPTIMIZATION) {
- printk(KERN_INFO "%s: state's mode is not %u: %u\n",
- __func__, XFRM_MODE_ROUTEOPTIMIZATION, x->props.mode);
+ pr_info("%s: state's mode is not %u: %u\n",
+ __func__, XFRM_MODE_ROUTEOPTIMIZATION, x->props.mode);
return -EINVAL;
}
@@ -481,18 +481,18 @@ static const struct xfrm_type mip6_rthdr_type =
static int __init mip6_init(void)
{
- printk(KERN_INFO "Mobile IPv6\n");
+ pr_info("Mobile IPv6\n");
if (xfrm_register_type(&mip6_destopt_type, AF_INET6) < 0) {
- printk(KERN_INFO "%s: can't add xfrm type(destopt)\n", __func__);
+ pr_info("%s: can't add xfrm type(destopt)\n", __func__);
goto mip6_destopt_xfrm_fail;
}
if (xfrm_register_type(&mip6_rthdr_type, AF_INET6) < 0) {
- printk(KERN_INFO "%s: can't add xfrm type(rthdr)\n", __func__);
+ pr_info("%s: can't add xfrm type(rthdr)\n", __func__);
goto mip6_rthdr_xfrm_fail;
}
if (rawv6_mh_filter_register(mip6_mh_filter) < 0) {
- printk(KERN_INFO "%s: can't add rawv6 mh filter\n", __func__);
+ pr_info("%s: can't add rawv6 mh filter\n", __func__);
goto mip6_rawv6_mh_fail;
}
@@ -510,11 +510,11 @@ static int __init mip6_init(void)
static void __exit mip6_fini(void)
{
if (rawv6_mh_filter_unregister(mip6_mh_filter) < 0)
- printk(KERN_INFO "%s: can't remove rawv6 mh filter\n", __func__);
+ pr_info("%s: can't remove rawv6 mh filter\n", __func__);
if (xfrm_unregister_type(&mip6_rthdr_type, AF_INET6) < 0)
- printk(KERN_INFO "%s: can't remove xfrm type(rthdr)\n", __func__);
+ pr_info("%s: can't remove xfrm type(rthdr)\n", __func__);
if (xfrm_unregister_type(&mip6_destopt_type, AF_INET6) < 0)
- printk(KERN_INFO "%s: can't remove xfrm type(destopt)\n", __func__);
+ pr_info("%s: can't remove xfrm type(destopt)\n", __func__);
}
module_init(mip6_init);
diff --git a/net/ipv6/ndisc.c b/net/ipv6/ndisc.c
index 35615c6358b8..54f62d3b8dd6 100644
--- a/net/ipv6/ndisc.c
+++ b/net/ipv6/ndisc.c
@@ -27,27 +27,7 @@
* YOSHIFUJI Hideaki @USAGI : Verify ND options properly
*/
-/* Set to 3 to get tracing... */
-#define ND_DEBUG 1
-
-#define ND_PRINTK(fmt, args...) do { if (net_ratelimit()) { printk(fmt, ## args); } } while(0)
-#define ND_NOPRINTK(x...) do { ; } while(0)
-#define ND_PRINTK0 ND_PRINTK
-#define ND_PRINTK1 ND_NOPRINTK
-#define ND_PRINTK2 ND_NOPRINTK
-#define ND_PRINTK3 ND_NOPRINTK
-#if ND_DEBUG >= 1
-#undef ND_PRINTK1
-#define ND_PRINTK1 ND_PRINTK
-#endif
-#if ND_DEBUG >= 2
-#undef ND_PRINTK2
-#define ND_PRINTK2 ND_PRINTK
-#endif
-#if ND_DEBUG >= 3
-#undef ND_PRINTK3
-#define ND_PRINTK3 ND_PRINTK
-#endif
+#define pr_fmt(fmt) "ICMPv6: " fmt
#include <linux/module.h>
#include <linux/errno.h>
@@ -92,6 +72,15 @@
#include <linux/netfilter.h>
#include <linux/netfilter_ipv6.h>
+/* Set to 3 to get tracing... */
+#define ND_DEBUG 1
+
+#define ND_PRINTK(val, level, fmt, ...) \
+do { \
+ if (val <= ND_DEBUG) \
+ net_##level##_ratelimited(fmt, ##__VA_ARGS__); \
+} while (0)
+
static u32 ndisc_hash(const void *pkey,
const struct net_device *dev,
__u32 *hash_rnd);
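
The rewritten ND_PRINTK takes a verbosity level and a log-level name and forwards to the matching net_<level>_ratelimited() helper, so a call such as ND_PRINTK(2, warn, ...) expands roughly to:

    /* ND_PRINTK(2, warn, "NS: invalid ND options\n") becomes, in effect: */
    do {
            if (2 <= ND_DEBUG)      /* ND_DEBUG is 1, so level 2/3 messages drop out */
                    net_warn_ratelimited("NS: invalid ND options\n");
    } while (0);

Level 0 and 1 messages survive the ND_DEBUG check; raising ND_DEBUG to 3 re-enables the tracing output, as the comment above the macro notes.
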
@@ -265,10 +254,9 @@ static struct ndisc_options *ndisc_parse_options(u8 *opt, int opt_len,
case ND_OPT_MTU:
case ND_OPT_REDIRECT_HDR:
if (ndopts->nd_opt_array[nd_opt->nd_opt_type]) {
- ND_PRINTK2(KERN_WARNING
- "%s(): duplicated ND6 option found: type=%d\n",
- __func__,
- nd_opt->nd_opt_type);
+ ND_PRINTK(2, warn,
+ "%s: duplicated ND6 option found: type=%d\n",
+ __func__, nd_opt->nd_opt_type);
} else {
ndopts->nd_opt_array[nd_opt->nd_opt_type] = nd_opt;
}
@@ -296,10 +284,11 @@ static struct ndisc_options *ndisc_parse_options(u8 *opt, int opt_len,
* to accommodate future extension to the
* protocol.
*/
- ND_PRINTK2(KERN_NOTICE
- "%s(): ignored unsupported option; type=%d, len=%d\n",
- __func__,
- nd_opt->nd_opt_type, nd_opt->nd_opt_len);
+ ND_PRINTK(2, notice,
+ "%s: ignored unsupported option; type=%d, len=%d\n",
+ __func__,
+ nd_opt->nd_opt_type,
+ nd_opt->nd_opt_len);
}
}
opt_len -= l;
@@ -327,9 +316,6 @@ int ndisc_mc_map(const struct in6_addr *addr, char *buf, struct net_device *dev,
case ARPHRD_FDDI:
ipv6_eth_mc_map(addr, buf);
return 0;
- case ARPHRD_IEEE802_TR:
- ipv6_tr_mc_map(addr,buf);
- return 0;
case ARPHRD_ARCNET:
ipv6_arcnet_mc_map(addr, buf);
return 0;
@@ -362,7 +348,7 @@ static int ndisc_constructor(struct neighbour *neigh)
struct net_device *dev = neigh->dev;
struct inet6_dev *in6_dev;
struct neigh_parms *parms;
- int is_multicast = ipv6_addr_is_multicast(addr);
+ bool is_multicast = ipv6_addr_is_multicast(addr);
in6_dev = in6_dev_get(dev);
if (in6_dev == NULL) {
@@ -458,9 +444,8 @@ struct sk_buff *ndisc_build_skb(struct net_device *dev,
len + hlen + tlen),
1, &err);
if (!skb) {
- ND_PRINTK0(KERN_ERR
- "ICMPv6 ND: %s() failed to allocate an skb, err=%d.\n",
- __func__, err);
+ ND_PRINTK(0, err, "ND: %s failed to allocate an skb, err=%d\n",
+ __func__, err);
return NULL;
}
@@ -696,8 +681,9 @@ static void ndisc_solicit(struct neighbour *neigh, struct sk_buff *skb)
if ((probes -= neigh->parms->ucast_probes) < 0) {
if (!(neigh->nud_state & NUD_VALID)) {
- ND_PRINTK1(KERN_DEBUG "%s(): trying to ucast probe in NUD_INVALID: %pI6\n",
- __func__, target);
+ ND_PRINTK(1, dbg,
+ "%s: trying to ucast probe in NUD_INVALID: %pI6\n",
+ __func__, target);
}
ndisc_send_ns(dev, neigh, target, target, saddr);
} else if ((probes -= neigh->parms->app_probes) < 0) {
@@ -739,12 +725,11 @@ static void ndisc_recv_ns(struct sk_buff *skb)
struct inet6_dev *idev = NULL;
struct neighbour *neigh;
int dad = ipv6_addr_any(saddr);
- int inc;
+ bool inc;
int is_router = -1;
if (ipv6_addr_is_multicast(&msg->target)) {
- ND_PRINTK2(KERN_WARNING
- "ICMPv6 NS: multicast target address");
+ ND_PRINTK(2, warn, "NS: multicast target address\n");
return;
}
@@ -757,22 +742,20 @@ static void ndisc_recv_ns(struct sk_buff *skb)
daddr->s6_addr32[1] == htonl(0x00000000) &&
daddr->s6_addr32[2] == htonl(0x00000001) &&
daddr->s6_addr [12] == 0xff )) {
- ND_PRINTK2(KERN_WARNING
- "ICMPv6 NS: bad DAD packet (wrong destination)\n");
+ ND_PRINTK(2, warn, "NS: bad DAD packet (wrong destination)\n");
return;
}
if (!ndisc_parse_options(msg->opt, ndoptlen, &ndopts)) {
- ND_PRINTK2(KERN_WARNING
- "ICMPv6 NS: invalid ND options\n");
+ ND_PRINTK(2, warn, "NS: invalid ND options\n");
return;
}
if (ndopts.nd_opts_src_lladdr) {
lladdr = ndisc_opt_addr_data(ndopts.nd_opts_src_lladdr, dev);
if (!lladdr) {
- ND_PRINTK2(KERN_WARNING
- "ICMPv6 NS: invalid link-layer address length\n");
+ ND_PRINTK(2, warn,
+ "NS: invalid link-layer address length\n");
return;
}
@@ -782,8 +765,8 @@ static void ndisc_recv_ns(struct sk_buff *skb)
* in the message.
*/
if (dad) {
- ND_PRINTK2(KERN_WARNING
- "ICMPv6 NS: bad DAD packet (link-layer address option)\n");
+ ND_PRINTK(2, warn,
+ "NS: bad DAD packet (link-layer address option)\n");
return;
}
}
@@ -795,20 +778,6 @@ static void ndisc_recv_ns(struct sk_buff *skb)
if (ifp->flags & (IFA_F_TENTATIVE|IFA_F_OPTIMISTIC)) {
if (dad) {
- if (dev->type == ARPHRD_IEEE802_TR) {
- const unsigned char *sadr;
- sadr = skb_mac_header(skb);
- if (((sadr[8] ^ dev->dev_addr[0]) & 0x7f) == 0 &&
- sadr[9] == dev->dev_addr[1] &&
- sadr[10] == dev->dev_addr[2] &&
- sadr[11] == dev->dev_addr[3] &&
- sadr[12] == dev->dev_addr[4] &&
- sadr[13] == dev->dev_addr[5]) {
- /* looped-back to us */
- goto out;
- }
- }
-
/*
* We are colliding with another node
* who is doing DAD
@@ -915,34 +884,30 @@ static void ndisc_recv_na(struct sk_buff *skb)
struct neighbour *neigh;
if (skb->len < sizeof(struct nd_msg)) {
- ND_PRINTK2(KERN_WARNING
- "ICMPv6 NA: packet too short\n");
+ ND_PRINTK(2, warn, "NA: packet too short\n");
return;
}
if (ipv6_addr_is_multicast(&msg->target)) {
- ND_PRINTK2(KERN_WARNING
- "ICMPv6 NA: target address is multicast.\n");
+ ND_PRINTK(2, warn, "NA: target address is multicast\n");
return;
}
if (ipv6_addr_is_multicast(daddr) &&
msg->icmph.icmp6_solicited) {
- ND_PRINTK2(KERN_WARNING
- "ICMPv6 NA: solicited NA is multicasted.\n");
+ ND_PRINTK(2, warn, "NA: solicited NA is multicasted\n");
return;
}
if (!ndisc_parse_options(msg->opt, ndoptlen, &ndopts)) {
- ND_PRINTK2(KERN_WARNING
- "ICMPv6 NS: invalid ND option\n");
+ ND_PRINTK(2, warn, "NS: invalid ND option\n");
return;
}
if (ndopts.nd_opts_tgt_lladdr) {
lladdr = ndisc_opt_addr_data(ndopts.nd_opts_tgt_lladdr, dev);
if (!lladdr) {
- ND_PRINTK2(KERN_WARNING
- "ICMPv6 NA: invalid link-layer address length\n");
+ ND_PRINTK(2, warn,
+ "NA: invalid link-layer address length\n");
return;
}
}
@@ -963,9 +928,9 @@ static void ndisc_recv_na(struct sk_buff *skb)
unsolicited advertisement.
*/
if (skb->pkt_type != PACKET_LOOPBACK)
- ND_PRINTK1(KERN_WARNING
- "ICMPv6 NA: someone advertises our address %pI6 on %s!\n",
- &ifp->addr, ifp->idev->dev->name);
+ ND_PRINTK(1, warn,
+ "NA: someone advertises our address %pI6 on %s!\n",
+ &ifp->addr, ifp->idev->dev->name);
in6_ifa_put(ifp);
return;
}
@@ -1027,8 +992,7 @@ static void ndisc_recv_rs(struct sk_buff *skb)
idev = __in6_dev_get(skb->dev);
if (!idev) {
- if (net_ratelimit())
- ND_PRINTK1("ICMP6 RS: can't find in6 device\n");
+ ND_PRINTK(1, err, "RS: can't find in6 device\n");
return;
}
@@ -1045,8 +1009,7 @@ static void ndisc_recv_rs(struct sk_buff *skb)
/* Parse ND options */
if (!ndisc_parse_options(rs_msg->opt, ndoptlen, &ndopts)) {
- if (net_ratelimit())
- ND_PRINTK2("ICMP6 NS: invalid ND option, ignored\n");
+ ND_PRINTK(2, notice, "NS: invalid ND option, ignored\n");
goto out;
}
@@ -1144,20 +1107,17 @@ static void ndisc_router_discovery(struct sk_buff *skb)
optlen = (skb->tail - skb->transport_header) - sizeof(struct ra_msg);
if (!(ipv6_addr_type(&ipv6_hdr(skb)->saddr) & IPV6_ADDR_LINKLOCAL)) {
- ND_PRINTK2(KERN_WARNING
- "ICMPv6 RA: source address is not link-local.\n");
+ ND_PRINTK(2, warn, "RA: source address is not link-local\n");
return;
}
if (optlen < 0) {
- ND_PRINTK2(KERN_WARNING
- "ICMPv6 RA: packet too short\n");
+ ND_PRINTK(2, warn, "RA: packet too short\n");
return;
}
#ifdef CONFIG_IPV6_NDISC_NODETYPE
if (skb->ndisc_nodetype == NDISC_NODETYPE_HOST) {
- ND_PRINTK2(KERN_WARNING
- "ICMPv6 RA: from host or unauthorized router\n");
+ ND_PRINTK(2, warn, "RA: from host or unauthorized router\n");
return;
}
#endif
@@ -1168,15 +1128,13 @@ static void ndisc_router_discovery(struct sk_buff *skb)
in6_dev = __in6_dev_get(skb->dev);
if (in6_dev == NULL) {
- ND_PRINTK0(KERN_ERR
- "ICMPv6 RA: can't find inet6 device for %s.\n",
- skb->dev->name);
+ ND_PRINTK(0, err, "RA: can't find inet6 device for %s\n",
+ skb->dev->name);
return;
}
if (!ndisc_parse_options(opt, optlen, &ndopts)) {
- ND_PRINTK2(KERN_WARNING
- "ICMP6 RA: invalid ND options\n");
+ ND_PRINTK(2, warn, "RA: invalid ND options\n");
return;
}
@@ -1229,9 +1187,9 @@ static void ndisc_router_discovery(struct sk_buff *skb)
if (rt) {
neigh = dst_neigh_lookup(&rt->dst, &ipv6_hdr(skb)->saddr);
if (!neigh) {
- ND_PRINTK0(KERN_ERR
- "ICMPv6 RA: %s() got default router without neighbour.\n",
- __func__);
+ ND_PRINTK(0, err,
+ "RA: %s got default router without neighbour\n",
+ __func__);
dst_release(&rt->dst);
return;
}
@@ -1242,22 +1200,21 @@ static void ndisc_router_discovery(struct sk_buff *skb)
}
if (rt == NULL && lifetime) {
- ND_PRINTK3(KERN_DEBUG
- "ICMPv6 RA: adding default router.\n");
+ ND_PRINTK(3, dbg, "RA: adding default router\n");
rt = rt6_add_dflt_router(&ipv6_hdr(skb)->saddr, skb->dev, pref);
if (rt == NULL) {
- ND_PRINTK0(KERN_ERR
- "ICMPv6 RA: %s() failed to add default route.\n",
- __func__);
+ ND_PRINTK(0, err,
+ "RA: %s failed to add default route\n",
+ __func__);
return;
}
neigh = dst_neigh_lookup(&rt->dst, &ipv6_hdr(skb)->saddr);
if (neigh == NULL) {
- ND_PRINTK0(KERN_ERR
- "ICMPv6 RA: %s() got default router without neighbour.\n",
- __func__);
+ ND_PRINTK(0, err,
+ "RA: %s got default router without neighbour\n",
+ __func__);
dst_release(&rt->dst);
return;
}
@@ -1325,8 +1282,8 @@ skip_linkparms:
lladdr = ndisc_opt_addr_data(ndopts.nd_opts_src_lladdr,
skb->dev);
if (!lladdr) {
- ND_PRINTK2(KERN_WARNING
- "ICMPv6 RA: invalid link-layer address length\n");
+ ND_PRINTK(2, warn,
+ "RA: invalid link-layer address length\n");
goto out;
}
}
@@ -1390,9 +1347,7 @@ skip_routeinfo:
mtu = ntohl(n);
if (mtu < IPV6_MIN_MTU || mtu > skb->dev->mtu) {
- ND_PRINTK2(KERN_WARNING
- "ICMPv6 RA: invalid mtu: %d\n",
- mtu);
+ ND_PRINTK(2, warn, "RA: invalid mtu: %d\n", mtu);
} else if (in6_dev->cnf.mtu6 != mtu) {
in6_dev->cnf.mtu6 = mtu;
@@ -1413,8 +1368,7 @@ skip_routeinfo:
}
if (ndopts.nd_opts_tgt_lladdr || ndopts.nd_opts_rh) {
- ND_PRINTK2(KERN_WARNING
- "ICMPv6 RA: invalid RA options");
+ ND_PRINTK(2, warn, "RA: invalid RA options\n");
}
out:
if (rt)
@@ -1439,15 +1393,15 @@ static void ndisc_redirect_rcv(struct sk_buff *skb)
switch (skb->ndisc_nodetype) {
case NDISC_NODETYPE_HOST:
case NDISC_NODETYPE_NODEFAULT:
- ND_PRINTK2(KERN_WARNING
- "ICMPv6 Redirect: from host or unauthorized router\n");
+ ND_PRINTK(2, warn,
+ "Redirect: from host or unauthorized router\n");
return;
}
#endif
if (!(ipv6_addr_type(&ipv6_hdr(skb)->saddr) & IPV6_ADDR_LINKLOCAL)) {
- ND_PRINTK2(KERN_WARNING
- "ICMPv6 Redirect: source address is not link-local.\n");
+ ND_PRINTK(2, warn,
+ "Redirect: source address is not link-local\n");
return;
}
@@ -1455,8 +1409,7 @@ static void ndisc_redirect_rcv(struct sk_buff *skb)
optlen -= sizeof(struct icmp6hdr) + 2 * sizeof(struct in6_addr);
if (optlen < 0) {
- ND_PRINTK2(KERN_WARNING
- "ICMPv6 Redirect: packet too short\n");
+ ND_PRINTK(2, warn, "Redirect: packet too short\n");
return;
}
@@ -1465,8 +1418,8 @@ static void ndisc_redirect_rcv(struct sk_buff *skb)
dest = target + 1;
if (ipv6_addr_is_multicast(dest)) {
- ND_PRINTK2(KERN_WARNING
- "ICMPv6 Redirect: destination address is multicast.\n");
+ ND_PRINTK(2, warn,
+ "Redirect: destination address is multicast\n");
return;
}
@@ -1474,8 +1427,8 @@ static void ndisc_redirect_rcv(struct sk_buff *skb)
on_link = 1;
} else if (ipv6_addr_type(target) !=
(IPV6_ADDR_UNICAST|IPV6_ADDR_LINKLOCAL)) {
- ND_PRINTK2(KERN_WARNING
- "ICMPv6 Redirect: target address is not link-local unicast.\n");
+ ND_PRINTK(2, warn,
+ "Redirect: target address is not link-local unicast\n");
return;
}
@@ -1491,16 +1444,15 @@ static void ndisc_redirect_rcv(struct sk_buff *skb)
*/
if (!ndisc_parse_options((u8*)(dest + 1), optlen, &ndopts)) {
- ND_PRINTK2(KERN_WARNING
- "ICMPv6 Redirect: invalid ND options\n");
+ ND_PRINTK(2, warn, "Redirect: invalid ND options\n");
return;
}
if (ndopts.nd_opts_tgt_lladdr) {
lladdr = ndisc_opt_addr_data(ndopts.nd_opts_tgt_lladdr,
skb->dev);
if (!lladdr) {
- ND_PRINTK2(KERN_WARNING
- "ICMPv6 Redirect: invalid link-layer address length\n");
+ ND_PRINTK(2, warn,
+ "Redirect: invalid link-layer address length\n");
return;
}
}
@@ -1535,16 +1487,15 @@ void ndisc_send_redirect(struct sk_buff *skb, const struct in6_addr *target)
u8 ha_buf[MAX_ADDR_LEN], *ha = NULL;
if (ipv6_get_lladdr(dev, &saddr_buf, IFA_F_TENTATIVE)) {
- ND_PRINTK2(KERN_WARNING
- "ICMPv6 Redirect: no link-local address on %s\n",
- dev->name);
+ ND_PRINTK(2, warn, "Redirect: no link-local address on %s\n",
+ dev->name);
return;
}
if (!ipv6_addr_equal(&ipv6_hdr(skb)->daddr, target) &&
ipv6_addr_type(target) != (IPV6_ADDR_UNICAST|IPV6_ADDR_LINKLOCAL)) {
- ND_PRINTK2(KERN_WARNING
- "ICMPv6 Redirect: target address is not link-local unicast.\n");
+ ND_PRINTK(2, warn,
+ "Redirect: target address is not link-local unicast\n");
return;
}
@@ -1563,8 +1514,8 @@ void ndisc_send_redirect(struct sk_buff *skb, const struct in6_addr *target)
rt = (struct rt6_info *) dst;
if (rt->rt6i_flags & RTF_GATEWAY) {
- ND_PRINTK2(KERN_WARNING
- "ICMPv6 Redirect: destination is not a neighbour.\n");
+ ND_PRINTK(2, warn,
+ "Redirect: destination is not a neighbour\n");
goto release;
}
if (!rt->rt6i_peer)
@@ -1575,8 +1526,8 @@ void ndisc_send_redirect(struct sk_buff *skb, const struct in6_addr *target)
if (dev->addr_len) {
struct neighbour *neigh = dst_neigh_lookup(skb_dst(skb), target);
if (!neigh) {
- ND_PRINTK2(KERN_WARNING
- "ICMPv6 Redirect: no neigh for target address\n");
+ ND_PRINTK(2, warn,
+ "Redirect: no neigh for target address\n");
goto release;
}
@@ -1604,9 +1555,9 @@ void ndisc_send_redirect(struct sk_buff *skb, const struct in6_addr *target)
len + hlen + tlen),
1, &err);
if (buff == NULL) {
- ND_PRINTK0(KERN_ERR
- "ICMPv6 Redirect: %s() failed to allocate an skb, err=%d.\n",
- __func__, err);
+ ND_PRINTK(0, err,
+ "Redirect: %s failed to allocate an skb, err=%d\n",
+ __func__, err);
goto release;
}
@@ -1691,16 +1642,14 @@ int ndisc_rcv(struct sk_buff *skb)
__skb_push(skb, skb->data - skb_transport_header(skb));
if (ipv6_hdr(skb)->hop_limit != 255) {
- ND_PRINTK2(KERN_WARNING
- "ICMPv6 NDISC: invalid hop-limit: %d\n",
- ipv6_hdr(skb)->hop_limit);
+ ND_PRINTK(2, warn, "NDISC: invalid hop-limit: %d\n",
+ ipv6_hdr(skb)->hop_limit);
return 0;
}
if (msg->icmph.icmp6_code != 0) {
- ND_PRINTK2(KERN_WARNING
- "ICMPv6 NDISC: invalid ICMPv6 code: %d\n",
- msg->icmph.icmp6_code);
+ ND_PRINTK(2, warn, "NDISC: invalid ICMPv6 code: %d\n",
+ msg->icmph.icmp6_code);
return 0;
}
@@ -1767,11 +1716,7 @@ static void ndisc_warn_deprecated_sysctl(struct ctl_table *ctl,
static int warned;
if (strcmp(warncomm, current->comm) && warned < 5) {
strcpy(warncomm, current->comm);
- printk(KERN_WARNING
- "process `%s' is using deprecated sysctl (%s) "
- "net.ipv6.neigh.%s.%s; "
- "Use net.ipv6.neigh.%s.%s_ms "
- "instead.\n",
+ pr_warn("process `%s' is using deprecated sysctl (%s) net.ipv6.neigh.%s.%s - use net.ipv6.neigh.%s.%s_ms instead\n",
warncomm, func,
dev_name, ctl->procname,
dev_name, ctl->procname);
@@ -1825,9 +1770,9 @@ static int __net_init ndisc_net_init(struct net *net)
err = inet_ctl_sock_create(&sk, PF_INET6,
SOCK_RAW, IPPROTO_ICMPV6, net);
if (err < 0) {
- ND_PRINTK0(KERN_ERR
- "ICMPv6 NDISC: Failed to initialize the control socket (err %d).\n",
- err);
+ ND_PRINTK(0, err,
+ "NDISC: Failed to initialize the control socket (err %d)\n",
+ err);
return err;
}
diff --git a/net/ipv6/netfilter/ip6_tables.c b/net/ipv6/netfilter/ip6_tables.c
index d4e350f72bbb..d7cb04506c3d 100644
--- a/net/ipv6/netfilter/ip6_tables.c
+++ b/net/ipv6/netfilter/ip6_tables.c
@@ -133,7 +133,7 @@ ip6_packet_match(const struct sk_buff *skb,
int protohdr;
unsigned short _frag_off;
- protohdr = ipv6_find_hdr(skb, protoff, -1, &_frag_off);
+ protohdr = ipv6_find_hdr(skb, protoff, -1, &_frag_off, NULL);
if (protohdr < 0) {
if (_frag_off == 0)
*hotdrop = true;
@@ -181,8 +181,7 @@ ip6_checkentry(const struct ip6t_ip6 *ipv6)
static unsigned int
ip6t_error(struct sk_buff *skb, const struct xt_action_param *par)
{
- if (net_ratelimit())
- pr_info("error: `%s'\n", (const char *)par->targinfo);
+ net_info_ratelimited("error: `%s'\n", (const char *)par->targinfo);
return NF_DROP;
}
@@ -362,6 +361,7 @@ ip6t_do_table(struct sk_buff *skb,
const struct xt_entry_match *ematch;
IP_NF_ASSERT(e);
+ acpar.thoff = 0;
if (!ip6_packet_match(skb, indev, outdev, &e->ipv6,
&acpar.thoff, &acpar.fragoff, &acpar.hotdrop)) {
no_match:
@@ -2278,6 +2278,10 @@ static void __exit ip6_tables_fini(void)
* if target < 0. "last header" is transport protocol header, ESP, or
* "No next header".
*
+ * Note that *offset is used as an input/output parameter, and if it is not
+ * zero, then it must be a valid offset to an inner IPv6 header. This can be
+ * used to explore inner IPv6 headers, e.g. in ICMPv6 error messages.
+ *
* If target header is found, its offset is set in *offset and return protocol
* number. Otherwise, return -1.
*
@@ -2289,17 +2293,33 @@ static void __exit ip6_tables_fini(void)
* *offset is meaningless and fragment offset is stored in *fragoff if fragoff
* isn't NULL.
*
+ * If flags is not NULL and the packet is a fragment, then the IP6T_FH_F_FRAG
+ * flag will be set in *flags. If the caller sets IP6T_FH_F_AUTH in *flags and
+ * target < 0, then this function will stop at the AH header.
*/
int ipv6_find_hdr(const struct sk_buff *skb, unsigned int *offset,
- int target, unsigned short *fragoff)
+ int target, unsigned short *fragoff, int *flags)
{
unsigned int start = skb_network_offset(skb) + sizeof(struct ipv6hdr);
u8 nexthdr = ipv6_hdr(skb)->nexthdr;
- unsigned int len = skb->len - start;
+ unsigned int len;
if (fragoff)
*fragoff = 0;
+ if (*offset) {
+ struct ipv6hdr _ip6, *ip6;
+
+ ip6 = skb_header_pointer(skb, *offset, sizeof(_ip6), &_ip6);
+ if (!ip6 || (ip6->version != 6)) {
+ printk(KERN_ERR "IPv6 header not found\n");
+ return -EBADMSG;
+ }
+ start = *offset + sizeof(struct ipv6hdr);
+ nexthdr = ip6->nexthdr;
+ }
+ len = skb->len - start;
+
while (nexthdr != target) {
struct ipv6_opt_hdr _hdr, *hp;
unsigned int hdrlen;
@@ -2316,6 +2336,9 @@ int ipv6_find_hdr(const struct sk_buff *skb, unsigned int *offset,
if (nexthdr == NEXTHDR_FRAGMENT) {
unsigned short _frag_off;
__be16 *fp;
+
+ if (flags) /* Indicate that this is a fragment */
+ *flags |= IP6T_FH_F_FRAG;
fp = skb_header_pointer(skb,
start+offsetof(struct frag_hdr,
frag_off),
@@ -2336,9 +2359,11 @@ int ipv6_find_hdr(const struct sk_buff *skb, unsigned int *offset,
return -ENOENT;
}
hdrlen = 8;
- } else if (nexthdr == NEXTHDR_AUTH)
+ } else if (nexthdr == NEXTHDR_AUTH) {
+ if (flags && (*flags & IP6T_FH_F_AUTH) && (target < 0))
+ break;
hdrlen = (hp->hdrlen + 2) << 2;
- else
+ } else
hdrlen = ipv6_optlen(hp);
nexthdr = hp->nexthdr;
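
With the extra argument, callers that only want a specific header pass NULL for flags and must initialise *offset themselves (0 means "start at the outer IPv6 header"). A minimal caller sketch, mirroring the ip6t_ah.c change below:

    unsigned int ptr = 0;   /* start the walk at the outer IPv6 header */
    int err;

    err = ipv6_find_hdr(skb, &ptr, NEXTHDR_AUTH, NULL, NULL);
    if (err < 0) {
            /* -ENOENT: no AH header present; other errors: malformed packet */
    } else {
            /* err == NEXTHDR_AUTH, and ptr now holds the offset of the AH header */
    }
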
diff --git a/net/ipv6/netfilter/ip6t_REJECT.c b/net/ipv6/netfilter/ip6t_REJECT.c
index aad2fa41cf46..fd4fb34c51c7 100644
--- a/net/ipv6/netfilter/ip6t_REJECT.c
+++ b/net/ipv6/netfilter/ip6t_REJECT.c
@@ -114,8 +114,7 @@ static void send_reset(struct net *net, struct sk_buff *oldskb)
GFP_ATOMIC);
if (!nskb) {
- if (net_ratelimit())
- pr_debug("cannot alloc skb\n");
+ net_dbg_ratelimited("cannot alloc skb\n");
dst_release(dst);
return;
}
@@ -210,8 +209,7 @@ reject_tg6(struct sk_buff *skb, const struct xt_action_param *par)
send_reset(net, skb);
break;
default:
- if (net_ratelimit())
- pr_info("case %u not handled yet\n", reject->with);
+ net_info_ratelimited("case %u not handled yet\n", reject->with);
break;
}
diff --git a/net/ipv6/netfilter/ip6t_ah.c b/net/ipv6/netfilter/ip6t_ah.c
index 89cccc5a9c92..04099ab7d2e3 100644
--- a/net/ipv6/netfilter/ip6t_ah.c
+++ b/net/ipv6/netfilter/ip6t_ah.c
@@ -41,11 +41,11 @@ static bool ah_mt6(const struct sk_buff *skb, struct xt_action_param *par)
struct ip_auth_hdr _ah;
const struct ip_auth_hdr *ah;
const struct ip6t_ah *ahinfo = par->matchinfo;
- unsigned int ptr;
+ unsigned int ptr = 0;
unsigned int hdrlen = 0;
int err;
- err = ipv6_find_hdr(skb, &ptr, NEXTHDR_AUTH, NULL);
+ err = ipv6_find_hdr(skb, &ptr, NEXTHDR_AUTH, NULL, NULL);
if (err < 0) {
if (err != -ENOENT)
par->hotdrop = true;
diff --git a/net/ipv6/netfilter/ip6t_frag.c b/net/ipv6/netfilter/ip6t_frag.c
index eda898fda6ca..3b5735e56bfe 100644
--- a/net/ipv6/netfilter/ip6t_frag.c
+++ b/net/ipv6/netfilter/ip6t_frag.c
@@ -40,10 +40,10 @@ frag_mt6(const struct sk_buff *skb, struct xt_action_param *par)
struct frag_hdr _frag;
const struct frag_hdr *fh;
const struct ip6t_frag *fraginfo = par->matchinfo;
- unsigned int ptr;
+ unsigned int ptr = 0;
int err;
- err = ipv6_find_hdr(skb, &ptr, NEXTHDR_FRAGMENT, NULL);
+ err = ipv6_find_hdr(skb, &ptr, NEXTHDR_FRAGMENT, NULL, NULL);
if (err < 0) {
if (err != -ENOENT)
par->hotdrop = true;
diff --git a/net/ipv6/netfilter/ip6t_hbh.c b/net/ipv6/netfilter/ip6t_hbh.c
index 59df051eaef6..01df142bb027 100644
--- a/net/ipv6/netfilter/ip6t_hbh.c
+++ b/net/ipv6/netfilter/ip6t_hbh.c
@@ -50,7 +50,7 @@ hbh_mt6(const struct sk_buff *skb, struct xt_action_param *par)
const struct ipv6_opt_hdr *oh;
const struct ip6t_opts *optinfo = par->matchinfo;
unsigned int temp;
- unsigned int ptr;
+ unsigned int ptr = 0;
unsigned int hdrlen = 0;
bool ret = false;
u8 _opttype;
@@ -62,7 +62,7 @@ hbh_mt6(const struct sk_buff *skb, struct xt_action_param *par)
err = ipv6_find_hdr(skb, &ptr,
(par->match == &hbh_mt6_reg[0]) ?
- NEXTHDR_HOP : NEXTHDR_DEST, NULL);
+ NEXTHDR_HOP : NEXTHDR_DEST, NULL, NULL);
if (err < 0) {
if (err != -ENOENT)
par->hotdrop = true;
diff --git a/net/ipv6/netfilter/ip6t_rt.c b/net/ipv6/netfilter/ip6t_rt.c
index d8488c50a8e0..2c99b94eeca3 100644
--- a/net/ipv6/netfilter/ip6t_rt.c
+++ b/net/ipv6/netfilter/ip6t_rt.c
@@ -42,14 +42,14 @@ static bool rt_mt6(const struct sk_buff *skb, struct xt_action_param *par)
const struct ipv6_rt_hdr *rh;
const struct ip6t_rt *rtinfo = par->matchinfo;
unsigned int temp;
- unsigned int ptr;
+ unsigned int ptr = 0;
unsigned int hdrlen = 0;
bool ret = false;
struct in6_addr _addr;
const struct in6_addr *ap;
int err;
- err = ipv6_find_hdr(skb, &ptr, NEXTHDR_ROUTING, NULL);
+ err = ipv6_find_hdr(skb, &ptr, NEXTHDR_ROUTING, NULL, NULL);
if (err < 0) {
if (err != -ENOENT)
par->hotdrop = true;
diff --git a/net/ipv6/netfilter/ip6table_mangle.c b/net/ipv6/netfilter/ip6table_mangle.c
index 00d19173db7e..4d782405f125 100644
--- a/net/ipv6/netfilter/ip6table_mangle.c
+++ b/net/ipv6/netfilter/ip6table_mangle.c
@@ -42,8 +42,7 @@ ip6t_mangle_out(struct sk_buff *skb, const struct net_device *out)
/* root is playing with raw sockets. */
if (skb->len < sizeof(struct iphdr) ||
ip_hdrlen(skb) < sizeof(struct iphdr)) {
- if (net_ratelimit())
- pr_warning("ip6t_hook: happy cracking.\n");
+ net_warn_ratelimited("ip6t_hook: happy cracking\n");
return NF_ACCEPT;
}
#endif
diff --git a/net/ipv6/netfilter/nf_conntrack_l3proto_ipv6.c b/net/ipv6/netfilter/nf_conntrack_l3proto_ipv6.c
index fe925e492520..3224ef90a21a 100644
--- a/net/ipv6/netfilter/nf_conntrack_l3proto_ipv6.c
+++ b/net/ipv6/netfilter/nf_conntrack_l3proto_ipv6.c
@@ -232,8 +232,7 @@ static unsigned int ipv6_conntrack_local(unsigned int hooknum,
{
/* root is playing with raw sockets. */
if (skb->len < sizeof(struct ipv6hdr)) {
- if (net_ratelimit())
- pr_notice("ipv6_conntrack_local: packet too short\n");
+ net_notice_ratelimited("ipv6_conntrack_local: packet too short\n");
return NF_ACCEPT;
}
return __ipv6_conntrack_in(dev_net(out), hooknum, skb, okfn);
diff --git a/net/ipv6/netfilter/nf_conntrack_reasm.c b/net/ipv6/netfilter/nf_conntrack_reasm.c
index 48a2be1b7c70..c9c78c2e666b 100644
--- a/net/ipv6/netfilter/nf_conntrack_reasm.c
+++ b/net/ipv6/netfilter/nf_conntrack_reasm.c
@@ -444,12 +444,11 @@ nf_ct_frag6_reasm(struct nf_ct_frag6_queue *fq, struct net_device *dev)
return head;
out_oversize:
- if (net_ratelimit())
- printk(KERN_DEBUG "nf_ct_frag6_reasm: payload len = %d\n", payload_len);
+ net_dbg_ratelimited("nf_ct_frag6_reasm: payload len = %d\n",
+ payload_len);
goto out_fail;
out_oom:
- if (net_ratelimit())
- printk(KERN_DEBUG "nf_ct_frag6_reasm: no memory for reassembly\n");
+ net_dbg_ratelimited("nf_ct_frag6_reasm: no memory for reassembly\n");
out_fail:
return NULL;
}
diff --git a/net/ipv6/raw.c b/net/ipv6/raw.c
index 5bddea778840..93d69836fded 100644
--- a/net/ipv6/raw.c
+++ b/net/ipv6/raw.c
@@ -72,7 +72,7 @@ static struct sock *__raw_v6_lookup(struct net *net, struct sock *sk,
const struct in6_addr *rmt_addr, int dif)
{
struct hlist_node *node;
- int is_multicast = ipv6_addr_is_multicast(loc_addr);
+ bool is_multicast = ipv6_addr_is_multicast(loc_addr);
sk_for_each_from(sk, node)
if (inet_sk(sk)->inet_num == num) {
@@ -153,12 +153,12 @@ EXPORT_SYMBOL(rawv6_mh_filter_unregister);
*
* Caller owns SKB so we must make clones.
*/
-static int ipv6_raw_deliver(struct sk_buff *skb, int nexthdr)
+static bool ipv6_raw_deliver(struct sk_buff *skb, int nexthdr)
{
const struct in6_addr *saddr;
const struct in6_addr *daddr;
struct sock *sk;
- int delivered = 0;
+ bool delivered = false;
__u8 hash;
struct net *net;
@@ -179,7 +179,7 @@ static int ipv6_raw_deliver(struct sk_buff *skb, int nexthdr)
while (sk) {
int filtered;
- delivered = 1;
+ delivered = true;
switch (nexthdr) {
case IPPROTO_ICMPV6:
filtered = icmpv6_filter(sk, skb);
@@ -225,7 +225,7 @@ out:
return delivered;
}
-int raw6_local_deliver(struct sk_buff *skb, int nexthdr)
+bool raw6_local_deliver(struct sk_buff *skb, int nexthdr)
{
struct sock *raw_sk;
diff --git a/net/ipv6/reassembly.c b/net/ipv6/reassembly.c
index 54c5d2b704df..4ff9af628e72 100644
--- a/net/ipv6/reassembly.c
+++ b/net/ipv6/reassembly.c
@@ -134,15 +134,16 @@ static unsigned int ip6_hashfn(struct inet_frag_queue *q)
return inet6_hash_frag(fq->id, &fq->saddr, &fq->daddr, ip6_frags.rnd);
}
-int ip6_frag_match(struct inet_frag_queue *q, void *a)
+bool ip6_frag_match(struct inet_frag_queue *q, void *a)
{
struct frag_queue *fq;
struct ip6_create_arg *arg = a;
fq = container_of(q, struct frag_queue, q);
- return (fq->id == arg->id && fq->user == arg->user &&
- ipv6_addr_equal(&fq->saddr, arg->src) &&
- ipv6_addr_equal(&fq->daddr, arg->dst));
+ return fq->id == arg->id &&
+ fq->user == arg->user &&
+ ipv6_addr_equal(&fq->saddr, arg->src) &&
+ ipv6_addr_equal(&fq->daddr, arg->dst);
}
EXPORT_SYMBOL(ip6_frag_match);
@@ -414,6 +415,7 @@ static int ip6_frag_reasm(struct frag_queue *fq, struct sk_buff *prev,
struct sk_buff *fp, *head = fq->q.fragments;
int payload_len;
unsigned int nhoff;
+ int sum_truesize;
fq_kill(fq);
@@ -483,20 +485,33 @@ static int ip6_frag_reasm(struct frag_queue *fq, struct sk_buff *prev,
head->mac_header += sizeof(struct frag_hdr);
head->network_header += sizeof(struct frag_hdr);
- skb_shinfo(head)->frag_list = head->next;
skb_reset_transport_header(head);
skb_push(head, head->data - skb_network_header(head));
- for (fp=head->next; fp; fp = fp->next) {
- head->data_len += fp->len;
- head->len += fp->len;
+ sum_truesize = head->truesize;
+ for (fp = head->next; fp;) {
+ bool headstolen;
+ int delta;
+ struct sk_buff *next = fp->next;
+
+ sum_truesize += fp->truesize;
if (head->ip_summed != fp->ip_summed)
head->ip_summed = CHECKSUM_NONE;
else if (head->ip_summed == CHECKSUM_COMPLETE)
head->csum = csum_add(head->csum, fp->csum);
- head->truesize += fp->truesize;
+
+ if (skb_try_coalesce(head, fp, &headstolen, &delta)) {
+ kfree_skb_partial(fp, headstolen);
+ } else {
+ if (!skb_shinfo(head)->frag_list)
+ skb_shinfo(head)->frag_list = fp;
+ head->data_len += fp->len;
+ head->len += fp->len;
+ head->truesize += fp->truesize;
+ }
+ fp = next;
}
- atomic_sub(head->truesize, &fq->q.net->mem);
+ atomic_sub(sum_truesize, &fq->q.net->mem);
head->next = NULL;
head->dev = dev;
@@ -518,12 +533,10 @@ static int ip6_frag_reasm(struct frag_queue *fq, struct sk_buff *prev,
return 1;
out_oversize:
- if (net_ratelimit())
- printk(KERN_DEBUG "ip6_frag_reasm: payload len = %d\n", payload_len);
+ net_dbg_ratelimited("ip6_frag_reasm: payload len = %d\n", payload_len);
goto out_fail;
out_oom:
- if (net_ratelimit())
- printk(KERN_DEBUG "ip6_frag_reasm: no memory for reassembly\n");
+ net_dbg_ratelimited("ip6_frag_reasm: no memory for reassembly\n");
out_fail:
rcu_read_lock();
IP6_INC_STATS_BH(net, __in6_dev_get(dev), IPSTATS_MIB_REASMFAILS);
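
Because a fragment that is successfully coalesced may be freed on the spot, head->truesize no longer reflects everything that was charged to the fragment queue, so the loop keeps its own running total and uncharges it in one step. The accounting alone looks roughly like this (names as in the hunk above):

    sum_truesize = head->truesize;          /* memory charged for the head skb */
    for (fp = head->next; fp; fp = next) {
            next = fp->next;
            sum_truesize += fp->truesize;   /* charged for this fragment too */
            /* ... coalesce fp into head, or chain it on head's frag_list ... */
    }
    atomic_sub(sum_truesize, &fq->q.net->mem);      /* single bulk uncharge */
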
diff --git a/net/ipv6/route.c b/net/ipv6/route.c
index 0aefc36f74c7..999a982ad3fd 100644
--- a/net/ipv6/route.c
+++ b/net/ipv6/route.c
@@ -24,6 +24,8 @@
* Fixed routing subtrees.
*/
+#define pr_fmt(fmt) "IPv6: " fmt
+
#include <linux/capability.h>
#include <linux/errno.h>
#include <linux/export.h>
@@ -331,22 +333,22 @@ static void ip6_dst_ifdown(struct dst_entry *dst, struct net_device *dev,
}
}
-static __inline__ int rt6_check_expired(const struct rt6_info *rt)
+static bool rt6_check_expired(const struct rt6_info *rt)
{
struct rt6_info *ort = NULL;
if (rt->rt6i_flags & RTF_EXPIRES) {
if (time_after(jiffies, rt->dst.expires))
- return 1;
+ return true;
} else if (rt->dst.from) {
ort = (struct rt6_info *) rt->dst.from;
return (ort->rt6i_flags & RTF_EXPIRES) &&
time_after(jiffies, ort->dst.expires);
}
- return 0;
+ return false;
}
-static inline int rt6_need_strict(const struct in6_addr *daddr)
+static bool rt6_need_strict(const struct in6_addr *daddr)
{
return ipv6_addr_type(daddr) &
(IPV6_ADDR_MULTICAST | IPV6_ADDR_LINKLOCAL | IPV6_ADDR_LOOPBACK);
@@ -794,9 +796,7 @@ static struct rt6_info *rt6_alloc_cow(struct rt6_info *ort,
goto retry;
}
- if (net_ratelimit())
- printk(KERN_WARNING
- "ipv6: Neighbour table overflow.\n");
+ net_warn_ratelimited("Neighbour table overflow\n");
dst_free(&rt->dst);
return NULL;
}
@@ -1282,7 +1282,7 @@ int ip6_route_add(struct fib6_config *cfg)
!(cfg->fc_nlinfo.nlh->nlmsg_flags & NLM_F_CREATE)) {
table = fib6_get_table(net, cfg->fc_table);
if (!table) {
- printk(KERN_WARNING "IPv6: NLM_F_CREATE should be specified when creating new route\n");
+ pr_warn("NLM_F_CREATE should be specified when creating new route\n");
table = fib6_new_table(net, cfg->fc_table);
}
} else {
@@ -1643,9 +1643,7 @@ void rt6_redirect(const struct in6_addr *dest, const struct in6_addr *src,
rt = ip6_route_redirect(dest, src, saddr, neigh->dev);
if (rt == net->ipv6.ip6_null_entry) {
- if (net_ratelimit())
- printk(KERN_DEBUG "rt6_redirect: source isn't a valid nexthop "
- "for redirect target\n");
+ net_dbg_ratelimited("rt6_redirect: source isn't a valid nexthop for redirect target\n");
goto out;
}
@@ -2106,9 +2104,7 @@ struct rt6_info *addrconf_dst_alloc(struct inet6_dev *idev,
int err;
if (!rt) {
- if (net_ratelimit())
- pr_warning("IPv6: Maximum number of routes reached,"
- " consider increasing route/max_size.\n");
+ net_warn_ratelimited("Maximum number of routes reached, consider increasing route/max_size\n");
return ERR_PTR(-ENOMEM);
}
diff --git a/net/ipv6/sit.c b/net/ipv6/sit.c
index e5fef943e30a..60415711563f 100644
--- a/net/ipv6/sit.c
+++ b/net/ipv6/sit.c
@@ -17,6 +17,8 @@
* Fred Templin <fred.l.templin@boeing.com>: isatap support
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/module.h>
#include <linux/capability.h>
#include <linux/errno.h>
@@ -702,8 +704,7 @@ static netdev_tx_t ipip6_tunnel_xmit(struct sk_buff *skb,
neigh = dst_neigh_lookup(skb_dst(skb), &iph6->daddr);
if (neigh == NULL) {
- if (net_ratelimit())
- printk(KERN_DEBUG "sit: nexthop == NULL\n");
+ net_dbg_ratelimited("sit: nexthop == NULL\n");
goto tx_error;
}
@@ -732,8 +733,7 @@ static netdev_tx_t ipip6_tunnel_xmit(struct sk_buff *skb,
neigh = dst_neigh_lookup(skb_dst(skb), &iph6->daddr);
if (neigh == NULL) {
- if (net_ratelimit())
- printk(KERN_DEBUG "sit: nexthop == NULL\n");
+ net_dbg_ratelimited("sit: nexthop == NULL\n");
goto tx_error;
}
@@ -1303,7 +1303,7 @@ static int __init sit_init(void)
{
int err;
- printk(KERN_INFO "IPv6 over IPv4 tunneling driver\n");
+ pr_info("IPv6 over IPv4 tunneling driver\n");
err = register_pernet_device(&sit_net_ops);
if (err < 0)
@@ -1311,7 +1311,7 @@ static int __init sit_init(void)
err = xfrm4_tunnel_register(&sit_handler, AF_INET6);
if (err < 0) {
unregister_pernet_device(&sit_net_ops);
- printk(KERN_INFO "sit init: Can't add protocol\n");
+ pr_info("%s: can't add protocol\n", __func__);
}
return err;
}
diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
index 078d039e8fd2..554d5999abc4 100644
--- a/net/ipv6/tcp_ipv6.c
+++ b/net/ipv6/tcp_ipv6.c
@@ -723,12 +723,10 @@ static int tcp_v6_inbound_md5_hash(struct sock *sk, const struct sk_buff *skb)
NULL, NULL, skb);
if (genhash || memcmp(hash_location, newhash, 16) != 0) {
- if (net_ratelimit()) {
- printk(KERN_INFO "MD5 Hash %s for [%pI6c]:%u->[%pI6c]:%u\n",
- genhash ? "failed" : "mismatch",
- &ip6h->saddr, ntohs(th->source),
- &ip6h->daddr, ntohs(th->dest));
- }
+ net_info_ratelimited("MD5 Hash %s for [%pI6c]:%u->[%pI6c]:%u\n",
+ genhash ? "failed" : "mismatch",
+ &ip6h->saddr, ntohs(th->source),
+ &ip6h->daddr, ntohs(th->dest));
return 1;
}
return 0;
@@ -1057,7 +1055,7 @@ static int tcp_v6_conn_request(struct sock *sk, struct sk_buff *skb)
struct tcp_sock *tp = tcp_sk(sk);
__u32 isn = TCP_SKB_CB(skb)->when;
struct dst_entry *dst = NULL;
- int want_cookie = 0;
+ bool want_cookie = false;
if (skb->protocol == htons(ETH_P_IP))
return tcp_v4_conn_request(sk, skb);
@@ -1118,7 +1116,7 @@ static int tcp_v6_conn_request(struct sock *sk, struct sk_buff *skb)
while (l-- > 0)
*c++ ^= *hash_location++;
- want_cookie = 0; /* not our kind of cookie */
+ want_cookie = false; /* not our kind of cookie */
tmp_ext.cookie_out_never = 0; /* false */
tmp_ext.cookie_plus = tmp_opt.cookie_plus;
} else if (!tp->rx_opt.cookie_in_always) {
diff --git a/net/ipv6/tunnel6.c b/net/ipv6/tunnel6.c
index 4f3cec12aa85..4b0f50d9a962 100644
--- a/net/ipv6/tunnel6.c
+++ b/net/ipv6/tunnel6.c
@@ -19,6 +19,8 @@
* YOSHIFUJI Hideaki <yoshfuji@linux-ipv6.org>
*/
+#define pr_fmt(fmt) "IPv6: " fmt
+
#include <linux/icmpv6.h>
#include <linux/init.h>
#include <linux/module.h>
@@ -160,11 +162,11 @@ static const struct inet6_protocol tunnel46_protocol = {
static int __init tunnel6_init(void)
{
if (inet6_add_protocol(&tunnel6_protocol, IPPROTO_IPV6)) {
- printk(KERN_ERR "tunnel6 init(): can't add protocol\n");
+ pr_err("%s: can't add protocol\n", __func__);
return -EAGAIN;
}
if (inet6_add_protocol(&tunnel46_protocol, IPPROTO_IPIP)) {
- printk(KERN_ERR "tunnel6 init(): can't add protocol\n");
+ pr_err("%s: can't add protocol\n", __func__);
inet6_del_protocol(&tunnel6_protocol, IPPROTO_IPV6);
return -EAGAIN;
}
@@ -174,9 +176,9 @@ static int __init tunnel6_init(void)
static void __exit tunnel6_fini(void)
{
if (inet6_del_protocol(&tunnel46_protocol, IPPROTO_IPIP))
- printk(KERN_ERR "tunnel6 close: can't remove protocol\n");
+ pr_err("%s: can't remove protocol\n", __func__);
if (inet6_del_protocol(&tunnel6_protocol, IPPROTO_IPV6))
- printk(KERN_ERR "tunnel6 close: can't remove protocol\n");
+ pr_err("%s: can't remove protocol\n", __func__);
}
module_init(tunnel6_init);
diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
index c1d91a713e8e..f05099fc5901 100644
--- a/net/ipv6/udp.c
+++ b/net/ipv6/udp.c
@@ -103,7 +103,7 @@ int udp_v6_get_port(struct sock *sk, unsigned short snum)
{
unsigned int hash2_nulladdr =
udp6_portaddr_hash(sock_net(sk), &in6addr_any, snum);
- unsigned int hash2_partial =
+ unsigned int hash2_partial =
udp6_portaddr_hash(sock_net(sk), &inet6_sk(sk)->rcv_saddr, 0);
/* precompute partial secondary hash */
@@ -349,7 +349,7 @@ int udpv6_recvmsg(struct kiocb *iocb, struct sock *sk,
bool slow;
if (addr_len)
- *addr_len=sizeof(struct sockaddr_in6);
+ *addr_len = sizeof(struct sockaddr_in6);
if (flags & MSG_ERRQUEUE)
return ipv6_recv_error(sk, msg, len);
@@ -1379,7 +1379,7 @@ static struct sk_buff *udp6_ufo_fragment(struct sk_buff *skb,
* do checksum of UDP packets sent as multiple IP fragments.
*/
offset = skb_checksum_start_offset(skb);
- csum = skb_checksum(skb, offset, skb->len- offset, 0);
+ csum = skb_checksum(skb, offset, skb->len - offset, 0);
offset += skb->csum_offset;
*(__sum16 *)(skb->data + offset) = csum_fold(csum);
skb->ip_summed = CHECKSUM_NONE;
diff --git a/net/ipx/af_ipx.c b/net/ipx/af_ipx.c
index 9680226640ef..dfd6faaf0ea7 100644
--- a/net/ipx/af_ipx.c
+++ b/net/ipx/af_ipx.c
@@ -983,10 +983,6 @@ static int ipxitf_create(struct ipx_interface_definition *idef)
goto out;
switch (idef->ipx_dlink_type) {
- case IPX_FRAME_TR_8022:
- printk(KERN_WARNING "IPX frame type 802.2TR is "
- "obsolete Use 802.2 instead.\n");
- /* fall through */
case IPX_FRAME_8022:
dlink_type = htons(ETH_P_802_2);
datalink = p8022_datalink;
@@ -996,10 +992,7 @@ static int ipxitf_create(struct ipx_interface_definition *idef)
dlink_type = htons(ETH_P_IPX);
datalink = pEII_datalink;
break;
- } else
- printk(KERN_WARNING "IPX frame type EtherII over "
- "token-ring is obsolete. Use SNAP "
- "instead.\n");
+ }
/* fall through */
case IPX_FRAME_SNAP:
dlink_type = htons(ETH_P_SNAP);
@@ -1275,7 +1268,6 @@ const char *ipx_frame_name(__be16 frame)
case ETH_P_802_2: rc = "802.2"; break;
case ETH_P_SNAP: rc = "SNAP"; break;
case ETH_P_802_3: rc = "802.3"; break;
- case ETH_P_TR_802_2: rc = "802.2TR"; break;
}
return rc;
@@ -1909,9 +1901,7 @@ static int ipx_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
(const unsigned short __user *)argp);
break;
case SIOCGSTAMP:
- rc = -EINVAL;
- if (sk)
- rc = sock_get_timestamp(sk, argp);
+ rc = sock_get_timestamp(sk, argp);
break;
case SIOCGIFDSTADDR:
case SIOCSIFDSTADDR:
diff --git a/net/l2tp/l2tp_core.c b/net/l2tp/l2tp_core.c
index 456b52d8f6d8..32b2155e7ab4 100644
--- a/net/l2tp/l2tp_core.c
+++ b/net/l2tp/l2tp_core.c
@@ -18,6 +18,8 @@
* published by the Free Software Foundation.
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/module.h>
#include <linux/string.h>
#include <linux/list.h>
@@ -86,12 +88,6 @@
/* Default trace flags */
#define L2TP_DEFAULT_DEBUG_FLAGS 0
-#define PRINTK(_mask, _type, _lvl, _fmt, args...) \
- do { \
- if ((_mask) & (_type)) \
- printk(_lvl "L2TP: " _fmt, ##args); \
- } while (0)
-
/* Private data stored for received packets in the skb.
*/
struct l2tp_skb_cb {
@@ -141,14 +137,20 @@ static inline void l2tp_tunnel_dec_refcount_1(struct l2tp_tunnel *tunnel)
l2tp_tunnel_free(tunnel);
}
#ifdef L2TP_REFCNT_DEBUG
-#define l2tp_tunnel_inc_refcount(_t) do { \
- printk(KERN_DEBUG "l2tp_tunnel_inc_refcount: %s:%d %s: cnt=%d\n", __func__, __LINE__, (_t)->name, atomic_read(&_t->ref_count)); \
- l2tp_tunnel_inc_refcount_1(_t); \
- } while (0)
-#define l2tp_tunnel_dec_refcount(_t) do { \
- printk(KERN_DEBUG "l2tp_tunnel_dec_refcount: %s:%d %s: cnt=%d\n", __func__, __LINE__, (_t)->name, atomic_read(&_t->ref_count)); \
- l2tp_tunnel_dec_refcount_1(_t); \
- } while (0)
+#define l2tp_tunnel_inc_refcount(_t) \
+do { \
+ pr_debug("l2tp_tunnel_inc_refcount: %s:%d %s: cnt=%d\n", \
+ __func__, __LINE__, (_t)->name, \
+ atomic_read(&_t->ref_count)); \
+ l2tp_tunnel_inc_refcount_1(_t); \
+} while (0)
+#define l2tp_tunnel_dec_refcount(_t) \
+do { \
+ pr_debug("l2tp_tunnel_dec_refcount: %s:%d %s: cnt=%d\n", \
+ __func__, __LINE__, (_t)->name, \
+ atomic_read(&_t->ref_count)); \
+ l2tp_tunnel_dec_refcount_1(_t); \
+} while (0)
#else
#define l2tp_tunnel_inc_refcount(t) l2tp_tunnel_inc_refcount_1(t)
#define l2tp_tunnel_dec_refcount(t) l2tp_tunnel_dec_refcount_1(t)
@@ -337,10 +339,10 @@ static void l2tp_recv_queue_skb(struct l2tp_session *session, struct sk_buff *sk
skb_queue_walk_safe(&session->reorder_q, skbp, tmp) {
if (L2TP_SKB_CB(skbp)->ns > ns) {
__skb_queue_before(&session->reorder_q, skbp, skb);
- PRINTK(session->debug, L2TP_MSG_SEQ, KERN_DEBUG,
- "%s: pkt %hu, inserted before %hu, reorder_q len=%d\n",
- session->name, ns, L2TP_SKB_CB(skbp)->ns,
- skb_queue_len(&session->reorder_q));
+ l2tp_dbg(session, L2TP_MSG_SEQ,
+ "%s: pkt %hu, inserted before %hu, reorder_q len=%d\n",
+ session->name, ns, L2TP_SKB_CB(skbp)->ns,
+ skb_queue_len(&session->reorder_q));
u64_stats_update_begin(&sstats->syncp);
sstats->rx_oos_packets++;
u64_stats_update_end(&sstats->syncp);
@@ -386,8 +388,8 @@ static void l2tp_recv_dequeue_skb(struct l2tp_session *session, struct sk_buff *
else
session->nr &= 0xffffff;
- PRINTK(session->debug, L2TP_MSG_SEQ, KERN_DEBUG,
- "%s: updated nr to %hu\n", session->name, session->nr);
+ l2tp_dbg(session, L2TP_MSG_SEQ, "%s: updated nr to %hu\n",
+ session->name, session->nr);
}
/* call private receive handler */
@@ -422,12 +424,12 @@ start:
sstats->rx_seq_discards++;
sstats->rx_errors++;
u64_stats_update_end(&sstats->syncp);
- PRINTK(session->debug, L2TP_MSG_SEQ, KERN_DEBUG,
- "%s: oos pkt %u len %d discarded (too old), "
- "waiting for %u, reorder_q_len=%d\n",
- session->name, L2TP_SKB_CB(skb)->ns,
- L2TP_SKB_CB(skb)->length, session->nr,
- skb_queue_len(&session->reorder_q));
+ l2tp_dbg(session, L2TP_MSG_SEQ,
+ "%s: oos pkt %u len %d discarded (too old), waiting for %u, reorder_q_len=%d\n",
+ session->name, L2TP_SKB_CB(skb)->ns,
+ L2TP_SKB_CB(skb)->length, session->nr,
+ skb_queue_len(&session->reorder_q));
+ session->reorder_skip = 1;
__skb_unlink(skb, &session->reorder_q);
kfree_skb(skb);
if (session->deref)
@@ -436,13 +438,20 @@ start:
}
if (L2TP_SKB_CB(skb)->has_seq) {
+ if (session->reorder_skip) {
+ l2tp_dbg(session, L2TP_MSG_SEQ,
+ "%s: advancing nr to next pkt: %u -> %u",
+ session->name, session->nr,
+ L2TP_SKB_CB(skb)->ns);
+ session->reorder_skip = 0;
+ session->nr = L2TP_SKB_CB(skb)->ns;
+ }
if (L2TP_SKB_CB(skb)->ns != session->nr) {
- PRINTK(session->debug, L2TP_MSG_SEQ, KERN_DEBUG,
- "%s: holding oos pkt %u len %d, "
- "waiting for %u, reorder_q_len=%d\n",
- session->name, L2TP_SKB_CB(skb)->ns,
- L2TP_SKB_CB(skb)->length, session->nr,
- skb_queue_len(&session->reorder_q));
+ l2tp_dbg(session, L2TP_MSG_SEQ,
+ "%s: holding oos pkt %u len %d, waiting for %u, reorder_q_len=%d\n",
+ session->name, L2TP_SKB_CB(skb)->ns,
+ L2TP_SKB_CB(skb)->length, session->nr,
+ skb_queue_len(&session->reorder_q));
goto out;
}
}
@@ -586,9 +595,10 @@ void l2tp_recv_common(struct l2tp_session *session, struct sk_buff *skb,
/* Parse and check optional cookie */
if (session->peer_cookie_len > 0) {
if (memcmp(ptr, &session->peer_cookie[0], session->peer_cookie_len)) {
- PRINTK(tunnel->debug, L2TP_MSG_DATA, KERN_INFO,
- "%s: cookie mismatch (%u/%u). Discarding.\n",
- tunnel->name, tunnel->tunnel_id, session->session_id);
+ l2tp_info(tunnel, L2TP_MSG_DATA,
+ "%s: cookie mismatch (%u/%u). Discarding.\n",
+ tunnel->name, tunnel->tunnel_id,
+ session->session_id);
u64_stats_update_begin(&sstats->syncp);
sstats->rx_cookie_discards++;
u64_stats_update_end(&sstats->syncp);
@@ -617,9 +627,9 @@ void l2tp_recv_common(struct l2tp_session *session, struct sk_buff *skb,
L2TP_SKB_CB(skb)->ns = ns;
L2TP_SKB_CB(skb)->has_seq = 1;
- PRINTK(session->debug, L2TP_MSG_SEQ, KERN_DEBUG,
- "%s: recv data ns=%u, nr=%u, session nr=%u\n",
- session->name, ns, nr, session->nr);
+ l2tp_dbg(session, L2TP_MSG_SEQ,
+ "%s: recv data ns=%u, nr=%u, session nr=%u\n",
+ session->name, ns, nr, session->nr);
}
} else if (session->l2specific_type == L2TP_L2SPECTYPE_DEFAULT) {
u32 l2h = ntohl(*(__be32 *) ptr);
@@ -631,9 +641,9 @@ void l2tp_recv_common(struct l2tp_session *session, struct sk_buff *skb,
L2TP_SKB_CB(skb)->ns = ns;
L2TP_SKB_CB(skb)->has_seq = 1;
- PRINTK(session->debug, L2TP_MSG_SEQ, KERN_DEBUG,
- "%s: recv data ns=%u, session nr=%u\n",
- session->name, ns, session->nr);
+ l2tp_dbg(session, L2TP_MSG_SEQ,
+ "%s: recv data ns=%u, session nr=%u\n",
+ session->name, ns, session->nr);
}
}
@@ -646,9 +656,9 @@ void l2tp_recv_common(struct l2tp_session *session, struct sk_buff *skb,
* configure it so.
*/
if ((!session->lns_mode) && (!session->send_seq)) {
- PRINTK(session->debug, L2TP_MSG_SEQ, KERN_INFO,
- "%s: requested to enable seq numbers by LNS\n",
- session->name);
+ l2tp_info(session, L2TP_MSG_SEQ,
+ "%s: requested to enable seq numbers by LNS\n",
+ session->name);
session->send_seq = -1;
l2tp_session_set_header_len(session, tunnel->version);
}
@@ -657,9 +667,9 @@ void l2tp_recv_common(struct l2tp_session *session, struct sk_buff *skb,
* If user has configured mandatory sequence numbers, discard.
*/
if (session->recv_seq) {
- PRINTK(session->debug, L2TP_MSG_SEQ, KERN_WARNING,
- "%s: recv data has no seq numbers when required. "
- "Discarding\n", session->name);
+ l2tp_warn(session, L2TP_MSG_SEQ,
+ "%s: recv data has no seq numbers when required. Discarding.\n",
+ session->name);
u64_stats_update_begin(&sstats->syncp);
sstats->rx_seq_discards++;
u64_stats_update_end(&sstats->syncp);
@@ -672,15 +682,15 @@ void l2tp_recv_common(struct l2tp_session *session, struct sk_buff *skb,
* LAC is broken. Discard the frame.
*/
if ((!session->lns_mode) && (session->send_seq)) {
- PRINTK(session->debug, L2TP_MSG_SEQ, KERN_INFO,
- "%s: requested to disable seq numbers by LNS\n",
- session->name);
+ l2tp_info(session, L2TP_MSG_SEQ,
+ "%s: requested to disable seq numbers by LNS\n",
+ session->name);
session->send_seq = 0;
l2tp_session_set_header_len(session, tunnel->version);
} else if (session->send_seq) {
- PRINTK(session->debug, L2TP_MSG_SEQ, KERN_WARNING,
- "%s: recv data has no seq numbers when required. "
- "Discarding\n", session->name);
+ l2tp_warn(session, L2TP_MSG_SEQ,
+ "%s: recv data has no seq numbers when required. Discarding.\n",
+ session->name);
u64_stats_update_begin(&sstats->syncp);
sstats->rx_seq_discards++;
u64_stats_update_end(&sstats->syncp);
@@ -740,12 +750,11 @@ void l2tp_recv_common(struct l2tp_session *session, struct sk_buff *skb,
u64_stats_update_begin(&sstats->syncp);
sstats->rx_seq_discards++;
u64_stats_update_end(&sstats->syncp);
- PRINTK(session->debug, L2TP_MSG_SEQ, KERN_DEBUG,
- "%s: oos pkt %u len %d discarded, "
- "waiting for %u, reorder_q_len=%d\n",
- session->name, L2TP_SKB_CB(skb)->ns,
- L2TP_SKB_CB(skb)->length, session->nr,
- skb_queue_len(&session->reorder_q));
+ l2tp_dbg(session, L2TP_MSG_SEQ,
+ "%s: oos pkt %u len %d discarded, waiting for %u, reorder_q_len=%d\n",
+ session->name, L2TP_SKB_CB(skb)->ns,
+ L2TP_SKB_CB(skb)->length, session->nr,
+ skb_queue_len(&session->reorder_q));
goto discard;
}
skb_queue_tail(&session->reorder_q, skb);
@@ -791,7 +800,6 @@ static int l2tp_udp_recv_core(struct l2tp_tunnel *tunnel, struct sk_buff *skb,
unsigned char *ptr, *optr;
u16 hdrflags;
u32 tunnel_id, session_id;
- int offset;
u16 version;
int length;
struct l2tp_stats *tstats;
@@ -804,8 +812,9 @@ static int l2tp_udp_recv_core(struct l2tp_tunnel *tunnel, struct sk_buff *skb,
/* Short packet? */
if (!pskb_may_pull(skb, L2TP_HDR_SIZE_SEQ)) {
- PRINTK(tunnel->debug, L2TP_MSG_DATA, KERN_INFO,
- "%s: recv short packet (len=%d)\n", tunnel->name, skb->len);
+ l2tp_info(tunnel, L2TP_MSG_DATA,
+ "%s: recv short packet (len=%d)\n",
+ tunnel->name, skb->len);
goto error;
}
@@ -815,14 +824,8 @@ static int l2tp_udp_recv_core(struct l2tp_tunnel *tunnel, struct sk_buff *skb,
if (!pskb_may_pull(skb, length))
goto error;
- printk(KERN_DEBUG "%s: recv: ", tunnel->name);
-
- offset = 0;
- do {
- printk(" %02X", skb->data[offset]);
- } while (++offset < length);
-
- printk("\n");
+ pr_debug("%s: recv\n", tunnel->name);
+ print_hex_dump_bytes("", DUMP_PREFIX_OFFSET, skb->data, length);
}
/* Point to L2TP header */
@@ -834,9 +837,9 @@ static int l2tp_udp_recv_core(struct l2tp_tunnel *tunnel, struct sk_buff *skb,
/* Check protocol version */
version = hdrflags & L2TP_HDR_VER_MASK;
if (version != tunnel->version) {
- PRINTK(tunnel->debug, L2TP_MSG_DATA, KERN_INFO,
- "%s: recv protocol version mismatch: got %d expected %d\n",
- tunnel->name, version, tunnel->version);
+ l2tp_info(tunnel, L2TP_MSG_DATA,
+ "%s: recv protocol version mismatch: got %d expected %d\n",
+ tunnel->name, version, tunnel->version);
goto error;
}
@@ -845,8 +848,9 @@ static int l2tp_udp_recv_core(struct l2tp_tunnel *tunnel, struct sk_buff *skb,
/* If type is control packet, it is handled by userspace. */
if (hdrflags & L2TP_HDRFLAG_T) {
- PRINTK(tunnel->debug, L2TP_MSG_DATA, KERN_DEBUG,
- "%s: recv control packet, len=%d\n", tunnel->name, length);
+ l2tp_dbg(tunnel, L2TP_MSG_DATA,
+ "%s: recv control packet, len=%d\n",
+ tunnel->name, length);
goto error;
}
@@ -874,9 +878,9 @@ static int l2tp_udp_recv_core(struct l2tp_tunnel *tunnel, struct sk_buff *skb,
session = l2tp_session_find(tunnel->l2tp_net, tunnel, session_id);
if (!session || !session->recv_skb) {
/* Not found? Pass to userspace to deal with */
- PRINTK(tunnel->debug, L2TP_MSG_DATA, KERN_INFO,
- "%s: no session found (%u/%u). Passing up.\n",
- tunnel->name, tunnel_id, session_id);
+ l2tp_info(tunnel, L2TP_MSG_DATA,
+ "%s: no session found (%u/%u). Passing up.\n",
+ tunnel->name, tunnel_id, session_id);
goto error;
}
@@ -916,8 +920,8 @@ int l2tp_udp_encap_recv(struct sock *sk, struct sk_buff *skb)
if (tunnel == NULL)
goto pass_up;
- PRINTK(tunnel->debug, L2TP_MSG_DATA, KERN_DEBUG,
- "%s: received %d bytes\n", tunnel->name, skb->len);
+ l2tp_dbg(tunnel, L2TP_MSG_DATA, "%s: received %d bytes\n",
+ tunnel->name, skb->len);
if (l2tp_udp_recv_core(tunnel, skb, tunnel->recv_payload_hook))
goto pass_up_put;
@@ -959,8 +963,8 @@ static int l2tp_build_l2tpv2_header(struct l2tp_session *session, void *buf)
*bufp++ = 0;
session->ns++;
session->ns &= 0xffff;
- PRINTK(session->debug, L2TP_MSG_SEQ, KERN_DEBUG,
- "%s: updated ns to %u\n", session->name, session->ns);
+ l2tp_dbg(session, L2TP_MSG_SEQ, "%s: updated ns to %u\n",
+ session->name, session->ns);
}
return bufp - optr;
@@ -996,8 +1000,9 @@ static int l2tp_build_l2tpv3_header(struct l2tp_session *session, void *buf)
l2h = 0x40000000 | session->ns;
session->ns++;
session->ns &= 0xffffff;
- PRINTK(session->debug, L2TP_MSG_SEQ, KERN_DEBUG,
- "%s: updated ns to %u\n", session->name, session->ns);
+ l2tp_dbg(session, L2TP_MSG_SEQ,
+ "%s: updated ns to %u\n",
+ session->name, session->ns);
}
*((__be32 *) bufp) = htonl(l2h);
@@ -1020,27 +1025,19 @@ static int l2tp_xmit_core(struct l2tp_session *session, struct sk_buff *skb,
/* Debug */
if (session->send_seq)
- PRINTK(session->debug, L2TP_MSG_DATA, KERN_DEBUG,
- "%s: send %Zd bytes, ns=%u\n", session->name,
- data_len, session->ns - 1);
+ l2tp_dbg(session, L2TP_MSG_DATA, "%s: send %Zd bytes, ns=%u\n",
+ session->name, data_len, session->ns - 1);
else
- PRINTK(session->debug, L2TP_MSG_DATA, KERN_DEBUG,
- "%s: send %Zd bytes\n", session->name, data_len);
+ l2tp_dbg(session, L2TP_MSG_DATA, "%s: send %Zd bytes\n",
+ session->name, data_len);
if (session->debug & L2TP_MSG_DATA) {
- int i;
int uhlen = (tunnel->encap == L2TP_ENCAPTYPE_UDP) ? sizeof(struct udphdr) : 0;
unsigned char *datap = skb->data + uhlen;
- printk(KERN_DEBUG "%s: xmit:", session->name);
- for (i = 0; i < (len - uhlen); i++) {
- printk(" %02X", *datap++);
- if (i == 31) {
- printk(" ...");
- break;
- }
- }
- printk("\n");
+ pr_debug("%s: xmit\n", session->name);
+ print_hex_dump_bytes("", DUMP_PREFIX_OFFSET,
+ datap, min_t(size_t, 32, len - uhlen));
}
/* Queue the packet to IP for output */
@@ -1239,8 +1236,7 @@ static void l2tp_tunnel_destruct(struct sock *sk)
if (tunnel == NULL)
goto end;
- PRINTK(tunnel->debug, L2TP_MSG_CONTROL, KERN_INFO,
- "%s: closing...\n", tunnel->name);
+ l2tp_info(tunnel, L2TP_MSG_CONTROL, "%s: closing...\n", tunnel->name);
/* Close all sessions */
l2tp_tunnel_closeall(tunnel);
@@ -1282,8 +1278,8 @@ static void l2tp_tunnel_closeall(struct l2tp_tunnel *tunnel)
BUG_ON(tunnel == NULL);
- PRINTK(tunnel->debug, L2TP_MSG_CONTROL, KERN_INFO,
- "%s: closing all sessions...\n", tunnel->name);
+ l2tp_info(tunnel, L2TP_MSG_CONTROL, "%s: closing all sessions...\n",
+ tunnel->name);
write_lock_bh(&tunnel->hlist_lock);
for (hash = 0; hash < L2TP_HASH_SIZE; hash++) {
@@ -1291,8 +1287,8 @@ again:
hlist_for_each_safe(walk, tmp, &tunnel->session_hlist[hash]) {
session = hlist_entry(walk, struct l2tp_session, hlist);
- PRINTK(session->debug, L2TP_MSG_CONTROL, KERN_INFO,
- "%s: closing session\n", session->name);
+ l2tp_info(session, L2TP_MSG_CONTROL,
+ "%s: closing session\n", session->name);
hlist_del_init(&session->hlist);
@@ -1345,8 +1341,7 @@ static void l2tp_tunnel_free(struct l2tp_tunnel *tunnel)
BUG_ON(atomic_read(&tunnel->ref_count) != 0);
BUG_ON(tunnel->sock != NULL);
- PRINTK(tunnel->debug, L2TP_MSG_CONTROL, KERN_INFO,
- "%s: free...\n", tunnel->name);
+ l2tp_info(tunnel, L2TP_MSG_CONTROL, "%s: free...\n", tunnel->name);
/* Remove from tunnel list */
spin_lock_bh(&pn->l2tp_tunnel_list_lock);
@@ -1527,7 +1522,7 @@ int l2tp_tunnel_create(struct net *net, int fd, int version, u32 tunnel_id, u32
err = -EBADF;
sock = sockfd_lookup(fd, &err);
if (!sock) {
- printk(KERN_ERR "tunl %hu: sockfd_lookup(fd=%d) returned %d\n",
+ pr_err("tunl %hu: sockfd_lookup(fd=%d) returned %d\n",
tunnel_id, fd, err);
goto err;
}
@@ -1543,7 +1538,7 @@ int l2tp_tunnel_create(struct net *net, int fd, int version, u32 tunnel_id, u32
case L2TP_ENCAPTYPE_UDP:
err = -EPROTONOSUPPORT;
if (sk->sk_protocol != IPPROTO_UDP) {
- printk(KERN_ERR "tunl %hu: fd %d wrong protocol, got %d, expected %d\n",
+ pr_err("tunl %hu: fd %d wrong protocol, got %d, expected %d\n",
tunnel_id, fd, sk->sk_protocol, IPPROTO_UDP);
goto err;
}
@@ -1551,7 +1546,7 @@ int l2tp_tunnel_create(struct net *net, int fd, int version, u32 tunnel_id, u32
case L2TP_ENCAPTYPE_IP:
err = -EPROTONOSUPPORT;
if (sk->sk_protocol != IPPROTO_L2TP) {
- printk(KERN_ERR "tunl %hu: fd %d wrong protocol, got %d, expected %d\n",
+ pr_err("tunl %hu: fd %d wrong protocol, got %d, expected %d\n",
tunnel_id, fd, sk->sk_protocol, IPPROTO_L2TP);
goto err;
}
@@ -1753,7 +1748,7 @@ struct l2tp_session *l2tp_session_create(int priv_size, struct l2tp_tunnel *tunn
session->session_id = session_id;
session->peer_session_id = peer_session_id;
- session->nr = 1;
+ session->nr = 0;
sprintf(&session->name[0], "sess %u/%u",
tunnel->tunnel_id, session->session_id);
@@ -1859,7 +1854,7 @@ static int __init l2tp_init(void)
if (rc)
goto out;
- printk(KERN_INFO "L2TP core driver, %s\n", L2TP_DRV_VERSION);
+ pr_info("L2TP core driver, %s\n", L2TP_DRV_VERSION);
out:
return rc;
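
The open-coded hex dump loops in l2tp_core.c are replaced by print_hex_dump_bytes(), which emits offset-prefixed hex lines at KERN_DEBUG. A minimal sketch of the converted receive-path dump, assuming the standard <linux/printk.h> helper (the 32-byte cap mirrors the xmit path and is illustrative):

	if (tunnel->debug & L2TP_MSG_DATA) {
		int length = min(32u, skb->len);	/* dump at most 32 bytes */

		if (pskb_may_pull(skb, length)) {
			pr_debug("%s: recv\n", tunnel->name);
			print_hex_dump_bytes("", DUMP_PREFIX_OFFSET,
					     skb->data, length);
		}
	}
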
diff --git a/net/l2tp/l2tp_core.h b/net/l2tp/l2tp_core.h
index 0bf60fc88bb7..a38ec6cdeee1 100644
--- a/net/l2tp/l2tp_core.h
+++ b/net/l2tp/l2tp_core.h
@@ -123,6 +123,7 @@ struct l2tp_session {
* categories */
int reorder_timeout; /* configured reorder timeout
* (in jiffies) */
+ int reorder_skip; /* set if skipping ahead to next nr */
int mtu;
int mru;
enum l2tp_pwtype pwtype;
@@ -260,17 +261,36 @@ static inline void l2tp_session_dec_refcount_1(struct l2tp_session *session)
}
#ifdef L2TP_REFCNT_DEBUG
-#define l2tp_session_inc_refcount(_s) do { \
- printk(KERN_DEBUG "l2tp_session_inc_refcount: %s:%d %s: cnt=%d\n", __func__, __LINE__, (_s)->name, atomic_read(&_s->ref_count)); \
- l2tp_session_inc_refcount_1(_s); \
- } while (0)
-#define l2tp_session_dec_refcount(_s) do { \
- printk(KERN_DEBUG "l2tp_session_dec_refcount: %s:%d %s: cnt=%d\n", __func__, __LINE__, (_s)->name, atomic_read(&_s->ref_count)); \
- l2tp_session_dec_refcount_1(_s); \
- } while (0)
+#define l2tp_session_inc_refcount(_s) \
+do { \
+ pr_debug("l2tp_session_inc_refcount: %s:%d %s: cnt=%d\n", \
+ __func__, __LINE__, (_s)->name, \
+ atomic_read(&_s->ref_count)); \
+ l2tp_session_inc_refcount_1(_s); \
+} while (0)
+#define l2tp_session_dec_refcount(_s) \
+do { \
+ pr_debug("l2tp_session_dec_refcount: %s:%d %s: cnt=%d\n", \
+ __func__, __LINE__, (_s)->name, \
+ atomic_read(&_s->ref_count)); \
+ l2tp_session_dec_refcount_1(_s); \
+} while (0)
#else
#define l2tp_session_inc_refcount(s) l2tp_session_inc_refcount_1(s)
#define l2tp_session_dec_refcount(s) l2tp_session_dec_refcount_1(s)
#endif
+#define l2tp_printk(ptr, type, func, fmt, ...) \
+do { \
+ if (((ptr)->debug) & (type)) \
+ func(fmt, ##__VA_ARGS__); \
+} while (0)
+
+#define l2tp_warn(ptr, type, fmt, ...) \
+ l2tp_printk(ptr, type, pr_warn, fmt, ##__VA_ARGS__)
+#define l2tp_info(ptr, type, fmt, ...) \
+ l2tp_printk(ptr, type, pr_info, fmt, ##__VA_ARGS__)
+#define l2tp_dbg(ptr, type, fmt, ...) \
+ l2tp_printk(ptr, type, pr_debug, fmt, ##__VA_ARGS__)
+
#endif /* _L2TP_CORE_H_ */
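
With the wrappers above, a call site passes the tunnel or session pointer plus a message category, and the verbosity level is implied by the wrapper name; the message is emitted only when the corresponding bit is set in ->debug. A minimal usage sketch (the helper function is hypothetical, the macro use mirrors the converted call sites in l2tp_core.c):

	/* Hypothetical helper: logs only if L2TP_MSG_SEQ is set in session->debug */
	static void example_log_ns(struct l2tp_session *session)
	{
		l2tp_dbg(session, L2TP_MSG_SEQ, "%s: updated ns to %u\n",
			 session->name, session->ns);
	}
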
diff --git a/net/l2tp/l2tp_debugfs.c b/net/l2tp/l2tp_debugfs.c
index c0d57bad8b79..c3813bc84552 100644
--- a/net/l2tp/l2tp_debugfs.c
+++ b/net/l2tp/l2tp_debugfs.c
@@ -9,6 +9,8 @@
* 2 of the License, or (at your option) any later version.
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/module.h>
#include <linux/skbuff.h>
#include <linux/socket.h>
@@ -325,11 +327,11 @@ static int __init l2tp_debugfs_init(void)
if (tunnels == NULL)
rc = -EIO;
- printk(KERN_INFO "L2TP debugfs support\n");
+ pr_info("L2TP debugfs support\n");
out:
if (rc)
- printk(KERN_WARNING "l2tp debugfs: unable to init\n");
+ pr_warn("unable to init\n");
return rc;
}
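
The pr_fmt() define added at the top of each converted file gives every bare pr_*() call a consistent prefix without repeating it per call site. With the standard printk.h expansion (pr_info(fmt, ...) becomes printk(KERN_INFO pr_fmt(fmt), ...)), the messages above come out prefixed with the module name, for example:

	pr_warn("unable to init\n");	/* printed as "l2tp_debugfs: unable to init" */
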
diff --git a/net/l2tp/l2tp_eth.c b/net/l2tp/l2tp_eth.c
index 63fe5f353f04..443591d629ca 100644
--- a/net/l2tp/l2tp_eth.c
+++ b/net/l2tp/l2tp_eth.c
@@ -9,6 +9,8 @@
* 2 of the License, or (at your option) any later version.
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/module.h>
#include <linux/skbuff.h>
#include <linux/socket.h>
@@ -115,21 +117,14 @@ static void l2tp_eth_dev_recv(struct l2tp_session *session, struct sk_buff *skb,
if (session->debug & L2TP_MSG_DATA) {
unsigned int length;
- int offset;
u8 *ptr = skb->data;
length = min(32u, skb->len);
if (!pskb_may_pull(skb, length))
goto error;
- printk(KERN_DEBUG "%s: eth recv: ", session->name);
-
- offset = 0;
- do {
- printk(" %02X", ptr[offset]);
- } while (++offset < length);
-
- printk("\n");
+ pr_debug("%s: eth recv\n", session->name);
+ print_hex_dump_bytes("", DUMP_PREFIX_OFFSET, ptr, length);
}
if (!pskb_may_pull(skb, sizeof(ETH_HLEN)))
@@ -308,7 +303,7 @@ static int __init l2tp_eth_init(void)
if (err)
goto out_unreg;
- printk(KERN_INFO "L2TP ethernet pseudowire support (L2TPv3)\n");
+ pr_info("L2TP ethernet pseudowire support (L2TPv3)\n");
return 0;
diff --git a/net/l2tp/l2tp_ip.c b/net/l2tp/l2tp_ip.c
index c89a32fb5d5e..889f5d13d7ba 100644
--- a/net/l2tp/l2tp_ip.c
+++ b/net/l2tp/l2tp_ip.c
@@ -9,6 +9,8 @@
* 2 of the License, or (at your option) any later version.
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/icmp.h>
#include <linux/module.h>
#include <linux/skbuff.h>
@@ -120,7 +122,6 @@ static int l2tp_ip_recv(struct sk_buff *skb)
struct l2tp_session *session;
struct l2tp_tunnel *tunnel = NULL;
int length;
- int offset;
/* Point to L2TP header */
optr = ptr = skb->data;
@@ -155,14 +156,8 @@ static int l2tp_ip_recv(struct sk_buff *skb)
if (!pskb_may_pull(skb, length))
goto discard;
- printk(KERN_DEBUG "%s: ip recv: ", tunnel->name);
-
- offset = 0;
- do {
- printk(" %02X", ptr[offset]);
- } while (++offset < length);
-
- printk("\n");
+ pr_debug("%s: ip recv\n", tunnel->name);
+ print_hex_dump_bytes("", DUMP_PREFIX_OFFSET, ptr, length);
}
l2tp_recv_common(session, skb, ptr, optr, 0, skb->len, tunnel->recv_payload_hook);
@@ -593,7 +588,7 @@ static int __init l2tp_ip_init(void)
{
int err;
- printk(KERN_INFO "L2TP IP encapsulation support (L2TPv3)\n");
+ pr_info("L2TP IP encapsulation support (L2TPv3)\n");
err = proto_register(&l2tp_ip_prot, 1);
if (err != 0)
diff --git a/net/l2tp/l2tp_ip6.c b/net/l2tp/l2tp_ip6.c
index 88f0abe35443..0291d8d85f30 100644
--- a/net/l2tp/l2tp_ip6.c
+++ b/net/l2tp/l2tp_ip6.c
@@ -9,6 +9,8 @@
* 2 of the License, or (at your option) any later version.
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/icmp.h>
#include <linux/module.h>
#include <linux/skbuff.h>
@@ -133,7 +135,6 @@ static int l2tp_ip6_recv(struct sk_buff *skb)
struct l2tp_session *session;
struct l2tp_tunnel *tunnel = NULL;
int length;
- int offset;
/* Point to L2TP header */
optr = ptr = skb->data;
@@ -168,14 +169,8 @@ static int l2tp_ip6_recv(struct sk_buff *skb)
if (!pskb_may_pull(skb, length))
goto discard;
- printk(KERN_DEBUG "%s: ip recv: ", tunnel->name);
-
- offset = 0;
- do {
- printk(" %02X", ptr[offset]);
- } while (++offset < length);
-
- printk("\n");
+ pr_debug("%s: ip recv\n", tunnel->name);
+ print_hex_dump_bytes("", DUMP_PREFIX_OFFSET, ptr, length);
}
l2tp_recv_common(session, skb, ptr, optr, 0, skb->len,
@@ -752,7 +747,7 @@ static int __init l2tp_ip6_init(void)
{
int err;
- printk(KERN_INFO "L2TP IP encapsulation support for IPv6 (L2TPv3)\n");
+ pr_info("L2TP IP encapsulation support for IPv6 (L2TPv3)\n");
err = proto_register(&l2tp_ip6_prot, 1);
if (err != 0)
diff --git a/net/l2tp/l2tp_netlink.c b/net/l2tp/l2tp_netlink.c
index 24edad0fd9ba..8577264378fe 100644
--- a/net/l2tp/l2tp_netlink.c
+++ b/net/l2tp/l2tp_netlink.c
@@ -14,6 +14,8 @@
* published by the Free Software Foundation.
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <net/sock.h>
#include <net/genetlink.h>
#include <net/udp.h>
@@ -902,7 +904,7 @@ static int l2tp_nl_init(void)
{
int err;
- printk(KERN_INFO "L2TP netlink interface\n");
+ pr_info("L2TP netlink interface\n");
err = genl_register_family_with_ops(&l2tp_nl_family, l2tp_nl_ops,
ARRAY_SIZE(l2tp_nl_ops));
diff --git a/net/l2tp/l2tp_ppp.c b/net/l2tp/l2tp_ppp.c
index 9f2c421aa307..8ef6b9416cba 100644
--- a/net/l2tp/l2tp_ppp.c
+++ b/net/l2tp/l2tp_ppp.c
@@ -57,6 +57,8 @@
* http://openl2tp.sourceforge.net.
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/module.h>
#include <linux/string.h>
#include <linux/list.h>
@@ -106,12 +108,6 @@
/* Space for UDP, L2TP and PPP headers */
#define PPPOL2TP_HEADER_OVERHEAD 40
-#define PRINTK(_mask, _type, _lvl, _fmt, args...) \
- do { \
- if ((_mask) & (_type)) \
- printk(_lvl "PPPOL2TP: " _fmt, ##args); \
- } while (0)
-
/* Number of bytes to build transmit L2TP headers.
* Unfortunately the size is different depending on whether sequence numbers
* are enabled.
@@ -236,9 +232,9 @@ static void pppol2tp_recv(struct l2tp_session *session, struct sk_buff *skb, int
if (sk->sk_state & PPPOX_BOUND) {
struct pppox_sock *po;
- PRINTK(session->debug, PPPOL2TP_MSG_DATA, KERN_DEBUG,
- "%s: recv %d byte data frame, passing to ppp\n",
- session->name, data_len);
+ l2tp_dbg(session, PPPOL2TP_MSG_DATA,
+ "%s: recv %d byte data frame, passing to ppp\n",
+ session->name, data_len);
/* We need to forget all info related to the L2TP packet
* gathered in the skb as we are going to reuse the same
@@ -259,8 +255,8 @@ static void pppol2tp_recv(struct l2tp_session *session, struct sk_buff *skb, int
po = pppox_sk(sk);
ppp_input(&po->chan, skb);
} else {
- PRINTK(session->debug, PPPOL2TP_MSG_DATA, KERN_INFO,
- "%s: socket not bound\n", session->name);
+ l2tp_info(session, PPPOL2TP_MSG_DATA, "%s: socket not bound\n",
+ session->name);
/* Not bound. Nothing we can do, so discard. */
session->stats.rx_errors++;
@@ -270,8 +266,7 @@ static void pppol2tp_recv(struct l2tp_session *session, struct sk_buff *skb, int
return;
no_sock:
- PRINTK(session->debug, PPPOL2TP_MSG_DATA, KERN_INFO,
- "%s: no socket\n", session->name);
+ l2tp_info(session, PPPOL2TP_MSG_DATA, "%s: no socket\n", session->name);
kfree_skb(skb);
}
@@ -827,8 +822,8 @@ out_no_ppp:
/* This is how we get the session context from the socket. */
sk->sk_user_data = session;
sk->sk_state = PPPOX_CONNECTED;
- PRINTK(session->debug, PPPOL2TP_MSG_CONTROL, KERN_INFO,
- "%s: created\n", session->name);
+ l2tp_info(session, PPPOL2TP_MSG_CONTROL, "%s: created\n",
+ session->name);
end:
release_sock(sk);
@@ -881,8 +876,8 @@ static int pppol2tp_session_create(struct net *net, u32 tunnel_id, u32 session_i
ps = l2tp_session_priv(session);
ps->tunnel_sock = tunnel->sock;
- PRINTK(session->debug, PPPOL2TP_MSG_CONTROL, KERN_INFO,
- "%s: created\n", session->name);
+ l2tp_info(session, PPPOL2TP_MSG_CONTROL, "%s: created\n",
+ session->name);
error = 0;
@@ -1058,9 +1053,9 @@ static int pppol2tp_session_ioctl(struct l2tp_session *session,
struct l2tp_tunnel *tunnel = session->tunnel;
struct pppol2tp_ioc_stats stats;
- PRINTK(session->debug, PPPOL2TP_MSG_CONTROL, KERN_DEBUG,
- "%s: pppol2tp_session_ioctl(cmd=%#x, arg=%#lx)\n",
- session->name, cmd, arg);
+ l2tp_dbg(session, PPPOL2TP_MSG_CONTROL,
+ "%s: pppol2tp_session_ioctl(cmd=%#x, arg=%#lx)\n",
+ session->name, cmd, arg);
sk = ps->sock;
sock_hold(sk);
@@ -1078,8 +1073,8 @@ static int pppol2tp_session_ioctl(struct l2tp_session *session,
if (copy_to_user((void __user *) arg, &ifr, sizeof(struct ifreq)))
break;
- PRINTK(session->debug, PPPOL2TP_MSG_CONTROL, KERN_INFO,
- "%s: get mtu=%d\n", session->name, session->mtu);
+ l2tp_info(session, PPPOL2TP_MSG_CONTROL, "%s: get mtu=%d\n",
+ session->name, session->mtu);
err = 0;
break;
@@ -1094,8 +1089,8 @@ static int pppol2tp_session_ioctl(struct l2tp_session *session,
session->mtu = ifr.ifr_mtu;
- PRINTK(session->debug, PPPOL2TP_MSG_CONTROL, KERN_INFO,
- "%s: set mtu=%d\n", session->name, session->mtu);
+ l2tp_info(session, PPPOL2TP_MSG_CONTROL, "%s: set mtu=%d\n",
+ session->name, session->mtu);
err = 0;
break;
@@ -1108,8 +1103,8 @@ static int pppol2tp_session_ioctl(struct l2tp_session *session,
if (put_user(session->mru, (int __user *) arg))
break;
- PRINTK(session->debug, PPPOL2TP_MSG_CONTROL, KERN_INFO,
- "%s: get mru=%d\n", session->name, session->mru);
+ l2tp_info(session, PPPOL2TP_MSG_CONTROL, "%s: get mru=%d\n",
+ session->name, session->mru);
err = 0;
break;
@@ -1123,8 +1118,8 @@ static int pppol2tp_session_ioctl(struct l2tp_session *session,
break;
session->mru = val;
- PRINTK(session->debug, PPPOL2TP_MSG_CONTROL, KERN_INFO,
- "%s: set mru=%d\n", session->name, session->mru);
+ l2tp_info(session, PPPOL2TP_MSG_CONTROL, "%s: set mru=%d\n",
+ session->name, session->mru);
err = 0;
break;
@@ -1133,8 +1128,8 @@ static int pppol2tp_session_ioctl(struct l2tp_session *session,
if (put_user(ps->flags, (int __user *) arg))
break;
- PRINTK(session->debug, PPPOL2TP_MSG_CONTROL, KERN_INFO,
- "%s: get flags=%d\n", session->name, ps->flags);
+ l2tp_info(session, PPPOL2TP_MSG_CONTROL, "%s: get flags=%d\n",
+ session->name, ps->flags);
err = 0;
break;
@@ -1143,8 +1138,8 @@ static int pppol2tp_session_ioctl(struct l2tp_session *session,
if (get_user(val, (int __user *) arg))
break;
ps->flags = val;
- PRINTK(session->debug, PPPOL2TP_MSG_CONTROL, KERN_INFO,
- "%s: set flags=%d\n", session->name, ps->flags);
+ l2tp_info(session, PPPOL2TP_MSG_CONTROL, "%s: set flags=%d\n",
+ session->name, ps->flags);
err = 0;
break;
@@ -1160,8 +1155,8 @@ static int pppol2tp_session_ioctl(struct l2tp_session *session,
if (copy_to_user((void __user *) arg, &stats,
sizeof(stats)))
break;
- PRINTK(session->debug, PPPOL2TP_MSG_CONTROL, KERN_INFO,
- "%s: get L2TP stats\n", session->name);
+ l2tp_info(session, PPPOL2TP_MSG_CONTROL, "%s: get L2TP stats\n",
+ session->name);
err = 0;
break;
@@ -1188,9 +1183,9 @@ static int pppol2tp_tunnel_ioctl(struct l2tp_tunnel *tunnel,
struct sock *sk;
struct pppol2tp_ioc_stats stats;
- PRINTK(tunnel->debug, PPPOL2TP_MSG_CONTROL, KERN_DEBUG,
- "%s: pppol2tp_tunnel_ioctl(cmd=%#x, arg=%#lx)\n",
- tunnel->name, cmd, arg);
+ l2tp_dbg(tunnel, PPPOL2TP_MSG_CONTROL,
+ "%s: pppol2tp_tunnel_ioctl(cmd=%#x, arg=%#lx)\n",
+ tunnel->name, cmd, arg);
sk = tunnel->sock;
sock_hold(sk);
@@ -1224,8 +1219,8 @@ static int pppol2tp_tunnel_ioctl(struct l2tp_tunnel *tunnel,
err = -EFAULT;
break;
}
- PRINTK(tunnel->debug, PPPOL2TP_MSG_CONTROL, KERN_INFO,
- "%s: get L2TP stats\n", tunnel->name);
+ l2tp_info(tunnel, PPPOL2TP_MSG_CONTROL, "%s: get L2TP stats\n",
+ tunnel->name);
err = 0;
break;
@@ -1314,8 +1309,8 @@ static int pppol2tp_tunnel_setsockopt(struct sock *sk,
switch (optname) {
case PPPOL2TP_SO_DEBUG:
tunnel->debug = val;
- PRINTK(tunnel->debug, PPPOL2TP_MSG_CONTROL, KERN_INFO,
- "%s: set debug=%x\n", tunnel->name, tunnel->debug);
+ l2tp_info(tunnel, PPPOL2TP_MSG_CONTROL, "%s: set debug=%x\n",
+ tunnel->name, tunnel->debug);
break;
default:
@@ -1342,8 +1337,9 @@ static int pppol2tp_session_setsockopt(struct sock *sk,
break;
}
session->recv_seq = val ? -1 : 0;
- PRINTK(session->debug, PPPOL2TP_MSG_CONTROL, KERN_INFO,
- "%s: set recv_seq=%d\n", session->name, session->recv_seq);
+ l2tp_info(session, PPPOL2TP_MSG_CONTROL,
+ "%s: set recv_seq=%d\n",
+ session->name, session->recv_seq);
break;
case PPPOL2TP_SO_SENDSEQ:
@@ -1358,8 +1354,9 @@ static int pppol2tp_session_setsockopt(struct sock *sk,
po->chan.hdrlen = val ? PPPOL2TP_L2TP_HDR_SIZE_SEQ :
PPPOL2TP_L2TP_HDR_SIZE_NOSEQ;
}
- PRINTK(session->debug, PPPOL2TP_MSG_CONTROL, KERN_INFO,
- "%s: set send_seq=%d\n", session->name, session->send_seq);
+ l2tp_info(session, PPPOL2TP_MSG_CONTROL,
+ "%s: set send_seq=%d\n",
+ session->name, session->send_seq);
break;
case PPPOL2TP_SO_LNSMODE:
@@ -1368,20 +1365,22 @@ static int pppol2tp_session_setsockopt(struct sock *sk,
break;
}
session->lns_mode = val ? -1 : 0;
- PRINTK(session->debug, PPPOL2TP_MSG_CONTROL, KERN_INFO,
- "%s: set lns_mode=%d\n", session->name, session->lns_mode);
+ l2tp_info(session, PPPOL2TP_MSG_CONTROL,
+ "%s: set lns_mode=%d\n",
+ session->name, session->lns_mode);
break;
case PPPOL2TP_SO_DEBUG:
session->debug = val;
- PRINTK(session->debug, PPPOL2TP_MSG_CONTROL, KERN_INFO,
- "%s: set debug=%x\n", session->name, session->debug);
+ l2tp_info(session, PPPOL2TP_MSG_CONTROL, "%s: set debug=%x\n",
+ session->name, session->debug);
break;
case PPPOL2TP_SO_REORDERTO:
session->reorder_timeout = msecs_to_jiffies(val);
- PRINTK(session->debug, PPPOL2TP_MSG_CONTROL, KERN_INFO,
- "%s: set reorder_timeout=%d\n", session->name, session->reorder_timeout);
+ l2tp_info(session, PPPOL2TP_MSG_CONTROL,
+ "%s: set reorder_timeout=%d\n",
+ session->name, session->reorder_timeout);
break;
default:
@@ -1460,8 +1459,8 @@ static int pppol2tp_tunnel_getsockopt(struct sock *sk,
switch (optname) {
case PPPOL2TP_SO_DEBUG:
*val = tunnel->debug;
- PRINTK(tunnel->debug, PPPOL2TP_MSG_CONTROL, KERN_INFO,
- "%s: get debug=%x\n", tunnel->name, tunnel->debug);
+ l2tp_info(tunnel, PPPOL2TP_MSG_CONTROL, "%s: get debug=%x\n",
+ tunnel->name, tunnel->debug);
break;
default:
@@ -1483,32 +1482,32 @@ static int pppol2tp_session_getsockopt(struct sock *sk,
switch (optname) {
case PPPOL2TP_SO_RECVSEQ:
*val = session->recv_seq;
- PRINTK(session->debug, PPPOL2TP_MSG_CONTROL, KERN_INFO,
- "%s: get recv_seq=%d\n", session->name, *val);
+ l2tp_info(session, PPPOL2TP_MSG_CONTROL,
+ "%s: get recv_seq=%d\n", session->name, *val);
break;
case PPPOL2TP_SO_SENDSEQ:
*val = session->send_seq;
- PRINTK(session->debug, PPPOL2TP_MSG_CONTROL, KERN_INFO,
- "%s: get send_seq=%d\n", session->name, *val);
+ l2tp_info(session, PPPOL2TP_MSG_CONTROL,
+ "%s: get send_seq=%d\n", session->name, *val);
break;
case PPPOL2TP_SO_LNSMODE:
*val = session->lns_mode;
- PRINTK(session->debug, PPPOL2TP_MSG_CONTROL, KERN_INFO,
- "%s: get lns_mode=%d\n", session->name, *val);
+ l2tp_info(session, PPPOL2TP_MSG_CONTROL,
+ "%s: get lns_mode=%d\n", session->name, *val);
break;
case PPPOL2TP_SO_DEBUG:
*val = session->debug;
- PRINTK(session->debug, PPPOL2TP_MSG_CONTROL, KERN_INFO,
- "%s: get debug=%d\n", session->name, *val);
+ l2tp_info(session, PPPOL2TP_MSG_CONTROL, "%s: get debug=%d\n",
+ session->name, *val);
break;
case PPPOL2TP_SO_REORDERTO:
*val = (int) jiffies_to_msecs(session->reorder_timeout);
- PRINTK(session->debug, PPPOL2TP_MSG_CONTROL, KERN_INFO,
- "%s: get reorder_timeout=%d\n", session->name, *val);
+ l2tp_info(session, PPPOL2TP_MSG_CONTROL,
+ "%s: get reorder_timeout=%d\n", session->name, *val);
break;
default:
@@ -1871,8 +1870,7 @@ static int __init pppol2tp_init(void)
goto out_unregister_pppox;
#endif
- printk(KERN_INFO "PPPoL2TP kernel driver, %s\n",
- PPPOL2TP_DRV_VERSION);
+ pr_info("PPPoL2TP kernel driver, %s\n", PPPOL2TP_DRV_VERSION);
out:
return err;
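
The file-private PRINTK macro removed from l2tp_ppp.c is replaced throughout by the shared l2tp_* wrappers from l2tp_core.h, still filtering on the same ->debug bits via the PPPOL2TP_MSG_* categories. A before/after sketch of a typical call site (illustrative):

	/* old:
	 *   PRINTK(session->debug, PPPOL2TP_MSG_CONTROL, KERN_INFO,
	 *          "%s: created\n", session->name);
	 * new:
	 *   l2tp_info(session, PPPOL2TP_MSG_CONTROL, "%s: created\n",
	 *             session->name);
	 *
	 * One visible change: the old macro hard-coded a "PPPOL2TP: " prefix,
	 * while the new calls pick up the per-file pr_fmt() module prefix.
	 */
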
diff --git a/net/lapb/lapb_iface.c b/net/lapb/lapb_iface.c
index ab3d35f23257..3cdaa046c1bc 100644
--- a/net/lapb/lapb_iface.c
+++ b/net/lapb/lapb_iface.c
@@ -15,6 +15,8 @@
* 2000-10-29 Henner Eisen lapb_data_indication() return status.
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/types.h>
@@ -279,9 +281,7 @@ int lapb_connect_request(struct net_device *dev)
lapb_establish_data_link(lapb);
-#if LAPB_DEBUG > 0
- printk(KERN_DEBUG "lapb: (%p) S0 -> S1\n", lapb->dev);
-#endif
+ lapb_dbg(0, "(%p) S0 -> S1\n", lapb->dev);
lapb->state = LAPB_STATE_1;
rc = LAPB_OK;
@@ -305,12 +305,8 @@ int lapb_disconnect_request(struct net_device *dev)
goto out_put;
case LAPB_STATE_1:
-#if LAPB_DEBUG > 1
- printk(KERN_DEBUG "lapb: (%p) S1 TX DISC(1)\n", lapb->dev);
-#endif
-#if LAPB_DEBUG > 0
- printk(KERN_DEBUG "lapb: (%p) S1 -> S0\n", lapb->dev);
-#endif
+ lapb_dbg(1, "(%p) S1 TX DISC(1)\n", lapb->dev);
+ lapb_dbg(0, "(%p) S1 -> S0\n", lapb->dev);
lapb_send_control(lapb, LAPB_DISC, LAPB_POLLON, LAPB_COMMAND);
lapb->state = LAPB_STATE_0;
lapb_start_t1timer(lapb);
@@ -329,12 +325,8 @@ int lapb_disconnect_request(struct net_device *dev)
lapb_stop_t2timer(lapb);
lapb->state = LAPB_STATE_2;
-#if LAPB_DEBUG > 1
- printk(KERN_DEBUG "lapb: (%p) S3 DISC(1)\n", lapb->dev);
-#endif
-#if LAPB_DEBUG > 0
- printk(KERN_DEBUG "lapb: (%p) S3 -> S2\n", lapb->dev);
-#endif
+ lapb_dbg(1, "(%p) S3 DISC(1)\n", lapb->dev);
+ lapb_dbg(0, "(%p) S3 -> S2\n", lapb->dev);
rc = LAPB_OK;
out_put:
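
lapb_dbg(level, ...) replaces the #if LAPB_DEBUG > N printk(KERN_DEBUG ...) blocks used throughout the LAPB state machines. Its definition is not part of these net/ hunks (it belongs in the lapb header); presumably it keeps the same compile-time gating, roughly along these lines (a sketch, not the actual header):

	#define lapb_dbg(level, fmt, ...)			\
	do {							\
		if (level < LAPB_DEBUG)				\
			pr_debug(fmt, ##__VA_ARGS__);		\
	} while (0)

so lapb_dbg(1, ...) corresponds to the old LAPB_DEBUG > 1 sites, and the "lapb: " prefix now comes from the per-file pr_fmt().
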
diff --git a/net/lapb/lapb_in.c b/net/lapb/lapb_in.c
index f4e3c1accab7..5dba899131b3 100644
--- a/net/lapb/lapb_in.c
+++ b/net/lapb/lapb_in.c
@@ -15,6 +15,8 @@
* 2000-10-29 Henner Eisen lapb_data_indication() return status.
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/socket.h>
@@ -44,25 +46,16 @@ static void lapb_state0_machine(struct lapb_cb *lapb, struct sk_buff *skb,
{
switch (frame->type) {
case LAPB_SABM:
-#if LAPB_DEBUG > 1
- printk(KERN_DEBUG "lapb: (%p) S0 RX SABM(%d)\n",
- lapb->dev, frame->pf);
-#endif
+ lapb_dbg(1, "(%p) S0 RX SABM(%d)\n", lapb->dev, frame->pf);
if (lapb->mode & LAPB_EXTENDED) {
-#if LAPB_DEBUG > 1
- printk(KERN_DEBUG "lapb: (%p) S0 TX DM(%d)\n",
- lapb->dev, frame->pf);
-#endif
+ lapb_dbg(1, "(%p) S0 TX DM(%d)\n",
+ lapb->dev, frame->pf);
lapb_send_control(lapb, LAPB_DM, frame->pf,
LAPB_RESPONSE);
} else {
-#if LAPB_DEBUG > 1
- printk(KERN_DEBUG "lapb: (%p) S0 TX UA(%d)\n",
- lapb->dev, frame->pf);
-#endif
-#if LAPB_DEBUG > 0
- printk(KERN_DEBUG "lapb: (%p) S0 -> S3\n", lapb->dev);
-#endif
+ lapb_dbg(1, "(%p) S0 TX UA(%d)\n",
+ lapb->dev, frame->pf);
+ lapb_dbg(0, "(%p) S0 -> S3\n", lapb->dev);
lapb_send_control(lapb, LAPB_UA, frame->pf,
LAPB_RESPONSE);
lapb_stop_t1timer(lapb);
@@ -78,18 +71,11 @@ static void lapb_state0_machine(struct lapb_cb *lapb, struct sk_buff *skb,
break;
case LAPB_SABME:
-#if LAPB_DEBUG > 1
- printk(KERN_DEBUG "lapb: (%p) S0 RX SABME(%d)\n",
- lapb->dev, frame->pf);
-#endif
+ lapb_dbg(1, "(%p) S0 RX SABME(%d)\n", lapb->dev, frame->pf);
if (lapb->mode & LAPB_EXTENDED) {
-#if LAPB_DEBUG > 1
- printk(KERN_DEBUG "lapb: (%p) S0 TX UA(%d)\n",
- lapb->dev, frame->pf);
-#endif
-#if LAPB_DEBUG > 0
- printk(KERN_DEBUG "lapb: (%p) S0 -> S3\n", lapb->dev);
-#endif
+ lapb_dbg(1, "(%p) S0 TX UA(%d)\n",
+ lapb->dev, frame->pf);
+ lapb_dbg(0, "(%p) S0 -> S3\n", lapb->dev);
lapb_send_control(lapb, LAPB_UA, frame->pf,
LAPB_RESPONSE);
lapb_stop_t1timer(lapb);
@@ -102,22 +88,16 @@ static void lapb_state0_machine(struct lapb_cb *lapb, struct sk_buff *skb,
lapb->va = 0;
lapb_connect_indication(lapb, LAPB_OK);
} else {
-#if LAPB_DEBUG > 1
- printk(KERN_DEBUG "lapb: (%p) S0 TX DM(%d)\n",
- lapb->dev, frame->pf);
-#endif
+ lapb_dbg(1, "(%p) S0 TX DM(%d)\n",
+ lapb->dev, frame->pf);
lapb_send_control(lapb, LAPB_DM, frame->pf,
LAPB_RESPONSE);
}
break;
case LAPB_DISC:
-#if LAPB_DEBUG > 1
- printk(KERN_DEBUG "lapb: (%p) S0 RX DISC(%d)\n",
- lapb->dev, frame->pf);
- printk(KERN_DEBUG "lapb: (%p) S0 TX UA(%d)\n",
- lapb->dev, frame->pf);
-#endif
+ lapb_dbg(1, "(%p) S0 RX DISC(%d)\n", lapb->dev, frame->pf);
+ lapb_dbg(1, "(%p) S0 TX UA(%d)\n", lapb->dev, frame->pf);
lapb_send_control(lapb, LAPB_UA, frame->pf, LAPB_RESPONSE);
break;
@@ -137,68 +117,45 @@ static void lapb_state1_machine(struct lapb_cb *lapb, struct sk_buff *skb,
{
switch (frame->type) {
case LAPB_SABM:
-#if LAPB_DEBUG > 1
- printk(KERN_DEBUG "lapb: (%p) S1 RX SABM(%d)\n",
- lapb->dev, frame->pf);
-#endif
+ lapb_dbg(1, "(%p) S1 RX SABM(%d)\n", lapb->dev, frame->pf);
if (lapb->mode & LAPB_EXTENDED) {
-#if LAPB_DEBUG > 1
- printk(KERN_DEBUG "lapb: (%p) S1 TX DM(%d)\n",
- lapb->dev, frame->pf);
-#endif
+ lapb_dbg(1, "(%p) S1 TX DM(%d)\n",
+ lapb->dev, frame->pf);
lapb_send_control(lapb, LAPB_DM, frame->pf,
LAPB_RESPONSE);
} else {
-#if LAPB_DEBUG > 1
- printk(KERN_DEBUG "lapb: (%p) S1 TX UA(%d)\n",
- lapb->dev, frame->pf);
-#endif
+ lapb_dbg(1, "(%p) S1 TX UA(%d)\n",
+ lapb->dev, frame->pf);
lapb_send_control(lapb, LAPB_UA, frame->pf,
LAPB_RESPONSE);
}
break;
case LAPB_SABME:
-#if LAPB_DEBUG > 1
- printk(KERN_DEBUG "lapb: (%p) S1 RX SABME(%d)\n",
- lapb->dev, frame->pf);
-#endif
+ lapb_dbg(1, "(%p) S1 RX SABME(%d)\n", lapb->dev, frame->pf);
if (lapb->mode & LAPB_EXTENDED) {
-#if LAPB_DEBUG > 1
- printk(KERN_DEBUG "lapb: (%p) S1 TX UA(%d)\n",
- lapb->dev, frame->pf);
-#endif
+ lapb_dbg(1, "(%p) S1 TX UA(%d)\n",
+ lapb->dev, frame->pf);
lapb_send_control(lapb, LAPB_UA, frame->pf,
LAPB_RESPONSE);
} else {
-#if LAPB_DEBUG > 1
- printk(KERN_DEBUG "lapb: (%p) S1 TX DM(%d)\n",
- lapb->dev, frame->pf);
-#endif
+ lapb_dbg(1, "(%p) S1 TX DM(%d)\n",
+ lapb->dev, frame->pf);
lapb_send_control(lapb, LAPB_DM, frame->pf,
LAPB_RESPONSE);
}
break;
case LAPB_DISC:
-#if LAPB_DEBUG > 1
- printk(KERN_DEBUG "lapb: (%p) S1 RX DISC(%d)\n",
- lapb->dev, frame->pf);
- printk(KERN_DEBUG "lapb: (%p) S1 TX DM(%d)\n",
- lapb->dev, frame->pf);
-#endif
+ lapb_dbg(1, "(%p) S1 RX DISC(%d)\n", lapb->dev, frame->pf);
+ lapb_dbg(1, "(%p) S1 TX DM(%d)\n", lapb->dev, frame->pf);
lapb_send_control(lapb, LAPB_DM, frame->pf, LAPB_RESPONSE);
break;
case LAPB_UA:
-#if LAPB_DEBUG > 1
- printk(KERN_DEBUG "lapb: (%p) S1 RX UA(%d)\n",
- lapb->dev, frame->pf);
-#endif
+ lapb_dbg(1, "(%p) S1 RX UA(%d)\n", lapb->dev, frame->pf);
if (frame->pf) {
-#if LAPB_DEBUG > 0
- printk(KERN_DEBUG "lapb: (%p) S1 -> S3\n", lapb->dev);
-#endif
+ lapb_dbg(0, "(%p) S1 -> S3\n", lapb->dev);
lapb_stop_t1timer(lapb);
lapb_stop_t2timer(lapb);
lapb->state = LAPB_STATE_3;
@@ -212,14 +169,9 @@ static void lapb_state1_machine(struct lapb_cb *lapb, struct sk_buff *skb,
break;
case LAPB_DM:
-#if LAPB_DEBUG > 1
- printk(KERN_DEBUG "lapb: (%p) S1 RX DM(%d)\n",
- lapb->dev, frame->pf);
-#endif
+ lapb_dbg(1, "(%p) S1 RX DM(%d)\n", lapb->dev, frame->pf);
if (frame->pf) {
-#if LAPB_DEBUG > 0
- printk(KERN_DEBUG "lapb: (%p) S1 -> S0\n", lapb->dev);
-#endif
+ lapb_dbg(0, "(%p) S1 -> S0\n", lapb->dev);
lapb_clear_queues(lapb);
lapb->state = LAPB_STATE_0;
lapb_start_t1timer(lapb);
@@ -242,34 +194,22 @@ static void lapb_state2_machine(struct lapb_cb *lapb, struct sk_buff *skb,
switch (frame->type) {
case LAPB_SABM:
case LAPB_SABME:
-#if LAPB_DEBUG > 1
- printk(KERN_DEBUG "lapb: (%p) S2 RX {SABM,SABME}(%d)\n",
- lapb->dev, frame->pf);
- printk(KERN_DEBUG "lapb: (%p) S2 TX DM(%d)\n",
- lapb->dev, frame->pf);
-#endif
+ lapb_dbg(1, "(%p) S2 RX {SABM,SABME}(%d)\n",
+ lapb->dev, frame->pf);
+ lapb_dbg(1, "(%p) S2 TX DM(%d)\n", lapb->dev, frame->pf);
lapb_send_control(lapb, LAPB_DM, frame->pf, LAPB_RESPONSE);
break;
case LAPB_DISC:
-#if LAPB_DEBUG > 1
- printk(KERN_DEBUG "lapb: (%p) S2 RX DISC(%d)\n",
- lapb->dev, frame->pf);
- printk(KERN_DEBUG "lapb: (%p) S2 TX UA(%d)\n",
- lapb->dev, frame->pf);
-#endif
+ lapb_dbg(1, "(%p) S2 RX DISC(%d)\n", lapb->dev, frame->pf);
+ lapb_dbg(1, "(%p) S2 TX UA(%d)\n", lapb->dev, frame->pf);
lapb_send_control(lapb, LAPB_UA, frame->pf, LAPB_RESPONSE);
break;
case LAPB_UA:
-#if LAPB_DEBUG > 1
- printk(KERN_DEBUG "lapb: (%p) S2 RX UA(%d)\n",
- lapb->dev, frame->pf);
-#endif
+ lapb_dbg(1, "(%p) S2 RX UA(%d)\n", lapb->dev, frame->pf);
if (frame->pf) {
-#if LAPB_DEBUG > 0
- printk(KERN_DEBUG "lapb: (%p) S2 -> S0\n", lapb->dev);
-#endif
+ lapb_dbg(0, "(%p) S2 -> S0\n", lapb->dev);
lapb->state = LAPB_STATE_0;
lapb_start_t1timer(lapb);
lapb_stop_t2timer(lapb);
@@ -278,14 +218,9 @@ static void lapb_state2_machine(struct lapb_cb *lapb, struct sk_buff *skb,
break;
case LAPB_DM:
-#if LAPB_DEBUG > 1
- printk(KERN_DEBUG "lapb: (%p) S2 RX DM(%d)\n",
- lapb->dev, frame->pf);
-#endif
+ lapb_dbg(1, "(%p) S2 RX DM(%d)\n", lapb->dev, frame->pf);
if (frame->pf) {
-#if LAPB_DEBUG > 0
- printk(KERN_DEBUG "lapb: (%p) S2 -> S0\n", lapb->dev);
-#endif
+ lapb_dbg(0, "(%p) S2 -> S0\n", lapb->dev);
lapb->state = LAPB_STATE_0;
lapb_start_t1timer(lapb);
lapb_stop_t2timer(lapb);
@@ -297,12 +232,9 @@ static void lapb_state2_machine(struct lapb_cb *lapb, struct sk_buff *skb,
case LAPB_REJ:
case LAPB_RNR:
case LAPB_RR:
-#if LAPB_DEBUG > 1
- printk(KERN_DEBUG "lapb: (%p) S2 RX {I,REJ,RNR,RR}(%d)\n",
- lapb->dev, frame->pf);
- printk(KERN_DEBUG "lapb: (%p) S2 RX DM(%d)\n",
+ lapb_dbg(1, "(%p) S2 RX {I,REJ,RNR,RR}(%d)\n",
lapb->dev, frame->pf);
-#endif
+ lapb_dbg(1, "(%p) S2 RX DM(%d)\n", lapb->dev, frame->pf);
if (frame->pf)
lapb_send_control(lapb, LAPB_DM, frame->pf,
LAPB_RESPONSE);
@@ -325,22 +257,15 @@ static void lapb_state3_machine(struct lapb_cb *lapb, struct sk_buff *skb,
switch (frame->type) {
case LAPB_SABM:
-#if LAPB_DEBUG > 1
- printk(KERN_DEBUG "lapb: (%p) S3 RX SABM(%d)\n",
- lapb->dev, frame->pf);
-#endif
+ lapb_dbg(1, "(%p) S3 RX SABM(%d)\n", lapb->dev, frame->pf);
if (lapb->mode & LAPB_EXTENDED) {
-#if LAPB_DEBUG > 1
- printk(KERN_DEBUG "lapb: (%p) S3 TX DM(%d)\n",
- lapb->dev, frame->pf);
-#endif
+ lapb_dbg(1, "(%p) S3 TX DM(%d)\n",
+ lapb->dev, frame->pf);
lapb_send_control(lapb, LAPB_DM, frame->pf,
LAPB_RESPONSE);
} else {
-#if LAPB_DEBUG > 1
- printk(KERN_DEBUG "lapb: (%p) S3 TX UA(%d)\n",
- lapb->dev, frame->pf);
-#endif
+ lapb_dbg(1, "(%p) S3 TX UA(%d)\n",
+ lapb->dev, frame->pf);
lapb_send_control(lapb, LAPB_UA, frame->pf,
LAPB_RESPONSE);
lapb_stop_t1timer(lapb);
@@ -355,15 +280,10 @@ static void lapb_state3_machine(struct lapb_cb *lapb, struct sk_buff *skb,
break;
case LAPB_SABME:
-#if LAPB_DEBUG > 1
- printk(KERN_DEBUG "lapb: (%p) S3 RX SABME(%d)\n",
- lapb->dev, frame->pf);
-#endif
+ lapb_dbg(1, "(%p) S3 RX SABME(%d)\n", lapb->dev, frame->pf);
if (lapb->mode & LAPB_EXTENDED) {
-#if LAPB_DEBUG > 1
- printk(KERN_DEBUG "lapb: (%p) S3 TX UA(%d)\n",
- lapb->dev, frame->pf);
-#endif
+ lapb_dbg(1, "(%p) S3 TX UA(%d)\n",
+ lapb->dev, frame->pf);
lapb_send_control(lapb, LAPB_UA, frame->pf,
LAPB_RESPONSE);
lapb_stop_t1timer(lapb);
@@ -375,23 +295,16 @@ static void lapb_state3_machine(struct lapb_cb *lapb, struct sk_buff *skb,
lapb->va = 0;
lapb_requeue_frames(lapb);
} else {
-#if LAPB_DEBUG > 1
- printk(KERN_DEBUG "lapb: (%p) S3 TX DM(%d)\n",
- lapb->dev, frame->pf);
-#endif
+ lapb_dbg(1, "(%p) S3 TX DM(%d)\n",
+ lapb->dev, frame->pf);
lapb_send_control(lapb, LAPB_DM, frame->pf,
LAPB_RESPONSE);
}
break;
case LAPB_DISC:
-#if LAPB_DEBUG > 1
- printk(KERN_DEBUG "lapb: (%p) S3 RX DISC(%d)\n",
- lapb->dev, frame->pf);
-#endif
-#if LAPB_DEBUG > 0
- printk(KERN_DEBUG "lapb: (%p) S3 -> S0\n", lapb->dev);
-#endif
+ lapb_dbg(1, "(%p) S3 RX DISC(%d)\n", lapb->dev, frame->pf);
+ lapb_dbg(0, "(%p) S3 -> S0\n", lapb->dev);
lapb_clear_queues(lapb);
lapb_send_control(lapb, LAPB_UA, frame->pf, LAPB_RESPONSE);
lapb_start_t1timer(lapb);
@@ -401,13 +314,8 @@ static void lapb_state3_machine(struct lapb_cb *lapb, struct sk_buff *skb,
break;
case LAPB_DM:
-#if LAPB_DEBUG > 1
- printk(KERN_DEBUG "lapb: (%p) S3 RX DM(%d)\n",
- lapb->dev, frame->pf);
-#endif
-#if LAPB_DEBUG > 0
- printk(KERN_DEBUG "lapb: (%p) S3 -> S0\n", lapb->dev);
-#endif
+ lapb_dbg(1, "(%p) S3 RX DM(%d)\n", lapb->dev, frame->pf);
+ lapb_dbg(0, "(%p) S3 -> S0\n", lapb->dev);
lapb_clear_queues(lapb);
lapb->state = LAPB_STATE_0;
lapb_start_t1timer(lapb);
@@ -416,10 +324,8 @@ static void lapb_state3_machine(struct lapb_cb *lapb, struct sk_buff *skb,
break;
case LAPB_RNR:
-#if LAPB_DEBUG > 1
- printk(KERN_DEBUG "lapb: (%p) S3 RX RNR(%d) R%d\n",
- lapb->dev, frame->pf, frame->nr);
-#endif
+ lapb_dbg(1, "(%p) S3 RX RNR(%d) R%d\n",
+ lapb->dev, frame->pf, frame->nr);
lapb->condition |= LAPB_PEER_RX_BUSY_CONDITION;
lapb_check_need_response(lapb, frame->cr, frame->pf);
if (lapb_validate_nr(lapb, frame->nr)) {
@@ -428,9 +334,7 @@ static void lapb_state3_machine(struct lapb_cb *lapb, struct sk_buff *skb,
lapb->frmr_data = *frame;
lapb->frmr_type = LAPB_FRMR_Z;
lapb_transmit_frmr(lapb);
-#if LAPB_DEBUG > 0
- printk(KERN_DEBUG "lapb: (%p) S3 -> S4\n", lapb->dev);
-#endif
+ lapb_dbg(0, "(%p) S3 -> S4\n", lapb->dev);
lapb_start_t1timer(lapb);
lapb_stop_t2timer(lapb);
lapb->state = LAPB_STATE_4;
@@ -439,10 +343,8 @@ static void lapb_state3_machine(struct lapb_cb *lapb, struct sk_buff *skb,
break;
case LAPB_RR:
-#if LAPB_DEBUG > 1
- printk(KERN_DEBUG "lapb: (%p) S3 RX RR(%d) R%d\n",
- lapb->dev, frame->pf, frame->nr);
-#endif
+ lapb_dbg(1, "(%p) S3 RX RR(%d) R%d\n",
+ lapb->dev, frame->pf, frame->nr);
lapb->condition &= ~LAPB_PEER_RX_BUSY_CONDITION;
lapb_check_need_response(lapb, frame->cr, frame->pf);
if (lapb_validate_nr(lapb, frame->nr)) {
@@ -451,9 +353,7 @@ static void lapb_state3_machine(struct lapb_cb *lapb, struct sk_buff *skb,
lapb->frmr_data = *frame;
lapb->frmr_type = LAPB_FRMR_Z;
lapb_transmit_frmr(lapb);
-#if LAPB_DEBUG > 0
- printk(KERN_DEBUG "lapb: (%p) S3 -> S4\n", lapb->dev);
-#endif
+ lapb_dbg(0, "(%p) S3 -> S4\n", lapb->dev);
lapb_start_t1timer(lapb);
lapb_stop_t2timer(lapb);
lapb->state = LAPB_STATE_4;
@@ -462,10 +362,8 @@ static void lapb_state3_machine(struct lapb_cb *lapb, struct sk_buff *skb,
break;
case LAPB_REJ:
-#if LAPB_DEBUG > 1
- printk(KERN_DEBUG "lapb: (%p) S3 RX REJ(%d) R%d\n",
- lapb->dev, frame->pf, frame->nr);
-#endif
+ lapb_dbg(1, "(%p) S3 RX REJ(%d) R%d\n",
+ lapb->dev, frame->pf, frame->nr);
lapb->condition &= ~LAPB_PEER_RX_BUSY_CONDITION;
lapb_check_need_response(lapb, frame->cr, frame->pf);
if (lapb_validate_nr(lapb, frame->nr)) {
@@ -477,9 +375,7 @@ static void lapb_state3_machine(struct lapb_cb *lapb, struct sk_buff *skb,
lapb->frmr_data = *frame;
lapb->frmr_type = LAPB_FRMR_Z;
lapb_transmit_frmr(lapb);
-#if LAPB_DEBUG > 0
- printk(KERN_DEBUG "lapb: (%p) S3 -> S4\n", lapb->dev);
-#endif
+ lapb_dbg(0, "(%p) S3 -> S4\n", lapb->dev);
lapb_start_t1timer(lapb);
lapb_stop_t2timer(lapb);
lapb->state = LAPB_STATE_4;
@@ -488,17 +384,13 @@ static void lapb_state3_machine(struct lapb_cb *lapb, struct sk_buff *skb,
break;
case LAPB_I:
-#if LAPB_DEBUG > 1
- printk(KERN_DEBUG "lapb: (%p) S3 RX I(%d) S%d R%d\n",
- lapb->dev, frame->pf, frame->ns, frame->nr);
-#endif
+ lapb_dbg(1, "(%p) S3 RX I(%d) S%d R%d\n",
+ lapb->dev, frame->pf, frame->ns, frame->nr);
if (!lapb_validate_nr(lapb, frame->nr)) {
lapb->frmr_data = *frame;
lapb->frmr_type = LAPB_FRMR_Z;
lapb_transmit_frmr(lapb);
-#if LAPB_DEBUG > 0
- printk(KERN_DEBUG "lapb: (%p) S3 -> S4\n", lapb->dev);
-#endif
+ lapb_dbg(0, "(%p) S3 -> S4\n", lapb->dev);
lapb_start_t1timer(lapb);
lapb_stop_t2timer(lapb);
lapb->state = LAPB_STATE_4;
@@ -522,7 +414,7 @@ static void lapb_state3_machine(struct lapb_cb *lapb, struct sk_buff *skb,
* a frame lost on the wire.
*/
if (cn == NET_RX_DROP) {
- printk(KERN_DEBUG "LAPB: rx congestion\n");
+ pr_debug("rx congestion\n");
break;
}
lapb->vr = (lapb->vr + 1) % modulus;
@@ -541,11 +433,8 @@ static void lapb_state3_machine(struct lapb_cb *lapb, struct sk_buff *skb,
if (frame->pf)
lapb_enquiry_response(lapb);
} else {
-#if LAPB_DEBUG > 1
- printk(KERN_DEBUG
- "lapb: (%p) S3 TX REJ(%d) R%d\n",
- lapb->dev, frame->pf, lapb->vr);
-#endif
+ lapb_dbg(1, "(%p) S3 TX REJ(%d) R%d\n",
+ lapb->dev, frame->pf, lapb->vr);
lapb->condition |= LAPB_REJECT_CONDITION;
lapb_send_control(lapb, LAPB_REJ, frame->pf,
LAPB_RESPONSE);
@@ -555,31 +444,22 @@ static void lapb_state3_machine(struct lapb_cb *lapb, struct sk_buff *skb,
break;
case LAPB_FRMR:
-#if LAPB_DEBUG > 1
- printk(KERN_DEBUG "lapb: (%p) S3 RX FRMR(%d) %02X "
- "%02X %02X %02X %02X\n", lapb->dev, frame->pf,
- skb->data[0], skb->data[1], skb->data[2],
- skb->data[3], skb->data[4]);
-#endif
+ lapb_dbg(1, "(%p) S3 RX FRMR(%d) %02X %02X %02X %02X %02X\n",
+ lapb->dev, frame->pf,
+ skb->data[0], skb->data[1], skb->data[2],
+ skb->data[3], skb->data[4]);
lapb_establish_data_link(lapb);
-#if LAPB_DEBUG > 0
- printk(KERN_DEBUG "lapb: (%p) S3 -> S1\n", lapb->dev);
-#endif
+ lapb_dbg(0, "(%p) S3 -> S1\n", lapb->dev);
lapb_requeue_frames(lapb);
lapb->state = LAPB_STATE_1;
break;
case LAPB_ILLEGAL:
-#if LAPB_DEBUG > 1
- printk(KERN_DEBUG "lapb: (%p) S3 RX ILLEGAL(%d)\n",
- lapb->dev, frame->pf);
-#endif
+ lapb_dbg(1, "(%p) S3 RX ILLEGAL(%d)\n", lapb->dev, frame->pf);
lapb->frmr_data = *frame;
lapb->frmr_type = LAPB_FRMR_W;
lapb_transmit_frmr(lapb);
-#if LAPB_DEBUG > 0
- printk(KERN_DEBUG "lapb: (%p) S3 -> S4\n", lapb->dev);
-#endif
+ lapb_dbg(0, "(%p) S3 -> S4\n", lapb->dev);
lapb_start_t1timer(lapb);
lapb_stop_t2timer(lapb);
lapb->state = LAPB_STATE_4;
@@ -600,25 +480,16 @@ static void lapb_state4_machine(struct lapb_cb *lapb, struct sk_buff *skb,
{
switch (frame->type) {
case LAPB_SABM:
-#if LAPB_DEBUG > 1
- printk(KERN_DEBUG "lapb: (%p) S4 RX SABM(%d)\n",
- lapb->dev, frame->pf);
-#endif
+ lapb_dbg(1, "(%p) S4 RX SABM(%d)\n", lapb->dev, frame->pf);
if (lapb->mode & LAPB_EXTENDED) {
-#if LAPB_DEBUG > 1
- printk(KERN_DEBUG "lapb: (%p) S4 TX DM(%d)\n",
- lapb->dev, frame->pf);
-#endif
+ lapb_dbg(1, "(%p) S4 TX DM(%d)\n",
+ lapb->dev, frame->pf);
lapb_send_control(lapb, LAPB_DM, frame->pf,
LAPB_RESPONSE);
} else {
-#if LAPB_DEBUG > 1
- printk(KERN_DEBUG "lapb: (%p) S4 TX UA(%d)\n",
- lapb->dev, frame->pf);
-#endif
-#if LAPB_DEBUG > 0
- printk(KERN_DEBUG "lapb: (%p) S4 -> S3\n", lapb->dev);
-#endif
+ lapb_dbg(1, "(%p) S4 TX UA(%d)\n",
+ lapb->dev, frame->pf);
+ lapb_dbg(0, "(%p) S4 -> S3\n", lapb->dev);
lapb_send_control(lapb, LAPB_UA, frame->pf,
LAPB_RESPONSE);
lapb_stop_t1timer(lapb);
@@ -634,18 +505,11 @@ static void lapb_state4_machine(struct lapb_cb *lapb, struct sk_buff *skb,
break;
case LAPB_SABME:
-#if LAPB_DEBUG > 1
- printk(KERN_DEBUG "lapb: (%p) S4 RX SABME(%d)\n",
- lapb->dev, frame->pf);
-#endif
+ lapb_dbg(1, "(%p) S4 RX SABME(%d)\n", lapb->dev, frame->pf);
if (lapb->mode & LAPB_EXTENDED) {
-#if LAPB_DEBUG > 1
- printk(KERN_DEBUG "lapb: (%p) S4 TX UA(%d)\n",
- lapb->dev, frame->pf);
-#endif
-#if LAPB_DEBUG > 0
- printk(KERN_DEBUG "lapb: (%p) S4 -> S3\n", lapb->dev);
-#endif
+ lapb_dbg(1, "(%p) S4 TX UA(%d)\n",
+ lapb->dev, frame->pf);
+ lapb_dbg(0, "(%p) S4 -> S3\n", lapb->dev);
lapb_send_control(lapb, LAPB_UA, frame->pf,
LAPB_RESPONSE);
lapb_stop_t1timer(lapb);
@@ -658,10 +522,8 @@ static void lapb_state4_machine(struct lapb_cb *lapb, struct sk_buff *skb,
lapb->va = 0;
lapb_connect_indication(lapb, LAPB_OK);
} else {
-#if LAPB_DEBUG > 1
- printk(KERN_DEBUG "lapb: (%p) S4 TX DM(%d)\n",
- lapb->dev, frame->pf);
-#endif
+ lapb_dbg(1, "(%p) S4 TX DM(%d)\n",
+ lapb->dev, frame->pf);
lapb_send_control(lapb, LAPB_DM, frame->pf,
LAPB_RESPONSE);
}
diff --git a/net/lapb/lapb_out.c b/net/lapb/lapb_out.c
index baab2760f651..ba4d015bd1a6 100644
--- a/net/lapb/lapb_out.c
+++ b/net/lapb/lapb_out.c
@@ -14,6 +14,8 @@
* LAPB 002 Jonathan Naylor New timer architecture.
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/socket.h>
@@ -60,10 +62,8 @@ static void lapb_send_iframe(struct lapb_cb *lapb, struct sk_buff *skb, int poll
*frame |= lapb->vs << 1;
}
-#if LAPB_DEBUG > 1
- printk(KERN_DEBUG "lapb: (%p) S%d TX I(%d) S%d R%d\n",
- lapb->dev, lapb->state, poll_bit, lapb->vs, lapb->vr);
-#endif
+ lapb_dbg(1, "(%p) S%d TX I(%d) S%d R%d\n",
+ lapb->dev, lapb->state, poll_bit, lapb->vs, lapb->vr);
lapb_transmit_buffer(lapb, skb, LAPB_COMMAND);
}
@@ -148,11 +148,9 @@ void lapb_transmit_buffer(struct lapb_cb *lapb, struct sk_buff *skb, int type)
}
}
-#if LAPB_DEBUG > 2
- printk(KERN_DEBUG "lapb: (%p) S%d TX %02X %02X %02X\n",
- lapb->dev, lapb->state,
- skb->data[0], skb->data[1], skb->data[2]);
-#endif
+ lapb_dbg(2, "(%p) S%d TX %02X %02X %02X\n",
+ lapb->dev, lapb->state,
+ skb->data[0], skb->data[1], skb->data[2]);
if (!lapb_data_transmit(lapb, skb))
kfree_skb(skb);
@@ -164,16 +162,10 @@ void lapb_establish_data_link(struct lapb_cb *lapb)
lapb->n2count = 0;
if (lapb->mode & LAPB_EXTENDED) {
-#if LAPB_DEBUG > 1
- printk(KERN_DEBUG "lapb: (%p) S%d TX SABME(1)\n",
- lapb->dev, lapb->state);
-#endif
+ lapb_dbg(1, "(%p) S%d TX SABME(1)\n", lapb->dev, lapb->state);
lapb_send_control(lapb, LAPB_SABME, LAPB_POLLON, LAPB_COMMAND);
} else {
-#if LAPB_DEBUG > 1
- printk(KERN_DEBUG "lapb: (%p) S%d TX SABM(1)\n",
- lapb->dev, lapb->state);
-#endif
+ lapb_dbg(1, "(%p) S%d TX SABM(1)\n", lapb->dev, lapb->state);
lapb_send_control(lapb, LAPB_SABM, LAPB_POLLON, LAPB_COMMAND);
}
@@ -183,10 +175,8 @@ void lapb_establish_data_link(struct lapb_cb *lapb)
void lapb_enquiry_response(struct lapb_cb *lapb)
{
-#if LAPB_DEBUG > 1
- printk(KERN_DEBUG "lapb: (%p) S%d TX RR(1) R%d\n",
- lapb->dev, lapb->state, lapb->vr);
-#endif
+ lapb_dbg(1, "(%p) S%d TX RR(1) R%d\n",
+ lapb->dev, lapb->state, lapb->vr);
lapb_send_control(lapb, LAPB_RR, LAPB_POLLON, LAPB_RESPONSE);
@@ -195,10 +185,8 @@ void lapb_enquiry_response(struct lapb_cb *lapb)
void lapb_timeout_response(struct lapb_cb *lapb)
{
-#if LAPB_DEBUG > 1
- printk(KERN_DEBUG "lapb: (%p) S%d TX RR(0) R%d\n",
- lapb->dev, lapb->state, lapb->vr);
-#endif
+ lapb_dbg(1, "(%p) S%d TX RR(0) R%d\n",
+ lapb->dev, lapb->state, lapb->vr);
lapb_send_control(lapb, LAPB_RR, LAPB_POLLOFF, LAPB_RESPONSE);
lapb->condition &= ~LAPB_ACK_PENDING_CONDITION;
diff --git a/net/lapb/lapb_subr.c b/net/lapb/lapb_subr.c
index 066225b4e824..9d0a426eccbb 100644
--- a/net/lapb/lapb_subr.c
+++ b/net/lapb/lapb_subr.c
@@ -13,6 +13,8 @@
* LAPB 001 Jonathan Naylor Started Coding
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/socket.h>
@@ -111,11 +113,9 @@ int lapb_decode(struct lapb_cb *lapb, struct sk_buff *skb,
{
frame->type = LAPB_ILLEGAL;
-#if LAPB_DEBUG > 2
- printk(KERN_DEBUG "lapb: (%p) S%d RX %02X %02X %02X\n",
- lapb->dev, lapb->state,
- skb->data[0], skb->data[1], skb->data[2]);
-#endif
+ lapb_dbg(2, "(%p) S%d RX %02X %02X %02X\n",
+ lapb->dev, lapb->state,
+ skb->data[0], skb->data[1], skb->data[2]);
/* We always need to look at 2 bytes, sometimes we need
* to look at 3 and those cases are handled below.
@@ -284,12 +284,10 @@ void lapb_transmit_frmr(struct lapb_cb *lapb)
dptr++;
*dptr++ = lapb->frmr_type;
-#if LAPB_DEBUG > 1
- printk(KERN_DEBUG "lapb: (%p) S%d TX FRMR %02X %02X %02X %02X %02X\n",
- lapb->dev, lapb->state,
- skb->data[1], skb->data[2], skb->data[3],
- skb->data[4], skb->data[5]);
-#endif
+ lapb_dbg(1, "(%p) S%d TX FRMR %02X %02X %02X %02X %02X\n",
+ lapb->dev, lapb->state,
+ skb->data[1], skb->data[2], skb->data[3],
+ skb->data[4], skb->data[5]);
} else {
dptr = skb_put(skb, 4);
*dptr++ = LAPB_FRMR;
@@ -301,11 +299,9 @@ void lapb_transmit_frmr(struct lapb_cb *lapb)
dptr++;
*dptr++ = lapb->frmr_type;
-#if LAPB_DEBUG > 1
- printk(KERN_DEBUG "lapb: (%p) S%d TX FRMR %02X %02X %02X\n",
- lapb->dev, lapb->state, skb->data[1],
- skb->data[2], skb->data[3]);
-#endif
+ lapb_dbg(1, "(%p) S%d TX FRMR %02X %02X %02X\n",
+ lapb->dev, lapb->state, skb->data[1],
+ skb->data[2], skb->data[3]);
}
lapb_transmit_buffer(lapb, skb, LAPB_RESPONSE);
diff --git a/net/lapb/lapb_timer.c b/net/lapb/lapb_timer.c
index f8cd641dfc82..54563ad8aeb1 100644
--- a/net/lapb/lapb_timer.c
+++ b/net/lapb/lapb_timer.c
@@ -14,6 +14,8 @@
* LAPB 002 Jonathan Naylor New timer architecture.
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/socket.h>
@@ -105,21 +107,17 @@ static void lapb_t1timer_expiry(unsigned long param)
lapb_clear_queues(lapb);
lapb->state = LAPB_STATE_0;
lapb_disconnect_indication(lapb, LAPB_TIMEDOUT);
-#if LAPB_DEBUG > 0
- printk(KERN_DEBUG "lapb: (%p) S1 -> S0\n", lapb->dev);
-#endif
+ lapb_dbg(0, "(%p) S1 -> S0\n", lapb->dev);
return;
} else {
lapb->n2count++;
if (lapb->mode & LAPB_EXTENDED) {
-#if LAPB_DEBUG > 1
- printk(KERN_DEBUG "lapb: (%p) S1 TX SABME(1)\n", lapb->dev);
-#endif
+ lapb_dbg(1, "(%p) S1 TX SABME(1)\n",
+ lapb->dev);
lapb_send_control(lapb, LAPB_SABME, LAPB_POLLON, LAPB_COMMAND);
} else {
-#if LAPB_DEBUG > 1
- printk(KERN_DEBUG "lapb: (%p) S1 TX SABM(1)\n", lapb->dev);
-#endif
+ lapb_dbg(1, "(%p) S1 TX SABM(1)\n",
+ lapb->dev);
lapb_send_control(lapb, LAPB_SABM, LAPB_POLLON, LAPB_COMMAND);
}
}
@@ -133,15 +131,11 @@ static void lapb_t1timer_expiry(unsigned long param)
lapb_clear_queues(lapb);
lapb->state = LAPB_STATE_0;
lapb_disconnect_confirmation(lapb, LAPB_TIMEDOUT);
-#if LAPB_DEBUG > 0
- printk(KERN_DEBUG "lapb: (%p) S2 -> S0\n", lapb->dev);
-#endif
+ lapb_dbg(0, "(%p) S2 -> S0\n", lapb->dev);
return;
} else {
lapb->n2count++;
-#if LAPB_DEBUG > 1
- printk(KERN_DEBUG "lapb: (%p) S2 TX DISC(1)\n", lapb->dev);
-#endif
+ lapb_dbg(1, "(%p) S2 TX DISC(1)\n", lapb->dev);
lapb_send_control(lapb, LAPB_DISC, LAPB_POLLON, LAPB_COMMAND);
}
break;
@@ -155,9 +149,7 @@ static void lapb_t1timer_expiry(unsigned long param)
lapb->state = LAPB_STATE_0;
lapb_stop_t2timer(lapb);
lapb_disconnect_indication(lapb, LAPB_TIMEDOUT);
-#if LAPB_DEBUG > 0
- printk(KERN_DEBUG "lapb: (%p) S3 -> S0\n", lapb->dev);
-#endif
+ lapb_dbg(0, "(%p) S3 -> S0\n", lapb->dev);
return;
} else {
lapb->n2count++;
@@ -173,9 +165,7 @@ static void lapb_t1timer_expiry(unsigned long param)
lapb_clear_queues(lapb);
lapb->state = LAPB_STATE_0;
lapb_disconnect_indication(lapb, LAPB_TIMEDOUT);
-#if LAPB_DEBUG > 0
- printk(KERN_DEBUG "lapb: (%p) S4 -> S0\n", lapb->dev);
-#endif
+ lapb_dbg(0, "(%p) S4 -> S0\n", lapb->dev);
return;
} else {
lapb->n2count++;
diff --git a/net/llc/af_llc.c b/net/llc/af_llc.c
index 17bc85d5b7ba..fe5453c3e719 100644
--- a/net/llc/af_llc.c
+++ b/net/llc/af_llc.c
@@ -71,8 +71,7 @@ static inline u16 llc_ui_next_link_no(int sap)
*/
static inline __be16 llc_proto_type(u16 arphrd)
{
- return arphrd == ARPHRD_IEEE802_TR ?
- htons(ETH_P_TR_802_2) : htons(ETH_P_802_2);
+ return htons(ETH_P_802_2);
}
/**
@@ -806,10 +805,9 @@ static int llc_ui_recvmsg(struct kiocb *iocb, struct socket *sock,
sk_wait_data(sk, &timeo);
if ((flags & MSG_PEEK) && peek_seq != llc->copied_seq) {
- if (net_ratelimit())
- printk(KERN_DEBUG "LLC(%s:%d): Application "
- "bug, race in MSG_PEEK.\n",
- current->comm, task_pid_nr(current));
+ net_dbg_ratelimited("LLC(%s:%d): Application bug, race in MSG_PEEK\n",
+ current->comm,
+ task_pid_nr(current));
peek_seq = llc->copied_seq;
}
continue;
@@ -840,7 +838,7 @@ static int llc_ui_recvmsg(struct kiocb *iocb, struct socket *sock,
if (!(flags & MSG_PEEK)) {
spin_lock_irqsave(&sk->sk_receive_queue.lock, cpu_flags);
- sk_eat_skb(sk, skb, 0);
+ sk_eat_skb(sk, skb, false);
spin_unlock_irqrestore(&sk->sk_receive_queue.lock, cpu_flags);
*seq = 0;
}
@@ -863,7 +861,7 @@ copy_uaddr:
if (!(flags & MSG_PEEK)) {
spin_lock_irqsave(&sk->sk_receive_queue.lock, cpu_flags);
- sk_eat_skb(sk, skb, 0);
+ sk_eat_skb(sk, skb, false);
spin_unlock_irqrestore(&sk->sk_receive_queue.lock, cpu_flags);
*seq = 0;
}
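
The if (net_ratelimit()) printk(KERN_DEBUG ...) pattern here, and in the mac80211 hunks further down, collapses into net_dbg_ratelimited(). Its behaviour is roughly the following (a sketch of the generic helper, not the literal header text):

	#define net_dbg_ratelimited(fmt, ...)			\
	do {							\
		if (net_ratelimit())				\
			pr_debug(fmt, ##__VA_ARGS__);		\
	} while (0)

The sk_eat_skb() change in the same hunks is separate tidying: its last argument is now a bool, so the literal 0 becomes false.
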
diff --git a/net/llc/llc_output.c b/net/llc/llc_output.c
index b658cba89fdd..2dae8a5df23f 100644
--- a/net/llc/llc_output.c
+++ b/net/llc/llc_output.c
@@ -14,9 +14,7 @@
*/
#include <linux/if_arp.h>
-#include <linux/if_tr.h>
#include <linux/netdevice.h>
-#include <linux/trdevice.h>
#include <linux/skbuff.h>
#include <linux/export.h>
#include <net/llc.h>
@@ -37,7 +35,6 @@ int llc_mac_hdr_init(struct sk_buff *skb,
int rc = -EINVAL;
switch (skb->dev->type) {
- case ARPHRD_IEEE802_TR:
case ARPHRD_ETHER:
case ARPHRD_LOOPBACK:
rc = dev_hard_header(skb, skb->dev, ETH_P_802_2, da, sa,
diff --git a/net/llc/llc_sap.c b/net/llc/llc_sap.c
index 94e7fca75b85..7c5073badc73 100644
--- a/net/llc/llc_sap.c
+++ b/net/llc/llc_sap.c
@@ -31,10 +31,6 @@ static int llc_mac_header_len(unsigned short devtype)
case ARPHRD_ETHER:
case ARPHRD_LOOPBACK:
return sizeof(struct ethhdr);
-#if defined(CONFIG_TR) || defined(CONFIG_TR_MODULE)
- case ARPHRD_IEEE802_TR:
- return sizeof(struct trh_hdr);
-#endif
}
return 0;
}
diff --git a/net/mac80211/agg-rx.c b/net/mac80211/agg-rx.c
index a070d4f460ea..26ddb699d693 100644
--- a/net/mac80211/agg-rx.c
+++ b/net/mac80211/agg-rx.c
@@ -260,11 +260,8 @@ void ieee80211_process_addba_request(struct ieee80211_local *local,
(buf_size > IEEE80211_MAX_AMPDU_BUF)) {
status = WLAN_STATUS_INVALID_QOS_PARAM;
#ifdef CONFIG_MAC80211_HT_DEBUG
- if (net_ratelimit())
- printk(KERN_DEBUG "AddBA Req with bad params from "
- "%pM on tid %u. policy %d, buffer size %d\n",
- mgmt->sa, tid, ba_policy,
- buf_size);
+ net_dbg_ratelimited("AddBA Req with bad params from %pM on tid %u. policy %d, buffer size %d\n",
+ mgmt->sa, tid, ba_policy, buf_size);
#endif /* CONFIG_MAC80211_HT_DEBUG */
goto end_no_lock;
}
@@ -281,10 +278,8 @@ void ieee80211_process_addba_request(struct ieee80211_local *local,
if (sta->ampdu_mlme.tid_rx[tid]) {
#ifdef CONFIG_MAC80211_HT_DEBUG
- if (net_ratelimit())
- printk(KERN_DEBUG "unexpected AddBA Req from "
- "%pM on tid %u\n",
- mgmt->sa, tid);
+ net_dbg_ratelimited("unexpected AddBA Req from %pM on tid %u\n",
+ mgmt->sa, tid);
#endif /* CONFIG_MAC80211_HT_DEBUG */
/* delete existing Rx BA session on the same tid */
diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c
index 0221270c0ddf..495831ee48f1 100644
--- a/net/mac80211/cfg.c
+++ b/net/mac80211/cfg.c
@@ -1093,7 +1093,7 @@ static int ieee80211_add_station(struct wiphy *wiphy, struct net_device *dev,
} else
sdata = IEEE80211_DEV_TO_SUB_IF(dev);
- if (compare_ether_addr(mac, sdata->vif.addr) == 0)
+ if (ether_addr_equal(mac, sdata->vif.addr))
return -EINVAL;
if (is_multicast_ether_addr(mac))
diff --git a/net/mac80211/ht.c b/net/mac80211/ht.c
index 9b603366943c..6f8615c54b22 100644
--- a/net/mac80211/ht.c
+++ b/net/mac80211/ht.c
@@ -306,10 +306,10 @@ void ieee80211_process_delba(struct ieee80211_sub_if_data *sdata,
initiator = (params & IEEE80211_DELBA_PARAM_INITIATOR_MASK) >> 11;
#ifdef CONFIG_MAC80211_HT_DEBUG
- if (net_ratelimit())
- printk(KERN_DEBUG "delba from %pM (%s) tid %d reason code %d\n",
- mgmt->sa, initiator ? "initiator" : "recipient", tid,
- le16_to_cpu(mgmt->u.action.u.delba.reason_code));
+ net_dbg_ratelimited("delba from %pM (%s) tid %d reason code %d\n",
+ mgmt->sa, initiator ? "initiator" : "recipient",
+ tid,
+ le16_to_cpu(mgmt->u.action.u.delba.reason_code));
#endif /* CONFIG_MAC80211_HT_DEBUG */
if (initiator == WLAN_BACK_INITIATOR)
diff --git a/net/mac80211/ibss.c b/net/mac80211/ibss.c
index 11ac1ffd9570..33d9d0c3e3d0 100644
--- a/net/mac80211/ibss.c
+++ b/net/mac80211/ibss.c
@@ -66,7 +66,7 @@ static void __ieee80211_sta_join_ibss(struct ieee80211_sub_if_data *sdata,
skb_reset_tail_pointer(skb);
skb_reserve(skb, sdata->local->hw.extra_tx_headroom);
- if (compare_ether_addr(ifibss->bssid, bssid))
+ if (!ether_addr_equal(ifibss->bssid, bssid))
sta_info_flush(sdata->local, sdata);
/* if merging, indicate to driver that we leave the old IBSS */
@@ -308,9 +308,8 @@ ieee80211_ibss_add_sta(struct ieee80211_sub_if_data *sdata,
* allow new one to be added.
*/
if (local->num_sta >= IEEE80211_IBSS_MAX_STA_ENTRIES) {
- if (net_ratelimit())
- printk(KERN_DEBUG "%s: No room for a new IBSS STA entry %pM\n",
- sdata->name, addr);
+ net_dbg_ratelimited("%s: No room for a new IBSS STA entry %pM\n",
+ sdata->name, addr);
rcu_read_lock();
return NULL;
}
@@ -320,7 +319,7 @@ ieee80211_ibss_add_sta(struct ieee80211_sub_if_data *sdata,
return NULL;
}
- if (compare_ether_addr(bssid, sdata->u.ibss.bssid)) {
+ if (!ether_addr_equal(bssid, sdata->u.ibss.bssid)) {
rcu_read_lock();
return NULL;
}
@@ -406,7 +405,7 @@ static void ieee80211_rx_bss_info(struct ieee80211_sub_if_data *sdata,
return;
if (sdata->vif.type == NL80211_IFTYPE_ADHOC &&
- compare_ether_addr(mgmt->bssid, sdata->u.ibss.bssid) == 0) {
+ ether_addr_equal(mgmt->bssid, sdata->u.ibss.bssid)) {
rcu_read_lock();
sta = sta_info_get(sdata, mgmt->sa);
@@ -511,7 +510,7 @@ static void ieee80211_rx_bss_info(struct ieee80211_sub_if_data *sdata,
goto put_bss;
/* same BSSID */
- if (compare_ether_addr(cbss->bssid, sdata->u.ibss.bssid) == 0)
+ if (ether_addr_equal(cbss->bssid, sdata->u.ibss.bssid))
goto put_bss;
if (rx_status->flag & RX_FLAG_MACTIME_MPDU) {
@@ -587,16 +586,15 @@ void ieee80211_ibss_rx_no_sta(struct ieee80211_sub_if_data *sdata,
* allow new one to be added.
*/
if (local->num_sta >= IEEE80211_IBSS_MAX_STA_ENTRIES) {
- if (net_ratelimit())
- printk(KERN_DEBUG "%s: No room for a new IBSS STA entry %pM\n",
- sdata->name, addr);
+ net_dbg_ratelimited("%s: No room for a new IBSS STA entry %pM\n",
+ sdata->name, addr);
return;
}
if (ifibss->state == IEEE80211_IBSS_MLME_SEARCH)
return;
- if (compare_ether_addr(bssid, sdata->u.ibss.bssid))
+ if (!ether_addr_equal(bssid, sdata->u.ibss.bssid))
return;
sta = sta_info_alloc(sdata, addr, GFP_ATOMIC);
@@ -834,7 +832,7 @@ static void ieee80211_rx_mgmt_probe_req(struct ieee80211_sub_if_data *sdata,
if (!tx_last_beacon && is_multicast_ether_addr(mgmt->da))
return;
- if (compare_ether_addr(mgmt->bssid, ifibss->bssid) != 0 &&
+ if (!ether_addr_equal(mgmt->bssid, ifibss->bssid) &&
!is_broadcast_ether_addr(mgmt->bssid))
return;
diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h
index ae046b52d5e2..3f3cd50fff16 100644
--- a/net/mac80211/ieee80211_i.h
+++ b/net/mac80211/ieee80211_i.h
@@ -1195,7 +1195,7 @@ static inline struct ieee80211_local *hw_to_local(
static inline int ieee80211_bssid_match(const u8 *raddr, const u8 *addr)
{
- return compare_ether_addr(raddr, addr) == 0 ||
+ return ether_addr_equal(raddr, addr) ||
is_broadcast_ether_addr(raddr);
}
diff --git a/net/mac80211/iface.c b/net/mac80211/iface.c
index c550945dd703..d4c19a7773db 100644
--- a/net/mac80211/iface.c
+++ b/net/mac80211/iface.c
@@ -127,7 +127,7 @@ static int ieee80211_check_concurrent_iface(struct ieee80211_sub_if_data *sdata,
* The remaining checks are only performed for interfaces
* with the same MAC address.
*/
- if (compare_ether_addr(dev->dev_addr, ndev->dev_addr))
+ if (!ether_addr_equal(dev->dev_addr, ndev->dev_addr))
continue;
/*
diff --git a/net/mac80211/mesh.c b/net/mac80211/mesh.c
index d3a9a6c081e7..2913113c5833 100644
--- a/net/mac80211/mesh.c
+++ b/net/mac80211/mesh.c
@@ -217,7 +217,7 @@ int mesh_rmc_check(u8 *sa, struct ieee80211s_hdr *mesh_hdr,
kmem_cache_free(rm_cache, p);
--entries;
} else if ((seqnum == p->seqnum) &&
- (compare_ether_addr(sa, p->sa) == 0))
+ (ether_addr_equal(sa, p->sa)))
return -1;
}
@@ -651,7 +651,7 @@ static void ieee80211_mesh_rx_bcn_presp(struct ieee80211_sub_if_data *sdata,
/* ignore ProbeResp to foreign address */
if (stype == IEEE80211_STYPE_PROBE_RESP &&
- compare_ether_addr(mgmt->da, sdata->vif.addr))
+ !ether_addr_equal(mgmt->da, sdata->vif.addr))
return;
baselen = (u8 *) mgmt->u.probe_resp.variable - (u8 *) mgmt;
diff --git a/net/mac80211/mesh_hwmp.c b/net/mac80211/mesh_hwmp.c
index 70ac7d180077..9b59658e8650 100644
--- a/net/mac80211/mesh_hwmp.c
+++ b/net/mac80211/mesh_hwmp.c
@@ -422,7 +422,7 @@ static u32 hwmp_route_info_get(struct ieee80211_sub_if_data *sdata,
new_metric = MAX_METRIC;
exp_time = TU_TO_EXP_TIME(orig_lifetime);
- if (compare_ether_addr(orig_addr, sdata->vif.addr) == 0) {
+ if (ether_addr_equal(orig_addr, sdata->vif.addr)) {
/* This MP is the originator, we are not interested in this
* frame, except for updating transmitter's path info.
*/
@@ -472,7 +472,7 @@ static u32 hwmp_route_info_get(struct ieee80211_sub_if_data *sdata,
/* Update and check transmitter routing info */
ta = mgmt->sa;
- if (compare_ether_addr(orig_addr, ta) == 0)
+ if (ether_addr_equal(orig_addr, ta))
fresh_info = false;
else {
fresh_info = true;
@@ -533,7 +533,7 @@ static void hwmp_preq_frame_process(struct ieee80211_sub_if_data *sdata,
mhwmp_dbg("received PREQ from %pM", orig_addr);
- if (compare_ether_addr(target_addr, sdata->vif.addr) == 0) {
+ if (ether_addr_equal(target_addr, sdata->vif.addr)) {
mhwmp_dbg("PREQ is for us");
forward = false;
reply = true;
@@ -634,7 +634,7 @@ static void hwmp_prep_frame_process(struct ieee80211_sub_if_data *sdata,
mhwmp_dbg("received PREP from %pM", PREP_IE_ORIG_ADDR(prep_elem));
orig_addr = PREP_IE_ORIG_ADDR(prep_elem);
- if (compare_ether_addr(orig_addr, sdata->vif.addr) == 0)
+ if (ether_addr_equal(orig_addr, sdata->vif.addr))
/* destination, no forwarding required */
return;
@@ -712,7 +712,7 @@ static void hwmp_perr_frame_process(struct ieee80211_sub_if_data *sdata,
spin_lock_bh(&mpath->state_lock);
sta = next_hop_deref_protected(mpath);
if (mpath->flags & MESH_PATH_ACTIVE &&
- compare_ether_addr(ta, sta->sta.addr) == 0 &&
+ ether_addr_equal(ta, sta->sta.addr) &&
(!(mpath->flags & MESH_PATH_SN_VALID) ||
SN_GT(target_sn, mpath->sn))) {
mpath->flags &= ~MESH_PATH_ACTIVE;
@@ -759,7 +759,7 @@ static void hwmp_rann_frame_process(struct ieee80211_sub_if_data *sdata,
metric = le32_to_cpu(rann->rann_metric);
/* Ignore our own RANNs */
- if (compare_ether_addr(orig_addr, sdata->vif.addr) == 0)
+ if (ether_addr_equal(orig_addr, sdata->vif.addr))
return;
mhwmp_dbg("received RANN from %pM via neighbour %pM (is_gate=%d)",
@@ -1102,7 +1102,7 @@ int mesh_nexthop_lookup(struct sk_buff *skb,
if (time_after(jiffies,
mpath->exp_time -
msecs_to_jiffies(sdata->u.mesh.mshcfg.path_refresh_time)) &&
- !compare_ether_addr(sdata->vif.addr, hdr->addr4) &&
+ ether_addr_equal(sdata->vif.addr, hdr->addr4) &&
!(mpath->flags & MESH_PATH_RESOLVING) &&
!(mpath->flags & MESH_PATH_FIXED))
mesh_queue_preq(mpath, PREQ_Q_F_START | PREQ_Q_F_REFRESH);
diff --git a/net/mac80211/mesh_pathtbl.c b/net/mac80211/mesh_pathtbl.c
index baa6096c66b4..b39224d8255c 100644
--- a/net/mac80211/mesh_pathtbl.c
+++ b/net/mac80211/mesh_pathtbl.c
@@ -348,7 +348,7 @@ static struct mesh_path *mpath_lookup(struct mesh_table *tbl, u8 *dst,
hlist_for_each_entry_rcu(node, n, bucket, list) {
mpath = node->mpath;
if (mpath->sdata == sdata &&
- compare_ether_addr(dst, mpath->dst) == 0) {
+ ether_addr_equal(dst, mpath->dst)) {
if (MPATH_EXPIRED(mpath)) {
spin_lock_bh(&mpath->state_lock);
mpath->flags &= ~MESH_PATH_ACTIVE;
@@ -517,7 +517,7 @@ int mesh_path_add(u8 *dst, struct ieee80211_sub_if_data *sdata)
int err = 0;
u32 hash_idx;
- if (compare_ether_addr(dst, sdata->vif.addr) == 0)
+ if (ether_addr_equal(dst, sdata->vif.addr))
/* never add ourselves as neighbours */
return -ENOTSUPP;
@@ -561,7 +561,7 @@ int mesh_path_add(u8 *dst, struct ieee80211_sub_if_data *sdata)
hlist_for_each_entry(node, n, bucket, list) {
mpath = node->mpath;
if (mpath->sdata == sdata &&
- compare_ether_addr(dst, mpath->dst) == 0)
+ ether_addr_equal(dst, mpath->dst))
goto err_exists;
}
@@ -652,7 +652,7 @@ int mpp_path_add(u8 *dst, u8 *mpp, struct ieee80211_sub_if_data *sdata)
int err = 0;
u32 hash_idx;
- if (compare_ether_addr(dst, sdata->vif.addr) == 0)
+ if (ether_addr_equal(dst, sdata->vif.addr))
/* never add ourselves as neighbours */
return -ENOTSUPP;
@@ -690,7 +690,7 @@ int mpp_path_add(u8 *dst, u8 *mpp, struct ieee80211_sub_if_data *sdata)
hlist_for_each_entry(node, n, bucket, list) {
mpath = node->mpath;
if (mpath->sdata == sdata &&
- compare_ether_addr(dst, mpath->dst) == 0)
+ ether_addr_equal(dst, mpath->dst))
goto err_exists;
}
@@ -884,7 +884,7 @@ int mesh_path_del(u8 *addr, struct ieee80211_sub_if_data *sdata)
hlist_for_each_entry(node, n, bucket, list) {
mpath = node->mpath;
if (mpath->sdata == sdata &&
- compare_ether_addr(addr, mpath->dst) == 0) {
+ ether_addr_equal(addr, mpath->dst)) {
__mesh_path_del(tbl, node);
goto enddel;
}
diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c
index dbd4bd92c018..b3b3c264ff66 100644
--- a/net/mac80211/mlme.c
+++ b/net/mac80211/mlme.c
@@ -1567,9 +1567,9 @@ static void ieee80211_mgd_probe_ap(struct ieee80211_sub_if_data *sdata,
}
#ifdef CONFIG_MAC80211_VERBOSE_DEBUG
- if (beacon && net_ratelimit())
- printk(KERN_DEBUG "%s: detected beacon loss from AP "
- "- sending probe request\n", sdata->name);
+ if (beacon)
+ net_dbg_ratelimited("%s: detected beacon loss from AP - sending probe request\n",
+ sdata->name);
#endif
/*
@@ -1776,7 +1776,7 @@ ieee80211_rx_mgmt_auth(struct ieee80211_sub_if_data *sdata,
memcpy(bssid, ifmgd->auth_data->bss->bssid, ETH_ALEN);
- if (compare_ether_addr(bssid, mgmt->bssid))
+ if (!ether_addr_equal(bssid, mgmt->bssid))
return RX_MGMT_NONE;
auth_alg = le16_to_cpu(mgmt->u.auth.auth_alg);
@@ -1853,7 +1853,7 @@ ieee80211_rx_mgmt_deauth(struct ieee80211_sub_if_data *sdata,
return RX_MGMT_NONE;
if (!ifmgd->associated ||
- compare_ether_addr(mgmt->bssid, ifmgd->associated->bssid))
+ !ether_addr_equal(mgmt->bssid, ifmgd->associated->bssid))
return RX_MGMT_NONE;
bssid = ifmgd->associated->bssid;
@@ -1886,7 +1886,7 @@ ieee80211_rx_mgmt_disassoc(struct ieee80211_sub_if_data *sdata,
return RX_MGMT_NONE;
if (!ifmgd->associated ||
- compare_ether_addr(mgmt->bssid, ifmgd->associated->bssid))
+ !ether_addr_equal(mgmt->bssid, ifmgd->associated->bssid))
return RX_MGMT_NONE;
reason_code = le16_to_cpu(mgmt->u.disassoc.reason_code);
@@ -2113,7 +2113,7 @@ ieee80211_rx_mgmt_assoc_resp(struct ieee80211_sub_if_data *sdata,
if (!assoc_data)
return RX_MGMT_NONE;
- if (compare_ether_addr(assoc_data->bss->bssid, mgmt->bssid))
+ if (!ether_addr_equal(assoc_data->bss->bssid, mgmt->bssid))
return RX_MGMT_NONE;
/*
@@ -2193,8 +2193,7 @@ static void ieee80211_rx_bss_info(struct ieee80211_sub_if_data *sdata,
bool need_ps = false;
if (sdata->u.mgd.associated &&
- compare_ether_addr(mgmt->bssid, sdata->u.mgd.associated->bssid)
- == 0) {
+ ether_addr_equal(mgmt->bssid, sdata->u.mgd.associated->bssid)) {
bss = (void *)sdata->u.mgd.associated->priv;
/* not previously set so we may need to recalc */
need_ps = !bss->dtim_period;
@@ -2249,7 +2248,7 @@ static void ieee80211_rx_mgmt_probe_resp(struct ieee80211_sub_if_data *sdata,
ASSERT_MGD_MTX(ifmgd);
- if (compare_ether_addr(mgmt->da, sdata->vif.addr))
+ if (!ether_addr_equal(mgmt->da, sdata->vif.addr))
return; /* ignore ProbeResp to foreign address */
baselen = (u8 *) mgmt->u.probe_resp.variable - (u8 *) mgmt;
@@ -2262,12 +2261,11 @@ static void ieee80211_rx_mgmt_probe_resp(struct ieee80211_sub_if_data *sdata,
ieee80211_rx_bss_info(sdata, mgmt, len, rx_status, &elems, false);
if (ifmgd->associated &&
- compare_ether_addr(mgmt->bssid, ifmgd->associated->bssid) == 0)
+ ether_addr_equal(mgmt->bssid, ifmgd->associated->bssid))
ieee80211_reset_ap_probe(sdata);
if (ifmgd->auth_data && !ifmgd->auth_data->bss->proberesp_ies &&
- compare_ether_addr(mgmt->bssid, ifmgd->auth_data->bss->bssid)
- == 0) {
+ ether_addr_equal(mgmt->bssid, ifmgd->auth_data->bss->bssid)) {
/* got probe response, continue with auth */
printk(KERN_DEBUG "%s: direct probe responded\n", sdata->name);
ifmgd->auth_data->tries = 0;
@@ -2324,8 +2322,7 @@ static void ieee80211_rx_mgmt_beacon(struct ieee80211_sub_if_data *sdata,
return;
if (ifmgd->assoc_data && !ifmgd->assoc_data->have_beacon &&
- compare_ether_addr(mgmt->bssid, ifmgd->assoc_data->bss->bssid)
- == 0) {
+ ether_addr_equal(mgmt->bssid, ifmgd->assoc_data->bss->bssid)) {
ieee802_11_parse_elems(mgmt->u.beacon.variable,
len - baselen, &elems);
@@ -2340,7 +2337,7 @@ static void ieee80211_rx_mgmt_beacon(struct ieee80211_sub_if_data *sdata,
}
if (!ifmgd->associated ||
- compare_ether_addr(mgmt->bssid, ifmgd->associated->bssid))
+ !ether_addr_equal(mgmt->bssid, ifmgd->associated->bssid))
return;
bssid = ifmgd->associated->bssid;
@@ -2407,10 +2404,8 @@ static void ieee80211_rx_mgmt_beacon(struct ieee80211_sub_if_data *sdata,
if (ifmgd->flags & IEEE80211_STA_BEACON_POLL) {
#ifdef CONFIG_MAC80211_VERBOSE_DEBUG
- if (net_ratelimit()) {
- printk(KERN_DEBUG "%s: cancelling probereq poll due "
- "to a received beacon\n", sdata->name);
- }
+ net_dbg_ratelimited("%s: cancelling probereq poll due to a received beacon\n",
+ sdata->name);
#endif
ifmgd->flags &= ~IEEE80211_STA_BEACON_POLL;
mutex_lock(&local->iflist_mtx);
@@ -3166,7 +3161,7 @@ static int ieee80211_prep_connection(struct ieee80211_sub_if_data *sdata,
return err;
}
} else
- WARN_ON_ONCE(compare_ether_addr(ifmgd->bssid, cbss->bssid));
+ WARN_ON_ONCE(!ether_addr_equal(ifmgd->bssid, cbss->bssid));
return 0;
}
@@ -3306,7 +3301,7 @@ int ieee80211_mgd_assoc(struct ieee80211_sub_if_data *sdata,
bool match;
/* keep sta info, bssid if matching */
- match = compare_ether_addr(ifmgd->bssid, req->bss->bssid) == 0;
+ match = ether_addr_equal(ifmgd->bssid, req->bss->bssid);
ieee80211_destroy_auth_data(sdata, match);
}
@@ -3466,7 +3461,7 @@ int ieee80211_mgd_deauth(struct ieee80211_sub_if_data *sdata,
sdata->name, req->bssid, req->reason_code);
if (ifmgd->associated &&
- compare_ether_addr(ifmgd->associated->bssid, req->bssid) == 0)
+ ether_addr_equal(ifmgd->associated->bssid, req->bssid))
ieee80211_set_disassoc(sdata, IEEE80211_STYPE_DEAUTH,
req->reason_code, true, frame_buf);
else
diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c
index 489093b08a4a..7bcecf73aafb 100644
--- a/net/mac80211/rx.c
+++ b/net/mac80211/rx.c
@@ -492,12 +492,12 @@ ieee80211_rx_mesh_check(struct ieee80211_rx_data *rx)
if (ieee80211_has_tods(hdr->frame_control) ||
!ieee80211_has_fromds(hdr->frame_control))
return RX_DROP_MONITOR;
- if (compare_ether_addr(hdr->addr3, dev_addr) == 0)
+ if (ether_addr_equal(hdr->addr3, dev_addr))
return RX_DROP_MONITOR;
} else {
if (!ieee80211_has_a4(hdr->frame_control))
return RX_DROP_MONITOR;
- if (compare_ether_addr(hdr->addr4, dev_addr) == 0)
+ if (ether_addr_equal(hdr->addr4, dev_addr))
return RX_DROP_MONITOR;
}
}
@@ -1275,7 +1275,7 @@ ieee80211_rx_h_sta_process(struct ieee80211_rx_data *rx)
if (rx->sdata->vif.type == NL80211_IFTYPE_ADHOC) {
u8 *bssid = ieee80211_get_bssid(hdr, rx->skb->len,
NL80211_IFTYPE_ADHOC);
- if (compare_ether_addr(bssid, rx->sdata->u.ibss.bssid) == 0) {
+ if (ether_addr_equal(bssid, rx->sdata->u.ibss.bssid)) {
sta->last_rx = jiffies;
if (ieee80211_is_data(hdr->frame_control)) {
sta->last_rx_rate_idx = status->rate_idx;
@@ -1438,8 +1438,8 @@ ieee80211_reassemble_find(struct ieee80211_sub_if_data *sdata,
*/
if (((hdr->frame_control ^ f_hdr->frame_control) &
cpu_to_le16(IEEE80211_FCTL_FTYPE)) ||
- compare_ether_addr(hdr->addr1, f_hdr->addr1) != 0 ||
- compare_ether_addr(hdr->addr2, f_hdr->addr2) != 0)
+ !ether_addr_equal(hdr->addr1, f_hdr->addr1) ||
+ !ether_addr_equal(hdr->addr2, f_hdr->addr2))
continue;
if (time_after(jiffies, entry->first_frag_time + 2 * HZ)) {
@@ -1714,8 +1714,8 @@ static bool ieee80211_frame_allowed(struct ieee80211_rx_data *rx, __le16 fc)
* of whether the frame was encrypted or not.
*/
if (ehdr->h_proto == rx->sdata->control_port_protocol &&
- (compare_ether_addr(ehdr->h_dest, rx->sdata->vif.addr) == 0 ||
- compare_ether_addr(ehdr->h_dest, pae_group_addr) == 0))
+ (ether_addr_equal(ehdr->h_dest, rx->sdata->vif.addr) ||
+ ether_addr_equal(ehdr->h_dest, pae_group_addr)))
return true;
if (ieee80211_802_1x_port_control(rx) ||
@@ -1752,9 +1752,9 @@ ieee80211_deliver_skb(struct ieee80211_rx_data *rx)
* local net stack and back to the wireless medium
*/
xmit_skb = skb_copy(skb, GFP_ATOMIC);
- if (!xmit_skb && net_ratelimit())
- printk(KERN_DEBUG "%s: failed to clone "
- "multicast frame\n", dev->name);
+ if (!xmit_skb)
+ net_dbg_ratelimited("%s: failed to clone multicast frame\n",
+ dev->name);
} else {
dsta = sta_info_get(sdata, skb->data);
if (dsta) {
@@ -1925,7 +1925,7 @@ ieee80211_rx_h_mesh_fwding(struct ieee80211_rx_data *rx)
mpp_path_add(proxied_addr, mpp_addr, sdata);
} else {
spin_lock_bh(&mppath->state_lock);
- if (compare_ether_addr(mppath->mpp, mpp_addr) != 0)
+ if (!ether_addr_equal(mppath->mpp, mpp_addr))
memcpy(mppath->mpp, mpp_addr, ETH_ALEN);
spin_unlock_bh(&mppath->state_lock);
}
@@ -1934,7 +1934,7 @@ ieee80211_rx_h_mesh_fwding(struct ieee80211_rx_data *rx)
/* Frame has reached destination. Don't forward */
if (!is_multicast_ether_addr(hdr->addr1) &&
- compare_ether_addr(sdata->vif.addr, hdr->addr3) == 0)
+ ether_addr_equal(sdata->vif.addr, hdr->addr3))
return RX_CONTINUE;
q = ieee80211_select_queue_80211(local, skb, hdr);
@@ -1957,9 +1957,8 @@ ieee80211_rx_h_mesh_fwding(struct ieee80211_rx_data *rx)
fwd_skb = skb_copy(skb, GFP_ATOMIC);
if (!fwd_skb) {
- if (net_ratelimit())
- printk(KERN_DEBUG "%s: failed to clone mesh frame\n",
- sdata->name);
+ net_dbg_ratelimited("%s: failed to clone mesh frame\n",
+ sdata->name);
goto out;
}
@@ -2122,13 +2121,13 @@ static void ieee80211_process_sa_query_req(struct ieee80211_sub_if_data *sdata,
struct sk_buff *skb;
struct ieee80211_mgmt *resp;
- if (compare_ether_addr(mgmt->da, sdata->vif.addr) != 0) {
+ if (!ether_addr_equal(mgmt->da, sdata->vif.addr)) {
/* Not to own unicast address */
return;
}
- if (compare_ether_addr(mgmt->sa, sdata->u.mgd.bssid) != 0 ||
- compare_ether_addr(mgmt->bssid, sdata->u.mgd.bssid) != 0) {
+ if (!ether_addr_equal(mgmt->sa, sdata->u.mgd.bssid) ||
+ !ether_addr_equal(mgmt->bssid, sdata->u.mgd.bssid)) {
/* Not from the current AP or not associated yet. */
return;
}
@@ -2338,7 +2337,7 @@ ieee80211_rx_h_action(struct ieee80211_rx_data *rx)
if (sdata->vif.type != NL80211_IFTYPE_STATION)
break;
- if (compare_ether_addr(mgmt->bssid, sdata->u.mgd.bssid))
+ if (!ether_addr_equal(mgmt->bssid, sdata->u.mgd.bssid))
break;
goto queue;
@@ -2772,7 +2771,7 @@ static int prepare_for_handlers(struct ieee80211_rx_data *rx,
if (!bssid && !sdata->u.mgd.use_4addr)
return 0;
if (!multicast &&
- compare_ether_addr(sdata->vif.addr, hdr->addr1) != 0) {
+ !ether_addr_equal(sdata->vif.addr, hdr->addr1)) {
if (!(sdata->dev->flags & IFF_PROMISC) ||
sdata->u.mgd.use_4addr)
return 0;
@@ -2790,8 +2789,7 @@ static int prepare_for_handlers(struct ieee80211_rx_data *rx,
return 0;
status->rx_flags &= ~IEEE80211_RX_RA_MATCH;
} else if (!multicast &&
- compare_ether_addr(sdata->vif.addr,
- hdr->addr1) != 0) {
+ !ether_addr_equal(sdata->vif.addr, hdr->addr1)) {
if (!(sdata->dev->flags & IFF_PROMISC))
return 0;
status->rx_flags &= ~IEEE80211_RX_RA_MATCH;
@@ -2807,8 +2805,7 @@ static int prepare_for_handlers(struct ieee80211_rx_data *rx,
break;
case NL80211_IFTYPE_MESH_POINT:
if (!multicast &&
- compare_ether_addr(sdata->vif.addr,
- hdr->addr1) != 0) {
+ !ether_addr_equal(sdata->vif.addr, hdr->addr1)) {
if (!(sdata->dev->flags & IFF_PROMISC))
return 0;
@@ -2818,8 +2815,7 @@ static int prepare_for_handlers(struct ieee80211_rx_data *rx,
case NL80211_IFTYPE_AP_VLAN:
case NL80211_IFTYPE_AP:
if (!bssid) {
- if (compare_ether_addr(sdata->vif.addr,
- hdr->addr1))
+ if (!ether_addr_equal(sdata->vif.addr, hdr->addr1))
return 0;
} else if (!ieee80211_bssid_match(bssid,
sdata->vif.addr)) {
@@ -2841,7 +2837,7 @@ static int prepare_for_handlers(struct ieee80211_rx_data *rx,
case NL80211_IFTYPE_WDS:
if (bssid || !ieee80211_is_data(hdr->frame_control))
return 0;
- if (compare_ether_addr(sdata->u.wds.remote_addr, hdr->addr2))
+ if (!ether_addr_equal(sdata->u.wds.remote_addr, hdr->addr2))
return 0;
break;
default:
diff --git a/net/mac80211/scan.c b/net/mac80211/scan.c
index 8282284f835c..169da0742c81 100644
--- a/net/mac80211/scan.c
+++ b/net/mac80211/scan.c
@@ -194,7 +194,7 @@ ieee80211_scan_rx(struct ieee80211_sub_if_data *sdata, struct sk_buff *skb)
presp = ieee80211_is_probe_resp(fc);
if (presp) {
/* ignore ProbeResp to foreign address */
- if (compare_ether_addr(mgmt->da, sdata->vif.addr))
+ if (!ether_addr_equal(mgmt->da, sdata->vif.addr))
return RX_DROP_MONITOR;
presp = true;
diff --git a/net/mac80211/sta_info.c b/net/mac80211/sta_info.c
index 97a9d6639fb9..f5b1638fbf80 100644
--- a/net/mac80211/sta_info.c
+++ b/net/mac80211/sta_info.c
@@ -102,7 +102,7 @@ struct sta_info *sta_info_get(struct ieee80211_sub_if_data *sdata,
lockdep_is_held(&local->sta_mtx));
while (sta) {
if (sta->sdata == sdata &&
- compare_ether_addr(sta->sta.addr, addr) == 0)
+ ether_addr_equal(sta->sta.addr, addr))
break;
sta = rcu_dereference_check(sta->hnext,
lockdep_is_held(&local->sta_mtx));
@@ -125,7 +125,7 @@ struct sta_info *sta_info_get_bss(struct ieee80211_sub_if_data *sdata,
while (sta) {
if ((sta->sdata == sdata ||
(sta->sdata->bss && sta->sdata->bss == sdata->bss)) &&
- compare_ether_addr(sta->sta.addr, addr) == 0)
+ ether_addr_equal(sta->sta.addr, addr))
break;
sta = rcu_dereference_check(sta->hnext,
lockdep_is_held(&local->sta_mtx));
@@ -302,7 +302,7 @@ static int sta_info_insert_check(struct sta_info *sta)
if (unlikely(!ieee80211_sdata_running(sdata)))
return -ENETDOWN;
- if (WARN_ON(compare_ether_addr(sta->sta.addr, sdata->vif.addr) == 0 ||
+ if (WARN_ON(ether_addr_equal(sta->sta.addr, sdata->vif.addr) ||
is_multicast_ether_addr(sta->sta.addr)))
return -EINVAL;
@@ -912,7 +912,7 @@ struct ieee80211_sta *ieee80211_find_sta_by_ifaddr(struct ieee80211_hw *hw,
*/
for_each_sta_info(hw_to_local(hw), addr, sta, nxt) {
if (localaddr &&
- compare_ether_addr(sta->sdata->vif.addr, localaddr) != 0)
+ !ether_addr_equal(sta->sdata->vif.addr, localaddr))
continue;
if (!sta->uploaded)
return NULL;
diff --git a/net/mac80211/sta_info.h b/net/mac80211/sta_info.h
index 663dc90c4e31..3bb24a121c95 100644
--- a/net/mac80211/sta_info.h
+++ b/net/mac80211/sta_info.h
@@ -502,7 +502,7 @@ void for_each_sta_info_type_check(struct ieee80211_local *local,
nxt = _sta ? rcu_dereference(_sta->hnext) : NULL \
) \
/* compare address and run code only if it matches */ \
- if (compare_ether_addr(_sta->sta.addr, (_addr)) == 0)
+ if (ether_addr_equal(_sta->sta.addr, (_addr)))
/*
* Get STA info by index, BROKEN!
diff --git a/net/mac80211/status.c b/net/mac80211/status.c
index 05f257aa2e08..28cfa981cfb1 100644
--- a/net/mac80211/status.c
+++ b/net/mac80211/status.c
@@ -384,7 +384,7 @@ void ieee80211_tx_status(struct ieee80211_hw *hw, struct sk_buff *skb)
for_each_sta_info(local, hdr->addr1, sta, tmp) {
/* skip wrong virtual interface */
- if (compare_ether_addr(hdr->addr2, sta->sdata->vif.addr))
+ if (!ether_addr_equal(hdr->addr2, sta->sdata->vif.addr))
continue;
if (info->flags & IEEE80211_TX_STATUS_EOSP)
diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c
index d67d36f57d78..5f827a6b0d8d 100644
--- a/net/mac80211/tx.c
+++ b/net/mac80211/tx.c
@@ -413,9 +413,8 @@ ieee80211_tx_h_multicast_ps_buf(struct ieee80211_tx_data *tx)
if (skb_queue_len(&tx->sdata->bss->ps_bc_buf) >= AP_MAX_BC_BUFFER) {
#ifdef CONFIG_MAC80211_VERBOSE_PS_DEBUG
- if (net_ratelimit())
- printk(KERN_DEBUG "%s: BC TX buffer full - dropping the oldest frame\n",
- tx->sdata->name);
+ net_dbg_ratelimited("%s: BC TX buffer full - dropping the oldest frame\n",
+ tx->sdata->name);
#endif
dev_kfree_skb(skb_dequeue(&tx->sdata->bss->ps_bc_buf));
} else
@@ -476,10 +475,8 @@ ieee80211_tx_h_unicast_ps_buf(struct ieee80211_tx_data *tx)
if (skb_queue_len(&sta->ps_tx_buf[ac]) >= STA_MAX_TX_BUFFER) {
struct sk_buff *old = skb_dequeue(&sta->ps_tx_buf[ac]);
#ifdef CONFIG_MAC80211_VERBOSE_PS_DEBUG
- if (net_ratelimit())
- printk(KERN_DEBUG "%s: STA %pM TX buffer for "
- "AC %d full - dropping oldest frame\n",
- tx->sdata->name, sta->sta.addr, ac);
+ net_dbg_ratelimited("%s: STA %pM TX buffer for AC %d full - dropping oldest frame\n",
+ tx->sdata->name, sta->sta.addr, ac);
#endif
dev_kfree_skb(old);
} else
@@ -1665,7 +1662,7 @@ netdev_tx_t ieee80211_monitor_start_xmit(struct sk_buff *skb,
skb->len >= len_rthdr + hdrlen + sizeof(rfc1042_header) + 2) {
u8 *payload = (u8 *)hdr + hdrlen;
- if (compare_ether_addr(payload, rfc1042_header) == 0)
+ if (ether_addr_equal(payload, rfc1042_header))
skb->protocol = cpu_to_be16((payload[6] << 8) |
payload[7]);
}
@@ -1698,7 +1695,7 @@ netdev_tx_t ieee80211_monitor_start_xmit(struct sk_buff *skb,
tmp_sdata->vif.type == NL80211_IFTYPE_AP_VLAN ||
tmp_sdata->vif.type == NL80211_IFTYPE_WDS)
continue;
- if (compare_ether_addr(tmp_sdata->vif.addr, hdr->addr2) == 0) {
+ if (ether_addr_equal(tmp_sdata->vif.addr, hdr->addr2)) {
sdata = tmp_sdata;
break;
}
@@ -1815,9 +1812,8 @@ netdev_tx_t ieee80211_subif_start_xmit(struct sk_buff *skb,
* is being proxied by a portal (i.e. portal address
* differs from proxied address)
*/
- if (compare_ether_addr(sdata->vif.addr,
- skb->data + ETH_ALEN) == 0 &&
- !(mppath && compare_ether_addr(mppath->mpp, skb->data))) {
+ if (ether_addr_equal(sdata->vif.addr, skb->data + ETH_ALEN) &&
+ !(mppath && !ether_addr_equal(mppath->mpp, skb->data))) {
hdrlen = ieee80211_fill_mesh_addresses(&hdr, &fc,
skb->data, skb->data + ETH_ALEN);
rcu_read_unlock();
@@ -1964,12 +1960,10 @@ netdev_tx_t ieee80211_subif_start_xmit(struct sk_buff *skb,
if (unlikely(!ieee80211_vif_is_mesh(&sdata->vif) &&
!is_multicast_ether_addr(hdr.addr1) && !authorized &&
(cpu_to_be16(ethertype) != sdata->control_port_protocol ||
- compare_ether_addr(sdata->vif.addr, skb->data + ETH_ALEN)))) {
+ !ether_addr_equal(sdata->vif.addr, skb->data + ETH_ALEN)))) {
#ifdef CONFIG_MAC80211_VERBOSE_DEBUG
- if (net_ratelimit())
- printk(KERN_DEBUG "%s: dropped frame to %pM"
- " (unauthorized port)\n", dev->name,
- hdr.addr1);
+ net_dbg_ratelimited("%s: dropped frame to %pM (unauthorized port)\n",
+ dev->name, hdr.addr1);
#endif
I802_DEBUG_INC(local->tx_handlers_drop_unauth_port);
diff --git a/net/mac802154/Kconfig b/net/mac802154/Kconfig
new file mode 100644
index 000000000000..a967ddaa4e2f
--- /dev/null
+++ b/net/mac802154/Kconfig
@@ -0,0 +1,16 @@
+config MAC802154
+ tristate "Generic IEEE 802.15.4 Soft Networking Stack (mac802154)"
+ depends on IEEE802154 && EXPERIMENTAL
+ select CRC_CCITT
+ ---help---
+ This option enables the hardware independent IEEE 802.15.4
+ networking stack for SoftMAC devices (the ones implementing
+ only the PHY level of the IEEE 802.15.4 standard).
+
+ Note: this implementation is neither certified, nor feature
+ complete! Compatibility with other implementations hasn't
+ been tested yet!
+
+ If you plan to use HardMAC IEEE 802.15.4 devices, you can
+ say N here. Alternatively, you can say M to compile it as
+ a module.
diff --git a/net/mac802154/Makefile b/net/mac802154/Makefile
new file mode 100644
index 000000000000..ec1bd3fc1273
--- /dev/null
+++ b/net/mac802154/Makefile
@@ -0,0 +1,2 @@
+obj-$(CONFIG_MAC802154) += mac802154.o
+mac802154-objs := ieee802154_dev.o rx.o tx.o mac_cmd.o mib.o monitor.o
diff --git a/net/mac802154/ieee802154_dev.c b/net/mac802154/ieee802154_dev.c
new file mode 100644
index 000000000000..e3edfb0661b0
--- /dev/null
+++ b/net/mac802154/ieee802154_dev.c
@@ -0,0 +1,294 @@
+/*
+ * Copyright (C) 2007-2012 Siemens AG
+ *
+ * Written by:
+ * Alexander Smirnov <alex.bluesman.smirnov@gmail.com>
+ *
+ * Based on the code from 'linux-zigbee.sourceforge.net' project.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/netdevice.h>
+
+#include <net/netlink.h>
+#include <linux/nl802154.h>
+#include <net/mac802154.h>
+#include <net/route.h>
+#include <net/wpan-phy.h>
+
+#include "mac802154.h"
+
+int mac802154_slave_open(struct net_device *dev)
+{
+ struct mac802154_sub_if_data *priv = netdev_priv(dev);
+ struct mac802154_priv *ipriv = priv->hw;
+ int res = 0;
+
+ if (ipriv->open_count++ == 0) {
+ res = ipriv->ops->start(&ipriv->hw);
+ WARN_ON(res);
+ if (res)
+ goto err;
+ }
+
+ if (ipriv->ops->ieee_addr) {
+ res = ipriv->ops->ieee_addr(&ipriv->hw, dev->dev_addr);
+ WARN_ON(res);
+ if (res)
+ goto err;
+ mac802154_dev_set_ieee_addr(dev);
+ }
+
+ netif_start_queue(dev);
+ return 0;
+err:
+ priv->hw->open_count--;
+
+ return res;
+}
+
+int mac802154_slave_close(struct net_device *dev)
+{
+ struct mac802154_sub_if_data *priv = netdev_priv(dev);
+ struct mac802154_priv *ipriv = priv->hw;
+
+ netif_stop_queue(dev);
+
+ if (!--ipriv->open_count)
+ ipriv->ops->stop(&ipriv->hw);
+
+ return 0;
+}
+
+static int
+mac802154_netdev_register(struct wpan_phy *phy, struct net_device *dev)
+{
+ struct mac802154_sub_if_data *priv;
+ struct mac802154_priv *ipriv;
+ int err;
+
+ ipriv = wpan_phy_priv(phy);
+
+ priv = netdev_priv(dev);
+ priv->dev = dev;
+ priv->hw = ipriv;
+
+ dev->needed_headroom = ipriv->hw.extra_tx_headroom;
+
+ SET_NETDEV_DEV(dev, &ipriv->phy->dev);
+
+ mutex_lock(&ipriv->slaves_mtx);
+ if (!ipriv->running) {
+ mutex_unlock(&ipriv->slaves_mtx);
+ return -ENODEV;
+ }
+ mutex_unlock(&ipriv->slaves_mtx);
+
+ err = register_netdev(dev);
+ if (err < 0)
+ return err;
+
+ rtnl_lock();
+ mutex_lock(&ipriv->slaves_mtx);
+ list_add_tail_rcu(&priv->list, &ipriv->slaves);
+ mutex_unlock(&ipriv->slaves_mtx);
+ rtnl_unlock();
+
+ return 0;
+}
+
+static void
+mac802154_del_iface(struct wpan_phy *phy, struct net_device *dev)
+{
+ struct mac802154_sub_if_data *sdata;
+ ASSERT_RTNL();
+
+ sdata = netdev_priv(dev);
+
+ BUG_ON(sdata->hw->phy != phy);
+
+ mutex_lock(&sdata->hw->slaves_mtx);
+ list_del_rcu(&sdata->list);
+ mutex_unlock(&sdata->hw->slaves_mtx);
+
+ synchronize_rcu();
+ unregister_netdevice(sdata->dev);
+}
+
+static struct net_device *
+mac802154_add_iface(struct wpan_phy *phy, const char *name, int type)
+{
+ struct net_device *dev;
+ int err = -ENOMEM;
+
+ switch (type) {
+ case IEEE802154_DEV_MONITOR:
+ dev = alloc_netdev(sizeof(struct mac802154_sub_if_data),
+ name, mac802154_monitor_setup);
+ break;
+ default:
+ dev = NULL;
+ err = -EINVAL;
+ break;
+ }
+ if (!dev)
+ goto err;
+
+ err = mac802154_netdev_register(phy, dev);
+ if (err)
+ goto err_free;
+
+ dev_hold(dev); /* we return an incremented device refcount */
+ return dev;
+
+err_free:
+ free_netdev(dev);
+err:
+ return ERR_PTR(err);
+}
+
+struct ieee802154_dev *
+ieee802154_alloc_device(size_t priv_data_len, struct ieee802154_ops *ops)
+{
+ struct wpan_phy *phy;
+ struct mac802154_priv *priv;
+ size_t priv_size;
+
+ if (!ops || !ops->xmit || !ops->ed || !ops->start ||
+ !ops->stop || !ops->set_channel) {
+ printk(KERN_ERR
+ "undefined IEEE802.15.4 device operations\n");
+ return NULL;
+ }
+
+ /* Ensure 32-byte alignment of our private data and hw private data.
+ * We use the wpan_phy priv data for both our mac802154_priv and for
+ * the driver's private data
+ *
+ * in memory it'll be like this:
+ *
+ * +-----------------------+
+ * | struct wpan_phy |
+ * +-----------------------+
+ * | struct mac802154_priv |
+ * +-----------------------+
+ * | driver's private data |
+ * +-----------------------+
+ *
+ * Because the ieee802154 layer isn't aware of the driver and MAC
+ * structures, align them here.
+ */
+
+ priv_size = ALIGN(sizeof(*priv), NETDEV_ALIGN) + priv_data_len;
+
+ phy = wpan_phy_alloc(priv_size);
+ if (!phy) {
+ printk(KERN_ERR
+ "failure to allocate master IEEE802.15.4 device\n");
+ return NULL;
+ }
+
+ priv = wpan_phy_priv(phy);
+ priv->hw.phy = priv->phy = phy;
+ priv->hw.priv = (char *)priv + ALIGN(sizeof(*priv), NETDEV_ALIGN);
+ priv->ops = ops;
+
+ INIT_LIST_HEAD(&priv->slaves);
+ mutex_init(&priv->slaves_mtx);
+
+ return &priv->hw;
+}
+EXPORT_SYMBOL(ieee802154_alloc_device);
+
+void ieee802154_free_device(struct ieee802154_dev *hw)
+{
+ struct mac802154_priv *priv = mac802154_to_priv(hw);
+
+ BUG_ON(!list_empty(&priv->slaves));
+
+ mutex_destroy(&priv->slaves_mtx);
+
+ wpan_phy_free(priv->phy);
+}
+EXPORT_SYMBOL(ieee802154_free_device);
+
+int ieee802154_register_device(struct ieee802154_dev *dev)
+{
+ struct mac802154_priv *priv = mac802154_to_priv(dev);
+ int rc = -ENOMEM;
+
+ priv->dev_workqueue =
+ create_singlethread_workqueue(wpan_phy_name(priv->phy));
+ if (!priv->dev_workqueue)
+ goto out;
+
+ wpan_phy_set_dev(priv->phy, priv->hw.parent);
+
+ priv->phy->add_iface = mac802154_add_iface;
+ priv->phy->del_iface = mac802154_del_iface;
+
+ rc = wpan_phy_register(priv->phy);
+ if (rc < 0)
+ goto out_wq;
+
+ rtnl_lock();
+
+ mutex_lock(&priv->slaves_mtx);
+ priv->running = MAC802154_DEVICE_RUN;
+ mutex_unlock(&priv->slaves_mtx);
+
+ rtnl_unlock();
+
+ return 0;
+
+out_wq:
+ destroy_workqueue(priv->dev_workqueue);
+out:
+ return rc;
+}
+EXPORT_SYMBOL(ieee802154_register_device);
+
+void ieee802154_unregister_device(struct ieee802154_dev *dev)
+{
+ struct mac802154_priv *priv = mac802154_to_priv(dev);
+ struct mac802154_sub_if_data *sdata, *next;
+
+ flush_workqueue(priv->dev_workqueue);
+ destroy_workqueue(priv->dev_workqueue);
+
+ rtnl_lock();
+
+ mutex_lock(&priv->slaves_mtx);
+ priv->running = MAC802154_DEVICE_STOPPED;
+ mutex_unlock(&priv->slaves_mtx);
+
+ list_for_each_entry_safe(sdata, next, &priv->slaves, list) {
+ mutex_lock(&sdata->hw->slaves_mtx);
+ list_del(&sdata->list);
+ mutex_unlock(&sdata->hw->slaves_mtx);
+
+ unregister_netdevice(sdata->dev);
+ }
+
+ rtnl_unlock();
+
+ wpan_phy_unregister(priv->phy);
+}
+EXPORT_SYMBOL(ieee802154_unregister_device);
+
+MODULE_DESCRIPTION("IEEE 802.15.4 implementation");
+MODULE_LICENSE("GPL v2");
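ieee802154_alloc_device() above is the driver-facing entry point, and the layout comment explains why the driver's private area begins at a NETDEV_ALIGN-aligned offset past struct mac802154_priv and is handed back through the hw priv pointer. A hypothetical SoftMAC driver would use the exported calls roughly as sketched below; the chip structure, the callbacks and the probe function are invented for illustration and are not part of this patch (a real driver also fills in dev->parent and the phy's supported channels before registering):

	#include <linux/module.h>
	#include <linux/skbuff.h>
	#include <net/mac802154.h>

	struct my_chip {				/* hypothetical driver state */
		int dummy;
	};

	static int my_chip_start(struct ieee802154_dev *dev) { return 0; }
	static void my_chip_stop(struct ieee802154_dev *dev) { }
	static int my_chip_ed(struct ieee802154_dev *dev, u8 *level) { *level = 0; return 0; }
	static int my_chip_set_channel(struct ieee802154_dev *dev, int page, int channel) { return 0; }
	static int my_chip_xmit(struct ieee802154_dev *dev, struct sk_buff *skb) { return 0; }

	static struct ieee802154_ops my_chip_ops = {
		.owner		= THIS_MODULE,
		.start		= my_chip_start,
		.stop		= my_chip_stop,
		.ed		= my_chip_ed,
		.set_channel	= my_chip_set_channel,
		.xmit		= my_chip_xmit,
	};

	static int my_chip_probe(void)
	{
		struct ieee802154_dev *dev;
		struct my_chip *chip;
		int ret;

		dev = ieee802154_alloc_device(sizeof(*chip), &my_chip_ops);
		if (!dev)
			return -ENOMEM;

		chip = dev->priv;	/* driver area, past the aligned mac802154_priv */
		chip->dummy = 0;

		ret = ieee802154_register_device(dev);
		if (ret)
			ieee802154_free_device(dev);
		return ret;
	}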
diff --git a/net/mac802154/mac802154.h b/net/mac802154/mac802154.h
new file mode 100644
index 000000000000..789d9c948aec
--- /dev/null
+++ b/net/mac802154/mac802154.h
@@ -0,0 +1,109 @@
+/*
+ * Copyright (C) 2007-2012 Siemens AG
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Written by:
+ * Pavel Smolenskiy <pavel.smolenskiy@gmail.com>
+ * Maxim Gorbachyov <maxim.gorbachev@siemens.com>
+ * Dmitry Eremin-Solenikov <dbaryshkov@gmail.com>
+ * Alexander Smirnov <alex.bluesman.smirnov@gmail.com>
+ */
+#ifndef MAC802154_H
+#define MAC802154_H
+
+/* mac802154 device private data */
+struct mac802154_priv {
+ struct ieee802154_dev hw;
+ struct ieee802154_ops *ops;
+
+ /* ieee802154 phy */
+ struct wpan_phy *phy;
+
+ int open_count;
+
+ /* As in mac80211, the slaves list is modified:
+ * 1) under the RTNL
+ * 2) protected by slaves_mtx;
+ * 3) in an RCU manner
+ *
+ * So atomic readers can use any of these protection methods.
+ */
+ struct list_head slaves;
+ struct mutex slaves_mtx;
+
+ /* This workqueue is used for scanning and other jobs so that they do
+ * not interfere with the serial driver.
+ */
+ struct workqueue_struct *dev_workqueue;
+
+ /* The SoftMAC device is registered and running; subinterfaces can be added.
+ * This flag is modified under both slaves_mtx and the RTNL, so it can
+ * be read under either protection.
+ */
+ bool running;
+};
+
+#define MAC802154_DEVICE_STOPPED 0x00
+#define MAC802154_DEVICE_RUN 0x01
+
+/* Slave interface definition.
+ *
+ * Slaves represent typical network interfaces available from userspace.
+ * Each ieee802154 device/transceiver may have several slaves and may
+ * be associated with several networks at the same time.
+ */
+struct mac802154_sub_if_data {
+ struct list_head list; /* the mac802154_priv->slaves list */
+
+ struct mac802154_priv *hw;
+ struct net_device *dev;
+
+ int type;
+
+ spinlock_t mib_lock;
+
+ __le16 pan_id;
+ __le16 short_addr;
+
+ u8 chan;
+ u8 page;
+
+ /* MAC BSN field */
+ u8 bsn;
+ /* MAC DSN field */
+ u8 dsn;
+};
+
+#define mac802154_to_priv(_hw) container_of(_hw, struct mac802154_priv, hw)
+
+#define MAC802154_MAX_XMIT_ATTEMPTS 3
+
+#define MAC802154_CHAN_NONE (~(u8)0) /* No channel is assigned */
+
+extern struct ieee802154_reduced_mlme_ops mac802154_mlme_reduced;
+
+int mac802154_slave_open(struct net_device *dev);
+int mac802154_slave_close(struct net_device *dev);
+
+void mac802154_monitors_rx(struct mac802154_priv *priv, struct sk_buff *skb);
+void mac802154_monitor_setup(struct net_device *dev);
+
+netdev_tx_t mac802154_tx(struct mac802154_priv *priv, struct sk_buff *skb,
+ u8 page, u8 chan);
+
+/* MIB callbacks */
+void mac802154_dev_set_ieee_addr(struct net_device *dev);
+
+#endif /* MAC802154_H */
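The slaves-list comment in struct mac802154_priv above describes the same locking discipline mac80211 uses; a minimal sketch of what it means in practice (mirroring mac802154_netdev_register() and mac802154_monitors_rx(), not code from the patch):

	#include <linux/rtnetlink.h>
	#include <linux/rculist.h>

	#include "mac802154.h"

	/* Writer: hold the RTNL and slaves_mtx, modify with the RCU list helpers. */
	static void sketch_add_slave(struct mac802154_priv *hw,
				     struct mac802154_sub_if_data *sdata)
	{
		rtnl_lock();
		mutex_lock(&hw->slaves_mtx);
		list_add_tail_rcu(&sdata->list, &hw->slaves);
		mutex_unlock(&hw->slaves_mtx);
		rtnl_unlock();
	}

	/* Reader in atomic context: RCU alone is enough. */
	static void sketch_walk_slaves(struct mac802154_priv *hw)
	{
		struct mac802154_sub_if_data *sdata;

		rcu_read_lock();
		list_for_each_entry_rcu(sdata, &hw->slaves, list)
			(void)sdata;	/* inspect the slave here */
		rcu_read_unlock();
	}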
diff --git a/net/mac802154/mac_cmd.c b/net/mac802154/mac_cmd.c
new file mode 100644
index 000000000000..7a5d0e052cd7
--- /dev/null
+++ b/net/mac802154/mac_cmd.c
@@ -0,0 +1,45 @@
+/*
+ * MAC commands interface
+ *
+ * Copyright 2007-2012 Siemens AG
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Written by:
+ * Sergey Lapin <slapin@ossfans.org>
+ * Dmitry Eremin-Solenikov <dbaryshkov@gmail.com>
+ * Alexander Smirnov <alex.bluesman.smirnov@gmail.com>
+ */
+
+#include <linux/skbuff.h>
+#include <linux/if_arp.h>
+
+#include <net/ieee802154_netdev.h>
+#include <net/wpan-phy.h>
+#include <net/mac802154.h>
+
+#include "mac802154.h"
+
+struct wpan_phy *mac802154_get_phy(const struct net_device *dev)
+{
+ struct mac802154_sub_if_data *priv = netdev_priv(dev);
+
+ BUG_ON(dev->type != ARPHRD_IEEE802154);
+
+ return to_phy(get_device(&priv->hw->phy->dev));
+}
+
+struct ieee802154_reduced_mlme_ops mac802154_mlme_reduced = {
+ .get_phy = mac802154_get_phy,
+};
diff --git a/net/mac802154/mib.c b/net/mac802154/mib.c
new file mode 100644
index 000000000000..ab59821ec729
--- /dev/null
+++ b/net/mac802154/mib.c
@@ -0,0 +1,93 @@
+/*
+ * Copyright 2007-2012 Siemens AG
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Written by:
+ * Dmitry Eremin-Solenikov <dbaryshkov@gmail.com>
+ * Sergey Lapin <slapin@ossfans.org>
+ * Maxim Gorbachyov <maxim.gorbachev@siemens.com>
+ * Alexander Smirnov <alex.bluesman.smirnov@gmail.com>
+ */
+
+#include <linux/if_arp.h>
+
+#include <net/mac802154.h>
+#include <net/wpan-phy.h>
+
+#include "mac802154.h"
+
+struct hw_addr_filt_notify_work {
+ struct work_struct work;
+ struct net_device *dev;
+ unsigned long changed;
+};
+
+struct mac802154_priv *mac802154_slave_get_priv(struct net_device *dev)
+{
+ struct mac802154_sub_if_data *priv = netdev_priv(dev);
+
+ BUG_ON(dev->type != ARPHRD_IEEE802154);
+
+ return priv->hw;
+}
+
+static void hw_addr_notify(struct work_struct *work)
+{
+ struct hw_addr_filt_notify_work *nw = container_of(work,
+ struct hw_addr_filt_notify_work, work);
+ struct mac802154_priv *hw = mac802154_slave_get_priv(nw->dev);
+ int res;
+
+ res = hw->ops->set_hw_addr_filt(&hw->hw,
+ &hw->hw.hw_filt,
+ nw->changed);
+ if (res)
+ pr_debug("failed changed mask %lx\n", nw->changed);
+
+ kfree(nw);
+}
+
+static void set_hw_addr_filt(struct net_device *dev, unsigned long changed)
+{
+ struct mac802154_sub_if_data *priv = netdev_priv(dev);
+ struct hw_addr_filt_notify_work *work;
+
+ work = kzalloc(sizeof(*work), GFP_ATOMIC);
+ if (!work)
+ return;
+
+ INIT_WORK(&work->work, hw_addr_notify);
+ work->dev = dev;
+ work->changed = changed;
+ queue_work(priv->hw->dev_workqueue, &work->work);
+}
+
+void mac802154_dev_set_ieee_addr(struct net_device *dev)
+{
+ struct mac802154_sub_if_data *priv = netdev_priv(dev);
+ struct mac802154_priv *mac = priv->hw;
+
+ if (mac->ops->set_hw_addr_filt &&
+ memcmp(mac->hw.hw_filt.ieee_addr,
+ dev->dev_addr, IEEE802154_ADDR_LEN)) {
+ memcpy(mac->hw.hw_filt.ieee_addr,
+ dev->dev_addr, IEEE802154_ADDR_LEN);
+ set_hw_addr_filt(dev, IEEE802515_AFILT_IEEEADDR_CHANGED);
+ }
+}
diff --git a/net/mac802154/monitor.c b/net/mac802154/monitor.c
new file mode 100644
index 000000000000..434a26f76a80
--- /dev/null
+++ b/net/mac802154/monitor.c
@@ -0,0 +1,116 @@
+/*
+ * Copyright 2007, 2008, 2009 Siemens AG
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Written by:
+ * Dmitry Eremin-Solenikov <dbaryshkov@gmail.com>
+ * Sergey Lapin <slapin@ossfans.org>
+ * Maxim Gorbachyov <maxim.gorbachev@siemens.com>
+ * Alexander Smirnov <alex.bluesman.smirnov@gmail.com>
+ */
+
+#include <linux/netdevice.h>
+#include <linux/skbuff.h>
+#include <linux/if_arp.h>
+#include <linux/crc-ccitt.h>
+
+#include <net/ieee802154.h>
+#include <net/mac802154.h>
+#include <net/netlink.h>
+#include <net/wpan-phy.h>
+#include <linux/nl802154.h>
+
+#include "mac802154.h"
+
+static netdev_tx_t mac802154_monitor_xmit(struct sk_buff *skb,
+ struct net_device *dev)
+{
+ struct mac802154_sub_if_data *priv;
+ u8 chan, page;
+
+ priv = netdev_priv(dev);
+
+ /* FIXME: locking */
+ chan = priv->hw->phy->current_channel;
+ page = priv->hw->phy->current_page;
+
+ if (chan == MAC802154_CHAN_NONE) /* not initialized */
+ return NETDEV_TX_OK;
+
+ if (WARN_ON(page >= WPAN_NUM_PAGES) ||
+ WARN_ON(chan >= WPAN_NUM_CHANNELS))
+ return NETDEV_TX_OK;
+
+ skb->skb_iif = dev->ifindex;
+ dev->stats.tx_packets++;
+ dev->stats.tx_bytes += skb->len;
+
+ return mac802154_tx(priv->hw, skb, page, chan);
+}
+
+
+void mac802154_monitors_rx(struct mac802154_priv *priv, struct sk_buff *skb)
+{
+ struct sk_buff *skb2;
+ struct mac802154_sub_if_data *sdata;
+ u16 crc = crc_ccitt(0, skb->data, skb->len);
+ u8 *data;
+
+ rcu_read_lock();
+ list_for_each_entry_rcu(sdata, &priv->slaves, list) {
+ if (sdata->type != IEEE802154_DEV_MONITOR)
+ continue;
+
+ skb2 = skb_clone(skb, GFP_ATOMIC);
+ if (!skb2)
+ continue;
+ skb2->dev = sdata->dev;
+ skb2->pkt_type = PACKET_HOST;
+ data = skb_put(skb2, 2);
+ data[0] = crc & 0xff;
+ data[1] = crc >> 8;
+
+ netif_rx_ni(skb2);
+ }
+ rcu_read_unlock();
+}
+
+static const struct net_device_ops mac802154_monitor_ops = {
+ .ndo_open = mac802154_slave_open,
+ .ndo_stop = mac802154_slave_close,
+ .ndo_start_xmit = mac802154_monitor_xmit,
+};
+
+void mac802154_monitor_setup(struct net_device *dev)
+{
+ struct mac802154_sub_if_data *priv;
+
+ dev->addr_len = 0;
+ dev->hard_header_len = 0;
+ dev->needed_tailroom = 2; /* room for FCS */
+ dev->mtu = IEEE802154_MTU;
+ dev->tx_queue_len = 10;
+ dev->type = ARPHRD_IEEE802154_MONITOR;
+ dev->flags = IFF_NOARP | IFF_BROADCAST;
+ dev->watchdog_timeo = 0;
+
+ dev->destructor = free_netdev;
+ dev->netdev_ops = &mac802154_monitor_ops;
+ dev->ml_priv = &mac802154_mlme_reduced;
+
+ priv = netdev_priv(dev);
+ priv->type = IEEE802154_DEV_MONITOR;
+
+ priv->chan = MAC802154_CHAN_NONE; /* not initialized */
+ priv->page = 0;
+}
diff --git a/net/mac802154/rx.c b/net/mac802154/rx.c
new file mode 100644
index 000000000000..4a7d76d4f8bc
--- /dev/null
+++ b/net/mac802154/rx.c
@@ -0,0 +1,114 @@
+/*
+ * Copyright (C) 2007-2012 Siemens AG
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Written by:
+ * Pavel Smolenskiy <pavel.smolenskiy@gmail.com>
+ * Maxim Gorbachyov <maxim.gorbachev@siemens.com>
+ * Dmitry Eremin-Solenikov <dbaryshkov@gmail.com>
+ * Alexander Smirnov <alex.bluesman.smirnov@gmail.com>
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/workqueue.h>
+#include <linux/netdevice.h>
+#include <linux/crc-ccitt.h>
+
+#include <net/mac802154.h>
+#include <net/ieee802154_netdev.h>
+
+#include "mac802154.h"
+
+/* The IEEE 802.15.4 standard defines 4 MAC packet types:
+ * - beacon frame
+ * - MAC command frame
+ * - acknowledgement frame
+ * - data frame
+ *
+ * and only the data frame should be pushed to the upper layers; the other
+ * types are internal MAC layer management information. So only data packets
+ * are sent to the networking queue, and all others are processed right here
+ * using the device workqueue.
+ */
+struct rx_work {
+ struct sk_buff *skb;
+ struct work_struct work;
+ struct ieee802154_dev *dev;
+ u8 lqi;
+};
+
+static void
+mac802154_subif_rx(struct ieee802154_dev *hw, struct sk_buff *skb, u8 lqi)
+{
+ struct mac802154_priv *priv = mac802154_to_priv(hw);
+
+ mac_cb(skb)->lqi = lqi;
+ skb->protocol = htons(ETH_P_IEEE802154);
+ skb_reset_mac_header(skb);
+
+ BUILD_BUG_ON(sizeof(struct ieee802154_mac_cb) > sizeof(skb->cb));
+
+ if (!(priv->hw.flags & IEEE802154_HW_OMIT_CKSUM)) {
+ u16 crc;
+
+ if (skb->len < 2) {
+ pr_debug("got invalid frame\n");
+ goto out;
+ }
+ crc = crc_ccitt(0, skb->data, skb->len);
+ if (crc) {
+ pr_debug("CRC mismatch\n");
+ goto out;
+ }
+ skb_trim(skb, skb->len - 2); /* CRC */
+ }
+
+ mac802154_monitors_rx(priv, skb);
+out:
+ dev_kfree_skb(skb);
+ return;
+}
+
+static void mac802154_rx_worker(struct work_struct *work)
+{
+ struct rx_work *rw = container_of(work, struct rx_work, work);
+ struct sk_buff *skb = rw->skb;
+
+ mac802154_subif_rx(rw->dev, skb, rw->lqi);
+ kfree(rw);
+}
+
+void
+ieee802154_rx_irqsafe(struct ieee802154_dev *dev, struct sk_buff *skb, u8 lqi)
+{
+ struct mac802154_priv *priv = mac802154_to_priv(dev);
+ struct rx_work *work;
+
+ if (!skb)
+ return;
+
+ work = kzalloc(sizeof(struct rx_work), GFP_ATOMIC);
+ if (!work)
+ return;
+
+ INIT_WORK(&work->work, mac802154_rx_worker);
+ work->skb = skb;
+ work->dev = dev;
+ work->lqi = lqi;
+
+ queue_work(priv->dev_workqueue, &work->work);
+}
+EXPORT_SYMBOL(ieee802154_rx_irqsafe);
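ieee802154_rx_irqsafe() above is the receive entry point a SoftMAC driver calls from its interrupt handler; the frame is queued to the device workqueue and the CRC check plus delivery happen later in mac802154_rx_worker(). A hypothetical driver-side caller (the IRQ handler and the hardware FIFO read are invented for illustration):

	#include <linux/interrupt.h>
	#include <linux/skbuff.h>
	#include <net/ieee802154.h>
	#include <net/mac802154.h>

	static irqreturn_t my_chip_irq(int irq, void *data)
	{
		struct ieee802154_dev *dev = data;
		struct sk_buff *skb;
		u8 lqi = 0;

		skb = dev_alloc_skb(IEEE802154_MTU);
		if (!skb)
			return IRQ_HANDLED;

		/* ... copy the received frame and LQI out of the hardware ... */

		/* safe in atomic context: processing is deferred to the workqueue */
		ieee802154_rx_irqsafe(dev, skb, lqi);

		return IRQ_HANDLED;
	}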
diff --git a/net/mac802154/tx.c b/net/mac802154/tx.c
new file mode 100644
index 000000000000..8781d8f904d9
--- /dev/null
+++ b/net/mac802154/tx.c
@@ -0,0 +1,116 @@
+/*
+ * Copyright 2007-2012 Siemens AG
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Written by:
+ * Dmitry Eremin-Solenikov <dbaryshkov@gmail.com>
+ * Sergey Lapin <slapin@ossfans.org>
+ * Maxim Gorbachyov <maxim.gorbachev@siemens.com>
+ * Alexander Smirnov <alex.bluesman.smirnov@gmail.com>
+ */
+
+#include <linux/netdevice.h>
+#include <linux/if_arp.h>
+#include <linux/crc-ccitt.h>
+
+#include <net/mac802154.h>
+#include <net/wpan-phy.h>
+
+#include "mac802154.h"
+
+/* IEEE 802.15.4 transceivers can sleep during the xmit session, so process
+ * packets through the workqueue.
+ */
+struct xmit_work {
+ struct sk_buff *skb;
+ struct work_struct work;
+ struct mac802154_priv *priv;
+ u8 chan;
+ u8 page;
+ u8 xmit_attempts;
+};
+
+static void mac802154_xmit_worker(struct work_struct *work)
+{
+ struct xmit_work *xw = container_of(work, struct xmit_work, work);
+ int res;
+
+ mutex_lock(&xw->priv->phy->pib_lock);
+ if (xw->priv->phy->current_channel != xw->chan ||
+ xw->priv->phy->current_page != xw->page) {
+ res = xw->priv->ops->set_channel(&xw->priv->hw,
+ xw->page,
+ xw->chan);
+ if (res) {
+ pr_debug("set_channel failed\n");
+ goto out;
+ }
+ }
+
+ res = xw->priv->ops->xmit(&xw->priv->hw, xw->skb);
+
+out:
+ mutex_unlock(&xw->priv->phy->pib_lock);
+
+ if (res) {
+ if (xw->xmit_attempts++ < MAC802154_MAX_XMIT_ATTEMPTS) {
+ queue_work(xw->priv->dev_workqueue, &xw->work);
+ return;
+ } else
+ pr_debug("transmission failed for %d times",
+ MAC802154_MAX_XMIT_ATTEMPTS);
+ }
+
+ dev_kfree_skb(xw->skb);
+
+ kfree(xw);
+}
+
+netdev_tx_t mac802154_tx(struct mac802154_priv *priv, struct sk_buff *skb,
+ u8 page, u8 chan)
+{
+ struct xmit_work *work;
+
+ if (!(priv->phy->channels_supported[page] & (1 << chan))) {
+ WARN_ON(1);
+ dev_kfree_skb(skb);
+ return NETDEV_TX_OK;
+ }
+
+ if (!(priv->hw.flags & IEEE802154_HW_OMIT_CKSUM)) {
+ u16 crc = crc_ccitt(0, skb->data, skb->len);
+ u8 *data = skb_put(skb, 2);
+ data[0] = crc & 0xff;
+ data[1] = crc >> 8;
+ }
+
+ if (skb_cow_head(skb, priv->hw.extra_tx_headroom)) {
+ dev_kfree_skb(skb);
+ return NETDEV_TX_OK;
+ }
+
+ work = kzalloc(sizeof(struct xmit_work), GFP_ATOMIC);
+ if (!work)
+ return NETDEV_TX_BUSY;
+
+ INIT_WORK(&work->work, mac802154_xmit_worker);
+ work->skb = skb;
+ work->priv = priv;
+ work->page = page;
+ work->chan = chan;
+ work->xmit_attempts = 0;
+
+ queue_work(priv->dev_workqueue, &work->work);
+
+ return NETDEV_TX_OK;
+}
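A note on the FCS handling shared by monitor.c, rx.c and tx.c above: unless the hardware computes the checksum itself (IEEE802154_HW_OMIT_CKSUM), mac802154 appends the 16-bit ITU-T CRC in software on transmit and verifies it on receive; running crc_ccitt() over a frame that already carries a valid FCS yields 0, which is exactly what the receive path checks. Minimal sketch of the append step used in both transmit paths:

	#include <linux/crc-ccitt.h>
	#include <linux/skbuff.h>

	static void sketch_append_fcs(struct sk_buff *skb)
	{
		u16 crc = crc_ccitt(0, skb->data, skb->len);
		u8 *fcs = skb_put(skb, 2);	/* needs 2 bytes of tailroom */

		fcs[0] = crc & 0xff;		/* FCS goes on the air LSB first */
		fcs[1] = crc >> 8;
	}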
diff --git a/net/netfilter/Kconfig b/net/netfilter/Kconfig
index 0c6f67e8f2e5..209c1ed43368 100644
--- a/net/netfilter/Kconfig
+++ b/net/netfilter/Kconfig
@@ -509,6 +509,21 @@ config NETFILTER_XT_TARGET_HL
since you can easily create immortal packets that loop
forever on the network.
+config NETFILTER_XT_TARGET_HMARK
+ tristate '"HMARK" target support'
+ depends on (IP6_NF_IPTABLES || IP6_NF_IPTABLES=n)
+ depends on NETFILTER_ADVANCED
+ ---help---
+ This option adds the "HMARK" target.
+
+ The target allows you to create rules in the "raw" and "mangle" tables
+ which set the skbuff mark by means of hash calculation within a given
+ range. The nfmark can influence the routing method (see "Use netfilter
+ MARK value as routing key") and can also be used by other subsystems to
+ change their behaviour.
+
+ To compile it as a module, choose M here. If unsure, say N.
+
config NETFILTER_XT_TARGET_IDLETIMER
tristate "IDLETIMER target support"
depends on NETFILTER_ADVANCED
diff --git a/net/netfilter/Makefile b/net/netfilter/Makefile
index ca3676586f51..4e7960cc7b97 100644
--- a/net/netfilter/Makefile
+++ b/net/netfilter/Makefile
@@ -59,6 +59,7 @@ obj-$(CONFIG_NETFILTER_XT_TARGET_CONNSECMARK) += xt_CONNSECMARK.o
obj-$(CONFIG_NETFILTER_XT_TARGET_CT) += xt_CT.o
obj-$(CONFIG_NETFILTER_XT_TARGET_DSCP) += xt_DSCP.o
obj-$(CONFIG_NETFILTER_XT_TARGET_HL) += xt_HL.o
+obj-$(CONFIG_NETFILTER_XT_TARGET_HMARK) += xt_HMARK.o
obj-$(CONFIG_NETFILTER_XT_TARGET_LED) += xt_LED.o
obj-$(CONFIG_NETFILTER_XT_TARGET_LOG) += xt_LOG.o
obj-$(CONFIG_NETFILTER_XT_TARGET_NFLOG) += xt_NFLOG.o
diff --git a/net/netfilter/ipset/ip_set_bitmap_ipmac.c b/net/netfilter/ipset/ip_set_bitmap_ipmac.c
index 0bb16c469a89..d7eaf10edb6d 100644
--- a/net/netfilter/ipset/ip_set_bitmap_ipmac.c
+++ b/net/netfilter/ipset/ip_set_bitmap_ipmac.c
@@ -111,7 +111,7 @@ bitmap_ipmac_test(struct ip_set *set, void *value, u32 timeout, u32 flags)
return -EAGAIN;
case MAC_FILLED:
return data->ether == NULL ||
- compare_ether_addr(data->ether, elem->ether) == 0;
+ ether_addr_equal(data->ether, elem->ether);
}
return 0;
}
@@ -225,7 +225,7 @@ bitmap_ipmac_ttest(struct ip_set *set, void *value, u32 timeout, u32 flags)
return -EAGAIN;
case MAC_FILLED:
return (data->ether == NULL ||
- compare_ether_addr(data->ether, elem->ether) == 0) &&
+ ether_addr_equal(data->ether, elem->ether)) &&
!bitmap_expired(map, data->id);
}
return 0;
diff --git a/net/netfilter/ipset/ip_set_hash_ip.c b/net/netfilter/ipset/ip_set_hash_ip.c
index 507fe93794aa..a68dbd4f1e4e 100644
--- a/net/netfilter/ipset/ip_set_hash_ip.c
+++ b/net/netfilter/ipset/ip_set_hash_ip.c
@@ -368,6 +368,7 @@ hash_ip_create(struct ip_set *set, struct nlattr *tb[], u32 flags)
{
u32 hashsize = IPSET_DEFAULT_HASHSIZE, maxelem = IPSET_DEFAULT_MAXELEM;
u8 netmask, hbits;
+ size_t hsize;
struct ip_set_hash *h;
if (!(set->family == NFPROTO_IPV4 || set->family == NFPROTO_IPV6))
@@ -409,9 +410,12 @@ hash_ip_create(struct ip_set *set, struct nlattr *tb[], u32 flags)
h->timeout = IPSET_NO_TIMEOUT;
hbits = htable_bits(hashsize);
- h->table = ip_set_alloc(
- sizeof(struct htable)
- + jhash_size(hbits) * sizeof(struct hbucket));
+ hsize = htable_size(hbits);
+ if (hsize == 0) {
+ kfree(h);
+ return -ENOMEM;
+ }
+ h->table = ip_set_alloc(hsize);
if (!h->table) {
kfree(h);
return -ENOMEM;
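The same change repeats in each of the ip_set_hash_* files below: the allocation size is now computed by htable_size(), and a return value of 0 (treated here as meaning the requested number of buckets cannot be represented without overflowing the size computation) makes the create function fail with -ENOMEM instead of allocating an undersized table. The helper itself is not part of this diff; the general shape of such an overflow-checked computation, as an illustrative sketch only, is:

	#include <linux/types.h>

	/* returns 0 when n * elem_size + header_size would overflow size_t */
	static size_t sketch_table_size(size_t n, size_t elem_size, size_t header_size)
	{
		if (n && elem_size > ((size_t)-1 - header_size) / n)
			return 0;
		return n * elem_size + header_size;
	}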
diff --git a/net/netfilter/ipset/ip_set_hash_ipport.c b/net/netfilter/ipset/ip_set_hash_ipport.c
index 68f284c97490..92722bb82eea 100644
--- a/net/netfilter/ipset/ip_set_hash_ipport.c
+++ b/net/netfilter/ipset/ip_set_hash_ipport.c
@@ -452,6 +452,7 @@ hash_ipport_create(struct ip_set *set, struct nlattr *tb[], u32 flags)
struct ip_set_hash *h;
u32 hashsize = IPSET_DEFAULT_HASHSIZE, maxelem = IPSET_DEFAULT_MAXELEM;
u8 hbits;
+ size_t hsize;
if (!(set->family == NFPROTO_IPV4 || set->family == NFPROTO_IPV6))
return -IPSET_ERR_INVALID_FAMILY;
@@ -479,9 +480,12 @@ hash_ipport_create(struct ip_set *set, struct nlattr *tb[], u32 flags)
h->timeout = IPSET_NO_TIMEOUT;
hbits = htable_bits(hashsize);
- h->table = ip_set_alloc(
- sizeof(struct htable)
- + jhash_size(hbits) * sizeof(struct hbucket));
+ hsize = htable_size(hbits);
+ if (hsize == 0) {
+ kfree(h);
+ return -ENOMEM;
+ }
+ h->table = ip_set_alloc(hsize);
if (!h->table) {
kfree(h);
return -ENOMEM;
diff --git a/net/netfilter/ipset/ip_set_hash_ipportip.c b/net/netfilter/ipset/ip_set_hash_ipportip.c
index 1eec4b9e0dca..0637ce096def 100644
--- a/net/netfilter/ipset/ip_set_hash_ipportip.c
+++ b/net/netfilter/ipset/ip_set_hash_ipportip.c
@@ -470,6 +470,7 @@ hash_ipportip_create(struct ip_set *set, struct nlattr *tb[], u32 flags)
struct ip_set_hash *h;
u32 hashsize = IPSET_DEFAULT_HASHSIZE, maxelem = IPSET_DEFAULT_MAXELEM;
u8 hbits;
+ size_t hsize;
if (!(set->family == NFPROTO_IPV4 || set->family == NFPROTO_IPV6))
return -IPSET_ERR_INVALID_FAMILY;
@@ -497,9 +498,12 @@ hash_ipportip_create(struct ip_set *set, struct nlattr *tb[], u32 flags)
h->timeout = IPSET_NO_TIMEOUT;
hbits = htable_bits(hashsize);
- h->table = ip_set_alloc(
- sizeof(struct htable)
- + jhash_size(hbits) * sizeof(struct hbucket));
+ hsize = htable_size(hbits);
+ if (hsize == 0) {
+ kfree(h);
+ return -ENOMEM;
+ }
+ h->table = ip_set_alloc(hsize);
if (!h->table) {
kfree(h);
return -ENOMEM;
diff --git a/net/netfilter/ipset/ip_set_hash_ipportnet.c b/net/netfilter/ipset/ip_set_hash_ipportnet.c
index 62d66ecef369..1ce21ca976e1 100644
--- a/net/netfilter/ipset/ip_set_hash_ipportnet.c
+++ b/net/netfilter/ipset/ip_set_hash_ipportnet.c
@@ -619,6 +619,7 @@ hash_ipportnet_create(struct ip_set *set, struct nlattr *tb[], u32 flags)
struct ip_set_hash *h;
u32 hashsize = IPSET_DEFAULT_HASHSIZE, maxelem = IPSET_DEFAULT_MAXELEM;
u8 hbits;
+ size_t hsize;
if (!(set->family == NFPROTO_IPV4 || set->family == NFPROTO_IPV6))
return -IPSET_ERR_INVALID_FAMILY;
@@ -648,9 +649,12 @@ hash_ipportnet_create(struct ip_set *set, struct nlattr *tb[], u32 flags)
h->timeout = IPSET_NO_TIMEOUT;
hbits = htable_bits(hashsize);
- h->table = ip_set_alloc(
- sizeof(struct htable)
- + jhash_size(hbits) * sizeof(struct hbucket));
+ hsize = htable_size(hbits);
+ if (hsize == 0) {
+ kfree(h);
+ return -ENOMEM;
+ }
+ h->table = ip_set_alloc(hsize);
if (!h->table) {
kfree(h);
return -ENOMEM;
diff --git a/net/netfilter/ipset/ip_set_hash_net.c b/net/netfilter/ipset/ip_set_hash_net.c
index 6607a814be57..c57a6a09906d 100644
--- a/net/netfilter/ipset/ip_set_hash_net.c
+++ b/net/netfilter/ipset/ip_set_hash_net.c
@@ -463,6 +463,7 @@ hash_net_create(struct ip_set *set, struct nlattr *tb[], u32 flags)
u32 hashsize = IPSET_DEFAULT_HASHSIZE, maxelem = IPSET_DEFAULT_MAXELEM;
struct ip_set_hash *h;
u8 hbits;
+ size_t hsize;
if (!(set->family == NFPROTO_IPV4 || set->family == NFPROTO_IPV6))
return -IPSET_ERR_INVALID_FAMILY;
@@ -492,9 +493,12 @@ hash_net_create(struct ip_set *set, struct nlattr *tb[], u32 flags)
h->timeout = IPSET_NO_TIMEOUT;
hbits = htable_bits(hashsize);
- h->table = ip_set_alloc(
- sizeof(struct htable)
- + jhash_size(hbits) * sizeof(struct hbucket));
+ hsize = htable_size(hbits);
+ if (hsize == 0) {
+ kfree(h);
+ return -ENOMEM;
+ }
+ h->table = ip_set_alloc(hsize);
if (!h->table) {
kfree(h);
return -ENOMEM;
diff --git a/net/netfilter/ipset/ip_set_hash_netiface.c b/net/netfilter/ipset/ip_set_hash_netiface.c
index 6093f3daa911..ee863943c826 100644
--- a/net/netfilter/ipset/ip_set_hash_netiface.c
+++ b/net/netfilter/ipset/ip_set_hash_netiface.c
@@ -726,6 +726,7 @@ hash_netiface_create(struct ip_set *set, struct nlattr *tb[], u32 flags)
struct ip_set_hash *h;
u32 hashsize = IPSET_DEFAULT_HASHSIZE, maxelem = IPSET_DEFAULT_MAXELEM;
u8 hbits;
+ size_t hsize;
if (!(set->family == NFPROTO_IPV4 || set->family == NFPROTO_IPV6))
return -IPSET_ERR_INVALID_FAMILY;
@@ -756,9 +757,12 @@ hash_netiface_create(struct ip_set *set, struct nlattr *tb[], u32 flags)
h->ahash_max = AHASH_MAX_SIZE;
hbits = htable_bits(hashsize);
- h->table = ip_set_alloc(
- sizeof(struct htable)
- + jhash_size(hbits) * sizeof(struct hbucket));
+ hsize = htable_size(hbits);
+ if (hsize == 0) {
+ kfree(h);
+ return -ENOMEM;
+ }
+ h->table = ip_set_alloc(hsize);
if (!h->table) {
kfree(h);
return -ENOMEM;
diff --git a/net/netfilter/ipset/ip_set_hash_netport.c b/net/netfilter/ipset/ip_set_hash_netport.c
index ae3c644adc14..fc3143a2d41b 100644
--- a/net/netfilter/ipset/ip_set_hash_netport.c
+++ b/net/netfilter/ipset/ip_set_hash_netport.c
@@ -575,6 +575,7 @@ hash_netport_create(struct ip_set *set, struct nlattr *tb[], u32 flags)
struct ip_set_hash *h;
u32 hashsize = IPSET_DEFAULT_HASHSIZE, maxelem = IPSET_DEFAULT_MAXELEM;
u8 hbits;
+ size_t hsize;
if (!(set->family == NFPROTO_IPV4 || set->family == NFPROTO_IPV6))
return -IPSET_ERR_INVALID_FAMILY;
@@ -604,9 +605,12 @@ hash_netport_create(struct ip_set *set, struct nlattr *tb[], u32 flags)
h->timeout = IPSET_NO_TIMEOUT;
hbits = htable_bits(hashsize);
- h->table = ip_set_alloc(
- sizeof(struct htable)
- + jhash_size(hbits) * sizeof(struct hbucket));
+ hsize = htable_size(hbits);
+ if (hsize == 0) {
+ kfree(h);
+ return -ENOMEM;
+ }
+ h->table = ip_set_alloc(hsize);
if (!h->table) {
kfree(h);
return -ENOMEM;
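
The hash-type hunks above all switch from open-coding sizeof(struct htable) + jhash_size(hbits) * sizeof(struct hbucket) to htable_size(), which, judging by the new error path, returns 0 when the bucket array would no longer fit in a size_t, so the create functions can fail with -ENOMEM before ever calling ip_set_alloc(). Below is a minimal user-space sketch of such an overflow-checked size helper; the struct layouts and the JHASH_SIZE() macro are illustrative stand-ins, not the in-tree definitions.

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* illustrative stand-ins for the ipset structures and jhash_size() */
struct hbucket { void *value; uint8_t size; uint8_t pos; };
struct htable  { uint8_t htable_bits; struct hbucket bucket[]; };

#define JHASH_SIZE(hbits) ((size_t)1 << (hbits))    /* 2^hbits buckets */

/* Allocation size for a table of 2^hbits buckets, or 0 if that size
 * cannot be represented in size_t (the caller then returns -ENOMEM). */
static size_t htable_size_sketch(uint8_t hbits)
{
        size_t nbuckets;

        /* shifting by the full width of size_t (or more) is undefined */
        if (hbits >= sizeof(size_t) * 8)
                return 0;
        nbuckets = JHASH_SIZE(hbits);
        /* would header + nbuckets * sizeof(struct hbucket) overflow? */
        if (nbuckets > (SIZE_MAX - sizeof(struct htable)) / sizeof(struct hbucket))
                return 0;
        return sizeof(struct htable) + nbuckets * sizeof(struct hbucket);
}

int main(void)
{
        printf("hbits=10 -> %zu bytes\n", htable_size_sketch(10));
        printf("hbits=64 -> %zu bytes (0 means overflow)\n", htable_size_sketch(64));
        return 0;
}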
diff --git a/net/netfilter/nf_conntrack_amanda.c b/net/netfilter/nf_conntrack_amanda.c
index 13fd2c55e329..f2de8c55ac50 100644
--- a/net/netfilter/nf_conntrack_amanda.c
+++ b/net/netfilter/nf_conntrack_amanda.c
@@ -107,8 +107,7 @@ static int amanda_help(struct sk_buff *skb,
/* No data? */
dataoff = protoff + sizeof(struct udphdr);
if (dataoff >= skb->len) {
- if (net_ratelimit())
- printk(KERN_ERR "amanda_help: skblen = %u\n", skb->len);
+ net_err_ratelimited("amanda_help: skblen = %u\n", skb->len);
return NF_ACCEPT;
}
diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c
index 32c59093146e..ac3af97cc468 100644
--- a/net/netfilter/nf_conntrack_core.c
+++ b/net/netfilter/nf_conntrack_core.c
@@ -683,10 +683,7 @@ __nf_conntrack_alloc(struct net *net, u16 zone,
unlikely(atomic_read(&net->ct.count) > nf_conntrack_max)) {
if (!early_drop(net, hash_bucket(hash, net))) {
atomic_dec(&net->ct.count);
- if (net_ratelimit())
- printk(KERN_WARNING
- "nf_conntrack: table full, dropping"
- " packet.\n");
+ net_warn_ratelimited("nf_conntrack: table full, dropping packet\n");
return ERR_PTR(-ENOMEM);
}
}
diff --git a/net/netfilter/nf_conntrack_expect.c b/net/netfilter/nf_conntrack_expect.c
index 4147ba3f653c..45cf602a76bc 100644
--- a/net/netfilter/nf_conntrack_expect.c
+++ b/net/netfilter/nf_conntrack_expect.c
@@ -424,9 +424,7 @@ static inline int __nf_ct_expect_check(struct nf_conntrack_expect *expect)
}
if (net->ct.expect_count >= nf_ct_expect_max) {
- if (net_ratelimit())
- printk(KERN_WARNING
- "nf_conntrack: expectation table full\n");
+ net_warn_ratelimited("nf_conntrack: expectation table full\n");
ret = -EMFILE;
}
out:
diff --git a/net/netfilter/nf_conntrack_h323_main.c b/net/netfilter/nf_conntrack_h323_main.c
index 471b054ad002..46d69d7f1bb4 100644
--- a/net/netfilter/nf_conntrack_h323_main.c
+++ b/net/netfilter/nf_conntrack_h323_main.c
@@ -605,8 +605,7 @@ static int h245_help(struct sk_buff *skb, unsigned int protoff,
drop:
spin_unlock_bh(&nf_h323_lock);
- if (net_ratelimit())
- pr_info("nf_ct_h245: packet dropped\n");
+ net_info_ratelimited("nf_ct_h245: packet dropped\n");
return NF_DROP;
}
@@ -1156,8 +1155,7 @@ static int q931_help(struct sk_buff *skb, unsigned int protoff,
drop:
spin_unlock_bh(&nf_h323_lock);
- if (net_ratelimit())
- pr_info("nf_ct_q931: packet dropped\n");
+ net_info_ratelimited("nf_ct_q931: packet dropped\n");
return NF_DROP;
}
@@ -1731,8 +1729,7 @@ static int ras_help(struct sk_buff *skb, unsigned int protoff,
drop:
spin_unlock_bh(&nf_h323_lock);
- if (net_ratelimit())
- pr_info("nf_ct_ras: packet dropped\n");
+ net_info_ratelimited("nf_ct_ras: packet dropped\n");
return NF_DROP;
}
@@ -1833,4 +1830,6 @@ MODULE_AUTHOR("Jing Min Zhao <zhaojingmin@users.sourceforge.net>");
MODULE_DESCRIPTION("H.323 connection tracking helper");
MODULE_LICENSE("GPL");
MODULE_ALIAS("ip_conntrack_h323");
-MODULE_ALIAS_NFCT_HELPER("h323");
+MODULE_ALIAS_NFCT_HELPER("RAS");
+MODULE_ALIAS_NFCT_HELPER("Q.931");
+MODULE_ALIAS_NFCT_HELPER("H.245");
diff --git a/net/netfilter/nf_conntrack_irc.c b/net/netfilter/nf_conntrack_irc.c
index 4f9390b98697..81366c118271 100644
--- a/net/netfilter/nf_conntrack_irc.c
+++ b/net/netfilter/nf_conntrack_irc.c
@@ -185,11 +185,9 @@ static int help(struct sk_buff *skb, unsigned int protoff,
tuple = &ct->tuplehash[dir].tuple;
if (tuple->src.u3.ip != dcc_ip &&
tuple->dst.u3.ip != dcc_ip) {
- if (net_ratelimit())
- printk(KERN_WARNING
- "Forged DCC command from %pI4: %pI4:%u\n",
- &tuple->src.u3.ip,
- &dcc_ip, dcc_port);
+ net_warn_ratelimited("Forged DCC command from %pI4: %pI4:%u\n",
+ &tuple->src.u3.ip,
+ &dcc_ip, dcc_port);
continue;
}
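
The conntrack hunks above, and several more further down in this series, drop the open-coded "if (net_ratelimit()) printk()/pr_*()" pairs in favour of the net_<level>_ratelimited() helpers. Their shape is essentially the following; this is a hedged sketch with user-space stand-ins for net_ratelimit() and the pr_*() macros, and the in-tree definitions in include/linux/net.h may differ in detail.

#include <stdbool.h>
#include <stdio.h>

/* user-space stand-ins for the kernel primitives */
static bool net_ratelimit(void) { return true; }
#define pr_warn(fmt, ...) fprintf(stderr, "warn: " fmt, ##__VA_ARGS__)
#define pr_err(fmt, ...)  fprintf(stderr, "err: "  fmt, ##__VA_ARGS__)

/* check the rate limit once, then forward to the chosen printk level */
#define net_ratelimited_function(function, ...)         \
do {                                                    \
        if (net_ratelimit())                            \
                function(__VA_ARGS__);                  \
} while (0)

#define net_warn_ratelimited(fmt, ...) \
        net_ratelimited_function(pr_warn, fmt, ##__VA_ARGS__)
#define net_err_ratelimited(fmt, ...) \
        net_ratelimited_function(pr_err, fmt, ##__VA_ARGS__)

int main(void)
{
        net_warn_ratelimited("nf_conntrack: table full, dropping packet\n");
        net_err_ratelimited("amanda_help: skblen = %u\n", 12u);
        return 0;
}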
diff --git a/net/netfilter/nf_conntrack_proto_tcp.c b/net/netfilter/nf_conntrack_proto_tcp.c
index 4dfbfa840f8a..21ff1a99f534 100644
--- a/net/netfilter/nf_conntrack_proto_tcp.c
+++ b/net/netfilter/nf_conntrack_proto_tcp.c
@@ -952,7 +952,8 @@ static int tcp_packet(struct nf_conn *ct,
spin_unlock_bh(&ct->lock);
if (LOG_INVALID(net, IPPROTO_TCP))
nf_log_packet(pf, 0, skb, NULL, NULL, NULL,
- "nf_ct_tcp: invalid packet ignored ");
+ "nf_ct_tcp: invalid packet ignored in "
+ "state %s ", tcp_conntrack_names[old_state]);
return NF_ACCEPT;
case TCP_CONNTRACK_MAX:
/* Invalid packet */
diff --git a/net/netfilter/nfnetlink_queue.c b/net/netfilter/nfnetlink_queue.c
index 8d6bcf32c0ed..4162437b8361 100644
--- a/net/netfilter/nfnetlink_queue.c
+++ b/net/netfilter/nfnetlink_queue.c
@@ -395,8 +395,7 @@ nlmsg_failure:
nla_put_failure:
if (skb)
kfree_skb(skb);
- if (net_ratelimit())
- printk(KERN_ERR "nf_queue: error creating packet message\n");
+ net_err_ratelimited("nf_queue: error creating packet message\n");
return NULL;
}
@@ -433,10 +432,8 @@ nfqnl_enqueue_packet(struct nf_queue_entry *entry, unsigned int queuenum)
}
if (queue->queue_total >= queue->queue_maxlen) {
queue->queue_dropped++;
- if (net_ratelimit())
- printk(KERN_WARNING "nf_queue: full at %d entries, "
- "dropping packets(s).\n",
- queue->queue_total);
+ net_warn_ratelimited("nf_queue: full at %d entries, dropping packets(s)\n",
+ queue->queue_total);
goto err_out_free_nskb;
}
entry->id = ++queue->id_sequence;
diff --git a/net/netfilter/xt_CT.c b/net/netfilter/xt_CT.c
index 3746d8b9a478..a51de9b052be 100644
--- a/net/netfilter/xt_CT.c
+++ b/net/netfilter/xt_CT.c
@@ -17,7 +17,6 @@
#include <net/netfilter/nf_conntrack_l4proto.h>
#include <net/netfilter/nf_conntrack_helper.h>
#include <net/netfilter/nf_conntrack_ecache.h>
-#include <net/netfilter/nf_conntrack_l4proto.h>
#include <net/netfilter/nf_conntrack_timeout.h>
#include <net/netfilter/nf_conntrack_zones.h>
diff --git a/net/netfilter/xt_HMARK.c b/net/netfilter/xt_HMARK.c
new file mode 100644
index 000000000000..0a96a43108ed
--- /dev/null
+++ b/net/netfilter/xt_HMARK.c
@@ -0,0 +1,362 @@
+/*
+ * xt_HMARK - Netfilter module to set mark by means of hashing
+ *
+ * (C) 2012 by Hans Schillstrom <hans.schillstrom@ericsson.com>
+ * (C) 2012 by Pablo Neira Ayuso <pablo@netfilter.org>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ */
+
+#include <linux/module.h>
+#include <linux/skbuff.h>
+#include <linux/icmp.h>
+
+#include <linux/netfilter/x_tables.h>
+#include <linux/netfilter/xt_HMARK.h>
+
+#include <net/ip.h>
+#if IS_ENABLED(CONFIG_NF_CONNTRACK)
+#include <net/netfilter/nf_conntrack.h>
+#endif
+#if IS_ENABLED(CONFIG_IP6_NF_IPTABLES)
+#include <net/ipv6.h>
+#include <linux/netfilter_ipv6/ip6_tables.h>
+#endif
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Hans Schillstrom <hans.schillstrom@ericsson.com>");
+MODULE_DESCRIPTION("Xtables: packet marking using hash calculation");
+MODULE_ALIAS("ipt_HMARK");
+MODULE_ALIAS("ip6t_HMARK");
+
+struct hmark_tuple {
+ u32 src;
+ u32 dst;
+ union hmark_ports uports;
+ uint8_t proto;
+};
+
+static inline u32 hmark_addr6_mask(const __u32 *addr32, const __u32 *mask)
+{
+ return (addr32[0] & mask[0]) ^
+ (addr32[1] & mask[1]) ^
+ (addr32[2] & mask[2]) ^
+ (addr32[3] & mask[3]);
+}
+
+static inline u32
+hmark_addr_mask(int l3num, const __u32 *addr32, const __u32 *mask)
+{
+ switch (l3num) {
+ case AF_INET:
+ return *addr32 & *mask;
+ case AF_INET6:
+ return hmark_addr6_mask(addr32, mask);
+ }
+ return 0;
+}
+
+static int
+hmark_ct_set_htuple(const struct sk_buff *skb, struct hmark_tuple *t,
+ const struct xt_hmark_info *info)
+{
+#if IS_ENABLED(CONFIG_NF_CONNTRACK)
+ enum ip_conntrack_info ctinfo;
+ struct nf_conn *ct = nf_ct_get(skb, &ctinfo);
+ struct nf_conntrack_tuple *otuple;
+ struct nf_conntrack_tuple *rtuple;
+
+ if (ct == NULL || nf_ct_is_untracked(ct))
+ return -1;
+
+ otuple = &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple;
+ rtuple = &ct->tuplehash[IP_CT_DIR_REPLY].tuple;
+
+ t->src = hmark_addr_mask(otuple->src.l3num, otuple->src.u3.all,
+ info->src_mask.all);
+ t->dst = hmark_addr_mask(otuple->src.l3num, rtuple->src.u3.all,
+ info->dst_mask.all);
+
+ if (info->flags & XT_HMARK_FLAG(XT_HMARK_METHOD_L3))
+ return 0;
+
+ t->proto = nf_ct_protonum(ct);
+ if (t->proto != IPPROTO_ICMP) {
+ t->uports.p16.src = otuple->src.u.all;
+ t->uports.p16.dst = rtuple->src.u.all;
+ t->uports.v32 = (t->uports.v32 & info->port_mask.v32) |
+ info->port_set.v32;
+ if (t->uports.p16.dst < t->uports.p16.src)
+ swap(t->uports.p16.dst, t->uports.p16.src);
+ }
+
+ return 0;
+#else
+ return -1;
+#endif
+}
+
+static inline u32
+hmark_hash(struct hmark_tuple *t, const struct xt_hmark_info *info)
+{
+ u32 hash;
+
+ if (t->dst < t->src)
+ swap(t->src, t->dst);
+
+ hash = jhash_3words(t->src, t->dst, t->uports.v32, info->hashrnd);
+ hash = hash ^ (t->proto & info->proto_mask);
+
+ return (((u64)hash * info->hmodulus) >> 32) + info->hoffset;
+}
+
+static void
+hmark_set_tuple_ports(const struct sk_buff *skb, unsigned int nhoff,
+ struct hmark_tuple *t, const struct xt_hmark_info *info)
+{
+ int protoff;
+
+ protoff = proto_ports_offset(t->proto);
+ if (protoff < 0)
+ return;
+
+ nhoff += protoff;
+ if (skb_copy_bits(skb, nhoff, &t->uports, sizeof(t->uports)) < 0)
+ return;
+
+ t->uports.v32 = (t->uports.v32 & info->port_mask.v32) |
+ info->port_set.v32;
+
+ if (t->uports.p16.dst < t->uports.p16.src)
+ swap(t->uports.p16.dst, t->uports.p16.src);
+}
+
+#if IS_ENABLED(CONFIG_IP6_NF_IPTABLES)
+static int get_inner6_hdr(const struct sk_buff *skb, int *offset)
+{
+ struct icmp6hdr *icmp6h, _ih6;
+
+ icmp6h = skb_header_pointer(skb, *offset, sizeof(_ih6), &_ih6);
+ if (icmp6h == NULL)
+ return 0;
+
+ if (icmp6h->icmp6_type && icmp6h->icmp6_type < 128) {
+ *offset += sizeof(struct icmp6hdr);
+ return 1;
+ }
+ return 0;
+}
+
+static int
+hmark_pkt_set_htuple_ipv6(const struct sk_buff *skb, struct hmark_tuple *t,
+ const struct xt_hmark_info *info)
+{
+ struct ipv6hdr *ip6, _ip6;
+ int flag = IP6T_FH_F_AUTH;
+ unsigned int nhoff = 0;
+ u16 fragoff = 0;
+ int nexthdr;
+
+ ip6 = (struct ipv6hdr *) (skb->data + skb_network_offset(skb));
+ nexthdr = ipv6_find_hdr(skb, &nhoff, -1, &fragoff, &flag);
+ if (nexthdr < 0)
+ return 0;
+ /* No need to check for icmp errors on fragments */
+ if ((flag & IP6T_FH_F_FRAG) || (nexthdr != IPPROTO_ICMPV6))
+ goto noicmp;
+ /* Use inner header in case of ICMP errors */
+ if (get_inner6_hdr(skb, &nhoff)) {
+ ip6 = skb_header_pointer(skb, nhoff, sizeof(_ip6), &_ip6);
+ if (ip6 == NULL)
+ return -1;
+ /* If AH present, use SPI like in ESP. */
+ flag = IP6T_FH_F_AUTH;
+ nexthdr = ipv6_find_hdr(skb, &nhoff, -1, &fragoff, &flag);
+ if (nexthdr < 0)
+ return -1;
+ }
+noicmp:
+ t->src = hmark_addr6_mask(ip6->saddr.s6_addr32, info->src_mask.all);
+ t->dst = hmark_addr6_mask(ip6->daddr.s6_addr32, info->dst_mask.all);
+
+ if (info->flags & XT_HMARK_FLAG(XT_HMARK_METHOD_L3))
+ return 0;
+
+ t->proto = nexthdr;
+ if (t->proto == IPPROTO_ICMPV6)
+ return 0;
+
+ if (flag & IP6T_FH_F_FRAG)
+ return 0;
+
+ hmark_set_tuple_ports(skb, nhoff, t, info);
+ return 0;
+}
+
+static unsigned int
+hmark_tg_v6(struct sk_buff *skb, const struct xt_action_param *par)
+{
+ const struct xt_hmark_info *info = par->targinfo;
+ struct hmark_tuple t;
+
+ memset(&t, 0, sizeof(struct hmark_tuple));
+
+ if (info->flags & XT_HMARK_FLAG(XT_HMARK_CT)) {
+ if (hmark_ct_set_htuple(skb, &t, info) < 0)
+ return XT_CONTINUE;
+ } else {
+ if (hmark_pkt_set_htuple_ipv6(skb, &t, info) < 0)
+ return XT_CONTINUE;
+ }
+
+ skb->mark = hmark_hash(&t, info);
+ return XT_CONTINUE;
+}
+#endif
+
+static int get_inner_hdr(const struct sk_buff *skb, int iphsz, int *nhoff)
+{
+ const struct icmphdr *icmph;
+ struct icmphdr _ih;
+
+ /* Not enough header? */
+ icmph = skb_header_pointer(skb, *nhoff + iphsz, sizeof(_ih), &_ih);
+ if (icmph == NULL || icmph->type > NR_ICMP_TYPES)
+ return 0;
+
+ /* Error message? */
+ if (icmph->type != ICMP_DEST_UNREACH &&
+ icmph->type != ICMP_SOURCE_QUENCH &&
+ icmph->type != ICMP_TIME_EXCEEDED &&
+ icmph->type != ICMP_PARAMETERPROB &&
+ icmph->type != ICMP_REDIRECT)
+ return 0;
+
+ *nhoff += iphsz + sizeof(_ih);
+ return 1;
+}
+
+static int
+hmark_pkt_set_htuple_ipv4(const struct sk_buff *skb, struct hmark_tuple *t,
+ const struct xt_hmark_info *info)
+{
+ struct iphdr *ip, _ip;
+ int nhoff = skb_network_offset(skb);
+
+ ip = (struct iphdr *) (skb->data + nhoff);
+ if (ip->protocol == IPPROTO_ICMP) {
+ /* Use inner header in case of ICMP errors */
+ if (get_inner_hdr(skb, ip->ihl * 4, &nhoff)) {
+ ip = skb_header_pointer(skb, nhoff, sizeof(_ip), &_ip);
+ if (ip == NULL)
+ return -1;
+ }
+ }
+
+ t->src = (__force u32) ip->saddr;
+ t->dst = (__force u32) ip->daddr;
+
+ t->src &= info->src_mask.ip;
+ t->dst &= info->dst_mask.ip;
+
+ if (info->flags & XT_HMARK_FLAG(XT_HMARK_METHOD_L3))
+ return 0;
+
+ t->proto = ip->protocol;
+
+ /* ICMP has no ports, skip */
+ if (t->proto == IPPROTO_ICMP)
+ return 0;
+
+ /* follow-up fragments don't contain ports, skip all fragments */
+ if (ip->frag_off & htons(IP_MF | IP_OFFSET))
+ return 0;
+
+ hmark_set_tuple_ports(skb, (ip->ihl * 4) + nhoff, t, info);
+
+ return 0;
+}
+
+static unsigned int
+hmark_tg_v4(struct sk_buff *skb, const struct xt_action_param *par)
+{
+ const struct xt_hmark_info *info = par->targinfo;
+ struct hmark_tuple t;
+
+ memset(&t, 0, sizeof(struct hmark_tuple));
+
+ if (info->flags & XT_HMARK_FLAG(XT_HMARK_CT)) {
+ if (hmark_ct_set_htuple(skb, &t, info) < 0)
+ return XT_CONTINUE;
+ } else {
+ if (hmark_pkt_set_htuple_ipv4(skb, &t, info) < 0)
+ return XT_CONTINUE;
+ }
+
+ skb->mark = hmark_hash(&t, info);
+ return XT_CONTINUE;
+}
+
+static int hmark_tg_check(const struct xt_tgchk_param *par)
+{
+ const struct xt_hmark_info *info = par->targinfo;
+
+ if (!info->hmodulus) {
+ pr_info("xt_HMARK: hash modulus can't be zero\n");
+ return -EINVAL;
+ }
+ if (info->proto_mask &&
+ (info->flags & XT_HMARK_FLAG(XT_HMARK_METHOD_L3))) {
+ pr_info("xt_HMARK: proto mask must be zero with L3 mode\n");
+ return -EINVAL;
+ }
+ if (info->flags & XT_HMARK_FLAG(XT_HMARK_SPI_MASK) &&
+ (info->flags & (XT_HMARK_FLAG(XT_HMARK_SPORT_MASK) |
+ XT_HMARK_FLAG(XT_HMARK_DPORT_MASK)))) {
+ pr_info("xt_HMARK: spi-mask and port-mask can't be combined\n");
+ return -EINVAL;
+ }
+ if (info->flags & XT_HMARK_FLAG(XT_HMARK_SPI) &&
+ (info->flags & (XT_HMARK_FLAG(XT_HMARK_SPORT) |
+ XT_HMARK_FLAG(XT_HMARK_DPORT)))) {
+ pr_info("xt_HMARK: spi-set and port-set can't be combined\n");
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static struct xt_target hmark_tg_reg[] __read_mostly = {
+ {
+ .name = "HMARK",
+ .family = NFPROTO_IPV4,
+ .target = hmark_tg_v4,
+ .targetsize = sizeof(struct xt_hmark_info),
+ .checkentry = hmark_tg_check,
+ .me = THIS_MODULE,
+ },
+#if IS_ENABLED(CONFIG_IP6_NF_IPTABLES)
+ {
+ .name = "HMARK",
+ .family = NFPROTO_IPV6,
+ .target = hmark_tg_v6,
+ .targetsize = sizeof(struct xt_hmark_info),
+ .checkentry = hmark_tg_check,
+ .me = THIS_MODULE,
+ },
+#endif
+};
+
+static int __init hmark_tg_init(void)
+{
+ return xt_register_targets(hmark_tg_reg, ARRAY_SIZE(hmark_tg_reg));
+}
+
+static void __exit hmark_tg_exit(void)
+{
+ xt_unregister_targets(hmark_tg_reg, ARRAY_SIZE(hmark_tg_reg));
+}
+
+module_init(hmark_tg_init);
+module_exit(hmark_tg_exit);
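
hmark_hash() above reduces the masked, direction-normalized addresses, the packed ports and the protocol to a single 32-bit value, then scales it into [hoffset, hoffset + hmodulus) with a multiply-shift rather than a modulo. The arithmetic in isolation, as a user-space sketch: mix3() is only a stand-in for the kernel's jhash_3words(), and the port packing mirrors union hmark_ports from the patch.

#include <stdint.h>
#include <stdio.h>

/* stand-in mixer; the module itself uses jhash_3words() */
static uint32_t mix3(uint32_t a, uint32_t b, uint32_t c, uint32_t seed)
{
        uint32_t h = seed ^ 0x9e3779b9u;
        h ^= a; h *= 0x85ebca6bu; h ^= h >> 13;
        h ^= b; h *= 0xc2b2ae35u; h ^= h >> 16;
        h ^= c; h *= 0x27d4eb2fu; h ^= h >> 15;
        return h;
}

/* src/dst are already masked and swapped so a flow hashes the same in
 * both directions; ports are packed into one 32-bit word (sport, dport). */
static uint32_t hmark_mark(uint32_t src, uint32_t dst, uint32_t ports,
                           uint8_t proto, uint8_t proto_mask,
                           uint32_t hashrnd, uint32_t hmodulus,
                           uint32_t hoffset)
{
        uint32_t hash = mix3(src, dst, ports, hashrnd) ^ (proto & proto_mask);

        /* multiply-shift maps the hash uniformly onto [0, hmodulus) */
        return (uint32_t)(((uint64_t)hash * hmodulus) >> 32) + hoffset;
}

int main(void)
{
        /* e.g. spread flows over 4 firewall marks starting at 100 */
        printf("mark = %u\n",
               hmark_mark(0x0a000001u, 0x0a000002u, 0x1f900050u, 6, 0xff,
                          0xdeadbeefu, 4, 100));
        return 0;
}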
diff --git a/net/netfilter/xt_TCPMSS.c b/net/netfilter/xt_TCPMSS.c
index 190ad37c5cf8..71a266de5fb4 100644
--- a/net/netfilter/xt_TCPMSS.c
+++ b/net/netfilter/xt_TCPMSS.c
@@ -67,15 +67,13 @@ tcpmss_mangle_packet(struct sk_buff *skb,
if (info->mss == XT_TCPMSS_CLAMP_PMTU) {
if (dst_mtu(skb_dst(skb)) <= minlen) {
- if (net_ratelimit())
- pr_err("unknown or invalid path-MTU (%u)\n",
- dst_mtu(skb_dst(skb)));
+ net_err_ratelimited("unknown or invalid path-MTU (%u)\n",
+ dst_mtu(skb_dst(skb)));
return -1;
}
if (in_mtu <= minlen) {
- if (net_ratelimit())
- pr_err("unknown or invalid path-MTU (%u)\n",
- in_mtu);
+ net_err_ratelimited("unknown or invalid path-MTU (%u)\n",
+ in_mtu);
return -1;
}
newmss = min(dst_mtu(skb_dst(skb)), in_mtu) - minlen;
diff --git a/net/netfilter/xt_TPROXY.c b/net/netfilter/xt_TPROXY.c
index 35a959a096e0..146033a86de8 100644
--- a/net/netfilter/xt_TPROXY.c
+++ b/net/netfilter/xt_TPROXY.c
@@ -282,10 +282,10 @@ tproxy_tg6_v1(struct sk_buff *skb, const struct xt_action_param *par)
struct sock *sk;
const struct in6_addr *laddr;
__be16 lport;
- int thoff;
+ int thoff = 0;
int tproto;
- tproto = ipv6_find_hdr(skb, &thoff, -1, NULL);
+ tproto = ipv6_find_hdr(skb, &thoff, -1, NULL, NULL);
if (tproto < 0) {
pr_debug("unable to find transport header in IPv6 packet, dropping\n");
return NF_DROP;
diff --git a/net/netfilter/xt_hashlimit.c b/net/netfilter/xt_hashlimit.c
index d95f9c963cde..26a668a84aa2 100644
--- a/net/netfilter/xt_hashlimit.c
+++ b/net/netfilter/xt_hashlimit.c
@@ -171,8 +171,7 @@ dsthash_alloc_init(struct xt_hashlimit_htable *ht,
if (ht->cfg.max && ht->count >= ht->cfg.max) {
/* FIXME: do something. question is what.. */
- if (net_ratelimit())
- pr_err("max count of %u reached\n", ht->cfg.max);
+ net_err_ratelimited("max count of %u reached\n", ht->cfg.max);
ent = NULL;
} else
ent = kmem_cache_alloc(hashlimit_cachep, GFP_ATOMIC);
@@ -388,9 +387,20 @@ static void htable_put(struct xt_hashlimit_htable *hinfo)
#define CREDITS_PER_JIFFY POW2_BELOW32(MAX_CPJ)
+/* in byte mode, the lowest possible rate is one packet/second.
+ * credit_cap is used as a counter that tells us how many times we can
+ * refill the "credits available" counter when it becomes empty.
+ */
+#define MAX_CPJ_BYTES (0xFFFFFFFF / HZ)
+#define CREDITS_PER_JIFFY_BYTES POW2_BELOW32(MAX_CPJ_BYTES)
+
+static u32 xt_hashlimit_len_to_chunks(u32 len)
+{
+ return (len >> XT_HASHLIMIT_BYTE_SHIFT) + 1;
+}
+
/* Precision saver. */
-static inline u_int32_t
-user2credits(u_int32_t user)
+static u32 user2credits(u32 user)
{
/* If multiplying would overflow... */
if (user > 0xFFFFFFFF / (HZ*CREDITS_PER_JIFFY))
@@ -400,12 +410,53 @@ user2credits(u_int32_t user)
return (user * HZ * CREDITS_PER_JIFFY) / XT_HASHLIMIT_SCALE;
}
-static inline void rateinfo_recalc(struct dsthash_ent *dh, unsigned long now)
+static u32 user2credits_byte(u32 user)
{
- dh->rateinfo.credit += (now - dh->rateinfo.prev) * CREDITS_PER_JIFFY;
- if (dh->rateinfo.credit > dh->rateinfo.credit_cap)
- dh->rateinfo.credit = dh->rateinfo.credit_cap;
+ u64 us = user;
+ us *= HZ * CREDITS_PER_JIFFY_BYTES;
+ return (u32) (us >> 32);
+}
+
+static void rateinfo_recalc(struct dsthash_ent *dh, unsigned long now, u32 mode)
+{
+ unsigned long delta = now - dh->rateinfo.prev;
+ u32 cap;
+
+ if (delta == 0)
+ return;
+
dh->rateinfo.prev = now;
+
+ if (mode & XT_HASHLIMIT_BYTES) {
+ u32 tmp = dh->rateinfo.credit;
+ dh->rateinfo.credit += CREDITS_PER_JIFFY_BYTES * delta;
+ cap = CREDITS_PER_JIFFY_BYTES * HZ;
+ if (tmp >= dh->rateinfo.credit) {/* overflow */
+ dh->rateinfo.credit = cap;
+ return;
+ }
+ } else {
+ dh->rateinfo.credit += delta * CREDITS_PER_JIFFY;
+ cap = dh->rateinfo.credit_cap;
+ }
+ if (dh->rateinfo.credit > cap)
+ dh->rateinfo.credit = cap;
+}
+
+static void rateinfo_init(struct dsthash_ent *dh,
+ struct xt_hashlimit_htable *hinfo)
+{
+ dh->rateinfo.prev = jiffies;
+ if (hinfo->cfg.mode & XT_HASHLIMIT_BYTES) {
+ dh->rateinfo.credit = CREDITS_PER_JIFFY_BYTES * HZ;
+ dh->rateinfo.cost = user2credits_byte(hinfo->cfg.avg);
+ dh->rateinfo.credit_cap = hinfo->cfg.burst;
+ } else {
+ dh->rateinfo.credit = user2credits(hinfo->cfg.avg *
+ hinfo->cfg.burst);
+ dh->rateinfo.cost = user2credits(hinfo->cfg.avg);
+ dh->rateinfo.credit_cap = dh->rateinfo.credit;
+ }
}
static inline __be32 maskl(__be32 a, unsigned int l)
@@ -511,6 +562,21 @@ hashlimit_init_dst(const struct xt_hashlimit_htable *hinfo,
return 0;
}
+static u32 hashlimit_byte_cost(unsigned int len, struct dsthash_ent *dh)
+{
+ u64 tmp = xt_hashlimit_len_to_chunks(len);
+ tmp = tmp * dh->rateinfo.cost;
+
+ if (unlikely(tmp > CREDITS_PER_JIFFY_BYTES * HZ))
+ tmp = CREDITS_PER_JIFFY_BYTES * HZ;
+
+ if (dh->rateinfo.credit < tmp && dh->rateinfo.credit_cap) {
+ dh->rateinfo.credit_cap--;
+ dh->rateinfo.credit = CREDITS_PER_JIFFY_BYTES * HZ;
+ }
+ return (u32) tmp;
+}
+
static bool
hashlimit_mt(const struct sk_buff *skb, struct xt_action_param *par)
{
@@ -519,6 +585,7 @@ hashlimit_mt(const struct sk_buff *skb, struct xt_action_param *par)
unsigned long now = jiffies;
struct dsthash_ent *dh;
struct dsthash_dst dst;
+ u32 cost;
if (hashlimit_init_dst(hinfo, &dst, skb, par->thoff) < 0)
goto hotdrop;
@@ -532,21 +599,21 @@ hashlimit_mt(const struct sk_buff *skb, struct xt_action_param *par)
goto hotdrop;
}
dh->expires = jiffies + msecs_to_jiffies(hinfo->cfg.expire);
- dh->rateinfo.prev = jiffies;
- dh->rateinfo.credit = user2credits(hinfo->cfg.avg *
- hinfo->cfg.burst);
- dh->rateinfo.credit_cap = user2credits(hinfo->cfg.avg *
- hinfo->cfg.burst);
- dh->rateinfo.cost = user2credits(hinfo->cfg.avg);
+ rateinfo_init(dh, hinfo);
} else {
/* update expiration timeout */
dh->expires = now + msecs_to_jiffies(hinfo->cfg.expire);
- rateinfo_recalc(dh, now);
+ rateinfo_recalc(dh, now, hinfo->cfg.mode);
}
- if (dh->rateinfo.credit >= dh->rateinfo.cost) {
+ if (info->cfg.mode & XT_HASHLIMIT_BYTES)
+ cost = hashlimit_byte_cost(skb->len, dh);
+ else
+ cost = dh->rateinfo.cost;
+
+ if (dh->rateinfo.credit >= cost) {
/* below the limit */
- dh->rateinfo.credit -= dh->rateinfo.cost;
+ dh->rateinfo.credit -= cost;
spin_unlock(&dh->lock);
rcu_read_unlock_bh();
return !(info->cfg.mode & XT_HASHLIMIT_INVERT);
@@ -568,14 +635,6 @@ static int hashlimit_mt_check(const struct xt_mtchk_param *par)
struct xt_hashlimit_mtinfo1 *info = par->matchinfo;
int ret;
- /* Check for overflow. */
- if (info->cfg.burst == 0 ||
- user2credits(info->cfg.avg * info->cfg.burst) <
- user2credits(info->cfg.avg)) {
- pr_info("overflow, try lower: %u/%u\n",
- info->cfg.avg, info->cfg.burst);
- return -ERANGE;
- }
if (info->cfg.gc_interval == 0 || info->cfg.expire == 0)
return -EINVAL;
if (info->name[sizeof(info->name)-1] != '\0')
@@ -588,6 +647,26 @@ static int hashlimit_mt_check(const struct xt_mtchk_param *par)
return -EINVAL;
}
+ if (info->cfg.mode & ~XT_HASHLIMIT_ALL) {
+ pr_info("Unknown mode mask %X, kernel too old?\n",
+ info->cfg.mode);
+ return -EINVAL;
+ }
+
+ /* Check for overflow. */
+ if (info->cfg.mode & XT_HASHLIMIT_BYTES) {
+ if (user2credits_byte(info->cfg.avg) == 0) {
+ pr_info("overflow, rate too high: %u\n", info->cfg.avg);
+ return -EINVAL;
+ }
+ } else if (info->cfg.burst == 0 ||
+ user2credits(info->cfg.avg * info->cfg.burst) <
+ user2credits(info->cfg.avg)) {
+ pr_info("overflow, try lower: %u/%u\n",
+ info->cfg.avg, info->cfg.burst);
+ return -ERANGE;
+ }
+
mutex_lock(&hashlimit_mutex);
info->hinfo = htable_find_get(net, info->name, par->family);
if (info->hinfo == NULL) {
@@ -680,10 +759,11 @@ static int dl_seq_real_show(struct dsthash_ent *ent, u_int8_t family,
struct seq_file *s)
{
int res;
+ const struct xt_hashlimit_htable *ht = s->private;
spin_lock(&ent->lock);
/* recalculate to show accurate numbers */
- rateinfo_recalc(ent, jiffies);
+ rateinfo_recalc(ent, jiffies, ht->cfg.mode);
switch (family) {
case NFPROTO_IPV4:
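
The xt_hashlimit changes above add a byte-based mode next to the existing packet mode: the credit pool is pinned at CREDITS_PER_JIFFY_BYTES * HZ (one second worth of credit), every packet is charged per 16-byte chunk, and credit_cap is repurposed as the number of full-pool refills the burst still allows. The user-space model below follows that accounting; HZ, the constants, the struct and the cost value are stated assumptions, and the encoding of cfg.avg (which yields the per-chunk cost) is deliberately left out.

#include <stdint.h>
#include <stdio.h>

#define HZ                      1000u
#define CREDITS_PER_JIFFY_BYTES (1u << 22)      /* POW2_BELOW32(0xFFFFFFFF / HZ) */
#define POOL (CREDITS_PER_JIFFY_BYTES * HZ)     /* at most one second of credit */
#define BYTE_SHIFT              4               /* account in 16-byte chunks */

struct rate {
        uint32_t credit;        /* credits currently available */
        uint32_t credit_cap;    /* byte mode: full-pool refills ("bursts") left */
        uint32_t cost;          /* credits charged per 16-byte chunk */
        uint64_t prev;          /* last update, in jiffies */
};

static uint32_t len_to_chunks(uint32_t len)
{
        return (len >> BYTE_SHIFT) + 1;
}

/* earn credit for the jiffies that passed, never more than one second worth */
static void recalc(struct rate *r, uint64_t now)
{
        uint64_t credit = (uint64_t)r->credit +
                          (now - r->prev) * CREDITS_PER_JIFFY_BYTES;

        r->prev = now;
        r->credit = credit > POOL ? POOL : (uint32_t)credit;
}

/* charge one packet; non-zero means "below the limit" (the match succeeds
 * unless the rule is inverted) */
static int charge(struct rate *r, uint32_t pktlen)
{
        uint64_t cost = (uint64_t)len_to_chunks(pktlen) * r->cost;

        if (cost > POOL)
                cost = POOL;
        if (r->credit < cost && r->credit_cap) {   /* spend one burst refill */
                r->credit_cap--;
                r->credit = POOL;
        }
        if (r->credit >= cost) {
                r->credit -= (uint32_t)cost;
                return 1;
        }
        return 0;
}

int main(void)
{
        struct rate r = { .credit = POOL, .credit_cap = 2,
                          .cost = 30000000u, .prev = 0 };

        for (uint64_t now = 0; now < 5; now++) {
                recalc(&r, now);
                printf("t=%llu: 1500-byte packet %s\n", (unsigned long long)now,
                       charge(&r, 1500) ? "matches" : "over limit");
        }
        return 0;
}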
diff --git a/net/netfilter/xt_limit.c b/net/netfilter/xt_limit.c
index 32b7a579a032..5c22ce8ab309 100644
--- a/net/netfilter/xt_limit.c
+++ b/net/netfilter/xt_limit.c
@@ -88,8 +88,7 @@ limit_mt(const struct sk_buff *skb, struct xt_action_param *par)
}
/* Precision saver. */
-static u_int32_t
-user2credits(u_int32_t user)
+static u32 user2credits(u32 user)
{
/* If multiplying would overflow... */
if (user > 0xFFFFFFFF / (HZ*CREDITS_PER_JIFFY))
@@ -123,7 +122,7 @@ static int limit_mt_check(const struct xt_mtchk_param *par)
128. */
priv->prev = jiffies;
priv->credit = user2credits(r->avg * r->burst); /* Credits full. */
- r->credit_cap = user2credits(r->avg * r->burst); /* Credits full. */
+ r->credit_cap = priv->credit; /* Credits full. */
r->cost = user2credits(r->avg);
}
return 0;
diff --git a/net/netfilter/xt_mac.c b/net/netfilter/xt_mac.c
index 8160f6b1435d..d5b4fd4f91ed 100644
--- a/net/netfilter/xt_mac.c
+++ b/net/netfilter/xt_mac.c
@@ -36,7 +36,7 @@ static bool mac_mt(const struct sk_buff *skb, struct xt_action_param *par)
return false;
if (skb_mac_header(skb) + ETH_HLEN > skb->data)
return false;
- ret = compare_ether_addr(eth_hdr(skb)->h_source, info->srcaddr) == 0;
+ ret = ether_addr_equal(eth_hdr(skb)->h_source, info->srcaddr);
ret ^= info->invert;
return ret;
}
diff --git a/net/netfilter/xt_set.c b/net/netfilter/xt_set.c
index 0ec8138aa470..035960ec5cb9 100644
--- a/net/netfilter/xt_set.c
+++ b/net/netfilter/xt_set.c
@@ -44,6 +44,14 @@ const struct ip_set_adt_opt n = { \
.cmdflags = cfs, \
.timeout = t, \
}
+#define ADT_MOPT(n, f, d, fs, cfs, t) \
+struct ip_set_adt_opt n = { \
+ .family = f, \
+ .dim = d, \
+ .flags = fs, \
+ .cmdflags = cfs, \
+ .timeout = t, \
+}
/* Revision 0 interface: backward compatible with netfilter/iptables */
@@ -296,11 +304,14 @@ static unsigned int
set_target_v2(struct sk_buff *skb, const struct xt_action_param *par)
{
const struct xt_set_info_target_v2 *info = par->targinfo;
- ADT_OPT(add_opt, par->family, info->add_set.dim,
- info->add_set.flags, info->flags, info->timeout);
+ ADT_MOPT(add_opt, par->family, info->add_set.dim,
+ info->add_set.flags, info->flags, info->timeout);
ADT_OPT(del_opt, par->family, info->del_set.dim,
info->del_set.flags, 0, UINT_MAX);
+ /* Normalize to fit into jiffies */
+ if (add_opt.timeout > UINT_MAX/MSEC_PER_SEC)
+ add_opt.timeout = UINT_MAX/MSEC_PER_SEC;
if (info->add_set.index != IPSET_INVALID_ID)
ip_set_add(info->add_set.index, skb, par, &add_opt);
if (info->del_set.index != IPSET_INVALID_ID)
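
ADT_MOPT is a non-const variant of ADT_OPT so that set_target_v2() can clamp the add-timeout before handing it to ip_set_add(); the comparison against UINT_MAX/MSEC_PER_SEC suggests the timeout is later multiplied by MSEC_PER_SEC on its way to jiffies, which would wrap a 32-bit value for very large timeouts. A small stand-alone illustration of that hazard and of the clamp (MSEC_PER_SEC = 1000 as in the kernel; the downstream conversion is an assumption here):

#include <stdint.h>
#include <stdio.h>

#define MSEC_PER_SEC 1000u

int main(void)
{
        uint32_t timeout = 5000000u;                /* seconds; > UINT_MAX / MSEC_PER_SEC */
        uint32_t wrapped = timeout * MSEC_PER_SEC;  /* 32-bit multiply wraps around */

        printf("unclamped: %u s -> %u ms (wrapped)\n", timeout, wrapped);

        if (timeout > UINT32_MAX / MSEC_PER_SEC)    /* the normalization added above */
                timeout = UINT32_MAX / MSEC_PER_SEC;

        printf("clamped:   %u s -> %u ms\n", timeout, timeout * MSEC_PER_SEC);
        return 0;
}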
diff --git a/net/netfilter/xt_socket.c b/net/netfilter/xt_socket.c
index 72bb07f57f97..9ea482d08cf7 100644
--- a/net/netfilter/xt_socket.c
+++ b/net/netfilter/xt_socket.c
@@ -263,10 +263,10 @@ socket_mt6_v1(const struct sk_buff *skb, struct xt_action_param *par)
struct sock *sk;
struct in6_addr *daddr, *saddr;
__be16 dport, sport;
- int thoff, tproto;
+ int thoff = 0, tproto;
const struct xt_socket_mtinfo1 *info = (struct xt_socket_mtinfo1 *) par->matchinfo;
- tproto = ipv6_find_hdr(skb, &thoff, -1, NULL);
+ tproto = ipv6_find_hdr(skb, &thoff, -1, NULL, NULL);
if (tproto < 0) {
pr_debug("unable to find transport header in IPv6 packet, dropping\n");
return NF_DROP;
diff --git a/net/openvswitch/datapath.c b/net/openvswitch/datapath.c
index f86de29979ef..2c74daa5aca5 100644
--- a/net/openvswitch/datapath.c
+++ b/net/openvswitch/datapath.c
@@ -321,7 +321,7 @@ static int queue_userspace_packet(int dp_ifindex, struct sk_buff *skb,
return -ENOMEM;
nskb = __vlan_put_tag(nskb, vlan_tx_tag_get(nskb));
- if (!skb)
+ if (!nskb)
return -ENOMEM;
nskb->vlan_tci = 0;
@@ -421,6 +421,19 @@ static int validate_sample(const struct nlattr *attr,
return validate_actions(actions, key, depth + 1);
}
+static int validate_tp_port(const struct sw_flow_key *flow_key)
+{
+ if (flow_key->eth.type == htons(ETH_P_IP)) {
+ if (flow_key->ipv4.tp.src && flow_key->ipv4.tp.dst)
+ return 0;
+ } else if (flow_key->eth.type == htons(ETH_P_IPV6)) {
+ if (flow_key->ipv6.tp.src && flow_key->ipv6.tp.dst)
+ return 0;
+ }
+
+ return -EINVAL;
+}
+
static int validate_set(const struct nlattr *a,
const struct sw_flow_key *flow_key)
{
@@ -462,18 +475,13 @@ static int validate_set(const struct nlattr *a,
if (flow_key->ip.proto != IPPROTO_TCP)
return -EINVAL;
- if (!flow_key->ipv4.tp.src || !flow_key->ipv4.tp.dst)
- return -EINVAL;
-
- break;
+ return validate_tp_port(flow_key);
case OVS_KEY_ATTR_UDP:
if (flow_key->ip.proto != IPPROTO_UDP)
return -EINVAL;
- if (!flow_key->ipv4.tp.src || !flow_key->ipv4.tp.dst)
- return -EINVAL;
- break;
+ return validate_tp_port(flow_key);
default:
return -EINVAL;
@@ -1647,10 +1655,9 @@ static int ovs_vport_cmd_set(struct sk_buff *skb, struct genl_info *info)
reply = ovs_vport_cmd_build_info(vport, info->snd_pid, info->snd_seq,
OVS_VPORT_CMD_NEW);
if (IS_ERR(reply)) {
- err = PTR_ERR(reply);
netlink_set_err(init_net.genl_sock, 0,
- ovs_dp_vport_multicast_group.id, err);
- return 0;
+ ovs_dp_vport_multicast_group.id, PTR_ERR(reply));
+ goto exit_unlock;
}
genl_notify(reply, genl_info_net(info), info->snd_pid,
diff --git a/net/openvswitch/flow.c b/net/openvswitch/flow.c
index 7cb416381e87..6d4d8097cf96 100644
--- a/net/openvswitch/flow.c
+++ b/net/openvswitch/flow.c
@@ -183,7 +183,8 @@ void ovs_flow_used(struct sw_flow *flow, struct sk_buff *skb)
u8 tcp_flags = 0;
if (flow->key.eth.type == htons(ETH_P_IP) &&
- flow->key.ip.proto == IPPROTO_TCP) {
+ flow->key.ip.proto == IPPROTO_TCP &&
+ likely(skb->len >= skb_transport_offset(skb) + sizeof(struct tcphdr))) {
u8 *tcp = (u8 *)tcp_hdr(skb);
tcp_flags = *(tcp + TCP_FLAGS_OFFSET) & TCP_FLAG_MASK;
}
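
The added length test keeps ovs_flow_used() from reading past the end of a short skb: the TCP flags byte is only sampled when a full fixed TCP header fits after the transport offset. The same guard over a plain byte buffer, as a sketch; the 20-byte minimum and the flags byte at offset 13 follow the standard TCP header layout, and the kernel code additionally masks the byte with TCP_FLAG_MASK.

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

#define TCP_FLAGS_OFFSET 13             /* flags live in byte 13 of the TCP header */
#define TCP_HDR_MIN      20             /* fixed part of the TCP header */

/* Return the TCP flags byte, or 0 if the packet is too short to hold a
 * full TCP header at transport_off, mirroring the skb->len check above. */
static uint8_t tcp_flags_or_zero(const uint8_t *pkt, size_t pkt_len,
                                 size_t transport_off)
{
        if (pkt_len < transport_off + TCP_HDR_MIN)
                return 0;
        return pkt[transport_off + TCP_FLAGS_OFFSET];
}

int main(void)
{
        uint8_t pkt[60] = { 0 };
        size_t transport_off = 20;      /* pretend: 20-byte IPv4 header, no options */

        pkt[transport_off + TCP_FLAGS_OFFSET] = 0x12;   /* SYN|ACK */
        printf("flags = 0x%02x\n", tcp_flags_or_zero(pkt, sizeof(pkt), transport_off));
        printf("flags = 0x%02x (truncated packet)\n",
               tcp_flags_or_zero(pkt, 30, transport_off));
        return 0;
}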
diff --git a/net/openvswitch/vport-netdev.c b/net/openvswitch/vport-netdev.c
index 5920bda4ab6b..3fd6c0d88e12 100644
--- a/net/openvswitch/vport-netdev.c
+++ b/net/openvswitch/vport-netdev.c
@@ -157,9 +157,9 @@ static int netdev_send(struct vport *vport, struct sk_buff *skb)
int len;
if (unlikely(packet_length(skb) > mtu && !skb_is_gso(skb))) {
- if (net_ratelimit())
- pr_warn("%s: dropped over-mtu packet: %d > %d\n",
- ovs_dp_name(vport->dp), packet_length(skb), mtu);
+ net_warn_ratelimited("%s: dropped over-mtu packet: %d > %d\n",
+ ovs_dp_name(vport->dp),
+ packet_length(skb), mtu);
goto error;
}
diff --git a/net/sched/Kconfig b/net/sched/Kconfig
index 75b58f81d53d..e7a8976bf25c 100644
--- a/net/sched/Kconfig
+++ b/net/sched/Kconfig
@@ -250,6 +250,28 @@ config NET_SCH_QFQ
If unsure, say N.
+config NET_SCH_CODEL
+ tristate "Controlled Delay AQM (CODEL)"
+ help
+ Say Y here if you want to use the Controlled Delay (CODEL)
+ packet scheduling algorithm.
+
+ To compile this driver as a module, choose M here: the module
+ will be called sch_codel.
+
+ If unsure, say N.
+
+config NET_SCH_FQ_CODEL
+ tristate "Fair Queue Controlled Delay AQM (FQ_CODEL)"
+ help
+ Say Y here if you want to use the FQ Controlled Delay (FQ_CODEL)
+ packet scheduling algorithm.
+
+ To compile this driver as a module, choose M here: the module
+ will be called sch_fq_codel.
+
+ If unsure, say N.
+
config NET_SCH_INGRESS
tristate "Ingress Qdisc"
depends on NET_CLS_ACT
diff --git a/net/sched/Makefile b/net/sched/Makefile
index 8cdf4e2b51d3..5940a1992f0d 100644
--- a/net/sched/Makefile
+++ b/net/sched/Makefile
@@ -37,6 +37,8 @@ obj-$(CONFIG_NET_SCH_PLUG) += sch_plug.o
obj-$(CONFIG_NET_SCH_MQPRIO) += sch_mqprio.o
obj-$(CONFIG_NET_SCH_CHOKE) += sch_choke.o
obj-$(CONFIG_NET_SCH_QFQ) += sch_qfq.o
+obj-$(CONFIG_NET_SCH_CODEL) += sch_codel.o
+obj-$(CONFIG_NET_SCH_FQ_CODEL) += sch_fq_codel.o
obj-$(CONFIG_NET_CLS_U32) += cls_u32.o
obj-$(CONFIG_NET_CLS_ROUTE4) += cls_route.o
diff --git a/net/sched/act_csum.c b/net/sched/act_csum.c
index 882124ceb70c..2c8ad7c86e43 100644
--- a/net/sched/act_csum.c
+++ b/net/sched/act_csum.c
@@ -397,7 +397,7 @@ static int tcf_csum_ipv6_hopopts(struct ipv6_opt_hdr *ip6xh,
while (len > 1) {
switch (xh[off]) {
- case IPV6_TLV_PAD0:
+ case IPV6_TLV_PAD1:
optlen = 1;
break;
case IPV6_TLV_JUMBO:
diff --git a/net/sched/act_ipt.c b/net/sched/act_ipt.c
index 0beba0e5312e..60e281ad0f07 100644
--- a/net/sched/act_ipt.c
+++ b/net/sched/act_ipt.c
@@ -1,5 +1,5 @@
/*
- * net/sched/ipt.c iptables target interface
+ * net/sched/ipt.c iptables target interface
*
*TODO: Add other tables. For now we only support the ipv4 table targets
*
@@ -235,9 +235,8 @@ static int tcf_ipt(struct sk_buff *skb, const struct tc_action *a,
result = TC_ACT_PIPE;
break;
default:
- if (net_ratelimit())
- pr_notice("tc filter: Bogus netfilter code"
- " %d assume ACCEPT\n", ret);
+ net_notice_ratelimited("tc filter: Bogus netfilter code %d assume ACCEPT\n",
+ ret);
result = TC_POLICE_OK;
break;
}
diff --git a/net/sched/act_mirred.c b/net/sched/act_mirred.c
index d583aea3b3df..fe81cc18e9e0 100644
--- a/net/sched/act_mirred.c
+++ b/net/sched/act_mirred.c
@@ -174,9 +174,8 @@ static int tcf_mirred(struct sk_buff *skb, const struct tc_action *a,
}
if (!(dev->flags & IFF_UP)) {
- if (net_ratelimit())
- pr_notice("tc mirred to Houston: device %s is down\n",
- dev->name);
+ net_notice_ratelimited("tc mirred to Houston: device %s is down\n",
+ dev->name);
goto out;
}
diff --git a/net/sched/cls_u32.c b/net/sched/cls_u32.c
index 591b006a8c5a..d45373fb00b9 100644
--- a/net/sched/cls_u32.c
+++ b/net/sched/cls_u32.c
@@ -234,8 +234,7 @@ out:
return -1;
deadloop:
- if (net_ratelimit())
- pr_warning("cls_u32: dead loop\n");
+ net_warn_ratelimited("cls_u32: dead loop\n");
return -1;
}
diff --git a/net/sched/ematch.c b/net/sched/ematch.c
index aca233c2b848..3a633debb6df 100644
--- a/net/sched/ematch.c
+++ b/net/sched/ematch.c
@@ -537,9 +537,7 @@ pop_stack:
return res;
stack_overflow:
- if (net_ratelimit())
- pr_warning("tc ematch: local stack overflow,"
- " increase NET_EMATCH_STACK\n");
+ net_warn_ratelimited("tc ematch: local stack overflow, increase NET_EMATCH_STACK\n");
return -1;
}
EXPORT_SYMBOL(__tcf_em_tree_match);
diff --git a/net/sched/sch_api.c b/net/sched/sch_api.c
index d2daefcc205f..085ce53d570a 100644
--- a/net/sched/sch_api.c
+++ b/net/sched/sch_api.c
@@ -1691,12 +1691,10 @@ reclassify:
tp = otp;
if (verd++ >= MAX_REC_LOOP) {
- if (net_ratelimit())
- pr_notice("%s: packet reclassify loop"
- " rule prio %u protocol %02x\n",
- tp->q->ops->id,
- tp->prio & 0xffff,
- ntohs(tp->protocol));
+ net_notice_ratelimited("%s: packet reclassify loop rule prio %u protocol %02x\n",
+ tp->q->ops->id,
+ tp->prio & 0xffff,
+ ntohs(tp->protocol));
return TC_ACT_SHOT;
}
skb->tc_verd = SET_TC_VERD(skb->tc_verd, verd);
diff --git a/net/sched/sch_atm.c b/net/sched/sch_atm.c
index a77a4fbc069a..8522a4793374 100644
--- a/net/sched/sch_atm.c
+++ b/net/sched/sch_atm.c
@@ -423,8 +423,6 @@ drop: __maybe_unused
}
return ret;
}
- qdisc_bstats_update(sch, skb);
- bstats_update(&flow->bstats, skb);
/*
* Okay, this may seem weird. We pretend we've dropped the packet if
* it goes via ATM. The reason for this is that the outer qdisc
@@ -472,6 +470,8 @@ static void sch_atm_dequeue(unsigned long data)
if (unlikely(!skb))
break;
+ qdisc_bstats_update(sch, skb);
+ bstats_update(&flow->bstats, skb);
pr_debug("atm_tc_dequeue: sending on class %p\n", flow);
/* remove any LL header somebody else has attached */
skb_pull(skb, skb_network_offset(skb));
diff --git a/net/sched/sch_codel.c b/net/sched/sch_codel.c
new file mode 100644
index 000000000000..2f9ab17db85a
--- /dev/null
+++ b/net/sched/sch_codel.c
@@ -0,0 +1,276 @@
+/*
+ * Codel - The Controlled-Delay Active Queue Management algorithm
+ *
+ * Copyright (C) 2011-2012 Kathleen Nichols <nichols@pollere.com>
+ * Copyright (C) 2011-2012 Van Jacobson <van@pollere.net>
+ *
+ * Implemented on linux by :
+ * Copyright (C) 2012 Michael D. Taht <dave.taht@bufferbloat.net>
+ * Copyright (C) 2012 Eric Dumazet <edumazet@google.com>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions, and the following disclaimer,
+ * without modification.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. The names of the authors may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * Alternatively, provided that this notice is retained in full, this
+ * software may be distributed under the terms of the GNU General
+ * Public License ("GPL") version 2, in which case the provisions of the
+ * GPL apply INSTEAD OF those given above.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
+ * DAMAGE.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/skbuff.h>
+#include <linux/prefetch.h>
+#include <net/pkt_sched.h>
+#include <net/codel.h>
+
+
+#define DEFAULT_CODEL_LIMIT 1000
+
+struct codel_sched_data {
+ struct codel_params params;
+ struct codel_vars vars;
+ struct codel_stats stats;
+ u32 drop_overlimit;
+};
+
+/* This is the specific function called from codel_dequeue()
+ * to dequeue a packet from the queue. Note: the backlog is handled in
+ * codel, so we don't need to reduce it here.
+ */
+static struct sk_buff *dequeue(struct codel_vars *vars, struct Qdisc *sch)
+{
+ struct sk_buff *skb = __skb_dequeue(&sch->q);
+
+ prefetch(&skb->end); /* we'll need skb_shinfo() */
+ return skb;
+}
+
+static struct sk_buff *codel_qdisc_dequeue(struct Qdisc *sch)
+{
+ struct codel_sched_data *q = qdisc_priv(sch);
+ struct sk_buff *skb;
+
+ skb = codel_dequeue(sch, &q->params, &q->vars, &q->stats, dequeue);
+
+ /* We can't call qdisc_tree_decrease_qlen() if our qlen is 0,
+ * or HTB crashes. Defer it for next round.
+ */
+ if (q->stats.drop_count && sch->q.qlen) {
+ qdisc_tree_decrease_qlen(sch, q->stats.drop_count);
+ q->stats.drop_count = 0;
+ }
+ if (skb)
+ qdisc_bstats_update(sch, skb);
+ return skb;
+}
+
+static int codel_qdisc_enqueue(struct sk_buff *skb, struct Qdisc *sch)
+{
+ struct codel_sched_data *q;
+
+ if (likely(qdisc_qlen(sch) < sch->limit)) {
+ codel_set_enqueue_time(skb);
+ return qdisc_enqueue_tail(skb, sch);
+ }
+ q = qdisc_priv(sch);
+ q->drop_overlimit++;
+ return qdisc_drop(skb, sch);
+}
+
+static const struct nla_policy codel_policy[TCA_CODEL_MAX + 1] = {
+ [TCA_CODEL_TARGET] = { .type = NLA_U32 },
+ [TCA_CODEL_LIMIT] = { .type = NLA_U32 },
+ [TCA_CODEL_INTERVAL] = { .type = NLA_U32 },
+ [TCA_CODEL_ECN] = { .type = NLA_U32 },
+};
+
+static int codel_change(struct Qdisc *sch, struct nlattr *opt)
+{
+ struct codel_sched_data *q = qdisc_priv(sch);
+ struct nlattr *tb[TCA_CODEL_MAX + 1];
+ unsigned int qlen;
+ int err;
+
+ if (!opt)
+ return -EINVAL;
+
+ err = nla_parse_nested(tb, TCA_CODEL_MAX, opt, codel_policy);
+ if (err < 0)
+ return err;
+
+ sch_tree_lock(sch);
+
+ if (tb[TCA_CODEL_TARGET]) {
+ u32 target = nla_get_u32(tb[TCA_CODEL_TARGET]);
+
+ q->params.target = ((u64)target * NSEC_PER_USEC) >> CODEL_SHIFT;
+ }
+
+ if (tb[TCA_CODEL_INTERVAL]) {
+ u32 interval = nla_get_u32(tb[TCA_CODEL_INTERVAL]);
+
+ q->params.interval = ((u64)interval * NSEC_PER_USEC) >> CODEL_SHIFT;
+ }
+
+ if (tb[TCA_CODEL_LIMIT])
+ sch->limit = nla_get_u32(tb[TCA_CODEL_LIMIT]);
+
+ if (tb[TCA_CODEL_ECN])
+ q->params.ecn = !!nla_get_u32(tb[TCA_CODEL_ECN]);
+
+ qlen = sch->q.qlen;
+ while (sch->q.qlen > sch->limit) {
+ struct sk_buff *skb = __skb_dequeue(&sch->q);
+
+ sch->qstats.backlog -= qdisc_pkt_len(skb);
+ qdisc_drop(skb, sch);
+ }
+ qdisc_tree_decrease_qlen(sch, qlen - sch->q.qlen);
+
+ sch_tree_unlock(sch);
+ return 0;
+}
+
+static int codel_init(struct Qdisc *sch, struct nlattr *opt)
+{
+ struct codel_sched_data *q = qdisc_priv(sch);
+
+ sch->limit = DEFAULT_CODEL_LIMIT;
+
+ codel_params_init(&q->params);
+ codel_vars_init(&q->vars);
+ codel_stats_init(&q->stats);
+
+ if (opt) {
+ int err = codel_change(sch, opt);
+
+ if (err)
+ return err;
+ }
+
+ if (sch->limit >= 1)
+ sch->flags |= TCQ_F_CAN_BYPASS;
+ else
+ sch->flags &= ~TCQ_F_CAN_BYPASS;
+
+ return 0;
+}
+
+static int codel_dump(struct Qdisc *sch, struct sk_buff *skb)
+{
+ struct codel_sched_data *q = qdisc_priv(sch);
+ struct nlattr *opts;
+
+ opts = nla_nest_start(skb, TCA_OPTIONS);
+ if (opts == NULL)
+ goto nla_put_failure;
+
+ if (nla_put_u32(skb, TCA_CODEL_TARGET,
+ codel_time_to_us(q->params.target)) ||
+ nla_put_u32(skb, TCA_CODEL_LIMIT,
+ sch->limit) ||
+ nla_put_u32(skb, TCA_CODEL_INTERVAL,
+ codel_time_to_us(q->params.interval)) ||
+ nla_put_u32(skb, TCA_CODEL_ECN,
+ q->params.ecn))
+ goto nla_put_failure;
+
+ return nla_nest_end(skb, opts);
+
+nla_put_failure:
+ nla_nest_cancel(skb, opts);
+ return -1;
+}
+
+static int codel_dump_stats(struct Qdisc *sch, struct gnet_dump *d)
+{
+ const struct codel_sched_data *q = qdisc_priv(sch);
+ struct tc_codel_xstats st = {
+ .maxpacket = q->stats.maxpacket,
+ .count = q->vars.count,
+ .lastcount = q->vars.lastcount,
+ .drop_overlimit = q->drop_overlimit,
+ .ldelay = codel_time_to_us(q->vars.ldelay),
+ .dropping = q->vars.dropping,
+ .ecn_mark = q->stats.ecn_mark,
+ };
+
+ if (q->vars.dropping) {
+ codel_tdiff_t delta = q->vars.drop_next - codel_get_time();
+
+ if (delta >= 0)
+ st.drop_next = codel_time_to_us(delta);
+ else
+ st.drop_next = -codel_time_to_us(-delta);
+ }
+
+ return gnet_stats_copy_app(d, &st, sizeof(st));
+}
+
+static void codel_reset(struct Qdisc *sch)
+{
+ struct codel_sched_data *q = qdisc_priv(sch);
+
+ qdisc_reset_queue(sch);
+ codel_vars_init(&q->vars);
+}
+
+static struct Qdisc_ops codel_qdisc_ops __read_mostly = {
+ .id = "codel",
+ .priv_size = sizeof(struct codel_sched_data),
+
+ .enqueue = codel_qdisc_enqueue,
+ .dequeue = codel_qdisc_dequeue,
+ .peek = qdisc_peek_dequeued,
+ .init = codel_init,
+ .reset = codel_reset,
+ .change = codel_change,
+ .dump = codel_dump,
+ .dump_stats = codel_dump_stats,
+ .owner = THIS_MODULE,
+};
+
+static int __init codel_module_init(void)
+{
+ return register_qdisc(&codel_qdisc_ops);
+}
+
+static void __exit codel_module_exit(void)
+{
+ unregister_qdisc(&codel_qdisc_ops);
+}
+
+module_init(codel_module_init)
+module_exit(codel_module_exit)
+
+MODULE_DESCRIPTION("Controlled Delay queue discipline");
+MODULE_AUTHOR("Dave Taht");
+MODULE_AUTHOR("Eric Dumazet");
+MODULE_LICENSE("Dual BSD/GPL");
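
sch_codel.c above is only the qdisc glue: the FIFO enqueue/dequeue, the netlink attributes and the stats. The algorithm itself sits in include/net/codel.h, and its core is the control law that, once the sojourn time has exceeded the target for a whole interval, schedules the next drop at now + interval / sqrt(count). A floating-point user-space sketch of just that law follows (defaults of target 5 ms and interval 100 ms as in CoDel; the kernel avoids sqrt() with a fixed-point Newton iteration).

#include <math.h>
#include <stdint.h>
#include <stdio.h>

/* CoDel defaults: target sojourn time 5 ms, interval 100 ms */
static const double interval_us = 100000.0;

/* While dropping, the drop frequency grows with the square root of the
 * number of drops since the dropping state was entered. */
static double control_law(double now_us, uint32_t count)
{
        return now_us + interval_us / sqrt((double)count);
}

int main(void)
{
        double now = 0.0;

        for (uint32_t count = 1; count <= 8; count++) {
                now = control_law(now, count);
                printf("drop %u scheduled at t = %.1f us\n", count, now);
        }
        return 0;
}

(Build with -lm; the inter-drop gap shrinks from 100 ms towards roughly 35 ms over the first eight drops, which is the gentle ramp CoDel relies on.)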
diff --git a/net/sched/sch_drr.c b/net/sched/sch_drr.c
index c2189879359b..9ce0b4fe23ff 100644
--- a/net/sched/sch_drr.c
+++ b/net/sched/sch_drr.c
@@ -376,8 +376,6 @@ static int drr_enqueue(struct sk_buff *skb, struct Qdisc *sch)
cl->deficit = cl->quantum;
}
- bstats_update(&cl->bstats, skb);
-
sch->q.qlen++;
return err;
}
@@ -403,6 +401,8 @@ static struct sk_buff *drr_dequeue(struct Qdisc *sch)
skb = qdisc_dequeue_peeked(cl->qdisc);
if (cl->qdisc->q.qlen == 0)
list_del(&cl->alist);
+
+ bstats_update(&cl->bstats, skb);
qdisc_bstats_update(sch, skb);
sch->q.qlen--;
return skb;
diff --git a/net/sched/sch_fq_codel.c b/net/sched/sch_fq_codel.c
new file mode 100644
index 000000000000..9fc1c62ec80e
--- /dev/null
+++ b/net/sched/sch_fq_codel.c
@@ -0,0 +1,626 @@
+/*
+ * Fair Queue CoDel discipline
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ *
+ * Copyright (C) 2012 Eric Dumazet <edumazet@google.com>
+ */
+
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/jiffies.h>
+#include <linux/string.h>
+#include <linux/in.h>
+#include <linux/errno.h>
+#include <linux/init.h>
+#include <linux/skbuff.h>
+#include <linux/jhash.h>
+#include <linux/slab.h>
+#include <linux/vmalloc.h>
+#include <net/netlink.h>
+#include <net/pkt_sched.h>
+#include <net/flow_keys.h>
+#include <net/codel.h>
+
+/* Fair Queue CoDel.
+ *
+ * Principles :
+ * Packets are classified (internal classifier or external) into flows.
+ * This is a stochastic model (as we use a hash, several flows
+ * might be hashed to the same slot).
+ * Each flow has a CoDel-managed queue.
+ * Flows are linked onto two (round-robin) lists,
+ * so that new flows have priority over old ones.
+ *
+ * For a given flow, packets are not reordered (CoDel uses a FIFO);
+ * head drops only.
+ * ECN capability is on by default.
+ * Low memory footprint (64 bytes per flow)
+ */
+
+struct fq_codel_flow {
+ struct sk_buff *head;
+ struct sk_buff *tail;
+ struct list_head flowchain;
+ int deficit;
+ u32 dropped; /* number of drops (or ECN marks) on this flow */
+ struct codel_vars cvars;
+}; /* please try to keep this structure <= 64 bytes */
+
+struct fq_codel_sched_data {
+ struct tcf_proto *filter_list; /* optional external classifier */
+ struct fq_codel_flow *flows; /* Flows table [flows_cnt] */
+ u32 *backlogs; /* backlog table [flows_cnt] */
+ u32 flows_cnt; /* number of flows */
+ u32 perturbation; /* hash perturbation */
+ u32 quantum; /* psched_mtu(qdisc_dev(sch)); */
+ struct codel_params cparams;
+ struct codel_stats cstats;
+ u32 drop_overlimit;
+ u32 new_flow_count;
+
+ struct list_head new_flows; /* list of new flows */
+ struct list_head old_flows; /* list of old flows */
+};
+
+static unsigned int fq_codel_hash(const struct fq_codel_sched_data *q,
+ const struct sk_buff *skb)
+{
+ struct flow_keys keys;
+ unsigned int hash;
+
+ skb_flow_dissect(skb, &keys);
+ hash = jhash_3words((__force u32)keys.dst,
+ (__force u32)keys.src ^ keys.ip_proto,
+ (__force u32)keys.ports, q->perturbation);
+ return ((u64)hash * q->flows_cnt) >> 32;
+}
+
+static unsigned int fq_codel_classify(struct sk_buff *skb, struct Qdisc *sch,
+ int *qerr)
+{
+ struct fq_codel_sched_data *q = qdisc_priv(sch);
+ struct tcf_result res;
+ int result;
+
+ if (TC_H_MAJ(skb->priority) == sch->handle &&
+ TC_H_MIN(skb->priority) > 0 &&
+ TC_H_MIN(skb->priority) <= q->flows_cnt)
+ return TC_H_MIN(skb->priority);
+
+ if (!q->filter_list)
+ return fq_codel_hash(q, skb) + 1;
+
+ *qerr = NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;
+ result = tc_classify(skb, q->filter_list, &res);
+ if (result >= 0) {
+#ifdef CONFIG_NET_CLS_ACT
+ switch (result) {
+ case TC_ACT_STOLEN:
+ case TC_ACT_QUEUED:
+ *qerr = NET_XMIT_SUCCESS | __NET_XMIT_STOLEN;
+ case TC_ACT_SHOT:
+ return 0;
+ }
+#endif
+ if (TC_H_MIN(res.classid) <= q->flows_cnt)
+ return TC_H_MIN(res.classid);
+ }
+ return 0;
+}
+
+/* helper functions : might be changed when/if skb use a standard list_head */
+
+/* remove one skb from head of slot queue */
+static inline struct sk_buff *dequeue_head(struct fq_codel_flow *flow)
+{
+ struct sk_buff *skb = flow->head;
+
+ flow->head = skb->next;
+ skb->next = NULL;
+ return skb;
+}
+
+/* add skb to flow queue (tail add) */
+static inline void flow_queue_add(struct fq_codel_flow *flow,
+ struct sk_buff *skb)
+{
+ if (flow->head == NULL)
+ flow->head = skb;
+ else
+ flow->tail->next = skb;
+ flow->tail = skb;
+ skb->next = NULL;
+}
+
+static unsigned int fq_codel_drop(struct Qdisc *sch)
+{
+ struct fq_codel_sched_data *q = qdisc_priv(sch);
+ struct sk_buff *skb;
+ unsigned int maxbacklog = 0, idx = 0, i, len;
+ struct fq_codel_flow *flow;
+
+ /* Queue is full! Find the fat flow and drop a packet from it.
+ * This might sound expensive, but with 1024 flows, we scan
+ * 4KB of memory, and we don't need to handle a complex tree
+ * in the fast path (packet enqueue/dequeue) with many cache misses.
+ */
+ for (i = 0; i < q->flows_cnt; i++) {
+ if (q->backlogs[i] > maxbacklog) {
+ maxbacklog = q->backlogs[i];
+ idx = i;
+ }
+ }
+ flow = &q->flows[idx];
+ skb = dequeue_head(flow);
+ len = qdisc_pkt_len(skb);
+ q->backlogs[idx] -= len;
+ kfree_skb(skb);
+ sch->q.qlen--;
+ sch->qstats.drops++;
+ sch->qstats.backlog -= len;
+ flow->dropped++;
+ return idx;
+}
+
+static int fq_codel_enqueue(struct sk_buff *skb, struct Qdisc *sch)
+{
+ struct fq_codel_sched_data *q = qdisc_priv(sch);
+ unsigned int idx;
+ struct fq_codel_flow *flow;
+ int uninitialized_var(ret);
+
+ idx = fq_codel_classify(skb, sch, &ret);
+ if (idx == 0) {
+ if (ret & __NET_XMIT_BYPASS)
+ sch->qstats.drops++;
+ kfree_skb(skb);
+ return ret;
+ }
+ idx--;
+
+ codel_set_enqueue_time(skb);
+ flow = &q->flows[idx];
+ flow_queue_add(flow, skb);
+ q->backlogs[idx] += qdisc_pkt_len(skb);
+ sch->qstats.backlog += qdisc_pkt_len(skb);
+
+ if (list_empty(&flow->flowchain)) {
+ list_add_tail(&flow->flowchain, &q->new_flows);
+ codel_vars_init(&flow->cvars);
+ q->new_flow_count++;
+ flow->deficit = q->quantum;
+ flow->dropped = 0;
+ }
+ if (++sch->q.qlen < sch->limit)
+ return NET_XMIT_SUCCESS;
+
+ q->drop_overlimit++;
+ /* Return Congestion Notification only if we dropped a packet
+ * from this flow.
+ */
+ if (fq_codel_drop(sch) == idx)
+ return NET_XMIT_CN;
+
+ /* As we dropped a packet, better let upper stack know this */
+ qdisc_tree_decrease_qlen(sch, 1);
+ return NET_XMIT_SUCCESS;
+}
+
+/* This is the specific function called from codel_dequeue()
+ * to dequeue a packet from the queue. Note: the backlog is handled in
+ * codel, so we don't need to reduce it here.
+ */
+static struct sk_buff *dequeue(struct codel_vars *vars, struct Qdisc *sch)
+{
+ struct fq_codel_sched_data *q = qdisc_priv(sch);
+ struct fq_codel_flow *flow;
+ struct sk_buff *skb = NULL;
+
+ flow = container_of(vars, struct fq_codel_flow, cvars);
+ if (flow->head) {
+ skb = dequeue_head(flow);
+ q->backlogs[flow - q->flows] -= qdisc_pkt_len(skb);
+ sch->q.qlen--;
+ }
+ return skb;
+}
+
+static struct sk_buff *fq_codel_dequeue(struct Qdisc *sch)
+{
+ struct fq_codel_sched_data *q = qdisc_priv(sch);
+ struct sk_buff *skb;
+ struct fq_codel_flow *flow;
+ struct list_head *head;
+ u32 prev_drop_count, prev_ecn_mark;
+
+begin:
+ head = &q->new_flows;
+ if (list_empty(head)) {
+ head = &q->old_flows;
+ if (list_empty(head))
+ return NULL;
+ }
+ flow = list_first_entry(head, struct fq_codel_flow, flowchain);
+
+ if (flow->deficit <= 0) {
+ flow->deficit += q->quantum;
+ list_move_tail(&flow->flowchain, &q->old_flows);
+ goto begin;
+ }
+
+ prev_drop_count = q->cstats.drop_count;
+ prev_ecn_mark = q->cstats.ecn_mark;
+
+ skb = codel_dequeue(sch, &q->cparams, &flow->cvars, &q->cstats,
+ dequeue);
+
+ flow->dropped += q->cstats.drop_count - prev_drop_count;
+ flow->dropped += q->cstats.ecn_mark - prev_ecn_mark;
+
+ if (!skb) {
+ /* force a pass through old_flows to prevent starvation */
+ if ((head == &q->new_flows) && !list_empty(&q->old_flows))
+ list_move_tail(&flow->flowchain, &q->old_flows);
+ else
+ list_del_init(&flow->flowchain);
+ goto begin;
+ }
+ qdisc_bstats_update(sch, skb);
+ flow->deficit -= qdisc_pkt_len(skb);
+ /* We can't call qdisc_tree_decrease_qlen() if our qlen is 0,
+ * or HTB crashes. Defer it for next round.
+ */
+ if (q->cstats.drop_count && sch->q.qlen) {
+ qdisc_tree_decrease_qlen(sch, q->cstats.drop_count);
+ q->cstats.drop_count = 0;
+ }
+ return skb;
+}
+
+static void fq_codel_reset(struct Qdisc *sch)
+{
+ struct sk_buff *skb;
+
+ while ((skb = fq_codel_dequeue(sch)) != NULL)
+ kfree_skb(skb);
+}
+
+static const struct nla_policy fq_codel_policy[TCA_FQ_CODEL_MAX + 1] = {
+ [TCA_FQ_CODEL_TARGET] = { .type = NLA_U32 },
+ [TCA_FQ_CODEL_LIMIT] = { .type = NLA_U32 },
+ [TCA_FQ_CODEL_INTERVAL] = { .type = NLA_U32 },
+ [TCA_FQ_CODEL_ECN] = { .type = NLA_U32 },
+ [TCA_FQ_CODEL_FLOWS] = { .type = NLA_U32 },
+ [TCA_FQ_CODEL_QUANTUM] = { .type = NLA_U32 },
+};
+
+static int fq_codel_change(struct Qdisc *sch, struct nlattr *opt)
+{
+ struct fq_codel_sched_data *q = qdisc_priv(sch);
+ struct nlattr *tb[TCA_FQ_CODEL_MAX + 1];
+ int err;
+
+ if (!opt)
+ return -EINVAL;
+
+ err = nla_parse_nested(tb, TCA_FQ_CODEL_MAX, opt, fq_codel_policy);
+ if (err < 0)
+ return err;
+ if (tb[TCA_FQ_CODEL_FLOWS]) {
+ if (q->flows)
+ return -EINVAL;
+ q->flows_cnt = nla_get_u32(tb[TCA_FQ_CODEL_FLOWS]);
+ if (!q->flows_cnt ||
+ q->flows_cnt > 65536)
+ return -EINVAL;
+ }
+ sch_tree_lock(sch);
+
+ if (tb[TCA_FQ_CODEL_TARGET]) {
+ u64 target = nla_get_u32(tb[TCA_FQ_CODEL_TARGET]);
+
+ q->cparams.target = (target * NSEC_PER_USEC) >> CODEL_SHIFT;
+ }
+
+ if (tb[TCA_FQ_CODEL_INTERVAL]) {
+ u64 interval = nla_get_u32(tb[TCA_FQ_CODEL_INTERVAL]);
+
+ q->cparams.interval = (interval * NSEC_PER_USEC) >> CODEL_SHIFT;
+ }
+
+ if (tb[TCA_FQ_CODEL_LIMIT])
+ sch->limit = nla_get_u32(tb[TCA_FQ_CODEL_LIMIT]);
+
+ if (tb[TCA_FQ_CODEL_ECN])
+ q->cparams.ecn = !!nla_get_u32(tb[TCA_FQ_CODEL_ECN]);
+
+ if (tb[TCA_FQ_CODEL_QUANTUM])
+ q->quantum = max(256U, nla_get_u32(tb[TCA_FQ_CODEL_QUANTUM]));
+
+ while (sch->q.qlen > sch->limit) {
+ struct sk_buff *skb = fq_codel_dequeue(sch);
+
+ kfree_skb(skb);
+ q->cstats.drop_count++;
+ }
+ qdisc_tree_decrease_qlen(sch, q->cstats.drop_count);
+ q->cstats.drop_count = 0;
+
+ sch_tree_unlock(sch);
+ return 0;
+}
+
+static void *fq_codel_zalloc(size_t sz)
+{
+ void *ptr = kzalloc(sz, GFP_KERNEL | __GFP_NOWARN);
+
+ if (!ptr)
+ ptr = vzalloc(sz);
+ return ptr;
+}
+
+static void fq_codel_free(void *addr)
+{
+ if (addr) {
+ if (is_vmalloc_addr(addr))
+ vfree(addr);
+ else
+ kfree(addr);
+ }
+}
+
+static void fq_codel_destroy(struct Qdisc *sch)
+{
+ struct fq_codel_sched_data *q = qdisc_priv(sch);
+
+ tcf_destroy_chain(&q->filter_list);
+ fq_codel_free(q->backlogs);
+ fq_codel_free(q->flows);
+}
+
+static int fq_codel_init(struct Qdisc *sch, struct nlattr *opt)
+{
+ struct fq_codel_sched_data *q = qdisc_priv(sch);
+ int i;
+
+ sch->limit = 10*1024;
+ q->flows_cnt = 1024;
+ q->quantum = psched_mtu(qdisc_dev(sch));
+ q->perturbation = net_random();
+ INIT_LIST_HEAD(&q->new_flows);
+ INIT_LIST_HEAD(&q->old_flows);
+ codel_params_init(&q->cparams);
+ codel_stats_init(&q->cstats);
+ q->cparams.ecn = true;
+
+ if (opt) {
+ int err = fq_codel_change(sch, opt);
+ if (err)
+ return err;
+ }
+
+ if (!q->flows) {
+ q->flows = fq_codel_zalloc(q->flows_cnt *
+ sizeof(struct fq_codel_flow));
+ if (!q->flows)
+ return -ENOMEM;
+ q->backlogs = fq_codel_zalloc(q->flows_cnt * sizeof(u32));
+ if (!q->backlogs) {
+ fq_codel_free(q->flows);
+ return -ENOMEM;
+ }
+ for (i = 0; i < q->flows_cnt; i++) {
+ struct fq_codel_flow *flow = q->flows + i;
+
+ INIT_LIST_HEAD(&flow->flowchain);
+ }
+ }
+ if (sch->limit >= 1)
+ sch->flags |= TCQ_F_CAN_BYPASS;
+ else
+ sch->flags &= ~TCQ_F_CAN_BYPASS;
+ return 0;
+}
+
+static int fq_codel_dump(struct Qdisc *sch, struct sk_buff *skb)
+{
+ struct fq_codel_sched_data *q = qdisc_priv(sch);
+ struct nlattr *opts;
+
+ opts = nla_nest_start(skb, TCA_OPTIONS);
+ if (opts == NULL)
+ goto nla_put_failure;
+
+ if (nla_put_u32(skb, TCA_FQ_CODEL_TARGET,
+ codel_time_to_us(q->cparams.target)) ||
+ nla_put_u32(skb, TCA_FQ_CODEL_LIMIT,
+ sch->limit) ||
+ nla_put_u32(skb, TCA_FQ_CODEL_INTERVAL,
+ codel_time_to_us(q->cparams.interval)) ||
+ nla_put_u32(skb, TCA_FQ_CODEL_ECN,
+ q->cparams.ecn) ||
+ nla_put_u32(skb, TCA_FQ_CODEL_QUANTUM,
+ q->quantum) ||
+ nla_put_u32(skb, TCA_FQ_CODEL_FLOWS,
+ q->flows_cnt))
+ goto nla_put_failure;
+
+ nla_nest_end(skb, opts);
+ return skb->len;
+
+nla_put_failure:
+ return -1;
+}
+
+static int fq_codel_dump_stats(struct Qdisc *sch, struct gnet_dump *d)
+{
+ struct fq_codel_sched_data *q = qdisc_priv(sch);
+ struct tc_fq_codel_xstats st = {
+ .type = TCA_FQ_CODEL_XSTATS_QDISC,
+ };
+ struct list_head *pos;
+
+ st.qdisc_stats.maxpacket = q->cstats.maxpacket;
+ st.qdisc_stats.drop_overlimit = q->drop_overlimit;
+ st.qdisc_stats.ecn_mark = q->cstats.ecn_mark;
+ st.qdisc_stats.new_flow_count = q->new_flow_count;
+
+ list_for_each(pos, &q->new_flows)
+ st.qdisc_stats.new_flows_len++;
+
+ list_for_each(pos, &q->old_flows)
+ st.qdisc_stats.old_flows_len++;
+
+ return gnet_stats_copy_app(d, &st, sizeof(st));
+}
+
+static struct Qdisc *fq_codel_leaf(struct Qdisc *sch, unsigned long arg)
+{
+ return NULL;
+}
+
+static unsigned long fq_codel_get(struct Qdisc *sch, u32 classid)
+{
+ return 0;
+}
+
+static unsigned long fq_codel_bind(struct Qdisc *sch, unsigned long parent,
+ u32 classid)
+{
+ /* we cannot bypass queue discipline anymore */
+ sch->flags &= ~TCQ_F_CAN_BYPASS;
+ return 0;
+}
+
+static void fq_codel_put(struct Qdisc *q, unsigned long cl)
+{
+}
+
+static struct tcf_proto **fq_codel_find_tcf(struct Qdisc *sch, unsigned long cl)
+{
+ struct fq_codel_sched_data *q = qdisc_priv(sch);
+
+ if (cl)
+ return NULL;
+ return &q->filter_list;
+}
+
+static int fq_codel_dump_class(struct Qdisc *sch, unsigned long cl,
+ struct sk_buff *skb, struct tcmsg *tcm)
+{
+ tcm->tcm_handle |= TC_H_MIN(cl);
+ return 0;
+}
+
+static int fq_codel_dump_class_stats(struct Qdisc *sch, unsigned long cl,
+ struct gnet_dump *d)
+{
+ struct fq_codel_sched_data *q = qdisc_priv(sch);
+ u32 idx = cl - 1;
+ struct gnet_stats_queue qs = { 0 };
+ struct tc_fq_codel_xstats xstats;
+
+ if (idx < q->flows_cnt) {
+ const struct fq_codel_flow *flow = &q->flows[idx];
+ const struct sk_buff *skb = flow->head;
+
+ memset(&xstats, 0, sizeof(xstats));
+ xstats.type = TCA_FQ_CODEL_XSTATS_CLASS;
+ xstats.class_stats.deficit = flow->deficit;
+ xstats.class_stats.ldelay =
+ codel_time_to_us(flow->cvars.ldelay);
+ xstats.class_stats.count = flow->cvars.count;
+ xstats.class_stats.lastcount = flow->cvars.lastcount;
+ xstats.class_stats.dropping = flow->cvars.dropping;
+ if (flow->cvars.dropping) {
+ codel_tdiff_t delta = flow->cvars.drop_next -
+ codel_get_time();
+
+ xstats.class_stats.drop_next = (delta >= 0) ?
+ codel_time_to_us(delta) :
+ -codel_time_to_us(-delta);
+ }
+ while (skb) {
+ qs.qlen++;
+ skb = skb->next;
+ }
+ qs.backlog = q->backlogs[idx];
+ qs.drops = flow->dropped;
+ }
+ if (gnet_stats_copy_queue(d, &qs) < 0)
+ return -1;
+ if (idx < q->flows_cnt)
+ return gnet_stats_copy_app(d, &xstats, sizeof(xstats));
+ return 0;
+}
+
+static void fq_codel_walk(struct Qdisc *sch, struct qdisc_walker *arg)
+{
+ struct fq_codel_sched_data *q = qdisc_priv(sch);
+ unsigned int i;
+
+ if (arg->stop)
+ return;
+
+ for (i = 0; i < q->flows_cnt; i++) {
+ if (list_empty(&q->flows[i].flowchain) ||
+ arg->count < arg->skip) {
+ arg->count++;
+ continue;
+ }
+ if (arg->fn(sch, i + 1, arg) < 0) {
+ arg->stop = 1;
+ break;
+ }
+ arg->count++;
+ }
+}
+
+static const struct Qdisc_class_ops fq_codel_class_ops = {
+ .leaf = fq_codel_leaf,
+ .get = fq_codel_get,
+ .put = fq_codel_put,
+ .tcf_chain = fq_codel_find_tcf,
+ .bind_tcf = fq_codel_bind,
+ .unbind_tcf = fq_codel_put,
+ .dump = fq_codel_dump_class,
+ .dump_stats = fq_codel_dump_class_stats,
+ .walk = fq_codel_walk,
+};
+
+static struct Qdisc_ops fq_codel_qdisc_ops __read_mostly = {
+ .cl_ops = &fq_codel_class_ops,
+ .id = "fq_codel",
+ .priv_size = sizeof(struct fq_codel_sched_data),
+ .enqueue = fq_codel_enqueue,
+ .dequeue = fq_codel_dequeue,
+ .peek = qdisc_peek_dequeued,
+ .drop = fq_codel_drop,
+ .init = fq_codel_init,
+ .reset = fq_codel_reset,
+ .destroy = fq_codel_destroy,
+ .change = fq_codel_change,
+ .dump = fq_codel_dump,
+ .dump_stats = fq_codel_dump_stats,
+ .owner = THIS_MODULE,
+};
+
+static int __init fq_codel_module_init(void)
+{
+ return register_qdisc(&fq_codel_qdisc_ops);
+}
+
+static void __exit fq_codel_module_exit(void)
+{
+ unregister_qdisc(&fq_codel_qdisc_ops);
+}
+
+module_init(fq_codel_module_init)
+module_exit(fq_codel_module_exit)
+MODULE_AUTHOR("Eric Dumazet");
+MODULE_LICENSE("GPL");
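
The fq_codel change handler above converts the TCA_FQ_CODEL_TARGET and TCA_FQ_CODEL_INTERVAL netlink attributes from microseconds into CoDel's internal time unit by multiplying by NSEC_PER_USEC and shifting right by CODEL_SHIFT. A minimal user-space sketch of that arithmetic, assuming CODEL_SHIFT is 10 and NSEC_PER_USEC is 1000 as in the kernel headers of this series; the helper names are hypothetical and the 5 ms / 100 ms values are the usual CoDel defaults:

#include <stdint.h>
#include <stdio.h>

#define NSEC_PER_USEC 1000ULL
#define CODEL_SHIFT   10   /* assumed to match include/net/codel.h */

/* hypothetical helpers mirroring the qdisc's microsecond conversion */
static uint32_t us_to_codel_time(uint64_t us)
{
	return (uint32_t)((us * NSEC_PER_USEC) >> CODEL_SHIFT);
}

static uint64_t codel_time_to_us_approx(uint32_t val)
{
	return ((uint64_t)val << CODEL_SHIFT) / NSEC_PER_USEC;
}

int main(void)
{
	uint32_t target   = us_to_codel_time(5000);    /* 5 ms target */
	uint32_t interval = us_to_codel_time(100000);  /* 100 ms interval */

	printf("target=%u interval=%u units (~1.024 us each)\n",
	       target, interval);
	printf("target converted back: %llu us\n",
	       (unsigned long long)codel_time_to_us_approx(target));
	return 0;
}
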
diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c
index 0eb1202c22a6..511323e89cec 100644
--- a/net/sched/sch_generic.c
+++ b/net/sched/sch_generic.c
@@ -86,9 +86,8 @@ static inline int handle_dev_cpu_collision(struct sk_buff *skb,
* deadloop is detected. Return OK to try the next skb.
*/
kfree_skb(skb);
- if (net_ratelimit())
- pr_warning("Dead loop on netdevice %s, fix it urgently!\n",
- dev_queue->dev->name);
+ net_warn_ratelimited("Dead loop on netdevice %s, fix it urgently!\n",
+ dev_queue->dev->name);
ret = qdisc_qlen(q);
} else {
/*
@@ -136,9 +135,9 @@ int sch_direct_xmit(struct sk_buff *skb, struct Qdisc *q,
ret = handle_dev_cpu_collision(skb, txq, q);
} else {
/* Driver returned NETDEV_TX_BUSY - requeue skb */
- if (unlikely (ret != NETDEV_TX_BUSY && net_ratelimit()))
- pr_warning("BUG %s code %d qlen %d\n",
- dev->name, ret, q->q.qlen);
+ if (unlikely(ret != NETDEV_TX_BUSY))
+ net_warn_ratelimited("BUG %s code %d qlen %d\n",
+ dev->name, ret, q->q.qlen);
ret = dev_requeue_skb(skb, q);
}
diff --git a/net/sched/sch_gred.c b/net/sched/sch_gred.c
index ab620bf90785..e901583e4ea5 100644
--- a/net/sched/sch_gred.c
+++ b/net/sched/sch_gred.c
@@ -255,10 +255,8 @@ static struct sk_buff *gred_dequeue(struct Qdisc *sch)
u16 dp = tc_index_to_dp(skb);
if (dp >= t->DPs || (q = t->tab[dp]) == NULL) {
- if (net_ratelimit())
- pr_warning("GRED: Unable to relocate VQ 0x%x "
- "after dequeue, screwing up "
- "backlog.\n", tc_index_to_dp(skb));
+ net_warn_ratelimited("GRED: Unable to relocate VQ 0x%x after dequeue, screwing up backlog\n",
+ tc_index_to_dp(skb));
} else {
q->backlog -= qdisc_pkt_len(skb);
@@ -287,10 +285,8 @@ static unsigned int gred_drop(struct Qdisc *sch)
u16 dp = tc_index_to_dp(skb);
if (dp >= t->DPs || (q = t->tab[dp]) == NULL) {
- if (net_ratelimit())
- pr_warning("GRED: Unable to relocate VQ 0x%x "
- "while dropping, screwing up "
- "backlog.\n", tc_index_to_dp(skb));
+ net_warn_ratelimited("GRED: Unable to relocate VQ 0x%x while dropping, screwing up backlog\n",
+ tc_index_to_dp(skb));
} else {
q->backlog -= len;
q->stats.other++;
diff --git a/net/sched/sch_hfsc.c b/net/sched/sch_hfsc.c
index 8db3e2c72827..6c2ec4510540 100644
--- a/net/sched/sch_hfsc.c
+++ b/net/sched/sch_hfsc.c
@@ -1609,7 +1609,6 @@ hfsc_enqueue(struct sk_buff *skb, struct Qdisc *sch)
if (cl->qdisc->q.qlen == 1)
set_active(cl, qdisc_pkt_len(skb));
- bstats_update(&cl->bstats, skb);
sch->q.qlen++;
return NET_XMIT_SUCCESS;
@@ -1657,6 +1656,7 @@ hfsc_dequeue(struct Qdisc *sch)
return NULL;
}
+ bstats_update(&cl->bstats, skb);
update_vf(cl, qdisc_pkt_len(skb), cur_time);
if (realtime)
cl->cl_cumul += qdisc_pkt_len(skb);
diff --git a/net/sched/sch_htb.c b/net/sched/sch_htb.c
index acae5b0e3849..9d75b7761313 100644
--- a/net/sched/sch_htb.c
+++ b/net/sched/sch_htb.c
@@ -574,7 +574,6 @@ static int htb_enqueue(struct sk_buff *skb, struct Qdisc *sch)
}
return ret;
} else {
- bstats_update(&cl->bstats, skb);
htb_activate(q, cl);
}
@@ -835,6 +834,7 @@ next:
} while (cl != start);
if (likely(skb != NULL)) {
+ bstats_update(&cl->bstats, skb);
cl->un.leaf.deficit[level] -= qdisc_pkt_len(skb);
if (cl->un.leaf.deficit[level] < 0) {
cl->un.leaf.deficit[level] += cl->quantum;
diff --git a/net/sctp/output.c b/net/sctp/output.c
index 69534c5f8afa..f1b7d4bb591e 100644
--- a/net/sctp/output.c
+++ b/net/sctp/output.c
@@ -377,9 +377,7 @@ int sctp_packet_transmit(struct sctp_packet *packet)
*/
skb_set_owner_w(nskb, sk);
- /* The 'obsolete' field of dst is set to 2 when a dst is freed. */
- if (!dst || (dst->obsolete > 1)) {
- dst_release(dst);
+ if (!sctp_transport_dst_check(tp)) {
sctp_transport_route(tp, NULL, sctp_sk(sk));
if (asoc && (asoc->param_flags & SPP_PMTUD_ENABLE)) {
sctp_assoc_sync_pmtu(asoc);
diff --git a/net/sctp/sm_sideeffect.c b/net/sctp/sm_sideeffect.c
index fbb374c65945..c96d1a81cf42 100644
--- a/net/sctp/sm_sideeffect.c
+++ b/net/sctp/sm_sideeffect.c
@@ -1161,9 +1161,8 @@ static int sctp_side_effects(sctp_event_t event_type, sctp_subtype_t subtype,
break;
case SCTP_DISPOSITION_VIOLATION:
- if (net_ratelimit())
- pr_err("protocol violation state %d chunkid %d\n",
- state, subtype.chunk);
+ net_err_ratelimited("protocol violation state %d chunkid %d\n",
+ state, subtype.chunk);
break;
case SCTP_DISPOSITION_NOT_IMPL:
diff --git a/net/sctp/sm_statefuns.c b/net/sctp/sm_statefuns.c
index a147b4d307d2..9fca10357350 100644
--- a/net/sctp/sm_statefuns.c
+++ b/net/sctp/sm_statefuns.c
@@ -1129,17 +1129,15 @@ sctp_disposition_t sctp_sf_backbeat_8_3(const struct sctp_endpoint *ep,
/* This should never happen, but lets log it if so. */
if (unlikely(!link)) {
if (from_addr.sa.sa_family == AF_INET6) {
- if (net_ratelimit())
- pr_warn("%s association %p could not find address %pI6\n",
- __func__,
- asoc,
- &from_addr.v6.sin6_addr);
+ net_warn_ratelimited("%s association %p could not find address %pI6\n",
+ __func__,
+ asoc,
+ &from_addr.v6.sin6_addr);
} else {
- if (net_ratelimit())
- pr_warn("%s association %p could not find address %pI4\n",
- __func__,
- asoc,
- &from_addr.v4.sin_addr.s_addr);
+ net_warn_ratelimited("%s association %p could not find address %pI4\n",
+ __func__,
+ asoc,
+ &from_addr.v4.sin_addr.s_addr);
}
return SCTP_DISPOSITION_DISCARD;
}
diff --git a/net/sctp/socket.c b/net/sctp/socket.c
index 92ba71dfe080..b3b8a8d813eb 100644
--- a/net/sctp/socket.c
+++ b/net/sctp/socket.c
@@ -5840,10 +5840,8 @@ SCTP_STATIC int sctp_listen_start(struct sock *sk, int backlog)
if (!sctp_sk(sk)->hmac && sctp_hmac_alg) {
tfm = crypto_alloc_hash(sctp_hmac_alg, 0, CRYPTO_ALG_ASYNC);
if (IS_ERR(tfm)) {
- if (net_ratelimit()) {
- pr_info("failed to load transform for %s: %ld\n",
- sctp_hmac_alg, PTR_ERR(tfm));
- }
+ net_info_ratelimited("failed to load transform for %s: %ld\n",
+ sctp_hmac_alg, PTR_ERR(tfm));
return -ENOSYS;
}
sctp_sk(sk)->hmac = tfm;
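
The sctp_listen_start() hunk keeps the usual error-pointer handling around crypto_alloc_hash(): the allocator encodes a negative errno inside the returned pointer, and IS_ERR()/PTR_ERR() unpack it. A small user-space sketch of that idiom, with simplified constants and a hypothetical alloc_transform() standing in for the crypto call:

#include <stdio.h>
#include <errno.h>

/* simplified user-space mock of the kernel's err.h pointer-error idiom */
#define MAX_ERRNO 4095

static void *ERR_PTR(long error)      { return (void *)error; }
static long  PTR_ERR(const void *ptr) { return (long)ptr; }
static int   IS_ERR(const void *ptr)
{
	return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
}

/* hypothetical allocator standing in for crypto_alloc_hash() */
static void *alloc_transform(int should_fail)
{
	static char fake_tfm;

	return should_fail ? ERR_PTR(-ENOENT) : (void *)&fake_tfm;
}

int main(void)
{
	void *tfm = alloc_transform(1);

	if (IS_ERR(tfm)) {
		printf("failed to load transform: %ld\n", PTR_ERR(tfm));
		return 1;
	}
	return 0;
}
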
diff --git a/net/sctp/transport.c b/net/sctp/transport.c
index 3889330b7b04..b026ba0c6992 100644
--- a/net/sctp/transport.c
+++ b/net/sctp/transport.c
@@ -226,23 +226,6 @@ void sctp_transport_pmtu(struct sctp_transport *transport, struct sock *sk)
transport->pathmtu = SCTP_DEFAULT_MAXSEGMENT;
}
-/* this is a complete rip-off from __sk_dst_check
- * the cookie is always 0 since this is how it's used in the
- * pmtu code
- */
-static struct dst_entry *sctp_transport_dst_check(struct sctp_transport *t)
-{
- struct dst_entry *dst = t->dst;
-
- if (dst && dst->obsolete && dst->ops->check(dst, 0) == NULL) {
- dst_release(t->dst);
- t->dst = NULL;
- return NULL;
- }
-
- return dst;
-}
-
void sctp_transport_update_pmtu(struct sctp_transport *t, u32 pmtu)
{
struct dst_entry *dst;
diff --git a/net/socket.c b/net/socket.c
index d3aaa4f67a3b..2a2898ce596e 100644
--- a/net/socket.c
+++ b/net/socket.c
@@ -1234,8 +1234,7 @@ int __sock_create(struct net *net, int family, int type, int protocol,
*/
sock = sock_alloc();
if (!sock) {
- if (net_ratelimit())
- printk(KERN_WARNING "socket: no more sockets\n");
+ net_warn_ratelimited("socket: no more sockets\n");
return -ENFILE; /* Not exactly a match, but its the
closest posix thing */
}
diff --git a/net/sunrpc/auth_gss/gss_mech_switch.c b/net/sunrpc/auth_gss/gss_mech_switch.c
index ca8cad8251c7..782bfe1b6465 100644
--- a/net/sunrpc/auth_gss/gss_mech_switch.c
+++ b/net/sunrpc/auth_gss/gss_mech_switch.c
@@ -242,12 +242,13 @@ EXPORT_SYMBOL_GPL(gss_mech_get_by_pseudoflavor);
int gss_mech_list_pseudoflavors(rpc_authflavor_t *array_ptr)
{
struct gss_api_mech *pos = NULL;
- int i = 0;
+ int j, i = 0;
spin_lock(&registered_mechs_lock);
list_for_each_entry(pos, &registered_mechs, gm_list) {
- array_ptr[i] = pos->gm_pfs->pseudoflavor;
- i++;
+ for (j=0; j < pos->gm_pf_num; j++) {
+ array_ptr[i++] = pos->gm_pfs[j].pseudoflavor;
+ }
}
spin_unlock(&registered_mechs_lock);
return i;
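
The gss_mech_switch.c fix matters because pos->gm_pfs points at an array of gm_pf_num pseudoflavor descriptors; the old loop copied only element zero of each registered mechanism. A stand-alone sketch of the corrected flattening, with hypothetical struct names and krb5-style pseudoflavor numbers used purely for illustration:

#include <stdio.h>

/* hypothetical stand-ins for struct gss_api_mech and its pf table */
struct pf   { unsigned int pseudoflavor; };
struct mech { const struct pf *pfs; int pf_num; };

/* copy every pseudoflavor of every mechanism, as the fixed loop now does */
static int list_pseudoflavors(const struct mech *mechs, int n_mechs,
			      unsigned int *array_ptr)
{
	int m, j, i = 0;

	for (m = 0; m < n_mechs; m++)
		for (j = 0; j < mechs[m].pf_num; j++)
			array_ptr[i++] = mechs[m].pfs[j].pseudoflavor;
	return i;
}

int main(void)
{
	const struct pf krb5[] = { { 390003 }, { 390004 }, { 390005 } };
	const struct mech mechs[] = { { krb5, 3 } };
	unsigned int out[8];
	int i, n = list_pseudoflavors(mechs, 1, out);

	for (i = 0; i < n; i++)
		printf("pseudoflavor %u\n", out[i]);
	return 0;
}
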
diff --git a/net/sunrpc/clnt.c b/net/sunrpc/clnt.c
index adf2990acebf..7fee13b331d1 100644
--- a/net/sunrpc/clnt.c
+++ b/net/sunrpc/clnt.c
@@ -127,9 +127,7 @@ static struct dentry *rpc_setup_pipedir_sb(struct super_block *sb,
{
static uint32_t clntid;
char name[15];
- struct qstr q = {
- .name = name,
- };
+ struct qstr q = { .name = name };
struct dentry *dir, *dentry;
int error;
diff --git a/net/sunrpc/rpc_pipe.c b/net/sunrpc/rpc_pipe.c
index 3b62cf288031..fd2423991c2d 100644
--- a/net/sunrpc/rpc_pipe.c
+++ b/net/sunrpc/rpc_pipe.c
@@ -1059,12 +1059,9 @@ static const struct rpc_filelist files[] = {
struct dentry *rpc_d_lookup_sb(const struct super_block *sb,
const unsigned char *dir_name)
{
- struct qstr dir = {
- .name = dir_name,
- .len = strlen(dir_name),
- .hash = full_name_hash(dir_name, strlen(dir_name)),
- };
+ struct qstr dir = QSTR_INIT(dir_name, strlen(dir_name));
+ dir.hash = full_name_hash(dir.name, dir.len);
return d_lookup(sb->s_root, &dir);
}
EXPORT_SYMBOL_GPL(rpc_d_lookup_sb);
diff --git a/net/sunrpc/svc.c b/net/sunrpc/svc.c
index 4153846984ac..017c0117d154 100644
--- a/net/sunrpc/svc.c
+++ b/net/sunrpc/svc.c
@@ -1041,23 +1041,21 @@ static void svc_unregister(const struct svc_serv *serv, struct net *net)
* Printk the given error with the address of the client that caused it.
*/
static __printf(2, 3)
-int svc_printk(struct svc_rqst *rqstp, const char *fmt, ...)
+void svc_printk(struct svc_rqst *rqstp, const char *fmt, ...)
{
+ struct va_format vaf;
va_list args;
- int r;
char buf[RPC_MAX_ADDRBUFLEN];
- if (!net_ratelimit())
- return 0;
+ va_start(args, fmt);
- printk(KERN_WARNING "svc: %s: ",
- svc_print_addr(rqstp, buf, sizeof(buf)));
+ vaf.fmt = fmt;
+ vaf.va = &args;
- va_start(args, fmt);
- r = vprintk(fmt, args);
- va_end(args);
+ net_warn_ratelimited("svc: %s: %pV",
+ svc_print_addr(rqstp, buf, sizeof(buf)), &vaf);
- return r;
+ va_end(args);
}
/*
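
The rewritten svc_printk() above uses the struct va_format / %pV idiom so the address prefix and the caller-supplied format are emitted by one ratelimited printk instead of two unsynchronized ones. A kernel-only sketch of that idiom in isolation; the wrapper name is hypothetical, and struct va_format comes from the printk headers:

#include <linux/kernel.h>
#include <linux/net.h>

static __printf(2, 3)
void example_warn_prefixed(const char *prefix, const char *fmt, ...)
{
	struct va_format vaf;
	va_list args;

	va_start(args, fmt);
	vaf.fmt = fmt;
	vaf.va = &args;
	/* one ratelimited line carries both the prefix and the message */
	net_warn_ratelimited("%s: %pV", prefix, &vaf);
	va_end(args);
}
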
diff --git a/net/sunrpc/svc_xprt.c b/net/sunrpc/svc_xprt.c
index 4bda09d7e1a4..b98ee3514912 100644
--- a/net/sunrpc/svc_xprt.c
+++ b/net/sunrpc/svc_xprt.c
@@ -544,14 +544,11 @@ static void svc_check_conn_limits(struct svc_serv *serv)
struct svc_xprt *xprt = NULL;
spin_lock_bh(&serv->sv_lock);
if (!list_empty(&serv->sv_tempsocks)) {
- if (net_ratelimit()) {
- /* Try to help the admin */
- printk(KERN_NOTICE "%s: too many open "
- "connections, consider increasing %s\n",
- serv->sv_name, serv->sv_maxconn ?
- "the max number of connections." :
- "the number of threads.");
- }
+ /* Try to help the admin */
+ net_notice_ratelimited("%s: too many open connections, consider increasing the %s\n",
+ serv->sv_name, serv->sv_maxconn ?
+ "max number of connections" :
+ "number of threads");
/*
* Always select the oldest connection. It's not fair,
* but so is life
diff --git a/net/sunrpc/svcsock.c b/net/sunrpc/svcsock.c
index f0132b2e875e..a6de09de5d21 100644
--- a/net/sunrpc/svcsock.c
+++ b/net/sunrpc/svcsock.c
@@ -617,11 +617,8 @@ static int svc_udp_recvfrom(struct svc_rqst *rqstp)
rqstp->rq_prot = IPPROTO_UDP;
if (!svc_udp_get_dest_address(rqstp, cmh)) {
- if (net_ratelimit())
- printk(KERN_WARNING
- "svc: received unknown control message %d/%d; "
- "dropping RPC reply datagram\n",
- cmh->cmsg_level, cmh->cmsg_type);
+ net_warn_ratelimited("svc: received unknown control message %d/%d; dropping RPC reply datagram\n",
+ cmh->cmsg_level, cmh->cmsg_type);
skb_free_datagram_locked(svsk->sk_sk, skb);
return 0;
}
@@ -871,18 +868,17 @@ static struct svc_xprt *svc_tcp_accept(struct svc_xprt *xprt)
if (err == -ENOMEM)
printk(KERN_WARNING "%s: no more sockets!\n",
serv->sv_name);
- else if (err != -EAGAIN && net_ratelimit())
- printk(KERN_WARNING "%s: accept failed (err %d)!\n",
- serv->sv_name, -err);
+ else if (err != -EAGAIN)
+ net_warn_ratelimited("%s: accept failed (err %d)!\n",
+ serv->sv_name, -err);
return NULL;
}
set_bit(XPT_CONN, &svsk->sk_xprt.xpt_flags);
err = kernel_getpeername(newsock, sin, &slen);
if (err < 0) {
- if (net_ratelimit())
- printk(KERN_WARNING "%s: peername failed (err %d)!\n",
- serv->sv_name, -err);
+ net_warn_ratelimited("%s: peername failed (err %d)!\n",
+ serv->sv_name, -err);
goto failed; /* aborted connection or whatever */
}
@@ -1012,19 +1008,15 @@ static int svc_tcp_recv_record(struct svc_sock *svsk, struct svc_rqst *rqstp)
* bit set in the fragment length header.
* But apparently no known nfs clients send fragmented
* records. */
- if (net_ratelimit())
- printk(KERN_NOTICE "RPC: multiple fragments "
- "per record not supported\n");
+ net_notice_ratelimited("RPC: multiple fragments per record not supported\n");
goto err_delete;
}
svsk->sk_reclen &= RPC_FRAGMENT_SIZE_MASK;
dprintk("svc: TCP record, %d bytes\n", svsk->sk_reclen);
if (svsk->sk_reclen > serv->sv_max_mesg) {
- if (net_ratelimit())
- printk(KERN_NOTICE "RPC: "
- "fragment too large: 0x%08lx\n",
- (unsigned long)svsk->sk_reclen);
+ net_notice_ratelimited("RPC: fragment too large: 0x%08lx\n",
+ (unsigned long)svsk->sk_reclen);
goto err_delete;
}
}
diff --git a/net/sysctl_net.c b/net/sysctl_net.c
index f3e813a8d107..e3a6e37cd1c5 100644
--- a/net/sysctl_net.c
+++ b/net/sysctl_net.c
@@ -26,10 +26,6 @@
#include <linux/if_ether.h>
#endif
-#ifdef CONFIG_TR
-#include <linux/if_tr.h>
-#endif
-
static struct ctl_table_set *
net_ctl_header_lookup(struct ctl_table_root *root, struct nsproxy *namespaces)
{
diff --git a/net/wireless/ibss.c b/net/wireless/ibss.c
index 30f20fe4a5fe..d2a19b0ff71f 100644
--- a/net/wireless/ibss.c
+++ b/net/wireless/ibss.c
@@ -473,7 +473,7 @@ int cfg80211_ibss_wext_siwap(struct net_device *dev,
/* fixed already - and no change */
if (wdev->wext.ibss.bssid && bssid &&
- compare_ether_addr(bssid, wdev->wext.ibss.bssid) == 0)
+ ether_addr_equal(bssid, wdev->wext.ibss.bssid))
return 0;
wdev_lock(wdev);
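
This ibss.c change is the first of the compare_ether_addr() to ether_addr_equal() conversions in this series. The old helper is memcmp-like (zero means "addresses match"), so every "== 0" test becomes a plain ether_addr_equal() call and every bare truth test becomes a negated one. A user-space sketch of the relationship, with memcmp standing in for the kernel's unrolled comparison:

#include <stdbool.h>
#include <stdio.h>
#include <string.h>

#define ETH_ALEN 6

/* memcmp-style stand-in: returns 0 when the addresses match */
static int compare_ether_addr_like(const unsigned char *a,
				   const unsigned char *b)
{
	return memcmp(a, b, ETH_ALEN) != 0;
}

/* boolean form: true when the addresses match */
static bool ether_addr_equal_like(const unsigned char *a,
				  const unsigned char *b)
{
	return compare_ether_addr_like(a, b) == 0;
}

int main(void)
{
	const unsigned char bssid[ETH_ALEN] = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55 };
	const unsigned char cur[ETH_ALEN]   = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55 };

	/* old style: match when the comparison returns 0 */
	printf("old: %s\n",
	       compare_ether_addr_like(bssid, cur) == 0 ? "match" : "differ");
	/* new style: match when the predicate is true */
	printf("new: %s\n",
	       ether_addr_equal_like(bssid, cur) ? "match" : "differ");
	return 0;
}
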
diff --git a/net/wireless/lib80211_crypt_ccmp.c b/net/wireless/lib80211_crypt_ccmp.c
index 755738d26bb4..1526c211db66 100644
--- a/net/wireless/lib80211_crypt_ccmp.c
+++ b/net/wireless/lib80211_crypt_ccmp.c
@@ -304,10 +304,8 @@ static int lib80211_ccmp_decrypt(struct sk_buff *skb, int hdr_len, void *priv)
pos = skb->data + hdr_len;
keyidx = pos[3];
if (!(keyidx & (1 << 5))) {
- if (net_ratelimit()) {
- printk(KERN_DEBUG "CCMP: received packet without ExtIV"
- " flag from %pM\n", hdr->addr2);
- }
+ net_dbg_ratelimited("CCMP: received packet without ExtIV flag from %pM\n",
+ hdr->addr2);
key->dot11RSNAStatsCCMPFormatErrors++;
return -2;
}
@@ -318,11 +316,8 @@ static int lib80211_ccmp_decrypt(struct sk_buff *skb, int hdr_len, void *priv)
return -6;
}
if (!key->key_set) {
- if (net_ratelimit()) {
- printk(KERN_DEBUG "CCMP: received packet from %pM"
- " with keyid=%d that does not have a configured"
- " key\n", hdr->addr2, keyidx);
- }
+ net_dbg_ratelimited("CCMP: received packet from %pM with keyid=%d that does not have a configured key\n",
+ hdr->addr2, keyidx);
return -3;
}
@@ -336,15 +331,11 @@ static int lib80211_ccmp_decrypt(struct sk_buff *skb, int hdr_len, void *priv)
if (ccmp_replay_check(pn, key->rx_pn)) {
#ifdef CONFIG_LIB80211_DEBUG
- if (net_ratelimit()) {
- printk(KERN_DEBUG "CCMP: replay detected: STA=%pM "
- "previous PN %02x%02x%02x%02x%02x%02x "
- "received PN %02x%02x%02x%02x%02x%02x\n",
- hdr->addr2,
- key->rx_pn[0], key->rx_pn[1], key->rx_pn[2],
- key->rx_pn[3], key->rx_pn[4], key->rx_pn[5],
- pn[0], pn[1], pn[2], pn[3], pn[4], pn[5]);
- }
+ net_dbg_ratelimited("CCMP: replay detected: STA=%pM previous PN %02x%02x%02x%02x%02x%02x received PN %02x%02x%02x%02x%02x%02x\n",
+ hdr->addr2,
+ key->rx_pn[0], key->rx_pn[1], key->rx_pn[2],
+ key->rx_pn[3], key->rx_pn[4], key->rx_pn[5],
+ pn[0], pn[1], pn[2], pn[3], pn[4], pn[5]);
#endif
key->dot11RSNAStatsCCMPReplays++;
return -4;
@@ -370,10 +361,8 @@ static int lib80211_ccmp_decrypt(struct sk_buff *skb, int hdr_len, void *priv)
}
if (memcmp(mic, a, CCMP_MIC_LEN) != 0) {
- if (net_ratelimit()) {
- printk(KERN_DEBUG "CCMP: decrypt failed: STA="
- "%pM\n", hdr->addr2);
- }
+ net_dbg_ratelimited("CCMP: decrypt failed: STA=%pM\n",
+ hdr->addr2);
key->dot11RSNAStatsCCMPDecryptErrors++;
return -5;
}
diff --git a/net/wireless/lib80211_crypt_tkip.c b/net/wireless/lib80211_crypt_tkip.c
index 38734846c19e..d475cfc8568f 100644
--- a/net/wireless/lib80211_crypt_tkip.c
+++ b/net/wireless/lib80211_crypt_tkip.c
@@ -360,12 +360,9 @@ static int lib80211_tkip_encrypt(struct sk_buff *skb, int hdr_len, void *priv)
struct scatterlist sg;
if (tkey->flags & IEEE80211_CRYPTO_TKIP_COUNTERMEASURES) {
- if (net_ratelimit()) {
- struct ieee80211_hdr *hdr =
- (struct ieee80211_hdr *)skb->data;
- printk(KERN_DEBUG ": TKIP countermeasures: dropped "
- "TX packet to %pM\n", hdr->addr1);
- }
+ struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
+ net_dbg_ratelimited("TKIP countermeasures: dropped TX packet to %pM\n",
+ hdr->addr1);
return -1;
}
@@ -420,10 +417,8 @@ static int lib80211_tkip_decrypt(struct sk_buff *skb, int hdr_len, void *priv)
hdr = (struct ieee80211_hdr *)skb->data;
if (tkey->flags & IEEE80211_CRYPTO_TKIP_COUNTERMEASURES) {
- if (net_ratelimit()) {
- printk(KERN_DEBUG ": TKIP countermeasures: dropped "
- "received packet from %pM\n", hdr->addr2);
- }
+ net_dbg_ratelimited("TKIP countermeasures: dropped received packet from %pM\n",
+ hdr->addr2);
return -1;
}
@@ -433,10 +428,8 @@ static int lib80211_tkip_decrypt(struct sk_buff *skb, int hdr_len, void *priv)
pos = skb->data + hdr_len;
keyidx = pos[3];
if (!(keyidx & (1 << 5))) {
- if (net_ratelimit()) {
- printk(KERN_DEBUG "TKIP: received packet without ExtIV"
- " flag from %pM\n", hdr->addr2);
- }
+ net_dbg_ratelimited("TKIP: received packet without ExtIV flag from %pM\n",
+ hdr->addr2);
return -2;
}
keyidx >>= 6;
@@ -446,11 +439,8 @@ static int lib80211_tkip_decrypt(struct sk_buff *skb, int hdr_len, void *priv)
return -6;
}
if (!tkey->key_set) {
- if (net_ratelimit()) {
- printk(KERN_DEBUG "TKIP: received packet from %pM"
- " with keyid=%d that does not have a configured"
- " key\n", hdr->addr2, keyidx);
- }
+ net_dbg_ratelimited("TKIP: received packet from %pM with keyid=%d that does not have a configured key\n",
+ hdr->addr2, keyidx);
return -3;
}
iv16 = (pos[0] << 8) | pos[2];
@@ -459,12 +449,9 @@ static int lib80211_tkip_decrypt(struct sk_buff *skb, int hdr_len, void *priv)
if (tkip_replay_check(iv32, iv16, tkey->rx_iv32, tkey->rx_iv16)) {
#ifdef CONFIG_LIB80211_DEBUG
- if (net_ratelimit()) {
- printk(KERN_DEBUG "TKIP: replay detected: STA=%pM"
- " previous TSC %08x%04x received TSC "
- "%08x%04x\n", hdr->addr2,
- tkey->rx_iv32, tkey->rx_iv16, iv32, iv16);
- }
+ net_dbg_ratelimited("TKIP: replay detected: STA=%pM previous TSC %08x%04x received TSC %08x%04x\n",
+ hdr->addr2, tkey->rx_iv32, tkey->rx_iv16,
+ iv32, iv16);
#endif
tkey->dot11RSNAStatsTKIPReplays++;
return -4;
@@ -481,11 +468,8 @@ static int lib80211_tkip_decrypt(struct sk_buff *skb, int hdr_len, void *priv)
crypto_blkcipher_setkey(tkey->rx_tfm_arc4, rc4key, 16);
sg_init_one(&sg, pos, plen + 4);
if (crypto_blkcipher_decrypt(&desc, &sg, &sg, plen + 4)) {
- if (net_ratelimit()) {
- printk(KERN_DEBUG ": TKIP: failed to decrypt "
- "received packet from %pM\n",
- hdr->addr2);
- }
+ net_dbg_ratelimited("TKIP: failed to decrypt received packet from %pM\n",
+ hdr->addr2);
return -7;
}
@@ -501,10 +485,8 @@ static int lib80211_tkip_decrypt(struct sk_buff *skb, int hdr_len, void *priv)
tkey->rx_phase1_done = 0;
}
#ifdef CONFIG_LIB80211_DEBUG
- if (net_ratelimit()) {
- printk(KERN_DEBUG "TKIP: ICV error detected: STA="
- "%pM\n", hdr->addr2);
- }
+ net_dbg_ratelimited("TKIP: ICV error detected: STA=%pM\n",
+ hdr->addr2);
#endif
tkey->dot11RSNAStatsTKIPICVErrors++;
return -5;
diff --git a/net/wireless/mlme.c b/net/wireless/mlme.c
index 6801d96bc224..eb90988bbd36 100644
--- a/net/wireless/mlme.c
+++ b/net/wireless/mlme.c
@@ -101,7 +101,7 @@ void __cfg80211_send_deauth(struct net_device *dev,
ASSERT_WDEV_LOCK(wdev);
if (wdev->current_bss &&
- compare_ether_addr(wdev->current_bss->pub.bssid, bssid) == 0) {
+ ether_addr_equal(wdev->current_bss->pub.bssid, bssid)) {
cfg80211_unhold_bss(wdev->current_bss);
cfg80211_put_bss(&wdev->current_bss->pub);
wdev->current_bss = NULL;
@@ -116,7 +116,7 @@ void __cfg80211_send_deauth(struct net_device *dev,
reason_code = le16_to_cpu(mgmt->u.deauth.reason_code);
- from_ap = compare_ether_addr(mgmt->sa, dev->dev_addr) != 0;
+ from_ap = !ether_addr_equal(mgmt->sa, dev->dev_addr);
__cfg80211_disconnected(dev, NULL, 0, reason_code, from_ap);
} else if (wdev->sme_state == CFG80211_SME_CONNECTING) {
__cfg80211_connect_result(dev, mgmt->bssid, NULL, 0, NULL, 0,
@@ -155,7 +155,7 @@ void __cfg80211_send_disassoc(struct net_device *dev,
return;
if (wdev->current_bss &&
- compare_ether_addr(wdev->current_bss->pub.bssid, bssid) == 0) {
+ ether_addr_equal(wdev->current_bss->pub.bssid, bssid)) {
cfg80211_sme_disassoc(dev, wdev->current_bss);
cfg80211_unhold_bss(wdev->current_bss);
cfg80211_put_bss(&wdev->current_bss->pub);
@@ -166,7 +166,7 @@ void __cfg80211_send_disassoc(struct net_device *dev,
reason_code = le16_to_cpu(mgmt->u.disassoc.reason_code);
- from_ap = compare_ether_addr(mgmt->sa, dev->dev_addr) != 0;
+ from_ap = !ether_addr_equal(mgmt->sa, dev->dev_addr);
__cfg80211_disconnected(dev, NULL, 0, reason_code, from_ap);
}
EXPORT_SYMBOL(__cfg80211_send_disassoc);
@@ -286,7 +286,7 @@ int __cfg80211_mlme_auth(struct cfg80211_registered_device *rdev,
return -EINVAL;
if (wdev->current_bss &&
- compare_ether_addr(bssid, wdev->current_bss->pub.bssid) == 0)
+ ether_addr_equal(bssid, wdev->current_bss->pub.bssid))
return -EALREADY;
memset(&req, 0, sizeof(req));
@@ -363,7 +363,7 @@ int __cfg80211_mlme_assoc(struct cfg80211_registered_device *rdev,
memset(&req, 0, sizeof(req));
if (wdev->current_bss && prev_bssid &&
- compare_ether_addr(wdev->current_bss->pub.bssid, prev_bssid) == 0) {
+ ether_addr_equal(wdev->current_bss->pub.bssid, prev_bssid)) {
/*
* Trying to reassociate: Allow this to proceed and let the old
* association to be dropped when the new one is completed.
@@ -447,8 +447,7 @@ int __cfg80211_mlme_deauth(struct cfg80211_registered_device *rdev,
if (local_state_change) {
if (wdev->current_bss &&
- compare_ether_addr(wdev->current_bss->pub.bssid, bssid)
- == 0) {
+ ether_addr_equal(wdev->current_bss->pub.bssid, bssid)) {
cfg80211_unhold_bss(wdev->current_bss);
cfg80211_put_bss(&wdev->current_bss->pub);
wdev->current_bss = NULL;
@@ -497,7 +496,7 @@ static int __cfg80211_mlme_disassoc(struct cfg80211_registered_device *rdev,
req.local_state_change = local_state_change;
req.ie = ie;
req.ie_len = ie_len;
- if (compare_ether_addr(wdev->current_bss->pub.bssid, bssid) == 0)
+ if (ether_addr_equal(wdev->current_bss->pub.bssid, bssid))
req.bss = &wdev->current_bss->pub;
else
return -ENOTCONN;
@@ -760,8 +759,8 @@ int cfg80211_mlme_mgmt_tx(struct cfg80211_registered_device *rdev,
break;
}
- if (compare_ether_addr(wdev->current_bss->pub.bssid,
- mgmt->bssid)) {
+ if (!ether_addr_equal(wdev->current_bss->pub.bssid,
+ mgmt->bssid)) {
err = -ENOTCONN;
break;
}
@@ -774,8 +773,8 @@ int cfg80211_mlme_mgmt_tx(struct cfg80211_registered_device *rdev,
break;
/* for station, check that DA is the AP */
- if (compare_ether_addr(wdev->current_bss->pub.bssid,
- mgmt->da)) {
+ if (!ether_addr_equal(wdev->current_bss->pub.bssid,
+ mgmt->da)) {
err = -ENOTCONN;
break;
}
@@ -783,11 +782,11 @@ int cfg80211_mlme_mgmt_tx(struct cfg80211_registered_device *rdev,
case NL80211_IFTYPE_AP:
case NL80211_IFTYPE_P2P_GO:
case NL80211_IFTYPE_AP_VLAN:
- if (compare_ether_addr(mgmt->bssid, dev->dev_addr))
+ if (!ether_addr_equal(mgmt->bssid, dev->dev_addr))
err = -EINVAL;
break;
case NL80211_IFTYPE_MESH_POINT:
- if (compare_ether_addr(mgmt->sa, mgmt->bssid)) {
+ if (!ether_addr_equal(mgmt->sa, mgmt->bssid)) {
err = -EINVAL;
break;
}
@@ -806,7 +805,7 @@ int cfg80211_mlme_mgmt_tx(struct cfg80211_registered_device *rdev,
return err;
}
- if (compare_ether_addr(mgmt->sa, dev->dev_addr) != 0)
+ if (!ether_addr_equal(mgmt->sa, dev->dev_addr))
return -EINVAL;
/* Transmit the Action frame as requested by user space */
diff --git a/net/wireless/scan.c b/net/wireless/scan.c
index 9dee87c0358c..af2b1caa37fa 100644
--- a/net/wireless/scan.c
+++ b/net/wireless/scan.c
@@ -281,7 +281,7 @@ static bool is_bss(struct cfg80211_bss *a,
{
const u8 *ssidie;
- if (bssid && compare_ether_addr(a->bssid, bssid))
+ if (bssid && !ether_addr_equal(a->bssid, bssid))
return false;
if (!ssid)
diff --git a/net/wireless/util.c b/net/wireless/util.c
index 177df03064cf..55d99466babb 100644
--- a/net/wireless/util.c
+++ b/net/wireless/util.c
@@ -370,7 +370,7 @@ int ieee80211_data_to_8023(struct sk_buff *skb, const u8 *addr,
iftype != NL80211_IFTYPE_P2P_CLIENT &&
iftype != NL80211_IFTYPE_MESH_POINT) ||
(is_multicast_ether_addr(dst) &&
- !compare_ether_addr(src, addr)))
+ ether_addr_equal(src, addr)))
return -1;
if (iftype == NL80211_IFTYPE_MESH_POINT) {
struct ieee80211s_hdr *meshdr =
@@ -398,9 +398,9 @@ int ieee80211_data_to_8023(struct sk_buff *skb, const u8 *addr,
payload = skb->data + hdrlen;
ethertype = (payload[6] << 8) | payload[7];
- if (likely((compare_ether_addr(payload, rfc1042_header) == 0 &&
+ if (likely((ether_addr_equal(payload, rfc1042_header) &&
ethertype != ETH_P_AARP && ethertype != ETH_P_IPX) ||
- compare_ether_addr(payload, bridge_tunnel_header) == 0)) {
+ ether_addr_equal(payload, bridge_tunnel_header))) {
/* remove RFC1042 or Bridge-Tunnel encapsulation and
* replace EtherType */
skb_pull(skb, hdrlen + 6);
@@ -609,10 +609,9 @@ void ieee80211_amsdu_to_8023s(struct sk_buff *skb, struct sk_buff_head *list,
payload = frame->data;
ethertype = (payload[6] << 8) | payload[7];
- if (likely((compare_ether_addr(payload, rfc1042_header) == 0 &&
+ if (likely((ether_addr_equal(payload, rfc1042_header) &&
ethertype != ETH_P_AARP && ethertype != ETH_P_IPX) ||
- compare_ether_addr(payload,
- bridge_tunnel_header) == 0)) {
+ ether_addr_equal(payload, bridge_tunnel_header))) {
/* remove RFC1042 or Bridge-Tunnel
* encapsulation and replace EtherType */
skb_pull(frame, 6);
diff --git a/net/wireless/wext-sme.c b/net/wireless/wext-sme.c
index 7c01c2f3b6cf..7decbd357d51 100644
--- a/net/wireless/wext-sme.c
+++ b/net/wireless/wext-sme.c
@@ -276,7 +276,7 @@ int cfg80211_mgd_wext_siwap(struct net_device *dev,
/* fixed already - and no change */
if (wdev->wext.connect.bssid && bssid &&
- compare_ether_addr(bssid, wdev->wext.connect.bssid) == 0)
+ ether_addr_equal(bssid, wdev->wext.connect.bssid))
goto out;
err = __cfg80211_disconnect(rdev, dev,
diff --git a/net/wireless/wext-spy.c b/net/wireless/wext-spy.c
index 5d643a548feb..33bef22e44e9 100644
--- a/net/wireless/wext-spy.c
+++ b/net/wireless/wext-spy.c
@@ -203,7 +203,7 @@ void wireless_spy_update(struct net_device * dev,
/* Update all records that match */
for (i = 0; i < spydata->spy_number; i++)
- if (!compare_ether_addr(address, spydata->spy_address[i])) {
+ if (ether_addr_equal(address, spydata->spy_address[i])) {
memcpy(&(spydata->spy_stat[i]), wstats,
sizeof(struct iw_quality));
match = i;
diff --git a/net/xfrm/Kconfig b/net/xfrm/Kconfig
index 6d081674515f..ce90b8d92365 100644
--- a/net/xfrm/Kconfig
+++ b/net/xfrm/Kconfig
@@ -3,12 +3,17 @@
#
config XFRM
bool
- select CRYPTO
depends on NET
+config XFRM_ALGO
+ tristate
+ select XFRM
+ select CRYPTO
+
config XFRM_USER
tristate "Transformation user configuration interface"
- depends on INET && XFRM
+ depends on INET
+ select XFRM_ALGO
---help---
Support for Transformation(XFRM) user configuration interface
like IPsec used by native Linux tools.
@@ -48,13 +53,13 @@ config XFRM_STATISTICS
config XFRM_IPCOMP
tristate
- select XFRM
+ select XFRM_ALGO
select CRYPTO
select CRYPTO_DEFLATE
config NET_KEY
tristate "PF_KEY sockets"
- select XFRM
+ select XFRM_ALGO
---help---
PF_KEYv2 socket family, compatible to KAME ones.
They are required if you are going to use IPsec tools ported
diff --git a/net/xfrm/Makefile b/net/xfrm/Makefile
index aa429eefe919..c0e961983f17 100644
--- a/net/xfrm/Makefile
+++ b/net/xfrm/Makefile
@@ -3,8 +3,9 @@
#
obj-$(CONFIG_XFRM) := xfrm_policy.o xfrm_state.o xfrm_hash.o \
- xfrm_input.o xfrm_output.o xfrm_algo.o \
+ xfrm_input.o xfrm_output.o \
xfrm_sysctl.o xfrm_replay.o
obj-$(CONFIG_XFRM_STATISTICS) += xfrm_proc.o
+obj-$(CONFIG_XFRM_ALGO) += xfrm_algo.o
obj-$(CONFIG_XFRM_USER) += xfrm_user.o
obj-$(CONFIG_XFRM_IPCOMP) += xfrm_ipcomp.o
diff --git a/net/xfrm/xfrm_algo.c b/net/xfrm/xfrm_algo.c
index 791ab2e77f3f..4ce2d93162c1 100644
--- a/net/xfrm/xfrm_algo.c
+++ b/net/xfrm/xfrm_algo.c
@@ -15,9 +15,6 @@
#include <linux/crypto.h>
#include <linux/scatterlist.h>
#include <net/xfrm.h>
-#if defined(CONFIG_INET_AH) || defined(CONFIG_INET_AH_MODULE) || defined(CONFIG_INET6_AH) || defined(CONFIG_INET6_AH_MODULE)
-#include <net/ah.h>
-#endif
#if defined(CONFIG_INET_ESP) || defined(CONFIG_INET_ESP_MODULE) || defined(CONFIG_INET6_ESP) || defined(CONFIG_INET6_ESP_MODULE)
#include <net/esp.h>
#endif
@@ -752,3 +749,5 @@ void *pskb_put(struct sk_buff *skb, struct sk_buff *tail, int len)
}
EXPORT_SYMBOL_GPL(pskb_put);
#endif
+
+MODULE_LICENSE("GPL");
diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c
index 7661576b6f45..3c87a1c4066f 100644
--- a/net/xfrm/xfrm_policy.c
+++ b/net/xfrm/xfrm_policy.c
@@ -56,7 +56,7 @@ static int xfrm_bundle_ok(struct xfrm_dst *xdst);
static struct xfrm_policy *__xfrm_policy_unlink(struct xfrm_policy *pol,
int dir);
-static inline int
+static inline bool
__xfrm4_selector_match(const struct xfrm_selector *sel, const struct flowi *fl)
{
const struct flowi4 *fl4 = &fl->u.ip4;
@@ -69,7 +69,7 @@ __xfrm4_selector_match(const struct xfrm_selector *sel, const struct flowi *fl)
(fl4->flowi4_oif == sel->ifindex || !sel->ifindex);
}
-static inline int
+static inline bool
__xfrm6_selector_match(const struct xfrm_selector *sel, const struct flowi *fl)
{
const struct flowi6 *fl6 = &fl->u.ip6;
@@ -82,8 +82,8 @@ __xfrm6_selector_match(const struct xfrm_selector *sel, const struct flowi *fl)
(fl6->flowi6_oif == sel->ifindex || !sel->ifindex);
}
-int xfrm_selector_match(const struct xfrm_selector *sel, const struct flowi *fl,
- unsigned short family)
+bool xfrm_selector_match(const struct xfrm_selector *sel, const struct flowi *fl,
+ unsigned short family)
{
switch (family) {
case AF_INET:
@@ -91,7 +91,7 @@ int xfrm_selector_match(const struct xfrm_selector *sel, const struct flowi *fl,
case AF_INET6:
return __xfrm6_selector_match(sel, fl);
}
- return 0;
+ return false;
}
static inline struct dst_entry *__xfrm_dst_lookup(struct net *net, int tos,
@@ -877,7 +877,8 @@ static int xfrm_policy_match(const struct xfrm_policy *pol,
u8 type, u16 family, int dir)
{
const struct xfrm_selector *sel = &pol->selector;
- int match, ret = -ESRCH;
+ int ret = -ESRCH;
+ bool match;
if (pol->family != family ||
(fl->flowi_mark & pol->mark.m) != pol->mark.v ||
@@ -1006,8 +1007,8 @@ static struct xfrm_policy *xfrm_sk_policy_lookup(struct sock *sk, int dir,
read_lock_bh(&xfrm_policy_lock);
if ((pol = sk->sk_policy[dir]) != NULL) {
- int match = xfrm_selector_match(&pol->selector, fl,
- sk->sk_family);
+ bool match = xfrm_selector_match(&pol->selector, fl,
+ sk->sk_family);
int err = 0;
if (match) {
@@ -2767,8 +2768,8 @@ EXPORT_SYMBOL_GPL(xfrm_audit_policy_delete);
#endif
#ifdef CONFIG_XFRM_MIGRATE
-static int xfrm_migrate_selector_match(const struct xfrm_selector *sel_cmp,
- const struct xfrm_selector *sel_tgt)
+static bool xfrm_migrate_selector_match(const struct xfrm_selector *sel_cmp,
+ const struct xfrm_selector *sel_tgt)
{
if (sel_cmp->proto == IPSEC_ULPROTO_ANY) {
if (sel_tgt->family == sel_cmp->family &&
@@ -2778,14 +2779,14 @@ static int xfrm_migrate_selector_match(const struct xfrm_selector *sel_cmp,
sel_cmp->family) == 0 &&
sel_tgt->prefixlen_d == sel_cmp->prefixlen_d &&
sel_tgt->prefixlen_s == sel_cmp->prefixlen_s) {
- return 1;
+ return true;
}
} else {
if (memcmp(sel_tgt, sel_cmp, sizeof(*sel_tgt)) == 0) {
- return 1;
+ return true;
}
}
- return 0;
+ return false;
}
static struct xfrm_policy * xfrm_migrate_policy_find(const struct xfrm_selector *sel,