Diffstat (limited to 'net'): 55 files changed, 716 insertions, 586 deletions
diff --git a/net/802/fddi.c b/net/802/fddi.c index f1611a1e06a7..539e6064e6d4 100644 --- a/net/802/fddi.c +++ b/net/802/fddi.c @@ -215,3 +215,5 @@ struct net_device *alloc_fddidev(int sizeof_priv) return alloc_netdev(sizeof_priv, "fddi%d", fddi_setup); } EXPORT_SYMBOL(alloc_fddidev); + +MODULE_LICENSE("GPL"); diff --git a/net/802/tr.c b/net/802/tr.c index e7eb13084d71..e874447ad144 100644 --- a/net/802/tr.c +++ b/net/802/tr.c @@ -561,6 +561,9 @@ static int rif_seq_show(struct seq_file *seq, void *v) } seq_putc(seq, '\n'); } + + if (dev) + dev_put(dev); } return 0; } diff --git a/net/8021q/vlan.c b/net/8021q/vlan.c index 2b7390e377b3..d1e10546eb85 100644 --- a/net/8021q/vlan.c +++ b/net/8021q/vlan.c @@ -492,6 +492,7 @@ static int vlan_device_event(struct notifier_block *unused, unsigned long event, continue; dev_change_flags(vlandev, flgs & ~IFF_UP); + vlan_transfer_operstate(dev, vlandev); } break; @@ -507,6 +508,7 @@ static int vlan_device_event(struct notifier_block *unused, unsigned long event, continue; dev_change_flags(vlandev, flgs | IFF_UP); + vlan_transfer_operstate(dev, vlandev); } break; diff --git a/net/8021q/vlan_core.c b/net/8021q/vlan_core.c index 654e45f5719d..c67fe6f75653 100644 --- a/net/8021q/vlan_core.c +++ b/net/8021q/vlan_core.c @@ -121,8 +121,10 @@ int vlan_gro_frags(struct napi_struct *napi, struct vlan_group *grp, if (!skb) return NET_RX_DROP; - if (netpoll_rx_on(skb)) + if (netpoll_rx_on(skb)) { + skb->protocol = eth_type_trans(skb, skb->dev); return vlan_hwaccel_receive_skb(skb, grp, vlan_tci); + } return napi_frags_finish(napi, skb, vlan_gro_common(napi, grp, vlan_tci, skb)); diff --git a/net/8021q/vlan_dev.c b/net/8021q/vlan_dev.c index 1b34135cf990..b4b9068e55a7 100644 --- a/net/8021q/vlan_dev.c +++ b/net/8021q/vlan_dev.c @@ -462,6 +462,7 @@ static int vlan_dev_open(struct net_device *dev) if (vlan->flags & VLAN_FLAG_GVRP) vlan_gvrp_request_join(dev); + netif_carrier_on(dev); return 0; clear_allmulti: @@ -471,6 +472,7 @@ del_unicast: if (compare_ether_addr(dev->dev_addr, real_dev->dev_addr)) dev_unicast_delete(real_dev, dev->dev_addr, ETH_ALEN); out: + netif_carrier_off(dev); return err; } @@ -492,6 +494,7 @@ static int vlan_dev_stop(struct net_device *dev) if (compare_ether_addr(dev->dev_addr, real_dev->dev_addr)) dev_unicast_delete(real_dev, dev->dev_addr, dev->addr_len); + netif_carrier_off(dev); return 0; } @@ -612,6 +615,8 @@ static int vlan_dev_init(struct net_device *dev) struct net_device *real_dev = vlan_dev_info(dev)->real_dev; int subclass = 0; + netif_carrier_off(dev); + /* IFF_BROADCAST|IFF_MULTICAST; ??? 
*/ dev->flags = real_dev->flags & ~(IFF_UP | IFF_PROMISC | IFF_ALLMULTI); dev->iflink = real_dev->ifindex; @@ -668,7 +673,8 @@ static int vlan_ethtool_get_settings(struct net_device *dev, const struct vlan_dev_info *vlan = vlan_dev_info(dev); struct net_device *real_dev = vlan->real_dev; - if (!real_dev->ethtool_ops->get_settings) + if (!real_dev->ethtool_ops || + !real_dev->ethtool_ops->get_settings) return -EOPNOTSUPP; return real_dev->ethtool_ops->get_settings(real_dev, cmd); diff --git a/net/atm/br2684.c b/net/atm/br2684.c index 334fcd4a4ea4..3100a8940afc 100644 --- a/net/atm/br2684.c +++ b/net/atm/br2684.c @@ -549,6 +549,7 @@ static void br2684_setup(struct net_device *netdev) struct br2684_dev *brdev = BRPRIV(netdev); ether_setup(netdev); + brdev->net_dev = netdev; netdev->netdev_ops = &br2684_netdev_ops; diff --git a/net/ax25/ax25_uid.c b/net/ax25/ax25_uid.c index 57aeba729bae..832bcf092a01 100644 --- a/net/ax25/ax25_uid.c +++ b/net/ax25/ax25_uid.c @@ -148,9 +148,13 @@ static void *ax25_uid_seq_start(struct seq_file *seq, loff_t *pos) { struct ax25_uid_assoc *pt; struct hlist_node *node; - int i = 0; + int i = 1; read_lock(&ax25_uid_lock); + + if (*pos == 0) + return SEQ_START_TOKEN; + ax25_uid_for_each(pt, node, &ax25_uid_list) { if (i == *pos) return pt; @@ -162,8 +166,10 @@ static void *ax25_uid_seq_start(struct seq_file *seq, loff_t *pos) static void *ax25_uid_seq_next(struct seq_file *seq, void *v, loff_t *pos) { ++*pos; - - return hlist_entry(((ax25_uid_assoc *)v)->uid_node.next, + if (v == SEQ_START_TOKEN) + return ax25_uid_list.first; + else + return hlist_entry(((ax25_uid_assoc *)v)->uid_node.next, ax25_uid_assoc, uid_node); } diff --git a/net/bluetooth/hci_conn.c b/net/bluetooth/hci_conn.c index 1181db08d9de..61309b26f271 100644 --- a/net/bluetooth/hci_conn.c +++ b/net/bluetooth/hci_conn.c @@ -215,6 +215,7 @@ struct hci_conn *hci_conn_add(struct hci_dev *hdev, int type, bdaddr_t *dst) conn->state = BT_OPEN; conn->power_save = 1; + conn->disc_timeout = HCI_DISCONN_TIMEOUT; switch (type) { case ACL_LINK: @@ -247,6 +248,8 @@ struct hci_conn *hci_conn_add(struct hci_dev *hdev, int type, bdaddr_t *dst) if (hdev->notify) hdev->notify(hdev, HCI_NOTIFY_CONN_ADD); + hci_conn_init_sysfs(conn); + tasklet_enable(&hdev->tx_task); return conn; @@ -424,12 +427,9 @@ int hci_conn_security(struct hci_conn *conn, __u8 sec_level, __u8 auth_type) if (sec_level == BT_SECURITY_SDP) return 1; - if (sec_level == BT_SECURITY_LOW) { - if (conn->ssp_mode > 0 && conn->hdev->ssp_mode > 0) - return hci_conn_auth(conn, sec_level, auth_type); - else - return 1; - } + if (sec_level == BT_SECURITY_LOW && + (!conn->ssp_mode || !conn->hdev->ssp_mode)) + return 1; if (conn->link_mode & HCI_LM_ENCRYPT) return hci_conn_auth(conn, sec_level, auth_type); diff --git a/net/bluetooth/hci_event.c b/net/bluetooth/hci_event.c index 55534244c3a0..4e7cb88e5da9 100644 --- a/net/bluetooth/hci_event.c +++ b/net/bluetooth/hci_event.c @@ -866,8 +866,16 @@ static inline void hci_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *s hci_dev_lock(hdev); conn = hci_conn_hash_lookup_ba(hdev, ev->link_type, &ev->bdaddr); - if (!conn) - goto unlock; + if (!conn) { + if (ev->link_type != SCO_LINK) + goto unlock; + + conn = hci_conn_hash_lookup_ba(hdev, ESCO_LINK, &ev->bdaddr); + if (!conn) + goto unlock; + + conn->type = SCO_LINK; + } if (!ev->status) { conn->handle = __le16_to_cpu(ev->handle); @@ -875,6 +883,7 @@ static inline void hci_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *s if (conn->type == ACL_LINK) { 
conn->state = BT_CONFIG; hci_conn_hold(conn); + conn->disc_timeout = HCI_DISCONN_TIMEOUT; } else conn->state = BT_CONNECTED; @@ -1055,9 +1064,14 @@ static inline void hci_auth_complete_evt(struct hci_dev *hdev, struct sk_buff *s hci_proto_connect_cfm(conn, ev->status); hci_conn_put(conn); } - } else + } else { hci_auth_cfm(conn, ev->status); + hci_conn_hold(conn); + conn->disc_timeout = HCI_DISCONN_TIMEOUT; + hci_conn_put(conn); + } + if (test_bit(HCI_CONN_ENCRYPT_PEND, &conn->pend)) { if (!ev->status) { struct hci_cp_set_conn_encrypt cp; @@ -1471,7 +1485,21 @@ static inline void hci_mode_change_evt(struct hci_dev *hdev, struct sk_buff *skb static inline void hci_pin_code_request_evt(struct hci_dev *hdev, struct sk_buff *skb) { + struct hci_ev_pin_code_req *ev = (void *) skb->data; + struct hci_conn *conn; + BT_DBG("%s", hdev->name); + + hci_dev_lock(hdev); + + conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr); + if (conn) { + hci_conn_hold(conn); + conn->disc_timeout = HCI_PAIRING_TIMEOUT; + hci_conn_put(conn); + } + + hci_dev_unlock(hdev); } static inline void hci_link_key_request_evt(struct hci_dev *hdev, struct sk_buff *skb) @@ -1481,7 +1509,21 @@ static inline void hci_link_key_request_evt(struct hci_dev *hdev, struct sk_buff static inline void hci_link_key_notify_evt(struct hci_dev *hdev, struct sk_buff *skb) { + struct hci_ev_link_key_notify *ev = (void *) skb->data; + struct hci_conn *conn; + BT_DBG("%s", hdev->name); + + hci_dev_lock(hdev); + + conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr); + if (conn) { + hci_conn_hold(conn); + conn->disc_timeout = HCI_DISCONN_TIMEOUT; + hci_conn_put(conn); + } + + hci_dev_unlock(hdev); } static inline void hci_clock_offset_evt(struct hci_dev *hdev, struct sk_buff *skb) @@ -1646,20 +1688,28 @@ static inline void hci_sync_conn_complete_evt(struct hci_dev *hdev, struct sk_bu conn->type = SCO_LINK; } - if (conn->out && ev->status == 0x1c && conn->attempt < 2) { - conn->pkt_type = (hdev->esco_type & SCO_ESCO_MASK) | - (hdev->esco_type & EDR_ESCO_MASK); - hci_setup_sync(conn, conn->link->handle); - goto unlock; - } - - if (!ev->status) { + switch (ev->status) { + case 0x00: conn->handle = __le16_to_cpu(ev->handle); conn->state = BT_CONNECTED; hci_conn_add_sysfs(conn); - } else + break; + + case 0x1c: /* SCO interval rejected */ + case 0x1f: /* Unspecified error */ + if (conn->out && conn->attempt < 2) { + conn->pkt_type = (hdev->esco_type & SCO_ESCO_MASK) | + (hdev->esco_type & EDR_ESCO_MASK); + hci_setup_sync(conn, conn->link->handle); + goto unlock; + } + /* fall through */ + + default: conn->state = BT_CLOSED; + break; + } hci_proto_connect_cfm(conn, ev->status); if (ev->status) diff --git a/net/bluetooth/hci_sysfs.c b/net/bluetooth/hci_sysfs.c index ed82796d4a0f..582d8877078c 100644 --- a/net/bluetooth/hci_sysfs.c +++ b/net/bluetooth/hci_sysfs.c @@ -9,8 +9,7 @@ struct class *bt_class = NULL; EXPORT_SYMBOL_GPL(bt_class); -static struct workqueue_struct *btaddconn; -static struct workqueue_struct *btdelconn; +static struct workqueue_struct *bt_workq; static inline char *link_typetostr(int type) { @@ -88,9 +87,10 @@ static struct device_type bt_link = { static void add_conn(struct work_struct *work) { - struct hci_conn *conn = container_of(work, struct hci_conn, work); + struct hci_conn *conn = container_of(work, struct hci_conn, work_add); - flush_workqueue(btdelconn); + /* ensure previous del is complete */ + flush_work(&conn->work_del); if (device_add(&conn->dev) < 0) { BT_ERR("Failed to register connection device"); @@ 
-98,27 +98,6 @@ static void add_conn(struct work_struct *work) } } -void hci_conn_add_sysfs(struct hci_conn *conn) -{ - struct hci_dev *hdev = conn->hdev; - - BT_DBG("conn %p", conn); - - conn->dev.type = &bt_link; - conn->dev.class = bt_class; - conn->dev.parent = &hdev->dev; - - dev_set_name(&conn->dev, "%s:%d", hdev->name, conn->handle); - - dev_set_drvdata(&conn->dev, conn); - - device_initialize(&conn->dev); - - INIT_WORK(&conn->work, add_conn); - - queue_work(btaddconn, &conn->work); -} - /* * The rfcomm tty device will possibly retain even when conn * is down, and sysfs doesn't support move zombie device, @@ -131,9 +110,15 @@ static int __match_tty(struct device *dev, void *data) static void del_conn(struct work_struct *work) { - struct hci_conn *conn = container_of(work, struct hci_conn, work); + struct hci_conn *conn = container_of(work, struct hci_conn, work_del); struct hci_dev *hdev = conn->hdev; + /* ensure previous add is complete */ + flush_work(&conn->work_add); + + if (!device_is_registered(&conn->dev)) + return; + while (1) { struct device *dev; @@ -149,16 +134,40 @@ static void del_conn(struct work_struct *work) hci_dev_put(hdev); } -void hci_conn_del_sysfs(struct hci_conn *conn) +void hci_conn_init_sysfs(struct hci_conn *conn) { + struct hci_dev *hdev = conn->hdev; + BT_DBG("conn %p", conn); - if (!device_is_registered(&conn->dev)) - return; + conn->dev.type = &bt_link; + conn->dev.class = bt_class; + conn->dev.parent = &hdev->dev; + + dev_set_drvdata(&conn->dev, conn); + + device_initialize(&conn->dev); + + INIT_WORK(&conn->work_add, add_conn); + INIT_WORK(&conn->work_del, del_conn); +} + +void hci_conn_add_sysfs(struct hci_conn *conn) +{ + struct hci_dev *hdev = conn->hdev; + + BT_DBG("conn %p", conn); - INIT_WORK(&conn->work, del_conn); + dev_set_name(&conn->dev, "%s:%d", hdev->name, conn->handle); - queue_work(btdelconn, &conn->work); + queue_work(bt_workq, &conn->work_add); +} + +void hci_conn_del_sysfs(struct hci_conn *conn) +{ + BT_DBG("conn %p", conn); + + queue_work(bt_workq, &conn->work_del); } static inline char *host_typetostr(int type) @@ -435,20 +444,13 @@ void hci_unregister_sysfs(struct hci_dev *hdev) int __init bt_sysfs_init(void) { - btaddconn = create_singlethread_workqueue("btaddconn"); - if (!btaddconn) + bt_workq = create_singlethread_workqueue("bluetooth"); + if (!bt_workq) return -ENOMEM; - btdelconn = create_singlethread_workqueue("btdelconn"); - if (!btdelconn) { - destroy_workqueue(btaddconn); - return -ENOMEM; - } - bt_class = class_create(THIS_MODULE, "bluetooth"); if (IS_ERR(bt_class)) { - destroy_workqueue(btdelconn); - destroy_workqueue(btaddconn); + destroy_workqueue(bt_workq); return PTR_ERR(bt_class); } @@ -457,8 +459,7 @@ int __init bt_sysfs_init(void) void bt_sysfs_cleanup(void) { - destroy_workqueue(btaddconn); - destroy_workqueue(btdelconn); + destroy_workqueue(bt_workq); class_destroy(bt_class); } diff --git a/net/bluetooth/rfcomm/core.c b/net/bluetooth/rfcomm/core.c index 1d0fb0f23c63..374536e050aa 100644 --- a/net/bluetooth/rfcomm/core.c +++ b/net/bluetooth/rfcomm/core.c @@ -1194,6 +1194,8 @@ void rfcomm_dlc_accept(struct rfcomm_dlc *d) rfcomm_send_ua(d->session, d->dlci); + rfcomm_dlc_clear_timer(d); + rfcomm_dlc_lock(d); d->state = BT_CONNECTED; d->state_change(d, 0); diff --git a/net/can/af_can.c b/net/can/af_can.c index 547bafc79e28..10f0528c3bf5 100644 --- a/net/can/af_can.c +++ b/net/can/af_can.c @@ -674,8 +674,8 @@ static int can_rcv(struct sk_buff *skb, struct net_device *dev, rcu_read_unlock(); - /* free the skbuff 
allocated by the netdevice driver */ - kfree_skb(skb); + /* consume the skbuff allocated by the netdevice driver */ + consume_skb(skb); if (matches > 0) { can_stats.matches++; diff --git a/net/core/datagram.c b/net/core/datagram.c index d0de644b378d..b01a76abe1d2 100644 --- a/net/core/datagram.c +++ b/net/core/datagram.c @@ -64,13 +64,25 @@ static inline int connection_based(struct sock *sk) return sk->sk_type == SOCK_SEQPACKET || sk->sk_type == SOCK_STREAM; } +static int receiver_wake_function(wait_queue_t *wait, unsigned mode, int sync, + void *key) +{ + unsigned long bits = (unsigned long)key; + + /* + * Avoid a wakeup if event not interesting for us + */ + if (bits && !(bits & (POLLIN | POLLERR))) + return 0; + return autoremove_wake_function(wait, mode, sync, key); +} /* * Wait for a packet.. */ static int wait_for_packet(struct sock *sk, int *err, long *timeo_p) { int error; - DEFINE_WAIT(wait); + DEFINE_WAIT_FUNC(wait, receiver_wake_function); prepare_to_wait_exclusive(sk->sk_sleep, &wait, TASK_INTERRUPTIBLE); diff --git a/net/core/dev.c b/net/core/dev.c index 91d792d17e09..e2e9e4af3ace 100644 --- a/net/core/dev.c +++ b/net/core/dev.c @@ -1336,7 +1336,12 @@ static void dev_queue_xmit_nit(struct sk_buff *skb, struct net_device *dev) { struct packet_type *ptype; +#ifdef CONFIG_NET_CLS_ACT + if (!(skb->tstamp.tv64 && (G_TC_FROM(skb->tc_verd) & AT_INGRESS))) + net_timestamp(skb); +#else net_timestamp(skb); +#endif rcu_read_lock(); list_for_each_entry_rcu(ptype, &ptype_all, list) { @@ -1430,7 +1435,7 @@ void netif_device_detach(struct net_device *dev) { if (test_and_clear_bit(__LINK_STATE_PRESENT, &dev->state) && netif_running(dev)) { - netif_stop_queue(dev); + netif_tx_stop_all_queues(dev); } } EXPORT_SYMBOL(netif_device_detach); @@ -1445,7 +1450,7 @@ void netif_device_attach(struct net_device *dev) { if (!test_and_set_bit(__LINK_STATE_PRESENT, &dev->state) && netif_running(dev)) { - netif_wake_queue(dev); + netif_tx_wake_all_queues(dev); __netdev_watchdog_up(dev); } } @@ -1730,11 +1735,12 @@ u16 skb_tx_hash(const struct net_device *dev, const struct sk_buff *skb) { u32 hash; - if (skb_rx_queue_recorded(skb)) { - hash = skb_get_rx_queue(skb); - } else if (skb->sk && skb->sk->sk_hash) { + if (skb_rx_queue_recorded(skb)) + return skb_get_rx_queue(skb) % dev->real_num_tx_queues; + + if (skb->sk && skb->sk->sk_hash) hash = skb->sk->sk_hash; - } else + else hash = skb->protocol; hash = jhash_1word(hash, skb_tx_hashrnd); @@ -2328,8 +2334,10 @@ static int napi_gro_complete(struct sk_buff *skb) struct list_head *head = &ptype_base[ntohs(type) & PTYPE_HASH_MASK]; int err = -ENOENT; - if (NAPI_GRO_CB(skb)->count == 1) + if (NAPI_GRO_CB(skb)->count == 1) { + skb_shinfo(skb)->gso_size = 0; goto out; + } rcu_read_lock(); list_for_each_entry_rcu(ptype, head, list) { @@ -2348,7 +2356,6 @@ static int napi_gro_complete(struct sk_buff *skb) } out: - skb_shinfo(skb)->gso_size = 0; return netif_receive_skb(skb); } @@ -2539,9 +2546,9 @@ struct sk_buff *napi_fraginfo_skb(struct napi_struct *napi, } BUG_ON(info->nr_frags > MAX_SKB_FRAGS); - frag = &info->frags[info->nr_frags - 1]; + frag = info->frags; - for (i = skb_shinfo(skb)->nr_frags; i < info->nr_frags; i++) { + for (i = 0; i < info->nr_frags; i++) { skb_fill_page_desc(skb, i, frag->page, frag->page_offset, frag->size); frag++; @@ -4399,7 +4406,7 @@ int register_netdevice(struct net_device *dev) dev->iflink = -1; #ifdef CONFIG_COMPAT_NET_DEV_OPS - /* Netdevice_ops API compatiability support. + /* Netdevice_ops API compatibility support. 
* This is temporary until all network devices are converted. */ if (dev->netdev_ops) { @@ -4410,7 +4417,7 @@ int register_netdevice(struct net_device *dev) dev->name, netdev_drivername(dev, drivername, 64)); /* This works only because net_device_ops and the - compatiablity structure are the same. */ + compatibility structure are the same. */ dev->netdev_ops = (void *) &(dev->init); } #endif diff --git a/net/core/skbuff.c b/net/core/skbuff.c index ce6356cd9f71..f091a5a845c1 100644 --- a/net/core/skbuff.c +++ b/net/core/skbuff.c @@ -1365,9 +1365,8 @@ static void sock_spd_release(struct splice_pipe_desc *spd, unsigned int i) static inline struct page *linear_to_page(struct page *page, unsigned int *len, unsigned int *offset, - struct sk_buff *skb) + struct sk_buff *skb, struct sock *sk) { - struct sock *sk = skb->sk; struct page *p = sk->sk_sndmsg_page; unsigned int off; @@ -1405,13 +1404,14 @@ new_page: */ static inline int spd_fill_page(struct splice_pipe_desc *spd, struct page *page, unsigned int *len, unsigned int offset, - struct sk_buff *skb, int linear) + struct sk_buff *skb, int linear, + struct sock *sk) { if (unlikely(spd->nr_pages == PIPE_BUFFERS)) return 1; if (linear) { - page = linear_to_page(page, len, &offset, skb); + page = linear_to_page(page, len, &offset, skb, sk); if (!page) return 1; } else @@ -1442,7 +1442,8 @@ static inline void __segment_seek(struct page **page, unsigned int *poff, static inline int __splice_segment(struct page *page, unsigned int poff, unsigned int plen, unsigned int *off, unsigned int *len, struct sk_buff *skb, - struct splice_pipe_desc *spd, int linear) + struct splice_pipe_desc *spd, int linear, + struct sock *sk) { if (!*len) return 1; @@ -1465,7 +1466,7 @@ static inline int __splice_segment(struct page *page, unsigned int poff, /* the linear region may spread across several pages */ flen = min_t(unsigned int, flen, PAGE_SIZE - poff); - if (spd_fill_page(spd, page, &flen, poff, skb, linear)) + if (spd_fill_page(spd, page, &flen, poff, skb, linear, sk)) return 1; __segment_seek(&page, &poff, &plen, flen); @@ -1481,8 +1482,8 @@ static inline int __splice_segment(struct page *page, unsigned int poff, * pipe is full or if we already spliced the requested length. */ static int __skb_splice_bits(struct sk_buff *skb, unsigned int *offset, - unsigned int *len, - struct splice_pipe_desc *spd) + unsigned int *len, struct splice_pipe_desc *spd, + struct sock *sk) { int seg; @@ -1492,7 +1493,7 @@ static int __skb_splice_bits(struct sk_buff *skb, unsigned int *offset, if (__splice_segment(virt_to_page(skb->data), (unsigned long) skb->data & (PAGE_SIZE - 1), skb_headlen(skb), - offset, len, skb, spd, 1)) + offset, len, skb, spd, 1, sk)) return 1; /* @@ -1502,7 +1503,7 @@ static int __skb_splice_bits(struct sk_buff *skb, unsigned int *offset, const skb_frag_t *f = &skb_shinfo(skb)->frags[seg]; if (__splice_segment(f->page, f->page_offset, f->size, - offset, len, skb, spd, 0)) + offset, len, skb, spd, 0, sk)) return 1; } @@ -1528,12 +1529,13 @@ int skb_splice_bits(struct sk_buff *skb, unsigned int offset, .ops = &sock_pipe_buf_ops, .spd_release = sock_spd_release, }; + struct sock *sk = skb->sk; /* * __skb_splice_bits() only fails if the output has no room left, * so no point in going over the frag_list for the error case. 
*/ - if (__skb_splice_bits(skb, &offset, &tlen, &spd)) + if (__skb_splice_bits(skb, &offset, &tlen, &spd, sk)) goto done; else if (!tlen) goto done; @@ -1545,14 +1547,13 @@ int skb_splice_bits(struct sk_buff *skb, unsigned int offset, struct sk_buff *list = skb_shinfo(skb)->frag_list; for (; list && tlen; list = list->next) { - if (__skb_splice_bits(list, &offset, &tlen, &spd)) + if (__skb_splice_bits(list, &offset, &tlen, &spd, sk)) break; } } done: if (spd.nr_pages) { - struct sock *sk = skb->sk; int ret; /* diff --git a/net/ipv4/netfilter/arp_tables.c b/net/ipv4/netfilter/arp_tables.c index 5ba533d234db..831fe1879dc0 100644 --- a/net/ipv4/netfilter/arp_tables.c +++ b/net/ipv4/netfilter/arp_tables.c @@ -253,9 +253,9 @@ unsigned int arpt_do_table(struct sk_buff *skb, indev = in ? in->name : nulldevname; outdev = out ? out->name : nulldevname; - rcu_read_lock_bh(); - private = rcu_dereference(table->private); - table_base = rcu_dereference(private->entries[smp_processor_id()]); + xt_info_rdlock_bh(); + private = table->private; + table_base = private->entries[smp_processor_id()]; e = get_entry(table_base, private->hook_entry[hook]); back = get_entry(table_base, private->underflow[hook]); @@ -273,6 +273,7 @@ unsigned int arpt_do_table(struct sk_buff *skb, hdr_len = sizeof(*arp) + (2 * sizeof(struct in_addr)) + (2 * skb->dev->addr_len); + ADD_COUNTER(e->counters, hdr_len, 1); t = arpt_get_target(e); @@ -328,8 +329,7 @@ unsigned int arpt_do_table(struct sk_buff *skb, e = (void *)e + e->next_offset; } } while (!hotdrop); - - rcu_read_unlock_bh(); + xt_info_rdunlock_bh(); if (hotdrop) return NF_DROP; @@ -711,9 +711,12 @@ static void get_counters(const struct xt_table_info *t, /* Instead of clearing (by a previous call to memset()) * the counters and using adds, we set the counters * with data used by 'current' CPU - * We dont care about preemption here. + * + * Bottom half has to be disabled to prevent deadlock + * if new softirq were to run and call ipt_do_table */ - curcpu = raw_smp_processor_id(); + local_bh_disable(); + curcpu = smp_processor_id(); i = 0; ARPT_ENTRY_ITERATE(t->entries[curcpu], @@ -726,73 +729,22 @@ static void get_counters(const struct xt_table_info *t, if (cpu == curcpu) continue; i = 0; + xt_info_wrlock(cpu); ARPT_ENTRY_ITERATE(t->entries[cpu], t->size, add_entry_to_counter, counters, &i); + xt_info_wrunlock(cpu); } -} - - -/* We're lazy, and add to the first CPU; overflow works its fey magic - * and everything is OK. 
*/ -static int -add_counter_to_entry(struct arpt_entry *e, - const struct xt_counters addme[], - unsigned int *i) -{ - ADD_COUNTER(e->counters, addme[*i].bcnt, addme[*i].pcnt); - - (*i)++; - return 0; -} - -/* Take values from counters and add them back onto the current cpu */ -static void put_counters(struct xt_table_info *t, - const struct xt_counters counters[]) -{ - unsigned int i, cpu; - - local_bh_disable(); - cpu = smp_processor_id(); - i = 0; - ARPT_ENTRY_ITERATE(t->entries[cpu], - t->size, - add_counter_to_entry, - counters, - &i); local_bh_enable(); } -static inline int -zero_entry_counter(struct arpt_entry *e, void *arg) -{ - e->counters.bcnt = 0; - e->counters.pcnt = 0; - return 0; -} - -static void -clone_counters(struct xt_table_info *newinfo, const struct xt_table_info *info) -{ - unsigned int cpu; - const void *loc_cpu_entry = info->entries[raw_smp_processor_id()]; - - memcpy(newinfo, info, offsetof(struct xt_table_info, entries)); - for_each_possible_cpu(cpu) { - memcpy(newinfo->entries[cpu], loc_cpu_entry, info->size); - ARPT_ENTRY_ITERATE(newinfo->entries[cpu], newinfo->size, - zero_entry_counter, NULL); - } -} - static struct xt_counters *alloc_counters(struct xt_table *table) { unsigned int countersize; struct xt_counters *counters; struct xt_table_info *private = table->private; - struct xt_table_info *info; /* We need atomic snapshot of counters: rest doesn't change * (other than comefrom, which userspace doesn't care @@ -802,30 +754,11 @@ static struct xt_counters *alloc_counters(struct xt_table *table) counters = vmalloc_node(countersize, numa_node_id()); if (counters == NULL) - goto nomem; - - info = xt_alloc_table_info(private->size); - if (!info) - goto free_counters; - - clone_counters(info, private); - - mutex_lock(&table->lock); - xt_table_entry_swap_rcu(private, info); - synchronize_net(); /* Wait until smoke has cleared */ + return ERR_PTR(-ENOMEM); - get_counters(info, counters); - put_counters(private, counters); - mutex_unlock(&table->lock); - - xt_free_table_info(info); + get_counters(private, counters); return counters; - - free_counters: - vfree(counters); - nomem: - return ERR_PTR(-ENOMEM); } static int copy_entries_to_user(unsigned int total_size, @@ -1094,8 +1027,9 @@ static int __do_replace(struct net *net, const char *name, (newinfo->number <= oldinfo->initial_entries)) module_put(t->me); - /* Get the old counters. */ + /* Get the old counters, and synchronize with replace */ get_counters(oldinfo, counters); + /* Decrease module usage counts and free resource */ loc_cpu_old_entry = oldinfo->entries[raw_smp_processor_id()]; ARPT_ENTRY_ITERATE(loc_cpu_old_entry, oldinfo->size, cleanup_entry, @@ -1165,10 +1099,23 @@ static int do_replace(struct net *net, void __user *user, unsigned int len) return ret; } +/* We're lazy, and add to the first CPU; overflow works its fey magic + * and everything is OK. 
*/ +static int +add_counter_to_entry(struct arpt_entry *e, + const struct xt_counters addme[], + unsigned int *i) +{ + ADD_COUNTER(e->counters, addme[*i].bcnt, addme[*i].pcnt); + + (*i)++; + return 0; +} + static int do_add_counters(struct net *net, void __user *user, unsigned int len, int compat) { - unsigned int i; + unsigned int i, curcpu; struct xt_counters_info tmp; struct xt_counters *paddc; unsigned int num_counters; @@ -1224,26 +1171,26 @@ static int do_add_counters(struct net *net, void __user *user, unsigned int len, goto free; } - mutex_lock(&t->lock); + local_bh_disable(); private = t->private; if (private->number != num_counters) { ret = -EINVAL; goto unlock_up_free; } - preempt_disable(); i = 0; /* Choose the copy that is on our node */ - loc_cpu_entry = private->entries[smp_processor_id()]; + curcpu = smp_processor_id(); + loc_cpu_entry = private->entries[curcpu]; + xt_info_wrlock(curcpu); ARPT_ENTRY_ITERATE(loc_cpu_entry, private->size, add_counter_to_entry, paddc, &i); - preempt_enable(); + xt_info_wrunlock(curcpu); unlock_up_free: - mutex_unlock(&t->lock); - + local_bh_enable(); xt_table_unlock(t); module_put(t->me); free: diff --git a/net/ipv4/netfilter/ip_tables.c b/net/ipv4/netfilter/ip_tables.c index 810c0b62c7d4..2ec8d7290c40 100644 --- a/net/ipv4/netfilter/ip_tables.c +++ b/net/ipv4/netfilter/ip_tables.c @@ -338,10 +338,9 @@ ipt_do_table(struct sk_buff *skb, tgpar.hooknum = hook; IP_NF_ASSERT(table->valid_hooks & (1 << hook)); - - rcu_read_lock_bh(); - private = rcu_dereference(table->private); - table_base = rcu_dereference(private->entries[smp_processor_id()]); + xt_info_rdlock_bh(); + private = table->private; + table_base = private->entries[smp_processor_id()]; e = get_entry(table_base, private->hook_entry[hook]); @@ -436,8 +435,7 @@ ipt_do_table(struct sk_buff *skb, e = (void *)e + e->next_offset; } } while (!hotdrop); - - rcu_read_unlock_bh(); + xt_info_rdunlock_bh(); #ifdef DEBUG_ALLOW_ALL return NF_ACCEPT; @@ -896,10 +894,13 @@ get_counters(const struct xt_table_info *t, /* Instead of clearing (by a previous call to memset()) * the counters and using adds, we set the counters - * with data used by 'current' CPU - * We dont care about preemption here. + * with data used by 'current' CPU. + * + * Bottom half has to be disabled to prevent deadlock + * if new softirq were to run and call ipt_do_table */ - curcpu = raw_smp_processor_id(); + local_bh_disable(); + curcpu = smp_processor_id(); i = 0; IPT_ENTRY_ITERATE(t->entries[curcpu], @@ -912,74 +913,22 @@ get_counters(const struct xt_table_info *t, if (cpu == curcpu) continue; i = 0; + xt_info_wrlock(cpu); IPT_ENTRY_ITERATE(t->entries[cpu], t->size, add_entry_to_counter, counters, &i); + xt_info_wrunlock(cpu); } - -} - -/* We're lazy, and add to the first CPU; overflow works its fey magic - * and everything is OK. 
*/ -static int -add_counter_to_entry(struct ipt_entry *e, - const struct xt_counters addme[], - unsigned int *i) -{ - ADD_COUNTER(e->counters, addme[*i].bcnt, addme[*i].pcnt); - - (*i)++; - return 0; -} - -/* Take values from counters and add them back onto the current cpu */ -static void put_counters(struct xt_table_info *t, - const struct xt_counters counters[]) -{ - unsigned int i, cpu; - - local_bh_disable(); - cpu = smp_processor_id(); - i = 0; - IPT_ENTRY_ITERATE(t->entries[cpu], - t->size, - add_counter_to_entry, - counters, - &i); local_bh_enable(); } - -static inline int -zero_entry_counter(struct ipt_entry *e, void *arg) -{ - e->counters.bcnt = 0; - e->counters.pcnt = 0; - return 0; -} - -static void -clone_counters(struct xt_table_info *newinfo, const struct xt_table_info *info) -{ - unsigned int cpu; - const void *loc_cpu_entry = info->entries[raw_smp_processor_id()]; - - memcpy(newinfo, info, offsetof(struct xt_table_info, entries)); - for_each_possible_cpu(cpu) { - memcpy(newinfo->entries[cpu], loc_cpu_entry, info->size); - IPT_ENTRY_ITERATE(newinfo->entries[cpu], newinfo->size, - zero_entry_counter, NULL); - } -} - static struct xt_counters * alloc_counters(struct xt_table *table) { unsigned int countersize; struct xt_counters *counters; struct xt_table_info *private = table->private; - struct xt_table_info *info; /* We need atomic snapshot of counters: rest doesn't change (other than comefrom, which userspace doesn't care @@ -988,30 +937,11 @@ static struct xt_counters * alloc_counters(struct xt_table *table) counters = vmalloc_node(countersize, numa_node_id()); if (counters == NULL) - goto nomem; + return ERR_PTR(-ENOMEM); - info = xt_alloc_table_info(private->size); - if (!info) - goto free_counters; - - clone_counters(info, private); - - mutex_lock(&table->lock); - xt_table_entry_swap_rcu(private, info); - synchronize_net(); /* Wait until smoke has cleared */ - - get_counters(info, counters); - put_counters(private, counters); - mutex_unlock(&table->lock); - - xt_free_table_info(info); + get_counters(private, counters); return counters; - - free_counters: - vfree(counters); - nomem: - return ERR_PTR(-ENOMEM); } static int @@ -1306,8 +1236,9 @@ __do_replace(struct net *net, const char *name, unsigned int valid_hooks, (newinfo->number <= oldinfo->initial_entries)) module_put(t->me); - /* Get the old counters. */ + /* Get the old counters, and synchronize with replace */ get_counters(oldinfo, counters); + /* Decrease module usage counts and free resource */ loc_cpu_old_entry = oldinfo->entries[raw_smp_processor_id()]; IPT_ENTRY_ITERATE(loc_cpu_old_entry, oldinfo->size, cleanup_entry, @@ -1377,11 +1308,23 @@ do_replace(struct net *net, void __user *user, unsigned int len) return ret; } +/* We're lazy, and add to the first CPU; overflow works its fey magic + * and everything is OK. 
*/ +static int +add_counter_to_entry(struct ipt_entry *e, + const struct xt_counters addme[], + unsigned int *i) +{ + ADD_COUNTER(e->counters, addme[*i].bcnt, addme[*i].pcnt); + + (*i)++; + return 0; +} static int do_add_counters(struct net *net, void __user *user, unsigned int len, int compat) { - unsigned int i; + unsigned int i, curcpu; struct xt_counters_info tmp; struct xt_counters *paddc; unsigned int num_counters; @@ -1437,25 +1380,26 @@ do_add_counters(struct net *net, void __user *user, unsigned int len, int compat goto free; } - mutex_lock(&t->lock); + local_bh_disable(); private = t->private; if (private->number != num_counters) { ret = -EINVAL; goto unlock_up_free; } - preempt_disable(); i = 0; /* Choose the copy that is on our node */ - loc_cpu_entry = private->entries[raw_smp_processor_id()]; + curcpu = smp_processor_id(); + loc_cpu_entry = private->entries[curcpu]; + xt_info_wrlock(curcpu); IPT_ENTRY_ITERATE(loc_cpu_entry, private->size, add_counter_to_entry, paddc, &i); - preempt_enable(); + xt_info_wrunlock(curcpu); unlock_up_free: - mutex_unlock(&t->lock); + local_bh_enable(); xt_table_unlock(t); module_put(t->me); free: diff --git a/net/ipv4/route.c b/net/ipv4/route.c index c40debe51b38..c4c60e9f068a 100644 --- a/net/ipv4/route.c +++ b/net/ipv4/route.c @@ -3397,7 +3397,7 @@ int __init ip_rt_init(void) 0, &rt_hash_log, &rt_hash_mask, - 0); + rhash_entries ? 0 : 512 * 1024); memset(rt_hash_table, 0, (rt_hash_mask + 1) * sizeof(struct rt_hash_bucket)); rt_hash_lock_init(); diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c index fafbec8b073e..1d7f49c6f0ca 100644 --- a/net/ipv4/tcp.c +++ b/net/ipv4/tcp.c @@ -2511,6 +2511,7 @@ struct sk_buff **tcp_gro_receive(struct sk_buff **head, struct sk_buff *skb) struct sk_buff *p; struct tcphdr *th; struct tcphdr *th2; + unsigned int len; unsigned int thlen; unsigned int flags; unsigned int mss = 1; @@ -2531,6 +2532,7 @@ struct sk_buff **tcp_gro_receive(struct sk_buff **head, struct sk_buff *skb) skb_gro_pull(skb, thlen); + len = skb_gro_len(skb); flags = tcp_flag_word(th); for (; (p = *head); head = &p->next) { @@ -2561,7 +2563,7 @@ found: mss = skb_shinfo(p)->gso_size; - flush |= (skb_gro_len(skb) > mss) | !skb_gro_len(skb); + flush |= (len > mss) | !len; flush |= (ntohl(th2->seq) + skb_gro_len(p)) ^ ntohl(th->seq); if (flush || skb_gro_receive(head, skb)) { @@ -2574,7 +2576,7 @@ found: tcp_flag_word(th2) |= flags & (TCP_FLAG_FIN | TCP_FLAG_PSH); out_check_final: - flush = skb_gro_len(skb) < mss; + flush = len < mss; flush |= flags & (TCP_FLAG_URG | TCP_FLAG_PSH | TCP_FLAG_RST | TCP_FLAG_SYN | TCP_FLAG_FIN); diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c index 2bc8e27a163d..eec3e6f9956c 100644 --- a/net/ipv4/tcp_input.c +++ b/net/ipv4/tcp_input.c @@ -597,16 +597,6 @@ static void tcp_event_data_recv(struct sock *sk, struct sk_buff *skb) tcp_grow_window(sk, skb); } -static u32 tcp_rto_min(struct sock *sk) -{ - struct dst_entry *dst = __sk_dst_get(sk); - u32 rto_min = TCP_RTO_MIN; - - if (dst && dst_metric_locked(dst, RTAX_RTO_MIN)) - rto_min = dst_metric_rtt(dst, RTAX_RTO_MIN); - return rto_min; -} - /* Called to compute a smoothed rtt estimate. 
The data fed to this * routine either comes from timestamps, or from segments that were * known _not_ to have been retransmitted [see Karn/Partridge @@ -928,6 +918,8 @@ static void tcp_init_metrics(struct sock *sk) tcp_set_rto(sk); if (inet_csk(sk)->icsk_rto < TCP_TIMEOUT_INIT && !tp->rx_opt.saw_tstamp) goto reset; + +cwnd: tp->snd_cwnd = tcp_init_cwnd(tp, dst); tp->snd_cwnd_stamp = tcp_time_stamp; return; @@ -942,6 +934,7 @@ reset: tp->mdev = tp->mdev_max = tp->rttvar = TCP_TIMEOUT_INIT; inet_csk(sk)->icsk_rto = TCP_TIMEOUT_INIT; } + goto cwnd; } static void tcp_update_reordering(struct sock *sk, const int metric, diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c index 53300fa2359f..59aec609cec6 100644 --- a/net/ipv4/tcp_output.c +++ b/net/ipv4/tcp_output.c @@ -778,7 +778,7 @@ static void tcp_adjust_pcount(struct sock *sk, struct sk_buff *skb, int decr) if (tp->lost_skb_hint && before(TCP_SKB_CB(skb)->seq, TCP_SKB_CB(tp->lost_skb_hint)->seq) && - (tcp_is_fack(tp) || TCP_SKB_CB(skb)->sacked)) + (tcp_is_fack(tp) || (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_ACKED))) tp->lost_cnt_hint -= decr; tcp_verify_left_out(tp); diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c index bda08a09357d..7a1d1ce22e66 100644 --- a/net/ipv4/udp.c +++ b/net/ipv4/udp.c @@ -222,7 +222,7 @@ fail: return error; } -int ipv4_rcv_saddr_equal(const struct sock *sk1, const struct sock *sk2) +static int ipv4_rcv_saddr_equal(const struct sock *sk1, const struct sock *sk2) { struct inet_sock *inet1 = inet_sk(sk1), *inet2 = inet_sk(sk2); @@ -1823,7 +1823,6 @@ EXPORT_SYMBOL(udp_lib_getsockopt); EXPORT_SYMBOL(udp_lib_setsockopt); EXPORT_SYMBOL(udp_poll); EXPORT_SYMBOL(udp_lib_get_port); -EXPORT_SYMBOL(ipv4_rcv_saddr_equal); #ifdef CONFIG_PROC_FS EXPORT_SYMBOL(udp_proc_register); diff --git a/net/ipv6/ipv6_sockglue.c b/net/ipv6/ipv6_sockglue.c index d31df0f4bc9a..a7fdf9a27f15 100644 --- a/net/ipv6/ipv6_sockglue.c +++ b/net/ipv6/ipv6_sockglue.c @@ -380,10 +380,6 @@ static int do_ipv6_setsockopt(struct sock *sk, int level, int optname, default: goto sticky_done; } - - if ((rthdr->hdrlen & 1) || - (rthdr->hdrlen >> 1) != rthdr->segments_left) - goto sticky_done; } retv = 0; diff --git a/net/ipv6/netfilter/ip6_tables.c b/net/ipv6/netfilter/ip6_tables.c index 800ae8542471..219e165aea10 100644 --- a/net/ipv6/netfilter/ip6_tables.c +++ b/net/ipv6/netfilter/ip6_tables.c @@ -365,9 +365,9 @@ ip6t_do_table(struct sk_buff *skb, IP_NF_ASSERT(table->valid_hooks & (1 << hook)); - rcu_read_lock_bh(); - private = rcu_dereference(table->private); - table_base = rcu_dereference(private->entries[smp_processor_id()]); + xt_info_rdlock_bh(); + private = table->private; + table_base = private->entries[smp_processor_id()]; e = get_entry(table_base, private->hook_entry[hook]); @@ -466,7 +466,7 @@ ip6t_do_table(struct sk_buff *skb, #ifdef CONFIG_NETFILTER_DEBUG ((struct ip6t_entry *)table_base)->comefrom = NETFILTER_LINK_POISON; #endif - rcu_read_unlock_bh(); + xt_info_rdunlock_bh(); #ifdef DEBUG_ALLOW_ALL return NF_ACCEPT; @@ -926,9 +926,12 @@ get_counters(const struct xt_table_info *t, /* Instead of clearing (by a previous call to memset()) * the counters and using adds, we set the counters * with data used by 'current' CPU - * We dont care about preemption here. 
+ * + * Bottom half has to be disabled to prevent deadlock + * if new softirq were to run and call ipt_do_table */ - curcpu = raw_smp_processor_id(); + local_bh_disable(); + curcpu = smp_processor_id(); i = 0; IP6T_ENTRY_ITERATE(t->entries[curcpu], @@ -941,72 +944,22 @@ get_counters(const struct xt_table_info *t, if (cpu == curcpu) continue; i = 0; + xt_info_wrlock(cpu); IP6T_ENTRY_ITERATE(t->entries[cpu], t->size, add_entry_to_counter, counters, &i); + xt_info_wrunlock(cpu); } -} - -/* We're lazy, and add to the first CPU; overflow works its fey magic - * and everything is OK. */ -static int -add_counter_to_entry(struct ip6t_entry *e, - const struct xt_counters addme[], - unsigned int *i) -{ - ADD_COUNTER(e->counters, addme[*i].bcnt, addme[*i].pcnt); - - (*i)++; - return 0; -} - -/* Take values from counters and add them back onto the current cpu */ -static void put_counters(struct xt_table_info *t, - const struct xt_counters counters[]) -{ - unsigned int i, cpu; - - local_bh_disable(); - cpu = smp_processor_id(); - i = 0; - IP6T_ENTRY_ITERATE(t->entries[cpu], - t->size, - add_counter_to_entry, - counters, - &i); local_bh_enable(); } -static inline int -zero_entry_counter(struct ip6t_entry *e, void *arg) -{ - e->counters.bcnt = 0; - e->counters.pcnt = 0; - return 0; -} - -static void -clone_counters(struct xt_table_info *newinfo, const struct xt_table_info *info) -{ - unsigned int cpu; - const void *loc_cpu_entry = info->entries[raw_smp_processor_id()]; - - memcpy(newinfo, info, offsetof(struct xt_table_info, entries)); - for_each_possible_cpu(cpu) { - memcpy(newinfo->entries[cpu], loc_cpu_entry, info->size); - IP6T_ENTRY_ITERATE(newinfo->entries[cpu], newinfo->size, - zero_entry_counter, NULL); - } -} - static struct xt_counters *alloc_counters(struct xt_table *table) { unsigned int countersize; struct xt_counters *counters; struct xt_table_info *private = table->private; - struct xt_table_info *info; /* We need atomic snapshot of counters: rest doesn't change (other than comefrom, which userspace doesn't care @@ -1015,30 +968,11 @@ static struct xt_counters *alloc_counters(struct xt_table *table) counters = vmalloc_node(countersize, numa_node_id()); if (counters == NULL) - goto nomem; + return ERR_PTR(-ENOMEM); - info = xt_alloc_table_info(private->size); - if (!info) - goto free_counters; - - clone_counters(info, private); - - mutex_lock(&table->lock); - xt_table_entry_swap_rcu(private, info); - synchronize_net(); /* Wait until smoke has cleared */ - - get_counters(info, counters); - put_counters(private, counters); - mutex_unlock(&table->lock); - - xt_free_table_info(info); + get_counters(private, counters); return counters; - - free_counters: - vfree(counters); - nomem: - return ERR_PTR(-ENOMEM); } static int @@ -1334,8 +1268,9 @@ __do_replace(struct net *net, const char *name, unsigned int valid_hooks, (newinfo->number <= oldinfo->initial_entries)) module_put(t->me); - /* Get the old counters. */ + /* Get the old counters, and synchronize with replace */ get_counters(oldinfo, counters); + /* Decrease module usage counts and free resource */ loc_cpu_old_entry = oldinfo->entries[raw_smp_processor_id()]; IP6T_ENTRY_ITERATE(loc_cpu_old_entry, oldinfo->size, cleanup_entry, @@ -1405,11 +1340,24 @@ do_replace(struct net *net, void __user *user, unsigned int len) return ret; } +/* We're lazy, and add to the first CPU; overflow works its fey magic + * and everything is OK. 
*/ +static int +add_counter_to_entry(struct ip6t_entry *e, + const struct xt_counters addme[], + unsigned int *i) +{ + ADD_COUNTER(e->counters, addme[*i].bcnt, addme[*i].pcnt); + + (*i)++; + return 0; +} + static int do_add_counters(struct net *net, void __user *user, unsigned int len, int compat) { - unsigned int i; + unsigned int i, curcpu; struct xt_counters_info tmp; struct xt_counters *paddc; unsigned int num_counters; @@ -1465,25 +1413,28 @@ do_add_counters(struct net *net, void __user *user, unsigned int len, goto free; } - mutex_lock(&t->lock); + + local_bh_disable(); private = t->private; if (private->number != num_counters) { ret = -EINVAL; goto unlock_up_free; } - preempt_disable(); i = 0; /* Choose the copy that is on our node */ - loc_cpu_entry = private->entries[raw_smp_processor_id()]; + curcpu = smp_processor_id(); + xt_info_wrlock(curcpu); + loc_cpu_entry = private->entries[curcpu]; IP6T_ENTRY_ITERATE(loc_cpu_entry, private->size, add_counter_to_entry, paddc, &i); - preempt_enable(); + xt_info_wrunlock(curcpu); + unlock_up_free: - mutex_unlock(&t->lock); + local_bh_enable(); xt_table_unlock(t); module_put(t->me); free: diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c index 6842dd2edd5b..8905712cfbb8 100644 --- a/net/ipv6/udp.c +++ b/net/ipv6/udp.c @@ -53,6 +53,8 @@ int ipv6_rcv_saddr_equal(const struct sock *sk, const struct sock *sk2) { const struct in6_addr *sk_rcv_saddr6 = &inet6_sk(sk)->rcv_saddr; const struct in6_addr *sk2_rcv_saddr6 = inet6_rcv_saddr(sk2); + __be32 sk_rcv_saddr = inet_sk(sk)->rcv_saddr; + __be32 sk2_rcv_saddr = inet_rcv_saddr(sk2); int sk_ipv6only = ipv6_only_sock(sk); int sk2_ipv6only = inet_v6_ipv6only(sk2); int addr_type = ipv6_addr_type(sk_rcv_saddr6); @@ -60,7 +62,9 @@ int ipv6_rcv_saddr_equal(const struct sock *sk, const struct sock *sk2) /* if both are mapped, treat as IPv4 */ if (addr_type == IPV6_ADDR_MAPPED && addr_type2 == IPV6_ADDR_MAPPED) - return ipv4_rcv_saddr_equal(sk, sk2); + return (!sk2_ipv6only && + (!sk_rcv_saddr || !sk2_rcv_saddr || + sk_rcv_saddr == sk2_rcv_saddr)); if (addr_type2 == IPV6_ADDR_ANY && !(sk2_ipv6only && addr_type == IPV6_ADDR_MAPPED)) diff --git a/net/ipv6/xfrm6_output.c b/net/ipv6/xfrm6_output.c index 0af823cf7f1f..5ee5a031bc93 100644 --- a/net/ipv6/xfrm6_output.c +++ b/net/ipv6/xfrm6_output.c @@ -72,6 +72,7 @@ int xfrm6_prepare_output(struct xfrm_state *x, struct sk_buff *skb) #endif skb->protocol = htons(ETH_P_IPV6); + skb->local_df = 1; return x->outer_mode->output2(x, skb); } diff --git a/net/iucv/af_iucv.c b/net/iucv/af_iucv.c index 49e786535dc8..b51c9187c347 100644 --- a/net/iucv/af_iucv.c +++ b/net/iucv/af_iucv.c @@ -172,6 +172,7 @@ static void iucv_sock_close(struct sock *sk) err = iucv_sock_wait_state(sk, IUCV_CLOSED, 0, timeo); } + case IUCV_CLOSING: /* fall through */ sk->sk_state = IUCV_CLOSED; sk->sk_state_change(sk); @@ -224,6 +225,8 @@ static struct sock *iucv_sock_alloc(struct socket *sock, int proto, gfp_t prio) spin_lock_init(&iucv_sk(sk)->message_q.lock); skb_queue_head_init(&iucv_sk(sk)->backlog_skb_q); iucv_sk(sk)->send_tag = 0; + iucv_sk(sk)->path = NULL; + memset(&iucv_sk(sk)->src_user_id , 0, 32); sk->sk_destruct = iucv_sock_destruct; sk->sk_sndtimeo = IUCV_CONN_TIMEOUT; @@ -811,6 +814,8 @@ static int iucv_sock_recvmsg(struct kiocb *iocb, struct socket *sock, target = sock_rcvlowat(sk, flags & MSG_WAITALL, len); + /* receive/dequeue next skb: + * the function understands MSG_PEEK and, thus, does not dequeue skb */ skb = skb_recv_datagram(sk, flags, noblock, &err); if (!skb) { if 
(sk->sk_shutdown & RCV_SHUTDOWN) @@ -858,9 +863,7 @@ static int iucv_sock_recvmsg(struct kiocb *iocb, struct socket *sock, iucv_process_message_q(sk); spin_unlock_bh(&iucv->message_q.lock); } - - } else - skb_queue_head(&sk->sk_receive_queue, skb); + } done: return err ? : copied; @@ -934,6 +937,9 @@ static int iucv_sock_shutdown(struct socket *sock, int how) lock_sock(sk); switch (sk->sk_state) { + case IUCV_DISCONN: + case IUCV_CLOSING: + case IUCV_SEVERED: case IUCV_CLOSED: err = -ENOTCONN; goto fail; @@ -1113,8 +1119,12 @@ static void iucv_callback_rx(struct iucv_path *path, struct iucv_message *msg) struct sock_msg_q *save_msg; int len; - if (sk->sk_shutdown & RCV_SHUTDOWN) + if (sk->sk_shutdown & RCV_SHUTDOWN) { + iucv_message_reject(path, msg); return; + } + + spin_lock(&iucv->message_q.lock); if (!list_empty(&iucv->message_q.list) || !skb_queue_empty(&iucv->backlog_skb_q)) @@ -1129,9 +1139,8 @@ static void iucv_callback_rx(struct iucv_path *path, struct iucv_message *msg) if (!skb) goto save_message; - spin_lock(&iucv->message_q.lock); iucv_process_message(sk, skb, path, msg); - spin_unlock(&iucv->message_q.lock); + goto out_unlock; return; @@ -1142,8 +1151,9 @@ save_message: save_msg->path = path; save_msg->msg = *msg; - spin_lock(&iucv->message_q.lock); list_add_tail(&save_msg->list, &iucv->message_q.list); + +out_unlock: spin_unlock(&iucv->message_q.lock); } diff --git a/net/mac80211/Kconfig b/net/mac80211/Kconfig index f3d9ae350fb6..ecc3faf9f11a 100644 --- a/net/mac80211/Kconfig +++ b/net/mac80211/Kconfig @@ -202,10 +202,3 @@ config MAC80211_DEBUG_COUNTERS and show them in debugfs. If unsure, say N. - -config MAC80211_VERBOSE_SPECT_MGMT_DEBUG - bool "Verbose Spectrum Management (IEEE 802.11h)debugging" - depends on MAC80211_DEBUG_MENU - ---help--- - Say Y here to print out verbose Spectrum Management (IEEE 802.11h) - debug messages. 
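The af_iucv hunks above (just before the mac80211 Kconfig change) widen message_q.lock so that testing the pending queues and saving an unprocessed message happen in one critical section, and they reject incoming messages once RCV_SHUTDOWN is set. A condensed sketch of the resulting iucv_callback_rx() control flow; skb allocation and iucv_process_message() are elided:

/* Condensed sketch of iucv_callback_rx() after this series;
 * only the locking and branch structure from the hunks above. */
static void iucv_callback_rx_sketch(struct iucv_path *path,
				    struct iucv_message *msg)
{
	struct sock *sk = path->private;
	struct iucv_sock *iucv = iucv_sk(sk);

	if (sk->sk_shutdown & RCV_SHUTDOWN) {
		/* new: tell the peer instead of silently dropping */
		iucv_message_reject(path, msg);
		return;
	}

	/* one lock now covers the emptiness test, immediate delivery
	 * and the save-for-later path */
	spin_lock(&iucv->message_q.lock);

	if (!list_empty(&iucv->message_q.list) ||
	    !skb_queue_empty(&iucv->backlog_skb_q))
		goto save_message;

	/* ... allocate an skb and call iucv_process_message() ... */
	goto out_unlock;

save_message:
	/* ... kmalloc a struct sock_msg_q entry, list_add_tail() it ... */

out_unlock:
	spin_unlock(&iucv->message_q.lock);
}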
diff --git a/net/mac80211/main.c b/net/mac80211/main.c index a6f1d8a869bc..14134193cd17 100644 --- a/net/mac80211/main.c +++ b/net/mac80211/main.c @@ -258,7 +258,7 @@ int ieee80211_hw_config(struct ieee80211_local *local, u32 changed) (chan->max_power - local->power_constr_level) : chan->max_power; - if (local->user_power_level) + if (local->user_power_level >= 0) power = min(power, local->user_power_level); if (local->hw.conf.power_level != power) { @@ -757,6 +757,7 @@ struct ieee80211_hw *ieee80211_alloc_hw(size_t priv_data_len, local->hw.conf.long_frame_max_tx_count = 4; local->hw.conf.short_frame_max_tx_count = 7; local->hw.conf.radio_enabled = true; + local->user_power_level = -1; INIT_LIST_HEAD(&local->interfaces); mutex_init(&local->iflist_mtx); @@ -909,6 +910,13 @@ int ieee80211_register_hw(struct ieee80211_hw *hw) if (result < 0) goto fail_sta_info; + result = ieee80211_wep_init(local); + if (result < 0) { + printk(KERN_DEBUG "%s: Failed to initialize wep: %d\n", + wiphy_name(local->hw.wiphy), result); + goto fail_wep; + } + rtnl_lock(); result = dev_alloc_name(local->mdev, local->mdev->name); if (result < 0) @@ -930,14 +938,6 @@ int ieee80211_register_hw(struct ieee80211_hw *hw) goto fail_rate; } - result = ieee80211_wep_init(local); - - if (result < 0) { - printk(KERN_DEBUG "%s: Failed to initialize wep: %d\n", - wiphy_name(local->hw.wiphy), result); - goto fail_wep; - } - /* add one default STA interface if supported */ if (local->hw.wiphy->interface_modes & BIT(NL80211_IFTYPE_STATION)) { result = ieee80211_if_add(local, "wlan%d", NULL, @@ -967,13 +967,13 @@ int ieee80211_register_hw(struct ieee80211_hw *hw) return 0; -fail_wep: - rate_control_deinitialize(local); fail_rate: unregister_netdevice(local->mdev); local->mdev = NULL; fail_dev: rtnl_unlock(); + ieee80211_wep_free(local); +fail_wep: sta_info_stop(local); fail_sta_info: debugfs_hw_del(local); diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c index 7ecda9d59d8a..132938b073dc 100644 --- a/net/mac80211/mlme.c +++ b/net/mac80211/mlme.c @@ -441,6 +441,9 @@ static bool ieee80211_check_tim(struct ieee802_11_elems *elems, u16 aid) u8 index, indexn1, indexn2; struct ieee80211_tim_ie *tim = (struct ieee80211_tim_ie *) elems->tim; + if (unlikely(!tim || elems->tim_len < 4)) + return false; + aid &= 0x3fff; index = aid / 8; mask = 1 << (aid & 7); @@ -945,9 +948,13 @@ void ieee80211_beacon_loss_work(struct work_struct *work) u.mgd.beacon_loss_work); struct ieee80211_if_managed *ifmgd = &sdata->u.mgd; - printk(KERN_DEBUG "%s: driver reports beacon loss from AP %pM " - "- sending probe request\n", sdata->dev->name, - sdata->u.mgd.bssid); +#ifdef CONFIG_MAC80211_VERBOSE_DEBUG + if (net_ratelimit()) { + printk(KERN_DEBUG "%s: driver reports beacon loss from AP %pM " + "- sending probe request\n", sdata->dev->name, + sdata->u.mgd.bssid); + } +#endif ifmgd->flags |= IEEE80211_STA_PROBEREQ_POLL; ieee80211_send_probe_req(sdata, ifmgd->bssid, ifmgd->ssid, @@ -1007,9 +1014,13 @@ static void ieee80211_associated(struct ieee80211_sub_if_data *sdata) (local->hw.conf.flags & IEEE80211_CONF_PS)) && time_after(jiffies, ifmgd->last_beacon + IEEE80211_MONITORING_INTERVAL)) { - printk(KERN_DEBUG "%s: beacon loss from AP %pM " - "- sending probe request\n", - sdata->dev->name, ifmgd->bssid); +#ifdef CONFIG_MAC80211_VERBOSE_DEBUG + if (net_ratelimit()) { + printk(KERN_DEBUG "%s: beacon loss from AP %pM " + "- sending probe request\n", + sdata->dev->name, ifmgd->bssid); + } +#endif ifmgd->flags |= IEEE80211_STA_PROBEREQ_POLL; 
ieee80211_send_probe_req(sdata, ifmgd->bssid, ifmgd->ssid, ifmgd->ssid_len, NULL, 0); @@ -1355,7 +1366,7 @@ static void ieee80211_rx_mgmt_assoc_resp(struct ieee80211_sub_if_data *sdata, for (i = 0; i < elems.ext_supp_rates_len; i++) { int rate = (elems.ext_supp_rates[i] & 0x7f) * 5; - bool is_basic = !!(elems.supp_rates[i] & 0x80); + bool is_basic = !!(elems.ext_supp_rates[i] & 0x80); if (rate > 110) have_higher_than_11mbit = true; @@ -1902,9 +1913,17 @@ static void ieee80211_sta_work(struct work_struct *work) static void ieee80211_restart_sta_timer(struct ieee80211_sub_if_data *sdata) { - if (sdata->vif.type == NL80211_IFTYPE_STATION) + if (sdata->vif.type == NL80211_IFTYPE_STATION) { + /* + * Need to update last_beacon to avoid beacon loss + * test to trigger. + */ + sdata->u.mgd.last_beacon = jiffies; + + queue_work(sdata->local->hw.workqueue, &sdata->u.mgd.work); + } } /* interface setup */ @@ -2105,12 +2124,13 @@ void ieee80211_dynamic_ps_enable_work(struct work_struct *work) struct ieee80211_local *local = container_of(work, struct ieee80211_local, dynamic_ps_enable_work); + /* XXX: using scan_sdata is completely broken! */ struct ieee80211_sub_if_data *sdata = local->scan_sdata; if (local->hw.conf.flags & IEEE80211_CONF_PS) return; - if (local->hw.flags & IEEE80211_HW_PS_NULLFUNC_STACK) + if (local->hw.flags & IEEE80211_HW_PS_NULLFUNC_STACK && sdata) ieee80211_send_nullfunc(local, sdata, 1); local->hw.conf.flags |= IEEE80211_CONF_PS; diff --git a/net/mac80211/pm.c b/net/mac80211/pm.c index 027302326498..81985d27cbda 100644 --- a/net/mac80211/pm.c +++ b/net/mac80211/pm.c @@ -156,8 +156,19 @@ int __ieee80211_resume(struct ieee80211_hw *hw) case NL80211_IFTYPE_ADHOC: case NL80211_IFTYPE_AP: case NL80211_IFTYPE_MESH_POINT: - WARN_ON(ieee80211_if_config(sdata, changed)); - ieee80211_bss_info_change_notify(sdata, ~0); + /* + * Driver's config_interface can fail if rfkill is + * enabled. Accommodate this return code. + * FIXME: When mac80211 has knowledge of rfkill + * state the code below can change back to: + * WARN(ieee80211_if_config(sdata, changed)); + * ieee80211_bss_info_change_notify(sdata, ~0); + */ + if (ieee80211_if_config(sdata, changed)) + printk(KERN_DEBUG "%s: failed to configure interface during resume\n", + sdata->dev->name); + else + ieee80211_bss_info_change_notify(sdata, ~0); break; case NL80211_IFTYPE_WDS: break; diff --git a/net/mac80211/rc80211_minstrel.c b/net/mac80211/rc80211_minstrel.c index 3824990d340b..70df3dcc3cf6 100644 --- a/net/mac80211/rc80211_minstrel.c +++ b/net/mac80211/rc80211_minstrel.c @@ -476,7 +476,7 @@ minstrel_alloc_sta(void *priv, struct ieee80211_sta *sta, gfp_t gfp) return NULL; for (i = 0; i < IEEE80211_NUM_BANDS; i++) { - sband = hw->wiphy->bands[hw->conf.channel->band]; + sband = hw->wiphy->bands[i]; if (sband->n_bitrates > max_rates) max_rates = sband->n_bitrates; } diff --git a/net/mac80211/rc80211_pid_algo.c b/net/mac80211/rc80211_pid_algo.c index b16801cde06f..01d59a8e334c 100644 --- a/net/mac80211/rc80211_pid_algo.c +++ b/net/mac80211/rc80211_pid_algo.c @@ -317,13 +317,44 @@ rate_control_pid_rate_init(void *priv, struct ieee80211_supported_band *sband, struct ieee80211_sta *sta, void *priv_sta) { struct rc_pid_sta_info *spinfo = priv_sta; + struct rc_pid_info *pinfo = priv; + struct rc_pid_rateinfo *rinfo = pinfo->rinfo; struct sta_info *si; + int i, j, tmp; + bool s; /* TODO: This routine should consider using RSSI from previous packets * as we need to have IEEE 802.1X auth succeed immediately after assoc.. 
* Until that method is implemented, we will use the lowest supported * rate as a workaround. */ + /* Sort the rates. This is optimized for the most common case (i.e. + * almost-sorted CCK+OFDM rates). Kind of bubble-sort with reversed + * mapping too. */ + for (i = 0; i < sband->n_bitrates; i++) { + rinfo[i].index = i; + rinfo[i].rev_index = i; + if (RC_PID_FAST_START) + rinfo[i].diff = 0; + else + rinfo[i].diff = i * pinfo->norm_offset; + } + for (i = 1; i < sband->n_bitrates; i++) { + s = 0; + for (j = 0; j < sband->n_bitrates - i; j++) + if (unlikely(sband->bitrates[rinfo[j].index].bitrate > + sband->bitrates[rinfo[j + 1].index].bitrate)) { + tmp = rinfo[j].index; + rinfo[j].index = rinfo[j + 1].index; + rinfo[j + 1].index = tmp; + rinfo[rinfo[j].index].rev_index = j; + rinfo[rinfo[j + 1].index].rev_index = j + 1; + s = 1; + } + if (!s) + break; + } + spinfo->txrate_idx = rate_lowest_index(sband, sta); /* HACK */ si = container_of(sta, struct sta_info, sta); @@ -336,21 +367,22 @@ static void *rate_control_pid_alloc(struct ieee80211_hw *hw, struct rc_pid_info *pinfo; struct rc_pid_rateinfo *rinfo; struct ieee80211_supported_band *sband; - int i, j, tmp; - bool s; + int i, max_rates = 0; #ifdef CONFIG_MAC80211_DEBUGFS struct rc_pid_debugfs_entries *de; #endif - sband = hw->wiphy->bands[hw->conf.channel->band]; - pinfo = kmalloc(sizeof(*pinfo), GFP_ATOMIC); if (!pinfo) return NULL; - /* We can safely assume that sband won't change unless we get - * reinitialized. */ - rinfo = kmalloc(sizeof(*rinfo) * sband->n_bitrates, GFP_ATOMIC); + for (i = 0; i < IEEE80211_NUM_BANDS; i++) { + sband = hw->wiphy->bands[i]; + if (sband->n_bitrates > max_rates) + max_rates = sband->n_bitrates; + } + + rinfo = kmalloc(sizeof(*rinfo) * max_rates, GFP_ATOMIC); if (!rinfo) { kfree(pinfo); return NULL; @@ -368,33 +400,6 @@ static void *rate_control_pid_alloc(struct ieee80211_hw *hw, pinfo->rinfo = rinfo; pinfo->oldrate = 0; - /* Sort the rates. This is optimized for the most common case (i.e. - * almost-sorted CCK+OFDM rates). Kind of bubble-sort with reversed - * mapping too. */ - for (i = 0; i < sband->n_bitrates; i++) { - rinfo[i].index = i; - rinfo[i].rev_index = i; - if (RC_PID_FAST_START) - rinfo[i].diff = 0; - else - rinfo[i].diff = i * pinfo->norm_offset; - } - for (i = 1; i < sband->n_bitrates; i++) { - s = 0; - for (j = 0; j < sband->n_bitrates - i; j++) - if (unlikely(sband->bitrates[rinfo[j].index].bitrate > - sband->bitrates[rinfo[j + 1].index].bitrate)) { - tmp = rinfo[j].index; - rinfo[j].index = rinfo[j + 1].index; - rinfo[j + 1].index = tmp; - rinfo[rinfo[j].index].rev_index = j; - rinfo[rinfo[j + 1].index].rev_index = j + 1; - s = 1; - } - if (!s) - break; - } - #ifdef CONFIG_MAC80211_DEBUGFS de = &pinfo->dentries; de->target = debugfs_create_u32("target_pf", S_IRUSR | S_IWUSR, diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c index 64ebe664effc..9776f73c51ad 100644 --- a/net/mac80211/rx.c +++ b/net/mac80211/rx.c @@ -29,6 +29,7 @@ static u8 ieee80211_sta_manage_reorder_buf(struct ieee80211_hw *hw, struct tid_ampdu_rx *tid_agg_rx, struct sk_buff *skb, + struct ieee80211_rx_status *status, u16 mpdu_seq_num, int bar_req); /* @@ -1396,7 +1397,7 @@ ieee80211_deliver_skb(struct ieee80211_rx_data *rx) * mac80211. That also explains the __skb_push() * below. 
diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c
index 64ebe664effc..9776f73c51ad 100644
--- a/net/mac80211/rx.c
+++ b/net/mac80211/rx.c
@@ -29,6 +29,7 @@
 static u8 ieee80211_sta_manage_reorder_buf(struct ieee80211_hw *hw,
 					   struct tid_ampdu_rx *tid_agg_rx,
 					   struct sk_buff *skb,
+					   struct ieee80211_rx_status *status,
 					   u16 mpdu_seq_num,
 					   int bar_req);
 /*
@@ -1396,7 +1397,7 @@ ieee80211_deliver_skb(struct ieee80211_rx_data *rx)
 		 * mac80211. That also explains the __skb_push()
 		 * below.
 		 */
-		align = (unsigned long)skb->data & 4;
+		align = (unsigned long)skb->data & 3;
 		if (align) {
 			if (WARN_ON(skb_headroom(skb) < 3)) {
 				dev_kfree_skb(skb);
@@ -1688,7 +1689,7 @@ ieee80211_rx_h_ctrl(struct ieee80211_rx_data *rx)
 		/* manage reordering buffer according to requested */
 		/* sequence number */
 		rcu_read_lock();
-		ieee80211_sta_manage_reorder_buf(hw, tid_agg_rx, NULL,
+		ieee80211_sta_manage_reorder_buf(hw, tid_agg_rx, NULL, NULL,
 						 start_seq_num, 1);
 		rcu_read_unlock();
 		return RX_DROP_UNUSABLE;
@@ -2293,6 +2294,7 @@ static inline u16 seq_sub(u16 sq1, u16 sq2)
 static u8 ieee80211_sta_manage_reorder_buf(struct ieee80211_hw *hw,
 					   struct tid_ampdu_rx *tid_agg_rx,
 					   struct sk_buff *skb,
+					   struct ieee80211_rx_status *rxstatus,
 					   u16 mpdu_seq_num,
 					   int bar_req)
 {
@@ -2374,6 +2376,8 @@ static u8 ieee80211_sta_manage_reorder_buf(struct ieee80211_hw *hw,
 
 	/* put the frame in the reordering buffer */
 	tid_agg_rx->reorder_buf[index] = skb;
+	memcpy(tid_agg_rx->reorder_buf[index]->cb, rxstatus,
+	       sizeof(*rxstatus));
 	tid_agg_rx->stored_mpdu_num++;
 	/* release the buffer until next missing frame */
 	index = seq_sub(tid_agg_rx->head_seq_num, tid_agg_rx->ssn)
@@ -2399,7 +2403,8 @@ static u8 ieee80211_sta_manage_reorder_buf(struct ieee80211_hw *hw,
 }
 
 static u8 ieee80211_rx_reorder_ampdu(struct ieee80211_local *local,
-				     struct sk_buff *skb)
+				     struct sk_buff *skb,
+				     struct ieee80211_rx_status *status)
 {
 	struct ieee80211_hw *hw = &local->hw;
 	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
@@ -2448,7 +2453,7 @@ static u8 ieee80211_rx_reorder_ampdu(struct ieee80211_local *local,
 
 	/* according to mpdu sequence number deal with reordering buffer */
 	mpdu_seq_num = (sc & IEEE80211_SCTL_SEQ) >> 4;
-	ret = ieee80211_sta_manage_reorder_buf(hw, tid_agg_rx, skb,
+	ret = ieee80211_sta_manage_reorder_buf(hw, tid_agg_rx, skb, status,
 					       mpdu_seq_num, 0);
 end_reorder:
 	return ret;
@@ -2512,7 +2517,7 @@ void __ieee80211_rx(struct ieee80211_hw *hw, struct sk_buff *skb,
 		return;
 	}
 
-	if (!ieee80211_rx_reorder_ampdu(local, skb))
+	if (!ieee80211_rx_reorder_ampdu(local, skb, status))
 		__ieee80211_rx_handle_packet(hw, skb, status, rate);
 
 	rcu_read_unlock();
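The central rx.c fix is the alignment mask: an address's offset modulo 4 lives in its low two bits, so `& 3` catches the 2-byte misalignment the code wants to repair, whereas the old `& 4` tested a bit that carries no alignment information. A tiny deterministic demonstration over example addresses:

    #include <stdio.h>

    int main(void)
    {
        unsigned long addrs[] = { 0x1000, 0x1001, 0x1002, 0x1004 };

        for (int i = 0; i < 4; i++)
            printf("addr 0x%lx: &3 -> %lu (misalignment), &4 -> %lu\n",
                   addrs[i], addrs[i] & 3, addrs[i] & 4);
        /* 0x1002 is misaligned yet 0x1002 & 4 == 0; 0x1004 is aligned
         * yet 0x1004 & 4 != 0 -- the old test was wrong both ways. */
        return 0;
    }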
diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c
index 3fb04a86444d..63656266d567 100644
--- a/net/mac80211/tx.c
+++ b/net/mac80211/tx.c
@@ -772,7 +772,7 @@ ieee80211_tx_h_fragment(struct ieee80211_tx_data *tx)
 	hdrlen = ieee80211_hdrlen(hdr->frame_control);
 
 	/* internal error, why is TX_FRAGMENTED set? */
-	if (WARN_ON(skb->len <= frag_threshold))
+	if (WARN_ON(skb->len + FCS_LEN <= frag_threshold))
 		return TX_DROP;
 
 	/*
diff --git a/net/mac80211/wext.c b/net/mac80211/wext.c
index deb4ecec122a..959aa8379ccf 100644
--- a/net/mac80211/wext.c
+++ b/net/mac80211/wext.c
@@ -417,6 +417,7 @@ static int ieee80211_ioctl_siwtxpower(struct net_device *dev,
 {
 	struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr);
 	struct ieee80211_channel* chan = local->hw.conf.channel;
+	bool reconf = false;
 	u32 reconf_flags = 0;
 	int new_power_level;
 
@@ -427,14 +428,38 @@ static int ieee80211_ioctl_siwtxpower(struct net_device *dev,
 	if (!chan)
 		return -EINVAL;
 
-	if (data->txpower.fixed)
-		new_power_level = min(data->txpower.value, chan->max_power);
-	else /* Automatic power level setting */
-		new_power_level = chan->max_power;
+	/* only change when not disabling */
+	if (!data->txpower.disabled) {
+		if (data->txpower.fixed) {
+			if (data->txpower.value < 0)
+				return -EINVAL;
+			new_power_level = data->txpower.value;
+			/*
+			 * Debatable, but we cannot do a fixed power
+			 * level above the regulatory constraint.
+			 * Use "iwconfig wlan0 txpower 15dBm" instead.
+			 */
+			if (new_power_level > chan->max_power)
+				return -EINVAL;
+		} else {
+			/*
+			 * Automatic power level setting, max being the value
+			 * passed in from userland.
+			 */
+			if (data->txpower.value < 0)
+				new_power_level = -1;
+			else
+				new_power_level = data->txpower.value;
+		}
+
+		reconf = true;
-	local->user_power_level = new_power_level;
-	if (local->hw.conf.power_level != new_power_level)
-		reconf_flags |= IEEE80211_CONF_CHANGE_POWER;
+		/*
+		 * ieee80211_hw_config() will limit to the channel's
+		 * max power and possibly power constraint from AP.
+		 */
+		local->user_power_level = new_power_level;
+	}
 
 	if (local->hw.conf.radio_enabled != !(data->txpower.disabled)) {
 		local->hw.conf.radio_enabled = !(data->txpower.disabled);
@@ -442,7 +467,7 @@ static int ieee80211_ioctl_siwtxpower(struct net_device *dev,
 		ieee80211_led_radio(local, local->hw.conf.radio_enabled);
 	}
 
-	if (reconf_flags)
+	if (reconf || reconf_flags)
 		ieee80211_hw_config(local, reconf_flags);
 
 	return 0;
@@ -530,7 +555,7 @@ static int ieee80211_ioctl_giwfrag(struct net_device *dev,
 	struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr);
 
 	frag->value = local->fragmentation_threshold;
-	frag->disabled = (frag->value >= IEEE80211_MAX_RTS_THRESHOLD);
+	frag->disabled = (frag->value >= IEEE80211_MAX_FRAG_THRESHOLD);
 	frag->fixed = 1;
 
 	return 0;
diff --git a/net/netfilter/Kconfig b/net/netfilter/Kconfig
index 881203c4a142..cb3ad741ebf8 100644
--- a/net/netfilter/Kconfig
+++ b/net/netfilter/Kconfig
@@ -837,6 +837,7 @@ config NETFILTER_XT_MATCH_SOCKET
 	depends on NETFILTER_TPROXY
 	depends on NETFILTER_XTABLES
 	depends on NETFILTER_ADVANCED
+	depends on !NF_CONNTRACK || NF_CONNTRACK
 	select NF_DEFRAG_IPV4
 	help
 	  This option adds a `socket' match, which can be used to match
diff --git a/net/netfilter/nf_conntrack_netlink.c b/net/netfilter/nf_conntrack_netlink.c
index fd77619256a8..c523f0b8cee5 100644
--- a/net/netfilter/nf_conntrack_netlink.c
+++ b/net/netfilter/nf_conntrack_netlink.c
@@ -988,7 +988,7 @@ ctnetlink_change_helper(struct nf_conn *ct, struct nlattr *cda[])
 {
 	struct nf_conntrack_helper *helper;
 	struct nf_conn_help *help = nfct_help(ct);
-	char *helpname;
+	char *helpname = NULL;
 	int err;
 
 	/* don't change helper of sibling connections */
@@ -1209,7 +1209,7 @@ ctnetlink_create_conntrack(struct nlattr *cda[],
 
 	rcu_read_lock();
 	if (cda[CTA_HELP]) {
-		char *helpname;
+		char *helpname = NULL;
 
 		err = ctnetlink_parse_help(cda[CTA_HELP], &helpname);
 		if (err < 0)
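Both helpname hunks apply one defensive rule: initialize an out-pointer before passing it to a parser that can report success without writing it. A hypothetical standalone analogue (parse_help and lookup_helper are illustrative, not the ctnetlink API):

    #include <stddef.h>
    #include <string.h>

    /* pretend-parser: only writes *name when something is present */
    static int parse_help(const char *attr, const char **name)
    {
        if (attr && strlen(attr) > 0)
            *name = attr;
        return 0;    /* success either way; *name may be untouched */
    }

    static const char *lookup_helper(const char *attr)
    {
        const char *name = NULL;    /* init: never read stack garbage */

        if (parse_help(attr, &name) < 0)
            return NULL;
        return name;    /* NULL now reliably means "none requested" */
    }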
diff --git a/net/netfilter/x_tables.c b/net/netfilter/x_tables.c
index 509a95621f9f..150e5cf62f85 100644
--- a/net/netfilter/x_tables.c
+++ b/net/netfilter/x_tables.c
@@ -625,20 +625,6 @@ void xt_free_table_info(struct xt_table_info *info)
 }
 EXPORT_SYMBOL(xt_free_table_info);
 
-void xt_table_entry_swap_rcu(struct xt_table_info *oldinfo,
-			     struct xt_table_info *newinfo)
-{
-	unsigned int cpu;
-
-	for_each_possible_cpu(cpu) {
-		void *p = oldinfo->entries[cpu];
-		rcu_assign_pointer(oldinfo->entries[cpu], newinfo->entries[cpu]);
-		newinfo->entries[cpu] = p;
-	}
-
-}
-EXPORT_SYMBOL_GPL(xt_table_entry_swap_rcu);
-
 /* Find table by name, grabs mutex & ref.  Returns ERR_PTR() on error. */
 struct xt_table *xt_find_table_lock(struct net *net, u_int8_t af,
 				    const char *name)
@@ -676,32 +662,43 @@ void xt_compat_unlock(u_int8_t af)
 EXPORT_SYMBOL_GPL(xt_compat_unlock);
 #endif
 
+DEFINE_PER_CPU(struct xt_info_lock, xt_info_locks);
+EXPORT_PER_CPU_SYMBOL_GPL(xt_info_locks);
+
+
 struct xt_table_info *
 xt_replace_table(struct xt_table *table,
 		 unsigned int num_counters,
 		 struct xt_table_info *newinfo,
 		 int *error)
 {
-	struct xt_table_info *oldinfo, *private;
+	struct xt_table_info *private;
 
 	/* Do the substitution. */
-	mutex_lock(&table->lock);
+	local_bh_disable();
 	private = table->private;
+
 	/* Check inside lock: is the old number correct? */
 	if (num_counters != private->number) {
 		duprintf("num_counters != table->private->number (%u/%u)\n",
 			 num_counters, private->number);
-		mutex_unlock(&table->lock);
+		local_bh_enable();
 		*error = -EAGAIN;
 		return NULL;
 	}
-	oldinfo = private;
-	rcu_assign_pointer(table->private, newinfo);
-	newinfo->initial_entries = oldinfo->initial_entries;
-	mutex_unlock(&table->lock);
-	synchronize_net();
-	return oldinfo;
+
+	table->private = newinfo;
+	newinfo->initial_entries = private->initial_entries;
+
+	/*
+	 * Even though table entries have now been swapped, other CPU's
+	 * may still be using the old entries. This is okay, because
+	 * resynchronization happens because of the locking done
+	 * during the get_counters() routine.
+	 */
+	local_bh_enable();
+
+	return private;
 }
 EXPORT_SYMBOL_GPL(xt_replace_table);
 
@@ -734,7 +731,6 @@ struct xt_table *xt_register_table(struct net *net, struct xt_table *table,
 
 	/* Simplifies replace_table code. */
 	table->private = bootstrap;
-	mutex_init(&table->lock);
 
 	if (!xt_replace_table(table, 0, newinfo, &ret))
 		goto unlock;
@@ -1147,7 +1143,14 @@ static struct pernet_operations xt_net_ops = {
 
 static int __init xt_init(void)
 {
-	int i, rv;
+	unsigned int i;
+	int rv;
+
+	for_each_possible_cpu(i) {
+		struct xt_info_lock *lock = &per_cpu(xt_info_locks, i);
+		spin_lock_init(&lock->lock);
+		lock->readers = 0;
+	}
 
 	xt = kmalloc(sizeof(struct xt_af) * NFPROTO_NUMPROTO, GFP_KERNEL);
 	if (!xt)
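The x_tables rework trades RCU-plus-mutex for per-CPU reader locks: each packet-processing CPU recursively takes only its own lock (hence the readers count), while a table replacement, done with bottom halves disabled, becomes visible once every CPU's lock has been cycled. A rough pthreads analogue with one lock per slot; the names are illustrative, and the unsynchronized readers field is safe only under the same assumption as per-CPU data, namely that each slot is touched by a single owning thread:

    #include <pthread.h>

    #define NSLOTS 4    /* stand-in for the number of CPUs */

    struct info_lock {
        pthread_mutex_t lock;
        int readers;    /* recursion depth for the owning slot */
    };

    static struct info_lock locks[NSLOTS];

    static void info_locks_init(void)    /* mirrors the xt_init loop */
    {
        for (int i = 0; i < NSLOTS; i++) {
            pthread_mutex_init(&locks[i].lock, NULL);
            locks[i].readers = 0;
        }
    }

    /* reader: local slot only, recursion is nearly free */
    static void info_rdlock(int slot)
    {
        if (locks[slot].readers++ == 0)
            pthread_mutex_lock(&locks[slot].lock);
    }

    static void info_rdunlock(int slot)
    {
        if (--locks[slot].readers == 0)
            pthread_mutex_unlock(&locks[slot].lock);
    }

    /* writer: sweep every slot so all in-flight readers drain */
    static void info_wrlock_all(void)
    {
        for (int i = 0; i < NSLOTS; i++)
            pthread_mutex_lock(&locks[i].lock);
    }

    static void info_wrunlock_all(void)
    {
        for (int i = 0; i < NSLOTS; i++)
            pthread_mutex_unlock(&locks[i].lock);
    }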
diff --git a/net/netlabel/netlabel_addrlist.c b/net/netlabel/netlabel_addrlist.c
index 834c6eb7f484..c0519139679e 100644
--- a/net/netlabel/netlabel_addrlist.c
+++ b/net/netlabel/netlabel_addrlist.c
@@ -256,13 +256,11 @@ struct netlbl_af4list *netlbl_af4list_remove(__be32 addr, __be32 mask,
 {
 	struct netlbl_af4list *entry;
 
-	entry = netlbl_af4list_search(addr, head);
-	if (entry != NULL && entry->addr == addr && entry->mask == mask) {
-		netlbl_af4list_remove_entry(entry);
-		return entry;
-	}
-
-	return NULL;
+	entry = netlbl_af4list_search_exact(addr, mask, head);
+	if (entry == NULL)
+		return NULL;
+	netlbl_af4list_remove_entry(entry);
+	return entry;
 }
 
 #if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
@@ -299,15 +297,11 @@ struct netlbl_af6list *netlbl_af6list_remove(const struct in6_addr *addr,
 {
 	struct netlbl_af6list *entry;
 
-	entry = netlbl_af6list_search(addr, head);
-	if (entry != NULL &&
-	    ipv6_addr_equal(&entry->addr, addr) &&
-	    ipv6_addr_equal(&entry->mask, mask)) {
-		netlbl_af6list_remove_entry(entry);
-		return entry;
-	}
-
-	return NULL;
+	entry = netlbl_af6list_search_exact(addr, mask, head);
+	if (entry == NULL)
+		return NULL;
+	netlbl_af6list_remove_entry(entry);
+	return entry;
 }
 #endif /* IPv6 */
diff --git a/net/netrom/af_netrom.c b/net/netrom/af_netrom.c
index 4e705f87969f..3be0e016ab7d 100644
--- a/net/netrom/af_netrom.c
+++ b/net/netrom/af_netrom.c
@@ -1084,8 +1084,10 @@ static int nr_sendmsg(struct kiocb *iocb, struct socket *sock,
 	/* Build a packet - the conventional user limit is 236 bytes. We can
 	   do ludicrously large NetROM frames but must not overflow */
-	if (len > 65536)
-		return -EMSGSIZE;
+	if (len > 65536) {
+		err = -EMSGSIZE;
+		goto out;
+	}
 
 	SOCK_DEBUG(sk, "NET/ROM: sendto: building packet.\n");
 	size = len + NR_NETWORK_LEN + NR_TRANSPORT_LEN;
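The nr_sendmsg hunk replaces a bare return with the goto-out idiom so the early size check exits through the same unlock path as the rest of the function. A minimal standalone analogue, with a hypothetical pthread mutex standing in for the socket lock:

    #include <errno.h>
    #include <pthread.h>
    #include <stddef.h>

    static pthread_mutex_t sk_lock = PTHREAD_MUTEX_INITIALIZER;

    static int sendmsg_like(size_t len)
    {
        int err;

        pthread_mutex_lock(&sk_lock);

        if (len > 65536) {
            err = -EMSGSIZE;
            goto out;    /* never return with sk_lock held */
        }

        err = 0;    /* ... build and queue the packet here ... */
    out:
        pthread_mutex_unlock(&sk_lock);
        return err;
    }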
diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
index 74776de523ec..f546e81acc45 100644
--- a/net/packet/af_packet.c
+++ b/net/packet/af_packet.c
@@ -1758,8 +1758,9 @@ static void free_pg_vec(char **pg_vec, unsigned int order, unsigned int len)
 
 static inline char *alloc_one_pg_vec_page(unsigned long order)
 {
-	return (char *) __get_free_pages(GFP_KERNEL | __GFP_COMP | __GFP_ZERO,
-					 order);
+	gfp_t gfp_flags = GFP_KERNEL | __GFP_COMP | __GFP_ZERO | __GFP_NOWARN;
+
+	return (char *) __get_free_pages(gfp_flags, order);
 }
 
 static char **alloc_pg_vec(struct tpacket_req *req, int order)
diff --git a/net/rose/af_rose.c b/net/rose/af_rose.c
index 0f36e8d59b29..877a7f65f707 100644
--- a/net/rose/af_rose.c
+++ b/net/rose/af_rose.c
@@ -1072,10 +1072,6 @@ static int rose_sendmsg(struct kiocb *iocb, struct socket *sock,
 	unsigned char *asmptr;
 	int n, size, qbit = 0;
 
-	/* ROSE empty frame has no meaning : don't send */
-	if (len == 0)
-		return 0;
-
 	if (msg->msg_flags & ~(MSG_DONTWAIT|MSG_EOR|MSG_CMSG_COMPAT))
 		return -EINVAL;
 
@@ -1273,12 +1269,6 @@ static int rose_recvmsg(struct kiocb *iocb, struct socket *sock,
 	skb_reset_transport_header(skb);
 	copied     = skb->len;
 
-	/* ROSE empty frame has no meaning : ignore it */
-	if (copied == 0) {
-		skb_free_datagram(sk, skb);
-		return copied;
-	}
-
 	if (copied > size) {
 		copied = size;
 		msg->msg_flags |= MSG_TRUNC;
diff --git a/net/sched/cls_api.c b/net/sched/cls_api.c
index 173fcc4b050d..0759f32e9dca 100644
--- a/net/sched/cls_api.c
+++ b/net/sched/cls_api.c
@@ -254,7 +254,7 @@ replay:
 		}
 		tp->ops = tp_ops;
 		tp->protocol = protocol;
-		tp->prio = nprio ? : tcf_auto_prio(*back);
+		tp->prio = nprio ? : TC_H_MAJ(tcf_auto_prio(*back));
 		tp->q = q;
 		tp->classify = tp_ops->classify;
 		tp->classid = parent;
diff --git a/net/sched/em_meta.c b/net/sched/em_meta.c
index 72cf86e3c090..fad596bf32d7 100644
--- a/net/sched/em_meta.c
+++ b/net/sched/em_meta.c
@@ -176,8 +176,10 @@ META_COLLECTOR(var_dev)
 
 META_COLLECTOR(int_vlan_tag)
 {
-	unsigned short uninitialized_var(tag);
-	if (vlan_get_tag(skb, &tag) < 0)
+	unsigned short tag;
+
+	tag = vlan_tx_tag_get(skb);
+	if (!tag && __vlan_get_tag(skb, &tag))
 		*err = -1;
 	else
 		dst->value = tag;
diff --git a/net/sched/sch_netem.c b/net/sched/sch_netem.c
index d876b8734848..2b88295cb7b7 100644
--- a/net/sched/sch_netem.c
+++ b/net/sched/sch_netem.c
@@ -280,6 +280,14 @@ static struct sk_buff *netem_dequeue(struct Qdisc *sch)
 		if (unlikely(!skb))
 			return NULL;
 
+#ifdef CONFIG_NET_CLS_ACT
+		/*
+		 * If it's at ingress let's pretend the delay is
+		 * from the network (tstamp will be updated).
+		 */
+		if (G_TC_FROM(skb->tc_verd) & AT_INGRESS)
+			skb->tstamp.tv64 = 0;
+#endif
 		pr_debug("netem_dequeue: return skb=%p\n", skb);
 		sch->q.qlen--;
 		return skb;
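The em_meta collector now prefers the VLAN tag the NIC delivered out of band and parses the in-band 802.1Q header only as a fallback. A standalone analogue over hypothetical packet fields (the offsets assume an Ethernet header with the TCI at bytes 14-15):

    #include <stdint.h>

    struct pkt {
        uint16_t hw_vlan_tci;    /* 0 if the NIC stripped no tag */
        const uint8_t *l2;       /* raw frame bytes */
        int has_8021q;           /* frame carries an in-band tag */
    };

    static int get_vlan_tag(const struct pkt *p, uint16_t *tag)
    {
        *tag = p->hw_vlan_tci;
        if (*tag)
            return 0;    /* out-of-band tag from the NIC */
        if (!p->has_8021q)
            return -1;   /* no tag anywhere */
        *tag = (uint16_t)((p->l2[14] << 8) | p->l2[15]);    /* TCI */
        return 0;
    }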
diff --git a/net/socket.c b/net/socket.c
index 91d0c0254ffe..791d71a36a93 100644
--- a/net/socket.c
+++ b/net/socket.c
@@ -493,8 +493,7 @@ static struct socket *sock_alloc(void)
 	inode->i_uid = current_fsuid();
 	inode->i_gid = current_fsgid();
 
-	get_cpu_var(sockets_in_use)++;
-	put_cpu_var(sockets_in_use);
+	percpu_add(sockets_in_use, 1);
 	return sock;
 }
 
@@ -536,8 +535,7 @@ void sock_release(struct socket *sock)
 	if (sock->fasync_list)
 		printk(KERN_ERR "sock_release: fasync list not empty!\n");
 
-	get_cpu_var(sockets_in_use)--;
-	put_cpu_var(sockets_in_use);
+	percpu_sub(sockets_in_use, 1);
 	if (!sock->file) {
 		iput(SOCK_INODE(sock));
 		return;
diff --git a/net/sunrpc/svc.c b/net/sunrpc/svc.c
index 9b49a6ab8ded..8847add6ca16 100644
--- a/net/sunrpc/svc.c
+++ b/net/sunrpc/svc.c
@@ -1008,6 +1008,8 @@ svc_process(struct svc_rqst *rqstp)
 	rqstp->rq_res.tail[0].iov_len = 0;
 	/* Will be turned off only in gss privacy case: */
 	rqstp->rq_splice_ok = 1;
+	/* Will be turned off only when NFSv4 Sessions are used */
+	rqstp->rq_usedeferral = 1;
 
 	/* Setup reply header */
 	rqstp->rq_xprt->xpt_ops->xpo_prep_reply_hdr(rqstp);
@@ -1078,7 +1080,6 @@ svc_process(struct svc_rqst *rqstp)
 	procp = versp->vs_proc + proc;
 	if (proc >= versp->vs_nproc || !procp->pc_func)
 		goto err_bad_proc;
-	rqstp->rq_server = serv;
 	rqstp->rq_procinfo = procp;
 
 	/* Syntactic check complete */
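percpu_add()/percpu_sub() collapse each get_cpu_var()/put_cpu_var() pair into a single preempt-safe update of a per-CPU counter; the total is only ever computed by summing the shards. A rough user-space analogue with sharded atomics (names illustrative):

    #include <stdatomic.h>

    #define MAX_SHARDS 64    /* stand-in for NR_CPUS */

    static _Atomic long sockets_in_use[MAX_SHARDS];

    static void shard_add(int shard, long v)
    {
        /* relaxed: only the eventual sum matters */
        atomic_fetch_add_explicit(&sockets_in_use[shard], v,
                                  memory_order_relaxed);
    }

    static long shard_total(void)
    {
        long sum = 0;

        for (int i = 0; i < MAX_SHARDS; i++)
            sum += atomic_load_explicit(&sockets_in_use[i],
                                        memory_order_relaxed);
        return sum;
    }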
diff --git a/net/sunrpc/svc_xprt.c b/net/sunrpc/svc_xprt.c
index 2819ee093f36..c200d92e57e4 100644
--- a/net/sunrpc/svc_xprt.c
+++ b/net/sunrpc/svc_xprt.c
@@ -14,6 +14,8 @@
 
 #define RPCDBG_FACILITY	RPCDBG_SVCXPRT
 
+#define SVC_MAX_WAKING 5
+
 static struct svc_deferred_req *svc_deferred_dequeue(struct svc_xprt *xprt);
 static int svc_deferred_recv(struct svc_rqst *rqstp);
 static struct cache_deferred_req *svc_defer(struct cache_req *req);
@@ -301,6 +303,7 @@ void svc_xprt_enqueue(struct svc_xprt *xprt)
 	struct svc_pool *pool;
 	struct svc_rqst *rqstp;
 	int cpu;
+	int thread_avail;
 
 	if (!(xprt->xpt_flags &
 	      ((1<<XPT_CONN)|(1<<XPT_DATA)|(1<<XPT_CLOSE)|(1<<XPT_DEFERRED))))
@@ -312,18 +315,14 @@ void svc_xprt_enqueue(struct svc_xprt *xprt)
 
 	spin_lock_bh(&pool->sp_lock);
 
-	if (!list_empty(&pool->sp_threads) &&
-	    !list_empty(&pool->sp_sockets))
-		printk(KERN_ERR
-		       "svc_xprt_enqueue: "
-		       "threads and transports both waiting??\n");
-
 	if (test_bit(XPT_DEAD, &xprt->xpt_flags)) {
 		/* Don't enqueue dead transports */
 		dprintk("svc: transport %p is dead, not enqueued\n", xprt);
 		goto out_unlock;
 	}
 
+	pool->sp_stats.packets++;
+
 	/* Mark transport as busy. It will remain in this state until
 	 * the provider calls svc_xprt_received. We update XPT_BUSY
 	 * atomically because it also guards against trying to enqueue
@@ -356,7 +355,15 @@ void svc_xprt_enqueue(struct svc_xprt *xprt)
 	}
 
  process:
-	if (!list_empty(&pool->sp_threads)) {
+	/* Work out whether threads are available */
+	thread_avail = !list_empty(&pool->sp_threads);	/* threads are asleep */
+	if (pool->sp_nwaking >= SVC_MAX_WAKING) {
+		/* too many threads are runnable and trying to wake up */
+		thread_avail = 0;
+		pool->sp_stats.overloads_avoided++;
+	}
+
+	if (thread_avail) {
 		rqstp = list_entry(pool->sp_threads.next,
 				   struct svc_rqst,
 				   rq_list);
@@ -371,11 +378,15 @@ void svc_xprt_enqueue(struct svc_xprt *xprt)
 		svc_xprt_get(xprt);
 		rqstp->rq_reserved = serv->sv_max_mesg;
 		atomic_add(rqstp->rq_reserved, &xprt->xpt_reserved);
+		rqstp->rq_waking = 1;
+		pool->sp_nwaking++;
+		pool->sp_stats.threads_woken++;
 		BUG_ON(xprt->xpt_pool != pool);
 		wake_up(&rqstp->rq_wait);
 	} else {
 		dprintk("svc: transport %p put into queue\n", xprt);
 		list_add_tail(&xprt->xpt_ready, &pool->sp_sockets);
+		pool->sp_stats.sockets_queued++;
 		BUG_ON(xprt->xpt_pool != pool);
 	}
 
@@ -588,6 +599,7 @@ int svc_recv(struct svc_rqst *rqstp, long timeout)
 	int			pages;
 	struct xdr_buf		*arg;
 	DECLARE_WAITQUEUE(wait, current);
+	long			time_left;
 
 	dprintk("svc: server %p waiting for data (to = %ld)\n",
 		rqstp, timeout);
@@ -636,6 +648,11 @@ int svc_recv(struct svc_rqst *rqstp, long timeout)
 		return -EINTR;
 
 	spin_lock_bh(&pool->sp_lock);
+	if (rqstp->rq_waking) {
+		rqstp->rq_waking = 0;
+		pool->sp_nwaking--;
+		BUG_ON(pool->sp_nwaking < 0);
+	}
 	xprt = svc_xprt_dequeue(pool);
 	if (xprt) {
 		rqstp->rq_xprt = xprt;
@@ -668,12 +685,14 @@ int svc_recv(struct svc_rqst *rqstp, long timeout)
 	add_wait_queue(&rqstp->rq_wait, &wait);
 	spin_unlock_bh(&pool->sp_lock);
 
-	schedule_timeout(timeout);
+	time_left = schedule_timeout(timeout);
 
 	try_to_freeze();
 
 	spin_lock_bh(&pool->sp_lock);
 	remove_wait_queue(&rqstp->rq_wait, &wait);
+	if (!time_left)
+		pool->sp_stats.threads_timedout++;
 
 	xprt = rqstp->rq_xprt;
 	if (!xprt) {
@@ -958,7 +977,7 @@ static struct cache_deferred_req *svc_defer(struct cache_req *req)
 	struct svc_rqst *rqstp = container_of(req, struct svc_rqst, rq_chandle);
 	struct svc_deferred_req *dr;
 
-	if (rqstp->rq_arg.page_len)
+	if (rqstp->rq_arg.page_len || !rqstp->rq_usedeferral)
 		return NULL; /* if more than a page, give up FIXME */
 	if (rqstp->rq_deferred) {
 		dr = rqstp->rq_deferred;
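SVC_MAX_WAKING caps how many workers may be woken-but-not-yet-running per pool; past that, a ready transport is queued for later pickup instead of triggering another wakeup, and the overloads_avoided counter records it. A hypothetical sketch of just that decision:

    #define SVC_MAX_WAKING_DEMO 5

    struct pool_demo {
        int sp_nwaking;      /* woken but not yet on a CPU */
        int idle_threads;    /* sleeping workers available */
        long overloads_avoided;
    };

    /* 1: wake a thread now; 0: queue the transport instead */
    static int should_wake(struct pool_demo *p)
    {
        int thread_avail = p->idle_threads > 0;

        if (p->sp_nwaking >= SVC_MAX_WAKING_DEMO) {
            thread_avail = 0;    /* enough wakeups already in flight */
            p->overloads_avoided++;
        }
        return thread_avail;
    }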
@@ -1112,3 +1131,93 @@ int svc_xprt_names(struct svc_serv *serv, char *buf, int buflen)
 	return totlen;
 }
 EXPORT_SYMBOL_GPL(svc_xprt_names);
+
+
+/*----------------------------------------------------------------------------*/
+
+static void *svc_pool_stats_start(struct seq_file *m, loff_t *pos)
+{
+	unsigned int pidx = (unsigned int)*pos;
+	struct svc_serv *serv = m->private;
+
+	dprintk("svc_pool_stats_start, *pidx=%u\n", pidx);
+
+	lock_kernel();
+	/* bump up the pseudo refcount while traversing */
+	svc_get(serv);
+	unlock_kernel();
+
+	if (!pidx)
+		return SEQ_START_TOKEN;
+	return (pidx > serv->sv_nrpools ? NULL : &serv->sv_pools[pidx-1]);
+}
+
+static void *svc_pool_stats_next(struct seq_file *m, void *p, loff_t *pos)
+{
+	struct svc_pool *pool = p;
+	struct svc_serv *serv = m->private;
+
+	dprintk("svc_pool_stats_next, *pos=%llu\n", *pos);
+
+	if (p == SEQ_START_TOKEN) {
+		pool = &serv->sv_pools[0];
+	} else {
+		unsigned int pidx = (pool - &serv->sv_pools[0]);
+		if (pidx < serv->sv_nrpools-1)
+			pool = &serv->sv_pools[pidx+1];
+		else
+			pool = NULL;
+	}
+	++*pos;
+	return pool;
+}
+
+static void svc_pool_stats_stop(struct seq_file *m, void *p)
+{
+	struct svc_serv *serv = m->private;
+
+	lock_kernel();
+	/* this function really, really should have been called svc_put() */
+	svc_destroy(serv);
+	unlock_kernel();
+}
+
+static int svc_pool_stats_show(struct seq_file *m, void *p)
+{
+	struct svc_pool *pool = p;
+
+	if (p == SEQ_START_TOKEN) {
+		seq_puts(m, "# pool packets-arrived sockets-enqueued threads-woken overloads-avoided threads-timedout\n");
+		return 0;
+	}
+
+	seq_printf(m, "%u %lu %lu %lu %lu %lu\n",
+		pool->sp_id,
+		pool->sp_stats.packets,
+		pool->sp_stats.sockets_queued,
+		pool->sp_stats.threads_woken,
+		pool->sp_stats.overloads_avoided,
+		pool->sp_stats.threads_timedout);
+
+	return 0;
+}
+
+static const struct seq_operations svc_pool_stats_seq_ops = {
+	.start	= svc_pool_stats_start,
+	.next	= svc_pool_stats_next,
+	.stop	= svc_pool_stats_stop,
+	.show	= svc_pool_stats_show,
+};
+
+int svc_pool_stats_open(struct svc_serv *serv, struct file *file)
+{
+	int err;
+
+	err = seq_open(file, &svc_pool_stats_seq_ops);
+	if (!err)
+		((struct seq_file *) file->private_data)->private = serv;
+	return err;
+}
+EXPORT_SYMBOL(svc_pool_stats_open);
+
+/*----------------------------------------------------------------------------*/
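The new pool-stats file follows the standard seq_file contract: start() maps a position to a record and returns a special token exactly once so a header can be printed, next() advances, show() formats one record. A self-contained user-space imitation of that iterator shape:

    #include <stdio.h>

    #define START_TOKEN ((void *)1)
    #define NPOOLS 3

    static int pools[NPOOLS] = { 10, 20, 30 };

    static void *stats_start(long *pos)
    {
        if (*pos == 0)
            return START_TOKEN;    /* header comes first */
        return *pos > NPOOLS ? NULL : &pools[*pos - 1];
    }

    static void *stats_next(void *p, long *pos)
    {
        long idx = (p == START_TOKEN) ? 0 : (int *)p - pools + 1;

        ++*pos;
        return idx < NPOOLS ? &pools[idx] : NULL;
    }

    static void stats_show(void *p)
    {
        if (p == START_TOKEN)
            printf("# pool value\n");
        else
            printf("%ld %d\n", (long)((int *)p - pools), *(int *)p);
    }

    int main(void)
    {
        long pos = 0;

        for (void *p = stats_start(&pos); p; p = stats_next(p, &pos))
            stats_show(p);
        return 0;
    }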
diff --git a/net/sunrpc/svcsock.c b/net/sunrpc/svcsock.c
index 9d504234af4a..af3198814c15 100644
--- a/net/sunrpc/svcsock.c
+++ b/net/sunrpc/svcsock.c
@@ -345,7 +345,6 @@ static void svc_sock_setbufsize(struct socket *sock, unsigned int snd,
 	lock_sock(sock->sk);
 	sock->sk->sk_sndbuf = snd * 2;
 	sock->sk->sk_rcvbuf = rcv * 2;
-	sock->sk->sk_userlocks |= SOCK_SNDBUF_LOCK|SOCK_RCVBUF_LOCK;
 	release_sock(sock->sk);
 #endif
 }
@@ -797,23 +796,6 @@ static int svc_tcp_recvfrom(struct svc_rqst *rqstp)
 		test_bit(XPT_CONN, &svsk->sk_xprt.xpt_flags),
 		test_bit(XPT_CLOSE, &svsk->sk_xprt.xpt_flags));
 
-	if (test_and_clear_bit(XPT_CHNGBUF, &svsk->sk_xprt.xpt_flags))
-		/* sndbuf needs to have room for one request
-		 * per thread, otherwise we can stall even when the
-		 * network isn't a bottleneck.
-		 *
-		 * We count all threads rather than threads in a
-		 * particular pool, which provides an upper bound
-		 * on the number of threads which will access the socket.
-		 *
-		 * rcvbuf just needs to be able to hold a few requests.
-		 * Normally they will be removed from the queue
-		 * as soon a a complete request arrives.
-		 */
-		svc_sock_setbufsize(svsk->sk_sock,
-				    (serv->sv_nrthreads+3) * serv->sv_max_mesg,
-				    3 * serv->sv_max_mesg);
-
 	clear_bit(XPT_DATA, &svsk->sk_xprt.xpt_flags);
 
 	/* Receive data. If we haven't got the record length yet, get
@@ -1061,15 +1043,6 @@ static void svc_tcp_init(struct svc_sock *svsk, struct svc_serv *serv)
 
 		tcp_sk(sk)->nonagle |= TCP_NAGLE_OFF;
 
-		/* initialise setting must have enough space to
-		 * receive and respond to one request.
-		 * svc_tcp_recvfrom will re-adjust if necessary
-		 */
-		svc_sock_setbufsize(svsk->sk_sock,
-				    3 * svsk->sk_xprt.xpt_server->sv_max_mesg,
-				    3 * svsk->sk_xprt.xpt_server->sv_max_mesg);
-
-		set_bit(XPT_CHNGBUF, &svsk->sk_xprt.xpt_flags);
 		set_bit(XPT_DATA, &svsk->sk_xprt.xpt_flags);
 		if (sk->sk_state != TCP_ESTABLISHED)
 			set_bit(XPT_CLOSE, &svsk->sk_xprt.xpt_flags);
@@ -1139,8 +1112,14 @@ static struct svc_sock *svc_setup_socket(struct svc_serv *serv,
 	/* Initialize the socket */
 	if (sock->type == SOCK_DGRAM)
 		svc_udp_init(svsk, serv);
-	else
+	else {
+		/* initialise setting must have enough space to
+		 * receive and respond to one request.
+		 */
+		svc_sock_setbufsize(svsk->sk_sock, 4 * serv->sv_max_mesg,
+				    4 * serv->sv_max_mesg);
 		svc_tcp_init(svsk, serv);
+	}
 
 	dprintk("svc: svc_setup_socket created %p (inet %p)\n",
 				svsk, svsk->sk_sk);
diff --git a/net/wireless/core.h b/net/wireless/core.h
index d43daa236ef9..0a592e4295f0 100644
--- a/net/wireless/core.h
+++ b/net/wireless/core.h
@@ -90,7 +90,7 @@ struct cfg80211_internal_bss {
 	struct rb_node rbn;
 	unsigned long ts;
 	struct kref ref;
-	bool hold;
+	bool hold, ies_allocated;
 
 	/* must be last because of priv member */
 	struct cfg80211_bss pub;
diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c
index 353e1a4ece83..2456e4ee445e 100644
--- a/net/wireless/nl80211.c
+++ b/net/wireless/nl80211.c
@@ -3334,7 +3334,7 @@ static void nl80211_send_mlme_event(struct cfg80211_registered_device *rdev,
 	struct sk_buff *msg;
 	void *hdr;
 
-	msg = nlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL);
+	msg = nlmsg_new(NLMSG_GOODSIZE, GFP_ATOMIC);
 	if (!msg)
 		return;
 
@@ -3353,7 +3353,7 @@ static void nl80211_send_mlme_event(struct cfg80211_registered_device *rdev,
 		return;
 	}
 
-	genlmsg_multicast(msg, 0, nl80211_mlme_mcgrp.id, GFP_KERNEL);
+	genlmsg_multicast(msg, 0, nl80211_mlme_mcgrp.id, GFP_ATOMIC);
 	return;
 
  nla_put_failure:
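Dropping the SOCK_SNDBUF_LOCK/SOCK_RCVBUF_LOCK bits leaves kernel autotuning in charge after the one-time sizing now done in svc_setup_socket. The `* 2` in svc_sock_setbufsize mirrors how Linux doubles a size requested via setsockopt(SO_SNDBUF), which is easy to observe from user space (Linux-specific behaviour):

    #include <stdio.h>
    #include <sys/socket.h>

    int main(void)
    {
        int fd = socket(AF_INET, SOCK_STREAM, 0);
        int val = 64 * 1024, out = 0;
        socklen_t len = sizeof(out);

        if (fd < 0)
            return 1;
        setsockopt(fd, SOL_SOCKET, SO_SNDBUF, &val, sizeof(val));
        getsockopt(fd, SOL_SOCKET, SO_SNDBUF, &out, &len);
        printf("requested %d, kernel set %d\n", val, out);    /* ~2*val */
        return 0;
    }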
diff --git a/net/wireless/reg.c b/net/wireless/reg.c
index 6327e1617acb..08265ca15785 100644
--- a/net/wireless/reg.c
+++ b/net/wireless/reg.c
@@ -907,6 +907,7 @@ EXPORT_SYMBOL(freq_reg_info);
 int freq_reg_info(struct wiphy *wiphy, u32 center_freq, u32 *bandwidth,
 		  const struct ieee80211_reg_rule **reg_rule)
 {
+	assert_cfg80211_lock();
 	return freq_reg_info_regd(wiphy, center_freq,
 		bandwidth, reg_rule, NULL);
 }
@@ -1133,7 +1134,8 @@ static bool reg_is_world_roaming(struct wiphy *wiphy)
 	if (is_world_regdom(cfg80211_regdomain->alpha2) ||
 	    (wiphy->regd && is_world_regdom(wiphy->regd->alpha2)))
 		return true;
-	if (last_request->initiator != NL80211_REGDOM_SET_BY_COUNTRY_IE &&
+	if (last_request &&
+	    last_request->initiator != NL80211_REGDOM_SET_BY_COUNTRY_IE &&
 	    wiphy->custom_regulatory)
 		return true;
 	return false;
@@ -1142,6 +1144,12 @@ static bool reg_is_world_roaming(struct wiphy *wiphy)
 /* Reap the advantages of previously found beacons */
 static void reg_process_beacons(struct wiphy *wiphy)
 {
+	/*
+	 * Means we are just firing up cfg80211, so no beacons would
+	 * have been processed yet.
+	 */
+	if (!last_request)
+		return;
 	if (!reg_is_world_roaming(wiphy))
 		return;
 	wiphy_update_beacon_reg(wiphy);
@@ -1176,6 +1184,8 @@ static void handle_channel_custom(struct wiphy *wiphy,
 	struct ieee80211_supported_band *sband;
 	struct ieee80211_channel *chan;
 
+	assert_cfg80211_lock();
+
 	sband = wiphy->bands[band];
 	BUG_ON(chan_idx >= sband->n_channels);
 	chan = &sband->channels[chan_idx];
@@ -1214,10 +1224,13 @@ void wiphy_apply_custom_regulatory(struct wiphy *wiphy,
 				   const struct ieee80211_regdomain *regd)
 {
 	enum ieee80211_band band;
+
+	mutex_lock(&cfg80211_mutex);
 	for (band = 0; band < IEEE80211_NUM_BANDS; band++) {
 		if (wiphy->bands[band])
 			handle_band_custom(wiphy, band, regd);
 	}
+	mutex_unlock(&cfg80211_mutex);
 }
 EXPORT_SYMBOL(wiphy_apply_custom_regulatory);
 
@@ -1423,7 +1436,7 @@ new_request:
 	return call_crda(last_request->alpha2);
 }
 
-/* This currently only processes user and driver regulatory hints */
+/* This processes *all* regulatory hints */
 static void reg_process_hint(struct regulatory_request *reg_request)
 {
 	int r = 0;
@@ -2095,11 +2108,12 @@ int set_regdom(const struct ieee80211_regdomain *rd)
 /* Caller must hold cfg80211_mutex */
 void reg_device_remove(struct wiphy *wiphy)
 {
-	struct wiphy *request_wiphy;
+	struct wiphy *request_wiphy = NULL;
 
 	assert_cfg80211_lock();
 
-	request_wiphy = wiphy_idx_to_wiphy(last_request->wiphy_idx);
+	if (last_request)
+		request_wiphy = wiphy_idx_to_wiphy(last_request->wiphy_idx);
 
 	kfree(wiphy->regd);
 	if (!last_request || !request_wiphy)
diff --git a/net/wireless/scan.c b/net/wireless/scan.c
index 2a00e362f5fe..1f260c40b6ca 100644
--- a/net/wireless/scan.c
+++ b/net/wireless/scan.c
@@ -58,6 +58,10 @@ static void bss_release(struct kref *ref)
 	bss = container_of(ref, struct cfg80211_internal_bss, ref);
 	if (bss->pub.free_priv)
 		bss->pub.free_priv(&bss->pub);
+
+	if (bss->ies_allocated)
+		kfree(bss->pub.information_elements);
+
 	kfree(bss);
 }
 
@@ -360,19 +364,42 @@ cfg80211_bss_update(struct cfg80211_registered_device *dev,
 
 	found = rb_find_bss(dev, res);
 
-	if (found && overwrite) {
-		list_replace(&found->list, &res->list);
-		rb_replace_node(&found->rbn, &res->rbn,
-				&dev->bss_tree);
-		kref_put(&found->ref, bss_release);
-		found = res;
-	} else if (found) {
+	if (found) {
 		kref_get(&found->ref);
 		found->pub.beacon_interval = res->pub.beacon_interval;
 		found->pub.tsf = res->pub.tsf;
 		found->pub.signal = res->pub.signal;
 		found->pub.capability = res->pub.capability;
 		found->ts = res->ts;
+
+		/* overwrite IEs */
+		if (overwrite) {
+			size_t used = dev->wiphy.bss_priv_size + sizeof(*res);
+			size_t ielen = res->pub.len_information_elements;
+
+			if (ksize(found) >= used + ielen) {
+				memcpy(found->pub.information_elements,
+				       res->pub.information_elements, ielen);
+				found->pub.len_information_elements = ielen;
+			} else {
+				u8 *ies = found->pub.information_elements;
+
+				if (found->ies_allocated) {
+					if (ksize(ies) < ielen)
+						ies = krealloc(ies, ielen,
+							       GFP_ATOMIC);
+				} else
+					ies = kmalloc(ielen, GFP_ATOMIC);
+
+				if (ies) {
+					memcpy(ies, res->pub.information_elements, ielen);
+					found->ies_allocated = true;
+					found->pub.information_elements = ies;
+					found->pub.len_information_elements = ielen;
+				}
+			}
+		}
+
+		kref_put(&res->ref, bss_release);
 	} else {
 		/* this "consumes" the reference */
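The IE-overwrite path reuses allocator slack first, asking ksize() whether the original allocation can already hold the new elements, and only otherwise reallocates, flagging the buffer with ies_allocated so bss_release knows to free it. A user-space analogue built on glibc's malloc_usable_size() (names illustrative; glibc-specific, and unlike the kernel path it does not distinguish embedded from separately allocated buffers):

    #include <malloc.h>
    #include <stdlib.h>
    #include <string.h>

    static unsigned char *store_ies(unsigned char *old, size_t *cap,
                                    const unsigned char *src, size_t len)
    {
        if (old && malloc_usable_size(old) >= len) {
            memcpy(old, src, len);    /* fits in existing slack */
            *cap = len;
            return old;
        }

        unsigned char *n = realloc(old, len);
        if (!n)
            return old;    /* keep the previous data on failure */
        memcpy(n, src, len);
        *cap = len;
        return n;
    }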
diff --git a/net/xfrm/xfrm_state.c b/net/xfrm/xfrm_state.c
index 82271720d970..5f1f86565f16 100644
--- a/net/xfrm/xfrm_state.c
+++ b/net/xfrm/xfrm_state.c
@@ -794,7 +794,7 @@ xfrm_state_find(xfrm_address_t *daddr, xfrm_address_t *saddr,
 {
 	static xfrm_address_t saddr_wildcard = { };
 	struct net *net = xp_net(pol);
-	unsigned int h;
+	unsigned int h, h_wildcard;
 	struct hlist_node *entry;
 	struct xfrm_state *x, *x0, *to_put;
 	int acquire_in_progress = 0;
@@ -819,8 +819,8 @@ xfrm_state_find(xfrm_address_t *daddr, xfrm_address_t *saddr,
 	if (best)
 		goto found;
 
-	h = xfrm_dst_hash(net, daddr, &saddr_wildcard, tmpl->reqid, family);
-	hlist_for_each_entry(x, entry, net->xfrm.state_bydst+h, bydst) {
+	h_wildcard = xfrm_dst_hash(net, daddr, &saddr_wildcard, tmpl->reqid, family);
+	hlist_for_each_entry(x, entry, net->xfrm.state_bydst+h_wildcard, bydst) {
 		if (x->props.family == family &&
 		    x->props.reqid == tmpl->reqid &&
 		    !(x->props.flags & XFRM_STATE_WILDRECV) &&