author	Xinyu Chen <xinyu.chen@freescale.com>	2012-02-02 16:58:05 +0800
committer	Xinyu Chen <xinyu.chen@freescale.com>	2012-02-02 16:58:05 +0800
commit	599a980adc5c2ba32f6aedcd87640b781d347d1f (patch)
tree	9163c58e47cf900303b6db19d2e6bd4468a43ecb /net
parent	203993cd89ec6395b6bfba948c2e424680dd622e (diff)
parent	a65e28a0149261776678977962cfa0f90973e1d4 (diff)
Merge branch 'android-3.0' into imx_3.0.15_android
Conflicts:
drivers/misc/Kconfig
drivers/misc/Makefile
drivers/net/wireless/Makefile
kernel/power/main.c
sound/soc/soc-core.c
Diffstat (limited to 'net')
49 files changed, 7327 insertions, 1063 deletions
diff --git a/net/Kconfig b/net/Kconfig
index 878151c772c9..919cf9a82122 100644
--- a/net/Kconfig
+++ b/net/Kconfig
@@ -79,6 +79,20 @@ source "net/netlabel/Kconfig"
 
 endif # if INET
 
+config ANDROID_PARANOID_NETWORK
+	bool "Only allow certain groups to create sockets"
+	default y
+	help
+		none
+
+config NET_ACTIVITY_STATS
+	bool "Network activity statistics tracking"
+	default y
+	help
+	  Network activity statistics are useful for tracking wireless
+	  modem activity on 2G, 3G, 4G wireless networks. Counts number of
+	  transmissions and groups them in specified time buckets.
+
 config NETWORK_SECMARK
 	bool "Security Marking"
 	help
@@ -217,7 +231,7 @@ source "net/dns_resolver/Kconfig"
 source "net/batman-adv/Kconfig"
 
 config RPS
-	boolean
+	boolean "RPS"
 	depends on SMP && SYSFS && USE_GENERIC_SMP_HELPERS
 	default y
 
diff --git a/net/Makefile b/net/Makefile
index a51d9465e628..54808aba6c1a 100644
--- a/net/Makefile
+++ b/net/Makefile
@@ -68,3 +68,4 @@ obj-$(CONFIG_WIMAX)	+= wimax/
 obj-$(CONFIG_DNS_RESOLVER)	+= dns_resolver/
 obj-$(CONFIG_CEPH_LIB)		+= ceph/
 obj-$(CONFIG_BATMAN_ADV)	+= batman-adv/
+obj-$(CONFIG_NET_ACTIVITY_STATS) += activity_stats.o
diff --git a/net/activity_stats.c b/net/activity_stats.c
new file mode 100644
index 000000000000..8a3e93470069
--- /dev/null
+++ b/net/activity_stats.c
@@ -0,0 +1,115 @@
+/* net/activity_stats.c
+ *
+ * Copyright (C) 2010 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * Author: Mike Chan (mike@android.com)
+ */
+
+#include <linux/proc_fs.h>
+#include <linux/suspend.h>
+#include <net/net_namespace.h>
+
+/*
+ * Track transmission rates in buckets (power of 2).
+ * 1,2,4,8...512 seconds.
+ *
+ * Buckets represent the count of network transmissions at least
+ * N seconds apart, where N is 1 << bucket index.
+ */
+#define BUCKET_MAX 10
+
+/* Track network activity frequency */
+static unsigned long activity_stats[BUCKET_MAX];
+static ktime_t last_transmit;
+static ktime_t suspend_time;
+static DEFINE_SPINLOCK(activity_lock);
+
+void activity_stats_update(void)
+{
+	int i;
+	unsigned long flags;
+	ktime_t now;
+	s64 delta;
+
+	spin_lock_irqsave(&activity_lock, flags);
+	now = ktime_get();
+	delta = ktime_to_ns(ktime_sub(now, last_transmit));
+
+	for (i = BUCKET_MAX - 1; i >= 0; i--) {
+		/*
+		 * Check if the time delta between network activity is within the
+		 * minimum bucket range.
+		 */
+		if (delta < (1000000000ULL << i))
+			continue;
+
+		activity_stats[i]++;
+		last_transmit = now;
+		break;
+	}
+	spin_unlock_irqrestore(&activity_lock, flags);
+}
+
+static int activity_stats_read_proc(char *page, char **start, off_t off,
+					int count, int *eof, void *data)
+{
+	int i;
+	int len;
+	char *p = page;
+
+	/* Only print if offset is 0, or we have enough buffer space */
+	if (off || count < (30 * BUCKET_MAX + 22))
+		return -ENOMEM;
+
+	len = snprintf(p, count, "Min Bucket(sec) Count\n");
+	count -= len;
+	p += len;
+
+	for (i = 0; i < BUCKET_MAX; i++) {
+		len = snprintf(p, count, "%15d %lu\n", 1 << i, activity_stats[i]);
+		count -= len;
+		p += len;
+	}
+	*eof = 1;
+
+	return p - page;
+}
+
+static int activity_stats_notifier(struct notifier_block *nb,
+					unsigned long event, void *dummy)
+{
+	switch (event) {
+		case PM_SUSPEND_PREPARE:
+			suspend_time = ktime_get_real();
+			break;
+
+		case PM_POST_SUSPEND:
+			suspend_time = ktime_sub(ktime_get_real(), suspend_time);
+			last_transmit = ktime_sub(last_transmit, suspend_time);
+	}
+
+	return 0;
+}
+
+static struct notifier_block activity_stats_notifier_block = {
+	.notifier_call = activity_stats_notifier,
+};
+
+static int __init activity_stats_init(void)
+{
+	create_proc_read_entry("activity", S_IRUGO,
+			init_net.proc_net_stat, activity_stats_read_proc, NULL);
+	return register_pm_notifier(&activity_stats_notifier_block);
+}
+
+subsys_initcall(activity_stats_init);
+
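The bucket selection in activity_stats_update() above walks from the largest bucket down and counts the first bucket i for which the inter-transmission gap is at least 1 << i seconds; gaps under one second fall out of the loop without being counted and without updating last_transmit. A minimal user-space sketch of the same arithmetic — the bucket_for_delta() helper is hypothetical, not part of the patch:

#include <stdint.h>
#include <stdio.h>

#define BUCKET_MAX   10
#define NSEC_PER_SEC 1000000000ULL

/* Hypothetical helper mirroring the loop in activity_stats_update():
 * returns the highest bucket i such that delta >= (1 << i) seconds,
 * or -1 if the delta is under one second (nothing is counted). */
static int bucket_for_delta(uint64_t delta_ns)
{
	int i;

	for (i = BUCKET_MAX - 1; i >= 0; i--)
		if (delta_ns >= (NSEC_PER_SEC << i))
			return i;
	return -1;
}

int main(void)
{
	/* a 3 s gap lands in bucket 1 (>= 2 s),
	 * a 600 s gap lands in bucket 9 (>= 512 s) */
	printf("%d %d\n", bucket_for_delta(3 * NSEC_PER_SEC),
			  bucket_for_delta(600 * NSEC_PER_SEC));
	return 0;
}

Because last_transmit is only advanced when a bucket is counted, a burst of sub-second transmissions is attributed to the gap before the burst, not counted once per packet.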
diff --git a/net/bluetooth/Kconfig b/net/bluetooth/Kconfig
index 6ae5ec508587..bfb3dc03c9de 100644
--- a/net/bluetooth/Kconfig
+++ b/net/bluetooth/Kconfig
@@ -6,6 +6,7 @@ menuconfig BT
 	tristate "Bluetooth subsystem support"
 	depends on NET && !S390
 	depends on RFKILL || !RFKILL
+	select CRYPTO
 	help
 	  Bluetooth is low-cost, low-power, short-range wireless technology.
 	  It was designed as a replacement for cables and other short-range
@@ -22,6 +23,7 @@ menuconfig BT
 	     BNEP Module (Bluetooth Network Encapsulation Protocol)
 	     CMTP Module (CAPI Message Transport Protocol)
 	     HIDP Module (Human Interface Device Protocol)
+	     SMP Module (Security Manager Protocol)
 
 	  Say Y here to compile Bluetooth support into the kernel or say M to
 	  compile it as module (bluetooth).
@@ -36,11 +38,18 @@ if BT != n
 config BT_L2CAP
 	bool "L2CAP protocol support"
 	select CRC16
+	select CRYPTO
+	select CRYPTO_BLKCIPHER
+	select CRYPTO_AES
+	select CRYPTO_ECB
 	help
 	  L2CAP (Logical Link Control and Adaptation Protocol) provides
 	  connection oriented and connection-less data transport.  L2CAP
 	  support is required for most Bluetooth applications.
 
+	  Also included is support for SMP (Security Manager Protocol) which
+	  is the security layer on top of LE (Low Energy) links.
+
 config BT_SCO
 	bool "SCO links support"
 	help
diff --git a/net/bluetooth/Makefile b/net/bluetooth/Makefile
index f04fe9a9d634..9b67f3d08fa4 100644
--- a/net/bluetooth/Makefile
+++ b/net/bluetooth/Makefile
@@ -9,5 +9,5 @@ obj-$(CONFIG_BT_CMTP)	+= cmtp/
 obj-$(CONFIG_BT_HIDP)	+= hidp/
 
 bluetooth-y := af_bluetooth.o hci_core.o hci_conn.o hci_event.o mgmt.o hci_sock.o hci_sysfs.o lib.o
-bluetooth-$(CONFIG_BT_L2CAP)	+= l2cap_core.o l2cap_sock.o
+bluetooth-$(CONFIG_BT_L2CAP)	+= l2cap_core.o l2cap_sock.o smp.o
 bluetooth-$(CONFIG_BT_SCO)	+= sco.o
diff --git a/net/bluetooth/af_bluetooth.c b/net/bluetooth/af_bluetooth.c
index 8add9b499912..7c73a10d7edc 100644
--- a/net/bluetooth/af_bluetooth.c
+++ b/net/bluetooth/af_bluetooth.c
@@ -40,6 +40,15 @@
 
 #include <net/bluetooth/bluetooth.h>
 
+#ifdef CONFIG_ANDROID_PARANOID_NETWORK
+#include <linux/android_aid.h>
+#endif
+
+#ifndef CONFIG_BT_SOCK_DEBUG
+#undef  BT_DBG
+#define BT_DBG(D...)
+#endif
+
 #define VERSION "2.16"
 
 /* Bluetooth sockets */
@@ -125,11 +134,40 @@ int bt_sock_unregister(int proto)
 }
 EXPORT_SYMBOL(bt_sock_unregister);
 
+#ifdef CONFIG_ANDROID_PARANOID_NETWORK
+static inline int current_has_bt_admin(void)
+{
+	return (!current_euid() || in_egroup_p(AID_NET_BT_ADMIN));
+}
+
+static inline int current_has_bt(void)
+{
+	return (current_has_bt_admin() || in_egroup_p(AID_NET_BT));
+}
+# else
+static inline int current_has_bt_admin(void)
+{
+	return 1;
+}
+
+static inline int current_has_bt(void)
+{
+	return 1;
+}
+#endif
+
 static int bt_sock_create(struct net *net, struct socket *sock, int proto,
 			  int kern)
 {
 	int err;
 
+	if (proto == BTPROTO_RFCOMM || proto == BTPROTO_SCO ||
+			proto == BTPROTO_L2CAP) {
+		if (!current_has_bt())
+			return -EPERM;
+	} else if (!current_has_bt_admin())
+		return -EPERM;
+
 	if (net != &init_net)
 		return -EAFNOSUPPORT;
 
@@ -494,9 +532,8 @@ int bt_sock_wait_state(struct sock *sk, int state, unsigned long timeo)
 	BT_DBG("sk %p", sk);
 
 	add_wait_queue(sk_sleep(sk), &wait);
+	set_current_state(TASK_INTERRUPTIBLE);
 	while (sk->sk_state != state) {
-		set_current_state(TASK_INTERRUPTIBLE);
-
 		if (!timeo) {
 			err = -EINPROGRESS;
 			break;
@@ -510,12 +547,13 @@ int bt_sock_wait_state(struct sock *sk, int state, unsigned long timeo)
 		release_sock(sk);
 		timeo = schedule_timeout(timeo);
 		lock_sock(sk);
+		set_current_state(TASK_INTERRUPTIBLE);
 
 		err = sock_error(sk);
 		if (err)
 			break;
 	}
-	set_current_state(TASK_RUNNING);
+	__set_current_state(TASK_RUNNING);
 	remove_wait_queue(sk_sleep(sk), &wait);
 	return err;
 }
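The gate added to bt_sock_create() above composes two checks: current_has_bt_admin() passes for root or members of the Android AID_NET_BT_ADMIN group, and current_has_bt() additionally accepts AID_NET_BT, so admin rights imply plain Bluetooth rights. RFCOMM, SCO and L2CAP sockets need only the weaker group; every other protocol (for example raw HCI sockets) needs the admin group. A standalone sketch of that decision, with the group predicates stubbed out as booleans rather than real credential checks:

#include <stdbool.h>

#define EPERM 1

enum { BTPROTO_L2CAP, BTPROTO_HCI, BTPROTO_SCO, BTPROTO_RFCOMM };

/* Stand-ins for the kernel predicates: in the patch these test
 * current_euid() == 0 or membership in AID_NET_BT(_ADMIN). */
static bool has_bt_admin;	/* root or AID_NET_BT_ADMIN */
static bool has_bt;		/* has_bt_admin or AID_NET_BT */

/* Same shape as the gate in bt_sock_create(): data-plane protocols
 * need NET_BT, everything else needs NET_BT_ADMIN. */
int bt_create_allowed(int proto)
{
	if (proto == BTPROTO_RFCOMM || proto == BTPROTO_SCO ||
	    proto == BTPROTO_L2CAP) {
		if (!has_bt)
			return -EPERM;
	} else if (!has_bt_admin) {
		return -EPERM;
	}
	return 0;
}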
diff --git a/net/bluetooth/bnep/bnep.h b/net/bluetooth/bnep/bnep.h
index 8e6c06158f8e..e7ee5314f39a 100644
--- a/net/bluetooth/bnep/bnep.h
+++ b/net/bluetooth/bnep/bnep.h
@@ -155,6 +155,7 @@ struct bnep_session {
 	unsigned int  role;
 	unsigned long state;
 	unsigned long flags;
+	atomic_t      terminate;
 	struct task_struct *task;
 
 	struct ethhdr eh;
diff --git a/net/bluetooth/bnep/core.c b/net/bluetooth/bnep/core.c
index ca39fcf010ce..d9edfe8bf9d6 100644
--- a/net/bluetooth/bnep/core.c
+++ b/net/bluetooth/bnep/core.c
@@ -484,9 +484,11 @@ static int bnep_session(void *arg)
 
 	init_waitqueue_entry(&wait, current);
 	add_wait_queue(sk_sleep(sk), &wait);
-	while (!kthread_should_stop()) {
+	while (1) {
 		set_current_state(TASK_INTERRUPTIBLE);
 
+		if (atomic_read(&s->terminate))
+			break;
 		/* RX */
 		while ((skb = skb_dequeue(&sk->sk_receive_queue))) {
 			skb_orphan(skb);
@@ -504,7 +506,7 @@ static int bnep_session(void *arg)
 
 		schedule();
 	}
-	set_current_state(TASK_RUNNING);
+	__set_current_state(TASK_RUNNING);
 	remove_wait_queue(sk_sleep(sk), &wait);
 
 	/* Cleanup session */
@@ -640,9 +642,10 @@ int bnep_del_connection(struct bnep_conndel_req *req)
 	down_read(&bnep_session_sem);
 
 	s = __bnep_get_session(req->dst);
-	if (s)
-		kthread_stop(s->task);
-	else
+	if (s) {
+		atomic_inc(&s->terminate);
+		wake_up_process(s->task);
+	} else
 		err = -ENOENT;
 
 	up_read(&bnep_session_sem);
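The BNEP change above drops kthread_stop() in favour of a session-owned terminate flag plus wake_up_process(): the session thread also sleeps waiting on socket activity, and kthread_stop() blocks its caller until the thread exits. A condensed kernel-style sketch of the pattern (illustrative, not the driver code itself; the work loop and error handling are elided):

#include <linux/sched.h>
#include <linux/atomic.h>
#include <linux/kthread.h>

struct session {
	atomic_t terminate;
	struct task_struct *task;
};

static int session_thread(void *arg)
{
	struct session *s = arg;

	while (1) {
		set_current_state(TASK_INTERRUPTIBLE);
		if (atomic_read(&s->terminate))
			break;
		/* ... drain receive queue, do work ... */
		schedule();
	}
	__set_current_state(TASK_RUNNING);
	return 0;
}

static void session_stop(struct session *s)
{
	atomic_inc(&s->terminate);	/* order matters: raise the flag, */
	wake_up_process(s->task);	/* then kick the sleeping thread  */
}

The flag must be set before the wakeup so the thread cannot go back to sleep between the two steps; checking the flag after set_current_state() on the thread side closes the other half of the race.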
diff --git a/net/bluetooth/cmtp/capi.c b/net/bluetooth/cmtp/capi.c
index 744233cba244..040f67b12978 100644
--- a/net/bluetooth/cmtp/capi.c
+++ b/net/bluetooth/cmtp/capi.c
@@ -326,7 +326,7 @@ void cmtp_recv_capimsg(struct cmtp_session *session, struct sk_buff *skb)
 {
 	struct capi_ctr *ctrl = &session->ctrl;
 	struct cmtp_application *application;
-	__u16 cmd, appl;
+	__u16 appl;
 	__u32 contr;
 
 	BT_DBG("session %p skb %p len %d", session, skb, skb->len);
@@ -344,7 +344,6 @@ void cmtp_recv_capimsg(struct cmtp_session *session, struct sk_buff *skb)
 		return;
 	}
 
-	cmd = CAPICMD(CAPIMSG_COMMAND(skb->data), CAPIMSG_SUBCOMMAND(skb->data));
 	appl = CAPIMSG_APPID(skb->data);
 	contr = CAPIMSG_CONTROL(skb->data);
diff --git a/net/bluetooth/hci_conn.c b/net/bluetooth/hci_conn.c
index bcd158f40bb9..33c4e0cd83b1 100644
--- a/net/bluetooth/hci_conn.c
+++ b/net/bluetooth/hci_conn.c
@@ -53,11 +53,13 @@ static void hci_le_connect(struct hci_conn *conn)
 	conn->state = BT_CONNECT;
 	conn->out = 1;
 	conn->link_mode |= HCI_LM_MASTER;
+	conn->sec_level = BT_SECURITY_LOW;
 
 	memset(&cp, 0, sizeof(cp));
 	cp.scan_interval = cpu_to_le16(0x0004);
 	cp.scan_window = cpu_to_le16(0x0004);
 	bacpy(&cp.peer_addr, &conn->dst);
+	cp.peer_addr_type = conn->dst_type;
 	cp.conn_interval_min = cpu_to_le16(0x0008);
 	cp.conn_interval_max = cpu_to_le16(0x0100);
 	cp.supervision_timeout = cpu_to_le16(0x0064);
@@ -203,6 +205,55 @@ void hci_le_conn_update(struct hci_conn *conn, u16 min, u16 max,
 }
 EXPORT_SYMBOL(hci_le_conn_update);
 
+void hci_le_start_enc(struct hci_conn *conn, __le16 ediv, __u8 rand[8],
+							__u8 ltk[16])
+{
+	struct hci_dev *hdev = conn->hdev;
+	struct hci_cp_le_start_enc cp;
+
+	BT_DBG("%p", conn);
+
+	memset(&cp, 0, sizeof(cp));
+
+	cp.handle = cpu_to_le16(conn->handle);
+	memcpy(cp.ltk, ltk, sizeof(cp.ltk));
+	cp.ediv = ediv;
+	memcpy(cp.rand, rand, sizeof(rand));
+
+	hci_send_cmd(hdev, HCI_OP_LE_START_ENC, sizeof(cp), &cp);
+}
+EXPORT_SYMBOL(hci_le_start_enc);
+
+void hci_le_ltk_reply(struct hci_conn *conn, u8 ltk[16])
+{
+	struct hci_dev *hdev = conn->hdev;
+	struct hci_cp_le_ltk_reply cp;
+
+	BT_DBG("%p", conn);
+
+	memset(&cp, 0, sizeof(cp));
+
+	cp.handle = cpu_to_le16(conn->handle);
+	memcpy(cp.ltk, ltk, sizeof(ltk));
+
+	hci_send_cmd(hdev, HCI_OP_LE_LTK_REPLY, sizeof(cp), &cp);
+}
+EXPORT_SYMBOL(hci_le_ltk_reply);
+
+void hci_le_ltk_neg_reply(struct hci_conn *conn)
+{
+	struct hci_dev *hdev = conn->hdev;
+	struct hci_cp_le_ltk_neg_reply cp;
+
+	BT_DBG("%p", conn);
+
+	memset(&cp, 0, sizeof(cp));
+
+	cp.handle = cpu_to_le16(conn->handle);
+
+	hci_send_cmd(hdev, HCI_OP_LE_LTK_NEG_REPLY, sizeof(cp), &cp);
+}
+
 /* Device _must_ be locked */
 void hci_sco_setup(struct hci_conn *conn, __u8 status)
 {
@@ -282,7 +333,8 @@ static void hci_conn_auto_accept(unsigned long arg)
 	hci_dev_unlock(hdev);
 }
 
-struct hci_conn *hci_conn_add(struct hci_dev *hdev, int type, bdaddr_t *dst)
+struct hci_conn *hci_conn_add(struct hci_dev *hdev, int type,
+					__u16 pkt_type, bdaddr_t *dst)
 {
 	struct hci_conn *conn;
 
@@ -310,14 +362,22 @@ struct hci_conn *hci_conn_add(struct hci_dev *hdev, int type, bdaddr_t *dst)
 		conn->pkt_type = hdev->pkt_type & ACL_PTYPE_MASK;
 		break;
 	case SCO_LINK:
-		if (lmp_esco_capable(hdev))
-			conn->pkt_type = (hdev->esco_type & SCO_ESCO_MASK) |
-					(hdev->esco_type & EDR_ESCO_MASK);
-		else
-			conn->pkt_type = hdev->pkt_type & SCO_PTYPE_MASK;
-		break;
+		if (!pkt_type)
+			pkt_type = SCO_ESCO_MASK;
 	case ESCO_LINK:
-		conn->pkt_type = hdev->esco_type & ~EDR_ESCO_MASK;
+		if (!pkt_type)
+			pkt_type = ALL_ESCO_MASK;
+		if (lmp_esco_capable(hdev)) {
+			/* HCI Setup Synchronous Connection Command uses
+			   reverse logic on the EDR_ESCO_MASK bits */
+			conn->pkt_type = (pkt_type ^ EDR_ESCO_MASK) &
+					hdev->esco_type;
+		} else {
+			/* Legacy HCI Add Sco Connection Command uses a
+			   shifted bitmask */
+			conn->pkt_type = (pkt_type << 5) & hdev->pkt_type &
+					SCO_PTYPE_MASK;
+		}
 		break;
 	}
 
@@ -441,7 +501,9 @@ EXPORT_SYMBOL(hci_get_route);
 
 /* Create SCO, ACL or LE connection.
  * Device _must_ be locked */
-struct hci_conn *hci_connect(struct hci_dev *hdev, int type, bdaddr_t *dst, __u8 sec_level, __u8 auth_type)
+struct hci_conn *hci_connect(struct hci_dev *hdev, int type,
+					__u16 pkt_type, bdaddr_t *dst,
+					__u8 sec_level, __u8 auth_type)
 {
 	struct hci_conn *acl;
 	struct hci_conn *sco;
@@ -450,14 +512,23 @@ struct hci_conn *hci_connect(struct hci_dev *hdev, int type, bdaddr_t *dst, __u8
 	BT_DBG("%s dst %s", hdev->name, batostr(dst));
 
 	if (type == LE_LINK) {
+		struct adv_entry *entry;
+
 		le = hci_conn_hash_lookup_ba(hdev, LE_LINK, dst);
 		if (le)
 			return ERR_PTR(-EBUSY);
-		le = hci_conn_add(hdev, LE_LINK, dst);
+
+		entry = hci_find_adv_entry(hdev, dst);
+		if (!entry)
+			return ERR_PTR(-EHOSTUNREACH);
+
+		le = hci_conn_add(hdev, LE_LINK, 0, dst);
 		if (!le)
 			return ERR_PTR(-ENOMEM);
-		if (le->state == BT_OPEN)
-			hci_le_connect(le);
+
+		le->dst_type = entry->bdaddr_type;
+
+		hci_le_connect(le);
 
 		hci_conn_hold(le);
 
@@ -466,7 +537,7 @@ struct hci_conn *hci_connect(struct hci_dev *hdev, int type, bdaddr_t *dst, __u8
 
 	acl = hci_conn_hash_lookup_ba(hdev, ACL_LINK, dst);
 	if (!acl) {
-		acl = hci_conn_add(hdev, ACL_LINK, dst);
+		acl = hci_conn_add(hdev, ACL_LINK, 0, dst);
 		if (!acl)
 			return NULL;
 	}
@@ -485,7 +556,7 @@ struct hci_conn *hci_connect(struct hci_dev *hdev, int type, bdaddr_t *dst, __u8
 
 	sco = hci_conn_hash_lookup_ba(hdev, type, dst);
 	if (!sco) {
-		sco = hci_conn_add(hdev, type, dst);
+		sco = hci_conn_add(hdev, type, pkt_type, dst);
 		if (!sco) {
 			hci_conn_put(acl);
 			return NULL;
@@ -500,7 +571,7 @@ struct hci_conn *hci_connect(struct hci_dev *hdev, int type, bdaddr_t *dst, __u8
 	if (acl->state == BT_CONNECTED &&
 			(sco->state == BT_OPEN || sco->state == BT_CLOSED)) {
 		acl->power_save = 1;
-		hci_conn_enter_active_mode(acl);
+		hci_conn_enter_active_mode(acl, BT_POWER_FORCE_ACTIVE_ON);
 
 		if (test_bit(HCI_CONN_MODE_CHANGE_PEND, &acl->pend)) {
 			/* defer SCO setup until mode change completed */
@@ -548,9 +619,15 @@ static int hci_conn_auth(struct hci_conn *conn, __u8 sec_level, __u8 auth_type)
 
 	if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->pend)) {
 		struct hci_cp_auth_requested cp;
+
+		/* encrypt must be pending if auth is also pending */
+		set_bit(HCI_CONN_ENCRYPT_PEND, &conn->pend);
+
 		cp.handle = cpu_to_le16(conn->handle);
 		hci_send_cmd(conn->hdev, HCI_OP_AUTH_REQUESTED,
 							sizeof(cp), &cp);
+		if (conn->key_type != 0xff)
+			set_bit(HCI_CONN_REAUTH_PEND, &conn->pend);
 	}
 
 	return 0;
@@ -634,9 +711,7 @@ int hci_conn_check_secure(struct hci_conn *conn, __u8 sec_level)
 	if (sec_level != BT_SECURITY_HIGH)
 		return 1; /* Accept if non-secure is required */
 
-	if (conn->key_type == HCI_LK_AUTH_COMBINATION ||
-			(conn->key_type == HCI_LK_COMBINATION &&
-			conn->pin_length == 16))
+	if (conn->sec_level == BT_SECURITY_HIGH)
 		return 1;
 
 	return 0; /* Reject not secure link */
@@ -679,7 +754,7 @@ int hci_conn_switch_role(struct hci_conn *conn, __u8 role)
 EXPORT_SYMBOL(hci_conn_switch_role);
 
 /* Enter active mode */
-void hci_conn_enter_active_mode(struct hci_conn *conn)
+void hci_conn_enter_active_mode(struct hci_conn *conn, __u8 force_active)
 {
 	struct hci_dev *hdev = conn->hdev;
 
@@ -688,7 +763,10 @@ void hci_conn_enter_active_mode(struct hci_conn *conn)
 	if (test_bit(HCI_RAW, &hdev->flags))
 		return;
 
-	if (conn->mode != HCI_CM_SNIFF || !conn->power_save)
+	if (conn->mode != HCI_CM_SNIFF)
+		goto timer;
+
+	if (!conn->power_save && !force_active)
 		goto timer;
 
 	if (!test_and_set_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->pend)) {
@@ -829,6 +907,15 @@ int hci_get_conn_list(void __user *arg)
 		(ci + n)->out   = c->out;
 		(ci + n)->state = c->state;
 		(ci + n)->link_mode = c->link_mode;
+		if (c->type == SCO_LINK) {
+			(ci + n)->mtu = hdev->sco_mtu;
+			(ci + n)->cnt = hdev->sco_cnt;
+			(ci + n)->pkts = hdev->sco_pkts;
+		} else {
+			(ci + n)->mtu = hdev->acl_mtu;
+			(ci + n)->cnt = hdev->acl_cnt;
+			(ci + n)->pkts = hdev->acl_pkts;
+		}
 		if (++n >= req.conn_num)
 			break;
 	}
@@ -865,6 +952,15 @@ int hci_get_conn_info(struct hci_dev *hdev, void __user *arg)
 		ci.out   = conn->out;
 		ci.state = conn->state;
 		ci.link_mode = conn->link_mode;
+		if (req.type == SCO_LINK) {
+			ci.mtu = hdev->sco_mtu;
+			ci.cnt = hdev->sco_cnt;
+			ci.pkts = hdev->sco_pkts;
+		} else {
+			ci.mtu = hdev->acl_mtu;
+			ci.cnt = hdev->acl_cnt;
+			ci.pkts = hdev->acl_pkts;
+		}
 	}
 
 	hci_dev_unlock_bh(hdev);
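The pkt_type handling in hci_conn_add() above needs the XOR because in the HCI Setup Synchronous Connection command the EDR eSCO bits mean "this packet type may NOT be used", while the legacy Add SCO Connection command takes a plain bitmask shifted into the ACL packet-type positions. A user-space sketch of the two translations; the mask values are taken from <net/bluetooth/hci.h> of this kernel era (EDR_ESCO_MASK = 0x03c0, SCO_PTYPE_MASK = 0x00e0) and should be treated as illustrative:

#include <stdint.h>

#define EDR_ESCO_MASK  0x03c0	/* ESCO_2EV3|ESCO_3EV3|ESCO_2EV5|ESCO_3EV5 */
#define SCO_PTYPE_MASK 0x00e0	/* HV1|HV2|HV3 */

/* eSCO-capable controller: flip the EDR "don't use" bits, then keep
 * only types the controller supports. */
uint16_t esco_pkt_type(uint16_t requested, uint16_t esco_type)
{
	return (requested ^ EDR_ESCO_MASK) & esco_type;
}

/* Legacy Add SCO Connection: a straight shifted bitmask against the
 * controller's ACL packet types. */
uint16_t legacy_sco_pkt_type(uint16_t requested, uint16_t pkt_type)
{
	return (requested << 5) & pkt_type & SCO_PTYPE_MASK;
}

Note also the deliberate fall-through from SCO_LINK into ESCO_LINK in the patch: a SCO request with no explicit pkt_type first defaults to SCO_ESCO_MASK and then flows through the shared eSCO/legacy translation.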
diff --git a/net/bluetooth/hci_core.c b/net/bluetooth/hci_core.c
index 815269b07f20..3b3919864078 100644
--- a/net/bluetooth/hci_core.c
+++ b/net/bluetooth/hci_core.c
@@ -42,6 +42,7 @@
 #include <linux/notifier.h>
 #include <linux/rfkill.h>
 #include <linux/timer.h>
+#include <linux/crypto.h>
 #include <net/sock.h>
 
 #include <asm/system.h>
@@ -145,7 +146,7 @@ static int __hci_request(struct hci_dev *hdev, void (*req)(struct hci_dev *hdev,
 	switch (hdev->req_status) {
 	case HCI_REQ_DONE:
-		err = -bt_err(hdev->req_result);
+		err = -bt_to_errno(hdev->req_result);
 		break;
 
 	case HCI_REQ_CANCELED:
@@ -539,7 +540,7 @@ int hci_dev_open(__u16 dev)
 			ret = __hci_request(hdev, hci_init_req, 0,
 					msecs_to_jiffies(HCI_INIT_TIMEOUT));
 
-		if (lmp_le_capable(hdev))
+		if (lmp_host_le_capable(hdev))
 			ret = __hci_request(hdev, hci_le_init_req, 0,
 					msecs_to_jiffies(HCI_INIT_TIMEOUT));
 
@@ -1056,6 +1057,42 @@ static int hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
 	return 0;
 }
 
+struct link_key *hci_find_ltk(struct hci_dev *hdev, __le16 ediv, u8 rand[8])
+{
+	struct link_key *k;
+
+	list_for_each_entry(k, &hdev->link_keys, list) {
+		struct key_master_id *id;
+
+		if (k->type != HCI_LK_SMP_LTK)
+			continue;
+
+		if (k->dlen != sizeof(*id))
+			continue;
+
+		id = (void *) &k->data;
+		if (id->ediv == ediv &&
+				(memcmp(rand, id->rand, sizeof(id->rand)) == 0))
+			return k;
+	}
+
+	return NULL;
+}
+EXPORT_SYMBOL(hci_find_ltk);
+
+struct link_key *hci_find_link_key_type(struct hci_dev *hdev,
+						bdaddr_t *bdaddr, u8 type)
+{
+	struct link_key *k;
+
+	list_for_each_entry(k, &hdev->link_keys, list)
+		if (k->type == type && bacmp(bdaddr, &k->bdaddr) == 0)
+			return k;
+
+	return NULL;
+}
+EXPORT_SYMBOL(hci_find_link_key_type);
+
 int hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn, int new_key,
 				bdaddr_t *bdaddr, u8 *val, u8 type, u8 pin_len)
 {
@@ -1111,6 +1148,44 @@ int hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn, int new_key,
 	return 0;
 }
 
+int hci_add_ltk(struct hci_dev *hdev, int new_key, bdaddr_t *bdaddr,
+			u8 key_size, __le16 ediv, u8 rand[8], u8 ltk[16])
+{
+	struct link_key *key, *old_key;
+	struct key_master_id *id;
+	u8 old_key_type;
+
+	BT_DBG("%s addr %s", hdev->name, batostr(bdaddr));
+
+	old_key = hci_find_link_key_type(hdev, bdaddr, HCI_LK_SMP_LTK);
+	if (old_key) {
+		key = old_key;
+		old_key_type = old_key->type;
+	} else {
+		key = kzalloc(sizeof(*key) + sizeof(*id), GFP_ATOMIC);
+		if (!key)
+			return -ENOMEM;
+		list_add(&key->list, &hdev->link_keys);
+		old_key_type = 0xff;
+	}
+
+	key->dlen = sizeof(*id);
+
+	bacpy(&key->bdaddr, bdaddr);
+	memcpy(key->val, ltk, sizeof(key->val));
+	key->type = HCI_LK_SMP_LTK;
+	key->pin_len = key_size;
+
+	id = (void *) &key->data;
+	id->ediv = ediv;
+	memcpy(id->rand, rand, sizeof(id->rand));
+
+	if (new_key)
+		mgmt_new_key(hdev->id, key, old_key_type);
+
+	return 0;
+}
+
 int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
 {
 	struct link_key *key;
@@ -1134,7 +1209,6 @@ static void hci_cmd_timer(unsigned long arg)
 
 	BT_ERR("%s command tx timeout", hdev->name);
 	atomic_set(&hdev->cmd_cnt, 1);
-	clear_bit(HCI_RESET, &hdev->flags);
 	tasklet_schedule(&hdev->cmd_task);
 }
 
@@ -1202,6 +1276,169 @@ int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 *hash,
 	return 0;
 }
 
+struct bdaddr_list *hci_blacklist_lookup(struct hci_dev *hdev,
+						bdaddr_t *bdaddr)
+{
+	struct list_head *p;
+
+	list_for_each(p, &hdev->blacklist) {
+		struct bdaddr_list *b;
+
+		b = list_entry(p, struct bdaddr_list, list);
+
+		if (bacmp(bdaddr, &b->bdaddr) == 0)
+			return b;
+	}
+
+	return NULL;
+}
+
+int hci_blacklist_clear(struct hci_dev *hdev)
+{
+	struct list_head *p, *n;
+
+	list_for_each_safe(p, n, &hdev->blacklist) {
+		struct bdaddr_list *b;
+
+		b = list_entry(p, struct bdaddr_list, list);
+
+		list_del(p);
+		kfree(b);
+	}
+
+	return 0;
+}
+
+int hci_blacklist_add(struct hci_dev *hdev, bdaddr_t *bdaddr)
+{
+	struct bdaddr_list *entry;
+	int err;
+
+	if (bacmp(bdaddr, BDADDR_ANY) == 0)
+		return -EBADF;
+
+	hci_dev_lock_bh(hdev);
+
+	if (hci_blacklist_lookup(hdev, bdaddr)) {
+		err = -EEXIST;
+		goto err;
+	}
+
+	entry = kzalloc(sizeof(struct bdaddr_list), GFP_KERNEL);
+	if (!entry) {
+		return -ENOMEM;
+		goto err;
+	}
+
+	bacpy(&entry->bdaddr, bdaddr);
+
+	list_add(&entry->list, &hdev->blacklist);
+
+	err = 0;
+
+err:
+	hci_dev_unlock_bh(hdev);
+	return err;
+}
+
+int hci_blacklist_del(struct hci_dev *hdev, bdaddr_t *bdaddr)
+{
+	struct bdaddr_list *entry;
+	int err = 0;
+
+	hci_dev_lock_bh(hdev);
+
+	if (bacmp(bdaddr, BDADDR_ANY) == 0) {
+		hci_blacklist_clear(hdev);
+		goto done;
+	}
+
+	entry = hci_blacklist_lookup(hdev, bdaddr);
+	if (!entry) {
+		err = -ENOENT;
+		goto done;
+	}
+
+	list_del(&entry->list);
+	kfree(entry);
+
+done:
+	hci_dev_unlock_bh(hdev);
+	return err;
+}
+
+static void hci_clear_adv_cache(unsigned long arg)
+{
+	struct hci_dev *hdev = (void *) arg;
+
+	hci_dev_lock(hdev);
+
+	hci_adv_entries_clear(hdev);
+
+	hci_dev_unlock(hdev);
+}
+
+int hci_adv_entries_clear(struct hci_dev *hdev)
+{
+	struct adv_entry *entry, *tmp;
+
+	list_for_each_entry_safe(entry, tmp, &hdev->adv_entries, list) {
+		list_del(&entry->list);
+		kfree(entry);
+	}
+
+	BT_DBG("%s adv cache cleared", hdev->name);
+
+	return 0;
+}
+
+struct adv_entry *hci_find_adv_entry(struct hci_dev *hdev, bdaddr_t *bdaddr)
+{
+	struct adv_entry *entry;
+
+	list_for_each_entry(entry, &hdev->adv_entries, list)
+		if (bacmp(bdaddr, &entry->bdaddr) == 0)
+			return entry;
+
+	return NULL;
+}
+
+static inline int is_connectable_adv(u8 evt_type)
+{
+	if (evt_type == ADV_IND || evt_type == ADV_DIRECT_IND)
+		return 1;
+
+	return 0;
+}
+
+int hci_add_adv_entry(struct hci_dev *hdev,
+					struct hci_ev_le_advertising_info *ev)
+{
+	struct adv_entry *entry;
+
+	if (!is_connectable_adv(ev->evt_type))
+		return -EINVAL;
+
+	/* Only new entries should be added to adv_entries. So, if
+	 * bdaddr was found, don't add it. */
+	if (hci_find_adv_entry(hdev, &ev->bdaddr))
+		return 0;
+
+	entry = kzalloc(sizeof(*entry), GFP_ATOMIC);
+	if (!entry)
+		return -ENOMEM;
+
+	bacpy(&entry->bdaddr, &ev->bdaddr);
+	entry->bdaddr_type = ev->bdaddr_type;
+
+	list_add(&entry->list, &hdev->adv_entries);
+
+	BT_DBG("%s adv entry added: address %s type %u", hdev->name,
+				batostr(&entry->bdaddr), entry->bdaddr_type);
+
+	return 0;
+}
+
 /* Register HCI device */
 int hci_register_dev(struct hci_dev *hdev)
 {
@@ -1268,6 +1505,10 @@ int hci_register_dev(struct hci_dev *hdev)
 
 	INIT_LIST_HEAD(&hdev->remote_oob_data);
 
+	INIT_LIST_HEAD(&hdev->adv_entries);
+	setup_timer(&hdev->adv_timer, hci_clear_adv_cache,
+						(unsigned long) hdev);
+
 	INIT_WORK(&hdev->power_on, hci_power_on);
 	INIT_WORK(&hdev->power_off, hci_power_off);
 	setup_timer(&hdev->off_timer, hci_auto_off, (unsigned long) hdev);
@@ -1282,6 +1523,11 @@ int hci_register_dev(struct hci_dev *hdev)
 	if (!hdev->workqueue)
 		goto nomem;
 
+	hdev->tfm = crypto_alloc_blkcipher("ecb(aes)", 0, CRYPTO_ALG_ASYNC);
+	if (IS_ERR(hdev->tfm))
+		BT_INFO("Failed to load transform for ecb(aes): %ld",
+							PTR_ERR(hdev->tfm));
+
 	hci_register_sysfs(hdev);
 
 	hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
@@ -1330,6 +1576,9 @@ int hci_unregister_dev(struct hci_dev *hdev)
 					!test_bit(HCI_SETUP, &hdev->flags))
 		mgmt_index_removed(hdev->id);
 
+	if (!IS_ERR(hdev->tfm))
+		crypto_free_blkcipher(hdev->tfm);
+
 	hci_notify(hdev, HCI_DEV_UNREG);
 
 	if (hdev->rfkill) {
@@ -1340,6 +1589,7 @@ int hci_unregister_dev(struct hci_dev *hdev)
 	hci_unregister_sysfs(hdev);
 
 	hci_del_off_timer(hdev);
+	del_timer(&hdev->adv_timer);
 
 	destroy_workqueue(hdev->workqueue);
 
@@ -1348,6 +1598,7 @@ int hci_unregister_dev(struct hci_dev *hdev)
 	hci_uuids_clear(hdev);
 	hci_link_keys_clear(hdev);
 	hci_remote_oob_data_clear(hdev);
+	hci_adv_entries_clear(hdev);
 	hci_dev_unlock_bh(hdev);
 
 	__hci_dev_put(hdev);
@@ -1891,7 +2142,7 @@ static inline void hci_sched_acl(struct hci_dev *hdev)
 		while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
 			BT_DBG("skb %p len %d", skb, skb->len);
 
-			hci_conn_enter_active_mode(conn);
+			hci_conn_enter_active_mode(conn, bt_cb(skb)->force_active);
 
 			hci_send_frame(skb);
 			hdev->acl_last_tx = jiffies;
@@ -2030,7 +2281,7 @@ static inline void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
 	if (conn) {
 		register struct hci_proto *hp;
 
-		hci_conn_enter_active_mode(conn);
+		hci_conn_enter_active_mode(conn, bt_cb(skb)->force_active);
 
 		/* Send to upper protocol */
 		hp = hci_proto[HCI_PROTO_L2CAP];
@@ -2156,7 +2407,10 @@ static void hci_cmd_task(unsigned long arg)
 		if (hdev->sent_cmd) {
 			atomic_dec(&hdev->cmd_cnt);
 			hci_send_frame(skb);
-			mod_timer(&hdev->cmd_timer,
+			if (test_bit(HCI_RESET, &hdev->flags))
+				del_timer(&hdev->cmd_timer);
+			else
+				mod_timer(&hdev->cmd_timer,
 					jiffies + msecs_to_jiffies(HCI_CMD_TIMEOUT));
 		} else {
 			skb_queue_head(&hdev->cmd_q, skb);
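hci_find_ltk() above reuses the existing link_keys list for SMP long-term keys: each LTK entry carries the 16-bit EDIV and the 8-byte Rand in its variable-length data area, and a key matches only when both agree with the controller's LE Long Term Key Request. A simplified user-space model of that lookup (struct ltk_entry is illustrative, not the kernel's layout):

#include <stdint.h>
#include <stddef.h>
#include <string.h>

struct ltk_entry {
	uint16_t ediv;		/* models key_master_id.ediv */
	uint8_t  rand[8];	/* models key_master_id.rand */
	uint8_t  val[16];	/* the long-term key itself  */
};

const struct ltk_entry *find_ltk(const struct ltk_entry *keys, size_t n,
				 uint16_t ediv, const uint8_t rand[8])
{
	size_t i;

	for (i = 0; i < n; i++)
		if (keys[i].ediv == ediv &&
		    memcmp(keys[i].rand, rand, 8) == 0)
			return &keys[i];
	/* no match: the event handler answers with LE_LTK_NEG_REPLY */
	return NULL;
}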
diff --git a/net/bluetooth/hci_event.c b/net/bluetooth/hci_event.c
index 01aa7e73d9bf..5a7074a7b5b8 100644..100755
--- a/net/bluetooth/hci_event.c
+++ b/net/bluetooth/hci_event.c
@@ -45,6 +45,8 @@
 #include <net/bluetooth/bluetooth.h>
 #include <net/bluetooth/hci_core.h>
 
+static int enable_le;
+
 /* Handle HCI Event packets */
 
 static void hci_cc_inquiry_cancel(struct hci_dev *hdev, struct sk_buff *skb)
@@ -525,6 +527,20 @@ static void hci_setup_event_mask(struct hci_dev *hdev)
 	hci_send_cmd(hdev, HCI_OP_SET_EVENT_MASK, sizeof(events), events);
 }
 
+static void hci_set_le_support(struct hci_dev *hdev)
+{
+	struct hci_cp_write_le_host_supported cp;
+
+	memset(&cp, 0, sizeof(cp));
+
+	if (enable_le) {
+		cp.le = 1;
+		cp.simul = !!(hdev->features[6] & LMP_SIMUL_LE_BR);
+	}
+
+	hci_send_cmd(hdev, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(cp), &cp);
+}
+
 static void hci_setup(struct hci_dev *hdev)
 {
 	hci_setup_event_mask(hdev);
@@ -542,6 +558,17 @@ static void hci_setup(struct hci_dev *hdev)
 
 	if (hdev->features[7] & LMP_INQ_TX_PWR)
 		hci_send_cmd(hdev, HCI_OP_READ_INQ_RSP_TX_POWER, 0, NULL);
+
+	if (hdev->features[7] & LMP_EXTFEATURES) {
+		struct hci_cp_read_local_ext_features cp;
+
+		cp.page = 0x01;
+		hci_send_cmd(hdev, HCI_OP_READ_LOCAL_EXT_FEATURES,
+							sizeof(cp), &cp);
+	}
+
+	if (hdev->features[4] & LMP_LE)
+		hci_set_le_support(hdev);
 }
 
 static void hci_cc_read_local_version(struct hci_dev *hdev, struct sk_buff *skb)
@@ -658,6 +685,21 @@ static void hci_cc_read_local_features(struct hci_dev *hdev, struct sk_buff *skb
 					hdev->features[6], hdev->features[7]);
 }
 
+static void hci_cc_read_local_ext_features(struct hci_dev *hdev,
+							struct sk_buff *skb)
+{
+	struct hci_rp_read_local_ext_features *rp = (void *) skb->data;
+
+	BT_DBG("%s status 0x%x", hdev->name, rp->status);
+
+	if (rp->status)
+		return;
+
+	memcpy(hdev->extfeatures, rp->features, 8);
+
+	hci_req_complete(hdev, HCI_OP_READ_LOCAL_EXT_FEATURES, rp->status);
+}
+
 static void hci_cc_read_buffer_size(struct hci_dev *hdev, struct sk_buff *skb)
 {
 	struct hci_rp_read_buffer_size *rp = (void *) skb->data;
@@ -841,6 +883,72 @@ static void hci_cc_read_local_oob_data_reply(struct hci_dev *hdev,
 						rp->randomizer, rp->status);
 }
 
+static void hci_cc_le_set_scan_enable(struct hci_dev *hdev,
+					struct sk_buff *skb)
+{
+	struct hci_cp_le_set_scan_enable *cp;
+	__u8 status = *((__u8 *) skb->data);
+
+	BT_DBG("%s status 0x%x", hdev->name, status);
+
+	if (status)
+		return;
+
+	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_SCAN_ENABLE);
+	if (!cp)
+		return;
+
+	hci_dev_lock(hdev);
+
+	if (cp->enable == 0x01) {
+		del_timer(&hdev->adv_timer);
+		hci_adv_entries_clear(hdev);
+	} else if (cp->enable == 0x00) {
+		mod_timer(&hdev->adv_timer, jiffies + ADV_CLEAR_TIMEOUT);
+	}
+
+	hci_dev_unlock(hdev);
+}
+
+static void hci_cc_le_ltk_reply(struct hci_dev *hdev, struct sk_buff *skb)
+{
+	struct hci_rp_le_ltk_reply *rp = (void *) skb->data;
+
+	BT_DBG("%s status 0x%x", hdev->name, rp->status);
+
+	if (rp->status)
+		return;
+
+	hci_req_complete(hdev, HCI_OP_LE_LTK_REPLY, rp->status);
+}
+
+static void hci_cc_le_ltk_neg_reply(struct hci_dev *hdev, struct sk_buff *skb)
+{
+	struct hci_rp_le_ltk_neg_reply *rp = (void *) skb->data;
+
+	BT_DBG("%s status 0x%x", hdev->name, rp->status);
+
+	if (rp->status)
+		return;
+
+	hci_req_complete(hdev, HCI_OP_LE_LTK_NEG_REPLY, rp->status);
+}
+
+static inline void hci_cc_write_le_host_supported(struct hci_dev *hdev,
+							struct sk_buff *skb)
+{
+	struct hci_cp_read_local_ext_features cp;
+	__u8 status = *((__u8 *) skb->data);
+
+	BT_DBG("%s status 0x%x", hdev->name, status);
+
+	if (status)
+		return;
+
+	cp.page = 0x01;
+	hci_send_cmd(hdev, HCI_OP_READ_LOCAL_EXT_FEATURES, sizeof(cp), &cp);
+}
+
 static inline void hci_cs_inquiry(struct hci_dev *hdev, __u8 status)
 {
 	BT_DBG("%s status 0x%x", hdev->name, status);
@@ -884,7 +992,7 @@ static inline void hci_cs_create_conn(struct hci_dev *hdev, __u8 status)
 		}
 	} else {
 		if (!conn) {
-			conn = hci_conn_add(hdev, ACL_LINK, &cp->bdaddr);
+			conn = hci_conn_add(hdev, ACL_LINK, 0, &cp->bdaddr);
 			if (conn) {
 				conn->out = 1;
 				conn->link_mode |= HCI_LM_MASTER;
@@ -1207,17 +1315,24 @@ static void hci_cs_le_create_conn(struct hci_dev *hdev, __u8 status)
 		}
 	} else {
 		if (!conn) {
-			conn = hci_conn_add(hdev, LE_LINK, &cp->peer_addr);
-			if (conn)
+			conn = hci_conn_add(hdev, LE_LINK, 0, &cp->peer_addr);
+			if (conn) {
+				conn->dst_type = cp->peer_addr_type;
 				conn->out = 1;
-			else
+			} else {
 				BT_ERR("No memory for new connection");
+			}
 		}
 	}
 
	hci_dev_unlock(hdev);
 }
 
+static void hci_cs_le_start_enc(struct hci_dev *hdev, u8 status)
+{
+	BT_DBG("%s status 0x%x", hdev->name, status);
+}
+
 static inline void hci_inquiry_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
 {
 	__u8 status = *((__u8 *) skb->data);
@@ -1347,6 +1462,15 @@ unlock:
 	hci_conn_check_pending(hdev);
 }
 
+static inline bool is_sco_active(struct hci_dev *hdev)
+{
+	if (hci_conn_hash_lookup_state(hdev, SCO_LINK, BT_CONNECTED) ||
+			(hci_conn_hash_lookup_state(hdev, ESCO_LINK,
+						BT_CONNECTED)))
+		return true;
+	return false;
+}
+
 static inline void hci_conn_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
 {
 	struct hci_ev_conn_request *ev = (void *) skb->data;
@@ -1371,7 +1495,8 @@ static inline void hci_conn_request_evt(struct hci_dev *hdev, struct sk_buff *sk
 		conn = hci_conn_hash_lookup_ba(hdev, ev->link_type, &ev->bdaddr);
 		if (!conn) {
-			conn = hci_conn_add(hdev, ev->link_type, &ev->bdaddr);
+			/* pkt_type not yet used for incoming connections */
+			conn = hci_conn_add(hdev, ev->link_type, 0, &ev->bdaddr);
 			if (!conn) {
 				BT_ERR("No memory for new connection");
 				hci_dev_unlock(hdev);
@@ -1389,7 +1514,8 @@ static inline void hci_conn_request_evt(struct hci_dev *hdev, struct sk_buff *sk
 
 			bacpy(&cp.bdaddr, &ev->bdaddr);
 
-			if (lmp_rswitch_capable(hdev) && (mask & HCI_LM_MASTER))
+			if (lmp_rswitch_capable(hdev) && ((mask & HCI_LM_MASTER)
+						|| is_sco_active(hdev)))
 				cp.role = 0x00; /* Become master */
 			else
 				cp.role = 0x01; /* Remain slave */
@@ -1461,51 +1587,58 @@ static inline void hci_auth_complete_evt(struct hci_dev *hdev, struct sk_buff *s
 	hci_dev_lock(hdev);
 
 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
-	if (conn) {
-		if (!ev->status) {
+	if (!conn)
+		goto unlock;
+
+	if (!ev->status) {
+		if (!(conn->ssp_mode > 0 && hdev->ssp_mode > 0) &&
+				test_bit(HCI_CONN_REAUTH_PEND, &conn->pend)) {
+			BT_INFO("re-auth of legacy device is not possible.");
+		} else {
 			conn->link_mode |= HCI_LM_AUTH;
 			conn->sec_level = conn->pending_sec_level;
-		} else {
-			mgmt_auth_failed(hdev->id, &conn->dst, ev->status);
 		}
+	} else {
+		mgmt_auth_failed(hdev->id, &conn->dst, ev->status);
+	}
 
-		clear_bit(HCI_CONN_AUTH_PEND, &conn->pend);
+	clear_bit(HCI_CONN_AUTH_PEND, &conn->pend);
+	clear_bit(HCI_CONN_REAUTH_PEND, &conn->pend);
 
-		if (conn->state == BT_CONFIG) {
-			if (!ev->status && hdev->ssp_mode > 0 &&
-							conn->ssp_mode > 0) {
-				struct hci_cp_set_conn_encrypt cp;
-				cp.handle  = ev->handle;
-				cp.encrypt = 0x01;
-				hci_send_cmd(hdev, HCI_OP_SET_CONN_ENCRYPT,
-							sizeof(cp), &cp);
-			} else {
-				conn->state = BT_CONNECTED;
-				hci_proto_connect_cfm(conn, ev->status);
-				hci_conn_put(conn);
-			}
+	if (conn->state == BT_CONFIG) {
+		if (!ev->status && hdev->ssp_mode > 0 && conn->ssp_mode > 0) {
+			struct hci_cp_set_conn_encrypt cp;
+			cp.handle  = ev->handle;
+			cp.encrypt = 0x01;
+			hci_send_cmd(hdev, HCI_OP_SET_CONN_ENCRYPT, sizeof(cp),
+									&cp);
 		} else {
-			hci_auth_cfm(conn, ev->status);
-
-			hci_conn_hold(conn);
-			conn->disc_timeout = HCI_DISCONN_TIMEOUT;
+			conn->state = BT_CONNECTED;
+			hci_proto_connect_cfm(conn, ev->status);
 			hci_conn_put(conn);
 		}
+	} else {
+		hci_auth_cfm(conn, ev->status);
 
-		if (test_bit(HCI_CONN_ENCRYPT_PEND, &conn->pend)) {
-			if (!ev->status) {
-				struct hci_cp_set_conn_encrypt cp;
-				cp.handle  = ev->handle;
-				cp.encrypt = 0x01;
-				hci_send_cmd(hdev, HCI_OP_SET_CONN_ENCRYPT,
-							sizeof(cp), &cp);
-			} else {
-				clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->pend);
-				hci_encrypt_cfm(conn, ev->status, 0x00);
-			}
+		hci_conn_hold(conn);
+		conn->disc_timeout = HCI_DISCONN_TIMEOUT;
+		hci_conn_put(conn);
+	}
+
+	if (test_bit(HCI_CONN_ENCRYPT_PEND, &conn->pend)) {
+		if (!ev->status) {
+			struct hci_cp_set_conn_encrypt cp;
+			cp.handle  = ev->handle;
+			cp.encrypt = 0x01;
+			hci_send_cmd(hdev, HCI_OP_SET_CONN_ENCRYPT, sizeof(cp),
+									&cp);
+		} else {
+			clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->pend);
+			hci_encrypt_cfm(conn, ev->status, 0x00);
 		}
 	}
 
+unlock:
 	hci_dev_unlock(hdev);
 }
 
@@ -1556,6 +1689,7 @@ static inline void hci_encrypt_change_evt(struct hci_dev *hdev, struct sk_buff *
 			/* Encryption implies authentication */
 			conn->link_mode |= HCI_LM_AUTH;
 			conn->link_mode |= HCI_LM_ENCRYPT;
+			conn->sec_level = conn->pending_sec_level;
 		} else
 			conn->link_mode &= ~HCI_LM_ENCRYPT;
 	}
@@ -1759,6 +1893,10 @@ static inline void hci_cmd_complete_evt(struct hci_dev *hdev, struct sk_buff *sk
 		hci_cc_read_local_features(hdev, skb);
 		break;
 
+	case HCI_OP_READ_LOCAL_EXT_FEATURES:
+		hci_cc_read_local_ext_features(hdev, skb);
+		break;
+
 	case HCI_OP_READ_BUFFER_SIZE:
 		hci_cc_read_buffer_size(hdev, skb);
 		break;
@@ -1815,6 +1953,22 @@ static inline void hci_cmd_complete_evt(struct hci_dev *hdev, struct sk_buff *sk
 		hci_cc_user_confirm_neg_reply(hdev, skb);
 		break;
 
+	case HCI_OP_LE_SET_SCAN_ENABLE:
+		hci_cc_le_set_scan_enable(hdev, skb);
+		break;
+
+	case HCI_OP_LE_LTK_REPLY:
+		hci_cc_le_ltk_reply(hdev, skb);
+		break;
+
+	case HCI_OP_LE_LTK_NEG_REPLY:
+		hci_cc_le_ltk_neg_reply(hdev, skb);
+		break;
+
+	case HCI_OP_WRITE_LE_HOST_SUPPORTED:
+		hci_cc_write_le_host_supported(hdev, skb);
+		break;
+
 	default:
 		BT_DBG("%s opcode 0x%x", hdev->name, opcode);
 		break;
@@ -1893,6 +2047,10 @@ static inline void hci_cmd_status_evt(struct hci_dev *hdev, struct sk_buff *skb)
 		hci_cs_le_create_conn(hdev, ev->status);
 		break;
 
+	case HCI_OP_LE_START_ENC:
+		hci_cs_le_start_enc(hdev, ev->status);
+		break;
+
 	default:
 		BT_DBG("%s opcode 0x%x", hdev->name, opcode);
 		break;
@@ -2332,6 +2490,7 @@ static inline void hci_sync_conn_complete_evt(struct hci_dev *hdev, struct sk_bu
 		hci_conn_add_sysfs(conn);
 		break;
 
+	case 0x10:	/* Connection Accept Timeout */
 	case 0x11:	/* Unsupported Feature or Parameter Value */
 	case 0x1c:	/* SCO interval rejected */
 	case 0x1a:	/* Unsupported Remote Feature */
@@ -2651,12 +2810,14 @@ static inline void hci_le_conn_complete_evt(struct hci_dev *hdev, struct sk_buff
 
 	conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &ev->bdaddr);
 	if (!conn) {
-		conn = hci_conn_add(hdev, LE_LINK, &ev->bdaddr);
+		conn = hci_conn_add(hdev, LE_LINK, 0, &ev->bdaddr);
 		if (!conn) {
 			BT_ERR("No memory for new connection");
 			hci_dev_unlock(hdev);
 			return;
 		}
+
+		conn->dst_type = ev->bdaddr_type;
 	}
 
 	if (ev->status) {
@@ -2669,6 +2830,7 @@ static inline void hci_le_conn_complete_evt(struct hci_dev *hdev, struct sk_buff
 
 	mgmt_connected(hdev->id, &ev->bdaddr);
 
+	conn->sec_level = BT_SECURITY_LOW;
 	conn->handle = __le16_to_cpu(ev->handle);
 	conn->state = BT_CONNECTED;
 
@@ -2681,6 +2843,64 @@ unlock:
 	hci_dev_unlock(hdev);
 }
 
+static inline void hci_le_adv_report_evt(struct hci_dev *hdev,
+						struct sk_buff *skb)
+{
+	struct hci_ev_le_advertising_info *ev;
+	u8 num_reports;
+
+	num_reports = skb->data[0];
+	ev = (void *) &skb->data[1];
+
+	hci_dev_lock(hdev);
+
+	hci_add_adv_entry(hdev, ev);
+
+	while (--num_reports) {
+		ev = (void *) (ev->data + ev->length + 1);
+		hci_add_adv_entry(hdev, ev);
+	}
+
+	hci_dev_unlock(hdev);
+}
+
+static inline void hci_le_ltk_request_evt(struct hci_dev *hdev,
+						struct sk_buff *skb)
+{
+	struct hci_ev_le_ltk_req *ev = (void *) skb->data;
+	struct hci_cp_le_ltk_reply cp;
+	struct hci_cp_le_ltk_neg_reply neg;
+	struct hci_conn *conn;
+	struct link_key *ltk;
+
+	BT_DBG("%s handle %d", hdev->name, cpu_to_le16(ev->handle));
+
+	hci_dev_lock(hdev);
+
+	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
+	if (conn == NULL)
+		goto not_found;
+
+	ltk = hci_find_ltk(hdev, ev->ediv, ev->random);
+	if (ltk == NULL)
+		goto not_found;
+
+	memcpy(cp.ltk, ltk->val, sizeof(ltk->val));
+	cp.handle = cpu_to_le16(conn->handle);
+	conn->pin_length = ltk->pin_len;
+
+	hci_send_cmd(hdev, HCI_OP_LE_LTK_REPLY, sizeof(cp), &cp);
+
+	hci_dev_unlock(hdev);
+
+	return;
+
+not_found:
+	neg.handle = ev->handle;
+	hci_send_cmd(hdev, HCI_OP_LE_LTK_NEG_REPLY, sizeof(neg), &neg);
+	hci_dev_unlock(hdev);
+}
+
 static inline void hci_le_meta_evt(struct hci_dev *hdev, struct sk_buff *skb)
 {
 	struct hci_ev_le_meta *le_ev = (void *) skb->data;
@@ -2692,6 +2912,14 @@ static inline void hci_le_meta_evt(struct hci_dev *hdev, struct sk_buff *skb)
 		hci_le_conn_complete_evt(hdev, skb);
 		break;
 
+	case HCI_EV_LE_ADVERTISING_REPORT:
+		hci_le_adv_report_evt(hdev, skb);
+		break;
+
+	case HCI_EV_LE_LTK_REQ:
+		hci_le_ltk_request_evt(hdev, skb);
+		break;
+
 	default:
 		break;
 	}
@@ -2885,3 +3113,6 @@ void hci_si_event(struct hci_dev *hdev, int type, int dlen, void *data)
 	hci_send_to_sock(hdev, skb, NULL);
 	kfree_skb(skb);
 }
+
+module_param(enable_le, bool, 0444);
+MODULE_PARM_DESC(enable_le, "Enable LE support");
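hci_le_adv_report_evt() above walks a variable-length event: byte 0 of the payload is the report count, and each hci_ev_le_advertising_info record is followed by 'length' advertising-data bytes plus one trailing RSSI byte, hence the 'ev->data + ev->length + 1' stride. A sketch of the same walk in plain C (the struct layout is shown for illustration and the buffer is assumed to really contain the advertised number of reports):

#include <stdint.h>

struct adv_info {		/* models hci_ev_le_advertising_info */
	uint8_t evt_type;
	uint8_t bdaddr_type;
	uint8_t bdaddr[6];
	uint8_t length;
	uint8_t data[];		/* 'length' bytes, then 1 RSSI byte */
};

void walk_reports(const uint8_t *buf)
{
	uint8_t num = buf[0];
	const struct adv_info *ev = (const void *) &buf[1];

	while (num--) {
		/* handle_report(ev); e.g. cache connectable addresses */
		ev = (const void *) (ev->data + ev->length + 1);
	}
}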
hci_blacklist_clear(hdev);
-
-	entry = hci_blacklist_lookup(hdev, &bdaddr);
-	if (!entry)
-		return -ENOENT;
-
-	list_del(&entry->list);
-	kfree(entry);
-
-	return 0;
+	return hci_blacklist_del(hdev, &bdaddr);
 }
 
 /* Ioctls that require bound socket */
@@ -290,12 +232,12 @@ static inline int hci_sock_bound_ioctl(struct sock *sk, unsigned int cmd, unsign
 	case HCIBLOCKADDR:
 		if (!capable(CAP_NET_ADMIN))
 			return -EACCES;
-		return hci_blacklist_add(hdev, (void __user *) arg);
+		return hci_sock_blacklist_add(hdev, (void __user *) arg);
 
 	case HCIUNBLOCKADDR:
 		if (!capable(CAP_NET_ADMIN))
 			return -EACCES;
-		return hci_blacklist_del(hdev, (void __user *) arg);
+		return hci_sock_blacklist_del(hdev, (void __user *) arg);
 
 	default:
 		if (hdev->ioctl)
diff --git a/net/bluetooth/hidp/core.c b/net/bluetooth/hidp/core.c
index 43b4c2deb7cc..fb68f344c34a 100644
--- a/net/bluetooth/hidp/core.c
+++ b/net/bluetooth/hidp/core.c
@@ -764,6 +764,7 @@ static int hidp_session(void *arg)
 
 	up_write(&hidp_session_sem);
 
+	kfree(session->rd_data);
 	kfree(session);
 	return 0;
 }
@@ -841,7 +842,8 @@ static int hidp_setup_input(struct hidp_session *session,
 
 	err = input_register_device(input);
 	if (err < 0) {
-		hci_conn_put_device(session->conn);
+		input_free_device(input);
+		session->input = NULL;
 		return err;
 	}
 
@@ -1044,8 +1046,12 @@ int hidp_add_connection(struct hidp_connadd_req *req, struct socket *ctrl_sock,
 	}
 
 	err = hid_add_device(session->hid);
-	if (err < 0)
-		goto err_add_device;
+	if (err < 0) {
+		atomic_inc(&session->terminate);
+		wake_up_process(session->task);
+		up_write(&hidp_session_sem);
+		return err;
+	}
 
 	if (session->input) {
 		hidp_send_ctrl_message(session,
@@ -1059,12 +1065,6 @@ int hidp_add_connection(struct hidp_connadd_req *req, struct socket *ctrl_sock,
 	up_write(&hidp_session_sem);
 	return 0;
 
-err_add_device:
-	hid_destroy_device(session->hid);
-	session->hid = NULL;
-	atomic_inc(&session->terminate);
-	wake_up_process(session->task);
-
 unlink:
 	hidp_del_timer(session);
 
@@ -1090,7 +1090,6 @@ purge:
 failed:
 	up_write(&hidp_session_sem);
 
-	input_free_device(session->input);
 	kfree(session);
 	return err;
 }
diff --git a/net/bluetooth/l2cap_core.c b/net/bluetooth/l2cap_core.c
index 7705e26e699f..5a0ce738751e 100644
--- a/net/bluetooth/l2cap_core.c
+++ b/net/bluetooth/l2cap_core.c
@@ -54,26 +54,39 @@
 #include <net/bluetooth/bluetooth.h>
 #include <net/bluetooth/hci_core.h>
 #include <net/bluetooth/l2cap.h>
+#include <net/bluetooth/smp.h>
 
 int disable_ertm;
 
 static u32 l2cap_feat_mask = L2CAP_FEAT_FIXED_CHAN;
 static u8 l2cap_fixed_chan[8] = { 0x02, };
 
-static struct workqueue_struct *_busy_wq;
-
-LIST_HEAD(chan_list);
-DEFINE_RWLOCK(chan_list_lock);
-
-static void l2cap_busy_work(struct work_struct *work);
+static LIST_HEAD(chan_list);
+static DEFINE_RWLOCK(chan_list_lock);
 
 static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn, u8 code, u8 ident, u16 dlen, void *data);
+static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len,
+								void *data);
 static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data);
+static void l2cap_send_disconn_req(struct l2cap_conn *conn,
+				struct l2cap_chan *chan, int err);
 
 static int l2cap_ertm_data_rcv(struct sock *sk, struct sk_buff *skb);
 
 /* ---- L2CAP channels ---- */
+
+static inline void chan_hold(struct l2cap_chan *c)
+{
+	atomic_inc(&c->refcnt);
+}
+
+static inline void chan_put(struct l2cap_chan *c)
+{
+	if (atomic_dec_and_test(&c->refcnt))
+		kfree(c);
+}
+
 static struct l2cap_chan *__l2cap_get_chan_by_dcid(struct l2cap_conn *conn, u16 cid)
 {
 	struct l2cap_chan *c;
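The chan_hold()/chan_put() pair introduced above moves l2cap_chan lifetime off the socket refcount onto an atomic counter owned by the channel, and, as the timer helpers added further down in this file show, every armed timer holds one reference so a firing timer can never race with kfree(). A kernel-style sketch of that discipline (illustrative, not the patch's own code):

#include <linux/timer.h>
#include <linux/atomic.h>
#include <linux/slab.h>
#include <linux/jiffies.h>

struct chan {
	atomic_t refcnt;
	struct timer_list timer;
};

static void chan_hold(struct chan *c)
{
	atomic_inc(&c->refcnt);
}

static void chan_put(struct chan *c)
{
	if (atomic_dec_and_test(&c->refcnt))
		kfree(c);
}

static void chan_set_timer(struct chan *c, unsigned long timeout)
{
	/* mod_timer() returns 0 when the timer was not already pending,
	 * so exactly one reference is held per armed timer */
	if (!mod_timer(&c->timer, jiffies + timeout))
		chan_hold(c);
}

static void chan_clear_timer(struct chan *c)
{
	/* drop the timer's reference only if we really deactivated it;
	 * if it already fired, the handler drops its own reference */
	if (timer_pending(&c->timer) && del_timer(&c->timer))
		chan_put(c);
}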
@@ -204,6 +217,62 @@ static u16 l2cap_alloc_cid(struct l2cap_conn *conn)
 	return 0;
 }
 
+static void l2cap_set_timer(struct l2cap_chan *chan, struct timer_list *timer, long timeout)
+{
+	BT_DBG("chan %p state %d timeout %ld", chan->sk, chan->state, timeout);
+
+	if (!mod_timer(timer, jiffies + msecs_to_jiffies(timeout)))
+		chan_hold(chan);
+}
+
+static void l2cap_clear_timer(struct l2cap_chan *chan, struct timer_list *timer)
+{
+	BT_DBG("chan %p state %d", chan, chan->state);
+
+	if (timer_pending(timer) && del_timer(timer))
+		chan_put(chan);
+}
+
+static void l2cap_state_change(struct l2cap_chan *chan, int state)
+{
+	chan->state = state;
+	chan->ops->state_change(chan->data, state);
+}
+
+static void l2cap_chan_timeout(unsigned long arg)
+{
+	struct l2cap_chan *chan = (struct l2cap_chan *) arg;
+	struct sock *sk = chan->sk;
+	int reason;
+
+	BT_DBG("chan %p state %d", chan, chan->state);
+
+	bh_lock_sock(sk);
+
+	if (sock_owned_by_user(sk)) {
+		/* sk is owned by user. Try again later */
+		__set_chan_timer(chan, HZ / 5);
+		bh_unlock_sock(sk);
+		chan_put(chan);
+		return;
+	}
+
+	if (chan->state == BT_CONNECTED || chan->state == BT_CONFIG)
+		reason = ECONNREFUSED;
+	else if (chan->state == BT_CONNECT &&
+				chan->sec_level != BT_SECURITY_SDP)
+		reason = ECONNREFUSED;
+	else
+		reason = ETIMEDOUT;
+
+	l2cap_chan_close(chan, reason);
+
+	bh_unlock_sock(sk);
+
+	chan->ops->close(chan->data);
+	chan_put(chan);
+}
+
 struct l2cap_chan *l2cap_chan_create(struct sock *sk)
 {
 	struct l2cap_chan *chan;
@@ -218,6 +287,12 @@ struct l2cap_chan *l2cap_chan_create(struct sock *sk)
 	list_add(&chan->global_l, &chan_list);
 	write_unlock_bh(&chan_list_lock);
 
+	setup_timer(&chan->chan_timer, l2cap_chan_timeout, (unsigned long) chan);
+
+	chan->state = BT_OPEN;
+
+	atomic_set(&chan->refcnt, 1);
+
 	return chan;
 }
 
@@ -227,13 +302,11 @@ void l2cap_chan_destroy(struct l2cap_chan *chan)
 	list_del(&chan->global_l);
 	write_unlock_bh(&chan_list_lock);
 
-	kfree(chan);
+	chan_put(chan);
 }
 
 static void __l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
 {
-	struct sock *sk = chan->sk;
-
 	BT_DBG("conn %p, psm 0x%2.2x, dcid 0x%4.4x", conn,
 			chan->psm, chan->dcid);
 
@@ -241,7 +314,7 @@ static void __l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
 
 	chan->conn = conn;
 
-	if (sk->sk_type == SOCK_SEQPACKET || sk->sk_type == SOCK_STREAM) {
+	if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED) {
 		if (conn->hcon->type == LE_LINK) {
 			/* LE connection */
 			chan->omtu = L2CAP_LE_DEFAULT_MTU;
@@ -252,7 +325,7 @@ static void __l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
 			chan->scid = l2cap_alloc_cid(conn);
 			chan->omtu = L2CAP_DEFAULT_MTU;
 		}
-	} else if (sk->sk_type == SOCK_DGRAM) {
+	} else if (chan->chan_type == L2CAP_CHAN_CONN_LESS) {
 		/* Connectionless socket */
 		chan->scid = L2CAP_CID_CONN_LESS;
 		chan->dcid = L2CAP_CID_CONN_LESS;
@@ -264,20 +337,20 @@ static void __l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
 		chan->omtu = L2CAP_DEFAULT_MTU;
 	}
 
-	sock_hold(sk);
+	chan_hold(chan);
 
 	list_add(&chan->list, &conn->chan_l);
 }
 
 /* Delete channel.
  * Must be called on the locked socket. */
-void l2cap_chan_del(struct l2cap_chan *chan, int err)
+static void l2cap_chan_del(struct l2cap_chan *chan, int err)
 {
 	struct sock *sk = chan->sk;
 	struct l2cap_conn *conn = chan->conn;
 	struct sock *parent = bt_sk(sk)->parent;
 
-	l2cap_sock_clear_timer(sk);
+	__clear_chan_timer(chan);
 
 	BT_DBG("chan %p, conn %p, err %d", chan, conn, err);
 
@@ -286,13 +359,13 @@ void l2cap_chan_del(struct l2cap_chan *chan, int err)
 		write_lock_bh(&conn->chan_lock);
 		list_del(&chan->list);
 		write_unlock_bh(&conn->chan_lock);
-		__sock_put(sk);
+		chan_put(chan);
 
 		chan->conn = NULL;
 		hci_conn_put(conn->hcon);
 	}
 
-	sk->sk_state = BT_CLOSED;
+	l2cap_state_change(chan, BT_CLOSED);
 	sock_set_flag(sk, SOCK_ZAPPED);
 
 	if (err)
@@ -304,8 +377,8 @@ void l2cap_chan_del(struct l2cap_chan *chan, int err)
 	} else
 		sk->sk_state_change(sk);
 
-	if (!(chan->conf_state & L2CAP_CONF_OUTPUT_DONE &&
-			chan->conf_state & L2CAP_CONF_INPUT_DONE))
+	if (!(test_bit(CONF_OUTPUT_DONE, &chan->conf_state) &&
+			test_bit(CONF_INPUT_DONE, &chan->conf_state)))
 		return;
 
 	skb_queue_purge(&chan->tx_q);
@@ -313,12 +386,11 @@ void l2cap_chan_del(struct l2cap_chan *chan, int err)
 	if (chan->mode == L2CAP_MODE_ERTM) {
 		struct srej_list *l, *tmp;
 
-		del_timer(&chan->retrans_timer);
-		del_timer(&chan->monitor_timer);
-		del_timer(&chan->ack_timer);
+		__clear_retrans_timer(chan);
+		__clear_monitor_timer(chan);
+		__clear_ack_timer(chan);
 
 		skb_queue_purge(&chan->srej_q);
-		skb_queue_purge(&chan->busy_q);
 
 		list_for_each_entry_safe(l, tmp, &chan->srej_l, list) {
 			list_del(&l->list);
@@ -327,11 +399,86 @@ void l2cap_chan_del(struct l2cap_chan *chan, int err)
 	}
 }
 
-static inline u8 l2cap_get_auth_type(struct l2cap_chan *chan)
+static void l2cap_chan_cleanup_listen(struct sock *parent)
 {
+	struct sock *sk;
+
+	BT_DBG("parent %p", parent);
+
+	/* Close not yet accepted channels */
+	while ((sk = bt_accept_dequeue(parent, NULL))) {
+		struct l2cap_chan *chan = l2cap_pi(sk)->chan;
+		__clear_chan_timer(chan);
+		lock_sock(sk);
+		l2cap_chan_close(chan, ECONNRESET);
+		release_sock(sk);
+		chan->ops->close(chan->data);
+	}
+}
+
+void l2cap_chan_close(struct l2cap_chan *chan, int reason)
+{
+	struct l2cap_conn *conn = chan->conn;
 	struct sock *sk = chan->sk;
 
-	if (sk->sk_type == SOCK_RAW) {
+	BT_DBG("chan %p state %d socket %p", chan, chan->state, sk->sk_socket);
+
+	switch (chan->state) {
+	case BT_LISTEN:
+		l2cap_chan_cleanup_listen(sk);
+
+		l2cap_state_change(chan, BT_CLOSED);
+		sock_set_flag(sk, SOCK_ZAPPED);
+		break;
+
+	case BT_CONNECTED:
+	case BT_CONFIG:
+		if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED &&
+					conn->hcon->type == ACL_LINK) {
+			__clear_chan_timer(chan);
+			__set_chan_timer(chan, sk->sk_sndtimeo);
+			l2cap_send_disconn_req(conn, chan, reason);
+		} else
+			l2cap_chan_del(chan, reason);
+		break;
+
+	case BT_CONNECT2:
+		if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED &&
+					conn->hcon->type == ACL_LINK) {
+			struct l2cap_conn_rsp rsp;
+			__u16 result;
+
+			if (bt_sk(sk)->defer_setup)
+				result = L2CAP_CR_SEC_BLOCK;
+			else
+				result = L2CAP_CR_BAD_PSM;
+			l2cap_state_change(chan, BT_DISCONN);
+
+			rsp.scid   = cpu_to_le16(chan->dcid);
+			rsp.dcid   = cpu_to_le16(chan->scid);
+			rsp.result = cpu_to_le16(result);
+			rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
+			l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
+							sizeof(rsp), &rsp);
+		}
+
+		l2cap_chan_del(chan, reason);
+		break;
+
+	case BT_CONNECT:
+	case BT_DISCONN:
+		l2cap_chan_del(chan, reason);
+		break;
+
+	default:
+		sock_set_flag(sk, SOCK_ZAPPED);
+		break;
+	}
+}
+
+static inline u8 l2cap_get_auth_type(struct l2cap_chan *chan)
+{
+	if (chan->chan_type == L2CAP_CHAN_RAW) {
 		switch (chan->sec_level) {
 		case BT_SECURITY_HIGH:
 			return HCI_AT_DEDICATED_BONDING_MITM;
@@ -371,7 +518,7 @@ static inline int l2cap_check_security(struct l2cap_chan *chan)
 	return hci_conn_security(conn->hcon, chan->sec_level, auth_type);
 }
 
-u8 l2cap_get_ident(struct l2cap_conn *conn)
+static u8 l2cap_get_ident(struct l2cap_conn *conn)
 {
 	u8 id;
 
@@ -393,7 +540,7 @@ u8 l2cap_get_ident(struct l2cap_conn *conn)
 	return id;
 }
 
-void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len, void *data)
+static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len, void *data)
 {
 	struct sk_buff *skb = l2cap_build_cmd(conn, code, ident, len, data);
 	u8 flags;
@@ -408,6 +555,8 @@ void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len, void *d
 	else
 		flags = ACL_START;
 
+	bt_cb(skb)->force_active = BT_POWER_FORCE_ACTIVE_ON;
+
 	hci_send_acl(conn->hcon, skb, flags);
 }
 
@@ -415,13 +564,11 @@ static inline void l2cap_send_sframe(struct l2cap_chan *chan, u16 control)
 {
 	struct sk_buff *skb;
 	struct l2cap_hdr *lh;
-	struct l2cap_pinfo *pi = l2cap_pi(chan->sk);
 	struct l2cap_conn *conn = chan->conn;
-	struct sock *sk = (struct sock *)pi;
 	int count, hlen = L2CAP_HDR_SIZE + 2;
 	u8 flags;
 
-	if (sk->sk_state != BT_CONNECTED)
+	if (chan->state != BT_CONNECTED)
 		return;
 
 	if (chan->fcs == L2CAP_FCS_CRC16)
@@ -432,15 +579,11 @@ static inline void l2cap_send_sframe(struct l2cap_chan *chan, u16 control)
 	count = min_t(unsigned int, conn->mtu, hlen);
 	control |= L2CAP_CTRL_FRAME_TYPE;
 
-	if (chan->conn_state & L2CAP_CONN_SEND_FBIT) {
+	if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
 		control |= L2CAP_CTRL_FINAL;
-		chan->conn_state &= ~L2CAP_CONN_SEND_FBIT;
-	}
 
-	if (chan->conn_state & L2CAP_CONN_SEND_PBIT) {
+	if (test_and_clear_bit(CONN_SEND_PBIT, &chan->conn_state))
 		control |= L2CAP_CTRL_POLL;
-		chan->conn_state &= ~L2CAP_CONN_SEND_PBIT;
-	}
 
 	skb = bt_skb_alloc(count, GFP_ATOMIC);
 	if (!skb)
@@ -461,14 +604,16 @@ static inline void l2cap_send_sframe(struct l2cap_chan *chan, u16 control)
 	else
 		flags = ACL_START;
 
+	bt_cb(skb)->force_active = chan->force_active;
+
 	hci_send_acl(chan->conn->hcon, skb, flags);
 }
 
 static inline void l2cap_send_rr_or_rnr(struct l2cap_chan *chan, u16 control)
 {
-	if (chan->conn_state & L2CAP_CONN_LOCAL_BUSY) {
+	if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
 		control |= L2CAP_SUPER_RCV_NOT_READY;
-		chan->conn_state |= L2CAP_CONN_RNR_SENT;
+		set_bit(CONN_RNR_SENT, &chan->conn_state);
 	} else
 		control |= L2CAP_SUPER_RCV_READY;
 
@@ -479,7 +624,7 @@ static inline void l2cap_send_rr_or_rnr(struct l2cap_chan *chan, u16 control)
 
 static inline int __l2cap_no_conn_pending(struct l2cap_chan *chan)
 {
-	return !(chan->conf_state & L2CAP_CONF_CONNECT_PEND);
+	return !test_bit(CONF_CONNECT_PEND, &chan->conf_state);
 }
 
 static void l2cap_do_start(struct l2cap_chan *chan)
@@ -497,7 +642,7 @@ static void l2cap_do_start(struct l2cap_chan *chan)
 			req.psm  = chan->psm;
 
 			chan->ident = l2cap_get_ident(conn);
-			chan->conf_state |= L2CAP_CONF_CONNECT_PEND;
+			set_bit(CONF_CONNECT_PEND, &chan->conf_state);
 
 			l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_REQ,
 							sizeof(req), &req);
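The conf_state/conn_state conversions above replace open-coded mask arithmetic with atomic bitops; note how the F-bit and P-bit handling in l2cap_send_sframe() collapses a test-then-clear pair into a single test_and_clear_bit(). A kernel-style before/after sketch of that change (bit number and helper names illustrative):

#include <linux/bitops.h>
#include <linux/types.h>

#define CONN_SEND_FBIT 0	/* bit number, as in the new flag enum */

/* Old shape: a non-atomic read followed by a separate clear; another
 * context can slip in between the two steps. */
static void old_style(unsigned long *flags, __u16 *control, __u16 final)
{
	if (*flags & (1 << CONN_SEND_FBIT)) {
		*control |= final;
		*flags &= ~(1 << CONN_SEND_FBIT);
	}
}

/* New shape: one atomic read-modify-write, so the flag is observed
 * and consumed in a single step. */
static void new_style(unsigned long *flags, __u16 *control, __u16 final)
{
	if (test_and_clear_bit(CONN_SEND_FBIT, flags))
		*control |= final;
}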
@@ -544,9 +689,9 @@ void l2cap_send_disconn_req(struct l2cap_conn *conn, struct l2cap_chan *chan, in
	sk = chan->sk;
	if (chan->mode == L2CAP_MODE_ERTM) {
-		del_timer(&chan->retrans_timer);
-		del_timer(&chan->monitor_timer);
-		del_timer(&chan->ack_timer);
+		__clear_retrans_timer(chan);
+		__clear_monitor_timer(chan);
+		__clear_ack_timer(chan);
	}
	req.dcid = cpu_to_le16(chan->dcid);
@@ -554,7 +699,7 @@ void l2cap_send_disconn_req(struct l2cap_conn *conn, struct l2cap_chan *chan, in
	l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_DISCONN_REQ, sizeof(req), &req);
-	sk->sk_state = BT_DISCONN;
+	l2cap_state_change(chan, BT_DISCONN);
	sk->sk_err = err;
}
@@ -572,13 +717,12 @@ static void l2cap_conn_start(struct l2cap_conn *conn)
		bh_lock_sock(sk);
-		if (sk->sk_type != SOCK_SEQPACKET &&
-				sk->sk_type != SOCK_STREAM) {
+		if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
			bh_unlock_sock(sk);
			continue;
		}
-		if (sk->sk_state == BT_CONNECT) {
+		if (chan->state == BT_CONNECT) {
			struct l2cap_conn_req req;
			if (!l2cap_check_security(chan) ||
@@ -587,15 +731,14 @@ static void l2cap_conn_start(struct l2cap_conn *conn)
				continue;
			}
-			if (!l2cap_mode_supported(chan->mode,
-					conn->feat_mask)
-					&& chan->conf_state &
-					L2CAP_CONF_STATE2_DEVICE) {
-				/* __l2cap_sock_close() calls list_del(chan)
+			if (!l2cap_mode_supported(chan->mode, conn->feat_mask)
+					&& test_bit(CONF_STATE2_DEVICE,
+							&chan->conf_state)) {
+				/* l2cap_chan_close() calls list_del(chan)
				 * so release the lock */
-				read_unlock_bh(&conn->chan_lock);
-				__l2cap_sock_close(sk, ECONNRESET);
-				read_lock_bh(&conn->chan_lock);
+				read_unlock(&conn->chan_lock);
+				l2cap_chan_close(chan, ECONNRESET);
+				read_lock(&conn->chan_lock);
				bh_unlock_sock(sk);
				continue;
			}
@@ -604,12 +747,12 @@ static void l2cap_conn_start(struct l2cap_conn *conn)
			req.psm = chan->psm;
			chan->ident = l2cap_get_ident(conn);
-			chan->conf_state |= L2CAP_CONF_CONNECT_PEND;
+			set_bit(CONF_CONNECT_PEND, &chan->conf_state);
			l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_REQ, sizeof(req), &req);
-		} else if (sk->sk_state == BT_CONNECT2) {
+		} else if (chan->state == BT_CONNECT2) {
			struct l2cap_conn_rsp rsp;
			char buf[128];
			rsp.scid = cpu_to_le16(chan->dcid);
@@ -624,7 +767,7 @@ static void l2cap_conn_start(struct l2cap_conn *conn)
					parent->sk_data_ready(parent, 0);
				} else {
-					sk->sk_state = BT_CONFIG;
+					l2cap_state_change(chan, BT_CONFIG);
					rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
					rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
				}
@@ -636,13 +779,13 @@ static void l2cap_conn_start(struct l2cap_conn *conn)
			l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP, sizeof(rsp), &rsp);
-			if (chan->conf_state & L2CAP_CONF_REQ_SENT ||
+			if (test_bit(CONF_REQ_SENT, &chan->conf_state) ||
					rsp.result != L2CAP_CR_SUCCESS) {
				bh_unlock_sock(sk);
				continue;
			}
-			chan->conf_state |= L2CAP_CONF_REQ_SENT;
+			set_bit(CONF_REQ_SENT, &chan->conf_state);
			l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
						l2cap_build_conf_req(chan, buf), buf);
			chan->num_conf_req++;
@@ -666,7 +809,7 @@ static struct l2cap_chan *l2cap_global_chan_by_scid(int state, __le16 cid, bdadd
	list_for_each_entry(c, &chan_list, global_l) {
		struct sock *sk = c->sk;
-		if (state && sk->sk_state != state)
+		if (state && c->state != state)
			continue;
		if (c->scid == cid) {
@@ -710,24 +853,16 @@ static void l2cap_le_conn_ready(struct l2cap_conn *conn)
		goto clean;
	}
-	sk = l2cap_sock_alloc(sock_net(parent), NULL, BTPROTO_L2CAP, GFP_ATOMIC);
-	if (!sk)
-		goto clean;
-
-	chan = l2cap_chan_create(sk);
-	if (!chan) {
-		l2cap_sock_kill(sk);
+	chan = pchan->ops->new_connection(pchan->data);
+	if (!chan)
		goto clean;
-	}
-
-	l2cap_pi(sk)->chan = chan;
+	sk = chan->sk;
	write_lock_bh(&conn->chan_lock);
	hci_conn_hold(conn->hcon);
-	l2cap_sock_init(sk, parent);
-
	bacpy(&bt_sk(sk)->src, conn->src);
	bacpy(&bt_sk(sk)->dst, conn->dst);
@@ -735,9 +870,9 @@ static void l2cap_le_conn_ready(struct l2cap_conn *conn)
	__l2cap_chan_add(conn, chan);
-	l2cap_sock_set_timer(sk, sk->sk_sndtimeo);
+	__set_chan_timer(chan, sk->sk_sndtimeo);
-	sk->sk_state = BT_CONNECTED;
+	l2cap_state_change(chan, BT_CONNECTED);
	parent->sk_data_ready(parent, 0);
	write_unlock_bh(&conn->chan_lock);
@@ -746,6 +881,23 @@ clean:
	bh_unlock_sock(parent);
}
+static void l2cap_chan_ready(struct sock *sk)
+{
+	struct l2cap_chan *chan = l2cap_pi(sk)->chan;
+	struct sock *parent = bt_sk(sk)->parent;
+
+	BT_DBG("sk %p, parent %p", sk, parent);
+
+	chan->conf_state = 0;
+	__clear_chan_timer(chan);
+
+	l2cap_state_change(chan, BT_CONNECTED);
+	sk->sk_state_change(sk);
+
+	if (parent)
+		parent->sk_data_ready(parent, 0);
+}
+
static void l2cap_conn_ready(struct l2cap_conn *conn)
{
	struct l2cap_chan *chan;
@@ -763,17 +915,15 @@ static void l2cap_conn_ready(struct l2cap_conn *conn)
		bh_lock_sock(sk);
		if (conn->hcon->type == LE_LINK) {
-			l2cap_sock_clear_timer(sk);
-			sk->sk_state = BT_CONNECTED;
-			sk->sk_state_change(sk);
-		}
+			if (smp_conn_security(conn, chan->sec_level))
+				l2cap_chan_ready(sk);
-		if (sk->sk_type != SOCK_SEQPACKET &&
-				sk->sk_type != SOCK_STREAM) {
-			l2cap_sock_clear_timer(sk);
-			sk->sk_state = BT_CONNECTED;
+		} else if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
+			__clear_chan_timer(chan);
+			l2cap_state_change(chan, BT_CONNECTED);
			sk->sk_state_change(sk);
-		} else if (sk->sk_state == BT_CONNECT)
+
+		} else if (chan->state == BT_CONNECT)
			l2cap_do_start(chan);
		bh_unlock_sock(sk);
@@ -811,6 +961,45 @@ static void l2cap_info_timeout(unsigned long arg)
	l2cap_conn_start(conn);
}
+static void l2cap_conn_del(struct hci_conn *hcon, int err)
+{
+	struct l2cap_conn *conn = hcon->l2cap_data;
+	struct l2cap_chan *chan, *l;
+	struct sock *sk;
+
+	if (!conn)
+		return;
+
+	BT_DBG("hcon %p conn %p, err %d", hcon, conn, err);
+
+	kfree_skb(conn->rx_skb);
+
+	/* Kill channels */
+	list_for_each_entry_safe(chan, l, &conn->chan_l, list) {
+		sk = chan->sk;
+		bh_lock_sock(sk);
+		l2cap_chan_del(chan, err);
+		bh_unlock_sock(sk);
+		chan->ops->close(chan->data);
+	}
+
+	if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)
+		del_timer_sync(&conn->info_timer);
+
+	if (test_bit(HCI_CONN_ENCRYPT_PEND, &hcon->pend))
+		del_timer(&conn->security_timer);
+
+	hcon->l2cap_data = NULL;
+	kfree(conn);
+}
+
+static void security_timeout(unsigned long arg)
+{
+	struct l2cap_conn *conn = (void *) arg;
+
+	l2cap_conn_del(conn->hcon, ETIMEDOUT);
+}
+
static struct l2cap_conn *l2cap_conn_add(struct hci_conn *hcon, u8 status)
{
	struct l2cap_conn *conn = hcon->l2cap_data;
@@ -842,7 +1031,10 @@ static struct l2cap_conn *l2cap_conn_add(struct hci_conn *hcon, u8 status)
	INIT_LIST_HEAD(&conn->chan_l);
-	if (hcon->type != LE_LINK)
+	if (hcon->type == LE_LINK)
+		setup_timer(&conn->security_timer, security_timeout,
+						(unsigned long) conn);
+	else
		setup_timer(&conn->info_timer, l2cap_info_timeout,
						(unsigned long) conn);
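Editor's note: LE links now arm a one-shot security timer whose expiry tears the connection down. A sketch of the timer idiom used here, valid for kernels of this era (setup_timer() took an unsigned long cookie); the names are illustrative:

#include <linux/timer.h>
#include <linux/jiffies.h>
#include <linux/printk.h>

static struct timer_list demo_timer;

static void demo_timeout(unsigned long data)
{
	/* runs in softirq context once the timer expires; no sleeping */
	pr_info("demo security timer expired, cookie %lx\n", data);
}

static void demo_arm_timer(void)
{
	setup_timer(&demo_timer, demo_timeout, 0x5ec0UL);
	mod_timer(&demo_timer, jiffies + 5 * HZ);	/* one shot, ~5 s out */
}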
@@ -851,35 +1043,6 @@ static struct l2cap_conn *l2cap_conn_add(struct hci_conn *hcon, u8 status)
	return conn;
}
-static void l2cap_conn_del(struct hci_conn *hcon, int err)
-{
-	struct l2cap_conn *conn = hcon->l2cap_data;
-	struct l2cap_chan *chan, *l;
-	struct sock *sk;
-
-	if (!conn)
-		return;
-
-	BT_DBG("hcon %p conn %p, err %d", hcon, conn, err);
-
-	kfree_skb(conn->rx_skb);
-
-	/* Kill channels */
-	list_for_each_entry_safe(chan, l, &conn->chan_l, list) {
-		sk = chan->sk;
-		bh_lock_sock(sk);
-		l2cap_chan_del(chan, err);
-		bh_unlock_sock(sk);
-		l2cap_sock_kill(sk);
-	}
-
-	if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)
-		del_timer_sync(&conn->info_timer);
-
-	hcon->l2cap_data = NULL;
-	kfree(conn);
-}
-
static inline void l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
{
	write_lock_bh(&conn->chan_lock);
@@ -901,7 +1064,7 @@ static struct l2cap_chan *l2cap_global_chan_by_psm(int state, __le16 psm, bdaddr
	list_for_each_entry(c, &chan_list, global_l) {
		struct sock *sk = c->sk;
-		if (state && sk->sk_state != state)
+		if (state && c->state != state)
			continue;
		if (c->psm == psm) {
@@ -945,10 +1108,10 @@ int l2cap_chan_connect(struct l2cap_chan *chan)
	auth_type = l2cap_get_auth_type(chan);
	if (chan->dcid == L2CAP_CID_LE_DATA)
-		hcon = hci_connect(hdev, LE_LINK, dst,
+		hcon = hci_connect(hdev, LE_LINK, 0, dst,
					chan->sec_level, auth_type);
	else
-		hcon = hci_connect(hdev, ACL_LINK, dst,
+		hcon = hci_connect(hdev, ACL_LINK, 0, dst,
					chan->sec_level, auth_type);
	if (IS_ERR(hcon)) {
@@ -968,15 +1131,14 @@ int l2cap_chan_connect(struct l2cap_chan *chan)
	l2cap_chan_add(conn, chan);
-	sk->sk_state = BT_CONNECT;
-	l2cap_sock_set_timer(sk, sk->sk_sndtimeo);
+	l2cap_state_change(chan, BT_CONNECT);
+	__set_chan_timer(chan, sk->sk_sndtimeo);
	if (hcon->state == BT_CONNECTED) {
-		if (sk->sk_type != SOCK_SEQPACKET &&
-				sk->sk_type != SOCK_STREAM) {
-			l2cap_sock_clear_timer(sk);
+		if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
+			__clear_chan_timer(chan);
			if (l2cap_check_security(chan))
-				sk->sk_state = BT_CONNECTED;
+				l2cap_state_change(chan, BT_CONNECTED);
		} else
			l2cap_do_start(chan);
	}
@@ -997,9 +1159,8 @@ int __l2cap_wait_ack(struct sock *sk)
	int timeo = HZ/5;
	add_wait_queue(sk_sleep(sk), &wait);
-	while ((chan->unacked_frames > 0 && chan->conn)) {
-		set_current_state(TASK_INTERRUPTIBLE);
-
+	set_current_state(TASK_INTERRUPTIBLE);
+	while (chan->unacked_frames > 0 && chan->conn) {
		if (!timeo)
			timeo = HZ/5;
@@ -1011,6 +1172,7 @@ int __l2cap_wait_ack(struct sock *sk)
		release_sock(sk);
		timeo = schedule_timeout(timeo);
		lock_sock(sk);
+		set_current_state(TASK_INTERRUPTIBLE);
		err = sock_error(sk);
		if (err)
@@ -1036,7 +1198,7 @@ static void l2cap_monitor_timeout(unsigned long arg)
	}
	chan->retry_count++;
-	__mod_monitor_timer();
+	__set_monitor_timer(chan);
	l2cap_send_rr_or_rnr(chan, L2CAP_CTRL_POLL);
	bh_unlock_sock(sk);
@@ -1051,9 +1213,9 @@ static void l2cap_retrans_timeout(unsigned long arg)
	bh_lock_sock(sk);
	chan->retry_count = 1;
-	__mod_monitor_timer();
+	__set_monitor_timer(chan);
-	chan->conn_state |= L2CAP_CONN_WAIT_F;
+	set_bit(CONN_WAIT_F, &chan->conn_state);
	l2cap_send_rr_or_rnr(chan, L2CAP_CTRL_POLL);
	bh_unlock_sock(sk);
@@ -1075,7 +1237,7 @@ static void l2cap_drop_acked_frames(struct l2cap_chan *chan)
	}
	if (!chan->unacked_frames)
-		del_timer(&chan->retrans_timer);
+		__clear_retrans_timer(chan);
}
void l2cap_do_send(struct l2cap_chan *chan, struct sk_buff *skb)
@@ -1090,6 +1252,7 @@ void l2cap_do_send(struct l2cap_chan *chan, struct sk_buff *skb)
	else
		flags = ACL_START;
+	bt_cb(skb)->force_active = chan->force_active;
	hci_send_acl(hcon, skb, flags);
}
@@ -1143,10 +1306,8 @@ static void l2cap_retransmit_one_frame(struct l2cap_chan *chan, u8 tx_seq)
	control = get_unaligned_le16(tx_skb->data + L2CAP_HDR_SIZE);
	control &= L2CAP_CTRL_SAR;
-	if (chan->conn_state & L2CAP_CONN_SEND_FBIT) {
+	if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
		control |= L2CAP_CTRL_FINAL;
-		chan->conn_state &= ~L2CAP_CONN_SEND_FBIT;
-	}
	control |= (chan->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT)
			| (tx_seq << L2CAP_CTRL_TXSEQ_SHIFT);
@@ -1164,11 +1325,10 @@ static void l2cap_retransmit_one_frame(struct l2cap_chan *chan, u8 tx_seq)
int l2cap_ertm_send(struct l2cap_chan *chan)
{
	struct sk_buff *skb, *tx_skb;
-	struct sock *sk = chan->sk;
	u16 control, fcs;
	int nsent = 0;
-	if (sk->sk_state != BT_CONNECTED)
+	if (chan->state != BT_CONNECTED)
		return -ENOTCONN;
	while ((skb = chan->tx_send_head) && (!l2cap_tx_window_full(chan))) {
@@ -1186,10 +1346,9 @@ int l2cap_ertm_send(struct l2cap_chan *chan)
		control = get_unaligned_le16(tx_skb->data + L2CAP_HDR_SIZE);
		control &= L2CAP_CTRL_SAR;
-		if (chan->conn_state & L2CAP_CONN_SEND_FBIT) {
+		if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
			control |= L2CAP_CTRL_FINAL;
-			chan->conn_state &= ~L2CAP_CONN_SEND_FBIT;
-		}
+
		control |= (chan->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT)
				| (chan->next_tx_seq << L2CAP_CTRL_TXSEQ_SHIFT);
		put_unaligned_le16(control, tx_skb->data + L2CAP_HDR_SIZE);
@@ -1202,7 +1361,7 @@ int l2cap_ertm_send(struct l2cap_chan *chan)
		l2cap_do_send(chan, tx_skb);
-		__mod_retrans_timer();
+		__set_retrans_timer(chan);
		bt_cb(skb)->tx_seq = chan->next_tx_seq;
		chan->next_tx_seq = (chan->next_tx_seq + 1) % 64;
@@ -1241,9 +1400,9 @@ static void l2cap_send_ack(struct l2cap_chan *chan)
	control |= chan->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;
-	if (chan->conn_state & L2CAP_CONN_LOCAL_BUSY) {
+	if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
		control |= L2CAP_SUPER_RCV_NOT_READY;
-		chan->conn_state |= L2CAP_CONN_RNR_SENT;
+		set_bit(CONN_RNR_SENT, &chan->conn_state);
		l2cap_send_sframe(chan, control);
		return;
	}
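Editor's note: the ERTM sender above refuses to queue new I-frames once the transmit window is full, then re-arms the retransmission timer after each send. The window test reduces to modulo-64 sequence arithmetic; a compilable sketch with simplified, assumed field names:

#include <stdbool.h>

struct demo_ertm {
	unsigned int next_tx_seq;	/* seq for the next new I-frame */
	unsigned int expected_ack_seq;	/* oldest frame still unacked */
	unsigned int tx_win;		/* negotiated window size */
};

static bool demo_tx_window_full(const struct demo_ertm *tx)
{
	/* distance modulo 64 (ERTM uses 6-bit sequence numbers) */
	unsigned int in_flight = (tx->next_tx_seq - tx->expected_ack_seq) & 63;

	return in_flight >= tx->tx_win;
}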
@@ -1451,28 +1610,83 @@ int l2cap_sar_segment_sdu(struct l2cap_chan *chan, struct msghdr *msg, size_t le
	return size;
}
-static void l2cap_chan_ready(struct sock *sk)
+int l2cap_chan_send(struct l2cap_chan *chan, struct msghdr *msg, size_t len)
{
-	struct sock *parent = bt_sk(sk)->parent;
-	struct l2cap_chan *chan = l2cap_pi(sk)->chan;
+	struct sk_buff *skb;
+	u16 control;
+	int err;
-	BT_DBG("sk %p, parent %p", sk, parent);
+	/* Connectionless channel */
+	if (chan->chan_type == L2CAP_CHAN_CONN_LESS) {
+		skb = l2cap_create_connless_pdu(chan, msg, len);
+		if (IS_ERR(skb))
+			return PTR_ERR(skb);
-	chan->conf_state = 0;
-	l2cap_sock_clear_timer(sk);
+		l2cap_do_send(chan, skb);
+		return len;
+	}
-	if (!parent) {
-		/* Outgoing channel.
-		 * Wake up socket sleeping on connect.
-		 */
-		sk->sk_state = BT_CONNECTED;
-		sk->sk_state_change(sk);
-	} else {
-		/* Incoming channel.
-		 * Wake up socket sleeping on accept.
-		 */
-		parent->sk_data_ready(parent, 0);
+	switch (chan->mode) {
+	case L2CAP_MODE_BASIC:
+		/* Check outgoing MTU */
+		if (len > chan->omtu)
+			return -EMSGSIZE;
+
+		/* Create a basic PDU */
+		skb = l2cap_create_basic_pdu(chan, msg, len);
+		if (IS_ERR(skb))
+			return PTR_ERR(skb);
+
+		l2cap_do_send(chan, skb);
+		err = len;
+		break;
+
+	case L2CAP_MODE_ERTM:
+	case L2CAP_MODE_STREAMING:
+		/* Entire SDU fits into one PDU */
+		if (len <= chan->remote_mps) {
+			control = L2CAP_SDU_UNSEGMENTED;
+			skb = l2cap_create_iframe_pdu(chan, msg, len, control,
+									0);
+			if (IS_ERR(skb))
+				return PTR_ERR(skb);
+
+			__skb_queue_tail(&chan->tx_q, skb);
+
+			if (chan->tx_send_head == NULL)
+				chan->tx_send_head = skb;
+
+		} else {
+			/* Segment SDU into multiple PDUs */
+			err = l2cap_sar_segment_sdu(chan, msg, len);
+			if (err < 0)
+				return err;
+		}
+
+		if (chan->mode == L2CAP_MODE_STREAMING) {
+			l2cap_streaming_send(chan);
+			err = len;
+			break;
+		}
+
+		if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state) &&
+				test_bit(CONN_WAIT_F, &chan->conn_state)) {
+			err = len;
+			break;
+		}
+
+		err = l2cap_ertm_send(chan);
+		if (err >= 0)
+			err = len;
+
+		break;
+
+	default:
+		BT_DBG("bad state %1.1x", chan->mode);
+		err = -EBADFD;
	}
+
+	return err;
}
/* Copy frame to all raw sockets on that connection */
static void l2cap_raw_recv(struct l2cap_conn *conn, struct sk_buff *skb)
@@ -1486,7 +1700,7 @@ static void l2cap_raw_recv(struct l2cap_conn *conn, struct sk_buff *skb)
	read_lock(&conn->chan_lock);
	list_for_each_entry(chan, &conn->chan_l, list) {
		struct sock *sk = chan->sk;
-		if (sk->sk_type != SOCK_RAW)
+		if (chan->chan_type != L2CAP_CHAN_RAW)
			continue;
		/* Don't send frame to the socket it came from */
@@ -1496,7 +1710,7 @@ static void l2cap_raw_recv(struct l2cap_conn *conn, struct sk_buff *skb)
		if (!nskb)
			continue;
-		if (sock_queue_rcv_skb(sk, nskb))
+		if (chan->ops->recv(chan->data, nskb))
			kfree_skb(nskb);
	}
	read_unlock(&conn->chan_lock);
@@ -1655,11 +1869,9 @@ static inline void l2cap_ertm_init(struct l2cap_chan *chan)
	setup_timer(&chan->ack_timer, l2cap_ack_timeout, (unsigned long) chan);
	skb_queue_head_init(&chan->srej_q);
-	skb_queue_head_init(&chan->busy_q);
	INIT_LIST_HEAD(&chan->srej_l);
-	INIT_WORK(&chan->busy_work, l2cap_busy_work);
	sk->sk_backlog_rcv = l2cap_ertm_data_rcv;
}
@@ -1691,7 +1903,7 @@ static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data)
	switch (chan->mode) {
	case L2CAP_MODE_STREAMING:
	case L2CAP_MODE_ERTM:
-		if (chan->conf_state & L2CAP_CONF_STATE2_DEVICE)
+		if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state))
			break;
		/* fall through */
@@ -1738,7 +1950,7 @@ done:
			break;
		if (chan->fcs == L2CAP_FCS_NONE ||
-				chan->conf_state & L2CAP_CONF_NO_FCS_RECV) {
+				test_bit(CONF_NO_FCS_RECV, &chan->conf_state)) {
			chan->fcs = L2CAP_FCS_NONE;
			l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1, chan->fcs);
		}
@@ -1761,7 +1973,7 @@ done:
			break;
		if (chan->fcs == L2CAP_FCS_NONE ||
-				chan->conf_state & L2CAP_CONF_NO_FCS_RECV) {
+				test_bit(CONF_NO_FCS_RECV, &chan->conf_state)) {
			chan->fcs = L2CAP_FCS_NONE;
			l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1, chan->fcs);
		}
@@ -1813,7 +2025,7 @@ static int l2cap_parse_conf_req(struct l2cap_chan *chan, void *data)
		case L2CAP_CONF_FCS:
			if (val == L2CAP_FCS_NONE)
-				chan->conf_state |= L2CAP_CONF_NO_FCS_RECV;
+				set_bit(CONF_NO_FCS_RECV, &chan->conf_state);
			break;
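Editor's note: l2cap_chan_send() becomes the single entry point for outbound data, so the socket layer (later hunks) shrinks to a thin wrapper. A sketch of a caller, assuming the channel is already locked and connected; error handling trimmed:

static int demo_chan_write(struct l2cap_chan *chan, struct msghdr *msg,
			   size_t len)
{
	if (chan->state != BT_CONNECTED)
		return -ENOTCONN;

	/* mode dispatch (basic/ERTM/streaming) now lives in the core */
	return l2cap_chan_send(chan, msg, len);
}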
@@ -1833,7 +2045,7 @@ static int l2cap_parse_conf_req(struct l2cap_chan *chan, void *data)
	switch (chan->mode) {
	case L2CAP_MODE_STREAMING:
	case L2CAP_MODE_ERTM:
-		if (!(chan->conf_state & L2CAP_CONF_STATE2_DEVICE)) {
+		if (!test_bit(CONF_STATE2_DEVICE, &chan->conf_state)) {
			chan->mode = l2cap_select_mode(rfc.mode, chan->conn->feat_mask);
			break;
@@ -1866,14 +2078,14 @@ done:
			result = L2CAP_CONF_UNACCEPT;
		else {
			chan->omtu = mtu;
-			chan->conf_state |= L2CAP_CONF_MTU_DONE;
+			set_bit(CONF_MTU_DONE, &chan->conf_state);
		}
		l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->omtu);
		switch (rfc.mode) {
		case L2CAP_MODE_BASIC:
			chan->fcs = L2CAP_FCS_NONE;
-			chan->conf_state |= L2CAP_CONF_MODE_DONE;
+			set_bit(CONF_MODE_DONE, &chan->conf_state);
			break;
		case L2CAP_MODE_ERTM:
@@ -1890,7 +2102,7 @@ done:
				rfc.monitor_timeout = le16_to_cpu(L2CAP_DEFAULT_MONITOR_TO);
-			chan->conf_state |= L2CAP_CONF_MODE_DONE;
+			set_bit(CONF_MODE_DONE, &chan->conf_state);
			l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
						(unsigned long) &rfc);
@@ -1903,7 +2115,7 @@ done:
			chan->remote_mps = le16_to_cpu(rfc.max_pdu_size);
-			chan->conf_state |= L2CAP_CONF_MODE_DONE;
+			set_bit(CONF_MODE_DONE, &chan->conf_state);
			l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
						(unsigned long) &rfc);
@@ -1918,7 +2130,7 @@ done:
		}
		if (result == L2CAP_CONF_SUCCESS)
-			chan->conf_state |= L2CAP_CONF_OUTPUT_DONE;
+			set_bit(CONF_OUTPUT_DONE, &chan->conf_state);
	}
	rsp->scid = cpu_to_le16(chan->dcid);
	rsp->result = cpu_to_le16(result);
@@ -1960,7 +2172,7 @@ static int l2cap_parse_conf_rsp(struct l2cap_chan *chan, void *rsp, int len, voi
			if (olen == sizeof(rfc))
				memcpy(&rfc, (void *)val, olen);
-			if ((chan->conf_state & L2CAP_CONF_STATE2_DEVICE) &&
+			if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state) &&
							rfc.mode != chan->mode)
				return -ECONNREFUSED;
@@ -2022,10 +2234,9 @@ void __l2cap_connect_rsp_defer(struct l2cap_chan *chan)
	l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP, sizeof(rsp), &rsp);
-	if (chan->conf_state & L2CAP_CONF_REQ_SENT)
+	if (test_and_set_bit(CONF_REQ_SENT, &chan->conf_state))
		return;
-	chan->conf_state |= L2CAP_CONF_REQ_SENT;
	l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
			l2cap_build_conf_req(chan, buf), buf);
	chan->num_conf_req++;
@@ -2125,17 +2336,11 @@ static inline int l2cap_connect_req(struct l2cap_conn *conn, struct l2cap_cmd_hd
		goto response;
	}
-	sk = l2cap_sock_alloc(sock_net(parent), NULL, BTPROTO_L2CAP, GFP_ATOMIC);
-	if (!sk)
-		goto response;
-
-	chan = l2cap_chan_create(sk);
-	if (!chan) {
-		l2cap_sock_kill(sk);
+	chan = pchan->ops->new_connection(pchan->data);
+	if (!chan)
		goto response;
-	}
-
-	l2cap_pi(sk)->chan = chan;
+	sk = chan->sk;
	write_lock_bh(&conn->chan_lock);
@@ -2143,13 +2348,12 @@ static inline int l2cap_connect_req(struct l2cap_conn *conn, struct l2cap_cmd_hd
	if (__l2cap_get_chan_by_dcid(conn, scid)) {
		write_unlock_bh(&conn->chan_lock);
		sock_set_flag(sk, SOCK_ZAPPED);
-		l2cap_sock_kill(sk);
+		chan->ops->close(chan->data);
		goto response;
	}
	hci_conn_hold(conn->hcon);
-	l2cap_sock_init(sk, parent);
	bacpy(&bt_sk(sk)->src, conn->src);
	bacpy(&bt_sk(sk)->dst, conn->dst);
	chan->psm = psm;
@@ -2161,29 +2365,29 @@ static inline int l2cap_connect_req(struct l2cap_conn *conn, struct l2cap_cmd_hd
	dcid = chan->scid;
-	l2cap_sock_set_timer(sk, sk->sk_sndtimeo);
+	__set_chan_timer(chan, sk->sk_sndtimeo);
	chan->ident = cmd->ident;
	if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE) {
		if (l2cap_check_security(chan)) {
			if (bt_sk(sk)->defer_setup) {
-				sk->sk_state = BT_CONNECT2;
+				l2cap_state_change(chan, BT_CONNECT2);
				result = L2CAP_CR_PEND;
				status = L2CAP_CS_AUTHOR_PEND;
				parent->sk_data_ready(parent, 0);
			} else {
-				sk->sk_state = BT_CONFIG;
+				l2cap_state_change(chan, BT_CONFIG);
				result = L2CAP_CR_SUCCESS;
				status = L2CAP_CS_NO_INFO;
			}
		} else {
-			sk->sk_state = BT_CONNECT2;
+			l2cap_state_change(chan, BT_CONNECT2);
			result = L2CAP_CR_PEND;
			status = L2CAP_CS_AUTHEN_PEND;
		}
	} else {
-		sk->sk_state = BT_CONNECT2;
+		l2cap_state_change(chan, BT_CONNECT2);
		result = L2CAP_CR_PEND;
		status = L2CAP_CS_NO_INFO;
	}
@@ -2214,10 +2418,10 @@ sendresp:
					L2CAP_INFO_REQ, sizeof(info), &info);
	}
-	if (chan && !(chan->conf_state & L2CAP_CONF_REQ_SENT) &&
+	if (chan && !test_bit(CONF_REQ_SENT, &chan->conf_state) &&
				result == L2CAP_CR_SUCCESS) {
		u8 buf[128];
-		chan->conf_state |= L2CAP_CONF_REQ_SENT;
+		set_bit(CONF_REQ_SENT, &chan->conf_state);
		l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
					l2cap_build_conf_req(chan, buf), buf);
		chan->num_conf_req++;
@@ -2255,31 +2459,29 @@ static inline int l2cap_connect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hd
	switch (result) {
	case L2CAP_CR_SUCCESS:
-		sk->sk_state = BT_CONFIG;
+		l2cap_state_change(chan, BT_CONFIG);
		chan->ident = 0;
		chan->dcid = dcid;
-		chan->conf_state &= ~L2CAP_CONF_CONNECT_PEND;
+		clear_bit(CONF_CONNECT_PEND, &chan->conf_state);
-		if (chan->conf_state & L2CAP_CONF_REQ_SENT)
+		if (test_and_set_bit(CONF_REQ_SENT, &chan->conf_state))
			break;
-		chan->conf_state |= L2CAP_CONF_REQ_SENT;
-
		l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
					l2cap_build_conf_req(chan, req), req);
		chan->num_conf_req++;
		break;
	case L2CAP_CR_PEND:
-		chan->conf_state |= L2CAP_CONF_CONNECT_PEND;
+		set_bit(CONF_CONNECT_PEND, &chan->conf_state);
		break;
	default:
		/* don't delete l2cap channel if sk is owned by user */
		if (sock_owned_by_user(sk)) {
-			sk->sk_state = BT_DISCONN;
-			l2cap_sock_clear_timer(sk);
-			l2cap_sock_set_timer(sk, HZ / 5);
+			l2cap_state_change(chan, BT_DISCONN);
+			__clear_chan_timer(chan);
+			__set_chan_timer(chan, HZ / 5);
			break;
		}
@@ -2293,14 +2495,12 @@ static inline int l2cap_connect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hd
static inline void set_default_fcs(struct l2cap_chan *chan)
{
-	struct l2cap_pinfo *pi = l2cap_pi(chan->sk);
-
	/* FCS is enabled only in ERTM or streaming mode, if one or both
	 * sides request it.
	 */
	if (chan->mode != L2CAP_MODE_ERTM && chan->mode != L2CAP_MODE_STREAMING)
		chan->fcs = L2CAP_FCS_NONE;
-	else if (!(pi->chan->conf_state & L2CAP_CONF_NO_FCS_RECV))
+	else if (!test_bit(CONF_NO_FCS_RECV, &chan->conf_state))
		chan->fcs = L2CAP_FCS_CRC16;
}
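Editor's note: several hunks here collapse a "test flag, then set flag" pair into one test_and_set_bit(), which also closes the window in which two paths could each send a config request. The shape of the conversion, in isolation:

#include <linux/bitops.h>
#include <linux/types.h>

/* returns true for exactly one caller until the bit is cleared again */
static inline bool demo_claim_once(unsigned long *word, int bit)
{
	return !test_and_set_bit(bit, word);
}

/* usage, mirroring the hunk above (names from the patch):
 *	if (!demo_claim_once(&chan->conf_state, CONF_REQ_SENT))
 *		return;
 */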
@@ -2367,13 +2567,13 @@ static inline int l2cap_config_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr
	/* Reset config buffer. */
	chan->conf_len = 0;
-	if (!(chan->conf_state & L2CAP_CONF_OUTPUT_DONE))
+	if (!test_bit(CONF_OUTPUT_DONE, &chan->conf_state))
		goto unlock;
-	if (chan->conf_state & L2CAP_CONF_INPUT_DONE) {
+	if (test_bit(CONF_INPUT_DONE, &chan->conf_state)) {
		set_default_fcs(chan);
-		sk->sk_state = BT_CONNECTED;
+		l2cap_state_change(chan, BT_CONNECTED);
		chan->next_tx_seq = 0;
		chan->expected_tx_seq = 0;
@@ -2385,9 +2585,8 @@ static inline int l2cap_config_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr
		goto unlock;
	}
-	if (!(chan->conf_state & L2CAP_CONF_REQ_SENT)) {
+	if (!test_and_set_bit(CONF_REQ_SENT, &chan->conf_state)) {
		u8 buf[64];
-		chan->conf_state |= L2CAP_CONF_REQ_SENT;
		l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
					l2cap_build_conf_req(chan, buf), buf);
		chan->num_conf_req++;
@@ -2452,7 +2651,7 @@ static inline int l2cap_config_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr
	default:
		sk->sk_err = ECONNRESET;
-		l2cap_sock_set_timer(sk, HZ * 5);
+		__set_chan_timer(chan, HZ * 5);
		l2cap_send_disconn_req(conn, chan, ECONNRESET);
		goto done;
	}
@@ -2460,12 +2659,12 @@ static inline int l2cap_config_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr
	if (flags & 0x01)
		goto done;
-	chan->conf_state |= L2CAP_CONF_INPUT_DONE;
+	set_bit(CONF_INPUT_DONE, &chan->conf_state);
-	if (chan->conf_state & L2CAP_CONF_OUTPUT_DONE) {
+	if (test_bit(CONF_OUTPUT_DONE, &chan->conf_state)) {
		set_default_fcs(chan);
-		sk->sk_state = BT_CONNECTED;
+		l2cap_state_change(chan, BT_CONNECTED);
		chan->next_tx_seq = 0;
		chan->expected_tx_seq = 0;
		skb_queue_head_init(&chan->tx_q);
@@ -2507,9 +2706,9 @@ static inline int l2cap_disconnect_req(struct l2cap_conn *conn, struct l2cap_cmd
	/* don't delete l2cap channel if sk is owned by user */
	if (sock_owned_by_user(sk)) {
-		sk->sk_state = BT_DISCONN;
-		l2cap_sock_clear_timer(sk);
-		l2cap_sock_set_timer(sk, HZ / 5);
+		l2cap_state_change(chan, BT_DISCONN);
+		__clear_chan_timer(chan);
+		__set_chan_timer(chan, HZ / 5);
		bh_unlock_sock(sk);
		return 0;
	}
@@ -2517,7 +2716,7 @@ static inline int l2cap_disconnect_req(struct l2cap_conn *conn, struct l2cap_cmd
	l2cap_chan_del(chan, ECONNRESET);
	bh_unlock_sock(sk);
-	l2cap_sock_kill(sk);
+	chan->ops->close(chan->data);
	return 0;
}
@@ -2541,9 +2740,9 @@ static inline int l2cap_disconnect_rsp(struct l2cap_conn *conn, struct l2cap_cmd
	/* don't delete l2cap channel if sk is owned by user */
	if (sock_owned_by_user(sk)) {
-		sk->sk_state = BT_DISCONN;
-		l2cap_sock_clear_timer(sk);
-		l2cap_sock_set_timer(sk, HZ / 5);
+		l2cap_state_change(chan, BT_DISCONN);
+		__clear_chan_timer(chan);
+		__set_chan_timer(chan, HZ / 5);
		bh_unlock_sock(sk);
		return 0;
	}
@@ -2551,7 +2750,7 @@ static inline int l2cap_disconnect_rsp(struct l2cap_conn *conn, struct l2cap_cmd
	l2cap_chan_del(chan, 0);
	bh_unlock_sock(sk);
-	l2cap_sock_kill(sk);
+	chan->ops->close(chan->data);
	return 0;
}
@@ -2859,18 +3058,18 @@ static inline void l2cap_send_i_or_rr_or_rnr(struct l2cap_chan *chan)
	control |= chan->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;
-	if (chan->conn_state & L2CAP_CONN_LOCAL_BUSY) {
+	if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
		control |= L2CAP_SUPER_RCV_NOT_READY;
		l2cap_send_sframe(chan, control);
-		chan->conn_state |= L2CAP_CONN_RNR_SENT;
+		set_bit(CONN_RNR_SENT, &chan->conn_state);
	}
-	if (chan->conn_state & L2CAP_CONN_REMOTE_BUSY)
+	if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
		l2cap_retransmit_frames(chan);
	l2cap_ertm_send(chan);
-	if (!(chan->conn_state & L2CAP_CONN_LOCAL_BUSY) &&
+	if (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state) &&
			chan->frames_sent == 0) {
		control |= L2CAP_SUPER_RCV_READY;
		l2cap_send_sframe(chan, control);
@@ -2926,17 +3125,13 @@ static int l2cap_ertm_reassembly_sdu(struct l2cap_chan *chan, struct sk_buff *sk
	switch (control & L2CAP_CTRL_SAR) {
	case L2CAP_SDU_UNSEGMENTED:
-		if (chan->conn_state & L2CAP_CONN_SAR_SDU)
+		if (test_bit(CONN_SAR_SDU, &chan->conn_state))
			goto drop;
-		err = sock_queue_rcv_skb(chan->sk, skb);
-		if (!err)
-			return err;
-
-		break;
+		return chan->ops->recv(chan->data, skb);
	case L2CAP_SDU_START:
-		if (chan->conn_state & L2CAP_CONN_SAR_SDU)
+		if (test_bit(CONN_SAR_SDU, &chan->conn_state))
			goto drop;
		chan->sdu_len = get_unaligned_le16(skb->data);
@@ -2955,12 +3150,12 @@ static int l2cap_ertm_reassembly_sdu(struct l2cap_chan *chan, struct sk_buff *sk
		memcpy(skb_put(chan->sdu, skb->len), skb->data, skb->len);
-		chan->conn_state |= L2CAP_CONN_SAR_SDU;
+		set_bit(CONN_SAR_SDU, &chan->conn_state);
		chan->partial_sdu_len = skb->len;
		break;
	case L2CAP_SDU_CONTINUE:
-		if (!(chan->conn_state & L2CAP_CONN_SAR_SDU))
+		if (!test_bit(CONN_SAR_SDU, &chan->conn_state))
			goto disconnect;
		if (!chan->sdu)
@@ -2975,39 +3170,34 @@ static int l2cap_ertm_reassembly_sdu(struct l2cap_chan *chan, struct sk_buff *sk
		break;
	case L2CAP_SDU_END:
-		if (!(chan->conn_state & L2CAP_CONN_SAR_SDU))
+		if (!test_bit(CONN_SAR_SDU, &chan->conn_state))
			goto disconnect;
		if (!chan->sdu)
			goto disconnect;
-		if (!(chan->conn_state & L2CAP_CONN_SAR_RETRY)) {
-			chan->partial_sdu_len += skb->len;
+		chan->partial_sdu_len += skb->len;
-			if (chan->partial_sdu_len > chan->imtu)
-				goto drop;
+		if (chan->partial_sdu_len > chan->imtu)
+			goto drop;
-			if (chan->partial_sdu_len != chan->sdu_len)
-				goto drop;
+		if (chan->partial_sdu_len != chan->sdu_len)
+			goto drop;
-			memcpy(skb_put(chan->sdu, skb->len), skb->data, skb->len);
-		}
+		memcpy(skb_put(chan->sdu, skb->len), skb->data, skb->len);
		_skb = skb_clone(chan->sdu, GFP_ATOMIC);
		if (!_skb) {
-			chan->conn_state |= L2CAP_CONN_SAR_RETRY;
			return -ENOMEM;
		}
-		err = sock_queue_rcv_skb(chan->sk, _skb);
+		err = chan->ops->recv(chan->data, _skb);
		if (err < 0) {
			kfree_skb(_skb);
-			chan->conn_state |= L2CAP_CONN_SAR_RETRY;
			return err;
		}
-		chan->conn_state &= ~L2CAP_CONN_SAR_RETRY;
-		chan->conn_state &= ~L2CAP_CONN_SAR_SDU;
+		clear_bit(CONN_SAR_SDU, &chan->conn_state);
		kfree_skb(chan->sdu);
		break;
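Editor's note: the workqueue-based busy handling is replaced by an explicit enter/exit pair, driven by whoever owns the receive buffer. Usage sketch of the new l2cap_chan_busy() entry point added in the next hunks (the actual receive-path caller appears in the l2cap_sock.c changes further down):

static void demo_rx_pressure(struct l2cap_chan *chan, bool buffer_full)
{
	/* ERTM only: sends RNR on enter, RR+Poll on exit */
	l2cap_chan_busy(chan, buffer_full ? 1 : 0);
}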
@@ -3026,128 +3216,55 @@ disconnect:
	return 0;
}
-static int l2cap_try_push_rx_skb(struct l2cap_chan *chan)
+static void l2cap_ertm_enter_local_busy(struct l2cap_chan *chan)
{
-	struct sk_buff *skb;
	u16 control;
-	int err;
-	while ((skb = skb_dequeue(&chan->busy_q))) {
-		control = bt_cb(skb)->sar << L2CAP_CTRL_SAR_SHIFT;
-		err = l2cap_ertm_reassembly_sdu(chan, skb, control);
-		if (err < 0) {
-			skb_queue_head(&chan->busy_q, skb);
-			return -EBUSY;
-		}
-
-		chan->buffer_seq = (chan->buffer_seq + 1) % 64;
-	}
+	BT_DBG("chan %p, Enter local busy", chan);
-	if (!(chan->conn_state & L2CAP_CONN_RNR_SENT))
-		goto done;
+	set_bit(CONN_LOCAL_BUSY, &chan->conn_state);
	control = chan->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;
-	control |= L2CAP_SUPER_RCV_READY | L2CAP_CTRL_POLL;
+	control |= L2CAP_SUPER_RCV_NOT_READY;
	l2cap_send_sframe(chan, control);
-	chan->retry_count = 1;
-
-	del_timer(&chan->retrans_timer);
-	__mod_monitor_timer();
-	chan->conn_state |= L2CAP_CONN_WAIT_F;
+	set_bit(CONN_RNR_SENT, &chan->conn_state);
-done:
-	chan->conn_state &= ~L2CAP_CONN_LOCAL_BUSY;
-	chan->conn_state &= ~L2CAP_CONN_RNR_SENT;
-
-	BT_DBG("chan %p, Exit local busy", chan);
-
-	return 0;
+	__clear_ack_timer(chan);
}
-static void l2cap_busy_work(struct work_struct *work)
+static void l2cap_ertm_exit_local_busy(struct l2cap_chan *chan)
{
-	DECLARE_WAITQUEUE(wait, current);
-	struct l2cap_chan *chan =
-		container_of(work, struct l2cap_chan, busy_work);
-	struct sock *sk = chan->sk;
-	int n_tries = 0, timeo = HZ/5, err;
-	struct sk_buff *skb;
-
-	lock_sock(sk);
-
-	add_wait_queue(sk_sleep(sk), &wait);
-	while ((skb = skb_peek(&chan->busy_q))) {
-		set_current_state(TASK_INTERRUPTIBLE);
-
-		if (n_tries++ > L2CAP_LOCAL_BUSY_TRIES) {
-			err = -EBUSY;
-			l2cap_send_disconn_req(chan->conn, chan, EBUSY);
-			break;
-		}
-
-		if (!timeo)
-			timeo = HZ/5;
+	u16 control;
-		if (signal_pending(current)) {
-			err = sock_intr_errno(timeo);
-			break;
-		}
+	if (!test_bit(CONN_RNR_SENT, &chan->conn_state))
		goto done;
-		release_sock(sk);
-		timeo = schedule_timeout(timeo);
-		lock_sock(sk);
+	control = chan->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;
+	control |= L2CAP_SUPER_RCV_READY | L2CAP_CTRL_POLL;
+	l2cap_send_sframe(chan, control);
+	chan->retry_count = 1;
-		err = sock_error(sk);
-		if (err)
-			break;
+	__clear_retrans_timer(chan);
+	__set_monitor_timer(chan);
-		if (l2cap_try_push_rx_skb(chan) == 0)
-			break;
-	}
+	set_bit(CONN_WAIT_F, &chan->conn_state);
-	set_current_state(TASK_RUNNING);
-	remove_wait_queue(sk_sleep(sk), &wait);
+done:
+	clear_bit(CONN_LOCAL_BUSY, &chan->conn_state);
+	clear_bit(CONN_RNR_SENT, &chan->conn_state);
-	release_sock(sk);
+	BT_DBG("chan %p, Exit local busy", chan);
}
-static int l2cap_push_rx_skb(struct l2cap_chan *chan, struct sk_buff *skb, u16 control)
+void l2cap_chan_busy(struct l2cap_chan *chan, int busy)
{
-	int sctrl, err;
-
-	if (chan->conn_state & L2CAP_CONN_LOCAL_BUSY) {
-		bt_cb(skb)->sar = control >> L2CAP_CTRL_SAR_SHIFT;
-		__skb_queue_tail(&chan->busy_q, skb);
-		return l2cap_try_push_rx_skb(chan);
-
-
-	}
-
-	err = l2cap_ertm_reassembly_sdu(chan, skb, control);
-	if (err >= 0) {
-		chan->buffer_seq = (chan->buffer_seq + 1) % 64;
-		return err;
+	if (chan->mode == L2CAP_MODE_ERTM) {
+		if (busy)
+			l2cap_ertm_enter_local_busy(chan);
+		else
+			l2cap_ertm_exit_local_busy(chan);
	}
-
-	/* Busy Condition */
-	BT_DBG("chan %p, Enter local busy", chan);
-
-	chan->conn_state |= L2CAP_CONN_LOCAL_BUSY;
-	bt_cb(skb)->sar = control >> L2CAP_CTRL_SAR_SHIFT;
-	__skb_queue_tail(&chan->busy_q, skb);
-
-	sctrl = chan->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;
-	sctrl |= L2CAP_SUPER_RCV_NOT_READY;
-	l2cap_send_sframe(chan, sctrl);
-
-	chan->conn_state |= L2CAP_CONN_RNR_SENT;
-
-	del_timer(&chan->ack_timer);
-
-	queue_work(_busy_wq, &chan->busy_work);
-
-	return err;
}
static int l2cap_streaming_reassembly_sdu(struct l2cap_chan *chan, struct sk_buff *skb, u16 control)
@@ -3162,19 +3279,19 @@ static int l2cap_streaming_reassembly_sdu(struct l2cap_chan *chan, struct sk_buf
	switch (control & L2CAP_CTRL_SAR) {
	case L2CAP_SDU_UNSEGMENTED:
-		if (chan->conn_state & L2CAP_CONN_SAR_SDU) {
+		if (test_bit(CONN_SAR_SDU, &chan->conn_state)) {
			kfree_skb(chan->sdu);
			break;
		}
-		err = sock_queue_rcv_skb(chan->sk, skb);
+		err = chan->ops->recv(chan->data, skb);
		if (!err)
			return 0;
		break;
	case L2CAP_SDU_START:
-		if (chan->conn_state & L2CAP_CONN_SAR_SDU) {
+		if (test_bit(CONN_SAR_SDU, &chan->conn_state)) {
			kfree_skb(chan->sdu);
			break;
		}
@@ -3195,13 +3312,13 @@ static int l2cap_streaming_reassembly_sdu(struct l2cap_chan *chan, struct sk_buf
		memcpy(skb_put(chan->sdu, skb->len), skb->data, skb->len);
-		chan->conn_state |= L2CAP_CONN_SAR_SDU;
+		set_bit(CONN_SAR_SDU, &chan->conn_state);
		chan->partial_sdu_len = skb->len;
		err = 0;
		break;
	case L2CAP_SDU_CONTINUE:
-		if (!(chan->conn_state & L2CAP_CONN_SAR_SDU))
+		if (!test_bit(CONN_SAR_SDU, &chan->conn_state))
			break;
		memcpy(skb_put(chan->sdu, skb->len), skb->data, skb->len);
@@ -3215,12 +3332,12 @@ static int l2cap_streaming_reassembly_sdu(struct l2cap_chan *chan, struct sk_buf
		break;
	case L2CAP_SDU_END:
-		if (!(chan->conn_state & L2CAP_CONN_SAR_SDU))
+		if (!test_bit(CONN_SAR_SDU, &chan->conn_state))
			break;
		memcpy(skb_put(chan->sdu, skb->len), skb->data, skb->len);
-		chan->conn_state &= ~L2CAP_CONN_SAR_SDU;
+		clear_bit(CONN_SAR_SDU, &chan->conn_state);
		chan->partial_sdu_len += skb->len;
		if (chan->partial_sdu_len > chan->imtu)
@@ -3228,7 +3345,7 @@ static int l2cap_streaming_reassembly_sdu(struct l2cap_chan *chan, struct sk_buf
		if (chan->partial_sdu_len == chan->sdu_len) {
			_skb = skb_clone(chan->sdu, GFP_ATOMIC);
-			err = sock_queue_rcv_skb(chan->sk, _skb);
+			err = chan->ops->recv(chan->data, _skb);
			if (err < 0)
				kfree_skb(_skb);
		}
@@ -3248,13 +3365,22 @@ static void l2cap_check_srej_gap(struct l2cap_chan *chan, u8 tx_seq)
	struct sk_buff *skb;
	u16 control;
-	while ((skb = skb_peek(&chan->srej_q))) {
+	while ((skb = skb_peek(&chan->srej_q)) &&
+			!test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
+		int err;
+
		if (bt_cb(skb)->tx_seq != tx_seq)
			break;
		skb = skb_dequeue(&chan->srej_q);
		control = bt_cb(skb)->sar << L2CAP_CTRL_SAR_SHIFT;
-		l2cap_ertm_reassembly_sdu(chan, skb, control);
+		err = l2cap_ertm_reassembly_sdu(chan, skb, control);
+
+		if (err < 0) {
+			l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
+			break;
+		}
+
		chan->buffer_seq_srej = (chan->buffer_seq_srej + 1) % 64;
		tx_seq = (tx_seq + 1) % 64;
@@ -3311,19 +3437,16 @@ static inline int l2cap_data_channel_iframe(struct l2cap_chan *chan, u16 rx_cont
			tx_seq, rx_control);
	if (L2CAP_CTRL_FINAL & rx_control &&
-			chan->conn_state & L2CAP_CONN_WAIT_F) {
-		del_timer(&chan->monitor_timer);
+			test_bit(CONN_WAIT_F, &chan->conn_state)) {
+		__clear_monitor_timer(chan);
		if (chan->unacked_frames > 0)
-			__mod_retrans_timer();
-		chan->conn_state &= ~L2CAP_CONN_WAIT_F;
+			__set_retrans_timer(chan);
+		clear_bit(CONN_WAIT_F, &chan->conn_state);
	}
	chan->expected_ack_seq = req_seq;
	l2cap_drop_acked_frames(chan);
-	if (tx_seq == chan->expected_tx_seq)
-		goto expected;
-
	tx_seq_offset = (tx_seq - chan->buffer_seq) % 64;
	if (tx_seq_offset < 0)
		tx_seq_offset += 64;
@@ -3334,10 +3457,13 @@ static inline int l2cap_data_channel_iframe(struct l2cap_chan *chan, u16 rx_cont
		goto drop;
	}
-	if (chan->conn_state == L2CAP_CONN_LOCAL_BUSY)
+	if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state))
		goto drop;
-	if (chan->conn_state & L2CAP_CONN_SREJ_SENT) {
+	if (tx_seq == chan->expected_tx_seq)
+		goto expected;
+
+	if (test_bit(CONN_SREJ_SENT, &chan->conn_state)) {
		struct srej_list *first;
		first = list_first_entry(&chan->srej_l,
@@ -3351,7 +3477,7 @@ static inline int l2cap_data_channel_iframe(struct l2cap_chan *chan, u16 rx_cont
			if (list_empty(&chan->srej_l)) {
				chan->buffer_seq = chan->buffer_seq_srej;
-				chan->conn_state &= ~L2CAP_CONN_SREJ_SENT;
+				clear_bit(CONN_SREJ_SENT, &chan->conn_state);
				l2cap_send_ack(chan);
				BT_DBG("chan %p, Exit SREJ_SENT", chan);
			}
@@ -3380,7 +3506,7 @@ static inline int l2cap_data_channel_iframe(struct l2cap_chan *chan, u16 rx_cont
		if (tx_seq_offset < expected_tx_seq_offset)
			goto drop;
-		chan->conn_state |= L2CAP_CONN_SREJ_SENT;
+		set_bit(CONN_SREJ_SENT, &chan->conn_state);
		BT_DBG("chan %p, Enter SREJ", chan);
@@ -3388,39 +3514,39 @@ static inline int l2cap_data_channel_iframe(struct l2cap_chan *chan, u16 rx_cont
		chan->buffer_seq_srej = chan->buffer_seq;
		__skb_queue_head_init(&chan->srej_q);
-		__skb_queue_head_init(&chan->busy_q);
		l2cap_add_to_srej_queue(chan, skb, tx_seq, sar);
-		chan->conn_state |= L2CAP_CONN_SEND_PBIT;
+		set_bit(CONN_SEND_PBIT, &chan->conn_state);
		l2cap_send_srejframe(chan, tx_seq);
-		del_timer(&chan->ack_timer);
+		__clear_ack_timer(chan);
	}
	return 0;
expected:
	chan->expected_tx_seq = (chan->expected_tx_seq + 1) % 64;
-	if (chan->conn_state & L2CAP_CONN_SREJ_SENT) {
+	if (test_bit(CONN_SREJ_SENT, &chan->conn_state)) {
		bt_cb(skb)->tx_seq = tx_seq;
		bt_cb(skb)->sar = sar;
		__skb_queue_tail(&chan->srej_q, skb);
		return 0;
	}
-	err = l2cap_push_rx_skb(chan, skb, rx_control);
-	if (err < 0)
-		return 0;
+	err = l2cap_ertm_reassembly_sdu(chan, skb, rx_control);
+	chan->buffer_seq = (chan->buffer_seq + 1) % 64;
+
+	if (err < 0) {
+		l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
+		return err;
+	}
	if (rx_control & L2CAP_CTRL_FINAL) {
-		if (chan->conn_state & L2CAP_CONN_REJ_ACT)
-			chan->conn_state &= ~L2CAP_CONN_REJ_ACT;
-		else
+		if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state))
			l2cap_retransmit_frames(chan);
	}
-	__mod_ack_timer();
+	__set_ack_timer(chan);
	chan->num_acked = (chan->num_acked + 1) % num_to_ack;
	if (chan->num_acked == num_to_ack - 1)
@@ -3442,33 +3568,31 @@ static inline void l2cap_data_channel_rrframe(struct l2cap_chan *chan, u16 rx_co
	l2cap_drop_acked_frames(chan);
	if (rx_control & L2CAP_CTRL_POLL) {
-		chan->conn_state |= L2CAP_CONN_SEND_FBIT;
-		if (chan->conn_state & L2CAP_CONN_SREJ_SENT) {
-			if ((chan->conn_state & L2CAP_CONN_REMOTE_BUSY) &&
+		set_bit(CONN_SEND_FBIT, &chan->conn_state);
+		if (test_bit(CONN_SREJ_SENT, &chan->conn_state)) {
+			if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state) &&
					(chan->unacked_frames > 0))
-				__mod_retrans_timer();
+				__set_retrans_timer(chan);
-			chan->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;
+			clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
			l2cap_send_srejtail(chan);
		} else {
			l2cap_send_i_or_rr_or_rnr(chan);
		}
	} else if (rx_control & L2CAP_CTRL_FINAL) {
-		chan->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;
+		clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
-		if (chan->conn_state & L2CAP_CONN_REJ_ACT)
-			chan->conn_state &= ~L2CAP_CONN_REJ_ACT;
-		else
+		if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state))
			l2cap_retransmit_frames(chan);
	} else {
-		if ((chan->conn_state & L2CAP_CONN_REMOTE_BUSY) &&
+		if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state) &&
				(chan->unacked_frames > 0))
-			__mod_retrans_timer();
+			__set_retrans_timer(chan);
-		chan->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;
-		if (chan->conn_state & L2CAP_CONN_SREJ_SENT)
+		clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
+		if (test_bit(CONN_SREJ_SENT, &chan->conn_state))
			l2cap_send_ack(chan);
		else
			l2cap_ertm_send(chan);
@@ -3481,21 +3605,19 @@ static inline void l2cap_data_channel_rejframe(struct l2cap_chan *chan, u16 rx_c
	BT_DBG("chan %p, req_seq %d ctrl 0x%4.4x", chan, tx_seq, rx_control);
-	chan->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;
+	clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
	chan->expected_ack_seq = tx_seq;
	l2cap_drop_acked_frames(chan);
	if (rx_control & L2CAP_CTRL_FINAL) {
-		if (chan->conn_state & L2CAP_CONN_REJ_ACT)
-			chan->conn_state &= ~L2CAP_CONN_REJ_ACT;
-		else
+		if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state))
			l2cap_retransmit_frames(chan);
	} else {
		l2cap_retransmit_frames(chan);
-		if (chan->conn_state & L2CAP_CONN_WAIT_F)
-			chan->conn_state |= L2CAP_CONN_REJ_ACT;
+		if (test_bit(CONN_WAIT_F, &chan->conn_state))
+			set_bit(CONN_REJ_ACT, &chan->conn_state);
	}
}
static inline void l2cap_data_channel_srejframe(struct l2cap_chan *chan, u16 rx_control)
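Editor's note: these hunks distinguish ERTM's two recovery modes: REJ forces the peer to go back and resend everything from a sequence number, while SREJ (next hunk) asks for exactly one missing frame. The receiver-side decision in miniature, with simplified types:

#include <stdbool.h>

enum demo_action { DEMO_DELIVER, DEMO_SEND_SREJ, DEMO_SEND_REJ };

static enum demo_action demo_classify(unsigned int tx_seq,
				      unsigned int expected, bool use_srej)
{
	if (tx_seq == expected)
		return DEMO_DELIVER;		/* in-order frame */

	/* gap detected: selective repeat vs. go-back-N recovery */
	return use_srej ? DEMO_SEND_SREJ : DEMO_SEND_REJ;
}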
@@ -3504,32 +3626,32 @@ static inline void l2cap_data_channel_srejframe(struct l2cap_chan *chan, u16 rx_
	BT_DBG("chan %p, req_seq %d ctrl 0x%4.4x", chan, tx_seq, rx_control);
-	chan->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;
+	clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
	if (rx_control & L2CAP_CTRL_POLL) {
		chan->expected_ack_seq = tx_seq;
		l2cap_drop_acked_frames(chan);
-		chan->conn_state |= L2CAP_CONN_SEND_FBIT;
+		set_bit(CONN_SEND_FBIT, &chan->conn_state);
		l2cap_retransmit_one_frame(chan, tx_seq);
		l2cap_ertm_send(chan);
-		if (chan->conn_state & L2CAP_CONN_WAIT_F) {
+		if (test_bit(CONN_WAIT_F, &chan->conn_state)) {
			chan->srej_save_reqseq = tx_seq;
-			chan->conn_state |= L2CAP_CONN_SREJ_ACT;
+			set_bit(CONN_SREJ_ACT, &chan->conn_state);
		}
	} else if (rx_control & L2CAP_CTRL_FINAL) {
-		if ((chan->conn_state & L2CAP_CONN_SREJ_ACT) &&
+		if (test_bit(CONN_SREJ_ACT, &chan->conn_state) &&
				chan->srej_save_reqseq == tx_seq)
-			chan->conn_state &= ~L2CAP_CONN_SREJ_ACT;
+			clear_bit(CONN_SREJ_ACT, &chan->conn_state);
		else
			l2cap_retransmit_one_frame(chan, tx_seq);
	} else {
		l2cap_retransmit_one_frame(chan, tx_seq);
-		if (chan->conn_state & L2CAP_CONN_WAIT_F) {
+		if (test_bit(CONN_WAIT_F, &chan->conn_state)) {
			chan->srej_save_reqseq = tx_seq;
-			chan->conn_state |= L2CAP_CONN_SREJ_ACT;
+			set_bit(CONN_SREJ_ACT, &chan->conn_state);
		}
	}
}
@@ -3540,15 +3662,15 @@ static inline void l2cap_data_channel_rnrframe(struct l2cap_chan *chan, u16 rx_c
	BT_DBG("chan %p, req_seq %d ctrl 0x%4.4x", chan, tx_seq, rx_control);
-	chan->conn_state |= L2CAP_CONN_REMOTE_BUSY;
+	set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
	chan->expected_ack_seq = tx_seq;
	l2cap_drop_acked_frames(chan);
	if (rx_control & L2CAP_CTRL_POLL)
-		chan->conn_state |= L2CAP_CONN_SEND_FBIT;
+		set_bit(CONN_SEND_FBIT, &chan->conn_state);
-	if (!(chan->conn_state & L2CAP_CONN_SREJ_SENT)) {
-		del_timer(&chan->retrans_timer);
+	if (!test_bit(CONN_SREJ_SENT, &chan->conn_state)) {
+		__clear_retrans_timer(chan);
		if (rx_control & L2CAP_CTRL_POLL)
			l2cap_send_rr_or_rnr(chan, L2CAP_CTRL_FINAL);
		return;
@@ -3565,11 +3687,11 @@ static inline int l2cap_data_channel_sframe(struct l2cap_chan *chan, u16 rx_cont
	BT_DBG("chan %p rx_control 0x%4.4x len %d", chan, rx_control, skb->len);
	if (L2CAP_CTRL_FINAL & rx_control &&
-			chan->conn_state & L2CAP_CONN_WAIT_F) {
-		del_timer(&chan->monitor_timer);
+			test_bit(CONN_WAIT_F, &chan->conn_state)) {
+		__clear_monitor_timer(chan);
		if (chan->unacked_frames > 0)
-			__mod_retrans_timer();
-		chan->conn_state &= ~L2CAP_CONN_WAIT_F;
+			__set_retrans_timer(chan);
+		clear_bit(CONN_WAIT_F, &chan->conn_state);
	}
	switch (rx_control & L2CAP_CTRL_SUPERVISE) {
@@ -3668,7 +3790,6 @@ static inline int l2cap_data_channel(struct l2cap_conn *conn, u16 cid, struct sk
{
	struct l2cap_chan *chan;
	struct sock *sk = NULL;
-	struct l2cap_pinfo *pi;
	u16 control;
	u8 tx_seq;
	int len;
@@ -3680,11 +3801,10 @@ static inline int l2cap_data_channel(struct l2cap_conn *conn, u16 cid, struct sk
	}
	sk = chan->sk;
-	pi = l2cap_pi(sk);
	BT_DBG("chan %p, len %d", chan, skb->len);
-	if (sk->sk_state != BT_CONNECTED)
+	if (chan->state != BT_CONNECTED)
		goto drop;
	switch (chan->mode) {
@@ -3697,7 +3817,7 @@ static inline int l2cap_data_channel(struct l2cap_conn *conn, u16 cid, struct sk
		if (chan->imtu < skb->len)
			goto drop;
-		if (!sock_queue_rcv_skb(sk, skb))
+		if (!chan->ops->recv(chan->data, skb))
			goto done;
		break;
@@ -3769,13 +3889,13 @@ static inline int l2cap_conless_channel(struct l2cap_conn *conn, __le16 psm, str
	BT_DBG("sk %p, len %d", sk, skb->len);
-	if (sk->sk_state != BT_BOUND && sk->sk_state != BT_CONNECTED)
+	if (chan->state != BT_BOUND && chan->state != BT_CONNECTED)
		goto drop;
-	if (l2cap_pi(sk)->chan->imtu < skb->len)
+	if (chan->imtu < skb->len)
		goto drop;
-	if (!sock_queue_rcv_skb(sk, skb))
+	if (!chan->ops->recv(chan->data, skb))
		goto done;
drop:
@@ -3802,13 +3922,13 @@ static inline int l2cap_att_channel(struct l2cap_conn *conn, __le16 cid, struct
	BT_DBG("sk %p, len %d", sk, skb->len);
-	if (sk->sk_state != BT_BOUND && sk->sk_state != BT_CONNECTED)
+	if (chan->state != BT_BOUND && chan->state != BT_CONNECTED)
		goto drop;
-	if (l2cap_pi(sk)->chan->imtu < skb->len)
+	if (chan->imtu < skb->len)
		goto drop;
-	if (!sock_queue_rcv_skb(sk, skb))
+	if (!chan->ops->recv(chan->data, skb))
		goto done;
drop:
@@ -3853,6 +3973,11 @@ static void l2cap_recv_frame(struct l2cap_conn *conn, struct sk_buff *skb)
		l2cap_att_channel(conn, cid, skb);
		break;
+	case L2CAP_CID_SMP:
+		if (smp_sig_channel(conn, skb))
+			l2cap_conn_del(conn->hcon, EACCES);
+		break;
+
	default:
		l2cap_data_channel(conn, cid, skb);
		break;
@@ -3876,7 +4001,7 @@ static int l2cap_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
	list_for_each_entry(c, &chan_list, global_l) {
		struct sock *sk = c->sk;
-		if (sk->sk_state != BT_LISTEN)
+		if (c->state != BT_LISTEN)
			continue;
		if (!bacmp(&bt_sk(sk)->src, &hdev->bdaddr)) {
@@ -3909,7 +4034,7 @@ static int l2cap_connect_cfm(struct hci_conn *hcon, u8 status)
		if (conn)
			l2cap_conn_ready(conn);
	} else
-		l2cap_conn_del(hcon, bt_err(status));
+		l2cap_conn_del(hcon, bt_to_errno(status));
	return 0;
}
@@ -3920,7 +4045,7 @@ static int l2cap_disconn_ind(struct hci_conn *hcon)
	BT_DBG("hcon %p", hcon);
-	if (hcon->type != ACL_LINK || !conn)
+	if ((hcon->type != ACL_LINK && hcon->type != LE_LINK) || !conn)
		return 0x13;
	return conn->disc_reason;
@@ -3933,27 +4058,25 @@ static int l2cap_disconn_cfm(struct hci_conn *hcon, u8 reason)
	if (!(hcon->type == ACL_LINK || hcon->type == LE_LINK))
		return -EINVAL;
-	l2cap_conn_del(hcon, bt_err(reason));
+	l2cap_conn_del(hcon, bt_to_errno(reason));
	return 0;
}
static inline void l2cap_check_encryption(struct l2cap_chan *chan, u8 encrypt)
{
-	struct sock *sk = chan->sk;
-
-	if (sk->sk_type != SOCK_SEQPACKET && sk->sk_type != SOCK_STREAM)
+	if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED)
		return;
	if (encrypt == 0x00) {
		if (chan->sec_level == BT_SECURITY_MEDIUM) {
-			l2cap_sock_clear_timer(sk);
-			l2cap_sock_set_timer(sk, HZ * 5);
+			__clear_chan_timer(chan);
+			__set_chan_timer(chan, HZ * 5);
		} else if (chan->sec_level == BT_SECURITY_HIGH)
-			__l2cap_sock_close(sk, ECONNREFUSED);
+			l2cap_chan_close(chan, ECONNREFUSED);
	} else {
		if (chan->sec_level == BT_SECURITY_MEDIUM)
-			l2cap_sock_clear_timer(sk);
+			__clear_chan_timer(chan);
	}
}
@@ -3974,34 +4097,48 @@ static int l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt)
		bh_lock_sock(sk);
-		if (chan->conf_state & L2CAP_CONF_CONNECT_PEND) {
+		BT_DBG("chan->scid %d", chan->scid);
+
+		if (chan->scid == L2CAP_CID_LE_DATA) {
+			if (!status && encrypt) {
+				chan->sec_level = hcon->sec_level;
+				del_timer(&conn->security_timer);
+				l2cap_chan_ready(sk);
+				smp_distribute_keys(conn, 0);
+			}
+
+			bh_unlock_sock(sk);
+			continue;
+		}
+
+		if (test_bit(CONF_CONNECT_PEND, &chan->conf_state)) {
			bh_unlock_sock(sk);
			continue;
		}
-		if (!status && (sk->sk_state == BT_CONNECTED ||
-						sk->sk_state == BT_CONFIG)) {
+		if (!status && (chan->state == BT_CONNECTED ||
						chan->state == BT_CONFIG)) {
			l2cap_check_encryption(chan, encrypt);
			bh_unlock_sock(sk);
			continue;
		}
-		if (sk->sk_state == BT_CONNECT) {
+		if (chan->state == BT_CONNECT) {
			if (!status) {
				struct l2cap_conn_req req;
				req.scid = cpu_to_le16(chan->scid);
				req.psm = chan->psm;
				chan->ident = l2cap_get_ident(conn);
-				chan->conf_state |= L2CAP_CONF_CONNECT_PEND;
+				set_bit(CONF_CONNECT_PEND, &chan->conf_state);
				l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_REQ, sizeof(req), &req);
			} else {
-				l2cap_sock_clear_timer(sk);
-				l2cap_sock_set_timer(sk, HZ / 10);
+				__clear_chan_timer(chan);
+				__set_chan_timer(chan, HZ / 10);
			}
-		} else if (sk->sk_state == BT_CONNECT2) {
+		} else if (chan->state == BT_CONNECT2) {
			struct l2cap_conn_rsp rsp;
			__u16 res, stat;
@@ -4013,13 +4150,13 @@ static int l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt)
				if (parent)
					parent->sk_data_ready(parent, 0);
				} else {
-					sk->sk_state = BT_CONFIG;
+					l2cap_state_change(chan, BT_CONFIG);
					res = L2CAP_CR_SUCCESS;
					stat = L2CAP_CS_NO_INFO;
				}
			} else {
-				sk->sk_state = BT_DISCONN;
-				l2cap_sock_set_timer(sk, HZ / 10);
+				l2cap_state_change(chan, BT_DISCONN);
+				__set_chan_timer(chan, HZ / 10);
				res = L2CAP_CR_SEC_BLOCK;
				stat = L2CAP_CS_NO_INFO;
			}
@@ -4163,7 +4300,7 @@ static int l2cap_debugfs_show(struct seq_file *f, void *p)
		seq_printf(f, "%s %s %d %d 0x%4.4x 0x%4.4x %d %d %d %d\n",
					batostr(&bt_sk(sk)->src), batostr(&bt_sk(sk)->dst),
-					sk->sk_state, __le16_to_cpu(c->psm),
+					c->state, __le16_to_cpu(c->psm),
					c->scid, c->dcid, c->imtu, c->omtu,
					c->sec_level, c->mode);
	}
@@ -4206,12 +4343,6 @@ int __init l2cap_init(void)
	if (err < 0)
		return err;
-	_busy_wq = create_singlethread_workqueue("l2cap");
-	if (!_busy_wq) {
-		err = -ENOMEM;
-		goto error;
-	}
-
	err = hci_register_proto(&l2cap_hci_proto);
	if (err < 0) {
		BT_ERR("L2CAP protocol registration failed");
@@ -4229,7 +4360,6 @@ int __init l2cap_init(void)
	return 0;
error:
-	destroy_workqueue(_busy_wq);
	l2cap_cleanup_sockets();
	return err;
}
@@ -4238,9 +4368,6 @@ void l2cap_exit(void)
{
	debugfs_remove(l2cap_debugfs);
-	flush_workqueue(_busy_wq);
-	destroy_workqueue(_busy_wq);
-
	if (hci_unregister_proto(&l2cap_hci_proto) < 0)
		BT_ERR("L2CAP protocol unregistration failed");
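Editor's note: the security_cfm path above gains an LE branch: once link encryption comes up, the channel is marked ready and SMP key distribution begins. The condensed flow, using the fields exactly as the patch does; error paths dropped:

static void demo_le_encrypt_change(struct l2cap_conn *conn,
				   struct l2cap_chan *chan,
				   u8 status, u8 encrypt)
{
	if (status || !encrypt)
		return;				/* link still unencrypted */

	chan->sec_level = conn->hcon->sec_level;	/* adopt link level */
	del_timer(&conn->security_timer);	/* pairing beat the timeout */
	l2cap_chan_ready(chan->sk);		/* wake the waiting socket */
	smp_distribute_keys(conn, 0);		/* hand out SMP keys */
}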
diff --git a/net/bluetooth/l2cap_sock.c b/net/bluetooth/l2cap_sock.c
index 8248303f44e8..61f1f623091d 100644
--- a/net/bluetooth/l2cap_sock.c
+++ b/net/bluetooth/l2cap_sock.c
@@ -29,54 +29,11 @@
 #include <net/bluetooth/bluetooth.h>
 #include <net/bluetooth/hci_core.h>
 #include <net/bluetooth/l2cap.h>
+#include <net/bluetooth/smp.h>
static const struct proto_ops l2cap_sock_ops;
-
-/* ---- L2CAP timers ---- */
-static void l2cap_sock_timeout(unsigned long arg)
-{
-	struct sock *sk = (struct sock *) arg;
-	int reason;
-
-	BT_DBG("sock %p state %d", sk, sk->sk_state);
-
-	bh_lock_sock(sk);
-
-	if (sock_owned_by_user(sk)) {
-		/* sk is owned by user. Try again later */
-		l2cap_sock_set_timer(sk, HZ / 5);
-		bh_unlock_sock(sk);
-		sock_put(sk);
-		return;
-	}
-
-	if (sk->sk_state == BT_CONNECTED || sk->sk_state == BT_CONFIG)
-		reason = ECONNREFUSED;
-	else if (sk->sk_state == BT_CONNECT &&
-				l2cap_pi(sk)->chan->sec_level != BT_SECURITY_SDP)
-		reason = ECONNREFUSED;
-	else
-		reason = ETIMEDOUT;
-
-	__l2cap_sock_close(sk, reason);
-
-	bh_unlock_sock(sk);
-
-	l2cap_sock_kill(sk);
-	sock_put(sk);
-}
-
-void l2cap_sock_set_timer(struct sock *sk, long timeout)
-{
-	BT_DBG("sk %p state %d timeout %ld", sk, sk->sk_state, timeout);
-	sk_reset_timer(sk, &sk->sk_timer, jiffies + timeout);
-}
-
-void l2cap_sock_clear_timer(struct sock *sk)
-{
-	BT_DBG("sock %p state %d", sk, sk->sk_state);
-	sk_stop_timer(sk, &sk->sk_timer);
-}
+static void l2cap_sock_init(struct sock *sk, struct sock *parent);
+static struct sock *l2cap_sock_alloc(struct net *net, struct socket *sock, int proto, gfp_t prio);
static int l2cap_sock_bind(struct socket *sock, struct sockaddr *addr, int alen)
{
@@ -133,6 +90,8 @@ static int l2cap_sock_bind(struct socket *sock, struct sockaddr *addr, int alen)
	chan->sec_level = BT_SECURITY_SDP;
	bacpy(&bt_sk(sk)->src, &la.l2_bdaddr);
+
+	chan->state = BT_BOUND;
	sk->sk_state = BT_BOUND;
done:
@@ -162,7 +121,7 @@ static int l2cap_sock_connect(struct socket *sock, struct sockaddr *addr, int al
	lock_sock(sk);
-	if ((sk->sk_type == SOCK_SEQPACKET || sk->sk_type == SOCK_STREAM)
+	if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED
			&& !(la.l2_psm || la.l2_cid)) {
		err = -EINVAL;
		goto done;
@@ -204,8 +163,8 @@ static int l2cap_sock_connect(struct socket *sock, struct sockaddr *addr, int al
	}
	/* PSM must be odd and lsb of upper byte must be 0 */
-	if ((__le16_to_cpu(la.l2_psm) & 0x0101) != 0x0001 &&
-				sk->sk_type != SOCK_RAW && !la.l2_cid) {
+	if ((__le16_to_cpu(la.l2_psm) & 0x0101) != 0x0001 && !la.l2_cid &&
+					chan->chan_type != L2CAP_CHAN_RAW) {
		err = -EINVAL;
		goto done;
	}
@@ -258,6 +217,8 @@ static int l2cap_sock_listen(struct socket *sock, int backlog)
	sk->sk_max_ack_backlog = backlog;
	sk->sk_ack_backlog = 0;
+
+	chan->state = BT_LISTEN;
	sk->sk_state = BT_LISTEN;
done:
@@ -274,30 +235,26 @@ static int l2cap_sock_accept(struct socket *sock, struct socket *newsock, int fl
	lock_sock_nested(sk, SINGLE_DEPTH_NESTING);
-	if (sk->sk_state != BT_LISTEN) {
-		err = -EBADFD;
-		goto done;
-	}
-
	timeo = sock_rcvtimeo(sk, flags & O_NONBLOCK);
	BT_DBG("sk %p timeo %ld", sk, timeo);
	/* Wait for an incoming connection. (wake-one). */
	add_wait_queue_exclusive(sk_sleep(sk), &wait);
-	while (!(nsk = bt_accept_dequeue(sk, newsock))) {
+	while (1) {
		set_current_state(TASK_INTERRUPTIBLE);
-		if (!timeo) {
-			err = -EAGAIN;
+
+		if (sk->sk_state != BT_LISTEN) {
+			err = -EBADFD;
			break;
		}
-		release_sock(sk);
-		timeo = schedule_timeout(timeo);
-		lock_sock_nested(sk, SINGLE_DEPTH_NESTING);
+		nsk = bt_accept_dequeue(sk, newsock);
+		if (nsk)
+			break;
-		if (sk->sk_state != BT_LISTEN) {
-			err = -EBADFD;
+		if (!timeo) {
+			err = -EAGAIN;
			break;
		}
@@ -305,8 +262,12 @@ static int l2cap_sock_accept(struct socket *sock, struct socket *newsock, int fl
			err = sock_intr_errno(timeo);
			break;
		}
+
+		release_sock(sk);
+		timeo = schedule_timeout(timeo);
+		lock_sock_nested(sk, SINGLE_DEPTH_NESTING);
	}
-	set_current_state(TASK_RUNNING);
+	__set_current_state(TASK_RUNNING);
	remove_wait_queue(sk_sleep(sk), &wait);
	if (err)
@@ -437,6 +398,7 @@ static int l2cap_sock_getsockopt(struct socket *sock, int level, int optname, ch
	struct sock *sk = sock->sk;
	struct l2cap_chan *chan = l2cap_pi(sk)->chan;
	struct bt_security sec;
+	struct bt_power pwr;
	int len, err = 0;
	BT_DBG("sk %p", sk);
@@ -454,14 +416,18 @@ static int l2cap_sock_getsockopt(struct socket *sock, int level, int optname, ch
	switch (optname) {
	case BT_SECURITY:
-		if (sk->sk_type != SOCK_SEQPACKET && sk->sk_type != SOCK_STREAM
-				&& sk->sk_type != SOCK_RAW) {
+		if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED &&
+					chan->chan_type != L2CAP_CHAN_RAW) {
			err = -EINVAL;
			break;
		}
+		memset(&sec, 0, sizeof(sec));
		sec.level = chan->sec_level;
+		if (sk->sk_state == BT_CONNECTED)
+			sec.key_size = chan->conn->hcon->enc_key_size;
+
		len = min_t(unsigned int, len, sizeof(sec));
		if (copy_to_user(optval, (char *) &sec, len))
			err = -EFAULT;
@@ -485,6 +451,21 @@ static int l2cap_sock_getsockopt(struct socket *sock, int level, int optname, ch
		break;
+	case BT_POWER:
+		if (sk->sk_type != SOCK_SEQPACKET && sk->sk_type != SOCK_STREAM
+				&& sk->sk_type != SOCK_RAW) {
+			err = -EINVAL;
+			break;
+		}
+
+		pwr.force_active = chan->force_active;
+
+		len = min_t(unsigned int, len, sizeof(pwr));
+		if (copy_to_user(optval, (char *) &pwr, len))
+			err = -EFAULT;
+
+		break;
+
	default:
		err = -ENOPROTOOPT;
		break;
@@ -535,7 +516,7 @@ static int l2cap_sock_setsockopt_old(struct socket *sock, int optname, char __us
		chan->mode = opts.mode;
		switch (chan->mode) {
		case L2CAP_MODE_BASIC:
-			chan->conf_state &= ~L2CAP_CONF_STATE2_DEVICE;
+			clear_bit(CONF_STATE2_DEVICE, &chan->conf_state);
			break;
		case L2CAP_MODE_ERTM:
		case L2CAP_MODE_STREAMING:
@@ -585,6 +566,8 @@ static int l2cap_sock_setsockopt(struct socket *sock, int level, int optname, ch
	struct sock *sk = sock->sk;
	struct l2cap_chan *chan = l2cap_pi(sk)->chan;
	struct bt_security sec;
+	struct bt_power pwr;
+	struct l2cap_conn *conn;
	int len, err = 0;
	u32 opt;
@@ -600,8 +583,8 @@ static int l2cap_sock_setsockopt(struct socket *sock, int level, int optname, ch
	switch (optname) {
	case BT_SECURITY:
-		if (sk->sk_type != SOCK_SEQPACKET && sk->sk_type != SOCK_STREAM
-				&& sk->sk_type != SOCK_RAW) {
+		if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED &&
+					chan->chan_type != L2CAP_CHAN_RAW) {
			err = -EINVAL;
			break;
		}
@@ -621,6 +604,20 @@ static int l2cap_sock_setsockopt(struct socket *sock, int level, int optname, ch
		}
		chan->sec_level = sec.level;
+
+		conn = chan->conn;
+		if (conn && chan->scid == L2CAP_CID_LE_DATA) {
+			if (!conn->hcon->out) {
+				err = -EINVAL;
+				break;
+			}
+
+			if (smp_conn_security(conn, sec.level))
+				break;
+
+			err = 0;
+			sk->sk_state = BT_CONFIG;
+		}
		break;
	case BT_DEFER_SETUP:
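Editor's note: the accept() rewrite above follows the canonical kernel sleep pattern: mark the task TASK_INTERRUPTIBLE before testing the condition, so a wakeup arriving between the test and schedule_timeout() is not lost. A stand-alone rendering with the socket specifics stubbed into a small, assumed context struct:

#include <linux/sched.h>
#include <linux/wait.h>
#include <net/sock.h>

struct demo_waiter {
	wait_queue_head_t wq;
	int ready;
	long timeo;
};

static int demo_wait_ready(struct demo_waiter *w)
{
	DECLARE_WAITQUEUE(wait, current);
	int err = 0;

	add_wait_queue_exclusive(&w->wq, &wait);
	while (1) {
		set_current_state(TASK_INTERRUPTIBLE);	/* before the test */

		if (w->ready)
			break;

		if (!w->timeo) {
			err = -EAGAIN;	/* non-blocking or timed out */
			break;
		}

		if (signal_pending(current)) {
			err = sock_intr_errno(w->timeo);
			break;
		}

		w->timeo = schedule_timeout(w->timeo);
	}
	__set_current_state(TASK_RUNNING);
	remove_wait_queue(&w->wq, &wait);

	return err;
}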
@@ -661,6 +658,23 @@ static int l2cap_sock_setsockopt(struct socket *sock, int level, int optname, ch
		chan->flushable = opt;
		break;
+	case BT_POWER:
+		if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED &&
+					chan->chan_type != L2CAP_CHAN_RAW) {
+			err = -EINVAL;
+			break;
+		}
+
+		pwr.force_active = BT_POWER_FORCE_ACTIVE_ON;
+
+		len = min_t(unsigned int, sizeof(pwr), optlen);
+		if (copy_from_user((char *) &pwr, optval, len)) {
+			err = -EFAULT;
+			break;
+		}
+		chan->force_active = pwr.force_active;
+		break;
+
	default:
		err = -ENOPROTOOPT;
		break;
@@ -674,8 +688,6 @@ static int l2cap_sock_sendmsg(struct kiocb *iocb, struct socket *sock, struct ms
{
	struct sock *sk = sock->sk;
	struct l2cap_chan *chan = l2cap_pi(sk)->chan;
-	struct sk_buff *skb;
-	u16 control;
	int err;
	BT_DBG("sock %p, sk %p", sock, sk);
@@ -690,87 +702,12 @@ static int l2cap_sock_sendmsg(struct kiocb *iocb, struct socket *sock, struct ms
	lock_sock(sk);
	if (sk->sk_state != BT_CONNECTED) {
-		err = -ENOTCONN;
-		goto done;
-	}
-
-	/* Connectionless channel */
-	if (sk->sk_type == SOCK_DGRAM) {
-		skb = l2cap_create_connless_pdu(chan, msg, len);
-		if (IS_ERR(skb)) {
-			err = PTR_ERR(skb);
-		} else {
-			l2cap_do_send(chan, skb);
-			err = len;
-		}
-		goto done;
+		release_sock(sk);
+		return -ENOTCONN;
	}
-	switch (chan->mode) {
-	case L2CAP_MODE_BASIC:
-		/* Check outgoing MTU */
-		if (len > chan->omtu) {
-			err = -EMSGSIZE;
-			goto done;
-		}
-
-		/* Create a basic PDU */
-		skb = l2cap_create_basic_pdu(chan, msg, len);
-		if (IS_ERR(skb)) {
-			err = PTR_ERR(skb);
-			goto done;
-		}
-
-		l2cap_do_send(chan, skb);
-		err = len;
-		break;
-
-	case L2CAP_MODE_ERTM:
-	case L2CAP_MODE_STREAMING:
-		/* Entire SDU fits into one PDU */
-		if (len <= chan->remote_mps) {
-			control = L2CAP_SDU_UNSEGMENTED;
-			skb = l2cap_create_iframe_pdu(chan, msg, len, control, 0);
-			if (IS_ERR(skb)) {
-				err = PTR_ERR(skb);
-				goto done;
-			}
-			__skb_queue_tail(&chan->tx_q, skb);
-
-			if (chan->tx_send_head == NULL)
-				chan->tx_send_head = skb;
-
-		} else {
-			/* Segment SDU into multiples PDUs */
-			err = l2cap_sar_segment_sdu(chan, msg, len);
-			if (err < 0)
-				goto done;
-		}
-
-		if (chan->mode == L2CAP_MODE_STREAMING) {
-			l2cap_streaming_send(chan);
-			err = len;
-			break;
-		}
-
-		if ((chan->conn_state & L2CAP_CONN_REMOTE_BUSY) &&
-				(chan->conn_state & L2CAP_CONN_WAIT_F)) {
-			err = len;
-			break;
-		}
-		err = l2cap_ertm_send(chan);
-
-		if (err >= 0)
-			err = len;
-		break;
-
-	default:
-		BT_DBG("bad state %1.1x", chan->mode);
-		err = -EBADFD;
-	}
+	err = l2cap_chan_send(chan, msg, len);
-done:
	release_sock(sk);
	return err;
}
@@ -778,13 +715,15 @@ done:
static int l2cap_sock_recvmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg, size_t len, int flags)
{
	struct sock *sk = sock->sk;
+	struct l2cap_pinfo *pi = l2cap_pi(sk);
+	int err;
	lock_sock(sk);
	if (sk->sk_state == BT_CONNECT2 && bt_sk(sk)->defer_setup) {
		sk->sk_state = BT_CONFIG;
-		__l2cap_connect_rsp_defer(l2cap_pi(sk)->chan);
+		__l2cap_connect_rsp_defer(pi->chan);
		release_sock(sk);
		return 0;
	}
@@ -792,15 +731,43 @@ static int l2cap_sock_recvmsg(struct kiocb *iocb, struct socket *sock, struct ms
	release_sock(sk);
	if (sock->type == SOCK_STREAM)
-		return bt_sock_stream_recvmsg(iocb, sock, msg, len, flags);
+		err = bt_sock_stream_recvmsg(iocb, sock, msg, len, flags);
+	else
+		err = bt_sock_recvmsg(iocb, sock, msg, len, flags);
+
+	if (pi->chan->mode != L2CAP_MODE_ERTM)
+		return err;
+
+	/* Attempt to put pending rx data in the socket buffer */
+
+	lock_sock(sk);
+
+	if (!test_bit(CONN_LOCAL_BUSY, &pi->chan->conn_state))
+		goto done;
+
+	if (pi->rx_busy_skb) {
+		if (!sock_queue_rcv_skb(sk, pi->rx_busy_skb))
+			pi->rx_busy_skb = NULL;
+		else
+			goto done;
+	}
-	return bt_sock_recvmsg(iocb, sock, msg, len, flags);
+	/* Restore data flow when half of the receive buffer is
+	 * available. This avoids resending large numbers of
+	 * frames.
+	 */
+	if (atomic_read(&sk->sk_rmem_alloc) <= sk->sk_rcvbuf >> 1)
+		l2cap_chan_busy(pi->chan, 0);
+
+done:
+	release_sock(sk);
+	return err;
}
/* Kill socket (only if zapped and orphan)
 * Must be called on unlocked socket. */
-void l2cap_sock_kill(struct sock *sk)
+static void l2cap_sock_kill(struct sock *sk)
{
	if (!sock_flag(sk, SOCK_ZAPPED) || sk->sk_socket)
		return;
@@ -814,87 +781,6 @@ void l2cap_sock_kill(struct sock *sk)
	sock_put(sk);
}
-/* Must be called on unlocked socket. */
-static void l2cap_sock_close(struct sock *sk)
-{
-	l2cap_sock_clear_timer(sk);
-	lock_sock(sk);
-	__l2cap_sock_close(sk, ECONNRESET);
-	release_sock(sk);
-	l2cap_sock_kill(sk);
-}
-
-static void l2cap_sock_cleanup_listen(struct sock *parent)
-{
-	struct sock *sk;
-
-	BT_DBG("parent %p", parent);
-
-	/* Close not yet accepted channels */
-	while ((sk = bt_accept_dequeue(parent, NULL)))
-		l2cap_sock_close(sk);
-
-	parent->sk_state = BT_CLOSED;
-	sock_set_flag(parent, SOCK_ZAPPED);
-}
-
-void __l2cap_sock_close(struct sock *sk, int reason)
-{
-	struct l2cap_chan *chan = l2cap_pi(sk)->chan;
-	struct l2cap_conn *conn = chan->conn;
-
-	BT_DBG("sk %p state %d socket %p", sk, sk->sk_state, sk->sk_socket);
-
-	switch (sk->sk_state) {
-	case BT_LISTEN:
-		l2cap_sock_cleanup_listen(sk);
-		break;
-
-	case BT_CONNECTED:
-	case BT_CONFIG:
-		if ((sk->sk_type == SOCK_SEQPACKET ||
-					sk->sk_type == SOCK_STREAM) &&
-					conn->hcon->type == ACL_LINK) {
-			l2cap_sock_set_timer(sk, sk->sk_sndtimeo);
-			l2cap_send_disconn_req(conn, chan, reason);
-		} else
-			l2cap_chan_del(chan, reason);
-		break;
-
-	case BT_CONNECT2:
-		if ((sk->sk_type == SOCK_SEQPACKET ||
-					sk->sk_type == SOCK_STREAM) &&
-					conn->hcon->type == ACL_LINK) {
-			struct l2cap_conn_rsp rsp;
-			__u16 result;
-
-			if (bt_sk(sk)->defer_setup)
-				result = L2CAP_CR_SEC_BLOCK;
-			else
-				result = L2CAP_CR_BAD_PSM;
-
-			rsp.scid = cpu_to_le16(chan->dcid);
-			rsp.dcid = cpu_to_le16(chan->scid);
-			rsp.result = cpu_to_le16(result);
-			rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
-			l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
-							sizeof(rsp), &rsp);
-		}
-
-		l2cap_chan_del(chan, reason);
-		break;
-
-	case BT_CONNECT:
-	case BT_DISCONN:
-		l2cap_chan_del(chan, reason);
-		break;
-
-	default:
-		sock_set_flag(sk, SOCK_ZAPPED);
-		break;
-	}
-}
-
static int l2cap_sock_shutdown(struct socket *sock, int how)
{
	struct sock *sk = sock->sk;
@@ -912,8 +798,7 @@ static int l2cap_sock_shutdown(struct socket *sock, int how)
		err = __l2cap_wait_ack(sk);
	sk->sk_shutdown = SHUTDOWN_MASK;
-	l2cap_sock_clear_timer(sk);
-	__l2cap_sock_close(sk, 0);
+	l2cap_chan_close(chan, 0);
	if (sock_flag(sk, SOCK_LINGER) && sk->sk_lingertime)
		err = bt_sock_wait_state(sk, BT_CLOSED,
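Editor's note: the new BT_POWER option is driven from userspace with a plain setsockopt(). The sketch below assumes the bt_power layout this patch exports (a struct with a single force_active byte) and the BT_POWER/BT_POWER_FORCE_ACTIVE_OFF constants from matching headers; treat it as illustrative, not a documented API of this tree:

#include <sys/socket.h>
#include <bluetooth/bluetooth.h>	/* SOL_BLUETOOTH; BT_POWER if headers are new enough */

static int demo_allow_sniff(int fd)
{
	struct bt_power pwr = { .force_active = BT_POWER_FORCE_ACTIVE_OFF };

	/* permit the ACL link to drop into sniff mode during sends */
	return setsockopt(fd, SOL_BLUETOOTH, BT_POWER, &pwr, sizeof(pwr));
}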
sock_queue_rcv_skb(sk, skb); + + /* For ERTM, handle one skb that doesn't fit into the recv + * buffer. This is important to do because the data frames + * have already been acked, so the skb cannot be discarded. + * + * Notify the l2cap core that the buffer is full, so the + * LOCAL_BUSY state is entered and no more frames are + * acked and reassembled until there is buffer space + * available. + */ + if (err < 0 && pi->chan->mode == L2CAP_MODE_ERTM) { + pi->rx_busy_skb = skb; + l2cap_chan_busy(pi->chan, 1); + err = 0; + } + + return err; +} + +static void l2cap_sock_close_cb(void *data) +{ + struct sock *sk = data; + + l2cap_sock_kill(sk); +} + +static void l2cap_sock_state_change_cb(void *data, int state) +{ + struct sock *sk = data; + + sk->sk_state = state; +} + +static struct l2cap_ops l2cap_chan_ops = { + .name = "L2CAP Socket Interface", + .new_connection = l2cap_sock_new_connection_cb, + .recv = l2cap_sock_recv_cb, + .close = l2cap_sock_close_cb, + .state_change = l2cap_sock_state_change_cb, +}; + static void l2cap_sock_destruct(struct sock *sk) { BT_DBG("sk %p", sk); + if (l2cap_pi(sk)->rx_busy_skb) { + kfree_skb(l2cap_pi(sk)->rx_busy_skb); + l2cap_pi(sk)->rx_busy_skb = NULL; + } + skb_queue_purge(&sk->sk_receive_queue); skb_queue_purge(&sk->sk_write_queue); } -void l2cap_sock_init(struct sock *sk, struct sock *parent) +static void l2cap_sock_init(struct sock *sk, struct sock *parent) { struct l2cap_pinfo *pi = l2cap_pi(sk); struct l2cap_chan *chan = pi->chan; @@ -965,6 +920,7 @@ void l2cap_sock_init(struct sock *sk, struct sock *parent) sk->sk_type = parent->sk_type; bt_sk(sk)->defer_setup = bt_sk(parent)->defer_setup; + chan->chan_type = pchan->chan_type; chan->imtu = pchan->imtu; chan->omtu = pchan->omtu; chan->conf_state = pchan->conf_state; @@ -976,12 +932,27 @@ void l2cap_sock_init(struct sock *sk, struct sock *parent) chan->role_switch = pchan->role_switch; chan->force_reliable = pchan->force_reliable; chan->flushable = pchan->flushable; + chan->force_active = pchan->force_active; } else { + + switch (sk->sk_type) { + case SOCK_RAW: + chan->chan_type = L2CAP_CHAN_RAW; + break; + case SOCK_DGRAM: + chan->chan_type = L2CAP_CHAN_CONN_LESS; + break; + case SOCK_SEQPACKET: + case SOCK_STREAM: + chan->chan_type = L2CAP_CHAN_CONN_ORIENTED; + break; + } + chan->imtu = L2CAP_DEFAULT_MTU; chan->omtu = 0; if (!disable_ertm && sk->sk_type == SOCK_STREAM) { chan->mode = L2CAP_MODE_ERTM; - chan->conf_state |= L2CAP_CONF_STATE2_DEVICE; + set_bit(CONF_STATE2_DEVICE, &chan->conf_state); } else { chan->mode = L2CAP_MODE_BASIC; } @@ -992,10 +963,15 @@ void l2cap_sock_init(struct sock *sk, struct sock *parent) chan->role_switch = 0; chan->force_reliable = 0; chan->flushable = BT_FLUSHABLE_OFF; + chan->force_active = BT_POWER_FORCE_ACTIVE_ON; + } /* Default config options */ chan->flush_to = L2CAP_DEFAULT_FLUSH_TO; + + chan->data = sk; + chan->ops = &l2cap_chan_ops; } static struct proto l2cap_proto = { @@ -1004,9 +980,10 @@ static struct proto l2cap_proto = { .obj_size = sizeof(struct l2cap_pinfo) }; -struct sock *l2cap_sock_alloc(struct net *net, struct socket *sock, int proto, gfp_t prio) +static struct sock *l2cap_sock_alloc(struct net *net, struct socket *sock, int proto, gfp_t prio) { struct sock *sk; + struct l2cap_chan *chan; sk = sk_alloc(net, PF_BLUETOOTH, prio, &l2cap_proto); if (!sk) @@ -1016,14 +993,20 @@ struct sock *l2cap_sock_alloc(struct net *net, struct socket *sock, int proto, g INIT_LIST_HEAD(&bt_sk(sk)->accept_q); sk->sk_destruct = l2cap_sock_destruct; - sk->sk_sndtimeo = 
msecs_to_jiffies(L2CAP_CONN_TIMEOUT); + sk->sk_sndtimeo = L2CAP_CONN_TIMEOUT; sock_reset_flag(sk, SOCK_ZAPPED); sk->sk_protocol = proto; sk->sk_state = BT_OPEN; - setup_timer(&sk->sk_timer, l2cap_sock_timeout, (unsigned long) sk); + chan = l2cap_chan_create(sk); + if (!chan) { + l2cap_sock_kill(sk); + return NULL; + } + + l2cap_pi(sk)->chan = chan; return sk; } @@ -1032,7 +1015,6 @@ static int l2cap_sock_create(struct net *net, struct socket *sock, int protocol, int kern) { struct sock *sk; - struct l2cap_chan *chan; BT_DBG("sock %p", sock); @@ -1051,14 +1033,6 @@ static int l2cap_sock_create(struct net *net, struct socket *sock, int protocol, if (!sk) return -ENOMEM; - chan = l2cap_chan_create(sk); - if (!chan) { - l2cap_sock_kill(sk); - return -ENOMEM; - } - - l2cap_pi(sk)->chan = chan; - l2cap_sock_init(sk, NULL); return 0; } diff --git a/net/bluetooth/lib.c b/net/bluetooth/lib.c index b826d1bf10df..86a6bed229df 100644 --- a/net/bluetooth/lib.c +++ b/net/bluetooth/lib.c @@ -59,7 +59,7 @@ char *batostr(bdaddr_t *ba) EXPORT_SYMBOL(batostr); /* Bluetooth error codes to Unix errno mapping */ -int bt_err(__u16 code) +int bt_to_errno(__u16 code) { switch (code) { case 0: @@ -149,4 +149,23 @@ int bt_err(__u16 code) return ENOSYS; } } -EXPORT_SYMBOL(bt_err); +EXPORT_SYMBOL(bt_to_errno); + +int bt_printk(const char *level, const char *format, ...) +{ + struct va_format vaf; + va_list args; + int r; + + va_start(args, format); + + vaf.fmt = format; + vaf.va = &args; + + r = printk("%sBluetooth: %pV\n", level, &vaf); + + va_end(args); + + return r; +} +EXPORT_SYMBOL(bt_printk); diff --git a/net/bluetooth/mgmt.c b/net/bluetooth/mgmt.c index dae382ce7020..98327213d93d 100644 --- a/net/bluetooth/mgmt.c +++ b/net/bluetooth/mgmt.c @@ -41,7 +41,7 @@ struct pending_cmd { void *user_data; }; -LIST_HEAD(cmd_list); +static LIST_HEAD(cmd_list); static int cmd_status(struct sock *sk, u16 index, u16 cmd, u8 status) { @@ -179,7 +179,7 @@ static int read_controller_info(struct sock *sk, u16 index) hci_del_off_timer(hdev); - hci_dev_lock(hdev); + hci_dev_lock_bh(hdev); set_bit(HCI_MGMT, &hdev->flags); @@ -208,7 +208,7 @@ static int read_controller_info(struct sock *sk, u16 index) memcpy(rp.name, hdev->dev_name, sizeof(hdev->dev_name)); - hci_dev_unlock(hdev); + hci_dev_unlock_bh(hdev); hci_dev_put(hdev); return cmd_complete(sk, index, MGMT_OP_READ_INFO, &rp, sizeof(rp)); @@ -316,7 +316,7 @@ static int set_powered(struct sock *sk, u16 index, unsigned char *data, u16 len) if (!hdev) return cmd_status(sk, index, MGMT_OP_SET_POWERED, ENODEV); - hci_dev_lock(hdev); + hci_dev_lock_bh(hdev); up = test_bit(HCI_UP, &hdev->flags); if ((cp->val && up) || (!cp->val && !up)) { @@ -343,7 +343,7 @@ static int set_powered(struct sock *sk, u16 index, unsigned char *data, u16 len) err = 0; failed: - hci_dev_unlock(hdev); + hci_dev_unlock_bh(hdev); hci_dev_put(hdev); return err; } @@ -368,7 +368,7 @@ static int set_discoverable(struct sock *sk, u16 index, unsigned char *data, if (!hdev) return cmd_status(sk, index, MGMT_OP_SET_DISCOVERABLE, ENODEV); - hci_dev_lock(hdev); + hci_dev_lock_bh(hdev); if (!test_bit(HCI_UP, &hdev->flags)) { err = cmd_status(sk, index, MGMT_OP_SET_DISCOVERABLE, ENETDOWN); @@ -403,7 +403,7 @@ static int set_discoverable(struct sock *sk, u16 index, unsigned char *data, mgmt_pending_remove(cmd); failed: - hci_dev_unlock(hdev); + hci_dev_unlock_bh(hdev); hci_dev_put(hdev); return err; @@ -429,7 +429,7 @@ static int set_connectable(struct sock *sk, u16 index, unsigned char *data, if (!hdev) return 
cmd_status(sk, index, MGMT_OP_SET_CONNECTABLE, ENODEV); - hci_dev_lock(hdev); + hci_dev_lock_bh(hdev); if (!test_bit(HCI_UP, &hdev->flags)) { err = cmd_status(sk, index, MGMT_OP_SET_CONNECTABLE, ENETDOWN); @@ -463,7 +463,7 @@ static int set_connectable(struct sock *sk, u16 index, unsigned char *data, mgmt_pending_remove(cmd); failed: - hci_dev_unlock(hdev); + hci_dev_unlock_bh(hdev); hci_dev_put(hdev); return err; @@ -522,7 +522,7 @@ static int set_pairable(struct sock *sk, u16 index, unsigned char *data, if (!hdev) return cmd_status(sk, index, MGMT_OP_SET_PAIRABLE, ENODEV); - hci_dev_lock(hdev); + hci_dev_lock_bh(hdev); if (cp->val) set_bit(HCI_PAIRABLE, &hdev->flags); @@ -538,7 +538,7 @@ static int set_pairable(struct sock *sk, u16 index, unsigned char *data, err = mgmt_event(MGMT_EV_PAIRABLE, index, &ev, sizeof(ev), sk); failed: - hci_dev_unlock(hdev); + hci_dev_unlock_bh(hdev); hci_dev_put(hdev); return err; @@ -739,7 +739,7 @@ static int add_uuid(struct sock *sk, u16 index, unsigned char *data, u16 len) if (!hdev) return cmd_status(sk, index, MGMT_OP_ADD_UUID, ENODEV); - hci_dev_lock(hdev); + hci_dev_lock_bh(hdev); uuid = kmalloc(sizeof(*uuid), GFP_ATOMIC); if (!uuid) { @@ -763,7 +763,7 @@ static int add_uuid(struct sock *sk, u16 index, unsigned char *data, u16 len) err = cmd_complete(sk, index, MGMT_OP_ADD_UUID, NULL, 0); failed: - hci_dev_unlock(hdev); + hci_dev_unlock_bh(hdev); hci_dev_put(hdev); return err; @@ -788,7 +788,7 @@ static int remove_uuid(struct sock *sk, u16 index, unsigned char *data, u16 len) if (!hdev) return cmd_status(sk, index, MGMT_OP_REMOVE_UUID, ENODEV); - hci_dev_lock(hdev); + hci_dev_lock_bh(hdev); if (memcmp(cp->uuid, bt_uuid_any, 16) == 0) { err = hci_uuids_clear(hdev); @@ -823,7 +823,7 @@ static int remove_uuid(struct sock *sk, u16 index, unsigned char *data, u16 len) err = cmd_complete(sk, index, MGMT_OP_REMOVE_UUID, NULL, 0); unlock: - hci_dev_unlock(hdev); + hci_dev_unlock_bh(hdev); hci_dev_put(hdev); return err; @@ -847,7 +847,7 @@ static int set_dev_class(struct sock *sk, u16 index, unsigned char *data, if (!hdev) return cmd_status(sk, index, MGMT_OP_SET_DEV_CLASS, ENODEV); - hci_dev_lock(hdev); + hci_dev_lock_bh(hdev); hdev->major_class = cp->major; hdev->minor_class = cp->minor; @@ -857,7 +857,7 @@ static int set_dev_class(struct sock *sk, u16 index, unsigned char *data, if (err == 0) err = cmd_complete(sk, index, MGMT_OP_SET_DEV_CLASS, NULL, 0); - hci_dev_unlock(hdev); + hci_dev_unlock_bh(hdev); hci_dev_put(hdev); return err; @@ -879,7 +879,7 @@ static int set_service_cache(struct sock *sk, u16 index, unsigned char *data, if (!hdev) return cmd_status(sk, index, MGMT_OP_SET_SERVICE_CACHE, ENODEV); - hci_dev_lock(hdev); + hci_dev_lock_bh(hdev); BT_DBG("hci%u enable %d", index, cp->enable); @@ -897,7 +897,7 @@ static int set_service_cache(struct sock *sk, u16 index, unsigned char *data, err = cmd_complete(sk, index, MGMT_OP_SET_SERVICE_CACHE, NULL, 0); - hci_dev_unlock(hdev); + hci_dev_unlock_bh(hdev); hci_dev_put(hdev); return err; @@ -908,7 +908,7 @@ static int load_keys(struct sock *sk, u16 index, unsigned char *data, u16 len) struct hci_dev *hdev; struct mgmt_cp_load_keys *cp; u16 key_count, expected_len; - int i; + int i, err; cp = (void *) data; @@ -918,9 +918,9 @@ static int load_keys(struct sock *sk, u16 index, unsigned char *data, u16 len) key_count = get_unaligned_le16(&cp->key_count); expected_len = sizeof(*cp) + key_count * sizeof(struct mgmt_key_info); - if (expected_len != len) { - BT_ERR("load_keys: expected %u bytes, got %u bytes", - 
len, expected_len); + if (expected_len > len) { + BT_ERR("load_keys: expected at least %u bytes, got %u bytes", + expected_len, len); return -EINVAL; } @@ -931,7 +931,7 @@ static int load_keys(struct sock *sk, u16 index, unsigned char *data, u16 len) BT_DBG("hci%u debug_keys %u key_count %u", index, cp->debug_keys, key_count); - hci_dev_lock(hdev); + hci_dev_lock_bh(hdev); hci_link_keys_clear(hdev); @@ -942,17 +942,36 @@ static int load_keys(struct sock *sk, u16 index, unsigned char *data, u16 len) else clear_bit(HCI_DEBUG_KEYS, &hdev->flags); - for (i = 0; i < key_count; i++) { - struct mgmt_key_info *key = &cp->keys[i]; + len -= sizeof(*cp); + i = 0; + + while (i < len) { + struct mgmt_key_info *key = (void *) cp->keys + i; + + i += sizeof(*key) + key->dlen; + + if (key->type == HCI_LK_SMP_LTK) { + struct key_master_id *id = (void *) key->data; + + if (key->dlen != sizeof(struct key_master_id)) + continue; + + hci_add_ltk(hdev, 0, &key->bdaddr, key->pin_len, + id->ediv, id->rand, key->val); + + continue; + } hci_add_link_key(hdev, NULL, 0, &key->bdaddr, key->val, key->type, key->pin_len); } - hci_dev_unlock(hdev); + err = cmd_complete(sk, index, MGMT_OP_LOAD_KEYS, NULL, 0); + + hci_dev_unlock_bh(hdev); hci_dev_put(hdev); - return 0; + return err; } static int remove_key(struct sock *sk, u16 index, unsigned char *data, u16 len) @@ -971,7 +990,7 @@ static int remove_key(struct sock *sk, u16 index, unsigned char *data, u16 len) if (!hdev) return cmd_status(sk, index, MGMT_OP_REMOVE_KEY, ENODEV); - hci_dev_lock(hdev); + hci_dev_lock_bh(hdev); err = hci_remove_link_key(hdev, &cp->bdaddr); if (err < 0) { @@ -990,11 +1009,11 @@ static int remove_key(struct sock *sk, u16 index, unsigned char *data, u16 len) put_unaligned_le16(conn->handle, &dc.handle); dc.reason = 0x13; /* Remote User Terminated Connection */ - err = hci_send_cmd(hdev, HCI_OP_DISCONNECT, 0, NULL); + err = hci_send_cmd(hdev, HCI_OP_DISCONNECT, sizeof(dc), &dc); } unlock: - hci_dev_unlock(hdev); + hci_dev_unlock_bh(hdev); hci_dev_put(hdev); return err; @@ -1020,7 +1039,7 @@ static int disconnect(struct sock *sk, u16 index, unsigned char *data, u16 len) if (!hdev) return cmd_status(sk, index, MGMT_OP_DISCONNECT, ENODEV); - hci_dev_lock(hdev); + hci_dev_lock_bh(hdev); if (!test_bit(HCI_UP, &hdev->flags)) { err = cmd_status(sk, index, MGMT_OP_DISCONNECT, ENETDOWN); @@ -1055,7 +1074,7 @@ static int disconnect(struct sock *sk, u16 index, unsigned char *data, u16 len) mgmt_pending_remove(cmd); failed: - hci_dev_unlock(hdev); + hci_dev_unlock_bh(hdev); hci_dev_put(hdev); return err; @@ -1076,7 +1095,7 @@ static int get_connections(struct sock *sk, u16 index) if (!hdev) return cmd_status(sk, index, MGMT_OP_GET_CONNECTIONS, ENODEV); - hci_dev_lock(hdev); + hci_dev_lock_bh(hdev); count = 0; list_for_each(p, &hdev->conn_hash.list) { @@ -1092,8 +1111,6 @@ static int get_connections(struct sock *sk, u16 index) put_unaligned_le16(count, &rp->conn_count); - read_lock(&hci_dev_list_lock); - i = 0; list_for_each(p, &hdev->conn_hash.list) { struct hci_conn *c = list_entry(p, struct hci_conn, list); @@ -1101,22 +1118,41 @@ static int get_connections(struct sock *sk, u16 index) bacpy(&rp->conn[i++], &c->dst); } - read_unlock(&hci_dev_list_lock); - err = cmd_complete(sk, index, MGMT_OP_GET_CONNECTIONS, rp, rp_len); unlock: kfree(rp); - hci_dev_unlock(hdev); + hci_dev_unlock_bh(hdev); hci_dev_put(hdev); return err; } +static int send_pin_code_neg_reply(struct sock *sk, u16 index, + struct hci_dev *hdev, struct mgmt_cp_pin_code_neg_reply *cp) +{ + 
struct pending_cmd *cmd; + int err; + + cmd = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_NEG_REPLY, index, cp, + sizeof(*cp)); + if (!cmd) + return -ENOMEM; + + err = hci_send_cmd(hdev, HCI_OP_PIN_CODE_NEG_REPLY, sizeof(cp->bdaddr), + &cp->bdaddr); + if (err < 0) + mgmt_pending_remove(cmd); + + return err; +} + static int pin_code_reply(struct sock *sk, u16 index, unsigned char *data, u16 len) { struct hci_dev *hdev; + struct hci_conn *conn; struct mgmt_cp_pin_code_reply *cp; + struct mgmt_cp_pin_code_neg_reply ncp; struct hci_cp_pin_code_reply reply; struct pending_cmd *cmd; int err; @@ -1132,13 +1168,32 @@ static int pin_code_reply(struct sock *sk, u16 index, unsigned char *data, if (!hdev) return cmd_status(sk, index, MGMT_OP_PIN_CODE_REPLY, ENODEV); - hci_dev_lock(hdev); + hci_dev_lock_bh(hdev); if (!test_bit(HCI_UP, &hdev->flags)) { err = cmd_status(sk, index, MGMT_OP_PIN_CODE_REPLY, ENETDOWN); goto failed; } + conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr); + if (!conn) { + err = cmd_status(sk, index, MGMT_OP_PIN_CODE_REPLY, ENOTCONN); + goto failed; + } + + if (conn->pending_sec_level == BT_SECURITY_HIGH && cp->pin_len != 16) { + bacpy(&ncp.bdaddr, &cp->bdaddr); + + BT_ERR("PIN code is not 16 bytes long"); + + err = send_pin_code_neg_reply(sk, index, hdev, &ncp); + if (err >= 0) + err = cmd_status(sk, index, MGMT_OP_PIN_CODE_REPLY, + EINVAL); + + goto failed; + } + cmd = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_REPLY, index, data, len); if (!cmd) { err = -ENOMEM; @@ -1147,14 +1202,14 @@ static int pin_code_reply(struct sock *sk, u16 index, unsigned char *data, bacpy(&reply.bdaddr, &cp->bdaddr); reply.pin_len = cp->pin_len; - memcpy(reply.pin_code, cp->pin_code, 16); + memcpy(reply.pin_code, cp->pin_code, sizeof(reply.pin_code)); err = hci_send_cmd(hdev, HCI_OP_PIN_CODE_REPLY, sizeof(reply), &reply); if (err < 0) mgmt_pending_remove(cmd); failed: - hci_dev_unlock(hdev); + hci_dev_unlock_bh(hdev); hci_dev_put(hdev); return err; @@ -1165,7 +1220,6 @@ static int pin_code_neg_reply(struct sock *sk, u16 index, unsigned char *data, { struct hci_dev *hdev; struct mgmt_cp_pin_code_neg_reply *cp; - struct pending_cmd *cmd; int err; BT_DBG(""); @@ -1181,7 +1235,7 @@ static int pin_code_neg_reply(struct sock *sk, u16 index, unsigned char *data, return cmd_status(sk, index, MGMT_OP_PIN_CODE_NEG_REPLY, ENODEV); - hci_dev_lock(hdev); + hci_dev_lock_bh(hdev); if (!test_bit(HCI_UP, &hdev->flags)) { err = cmd_status(sk, index, MGMT_OP_PIN_CODE_NEG_REPLY, @@ -1189,20 +1243,10 @@ static int pin_code_neg_reply(struct sock *sk, u16 index, unsigned char *data, goto failed; } - cmd = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_NEG_REPLY, index, - data, len); - if (!cmd) { - err = -ENOMEM; - goto failed; - } - - err = hci_send_cmd(hdev, HCI_OP_PIN_CODE_NEG_REPLY, sizeof(cp->bdaddr), - &cp->bdaddr); - if (err < 0) - mgmt_pending_remove(cmd); + err = send_pin_code_neg_reply(sk, index, hdev, cp); failed: - hci_dev_unlock(hdev); + hci_dev_unlock_bh(hdev); hci_dev_put(hdev); return err; @@ -1225,14 +1269,14 @@ static int set_io_capability(struct sock *sk, u16 index, unsigned char *data, if (!hdev) return cmd_status(sk, index, MGMT_OP_SET_IO_CAPABILITY, ENODEV); - hci_dev_lock(hdev); + hci_dev_lock_bh(hdev); hdev->io_capability = cp->io_capability; BT_DBG("%s IO capability set to 0x%02x", hdev->name, hdev->io_capability); - hci_dev_unlock(hdev); + hci_dev_unlock_bh(hdev); hci_dev_put(hdev); return cmd_complete(sk, index, MGMT_OP_SET_IO_CAPABILITY, NULL, 0); @@ -1318,7 +1362,7 @@ static int pair_device(struct sock 
*sk, u16 index, unsigned char *data, u16 len) if (!hdev) return cmd_status(sk, index, MGMT_OP_PAIR_DEVICE, ENODEV); - hci_dev_lock(hdev); + hci_dev_lock_bh(hdev); if (cp->io_cap == 0x03) { sec_level = BT_SECURITY_MEDIUM; @@ -1328,7 +1372,7 @@ static int pair_device(struct sock *sk, u16 index, unsigned char *data, u16 len) auth_type = HCI_AT_DEDICATED_BONDING_MITM; } - conn = hci_connect(hdev, ACL_LINK, &cp->bdaddr, sec_level, auth_type); + conn = hci_connect(hdev, ACL_LINK, 0, &cp->bdaddr, sec_level, auth_type); if (IS_ERR(conn)) { err = PTR_ERR(conn); goto unlock; @@ -1360,7 +1404,7 @@ static int pair_device(struct sock *sk, u16 index, unsigned char *data, u16 len) err = 0; unlock: - hci_dev_unlock(hdev); + hci_dev_unlock_bh(hdev); hci_dev_put(hdev); return err; @@ -1392,7 +1436,7 @@ static int user_confirm_reply(struct sock *sk, u16 index, unsigned char *data, if (!hdev) return cmd_status(sk, index, mgmt_op, ENODEV); - hci_dev_lock(hdev); + hci_dev_lock_bh(hdev); if (!test_bit(HCI_UP, &hdev->flags)) { err = cmd_status(sk, index, mgmt_op, ENETDOWN); @@ -1410,7 +1454,7 @@ static int user_confirm_reply(struct sock *sk, u16 index, unsigned char *data, mgmt_pending_remove(cmd); failed: - hci_dev_unlock(hdev); + hci_dev_unlock_bh(hdev); hci_dev_put(hdev); return err; @@ -1434,7 +1478,7 @@ static int set_local_name(struct sock *sk, u16 index, unsigned char *data, if (!hdev) return cmd_status(sk, index, MGMT_OP_SET_LOCAL_NAME, ENODEV); - hci_dev_lock(hdev); + hci_dev_lock_bh(hdev); cmd = mgmt_pending_add(sk, MGMT_OP_SET_LOCAL_NAME, index, data, len); if (!cmd) { @@ -1449,7 +1493,7 @@ static int set_local_name(struct sock *sk, u16 index, unsigned char *data, mgmt_pending_remove(cmd); failed: - hci_dev_unlock(hdev); + hci_dev_unlock_bh(hdev); hci_dev_put(hdev); return err; @@ -1468,7 +1512,7 @@ static int read_local_oob_data(struct sock *sk, u16 index) return cmd_status(sk, index, MGMT_OP_READ_LOCAL_OOB_DATA, ENODEV); - hci_dev_lock(hdev); + hci_dev_lock_bh(hdev); if (!test_bit(HCI_UP, &hdev->flags)) { err = cmd_status(sk, index, MGMT_OP_READ_LOCAL_OOB_DATA, @@ -1498,7 +1542,7 @@ static int read_local_oob_data(struct sock *sk, u16 index) mgmt_pending_remove(cmd); unlock: - hci_dev_unlock(hdev); + hci_dev_unlock_bh(hdev); hci_dev_put(hdev); return err; @@ -1522,7 +1566,7 @@ static int add_remote_oob_data(struct sock *sk, u16 index, unsigned char *data, return cmd_status(sk, index, MGMT_OP_ADD_REMOTE_OOB_DATA, ENODEV); - hci_dev_lock(hdev); + hci_dev_lock_bh(hdev); err = hci_add_remote_oob_data(hdev, &cp->bdaddr, cp->hash, cp->randomizer); @@ -1532,7 +1576,7 @@ static int add_remote_oob_data(struct sock *sk, u16 index, unsigned char *data, err = cmd_complete(sk, index, MGMT_OP_ADD_REMOTE_OOB_DATA, NULL, 0); - hci_dev_unlock(hdev); + hci_dev_unlock_bh(hdev); hci_dev_put(hdev); return err; @@ -1556,7 +1600,7 @@ static int remove_remote_oob_data(struct sock *sk, u16 index, return cmd_status(sk, index, MGMT_OP_REMOVE_REMOTE_OOB_DATA, ENODEV); - hci_dev_lock(hdev); + hci_dev_lock_bh(hdev); err = hci_remove_remote_oob_data(hdev, &cp->bdaddr); if (err < 0) @@ -1566,7 +1610,7 @@ static int remove_remote_oob_data(struct sock *sk, u16 index, err = cmd_complete(sk, index, MGMT_OP_REMOVE_REMOTE_OOB_DATA, NULL, 0); - hci_dev_unlock(hdev); + hci_dev_unlock_bh(hdev); hci_dev_put(hdev); return err; @@ -1641,6 +1685,70 @@ failed: return err; } +static int block_device(struct sock *sk, u16 index, unsigned char *data, + u16 len) +{ + struct hci_dev *hdev; + struct mgmt_cp_block_device *cp; + int err; + + 
BT_DBG("hci%u", index); + + cp = (void *) data; + + if (len != sizeof(*cp)) + return cmd_status(sk, index, MGMT_OP_BLOCK_DEVICE, + EINVAL); + + hdev = hci_dev_get(index); + if (!hdev) + return cmd_status(sk, index, MGMT_OP_BLOCK_DEVICE, + ENODEV); + + err = hci_blacklist_add(hdev, &cp->bdaddr); + + if (err < 0) + err = cmd_status(sk, index, MGMT_OP_BLOCK_DEVICE, -err); + else + err = cmd_complete(sk, index, MGMT_OP_BLOCK_DEVICE, + NULL, 0); + hci_dev_put(hdev); + + return err; +} + +static int unblock_device(struct sock *sk, u16 index, unsigned char *data, + u16 len) +{ + struct hci_dev *hdev; + struct mgmt_cp_unblock_device *cp; + int err; + + BT_DBG("hci%u", index); + + cp = (void *) data; + + if (len != sizeof(*cp)) + return cmd_status(sk, index, MGMT_OP_UNBLOCK_DEVICE, + EINVAL); + + hdev = hci_dev_get(index); + if (!hdev) + return cmd_status(sk, index, MGMT_OP_UNBLOCK_DEVICE, + ENODEV); + + err = hci_blacklist_del(hdev, &cp->bdaddr); + + if (err < 0) + err = cmd_status(sk, index, MGMT_OP_UNBLOCK_DEVICE, -err); + else + err = cmd_complete(sk, index, MGMT_OP_UNBLOCK_DEVICE, + NULL, 0); + hci_dev_put(hdev); + + return err; +} + int mgmt_control(struct sock *sk, struct msghdr *msg, size_t msglen) { unsigned char *buf; @@ -1755,6 +1863,12 @@ int mgmt_control(struct sock *sk, struct msghdr *msg, size_t msglen) case MGMT_OP_STOP_DISCOVERY: err = stop_discovery(sk, index); break; + case MGMT_OP_BLOCK_DEVICE: + err = block_device(sk, index, buf + sizeof(*hdr), len); + break; + case MGMT_OP_UNBLOCK_DEVICE: + err = unblock_device(sk, index, buf + sizeof(*hdr), len); + break; default: BT_DBG("Unknown op %u", opcode); err = cmd_status(sk, index, opcode, 0x01); @@ -1863,17 +1977,28 @@ int mgmt_connectable(u16 index, u8 connectable) int mgmt_new_key(u16 index, struct link_key *key, u8 persistent) { - struct mgmt_ev_new_key ev; + struct mgmt_ev_new_key *ev; + int err, total; - memset(&ev, 0, sizeof(ev)); + total = sizeof(struct mgmt_ev_new_key) + key->dlen; + ev = kzalloc(total, GFP_ATOMIC); + if (!ev) + return -ENOMEM; - ev.store_hint = persistent; - bacpy(&ev.key.bdaddr, &key->bdaddr); - ev.key.type = key->type; - memcpy(ev.key.val, key->val, 16); - ev.key.pin_len = key->pin_len; + bacpy(&ev->key.bdaddr, &key->bdaddr); + ev->key.type = key->type; + memcpy(ev->key.val, key->val, 16); + ev->key.pin_len = key->pin_len; + ev->key.dlen = key->dlen; + ev->store_hint = persistent; - return mgmt_event(MGMT_EV_NEW_KEY, index, &ev, sizeof(ev), NULL); + memcpy(ev->key.data, key->data, key->dlen); + + err = mgmt_event(MGMT_EV_NEW_KEY, index, ev, total, NULL); + + kfree(ev); + + return err; } int mgmt_connected(u16 index, bdaddr_t *bdaddr) diff --git a/net/bluetooth/rfcomm/core.c b/net/bluetooth/rfcomm/core.c index 5759bb7054f7..c2486a53714e 100644 --- a/net/bluetooth/rfcomm/core.c +++ b/net/bluetooth/rfcomm/core.c @@ -62,7 +62,6 @@ static DEFINE_MUTEX(rfcomm_mutex); #define rfcomm_lock() mutex_lock(&rfcomm_mutex) #define rfcomm_unlock() mutex_unlock(&rfcomm_mutex) -static unsigned long rfcomm_event; static LIST_HEAD(session_list); @@ -120,7 +119,6 @@ static inline void rfcomm_schedule(void) { if (!rfcomm_thread) return; - set_bit(RFCOMM_SCHED_WAKEUP, &rfcomm_event); wake_up_process(rfcomm_thread); } @@ -466,7 +464,6 @@ static int __rfcomm_dlc_close(struct rfcomm_dlc *d, int err) switch (d->state) { case BT_CONNECT: - case BT_CONFIG: if (test_and_clear_bit(RFCOMM_DEFER_SETUP, &d->flags)) { set_bit(RFCOMM_AUTH_REJECT, &d->flags); rfcomm_schedule(); @@ -2038,19 +2035,18 @@ static int rfcomm_run(void *unused) 
rfcomm_add_listener(BDADDR_ANY); - while (!kthread_should_stop()) { + while (1) { set_current_state(TASK_INTERRUPTIBLE); - if (!test_bit(RFCOMM_SCHED_WAKEUP, &rfcomm_event)) { - /* No pending events. Let's sleep. - * Incoming connections and data will wake us up. */ - schedule(); - } - set_current_state(TASK_RUNNING); + + if (kthread_should_stop()) + break; /* Process stuff */ - clear_bit(RFCOMM_SCHED_WAKEUP, &rfcomm_event); rfcomm_process_sessions(); + + schedule(); } + __set_current_state(TASK_RUNNING); rfcomm_kill_listener(); diff --git a/net/bluetooth/rfcomm/sock.c b/net/bluetooth/rfcomm/sock.c index 1b10727ce523..b02f0d47ab80 100644 --- a/net/bluetooth/rfcomm/sock.c +++ b/net/bluetooth/rfcomm/sock.c @@ -485,11 +485,6 @@ static int rfcomm_sock_accept(struct socket *sock, struct socket *newsock, int f lock_sock(sk); - if (sk->sk_state != BT_LISTEN) { - err = -EBADFD; - goto done; - } - if (sk->sk_type != SOCK_STREAM) { err = -EINVAL; goto done; @@ -501,19 +496,20 @@ static int rfcomm_sock_accept(struct socket *sock, struct socket *newsock, int f /* Wait for an incoming connection. (wake-one). */ add_wait_queue_exclusive(sk_sleep(sk), &wait); - while (!(nsk = bt_accept_dequeue(sk, newsock))) { + while (1) { set_current_state(TASK_INTERRUPTIBLE); - if (!timeo) { - err = -EAGAIN; + + if (sk->sk_state != BT_LISTEN) { + err = -EBADFD; break; } - release_sock(sk); - timeo = schedule_timeout(timeo); - lock_sock(sk); + nsk = bt_accept_dequeue(sk, newsock); + if (nsk) + break; - if (sk->sk_state != BT_LISTEN) { - err = -EBADFD; + if (!timeo) { + err = -EAGAIN; break; } @@ -521,8 +517,12 @@ static int rfcomm_sock_accept(struct socket *sock, struct socket *newsock, int f err = sock_intr_errno(timeo); break; } + + release_sock(sk); + timeo = schedule_timeout(timeo); + lock_sock(sk); } - set_current_state(TASK_RUNNING); + __set_current_state(TASK_RUNNING); remove_wait_queue(sk_sleep(sk), &wait); if (err) @@ -679,7 +679,8 @@ static int rfcomm_sock_setsockopt(struct socket *sock, int level, int optname, c { struct sock *sk = sock->sk; struct bt_security sec; - int len, err = 0; + int err = 0; + size_t len; u32 opt; BT_DBG("sk %p", sk); @@ -741,7 +742,6 @@ static int rfcomm_sock_setsockopt(struct socket *sock, int level, int optname, c static int rfcomm_sock_getsockopt_old(struct socket *sock, int optname, char __user *optval, int __user *optlen) { struct sock *sk = sock->sk; - struct sock *l2cap_sk; struct rfcomm_conninfo cinfo; struct l2cap_conn *conn = l2cap_pi(sk)->chan->conn; int len, err = 0; @@ -786,7 +786,6 @@ static int rfcomm_sock_getsockopt_old(struct socket *sock, int optname, char __u break; } - l2cap_sk = rfcomm_pi(sk)->dlc->session->sock->sk; memset(&cinfo, 0, sizeof(cinfo)); cinfo.hci_handle = conn->hcon->handle; diff --git a/net/bluetooth/sco.c b/net/bluetooth/sco.c index cb4fb7837e5c..d3d48b5b542c 100644 --- a/net/bluetooth/sco.c +++ b/net/bluetooth/sco.c @@ -177,6 +177,7 @@ static int sco_connect(struct sock *sk) { bdaddr_t *src = &bt_sk(sk)->src; bdaddr_t *dst = &bt_sk(sk)->dst; + __u16 pkt_type = sco_pi(sk)->pkt_type; struct sco_conn *conn; struct hci_conn *hcon; struct hci_dev *hdev; @@ -192,10 +193,12 @@ static int sco_connect(struct sock *sk) if (lmp_esco_capable(hdev) && !disable_esco) type = ESCO_LINK; - else + else { type = SCO_LINK; + pkt_type &= SCO_ESCO_MASK; + } - hcon = hci_connect(hdev, type, dst, BT_SECURITY_LOW, HCI_AT_NO_BONDING); + hcon = hci_connect(hdev, type, pkt_type, dst, BT_SECURITY_LOW, HCI_AT_NO_BONDING); if (IS_ERR(hcon)) { err = PTR_ERR(hcon); goto done; 
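[note] The sco.c hunks on either side of this point thread a caller-supplied packet-type mask through SCO connection setup: sco_sock_bind() and sco_sock_connect() below copy the user's address into a zeroed local sockaddr_sco and save sa.sco_pkt_type in the socket, while sco_connect() above hands it to hci_connect(), narrowing it with SCO_ESCO_MASK when the controller offers only legacy SCO links. A minimal user-space sketch of a caller picking a packet type through the extended address structure — assuming the patched headers export the new sco_pkt_type field and the usual eSCO packet-type constants (ESCO_EV3 here is an illustrative choice, not something this diff mandates):

#include <string.h>
#include <unistd.h>
#include <sys/socket.h>
#include <bluetooth/bluetooth.h>
#include <bluetooth/hci.h>      /* ESCO_EV3 and friends, if provided */
#include <bluetooth/sco.h>      /* patched struct sockaddr_sco */

static int sco_connect_ev3(const bdaddr_t *dst)
{
        struct sockaddr_sco sa;
        int sk;

        sk = socket(PF_BLUETOOTH, SOCK_SEQPACKET, BTPROTO_SCO);
        if (sk < 0)
                return -1;

        memset(&sa, 0, sizeof(sa));
        sa.sco_family = AF_BLUETOOTH;
        bacpy(&sa.sco_bdaddr, dst);
        sa.sco_pkt_type = ESCO_EV3;     /* request EV3 packets only */

        /* The patched kernel copies min(alen, sizeof(sa)) bytes into a
         * zeroed local structure, so older callers passing the shorter
         * legacy sockaddr_sco keep working, with sco_pkt_type left 0. */
        if (connect(sk, (struct sockaddr *) &sa, sizeof(sa)) < 0) {
                close(sk);
                return -1;
        }

        return sk;
}
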
@@ -460,18 +463,22 @@ static int sco_sock_create(struct net *net, struct socket *sock, int protocol, return 0; } -static int sco_sock_bind(struct socket *sock, struct sockaddr *addr, int addr_len) +static int sco_sock_bind(struct socket *sock, struct sockaddr *addr, int alen) { - struct sockaddr_sco *sa = (struct sockaddr_sco *) addr; + struct sockaddr_sco sa; struct sock *sk = sock->sk; - bdaddr_t *src = &sa->sco_bdaddr; - int err = 0; + bdaddr_t *src = &sa.sco_bdaddr; + int len, err = 0; - BT_DBG("sk %p %s", sk, batostr(&sa->sco_bdaddr)); + BT_DBG("sk %p %s", sk, batostr(&sa.sco_bdaddr)); if (!addr || addr->sa_family != AF_BLUETOOTH) return -EINVAL; + memset(&sa, 0, sizeof(sa)); + len = min_t(unsigned int, sizeof(sa), alen); + memcpy(&sa, addr, len); + lock_sock(sk); if (sk->sk_state != BT_OPEN) { @@ -485,7 +492,8 @@ static int sco_sock_bind(struct socket *sock, struct sockaddr *addr, int addr_le err = -EADDRINUSE; } else { /* Save source address */ - bacpy(&bt_sk(sk)->src, &sa->sco_bdaddr); + bacpy(&bt_sk(sk)->src, &sa.sco_bdaddr); + sco_pi(sk)->pkt_type = sa.sco_pkt_type; sk->sk_state = BT_BOUND; } @@ -498,27 +506,34 @@ done: static int sco_sock_connect(struct socket *sock, struct sockaddr *addr, int alen, int flags) { - struct sockaddr_sco *sa = (struct sockaddr_sco *) addr; struct sock *sk = sock->sk; - int err = 0; - + struct sockaddr_sco sa; + int len, err = 0; BT_DBG("sk %p", sk); - if (alen < sizeof(struct sockaddr_sco) || - addr->sa_family != AF_BLUETOOTH) + if (!addr || addr->sa_family != AF_BLUETOOTH) return -EINVAL; - if (sk->sk_state != BT_OPEN && sk->sk_state != BT_BOUND) - return -EBADFD; - - if (sk->sk_type != SOCK_SEQPACKET) - return -EINVAL; + memset(&sa, 0, sizeof(sa)); + len = min_t(unsigned int, sizeof(sa), alen); + memcpy(&sa, addr, len); lock_sock(sk); + if (sk->sk_type != SOCK_SEQPACKET) { + err = -EINVAL; + goto done; + } + + if (sk->sk_state != BT_OPEN && sk->sk_state != BT_BOUND) { + err = -EBADFD; + goto done; + } + /* Set destination address and psm */ - bacpy(&bt_sk(sk)->dst, &sa->sco_bdaddr); + bacpy(&bt_sk(sk)->dst, &sa.sco_bdaddr); + sco_pi(sk)->pkt_type = sa.sco_pkt_type; err = sco_connect(sk); if (err) @@ -564,30 +579,26 @@ static int sco_sock_accept(struct socket *sock, struct socket *newsock, int flag lock_sock(sk); - if (sk->sk_state != BT_LISTEN) { - err = -EBADFD; - goto done; - } - timeo = sock_rcvtimeo(sk, flags & O_NONBLOCK); BT_DBG("sk %p timeo %ld", sk, timeo); /* Wait for an incoming connection. (wake-one). 
*/ add_wait_queue_exclusive(sk_sleep(sk), &wait); - while (!(ch = bt_accept_dequeue(sk, newsock))) { + while (1) { set_current_state(TASK_INTERRUPTIBLE); - if (!timeo) { - err = -EAGAIN; + + if (sk->sk_state != BT_LISTEN) { + err = -EBADFD; break; } - release_sock(sk); - timeo = schedule_timeout(timeo); - lock_sock(sk); + ch = bt_accept_dequeue(sk, newsock); + if (ch) + break; - if (sk->sk_state != BT_LISTEN) { - err = -EBADFD; + if (!timeo) { + err = -EAGAIN; break; } @@ -595,8 +606,12 @@ static int sco_sock_accept(struct socket *sock, struct socket *newsock, int flag err = sock_intr_errno(timeo); break; } + + release_sock(sk); + timeo = schedule_timeout(timeo); + lock_sock(sk); } - set_current_state(TASK_RUNNING); + __set_current_state(TASK_RUNNING); remove_wait_queue(sk_sleep(sk), &wait); if (err) @@ -625,6 +640,7 @@ static int sco_sock_getname(struct socket *sock, struct sockaddr *addr, int *len bacpy(&sa->sco_bdaddr, &bt_sk(sk)->dst); else bacpy(&sa->sco_bdaddr, &bt_sk(sk)->src); + sa->sco_pkt_type = sco_pi(sk)->pkt_type; return 0; } @@ -932,7 +948,7 @@ static int sco_connect_cfm(struct hci_conn *hcon, __u8 status) if (conn) sco_conn_ready(conn); } else - sco_conn_del(hcon, bt_err(status)); + sco_conn_del(hcon, bt_to_errno(status)); return 0; } @@ -944,7 +960,7 @@ static int sco_disconn_cfm(struct hci_conn *hcon, __u8 reason) if (hcon->type != SCO_LINK && hcon->type != ESCO_LINK) return -EINVAL; - sco_conn_del(hcon, bt_err(reason)); + sco_conn_del(hcon, bt_to_errno(reason)); return 0; } diff --git a/net/bluetooth/smp.c b/net/bluetooth/smp.c new file mode 100644 index 000000000000..391888b88a92 --- /dev/null +++ b/net/bluetooth/smp.c @@ -0,0 +1,702 @@ +/* + BlueZ - Bluetooth protocol stack for Linux + Copyright (C) 2011 Nokia Corporation and/or its subsidiary(-ies). + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License version 2 as + published by the Free Software Foundation; + + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS + OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS. + IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY + CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES + WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + + ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS, + COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS + SOFTWARE IS DISCLAIMED. 
+*/ + +#include <net/bluetooth/bluetooth.h> +#include <net/bluetooth/hci_core.h> +#include <net/bluetooth/l2cap.h> +#include <net/bluetooth/smp.h> +#include <linux/crypto.h> +#include <linux/scatterlist.h> +#include <crypto/b128ops.h> + +#define SMP_TIMEOUT 30000 /* 30 seconds */ + +static inline void swap128(u8 src[16], u8 dst[16]) +{ + int i; + for (i = 0; i < 16; i++) + dst[15 - i] = src[i]; +} + +static inline void swap56(u8 src[7], u8 dst[7]) +{ + int i; + for (i = 0; i < 7; i++) + dst[6 - i] = src[i]; +} + +static int smp_e(struct crypto_blkcipher *tfm, const u8 *k, u8 *r) +{ + struct blkcipher_desc desc; + struct scatterlist sg; + int err, iv_len; + unsigned char iv[128]; + + if (tfm == NULL) { + BT_ERR("tfm %p", tfm); + return -EINVAL; + } + + desc.tfm = tfm; + desc.flags = 0; + + err = crypto_blkcipher_setkey(tfm, k, 16); + if (err) { + BT_ERR("cipher setkey failed: %d", err); + return err; + } + + sg_init_one(&sg, r, 16); + + iv_len = crypto_blkcipher_ivsize(tfm); + if (iv_len) { + memset(&iv, 0xff, iv_len); + crypto_blkcipher_set_iv(tfm, iv, iv_len); + } + + err = crypto_blkcipher_encrypt(&desc, &sg, &sg, 16); + if (err) + BT_ERR("Encrypt data error %d", err); + + return err; +} + +static int smp_c1(struct crypto_blkcipher *tfm, u8 k[16], u8 r[16], + u8 preq[7], u8 pres[7], u8 _iat, bdaddr_t *ia, + u8 _rat, bdaddr_t *ra, u8 res[16]) +{ + u8 p1[16], p2[16]; + int err; + + memset(p1, 0, 16); + + /* p1 = pres || preq || _rat || _iat */ + swap56(pres, p1); + swap56(preq, p1 + 7); + p1[14] = _rat; + p1[15] = _iat; + + memset(p2, 0, 16); + + /* p2 = padding || ia || ra */ + baswap((bdaddr_t *) (p2 + 4), ia); + baswap((bdaddr_t *) (p2 + 10), ra); + + /* res = r XOR p1 */ + u128_xor((u128 *) res, (u128 *) r, (u128 *) p1); + + /* res = e(k, res) */ + err = smp_e(tfm, k, res); + if (err) { + BT_ERR("Encrypt data error"); + return err; + } + + /* res = res XOR p2 */ + u128_xor((u128 *) res, (u128 *) res, (u128 *) p2); + + /* res = e(k, res) */ + err = smp_e(tfm, k, res); + if (err) + BT_ERR("Encrypt data error"); + + return err; +} + +static int smp_s1(struct crypto_blkcipher *tfm, u8 k[16], + u8 r1[16], u8 r2[16], u8 _r[16]) +{ + int err; + + /* Just least significant octets from r1 and r2 are considered */ + memcpy(_r, r1 + 8, 8); + memcpy(_r + 8, r2 + 8, 8); + + err = smp_e(tfm, k, _r); + if (err) + BT_ERR("Encrypt data error"); + + return err; +} + +static int smp_rand(u8 *buf) +{ + get_random_bytes(buf, 16); + + return 0; +} + +static struct sk_buff *smp_build_cmd(struct l2cap_conn *conn, u8 code, + u16 dlen, void *data) +{ + struct sk_buff *skb; + struct l2cap_hdr *lh; + int len; + + len = L2CAP_HDR_SIZE + sizeof(code) + dlen; + + if (len > conn->mtu) + return NULL; + + skb = bt_skb_alloc(len, GFP_ATOMIC); + if (!skb) + return NULL; + + lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE); + lh->len = cpu_to_le16(sizeof(code) + dlen); + lh->cid = cpu_to_le16(L2CAP_CID_SMP); + + memcpy(skb_put(skb, sizeof(code)), &code, sizeof(code)); + + memcpy(skb_put(skb, dlen), data, dlen); + + return skb; +} + +static void smp_send_cmd(struct l2cap_conn *conn, u8 code, u16 len, void *data) +{ + struct sk_buff *skb = smp_build_cmd(conn, code, len, data); + + BT_DBG("code 0x%2.2x", code); + + if (!skb) + return; + + hci_send_acl(conn->hcon, skb, 0); +} + +static __u8 seclevel_to_authreq(__u8 level) +{ + switch (level) { + case BT_SECURITY_HIGH: + /* Right now we don't support bonding */ + return SMP_AUTH_MITM; + + default: + return SMP_AUTH_NONE; + } +} + +static void build_pairing_cmd(struct 
l2cap_conn *conn, + struct smp_cmd_pairing *req, + struct smp_cmd_pairing *rsp, + __u8 authreq) +{ + u8 dist_keys; + + dist_keys = 0; + if (test_bit(HCI_PAIRABLE, &conn->hcon->hdev->flags)) { + dist_keys = SMP_DIST_ENC_KEY | SMP_DIST_ID_KEY | SMP_DIST_SIGN; + authreq |= SMP_AUTH_BONDING; + } + + if (rsp == NULL) { + req->io_capability = conn->hcon->io_capability; + req->oob_flag = SMP_OOB_NOT_PRESENT; + req->max_key_size = SMP_MAX_ENC_KEY_SIZE; + req->init_key_dist = dist_keys; + req->resp_key_dist = dist_keys; + req->auth_req = authreq; + return; + } + + rsp->io_capability = conn->hcon->io_capability; + rsp->oob_flag = SMP_OOB_NOT_PRESENT; + rsp->max_key_size = SMP_MAX_ENC_KEY_SIZE; + rsp->init_key_dist = req->init_key_dist & dist_keys; + rsp->resp_key_dist = req->resp_key_dist & dist_keys; + rsp->auth_req = authreq; +} + +static u8 check_enc_key_size(struct l2cap_conn *conn, __u8 max_key_size) +{ + if ((max_key_size > SMP_MAX_ENC_KEY_SIZE) || + (max_key_size < SMP_MIN_ENC_KEY_SIZE)) + return SMP_ENC_KEY_SIZE; + + conn->smp_key_size = max_key_size; + + return 0; +} + +static u8 smp_cmd_pairing_req(struct l2cap_conn *conn, struct sk_buff *skb) +{ + struct smp_cmd_pairing rsp, *req = (void *) skb->data; + u8 key_size; + + BT_DBG("conn %p", conn); + + conn->preq[0] = SMP_CMD_PAIRING_REQ; + memcpy(&conn->preq[1], req, sizeof(*req)); + skb_pull(skb, sizeof(*req)); + + if (req->oob_flag) + return SMP_OOB_NOT_AVAIL; + + /* We didn't start the pairing, so no requirements */ + build_pairing_cmd(conn, req, &rsp, SMP_AUTH_NONE); + + key_size = min(req->max_key_size, rsp.max_key_size); + if (check_enc_key_size(conn, key_size)) + return SMP_ENC_KEY_SIZE; + + /* Just works */ + memset(conn->tk, 0, sizeof(conn->tk)); + + conn->prsp[0] = SMP_CMD_PAIRING_RSP; + memcpy(&conn->prsp[1], &rsp, sizeof(rsp)); + + smp_send_cmd(conn, SMP_CMD_PAIRING_RSP, sizeof(rsp), &rsp); + + mod_timer(&conn->security_timer, jiffies + + msecs_to_jiffies(SMP_TIMEOUT)); + + return 0; +} + +static u8 smp_cmd_pairing_rsp(struct l2cap_conn *conn, struct sk_buff *skb) +{ + struct smp_cmd_pairing *req, *rsp = (void *) skb->data; + struct smp_cmd_pairing_confirm cp; + struct crypto_blkcipher *tfm = conn->hcon->hdev->tfm; + int ret; + u8 res[16], key_size; + + BT_DBG("conn %p", conn); + + skb_pull(skb, sizeof(*rsp)); + + req = (void *) &conn->preq[1]; + + key_size = min(req->max_key_size, rsp->max_key_size); + if (check_enc_key_size(conn, key_size)) + return SMP_ENC_KEY_SIZE; + + if (rsp->oob_flag) + return SMP_OOB_NOT_AVAIL; + + /* Just works */ + memset(conn->tk, 0, sizeof(conn->tk)); + + conn->prsp[0] = SMP_CMD_PAIRING_RSP; + memcpy(&conn->prsp[1], rsp, sizeof(*rsp)); + + ret = smp_rand(conn->prnd); + if (ret) + return SMP_UNSPECIFIED; + + ret = smp_c1(tfm, conn->tk, conn->prnd, conn->preq, conn->prsp, 0, + conn->src, conn->hcon->dst_type, conn->dst, res); + if (ret) + return SMP_UNSPECIFIED; + + swap128(res, cp.confirm_val); + + smp_send_cmd(conn, SMP_CMD_PAIRING_CONFIRM, sizeof(cp), &cp); + + return 0; +} + +static u8 smp_cmd_pairing_confirm(struct l2cap_conn *conn, struct sk_buff *skb) +{ + struct crypto_blkcipher *tfm = conn->hcon->hdev->tfm; + + BT_DBG("conn %p %s", conn, conn->hcon->out ? 
"master" : "slave"); + + memcpy(conn->pcnf, skb->data, sizeof(conn->pcnf)); + skb_pull(skb, sizeof(conn->pcnf)); + + if (conn->hcon->out) { + u8 random[16]; + + swap128(conn->prnd, random); + smp_send_cmd(conn, SMP_CMD_PAIRING_RANDOM, sizeof(random), + random); + } else { + struct smp_cmd_pairing_confirm cp; + int ret; + u8 res[16]; + + ret = smp_rand(conn->prnd); + if (ret) + return SMP_UNSPECIFIED; + + ret = smp_c1(tfm, conn->tk, conn->prnd, conn->preq, conn->prsp, + conn->hcon->dst_type, conn->dst, + 0, conn->src, res); + if (ret) + return SMP_CONFIRM_FAILED; + + swap128(res, cp.confirm_val); + + smp_send_cmd(conn, SMP_CMD_PAIRING_CONFIRM, sizeof(cp), &cp); + } + + mod_timer(&conn->security_timer, jiffies + + msecs_to_jiffies(SMP_TIMEOUT)); + + return 0; +} + +static u8 smp_cmd_pairing_random(struct l2cap_conn *conn, struct sk_buff *skb) +{ + struct hci_conn *hcon = conn->hcon; + struct crypto_blkcipher *tfm = hcon->hdev->tfm; + int ret; + u8 key[16], res[16], random[16], confirm[16]; + + swap128(skb->data, random); + skb_pull(skb, sizeof(random)); + + if (conn->hcon->out) + ret = smp_c1(tfm, conn->tk, random, conn->preq, conn->prsp, 0, + conn->src, conn->hcon->dst_type, conn->dst, + res); + else + ret = smp_c1(tfm, conn->tk, random, conn->preq, conn->prsp, + conn->hcon->dst_type, conn->dst, 0, conn->src, + res); + if (ret) + return SMP_UNSPECIFIED; + + BT_DBG("conn %p %s", conn, conn->hcon->out ? "master" : "slave"); + + swap128(res, confirm); + + if (memcmp(conn->pcnf, confirm, sizeof(conn->pcnf)) != 0) { + BT_ERR("Pairing failed (confirmation values mismatch)"); + return SMP_CONFIRM_FAILED; + } + + if (conn->hcon->out) { + u8 stk[16], rand[8]; + __le16 ediv; + + memset(rand, 0, sizeof(rand)); + ediv = 0; + + smp_s1(tfm, conn->tk, random, conn->prnd, key); + swap128(key, stk); + + memset(stk + conn->smp_key_size, 0, + SMP_MAX_ENC_KEY_SIZE - conn->smp_key_size); + + hci_le_start_enc(hcon, ediv, rand, stk); + hcon->enc_key_size = conn->smp_key_size; + } else { + u8 stk[16], r[16], rand[8]; + __le16 ediv; + + memset(rand, 0, sizeof(rand)); + ediv = 0; + + swap128(conn->prnd, r); + smp_send_cmd(conn, SMP_CMD_PAIRING_RANDOM, sizeof(r), r); + + smp_s1(tfm, conn->tk, conn->prnd, random, key); + swap128(key, stk); + + memset(stk + conn->smp_key_size, 0, + SMP_MAX_ENC_KEY_SIZE - conn->smp_key_size); + + hci_add_ltk(conn->hcon->hdev, 0, conn->dst, conn->smp_key_size, + ediv, rand, stk); + } + + return 0; +} + +static u8 smp_cmd_security_req(struct l2cap_conn *conn, struct sk_buff *skb) +{ + struct smp_cmd_security_req *rp = (void *) skb->data; + struct smp_cmd_pairing cp; + struct hci_conn *hcon = conn->hcon; + + BT_DBG("conn %p", conn); + + if (test_bit(HCI_CONN_ENCRYPT_PEND, &hcon->pend)) + return 0; + + skb_pull(skb, sizeof(*rp)); + + memset(&cp, 0, sizeof(cp)); + build_pairing_cmd(conn, &cp, NULL, rp->auth_req); + + conn->preq[0] = SMP_CMD_PAIRING_REQ; + memcpy(&conn->preq[1], &cp, sizeof(cp)); + + smp_send_cmd(conn, SMP_CMD_PAIRING_REQ, sizeof(cp), &cp); + + mod_timer(&conn->security_timer, jiffies + + msecs_to_jiffies(SMP_TIMEOUT)); + + set_bit(HCI_CONN_ENCRYPT_PEND, &hcon->pend); + + return 0; +} + +int smp_conn_security(struct l2cap_conn *conn, __u8 sec_level) +{ + struct hci_conn *hcon = conn->hcon; + __u8 authreq; + + BT_DBG("conn %p hcon %p level 0x%2.2x", conn, hcon, sec_level); + + if (!lmp_host_le_capable(hcon->hdev)) + return 1; + + if (IS_ERR(hcon->hdev->tfm)) + return 1; + + if (test_bit(HCI_CONN_ENCRYPT_PEND, &hcon->pend)) + return 0; + + if (sec_level == BT_SECURITY_LOW) + 
return 1; + + if (hcon->sec_level >= sec_level) + return 1; + + authreq = seclevel_to_authreq(sec_level); + + if (hcon->link_mode & HCI_LM_MASTER) { + struct smp_cmd_pairing cp; + struct link_key *key; + + key = hci_find_link_key_type(hcon->hdev, conn->dst, + HCI_LK_SMP_LTK); + if (key) { + struct key_master_id *master = (void *) key->data; + + hci_le_start_enc(hcon, master->ediv, master->rand, + key->val); + hcon->enc_key_size = key->pin_len; + + goto done; + } + + build_pairing_cmd(conn, &cp, NULL, authreq); + conn->preq[0] = SMP_CMD_PAIRING_REQ; + memcpy(&conn->preq[1], &cp, sizeof(cp)); + + mod_timer(&conn->security_timer, jiffies + + msecs_to_jiffies(SMP_TIMEOUT)); + + smp_send_cmd(conn, SMP_CMD_PAIRING_REQ, sizeof(cp), &cp); + } else { + struct smp_cmd_security_req cp; + cp.auth_req = authreq; + smp_send_cmd(conn, SMP_CMD_SECURITY_REQ, sizeof(cp), &cp); + } + +done: + hcon->pending_sec_level = sec_level; + set_bit(HCI_CONN_ENCRYPT_PEND, &hcon->pend); + + return 0; +} + +static int smp_cmd_encrypt_info(struct l2cap_conn *conn, struct sk_buff *skb) +{ + struct smp_cmd_encrypt_info *rp = (void *) skb->data; + + skb_pull(skb, sizeof(*rp)); + + memcpy(conn->tk, rp->ltk, sizeof(conn->tk)); + + return 0; +} + +static int smp_cmd_master_ident(struct l2cap_conn *conn, struct sk_buff *skb) +{ + struct smp_cmd_master_ident *rp = (void *) skb->data; + + skb_pull(skb, sizeof(*rp)); + + hci_add_ltk(conn->hcon->hdev, 1, conn->src, conn->smp_key_size, + rp->ediv, rp->rand, conn->tk); + + smp_distribute_keys(conn, 1); + + return 0; +} + +int smp_sig_channel(struct l2cap_conn *conn, struct sk_buff *skb) +{ + __u8 code = skb->data[0]; + __u8 reason; + int err = 0; + + if (!lmp_host_le_capable(conn->hcon->hdev)) { + err = -ENOTSUPP; + reason = SMP_PAIRING_NOTSUPP; + goto done; + } + + if (IS_ERR(conn->hcon->hdev->tfm)) { + err = PTR_ERR(conn->hcon->hdev->tfm); + reason = SMP_PAIRING_NOTSUPP; + goto done; + } + + skb_pull(skb, sizeof(code)); + + switch (code) { + case SMP_CMD_PAIRING_REQ: + reason = smp_cmd_pairing_req(conn, skb); + break; + + case SMP_CMD_PAIRING_FAIL: + reason = 0; + err = -EPERM; + break; + + case SMP_CMD_PAIRING_RSP: + reason = smp_cmd_pairing_rsp(conn, skb); + break; + + case SMP_CMD_SECURITY_REQ: + reason = smp_cmd_security_req(conn, skb); + break; + + case SMP_CMD_PAIRING_CONFIRM: + reason = smp_cmd_pairing_confirm(conn, skb); + break; + + case SMP_CMD_PAIRING_RANDOM: + reason = smp_cmd_pairing_random(conn, skb); + break; + + case SMP_CMD_ENCRYPT_INFO: + reason = smp_cmd_encrypt_info(conn, skb); + break; + + case SMP_CMD_MASTER_IDENT: + reason = smp_cmd_master_ident(conn, skb); + break; + + case SMP_CMD_IDENT_INFO: + case SMP_CMD_IDENT_ADDR_INFO: + case SMP_CMD_SIGN_INFO: + /* Just ignored */ + reason = 0; + break; + + default: + BT_DBG("Unknown command code 0x%2.2x", code); + + reason = SMP_CMD_NOTSUPP; + err = -EOPNOTSUPP; + goto done; + } + +done: + if (reason) + smp_send_cmd(conn, SMP_CMD_PAIRING_FAIL, sizeof(reason), + &reason); + + kfree_skb(skb); + return err; +} + +int smp_distribute_keys(struct l2cap_conn *conn, __u8 force) +{ + struct smp_cmd_pairing *req, *rsp; + __u8 *keydist; + + BT_DBG("conn %p force %d", conn, force); + + if (IS_ERR(conn->hcon->hdev->tfm)) + return PTR_ERR(conn->hcon->hdev->tfm); + + rsp = (void *) &conn->prsp[1]; + + /* The responder sends its keys first */ + if (!force && conn->hcon->out && (rsp->resp_key_dist & 0x07)) + return 0; + + req = (void *) &conn->preq[1]; + + if (conn->hcon->out) { + keydist = &rsp->init_key_dist; + *keydist &= 
req->init_key_dist; + } else { + keydist = &rsp->resp_key_dist; + *keydist &= req->resp_key_dist; + } + + + BT_DBG("keydist 0x%x", *keydist); + + if (*keydist & SMP_DIST_ENC_KEY) { + struct smp_cmd_encrypt_info enc; + struct smp_cmd_master_ident ident; + __le16 ediv; + + get_random_bytes(enc.ltk, sizeof(enc.ltk)); + get_random_bytes(&ediv, sizeof(ediv)); + get_random_bytes(ident.rand, sizeof(ident.rand)); + + smp_send_cmd(conn, SMP_CMD_ENCRYPT_INFO, sizeof(enc), &enc); + + hci_add_ltk(conn->hcon->hdev, 1, conn->dst, conn->smp_key_size, + ediv, ident.rand, enc.ltk); + + ident.ediv = cpu_to_le16(ediv); + + smp_send_cmd(conn, SMP_CMD_MASTER_IDENT, sizeof(ident), &ident); + + *keydist &= ~SMP_DIST_ENC_KEY; + } + + if (*keydist & SMP_DIST_ID_KEY) { + struct smp_cmd_ident_addr_info addrinfo; + struct smp_cmd_ident_info idinfo; + + /* Send a dummy key */ + get_random_bytes(idinfo.irk, sizeof(idinfo.irk)); + + smp_send_cmd(conn, SMP_CMD_IDENT_INFO, sizeof(idinfo), &idinfo); + + /* Just public address */ + memset(&addrinfo, 0, sizeof(addrinfo)); + bacpy(&addrinfo.bdaddr, conn->src); + + smp_send_cmd(conn, SMP_CMD_IDENT_ADDR_INFO, sizeof(addrinfo), + &addrinfo); + + *keydist &= ~SMP_DIST_ID_KEY; + } + + if (*keydist & SMP_DIST_SIGN) { + struct smp_cmd_sign_info sign; + + /* Send a dummy key */ + get_random_bytes(sign.csrk, sizeof(sign.csrk)); + + smp_send_cmd(conn, SMP_CMD_SIGN_INFO, sizeof(sign), &sign); + + *keydist &= ~SMP_DIST_SIGN; + } + + return 0; +} diff --git a/net/bridge/br_device.c b/net/bridge/br_device.c index ff3ed6086ce1..dac6a2147467 100644 --- a/net/bridge/br_device.c +++ b/net/bridge/br_device.c @@ -38,16 +38,17 @@ netdev_tx_t br_dev_xmit(struct sk_buff *skb, struct net_device *dev) } #endif - u64_stats_update_begin(&brstats->syncp); - brstats->tx_packets++; - brstats->tx_bytes += skb->len; - u64_stats_update_end(&brstats->syncp); - BR_INPUT_SKB_CB(skb)->brdev = dev; skb_reset_mac_header(skb); skb_pull(skb, ETH_HLEN); + u64_stats_update_begin(&brstats->syncp); + brstats->tx_packets++; + /* Exclude ETH_HLEN from byte stats for consistency with Rx chain */ + brstats->tx_bytes += skb->len; + u64_stats_update_end(&brstats->syncp); + rcu_read_lock(); if (is_broadcast_ether_addr(dest)) br_flood_deliver(br, skb); diff --git a/net/ipv4/Makefile b/net/ipv4/Makefile index f2dc69cffb57..681084d76a93 100644 --- a/net/ipv4/Makefile +++ b/net/ipv4/Makefile @@ -14,6 +14,7 @@ obj-y := route.o inetpeer.o protocol.o \ inet_fragment.o ping.o obj-$(CONFIG_SYSCTL) += sysctl_net_ipv4.o +obj-$(CONFIG_SYSFS) += sysfs_net_ipv4.o obj-$(CONFIG_PROC_FS) += proc.o obj-$(CONFIG_IP_MULTIPLE_TABLES) += fib_rules.o obj-$(CONFIG_IP_MROUTE) += ipmr.o diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c index ef1528af7abf..4d60f12c7b6c 100644 --- a/net/ipv4/af_inet.c +++ b/net/ipv4/af_inet.c @@ -118,6 +118,19 @@ #include <linux/mroute.h> #endif +#ifdef CONFIG_ANDROID_PARANOID_NETWORK +#include <linux/android_aid.h> + +static inline int current_has_network(void) +{ + return in_egroup_p(AID_INET) || capable(CAP_NET_RAW); +} +#else +static inline int current_has_network(void) +{ + return 1; +} +#endif /* The inetsw table contains everything that inet_create needs to * build a new socket. @@ -258,6 +271,7 @@ static inline int inet_netns_ok(struct net *net, int protocol) return ipprot->netns_ok; } + /* * Create an inet socket. 
*/ @@ -274,6 +288,9 @@ static int inet_create(struct net *net, struct socket *sock, int protocol, int try_loading_module = 0; int err; + if (!current_has_network()) + return -EACCES; + if (unlikely(!inet_ehash_secret)) if (sock->type != SOCK_RAW && sock->type != SOCK_DGRAM) build_ehash_secret(); @@ -874,6 +891,7 @@ int inet_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg) case SIOCSIFPFLAGS: case SIOCGIFPFLAGS: case SIOCSIFFLAGS: + case SIOCKILLADDR: err = devinet_ioctl(net, cmd, (void __user *)arg); break; default: diff --git a/net/ipv4/devinet.c b/net/ipv4/devinet.c index 4155abca261b..66439a7c6d31 100644 --- a/net/ipv4/devinet.c +++ b/net/ipv4/devinet.c @@ -59,6 +59,7 @@ #include <net/arp.h> #include <net/ip.h> +#include <net/tcp.h> #include <net/route.h> #include <net/ip_fib.h> #include <net/rtnetlink.h> @@ -735,6 +736,7 @@ int devinet_ioctl(struct net *net, unsigned int cmd, void __user *arg) case SIOCSIFBRDADDR: /* Set the broadcast address */ case SIOCSIFDSTADDR: /* Set the destination address */ case SIOCSIFNETMASK: /* Set the netmask for the interface */ + case SIOCKILLADDR: /* Nuke all sockets on this address */ ret = -EACCES; if (!capable(CAP_NET_ADMIN)) goto out; @@ -786,7 +788,8 @@ int devinet_ioctl(struct net *net, unsigned int cmd, void __user *arg) } ret = -EADDRNOTAVAIL; - if (!ifa && cmd != SIOCSIFADDR && cmd != SIOCSIFFLAGS) + if (!ifa && cmd != SIOCSIFADDR && cmd != SIOCSIFFLAGS + && cmd != SIOCKILLADDR) goto done; switch (cmd) { @@ -912,6 +915,9 @@ int devinet_ioctl(struct net *net, unsigned int cmd, void __user *arg) inet_insert_ifa(ifa); } break; + case SIOCKILLADDR: /* Nuke all connections on this address */ + ret = tcp_nuke_addr(net, (struct sockaddr *) sin); + break; } done: rtnl_unlock(); diff --git a/net/ipv4/netfilter/Kconfig b/net/ipv4/netfilter/Kconfig index 1dfc18a03fd4..73b4e91a87e7 100644 --- a/net/ipv4/netfilter/Kconfig +++ b/net/ipv4/netfilter/Kconfig @@ -113,6 +113,18 @@ config IP_NF_TARGET_REJECT To compile it as a module, choose M here. If unsure, say N. +config IP_NF_TARGET_REJECT_SKERR + bool "Force socket error when rejecting with icmp*" + depends on IP_NF_TARGET_REJECT + default n + help + This option enables turning a "--reject-with icmp*" into a matching + socket error also. + The REJECT target normally allows sending an ICMP message. But it + leaves the local socket unaware of any ingress rejects. + + If unsure, say N. + config IP_NF_TARGET_LOG tristate "LOG target support" default m if NETFILTER_ADVANCED=n diff --git a/net/ipv4/netfilter/ipt_REJECT.c b/net/ipv4/netfilter/ipt_REJECT.c index 51f13f8ec724..9dd754c7f2b6 100644 --- a/net/ipv4/netfilter/ipt_REJECT.c +++ b/net/ipv4/netfilter/ipt_REJECT.c @@ -128,6 +128,14 @@ static void send_reset(struct sk_buff *oldskb, int hook) static inline void send_unreach(struct sk_buff *skb_in, int code) { icmp_send(skb_in, ICMP_DEST_UNREACH, code, 0); +#ifdef CONFIG_IP_NF_TARGET_REJECT_SKERR + if (skb_in->sk) { + skb_in->sk->sk_err = icmp_err_convert[code].errno; + skb_in->sk->sk_error_report(skb_in->sk); + pr_debug("ipt_REJECT: sk_err=%d for skb=%p sk=%p\n", + skb_in->sk->sk_err, skb_in, skb_in->sk); + } +#endif } static unsigned int diff --git a/net/ipv4/sysfs_net_ipv4.c b/net/ipv4/sysfs_net_ipv4.c new file mode 100644 index 000000000000..0cbbf10026a6 --- /dev/null +++ b/net/ipv4/sysfs_net_ipv4.c @@ -0,0 +1,88 @@ +/* + * net/ipv4/sysfs_net_ipv4.c + * + * sysfs-based networking knobs (so we can, unlike with sysctl, control perms) + * + * Copyright (C) 2008 Google, Inc. 
+ * + * Robert Love <rlove@google.com> + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#include <linux/kobject.h> +#include <linux/string.h> +#include <linux/sysfs.h> +#include <linux/init.h> +#include <net/tcp.h> + +#define CREATE_IPV4_FILE(_name, _var) \ +static ssize_t _name##_show(struct kobject *kobj, \ + struct kobj_attribute *attr, char *buf) \ +{ \ + return sprintf(buf, "%d\n", _var); \ +} \ +static ssize_t _name##_store(struct kobject *kobj, \ + struct kobj_attribute *attr, \ + const char *buf, size_t count) \ +{ \ + int val, ret; \ + ret = sscanf(buf, "%d", &val); \ + if (ret != 1) \ + return -EINVAL; \ + if (val < 0) \ + return -EINVAL; \ + _var = val; \ + return count; \ +} \ +static struct kobj_attribute _name##_attr = \ + __ATTR(_name, 0644, _name##_show, _name##_store) + +CREATE_IPV4_FILE(tcp_wmem_min, sysctl_tcp_wmem[0]); +CREATE_IPV4_FILE(tcp_wmem_def, sysctl_tcp_wmem[1]); +CREATE_IPV4_FILE(tcp_wmem_max, sysctl_tcp_wmem[2]); + +CREATE_IPV4_FILE(tcp_rmem_min, sysctl_tcp_rmem[0]); +CREATE_IPV4_FILE(tcp_rmem_def, sysctl_tcp_rmem[1]); +CREATE_IPV4_FILE(tcp_rmem_max, sysctl_tcp_rmem[2]); + +static struct attribute *ipv4_attrs[] = { + &tcp_wmem_min_attr.attr, + &tcp_wmem_def_attr.attr, + &tcp_wmem_max_attr.attr, + &tcp_rmem_min_attr.attr, + &tcp_rmem_def_attr.attr, + &tcp_rmem_max_attr.attr, + NULL +}; + +static struct attribute_group ipv4_attr_group = { + .attrs = ipv4_attrs, +}; + +static __init int sysfs_ipv4_init(void) +{ + struct kobject *ipv4_kobject; + int ret; + + ipv4_kobject = kobject_create_and_add("ipv4", kernel_kobj); + if (!ipv4_kobject) + return -ENOMEM; + + ret = sysfs_create_group(ipv4_kobject, &ipv4_attr_group); + if (ret) { + kobject_put(ipv4_kobject); + return ret; + } + + return 0; +} + +subsys_initcall(sysfs_ipv4_init); diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c index 46febcacb729..09ced58e6a51 100644 --- a/net/ipv4/tcp.c +++ b/net/ipv4/tcp.c @@ -266,11 +266,15 @@ #include <linux/crypto.h> #include <linux/time.h> #include <linux/slab.h> +#include <linux/uid_stat.h> #include <net/icmp.h> #include <net/tcp.h> #include <net/xfrm.h> #include <net/ip.h> +#include <net/ip6_route.h> +#include <net/ipv6.h> +#include <net/transp_v6.h> #include <net/netdma.h> #include <net/sock.h> @@ -1112,6 +1116,9 @@ out: if (copied) tcp_push(sk, flags, mss_now, tp->nonagle); release_sock(sk); + + if (copied > 0) + uid_stat_tcp_snd(current_uid(), copied); return copied; do_fault: @@ -1388,8 +1395,11 @@ int tcp_read_sock(struct sock *sk, read_descriptor_t *desc, tcp_rcv_space_adjust(sk); /* Clean up data we have read: This will do ACK frames. 
*/ - if (copied > 0) + if (copied > 0) { tcp_cleanup_rbuf(sk, copied); + uid_stat_tcp_rcv(current_uid(), copied); + } + return copied; } EXPORT_SYMBOL(tcp_read_sock); @@ -1771,6 +1781,9 @@ skip_copy: tcp_cleanup_rbuf(sk, copied); release_sock(sk); + + if (copied > 0) + uid_stat_tcp_rcv(current_uid(), copied); return copied; out: @@ -1779,6 +1792,8 @@ out: recv_urg: err = tcp_recv_urg(sk, msg, len, flags); + if (err > 0) + uid_stat_tcp_rcv(current_uid(), err); goto out; } EXPORT_SYMBOL(tcp_recvmsg); @@ -3310,3 +3325,107 @@ void __init tcp_init(void) tcp_secret_retiring = &tcp_secret_two; tcp_secret_secondary = &tcp_secret_two; } + +static int tcp_is_local(struct net *net, __be32 addr) { + struct rtable *rt; + struct flowi4 fl4 = { .daddr = addr }; + rt = ip_route_output_key(net, &fl4); + if (IS_ERR_OR_NULL(rt)) + return 0; + return rt->dst.dev && (rt->dst.dev->flags & IFF_LOOPBACK); +} + +#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) +static int tcp_is_local6(struct net *net, struct in6_addr *addr) { + struct rt6_info *rt6 = rt6_lookup(net, addr, addr, 0, 0); + return rt6 && rt6->rt6i_dev && (rt6->rt6i_dev->flags & IFF_LOOPBACK); +} +#endif + +/* + * tcp_nuke_addr - destroy all sockets on the given local address + * if local address is the unspecified address (0.0.0.0 or ::), destroy all + * sockets with local addresses that are not configured. + */ +int tcp_nuke_addr(struct net *net, struct sockaddr *addr) +{ + int family = addr->sa_family; + unsigned int bucket; + + struct in_addr *in; +#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) + struct in6_addr *in6; +#endif + if (family == AF_INET) { + in = &((struct sockaddr_in *)addr)->sin_addr; +#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) + } else if (family == AF_INET6) { + in6 = &((struct sockaddr_in6 *)addr)->sin6_addr; +#endif + } else { + return -EAFNOSUPPORT; + } + + for (bucket = 0; bucket < tcp_hashinfo.ehash_mask; bucket++) { + struct hlist_nulls_node *node; + struct sock *sk; + spinlock_t *lock = inet_ehash_lockp(&tcp_hashinfo, bucket); + +restart: + spin_lock_bh(lock); + sk_nulls_for_each(sk, node, &tcp_hashinfo.ehash[bucket].chain) { + struct inet_sock *inet = inet_sk(sk); + + if (sysctl_ip_dynaddr && sk->sk_state == TCP_SYN_SENT) + continue; + if (sock_flag(sk, SOCK_DEAD)) + continue; + + if (family == AF_INET) { + __be32 s4 = inet->inet_rcv_saddr; + if (s4 == LOOPBACK4_IPV6) + continue; + + if (in->s_addr != s4 && + !(in->s_addr == INADDR_ANY && + !tcp_is_local(net, s4))) + continue; + } + +#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) + if (family == AF_INET6) { + struct in6_addr *s6; + if (!inet->pinet6) + continue; + + s6 = &inet->pinet6->rcv_saddr; + if (ipv6_addr_type(s6) == IPV6_ADDR_MAPPED) + continue; + + if (!ipv6_addr_equal(in6, s6) && + !(ipv6_addr_equal(in6, &in6addr_any) && + !tcp_is_local6(net, s6))) + continue; + } +#endif + + sock_hold(sk); + spin_unlock_bh(lock); + + local_bh_disable(); + bh_lock_sock(sk); + sk->sk_err = ETIMEDOUT; + sk->sk_error_report(sk); + + tcp_done(sk); + bh_unlock_sock(sk); + local_bh_enable(); + sock_put(sk); + + goto restart; + } + spin_unlock_bh(lock); + } + + return 0; +} diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c index 498b927f68be..cf2cf62f33fc 100644 --- a/net/ipv6/addrconf.c +++ b/net/ipv6/addrconf.c @@ -824,12 +824,13 @@ static int ipv6_create_tempaddr(struct inet6_ifaddr *ifp, struct inet6_ifaddr *i { struct inet6_dev *idev = ifp->idev; struct in6_addr addr, *tmpaddr; - unsigned long tmp_prefered_lft, tmp_valid_lft, tmp_cstamp, 
tmp_tstamp, age; + unsigned long tmp_prefered_lft, tmp_valid_lft, tmp_tstamp, age; unsigned long regen_advance; int tmp_plen; int ret = 0; int max_addresses; u32 addr_flags; + unsigned long now = jiffies; write_lock(&idev->lock); if (ift) { @@ -874,7 +875,7 @@ retry: goto out; } memcpy(&addr.s6_addr[8], idev->rndid, 8); - age = (jiffies - ifp->tstamp) / HZ; + age = (now - ifp->tstamp) / HZ; tmp_valid_lft = min_t(__u32, ifp->valid_lft, idev->cnf.temp_valid_lft + age); @@ -884,7 +885,6 @@ retry: idev->cnf.max_desync_factor); tmp_plen = ifp->prefix_len; max_addresses = idev->cnf.max_addresses; - tmp_cstamp = ifp->cstamp; tmp_tstamp = ifp->tstamp; spin_unlock_bh(&ifp->lock); @@ -929,7 +929,7 @@ retry: ift->ifpub = ifp; ift->valid_lft = tmp_valid_lft; ift->prefered_lft = tmp_prefered_lft; - ift->cstamp = tmp_cstamp; + ift->cstamp = now; ift->tstamp = tmp_tstamp; spin_unlock_bh(&ift->lock); @@ -1988,25 +1988,50 @@ ok: #ifdef CONFIG_IPV6_PRIVACY read_lock_bh(&in6_dev->lock); /* update all temporary addresses in the list */ - list_for_each_entry(ift, &in6_dev->tempaddr_list, tmp_list) { - /* - * When adjusting the lifetimes of an existing - * temporary address, only lower the lifetimes. - * Implementations must not increase the - * lifetimes of an existing temporary address - * when processing a Prefix Information Option. - */ + list_for_each_entry(ift, &in6_dev->tempaddr_list, + tmp_list) { + int age, max_valid, max_prefered; + if (ifp != ift->ifpub) continue; + /* + * RFC 4941 section 3.3: + * If a received option will extend the lifetime + * of a public address, the lifetimes of + * temporary addresses should be extended, + * subject to the overall constraint that no + * temporary addresses should ever remain + * "valid" or "preferred" for a time longer than + * (TEMP_VALID_LIFETIME) or + * (TEMP_PREFERRED_LIFETIME - DESYNC_FACTOR), + * respectively. + */ + age = (now - ift->cstamp) / HZ; + max_valid = in6_dev->cnf.temp_valid_lft - age; + if (max_valid < 0) + max_valid = 0; + + max_prefered = in6_dev->cnf.temp_prefered_lft - + in6_dev->cnf.max_desync_factor - + age; + if (max_prefered < 0) + max_prefered = 0; + + if (valid_lft > max_valid) + valid_lft = max_valid; + + if (prefered_lft > max_prefered) + prefered_lft = max_prefered; + spin_lock(&ift->lock); flags = ift->flags; - if (ift->valid_lft > valid_lft && - ift->valid_lft - valid_lft > (jiffies - ift->tstamp) / HZ) - ift->valid_lft = valid_lft + (jiffies - ift->tstamp) / HZ; - if (ift->prefered_lft > prefered_lft && - ift->prefered_lft - prefered_lft > (jiffies - ift->tstamp) / HZ) - ift->prefered_lft = prefered_lft + (jiffies - ift->tstamp) / HZ; + ift->valid_lft = valid_lft; + ift->prefered_lft = prefered_lft; + ift->tstamp = now; + if (prefered_lft > 0) + ift->flags &= ~IFA_F_DEPRECATED; + spin_unlock(&ift->lock); if (!(flags&IFA_F_TENTATIVE)) ipv6_ifa_notify(0, ift); @@ -2014,9 +2039,11 @@ ok: if ((create || list_empty(&in6_dev->tempaddr_list)) && in6_dev->cnf.use_tempaddr > 0) { /* - * When a new public address is created as described in [ADDRCONF], - * also create a new temporary address. Also create a temporary - * address if it's enabled but no temporary address currently exists. + * When a new public address is created as + * described in [ADDRCONF], also create a new + * temporary address. Also create a temporary + * address if it's enabled but no temporary + * address currently exists. 
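To make the RFC 4941 clamp above concrete, here is the same arithmetic as a standalone sketch. The constants mirror the usual defaults (TEMP_VALID_LIFETIME of 7 days, TEMP_PREFERRED_LIFETIME of 1 day, a 600s desync factor), but those are assumptions for illustration; the kernel reads the live values from in6_dev->cnf:

    #include <stdio.h>

    int main(void)
    {
            int temp_valid_lft = 604800;   /* 7 days */
            int temp_prefered_lft = 86400; /* 1 day  */
            int max_desync_factor = 600;
            int age = 172800;              /* temp address is 2 days old */
            int valid_lft = 604800;        /* from the Prefix Info option */
            int prefered_lft = 86400;

            int max_valid = temp_valid_lft - age;
            int max_prefered = temp_prefered_lft - max_desync_factor - age;

            if (max_valid < 0)
                    max_valid = 0;
            if (max_prefered < 0)
                    max_prefered = 0;
            if (valid_lft > max_valid)
                    valid_lft = max_valid;
            if (prefered_lft > max_prefered)
                    prefered_lft = max_prefered;

            /* prints valid=432000 prefered=0: the 2-day-old temporary
             * address may stay valid 5 more days but is deprecated now,
             * matching the IFA_F_DEPRECATED handling above. */
            printf("valid=%d prefered=%d\n", valid_lft, prefered_lft);
            return 0;
    }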
*/ read_unlock_bh(&in6_dev->lock); ipv6_create_tempaddr(ifp, NULL); diff --git a/net/ipv6/af_inet6.c b/net/ipv6/af_inet6.c index 559123644e5d..7e8340ef5a27 100644 --- a/net/ipv6/af_inet6.c +++ b/net/ipv6/af_inet6.c @@ -63,6 +63,20 @@ #include <asm/system.h> #include <linux/mroute6.h> +#ifdef CONFIG_ANDROID_PARANOID_NETWORK +#include <linux/android_aid.h> + +static inline int current_has_network(void) +{ + return in_egroup_p(AID_INET) || capable(CAP_NET_RAW); +} +#else +static inline int current_has_network(void) +{ + return 1; +} +#endif + MODULE_AUTHOR("Cast of dozens"); MODULE_DESCRIPTION("IPv6 protocol stack for Linux"); MODULE_LICENSE("GPL"); @@ -109,6 +123,9 @@ static int inet6_create(struct net *net, struct socket *sock, int protocol, int try_loading_module = 0; int err; + if (!current_has_network()) + return -EACCES; + if (sock->type != SOCK_RAW && sock->type != SOCK_DGRAM && !inet_ehash_secret) @@ -477,6 +494,21 @@ int inet6_getname(struct socket *sock, struct sockaddr *uaddr, EXPORT_SYMBOL(inet6_getname); +int inet6_killaddr_ioctl(struct net *net, void __user *arg) { + struct in6_ifreq ireq; + struct sockaddr_in6 sin6; + + if (!capable(CAP_NET_ADMIN)) + return -EACCES; + + if (copy_from_user(&ireq, arg, sizeof(struct in6_ifreq))) + return -EFAULT; + + sin6.sin6_family = AF_INET6; + ipv6_addr_copy(&sin6.sin6_addr, &ireq.ifr6_addr); + return tcp_nuke_addr(net, (struct sockaddr *) &sin6); +} + int inet6_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg) { struct sock *sk = sock->sk; @@ -501,6 +533,8 @@ int inet6_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg) return addrconf_del_ifaddr(net, (void __user *) arg); case SIOCSIFDSTADDR: return addrconf_set_dstaddr(net, (void __user *) arg); + case SIOCKILLADDR: + return inet6_killaddr_ioctl(net, (void __user *) arg); default: if (!sk->sk_prot->ioctl) return -ENOIOCTLCMD; diff --git a/net/ipv6/netfilter/Kconfig b/net/ipv6/netfilter/Kconfig index 448464844a25..5bbf53169202 100644 --- a/net/ipv6/netfilter/Kconfig +++ b/net/ipv6/netfilter/Kconfig @@ -174,6 +174,18 @@ config IP6_NF_TARGET_REJECT To compile it as a module, choose M here. If unsure, say N. +config IP6_NF_TARGET_REJECT_SKERR + bool "Force socket error when rejecting with icmp*" + depends on IP6_NF_TARGET_REJECT + default n + help + This option enables turning a "--reject-with icmp*" into a matching + socket error also. + The REJECT target normally allows sending an ICMP message. But it + leaves the local socket unaware of any ingress rejects. + + If unsure, say N. + config IP6_NF_MANGLE tristate "Packet mangling" default m if NETFILTER_ADVANCED=n diff --git a/net/ipv6/netfilter/ip6_tables.c b/net/ipv6/netfilter/ip6_tables.c index 94874b0bdcdc..14cb310064f6 100644 --- a/net/ipv6/netfilter/ip6_tables.c +++ b/net/ipv6/netfilter/ip6_tables.c @@ -2292,16 +2292,15 @@ static void __exit ip6_tables_fini(void) * "No next header". * * If target header is found, its offset is set in *offset and return protocol - * number. Otherwise, return -1. + * number. Otherwise, return -ENOENT or -EBADMSG. * * If the first fragment doesn't contain the final protocol header or * NEXTHDR_NONE it is considered invalid. * * Note that non-1st fragment is special case that "the protocol number * of last header" is "next header" field in Fragment header. In this case, - * *offset is meaningless and fragment offset is stored in *fragoff if fragoff - * isn't NULL. - * + * *offset is meaningless. 
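SIOCKILLADDR, wired up above for both address families, is driven from userspace with a plain ifreq ioctl; devinet_ioctl() routes it to tcp_nuke_addr(), which sets sk_err = ETIMEDOUT on every matching socket. A hedged sketch of the presumed caller (Android's connectivity code, roughly): the ioctl number itself is defined outside this net/ diff in the patched sockios headers, and the interface name and address below are placeholders:

    #include <stdio.h>
    #include <string.h>
    #include <unistd.h>
    #include <sys/ioctl.h>
    #include <sys/socket.h>
    #include <netinet/in.h>
    #include <arpa/inet.h>
    #include <net/if.h>
    #include <linux/sockios.h>  /* SIOCKILLADDR, from the patched headers */

    int main(void)
    {
            struct ifreq ifr;
            struct sockaddr_in *sin = (struct sockaddr_in *)&ifr.ifr_addr;
            int fd = socket(AF_INET, SOCK_DGRAM, 0);

            if (fd < 0)
                    return 1;

            memset(&ifr, 0, sizeof(ifr));
            strncpy(ifr.ifr_name, "rmnet0", IFNAMSIZ - 1);
            sin->sin_family = AF_INET;
            inet_pton(AF_INET, "10.0.0.2", &sin->sin_addr);

            /* requires CAP_NET_ADMIN; resets every established TCP
             * socket bound to 10.0.0.2 */
            if (ioctl(fd, SIOCKILLADDR, &ifr) < 0)
                    perror("SIOCKILLADDR");

            close(fd);
            return 0;
    }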
If fragoff is not NULL, the fragment offset is + * stored in *fragoff; if it is NULL, return -EINVAL. */ int ipv6_find_hdr(const struct sk_buff *skb, unsigned int *offset, int target, unsigned short *fragoff) @@ -2342,9 +2341,12 @@ int ipv6_find_hdr(const struct sk_buff *skb, unsigned int *offset, if (target < 0 && ((!ipv6_ext_hdr(hp->nexthdr)) || hp->nexthdr == NEXTHDR_NONE)) { - if (fragoff) + if (fragoff) { *fragoff = _frag_off; - return hp->nexthdr; + return hp->nexthdr; + } else { + return -EINVAL; + } } return -ENOENT; } diff --git a/net/ipv6/netfilter/ip6t_REJECT.c b/net/ipv6/netfilter/ip6t_REJECT.c index a5a4c5dd5396..09d30498c927 100644 --- a/net/ipv6/netfilter/ip6t_REJECT.c +++ b/net/ipv6/netfilter/ip6t_REJECT.c @@ -177,6 +177,15 @@ send_unreach(struct net *net, struct sk_buff *skb_in, unsigned char code, skb_in->dev = net->loopback_dev; icmpv6_send(skb_in, ICMPV6_DEST_UNREACH, code, 0); +#ifdef CONFIG_IP6_NF_TARGET_REJECT_SKERR + if (skb_in->sk) { + icmpv6_err_convert(ICMPV6_DEST_UNREACH, code, + &skb_in->sk->sk_err); + skb_in->sk->sk_error_report(skb_in->sk); + pr_debug("ip6t_REJECT: sk_err=%d for skb=%p sk=%p\n", + skb_in->sk->sk_err, skb_in, skb_in->sk); + } +#endif } static unsigned int diff --git a/net/netfilter/Kconfig b/net/netfilter/Kconfig index 32bff6d86cb2..5bd5c612a9bf 100644 --- a/net/netfilter/Kconfig +++ b/net/netfilter/Kconfig @@ -902,6 +902,8 @@ config NETFILTER_XT_MATCH_OWNER based on who created the socket: the user or group. It is also possible to check whether a socket actually exists. + Conflicts with '"quota, tag, uid" match' + config NETFILTER_XT_MATCH_POLICY tristate 'IPsec "policy" match support' depends on XFRM @@ -935,6 +937,22 @@ config NETFILTER_XT_MATCH_PKTTYPE To compile it as a module, choose M here. If unsure, say N. +config NETFILTER_XT_MATCH_QTAGUID + bool '"quota, tag, owner" match and stats support' + depends on NETFILTER_XT_MATCH_SOCKET + depends on NETFILTER_XT_MATCH_OWNER=n + help + This option replaces the `owner' match. In addition to matching + on uid, it keeps stats based on a tag assigned to a socket. + The full tag is comprised of a UID and an accounting tag. + The tags are assignable to sockets from user space (e.g. a download + manager can assign the socket to another UID for accounting). + Stats and control are done via /proc/net/xt_qtaguid/. + It replaces owner as it takes the same arguments, but should + really be recognized by the iptables tool. + + If unsure, say `N'. + config NETFILTER_XT_MATCH_QUOTA tristate '"quota" match support' depends on NETFILTER_ADVANCED @@ -945,6 +963,30 @@ config NETFILTER_XT_MATCH_QUOTA If you want to compile it as a module, say M here and read <file:Documentation/kbuild/modules.txt>. If unsure, say `N'. +config NETFILTER_XT_MATCH_QUOTA2 + tristate '"quota2" match support' + depends on NETFILTER_ADVANCED + help + This option adds a `quota2' match, which allows to match on a + byte counter correctly and not per CPU. + It allows naming the quotas. + This is based on http://xtables-addons.git.sourceforge.net + + If you want to compile it as a module, say M here and read + <file:Documentation/kbuild/modules.txt>. If unsure, say `N'. + +config NETFILTER_XT_MATCH_QUOTA2_LOG + bool '"quota2" Netfilter LOG support' + depends on NETFILTER_XT_MATCH_QUOTA2 + depends on IP_NF_TARGET_ULOG=n # not yes, not module, just no + default n + help + This option allows `quota2' to log ONCE when a quota limit + is passed. It logs via NETLINK using the NETLINK_NFLOG family. 
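For orientation, the quota2 match keeps the option syntax of its xtables-addons ancestor, so a named byte budget would look something like the following (option names assumed from upstream, not shown in this diff):

    iptables -A OUTPUT -m quota2 --name data_budget --quota 5000000 -j REJECT

xt_qtaguid likewise keeps the owner match's --uid-owner arguments, per its Kconfig text above, while accumulating per-tag stats under /proc/net/xt_qtaguid/.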
+ It logs similarly to how ipt_ULOG would without data. + + If unsure, say `N'. + config NETFILTER_XT_MATCH_RATEEST tristate '"rateest" match support' depends on NETFILTER_ADVANCED diff --git a/net/netfilter/Makefile b/net/netfilter/Makefile index 1a02853df863..6d917176c3b8 100644 --- a/net/netfilter/Makefile +++ b/net/netfilter/Makefile @@ -95,7 +95,9 @@ obj-$(CONFIG_NETFILTER_XT_MATCH_OWNER) += xt_owner.o obj-$(CONFIG_NETFILTER_XT_MATCH_PHYSDEV) += xt_physdev.o obj-$(CONFIG_NETFILTER_XT_MATCH_PKTTYPE) += xt_pkttype.o obj-$(CONFIG_NETFILTER_XT_MATCH_POLICY) += xt_policy.o +obj-$(CONFIG_NETFILTER_XT_MATCH_QTAGUID) += xt_qtaguid_print.o xt_qtaguid.o obj-$(CONFIG_NETFILTER_XT_MATCH_QUOTA) += xt_quota.o +obj-$(CONFIG_NETFILTER_XT_MATCH_QUOTA2) += xt_quota2.o obj-$(CONFIG_NETFILTER_XT_MATCH_RATEEST) += xt_rateest.o obj-$(CONFIG_NETFILTER_XT_MATCH_REALM) += xt_realm.o obj-$(CONFIG_NETFILTER_XT_MATCH_RECENT) += xt_recent.o diff --git a/net/netfilter/xt_qtaguid.c b/net/netfilter/xt_qtaguid.c new file mode 100644 index 000000000000..08086d680c2c --- /dev/null +++ b/net/netfilter/xt_qtaguid.c @@ -0,0 +1,2785 @@ +/* + * Kernel iptables module to track stats for packets based on user tags. + * + * (C) 2011 Google, Inc + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +/* + * There are run-time debug flags enabled via the debug_mask module param, or + * via the DEFAULT_DEBUG_MASK. See xt_qtaguid_internal.h. + */ +#define DEBUG + +#include <linux/file.h> +#include <linux/inetdevice.h> +#include <linux/module.h> +#include <linux/netfilter/x_tables.h> +#include <linux/netfilter/xt_qtaguid.h> +#include <linux/skbuff.h> +#include <linux/workqueue.h> +#include <net/addrconf.h> +#include <net/sock.h> +#include <net/tcp.h> +#include <net/udp.h> + +#include <linux/netfilter/xt_socket.h> +#include "xt_qtaguid_internal.h" +#include "xt_qtaguid_print.h" + +/* + * We only use the xt_socket funcs within a similar context to avoid unexpected + * return values. + */ +#define XT_SOCKET_SUPPORTED_HOOKS \ + ((1 << NF_INET_PRE_ROUTING) | (1 << NF_INET_LOCAL_IN)) + + +static const char *module_procdirname = "xt_qtaguid"; +static struct proc_dir_entry *xt_qtaguid_procdir; + +static unsigned int proc_iface_perms = S_IRUGO; +module_param_named(iface_perms, proc_iface_perms, uint, S_IRUGO | S_IWUSR); + +static struct proc_dir_entry *xt_qtaguid_stats_file; +static unsigned int proc_stats_perms = S_IRUGO; +module_param_named(stats_perms, proc_stats_perms, uint, S_IRUGO | S_IWUSR); + +static struct proc_dir_entry *xt_qtaguid_ctrl_file; +#ifdef CONFIG_ANDROID_PARANOID_NETWORK +static unsigned int proc_ctrl_perms = S_IRUGO | S_IWUGO; +#else +static unsigned int proc_ctrl_perms = S_IRUGO | S_IWUSR; +#endif +module_param_named(ctrl_perms, proc_ctrl_perms, uint, S_IRUGO | S_IWUSR); + +#ifdef CONFIG_ANDROID_PARANOID_NETWORK +#include <linux/android_aid.h> +static gid_t proc_stats_readall_gid = AID_NET_BW_STATS; +static gid_t proc_ctrl_write_gid = AID_NET_BW_ACCT; +#else +/* 0 means, don't limit anybody */ +static gid_t proc_stats_readall_gid; +static gid_t proc_ctrl_write_gid; +#endif +module_param_named(stats_readall_gid, proc_stats_readall_gid, uint, + S_IRUGO | S_IWUSR); +module_param_named(ctrl_write_gid, proc_ctrl_write_gid, uint, + S_IRUGO | S_IWUSR); + +/* + * Limit the number of active tags (via socket tags) for a given UID. + * Multiple processes could share the UID. 
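Since NETFILTER_XT_MATCH_QTAGUID is a bool option, this code is always built in; the module_param_named() knobs in the file therefore surface as /sys/module/xt_qtaguid/parameters/<name> files (writable by root where marked S_IWUSR) or as xt_qtaguid.<name>=... on the kernel command line, rather than as insmod arguments. Raising debug_mask at run time, for instance, needs no rebuild.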
+ */ +static int max_sock_tags = DEFAULT_MAX_SOCK_TAGS; +module_param(max_sock_tags, int, S_IRUGO | S_IWUSR); + +/* + * After the kernel has initiallized this module, it is still possible + * to make it passive. + * Setting passive to Y: + * - the iface stats handling will not act on notifications. + * - iptables matches will never match. + * - ctrl commands silently succeed. + * - stats are always empty. + * This is mostly usefull when a bug is suspected. + */ +static bool module_passive; +module_param_named(passive, module_passive, bool, S_IRUGO | S_IWUSR); + +/* + * Control how qtaguid data is tracked per proc/uid. + * Setting tag_tracking_passive to Y: + * - don't create proc specific structs to track tags + * - don't check that active tag stats exceed some limits. + * - don't clean up socket tags on process exits. + * This is mostly usefull when a bug is suspected. + */ +static bool qtu_proc_handling_passive; +module_param_named(tag_tracking_passive, qtu_proc_handling_passive, bool, + S_IRUGO | S_IWUSR); + +#define QTU_DEV_NAME "xt_qtaguid" + +uint qtaguid_debug_mask = DEFAULT_DEBUG_MASK; +module_param_named(debug_mask, qtaguid_debug_mask, uint, S_IRUGO | S_IWUSR); + +/*---------------------------------------------------------------------------*/ +static const char *iface_stat_procdirname = "iface_stat"; +static struct proc_dir_entry *iface_stat_procdir; +static const char *iface_stat_all_procfilename = "iface_stat_all"; +static struct proc_dir_entry *iface_stat_all_procfile; + +/* + * Ordering of locks: + * outer locks: + * iface_stat_list_lock + * sock_tag_list_lock + * inner locks: + * uid_tag_data_tree_lock + * tag_counter_set_list_lock + * Notice how sock_tag_list_lock is held sometimes when uid_tag_data_tree_lock + * is acquired. + * + * Call tree with all lock holders as of 2011-09-25: + * + * iface_stat_all_proc_read() + * iface_stat_list_lock + * (struct iface_stat) + * + * qtaguid_ctrl_proc_read() + * sock_tag_list_lock + * (sock_tag_tree) + * (struct proc_qtu_data->sock_tag_list) + * prdebug_full_state() + * sock_tag_list_lock + * (sock_tag_tree) + * uid_tag_data_tree_lock + * (uid_tag_data_tree) + * (proc_qtu_data_tree) + * iface_stat_list_lock + * + * qtaguid_stats_proc_read() + * iface_stat_list_lock + * struct iface_stat->tag_stat_list_lock + * + * qtudev_open() + * uid_tag_data_tree_lock + * + * qtudev_release() + * sock_tag_data_list_lock + * uid_tag_data_tree_lock + * prdebug_full_state() + * sock_tag_list_lock + * uid_tag_data_tree_lock + * iface_stat_list_lock + * + * iface_netdev_event_handler() + * iface_stat_create() + * iface_stat_list_lock + * iface_stat_update() + * iface_stat_list_lock + * + * iface_inetaddr_event_handler() + * iface_stat_create() + * iface_stat_list_lock + * iface_stat_update() + * iface_stat_list_lock + * + * iface_inet6addr_event_handler() + * iface_stat_create_ipv6() + * iface_stat_list_lock + * iface_stat_update() + * iface_stat_list_lock + * + * qtaguid_mt() + * account_for_uid() + * if_tag_stat_update() + * get_sock_stat() + * sock_tag_list_lock + * struct iface_stat->tag_stat_list_lock + * tag_stat_update() + * get_active_counter_set() + * tag_counter_set_list_lock + * tag_stat_update() + * get_active_counter_set() + * tag_counter_set_list_lock + * + * + * qtaguid_ctrl_parse() + * ctrl_cmd_delete() + * sock_tag_list_lock + * tag_counter_set_list_lock + * iface_stat_list_lock + * struct iface_stat->tag_stat_list_lock + * uid_tag_data_tree_lock + * ctrl_cmd_counter_set() + * tag_counter_set_list_lock + * ctrl_cmd_tag() + * 
sock_tag_list_lock + * (sock_tag_tree) + * get_tag_ref() + * uid_tag_data_tree_lock + * (uid_tag_data_tree) + * uid_tag_data_tree_lock + * (proc_qtu_data_tree) + * ctrl_cmd_untag() + * sock_tag_list_lock + * uid_tag_data_tree_lock + * + */ +static LIST_HEAD(iface_stat_list); +static DEFINE_SPINLOCK(iface_stat_list_lock); + +static struct rb_root sock_tag_tree = RB_ROOT; +static DEFINE_SPINLOCK(sock_tag_list_lock); + +static struct rb_root tag_counter_set_tree = RB_ROOT; +static DEFINE_SPINLOCK(tag_counter_set_list_lock); + +static struct rb_root uid_tag_data_tree = RB_ROOT; +static DEFINE_SPINLOCK(uid_tag_data_tree_lock); + +static struct rb_root proc_qtu_data_tree = RB_ROOT; +/* No proc_qtu_data_tree_lock; use uid_tag_data_tree_lock */ + +static struct qtaguid_event_counts qtu_events; +/*----------------------------------------------*/ +static bool can_manipulate_uids(void) +{ + /* root pwnd */ + return unlikely(!current_fsuid()) || unlikely(!proc_ctrl_write_gid) + || in_egroup_p(proc_ctrl_write_gid); +} + +static bool can_impersonate_uid(uid_t uid) +{ + return uid == current_fsuid() || can_manipulate_uids(); +} + +static bool can_read_other_uid_stats(uid_t uid) +{ + /* root pwnd */ + return unlikely(!current_fsuid()) || uid == current_fsuid() + || unlikely(!proc_stats_readall_gid) + || in_egroup_p(proc_stats_readall_gid); +} + +static inline void dc_add_byte_packets(struct data_counters *counters, int set, + enum ifs_tx_rx direction, + enum ifs_proto ifs_proto, + int bytes, + int packets) +{ + counters->bpc[set][direction][ifs_proto].bytes += bytes; + counters->bpc[set][direction][ifs_proto].packets += packets; +} + +static inline uint64_t dc_sum_bytes(struct data_counters *counters, + int set, + enum ifs_tx_rx direction) +{ + return counters->bpc[set][direction][IFS_TCP].bytes + + counters->bpc[set][direction][IFS_UDP].bytes + + counters->bpc[set][direction][IFS_PROTO_OTHER].bytes; +} + +static inline uint64_t dc_sum_packets(struct data_counters *counters, + int set, + enum ifs_tx_rx direction) +{ + return counters->bpc[set][direction][IFS_TCP].packets + + counters->bpc[set][direction][IFS_UDP].packets + + counters->bpc[set][direction][IFS_PROTO_OTHER].packets; +} + +static struct tag_node *tag_node_tree_search(struct rb_root *root, tag_t tag) +{ + struct rb_node *node = root->rb_node; + + while (node) { + struct tag_node *data = rb_entry(node, struct tag_node, node); + int result; + RB_DEBUG("qtaguid: tag_node_tree_search(0x%llx): " + " node=%p data=%p\n", tag, node, data); + result = tag_compare(tag, data->tag); + RB_DEBUG("qtaguid: tag_node_tree_search(0x%llx): " + " data.tag=0x%llx (uid=%u) res=%d\n", + tag, data->tag, get_uid_from_tag(data->tag), result); + if (result < 0) + node = node->rb_left; + else if (result > 0) + node = node->rb_right; + else + return data; + } + return NULL; +} + +static void tag_node_tree_insert(struct tag_node *data, struct rb_root *root) +{ + struct rb_node **new = &(root->rb_node), *parent = NULL; + + /* Figure out where to put new node */ + while (*new) { + struct tag_node *this = rb_entry(*new, struct tag_node, + node); + int result = tag_compare(data->tag, this->tag); + RB_DEBUG("qtaguid: %s(): tag=0x%llx" + " (uid=%u)\n", __func__, + this->tag, + get_uid_from_tag(this->tag)); + parent = *new; + if (result < 0) + new = &((*new)->rb_left); + else if (result > 0) + new = &((*new)->rb_right); + else + BUG(); + } + + /* Add new node and rebalance tree. 
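Entries reach sock_tag_tree when userspace writes to /proc/net/xt_qtaguid/ctrl. The command grammar is parsed by ctrl_cmd_tag(), which lies outside this excerpt; the sketch below follows the "t <sockfd> <tag> <uid>" form used by Android's libcutils wrapper, so treat the exact format as an assumption:

    #include <stdio.h>
    #include <stdint.h>
    #include <unistd.h>
    #include <sys/socket.h>

    static int qtaguid_tag_socket(int sockfd, uint32_t acct_tag, uint32_t uid)
    {
            FILE *f = fopen("/proc/net/xt_qtaguid/ctrl", "w");
            int ret;

            if (!f)
                    return -1;
            /* full tag = acct_tag in the high 32 bits, uid in the low 32 */
            ret = fprintf(f, "t %d %llu %u", sockfd,
                          (unsigned long long)acct_tag << 32, uid);
            fclose(f);
            return ret < 0 ? -1 : 0;
    }

    int main(void)
    {
            int s = socket(AF_INET, SOCK_STREAM, 0);

            if (s < 0 || qtaguid_tag_socket(s, 42, getuid()))
                    return 1;
            /* traffic on s is now charged to {acct_tag 42, this uid} */
            close(s);
            return 0;
    }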
*/ + rb_link_node(&data->node, parent, new); + rb_insert_color(&data->node, root); +} + +static void tag_stat_tree_insert(struct tag_stat *data, struct rb_root *root) +{ + tag_node_tree_insert(&data->tn, root); +} + +static struct tag_stat *tag_stat_tree_search(struct rb_root *root, tag_t tag) +{ + struct tag_node *node = tag_node_tree_search(root, tag); + if (!node) + return NULL; + return rb_entry(&node->node, struct tag_stat, tn.node); +} + +static void tag_counter_set_tree_insert(struct tag_counter_set *data, + struct rb_root *root) +{ + tag_node_tree_insert(&data->tn, root); +} + +static struct tag_counter_set *tag_counter_set_tree_search(struct rb_root *root, + tag_t tag) +{ + struct tag_node *node = tag_node_tree_search(root, tag); + if (!node) + return NULL; + return rb_entry(&node->node, struct tag_counter_set, tn.node); + +} + +static void tag_ref_tree_insert(struct tag_ref *data, struct rb_root *root) +{ + tag_node_tree_insert(&data->tn, root); +} + +static struct tag_ref *tag_ref_tree_search(struct rb_root *root, tag_t tag) +{ + struct tag_node *node = tag_node_tree_search(root, tag); + if (!node) + return NULL; + return rb_entry(&node->node, struct tag_ref, tn.node); +} + +static struct sock_tag *sock_tag_tree_search(struct rb_root *root, + const struct sock *sk) +{ + struct rb_node *node = root->rb_node; + + while (node) { + struct sock_tag *data = rb_entry(node, struct sock_tag, + sock_node); + if (sk < data->sk) + node = node->rb_left; + else if (sk > data->sk) + node = node->rb_right; + else + return data; + } + return NULL; +} + +static void sock_tag_tree_insert(struct sock_tag *data, struct rb_root *root) +{ + struct rb_node **new = &(root->rb_node), *parent = NULL; + + /* Figure out where to put new node */ + while (*new) { + struct sock_tag *this = rb_entry(*new, struct sock_tag, + sock_node); + parent = *new; + if (data->sk < this->sk) + new = &((*new)->rb_left); + else if (data->sk > this->sk) + new = &((*new)->rb_right); + else + BUG(); + } + + /* Add new node and rebalance tree. */ + rb_link_node(&data->sock_node, parent, new); + rb_insert_color(&data->sock_node, root); +} + +static void sock_tag_tree_erase(struct rb_root *st_to_free_tree) +{ + struct rb_node *node; + struct sock_tag *st_entry; + + node = rb_first(st_to_free_tree); + while (node) { + st_entry = rb_entry(node, struct sock_tag, sock_node); + node = rb_next(node); + CT_DEBUG("qtaguid: %s(): " + "erase st: sk=%p tag=0x%llx (uid=%u)\n", __func__, + st_entry->sk, + st_entry->tag, + get_uid_from_tag(st_entry->tag)); + rb_erase(&st_entry->sock_node, st_to_free_tree); + sockfd_put(st_entry->socket); + kfree(st_entry); + } +} + +static struct proc_qtu_data *proc_qtu_data_tree_search(struct rb_root *root, + const pid_t pid) +{ + struct rb_node *node = root->rb_node; + + while (node) { + struct proc_qtu_data *data = rb_entry(node, + struct proc_qtu_data, + node); + if (pid < data->pid) + node = node->rb_left; + else if (pid > data->pid) + node = node->rb_right; + else + return data; + } + return NULL; +} + +static void proc_qtu_data_tree_insert(struct proc_qtu_data *data, + struct rb_root *root) +{ + struct rb_node **new = &(root->rb_node), *parent = NULL; + + /* Figure out where to put new node */ + while (*new) { + struct proc_qtu_data *this = rb_entry(*new, + struct proc_qtu_data, + node); + parent = *new; + if (data->pid < this->pid) + new = &((*new)->rb_left); + else if (data->pid > this->pid) + new = &((*new)->rb_right); + else + BUG(); + } + + /* Add new node and rebalance tree. 
*/ + rb_link_node(&data->node, parent, new); + rb_insert_color(&data->node, root); +} + +static void uid_tag_data_tree_insert(struct uid_tag_data *data, + struct rb_root *root) +{ + struct rb_node **new = &(root->rb_node), *parent = NULL; + + /* Figure out where to put new node */ + while (*new) { + struct uid_tag_data *this = rb_entry(*new, + struct uid_tag_data, + node); + parent = *new; + if (data->uid < this->uid) + new = &((*new)->rb_left); + else if (data->uid > this->uid) + new = &((*new)->rb_right); + else + BUG(); + } + + /* Add new node and rebalance tree. */ + rb_link_node(&data->node, parent, new); + rb_insert_color(&data->node, root); +} + +static struct uid_tag_data *uid_tag_data_tree_search(struct rb_root *root, + uid_t uid) +{ + struct rb_node *node = root->rb_node; + + while (node) { + struct uid_tag_data *data = rb_entry(node, + struct uid_tag_data, + node); + if (uid < data->uid) + node = node->rb_left; + else if (uid > data->uid) + node = node->rb_right; + else + return data; + } + return NULL; +} + +/* + * Allocates a new uid_tag_data struct if needed. + * Returns a pointer to the found or allocated uid_tag_data. + * Returns a PTR_ERR on failures, and lock is not held. + * If found is not NULL: + * sets *found to true if not allocated. + * sets *found to false if allocated. + */ +struct uid_tag_data *get_uid_data(uid_t uid, bool *found_res) +{ + struct uid_tag_data *utd_entry; + + /* Look for top level uid_tag_data for the UID */ + utd_entry = uid_tag_data_tree_search(&uid_tag_data_tree, uid); + DR_DEBUG("qtaguid: get_uid_data(%u) utd=%p\n", uid, utd_entry); + + if (found_res) + *found_res = utd_entry; + if (utd_entry) + return utd_entry; + + utd_entry = kzalloc(sizeof(*utd_entry), GFP_ATOMIC); + if (!utd_entry) { + pr_err("qtaguid: get_uid_data(%u): " + "tag data alloc failed\n", uid); + return ERR_PTR(-ENOMEM); + } + + utd_entry->uid = uid; + utd_entry->tag_ref_tree = RB_ROOT; + uid_tag_data_tree_insert(utd_entry, &uid_tag_data_tree); + DR_DEBUG("qtaguid: get_uid_data(%u) new utd=%p\n", uid, utd_entry); + return utd_entry; +} + +/* Never returns NULL. Either PTR_ERR or a valid ptr. */ +static struct tag_ref *new_tag_ref(tag_t new_tag, + struct uid_tag_data *utd_entry) +{ + struct tag_ref *tr_entry; + int res; + + if (utd_entry->num_active_tags + 1 > max_sock_tags) { + pr_info("qtaguid: new_tag_ref(0x%llx): " + "tag ref alloc quota exceeded. 
max=%d\n", + new_tag, max_sock_tags); + res = -EMFILE; + goto err_res; + + } + + tr_entry = kzalloc(sizeof(*tr_entry), GFP_ATOMIC); + if (!tr_entry) { + pr_err("qtaguid: new_tag_ref(0x%llx): " + "tag ref alloc failed\n", + new_tag); + res = -ENOMEM; + goto err_res; + } + tr_entry->tn.tag = new_tag; + /* tr_entry->num_sock_tags handled by caller */ + utd_entry->num_active_tags++; + tag_ref_tree_insert(tr_entry, &utd_entry->tag_ref_tree); + DR_DEBUG("qtaguid: new_tag_ref(0x%llx): " + " inserted new tag ref %p\n", + new_tag, tr_entry); + return tr_entry; + +err_res: + return ERR_PTR(res); +} + +static struct tag_ref *lookup_tag_ref(tag_t full_tag, + struct uid_tag_data **utd_res) +{ + struct uid_tag_data *utd_entry; + struct tag_ref *tr_entry; + bool found_utd; + uid_t uid = get_uid_from_tag(full_tag); + + DR_DEBUG("qtaguid: lookup_tag_ref(tag=0x%llx (uid=%u))\n", + full_tag, uid); + + utd_entry = get_uid_data(uid, &found_utd); + if (IS_ERR_OR_NULL(utd_entry)) { + if (utd_res) + *utd_res = utd_entry; + return NULL; + } + + tr_entry = tag_ref_tree_search(&utd_entry->tag_ref_tree, full_tag); + if (utd_res) + *utd_res = utd_entry; + DR_DEBUG("qtaguid: lookup_tag_ref(0x%llx) utd_entry=%p tr_entry=%p\n", + full_tag, utd_entry, tr_entry); + return tr_entry; +} + +/* Never returns NULL. Either PTR_ERR or a valid ptr. */ +static struct tag_ref *get_tag_ref(tag_t full_tag, + struct uid_tag_data **utd_res) +{ + struct uid_tag_data *utd_entry; + struct tag_ref *tr_entry; + + DR_DEBUG("qtaguid: get_tag_ref(0x%llx)\n", + full_tag); + spin_lock_bh(&uid_tag_data_tree_lock); + tr_entry = lookup_tag_ref(full_tag, &utd_entry); + BUG_ON(IS_ERR_OR_NULL(utd_entry)); + if (!tr_entry) + tr_entry = new_tag_ref(full_tag, utd_entry); + + spin_unlock_bh(&uid_tag_data_tree_lock); + if (utd_res) + *utd_res = utd_entry; + DR_DEBUG("qtaguid: get_tag_ref(0x%llx) utd=%p tr=%p\n", + full_tag, utd_entry, tr_entry); + return tr_entry; +} + +/* Checks and maybe frees the UID Tag Data entry */ +static void put_utd_entry(struct uid_tag_data *utd_entry) +{ + /* Are we done with the UID tag data entry? */ + if (RB_EMPTY_ROOT(&utd_entry->tag_ref_tree) && + !utd_entry->num_pqd) { + DR_DEBUG("qtaguid: %s(): " + "erase utd_entry=%p uid=%u " + "by pid=%u tgid=%u uid=%u\n", __func__, + utd_entry, utd_entry->uid, + current->pid, current->tgid, current_fsuid()); + BUG_ON(utd_entry->num_active_tags); + rb_erase(&utd_entry->node, &uid_tag_data_tree); + kfree(utd_entry); + } else { + DR_DEBUG("qtaguid: %s(): " + "utd_entry=%p still has %d tags %d proc_qtu_data\n", + __func__, utd_entry, utd_entry->num_active_tags, + utd_entry->num_pqd); + BUG_ON(!(utd_entry->num_active_tags || + utd_entry->num_pqd)); + } +} + +/* + * If no sock_tags are using this tag_ref, + * decrements refcount of utd_entry, removes tr_entry + * from utd_entry->tag_ref_tree and frees. 
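The tag helpers used throughout (make_atag_from_value(), combine_atag_with_uid(), get_uid_from_tag(), get_atag_from_tag(), get_utag_from_tag()) are defined in xt_qtaguid_internal.h, which this diff does not include. Their call sites imply a 64-bit tag with the accounting tag in the high half and the uid in the low half; a presumed reconstruction, for orientation only:

    #include <stdio.h>
    #include <stdint.h>

    typedef uint64_t tag_t;

    #define TAG_UID_MASK 0xffffffffULL

    static tag_t make_atag_from_value(uint32_t value)
    {
            return (tag_t)value << 32;       /* acct tag: high 32 bits */
    }

    static tag_t combine_atag_with_uid(tag_t acct_tag, uint32_t uid)
    {
            return acct_tag | uid;           /* uid: low 32 bits */
    }

    static uint32_t get_uid_from_tag(tag_t tag)
    {
            return tag & TAG_UID_MASK;
    }

    static tag_t get_atag_from_tag(tag_t tag)
    {
            return tag & ~TAG_UID_MASK;
    }

    static tag_t get_utag_from_tag(tag_t tag)
    {
            return tag & TAG_UID_MASK;       /* {0, uid}: acct tag zeroed */
    }

    int main(void)
    {
            tag_t t = combine_atag_with_uid(make_atag_from_value(42), 10005);

            printf("tag=0x%llx uid=%u atag=0x%llx\n",
                   (unsigned long long)t, get_uid_from_tag(t),
                   (unsigned long long)get_atag_from_tag(t));
            return 0;
    }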
+ */ +static void free_tag_ref_from_utd_entry(struct tag_ref *tr_entry, + struct uid_tag_data *utd_entry) +{ + DR_DEBUG("qtaguid: %s(): %p tag=0x%llx (uid=%u)\n", __func__, + tr_entry, tr_entry->tn.tag, + get_uid_from_tag(tr_entry->tn.tag)); + if (!tr_entry->num_sock_tags) { + BUG_ON(!utd_entry->num_active_tags); + utd_entry->num_active_tags--; + rb_erase(&tr_entry->tn.node, &utd_entry->tag_ref_tree); + DR_DEBUG("qtaguid: %s(): erased %p\n", __func__, tr_entry); + kfree(tr_entry); + } +} + +static void put_tag_ref_tree(tag_t full_tag, struct uid_tag_data *utd_entry) +{ + struct rb_node *node; + struct tag_ref *tr_entry; + tag_t acct_tag; + + DR_DEBUG("qtaguid: %s(tag=0x%llx (uid=%u))\n", __func__, + full_tag, get_uid_from_tag(full_tag)); + acct_tag = get_atag_from_tag(full_tag); + node = rb_first(&utd_entry->tag_ref_tree); + while (node) { + tr_entry = rb_entry(node, struct tag_ref, tn.node); + node = rb_next(node); + if (!acct_tag || tr_entry->tn.tag == full_tag) + free_tag_ref_from_utd_entry(tr_entry, utd_entry); + } +} + +static int read_proc_u64(char *page, char **start, off_t off, + int count, int *eof, void *data) +{ + int len; + uint64_t value; + char *p = page; + uint64_t *iface_entry = data; + + if (!data) + return 0; + + value = *iface_entry; + p += sprintf(p, "%llu\n", value); + len = (p - page) - off; + *eof = (len <= count) ? 1 : 0; + *start = page + off; + return len; +} + +static int read_proc_bool(char *page, char **start, off_t off, + int count, int *eof, void *data) +{ + int len; + bool value; + char *p = page; + bool *bool_entry = data; + + if (!data) + return 0; + + value = *bool_entry; + p += sprintf(p, "%u\n", value); + len = (p - page) - off; + *eof = (len <= count) ? 1 : 0; + *start = page + off; + return len; +} + +static int get_active_counter_set(tag_t tag) +{ + int active_set = 0; + struct tag_counter_set *tcs; + + MT_DEBUG("qtaguid: get_active_counter_set(tag=0x%llx)" + " (uid=%u)\n", + tag, get_uid_from_tag(tag)); + /* For now we only handle UID tags for active sets */ + tag = get_utag_from_tag(tag); + spin_lock_bh(&tag_counter_set_list_lock); + tcs = tag_counter_set_tree_search(&tag_counter_set_tree, tag); + if (tcs) + active_set = tcs->active_set; + spin_unlock_bh(&tag_counter_set_list_lock); + return active_set; +} + +/* + * Find the entry for tracking the specified interface. 
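Note that get_active_counter_set() above only ever consults the {0, uid} entry, so the active counter set is a per-uid switch (Android presumably flips it between background and foreground accounting). Selecting it is another ctrl write; the "s <set> <uid>" form again follows the libcutils wrapper rather than anything shown in this hunk:

    #include <stdio.h>

    /* hypothetical helper: charge subsequent traffic for uid to set */
    static int qtaguid_set_counter_set(int set, unsigned int uid)
    {
            FILE *f = fopen("/proc/net/xt_qtaguid/ctrl", "w");
            int ret;

            if (!f)
                    return -1;
            ret = fprintf(f, "s %d %u", set, uid);
            fclose(f);
            return ret < 0 ? -1 : 0;
    }

    int main(void)
    {
            return qtaguid_set_counter_set(1, 10005);
    }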
+ * Caller must hold iface_stat_list_lock + */ +static struct iface_stat *get_iface_entry(const char *ifname) +{ + struct iface_stat *iface_entry; + + /* Find the entry for tracking the specified tag within the interface */ + if (ifname == NULL) { + pr_info("qtaguid: iface_stat: get() NULL device name\n"); + return NULL; + } + + /* Iterate over interfaces */ + list_for_each_entry(iface_entry, &iface_stat_list, list) { + if (!strcmp(ifname, iface_entry->ifname)) + goto done; + } + iface_entry = NULL; +done: + return iface_entry; +} + +static int iface_stat_all_proc_read(char *page, char **num_items_returned, + off_t items_to_skip, int char_count, + int *eof, void *data) +{ + char *outp = page; + int item_index = 0; + int len; + struct iface_stat *iface_entry; + struct rtnl_link_stats64 dev_stats, *stats; + struct rtnl_link_stats64 no_dev_stats = {0}; + + if (unlikely(module_passive)) { + *eof = 1; + return 0; + } + + CT_DEBUG("qtaguid:proc iface_stat_all " + "page=%p *num_items_returned=%p off=%ld " + "char_count=%d *eof=%d\n", page, *num_items_returned, + items_to_skip, char_count, *eof); + + if (*eof) + return 0; + + /* + * This lock will prevent iface_stat_update() from changing active, + * and in turn prevent an interface from unregistering itself. + */ + spin_lock_bh(&iface_stat_list_lock); + list_for_each_entry(iface_entry, &iface_stat_list, list) { + if (item_index++ < items_to_skip) + continue; + + if (iface_entry->active) { + stats = dev_get_stats(iface_entry->net_dev, + &dev_stats); + } else { + stats = &no_dev_stats; + } + len = snprintf(outp, char_count, + "%s %d " + "%llu %llu %llu %llu " + "%llu %llu %llu %llu\n", + iface_entry->ifname, + iface_entry->active, + iface_entry->totals[IFS_RX].bytes, + iface_entry->totals[IFS_RX].packets, + iface_entry->totals[IFS_TX].bytes, + iface_entry->totals[IFS_TX].packets, + stats->rx_bytes, stats->rx_packets, + stats->tx_bytes, stats->tx_packets); + if (len >= char_count) { + spin_unlock_bh(&iface_stat_list_lock); + *outp = '\0'; + return outp - page; + } + outp += len; + char_count -= len; + (*num_items_returned)++; + } + spin_unlock_bh(&iface_stat_list_lock); + + *eof = 1; + return outp - page; +} + +static void iface_create_proc_worker(struct work_struct *work) +{ + struct proc_dir_entry *proc_entry; + struct iface_stat_work *isw = container_of(work, struct iface_stat_work, + iface_work); + struct iface_stat *new_iface = isw->iface_entry; + + /* iface_entries are not deleted, so safe to manipulate. */ + proc_entry = proc_mkdir(new_iface->ifname, iface_stat_procdir); + if (IS_ERR_OR_NULL(proc_entry)) { + pr_err("qtaguid: iface_stat: create_proc(): alloc failed.\n"); + kfree(isw); + return; + } + + new_iface->proc_ptr = proc_entry; + + create_proc_read_entry("tx_bytes", proc_iface_perms, proc_entry, + read_proc_u64, &new_iface->totals[IFS_TX].bytes); + create_proc_read_entry("rx_bytes", proc_iface_perms, proc_entry, + read_proc_u64, &new_iface->totals[IFS_RX].bytes); + create_proc_read_entry("tx_packets", proc_iface_perms, proc_entry, + read_proc_u64, &new_iface->totals[IFS_TX].packets); + create_proc_read_entry("rx_packets", proc_iface_perms, proc_entry, + read_proc_u64, &new_iface->totals[IFS_RX].packets); + create_proc_read_entry("active", proc_iface_perms, proc_entry, + read_proc_bool, &new_iface->active); + + IF_DEBUG("qtaguid: iface_stat: create_proc(): done " + "entry=%p dev=%s\n", new_iface, new_iface->ifname); + kfree(isw); +} + +/* + * Will set the entry's active state, and + * update the net_dev accordingly also. 
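The iface_stat_all record layout is fixed by the snprintf() in iface_stat_all_proc_read() above: interface name, active flag, the four stashed totals (rx/tx bytes and packets), then the four live dev_get_stats() counters. A userspace parser sketch, with the file path taken from the /proc/net/xt_qtaguid/ location named in the Kconfig help:

    #include <stdio.h>
    #include <inttypes.h>

    int main(void)
    {
            FILE *f = fopen("/proc/net/xt_qtaguid/iface_stat_all", "r");
            char ifname[32];
            int active;
            uint64_t rxb, rxp, txb, txp, drxb, drxp, dtxb, dtxp;

            if (!f)
                    return 1;
            while (fscanf(f, "%31s %d %" SCNu64 " %" SCNu64 " %" SCNu64
                          " %" SCNu64 " %" SCNu64 " %" SCNu64 " %" SCNu64
                          " %" SCNu64, ifname, &active, &rxb, &rxp,
                          &txb, &txp, &drxb, &drxp, &dtxb, &dtxp) == 10) {
                    /* totals accumulate at NETDEV_DOWN/UNREGISTER; the
                     * dev stats cover the current registration, so the
                     * running total is roughly their sum */
                    printf("%s: active=%d rx=%" PRIu64 " tx=%" PRIu64 "\n",
                           ifname, active, rxb + drxb, txb + dtxb);
            }
            fclose(f);
            return 0;
    }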
+ */ +static void _iface_stat_set_active(struct iface_stat *entry, + struct net_device *net_dev, + bool activate) +{ + if (activate) { + entry->net_dev = net_dev; + entry->active = true; + IF_DEBUG("qtaguid: %s(%s): " + "enable tracking. rfcnt=%d\n", __func__, + entry->ifname, + percpu_read(*net_dev->pcpu_refcnt)); + } else { + entry->active = false; + entry->net_dev = NULL; + IF_DEBUG("qtaguid: %s(%s): " + "disable tracking. rfcnt=%d\n", __func__, + entry->ifname, + percpu_read(*net_dev->pcpu_refcnt)); + + } +} + +/* Caller must hold iface_stat_list_lock */ +static struct iface_stat *iface_alloc(struct net_device *net_dev) +{ + struct iface_stat *new_iface; + struct iface_stat_work *isw; + + new_iface = kzalloc(sizeof(*new_iface), GFP_ATOMIC); + if (new_iface == NULL) { + pr_err("qtaguid: iface_stat: create(%s): " + "iface_stat alloc failed\n", net_dev->name); + return NULL; + } + new_iface->ifname = kstrdup(net_dev->name, GFP_ATOMIC); + if (new_iface->ifname == NULL) { + pr_err("qtaguid: iface_stat: create(%s): " + "ifname alloc failed\n", net_dev->name); + kfree(new_iface); + return NULL; + } + spin_lock_init(&new_iface->tag_stat_list_lock); + new_iface->tag_stat_tree = RB_ROOT; + _iface_stat_set_active(new_iface, net_dev, true); + + /* + * ipv6 notifier chains are atomic :( + * No create_proc_read_entry() for you! + */ + isw = kmalloc(sizeof(*isw), GFP_ATOMIC); + if (!isw) { + pr_err("qtaguid: iface_stat: create(%s): " + "work alloc failed\n", new_iface->ifname); + _iface_stat_set_active(new_iface, net_dev, false); + kfree(new_iface->ifname); + kfree(new_iface); + return NULL; + } + isw->iface_entry = new_iface; + INIT_WORK(&isw->iface_work, iface_create_proc_worker); + schedule_work(&isw->iface_work); + list_add(&new_iface->list, &iface_stat_list); + return new_iface; +} + +static void iface_check_stats_reset_and_adjust(struct net_device *net_dev, + struct iface_stat *iface) +{ + struct rtnl_link_stats64 dev_stats, *stats; + bool stats_rewound; + + stats = dev_get_stats(net_dev, &dev_stats); + /* No empty packets */ + stats_rewound = + (stats->rx_bytes < iface->last_known[IFS_RX].bytes) + || (stats->tx_bytes < iface->last_known[IFS_TX].bytes); + + IF_DEBUG("qtaguid: %s(%s): iface=%p netdev=%p " + "bytes rx/tx=%llu/%llu " + "active=%d last_known=%d " + "stats_rewound=%d\n", __func__, + net_dev ? net_dev->name : "?", + iface, net_dev, + stats->rx_bytes, stats->tx_bytes, + iface->active, iface->last_known_valid, stats_rewound); + + if (iface->active && iface->last_known_valid && stats_rewound) { + pr_warn_once("qtaguid: iface_stat: %s(%s): " + "iface reset its stats unexpectedly\n", __func__, + net_dev->name); + + iface->totals[IFS_TX].bytes += iface->last_known[IFS_TX].bytes; + iface->totals[IFS_TX].packets += + iface->last_known[IFS_TX].packets; + iface->totals[IFS_RX].bytes += iface->last_known[IFS_RX].bytes; + iface->totals[IFS_RX].packets += + iface->last_known[IFS_RX].packets; + iface->last_known_valid = false; + IF_DEBUG("qtaguid: %s(%s): iface=%p " + "used last known bytes rx/tx=%llu/%llu\n", __func__, + iface->ifname, iface, iface->last_known[IFS_RX].bytes, + iface->last_known[IFS_TX].bytes); + } +} + +/* + * Create a new entry for tracking the specified interface. + * Do nothing if the entry already exists. + * Called when an interface is configured with a valid IP address. 
+ */ +static void iface_stat_create(struct net_device *net_dev, + struct in_ifaddr *ifa) +{ + struct in_device *in_dev = NULL; + const char *ifname; + struct iface_stat *entry; + __be32 ipaddr = 0; + struct iface_stat *new_iface; + + IF_DEBUG("qtaguid: iface_stat: create(%s): ifa=%p netdev=%p\n", + net_dev ? net_dev->name : "?", + ifa, net_dev); + if (!net_dev) { + pr_err("qtaguid: iface_stat: create(): no net dev\n"); + return; + } + + ifname = net_dev->name; + if (!ifa) { + in_dev = in_dev_get(net_dev); + if (!in_dev) { + pr_err("qtaguid: iface_stat: create(%s): no inet dev\n", + ifname); + return; + } + IF_DEBUG("qtaguid: iface_stat: create(%s): in_dev=%p\n", + ifname, in_dev); + for (ifa = in_dev->ifa_list; ifa; ifa = ifa->ifa_next) { + IF_DEBUG("qtaguid: iface_stat: create(%s): " + "ifa=%p ifa_label=%s\n", + ifname, ifa, + ifa->ifa_label ? ifa->ifa_label : "(null)"); + if (ifa->ifa_label && !strcmp(ifname, ifa->ifa_label)) + break; + } + } + + if (!ifa) { + IF_DEBUG("qtaguid: iface_stat: create(%s): no matching IP\n", + ifname); + goto done_put; + } + ipaddr = ifa->ifa_local; + + spin_lock_bh(&iface_stat_list_lock); + entry = get_iface_entry(ifname); + if (entry != NULL) { + bool activate = !ipv4_is_loopback(ipaddr); + IF_DEBUG("qtaguid: iface_stat: create(%s): entry=%p\n", + ifname, entry); + iface_check_stats_reset_and_adjust(net_dev, entry); + _iface_stat_set_active(entry, net_dev, activate); + IF_DEBUG("qtaguid: %s(%s): " + "tracking now %d on ip=%pI4\n", __func__, + entry->ifname, activate, &ipaddr); + goto done_unlock_put; + } else if (ipv4_is_loopback(ipaddr)) { + IF_DEBUG("qtaguid: iface_stat: create(%s): " + "ignore loopback dev. ip=%pI4\n", ifname, &ipaddr); + goto done_unlock_put; + } + + new_iface = iface_alloc(net_dev); + IF_DEBUG("qtaguid: iface_stat: create(%s): done " + "entry=%p ip=%pI4\n", ifname, new_iface, &ipaddr); +done_unlock_put: + spin_unlock_bh(&iface_stat_list_lock); +done_put: + if (in_dev) + in_dev_put(in_dev); +} + +static void iface_stat_create_ipv6(struct net_device *net_dev, + struct inet6_ifaddr *ifa) +{ + struct in_device *in_dev; + const char *ifname; + struct iface_stat *entry; + struct iface_stat *new_iface; + int addr_type; + + IF_DEBUG("qtaguid: iface_stat: create6(): ifa=%p netdev=%p->name=%s\n", + ifa, net_dev, net_dev ? net_dev->name : ""); + if (!net_dev) { + pr_err("qtaguid: iface_stat: create6(): no net dev!\n"); + return; + } + ifname = net_dev->name; + + in_dev = in_dev_get(net_dev); + if (!in_dev) { + pr_err("qtaguid: iface_stat: create6(%s): no inet dev\n", + ifname); + return; + } + + IF_DEBUG("qtaguid: iface_stat: create6(%s): in_dev=%p\n", + ifname, in_dev); + + if (!ifa) { + IF_DEBUG("qtaguid: iface_stat: create6(%s): no matching IP\n", + ifname); + goto done_put; + } + addr_type = ipv6_addr_type(&ifa->addr); + + spin_lock_bh(&iface_stat_list_lock); + entry = get_iface_entry(ifname); + if (entry != NULL) { + bool activate = !(addr_type & IPV6_ADDR_LOOPBACK); + IF_DEBUG("qtaguid: %s(%s): entry=%p\n", __func__, + ifname, entry); + iface_check_stats_reset_and_adjust(net_dev, entry); + _iface_stat_set_active(entry, net_dev, activate); + IF_DEBUG("qtaguid: %s(%s): " + "tracking now %d on ip=%pI6c\n", __func__, + entry->ifname, activate, &ifa->addr); + goto done_unlock_put; + } else if (addr_type & IPV6_ADDR_LOOPBACK) { + IF_DEBUG("qtaguid: %s(%s): " + "ignore loopback dev. 
ip=%pI6c\n", __func__, + ifname, &ifa->addr); + goto done_unlock_put; + } + + new_iface = iface_alloc(net_dev); + IF_DEBUG("qtaguid: iface_stat: create6(%s): done " + "entry=%p ip=%pI6c\n", ifname, new_iface, &ifa->addr); + +done_unlock_put: + spin_unlock_bh(&iface_stat_list_lock); +done_put: + in_dev_put(in_dev); +} + +static struct sock_tag *get_sock_stat_nl(const struct sock *sk) +{ + MT_DEBUG("qtaguid: get_sock_stat_nl(sk=%p)\n", sk); + return sock_tag_tree_search(&sock_tag_tree, sk); +} + +static struct sock_tag *get_sock_stat(const struct sock *sk) +{ + struct sock_tag *sock_tag_entry; + MT_DEBUG("qtaguid: get_sock_stat(sk=%p)\n", sk); + if (!sk) + return NULL; + spin_lock_bh(&sock_tag_list_lock); + sock_tag_entry = get_sock_stat_nl(sk); + spin_unlock_bh(&sock_tag_list_lock); + return sock_tag_entry; +} + +static void +data_counters_update(struct data_counters *dc, int set, + enum ifs_tx_rx direction, int proto, int bytes) +{ + switch (proto) { + case IPPROTO_TCP: + dc_add_byte_packets(dc, set, direction, IFS_TCP, bytes, 1); + break; + case IPPROTO_UDP: + dc_add_byte_packets(dc, set, direction, IFS_UDP, bytes, 1); + break; + case IPPROTO_IP: + default: + dc_add_byte_packets(dc, set, direction, IFS_PROTO_OTHER, bytes, + 1); + break; + } +} + +/* + * Update stats for the specified interface. Do nothing if the entry + * does not exist (when a device was never configured with an IP address). + * Called when an device is being unregistered. + */ +static void iface_stat_update(struct net_device *net_dev, bool stash_only) +{ + struct rtnl_link_stats64 dev_stats, *stats; + struct iface_stat *entry; + + stats = dev_get_stats(net_dev, &dev_stats); + spin_lock_bh(&iface_stat_list_lock); + entry = get_iface_entry(net_dev->name); + if (entry == NULL) { + IF_DEBUG("qtaguid: iface_stat: update(%s): not tracked\n", + net_dev->name); + spin_unlock_bh(&iface_stat_list_lock); + return; + } + + IF_DEBUG("qtaguid: %s(%s): entry=%p\n", __func__, + net_dev->name, entry); + if (!entry->active) { + IF_DEBUG("qtaguid: %s(%s): already disabled\n", __func__, + net_dev->name); + spin_unlock_bh(&iface_stat_list_lock); + return; + } + + if (stash_only) { + entry->last_known[IFS_TX].bytes = stats->tx_bytes; + entry->last_known[IFS_TX].packets = stats->tx_packets; + entry->last_known[IFS_RX].bytes = stats->rx_bytes; + entry->last_known[IFS_RX].packets = stats->rx_packets; + entry->last_known_valid = true; + IF_DEBUG("qtaguid: %s(%s): " + "dev stats stashed rx/tx=%llu/%llu\n", __func__, + net_dev->name, stats->rx_bytes, stats->tx_bytes); + spin_unlock_bh(&iface_stat_list_lock); + return; + } + entry->totals[IFS_TX].bytes += stats->tx_bytes; + entry->totals[IFS_TX].packets += stats->tx_packets; + entry->totals[IFS_RX].bytes += stats->rx_bytes; + entry->totals[IFS_RX].packets += stats->rx_packets; + /* We don't need the last_known[] anymore */ + entry->last_known_valid = false; + _iface_stat_set_active(entry, net_dev, false); + IF_DEBUG("qtaguid: %s(%s): " + "disable tracking. 
rx/tx=%llu/%llu\n", __func__, + net_dev->name, stats->rx_bytes, stats->tx_bytes); + spin_unlock_bh(&iface_stat_list_lock); +} + +static void tag_stat_update(struct tag_stat *tag_entry, + enum ifs_tx_rx direction, int proto, int bytes) +{ + int active_set; + active_set = get_active_counter_set(tag_entry->tn.tag); + MT_DEBUG("qtaguid: tag_stat_update(tag=0x%llx (uid=%u) set=%d " + "dir=%d proto=%d bytes=%d)\n", + tag_entry->tn.tag, get_uid_from_tag(tag_entry->tn.tag), + active_set, direction, proto, bytes); + data_counters_update(&tag_entry->counters, active_set, direction, + proto, bytes); + if (tag_entry->parent_counters) + data_counters_update(tag_entry->parent_counters, active_set, + direction, proto, bytes); +} + +/* + * Create a new entry for tracking the specified {acct_tag,uid_tag} within + * the interface. + * iface_entry->tag_stat_list_lock should be held. + */ +static struct tag_stat *create_if_tag_stat(struct iface_stat *iface_entry, + tag_t tag) +{ + struct tag_stat *new_tag_stat_entry = NULL; + IF_DEBUG("qtaguid: iface_stat: %s(): ife=%p tag=0x%llx" + " (uid=%u)\n", __func__, + iface_entry, tag, get_uid_from_tag(tag)); + new_tag_stat_entry = kzalloc(sizeof(*new_tag_stat_entry), GFP_ATOMIC); + if (!new_tag_stat_entry) { + pr_err("qtaguid: iface_stat: tag stat alloc failed\n"); + goto done; + } + new_tag_stat_entry->tn.tag = tag; + tag_stat_tree_insert(new_tag_stat_entry, &iface_entry->tag_stat_tree); +done: + return new_tag_stat_entry; +} + +static void if_tag_stat_update(const char *ifname, uid_t uid, + const struct sock *sk, enum ifs_tx_rx direction, + int proto, int bytes) +{ + struct tag_stat *tag_stat_entry; + tag_t tag, acct_tag; + tag_t uid_tag; + struct data_counters *uid_tag_counters; + struct sock_tag *sock_tag_entry; + struct iface_stat *iface_entry; + struct tag_stat *new_tag_stat; + MT_DEBUG("qtaguid: if_tag_stat_update(ifname=%s " + "uid=%u sk=%p dir=%d proto=%d bytes=%d)\n", + ifname, uid, sk, direction, proto, bytes); + + + iface_entry = get_iface_entry(ifname); + if (!iface_entry) { + pr_err("qtaguid: iface_stat: stat_update() %s not found\n", + ifname); + return; + } + /* It is ok to process data when an iface_entry is inactive */ + + MT_DEBUG("qtaguid: iface_stat: stat_update() dev=%s entry=%p\n", + ifname, iface_entry); + + /* + * Look for a tagged sock. + * It will have an acct_uid. + */ + sock_tag_entry = get_sock_stat(sk); + if (sock_tag_entry) { + tag = sock_tag_entry->tag; + acct_tag = get_atag_from_tag(tag); + uid_tag = get_utag_from_tag(tag); + } else { + acct_tag = make_atag_from_value(0); + tag = combine_atag_with_uid(acct_tag, uid); + uid_tag = make_tag_from_uid(uid); + } + MT_DEBUG("qtaguid: iface_stat: stat_update(): " + " looking for tag=0x%llx (uid=%u) in ife=%p\n", + tag, get_uid_from_tag(tag), iface_entry); + /* Loop over tag list under this interface for {acct_tag,uid_tag} */ + spin_lock_bh(&iface_entry->tag_stat_list_lock); + + tag_stat_entry = tag_stat_tree_search(&iface_entry->tag_stat_tree, + tag); + if (tag_stat_entry) { + /* + * Updating the {acct_tag, uid_tag} entry handles both stats: + * {0, uid_tag} will also get updated. + */ + tag_stat_update(tag_stat_entry, direction, proto, bytes); + spin_unlock_bh(&iface_entry->tag_stat_list_lock); + return; + } + + /* Loop over tag list under this interface for {0,uid_tag} */ + tag_stat_entry = tag_stat_tree_search(&iface_entry->tag_stat_tree, + uid_tag); + if (!tag_stat_entry) { + /* Here: the base uid_tag did not exist */ + /* + * No parent counters. 
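tag_stat_update() ultimately funnels into the bpc[set][direction][proto] arrays manipulated by dc_add_byte_packets() earlier in this file. The backing types live in xt_qtaguid_internal.h, outside this diff; a presumed reconstruction of their shape, inferred from the indexing and the IFS_* names used here:

    #include <stdint.h>

    #define IFS_MAX_COUNTER_SETS 2   /* assumed: e.g. background/foreground */

    enum ifs_tx_rx { IFS_TX, IFS_RX, IFS_MAX_DIRECTIONS };
    enum ifs_proto { IFS_TCP, IFS_UDP, IFS_PROTO_OTHER, IFS_MAX_PROTOS };

    struct byte_packet_counters {
            uint64_t bytes;
            uint64_t packets;
    };

    struct data_counters {
            struct byte_packet_counters bpc[IFS_MAX_COUNTER_SETS]
                                           [IFS_MAX_DIRECTIONS]
                                           [IFS_MAX_PROTOS];
    };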
So + * - No {0, uid_tag} stats and no {acc_tag, uid_tag} stats. + */ + new_tag_stat = create_if_tag_stat(iface_entry, uid_tag); + uid_tag_counters = &new_tag_stat->counters; + } else { + uid_tag_counters = &tag_stat_entry->counters; + } + + if (acct_tag) { + new_tag_stat = create_if_tag_stat(iface_entry, tag); + new_tag_stat->parent_counters = uid_tag_counters; + } + tag_stat_update(new_tag_stat, direction, proto, bytes); + spin_unlock_bh(&iface_entry->tag_stat_list_lock); +} + +static int iface_netdev_event_handler(struct notifier_block *nb, + unsigned long event, void *ptr) { + struct net_device *dev = ptr; + + if (unlikely(module_passive)) + return NOTIFY_DONE; + + IF_DEBUG("qtaguid: iface_stat: netdev_event(): " + "ev=0x%lx/%s netdev=%p->name=%s\n", + event, netdev_evt_str(event), dev, dev ? dev->name : ""); + + switch (event) { + case NETDEV_UP: + iface_stat_create(dev, NULL); + atomic64_inc(&qtu_events.iface_events); + break; + case NETDEV_DOWN: + case NETDEV_UNREGISTER: + iface_stat_update(dev, event == NETDEV_DOWN); + atomic64_inc(&qtu_events.iface_events); + break; + } + return NOTIFY_DONE; +} + +static int iface_inet6addr_event_handler(struct notifier_block *nb, + unsigned long event, void *ptr) +{ + struct inet6_ifaddr *ifa = ptr; + struct net_device *dev; + + if (unlikely(module_passive)) + return NOTIFY_DONE; + + IF_DEBUG("qtaguid: iface_stat: inet6addr_event(): " + "ev=0x%lx/%s ifa=%p\n", + event, netdev_evt_str(event), ifa); + + switch (event) { + case NETDEV_UP: + BUG_ON(!ifa || !ifa->idev); + dev = (struct net_device *)ifa->idev->dev; + iface_stat_create_ipv6(dev, ifa); + atomic64_inc(&qtu_events.iface_events); + break; + case NETDEV_DOWN: + case NETDEV_UNREGISTER: + BUG_ON(!ifa || !ifa->idev); + dev = (struct net_device *)ifa->idev->dev; + iface_stat_update(dev, event == NETDEV_DOWN); + atomic64_inc(&qtu_events.iface_events); + break; + } + return NOTIFY_DONE; +} + +static int iface_inetaddr_event_handler(struct notifier_block *nb, + unsigned long event, void *ptr) +{ + struct in_ifaddr *ifa = ptr; + struct net_device *dev; + + if (unlikely(module_passive)) + return NOTIFY_DONE; + + IF_DEBUG("qtaguid: iface_stat: inetaddr_event(): " + "ev=0x%lx/%s ifa=%p\n", + event, netdev_evt_str(event), ifa); + + switch (event) { + case NETDEV_UP: + BUG_ON(!ifa || !ifa->ifa_dev); + dev = ifa->ifa_dev->dev; + iface_stat_create(dev, ifa); + atomic64_inc(&qtu_events.iface_events); + break; + case NETDEV_DOWN: + case NETDEV_UNREGISTER: + BUG_ON(!ifa || !ifa->ifa_dev); + dev = ifa->ifa_dev->dev; + iface_stat_update(dev, event == NETDEV_DOWN); + atomic64_inc(&qtu_events.iface_events); + break; + } + return NOTIFY_DONE; +} + +static struct notifier_block iface_netdev_notifier_blk = { + .notifier_call = iface_netdev_event_handler, +}; + +static struct notifier_block iface_inetaddr_notifier_blk = { + .notifier_call = iface_inetaddr_event_handler, +}; + +static struct notifier_block iface_inet6addr_notifier_blk = { + .notifier_call = iface_inet6addr_event_handler, +}; + +static int __init iface_stat_init(struct proc_dir_entry *parent_procdir) +{ + int err; + + iface_stat_procdir = proc_mkdir(iface_stat_procdirname, parent_procdir); + if (!iface_stat_procdir) { + pr_err("qtaguid: iface_stat: init failed to create proc entry\n"); + err = -1; + goto err; + } + + iface_stat_all_procfile = create_proc_entry(iface_stat_all_procfilename, + proc_iface_perms, + parent_procdir); + if (!iface_stat_all_procfile) { + pr_err("qtaguid: iface_stat: init " + " failed to create stat_all proc entry\n"); + err = 
-1; + goto err_zap_entry; + } + iface_stat_all_procfile->read_proc = iface_stat_all_proc_read; + + + err = register_netdevice_notifier(&iface_netdev_notifier_blk); + if (err) { + pr_err("qtaguid: iface_stat: init " + "failed to register dev event handler\n"); + goto err_zap_all_stats_entry; + } + err = register_inetaddr_notifier(&iface_inetaddr_notifier_blk); + if (err) { + pr_err("qtaguid: iface_stat: init " + "failed to register ipv4 dev event handler\n"); + goto err_unreg_nd; + } + + err = register_inet6addr_notifier(&iface_inet6addr_notifier_blk); + if (err) { + pr_err("qtaguid: iface_stat: init " + "failed to register ipv6 dev event handler\n"); + goto err_unreg_ip4_addr; + } + return 0; + +err_unreg_ip4_addr: + unregister_inetaddr_notifier(&iface_inetaddr_notifier_blk); +err_unreg_nd: + unregister_netdevice_notifier(&iface_netdev_notifier_blk); +err_zap_all_stats_entry: + remove_proc_entry(iface_stat_all_procfilename, parent_procdir); +err_zap_entry: + remove_proc_entry(iface_stat_procdirname, parent_procdir); +err: + return err; +} + +static struct sock *qtaguid_find_sk(const struct sk_buff *skb, + struct xt_action_param *par) +{ + struct sock *sk; + unsigned int hook_mask = (1 << par->hooknum); + + MT_DEBUG("qtaguid: find_sk(skb=%p) hooknum=%d family=%d\n", skb, + par->hooknum, par->family); + + /* + * Let's not abuse the the xt_socket_get*_sk(), or else it will + * return garbage SKs. + */ + if (!(hook_mask & XT_SOCKET_SUPPORTED_HOOKS)) + return NULL; + + switch (par->family) { + case NFPROTO_IPV6: + sk = xt_socket_get6_sk(skb, par); + break; + case NFPROTO_IPV4: + sk = xt_socket_get4_sk(skb, par); + break; + default: + return NULL; + } + + /* + * Seems to be issues on the file ptr for TCP_TIME_WAIT SKs. + * http://kerneltrap.org/mailarchive/linux-netdev/2010/10/21/6287959 + * Not fixed in 3.0-r3 :( + */ + if (sk) { + MT_DEBUG("qtaguid: %p->sk_proto=%u " + "->sk_state=%d\n", sk, sk->sk_protocol, sk->sk_state); + if (sk->sk_state == TCP_TIME_WAIT) { + xt_socket_put_sk(sk); + sk = NULL; + } + } + return sk; +} + +static void account_for_uid(const struct sk_buff *skb, + const struct sock *alternate_sk, uid_t uid, + struct xt_action_param *par) +{ + const struct net_device *el_dev; + + if (!skb->dev) { + MT_DEBUG("qtaguid[%d]: no skb->dev\n", par->hooknum); + el_dev = par->in ? : par->out; + } else { + const struct net_device *other_dev; + el_dev = skb->dev; + other_dev = par->in ? : par->out; + if (el_dev != other_dev) { + MT_DEBUG("qtaguid[%d]: skb->dev=%p %s vs " + "par->(in/out)=%p %s\n", + par->hooknum, el_dev, el_dev->name, other_dev, + other_dev->name); + } + } + + if (unlikely(!el_dev)) { + pr_info("qtaguid[%d]: no par->in/out?!!\n", par->hooknum); + } else if (unlikely(!el_dev->name)) { + pr_info("qtaguid[%d]: no dev->name?!!\n", par->hooknum); + } else { + MT_DEBUG("qtaguid[%d]: dev name=%s type=%d\n", + par->hooknum, + el_dev->name, + el_dev->type); + + if_tag_stat_update(el_dev->name, uid, + skb->sk ? skb->sk : alternate_sk, + par->in ? 
IFS_RX : IFS_TX, + ip_hdr(skb)->protocol, skb->len); + } +} + +static bool qtaguid_mt(const struct sk_buff *skb, struct xt_action_param *par) +{ + const struct xt_qtaguid_match_info *info = par->matchinfo; + const struct file *filp; + bool got_sock = false; + struct sock *sk; + uid_t sock_uid; + bool res; + + if (unlikely(module_passive)) + return (info->match ^ info->invert) == 0; + + MT_DEBUG("qtaguid[%d]: entered skb=%p par->in=%p/out=%p fam=%d\n", + par->hooknum, skb, par->in, par->out, par->family); + + atomic64_inc(&qtu_events.match_calls); + if (skb == NULL) { + res = (info->match ^ info->invert) == 0; + goto ret_res; + } + + sk = skb->sk; + + if (sk == NULL) { + /* + * A missing sk->sk_socket happens when packets are in-flight + * and the matching socket is already closed and gone. + */ + sk = qtaguid_find_sk(skb, par); + /* + * If we got the socket from the find_sk(), we will need to put + * it back, as nf_tproxy_get_sock_v4() got it. + */ + got_sock = sk; + if (sk) + atomic64_inc(&qtu_events.match_found_sk_in_ct); + else + atomic64_inc(&qtu_events.match_found_no_sk_in_ct); + } else { + atomic64_inc(&qtu_events.match_found_sk); + } + MT_DEBUG("qtaguid[%d]: sk=%p got_sock=%d proto=%d\n", + par->hooknum, sk, got_sock, ip_hdr(skb)->protocol); + if (sk != NULL) { + MT_DEBUG("qtaguid[%d]: sk=%p->sk_socket=%p->file=%p\n", + par->hooknum, sk, sk->sk_socket, + sk->sk_socket ? sk->sk_socket->file : (void *)-1LL); + filp = sk->sk_socket ? sk->sk_socket->file : NULL; + MT_DEBUG("qtaguid[%d]: filp...uid=%u\n", + par->hooknum, filp ? filp->f_cred->fsuid : -1); + } + + if (sk == NULL || sk->sk_socket == NULL) { + /* + * Here, the qtaguid_find_sk() using connection tracking + * couldn't find the owner, so for now we just count them + * against the system. + */ + /* + * TODO: unhack how to force just accounting. + * For now we only do iface stats when the uid-owner is not + * requested. + */ + if (!(info->match & XT_QTAGUID_UID)) + account_for_uid(skb, sk, 0, par); + MT_DEBUG("qtaguid[%d]: leaving (sk?sk->sk_socket)=%p\n", + par->hooknum, + sk ? sk->sk_socket : NULL); + res = (info->match ^ info->invert) == 0; + atomic64_inc(&qtu_events.match_no_sk); + goto put_sock_ret_res; + } else if (info->match & info->invert & XT_QTAGUID_SOCKET) { + res = false; + goto put_sock_ret_res; + } + filp = sk->sk_socket->file; + if (filp == NULL) { + MT_DEBUG("qtaguid[%d]: leaving filp=NULL\n", par->hooknum); + account_for_uid(skb, sk, 0, par); + res = ((info->match ^ info->invert) & + (XT_QTAGUID_UID | XT_QTAGUID_GID)) == 0; + atomic64_inc(&qtu_events.match_no_sk_file); + goto put_sock_ret_res; + } + sock_uid = filp->f_cred->fsuid; + /* + * TODO: unhack how to force just accounting. 
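+ * (That is, a way to drive only the counters: a hypothetical rule
+ * with no uid/gid range at all would then be pure accounting.)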
+ * For now we only do iface stats when the uid-owner is not requested + */ + if (!(info->match & XT_QTAGUID_UID)) + account_for_uid(skb, sk, sock_uid, par); + + /* + * The following two tests fail the match when: + * id not in range AND no inverted condition requested + * or id in range AND inverted condition requested + * Thus (!a && b) || (a && !b) == a ^ b + */ + if (info->match & XT_QTAGUID_UID) + if ((filp->f_cred->fsuid >= info->uid_min && + filp->f_cred->fsuid <= info->uid_max) ^ + !(info->invert & XT_QTAGUID_UID)) { + MT_DEBUG("qtaguid[%d]: leaving uid not matching\n", + par->hooknum); + res = false; + goto put_sock_ret_res; + } + if (info->match & XT_QTAGUID_GID) + if ((filp->f_cred->fsgid >= info->gid_min && + filp->f_cred->fsgid <= info->gid_max) ^ + !(info->invert & XT_QTAGUID_GID)) { + MT_DEBUG("qtaguid[%d]: leaving gid not matching\n", + par->hooknum); + res = false; + goto put_sock_ret_res; + } + + MT_DEBUG("qtaguid[%d]: leaving matched\n", par->hooknum); + res = true; + +put_sock_ret_res: + if (got_sock) + xt_socket_put_sk(sk); +ret_res: + MT_DEBUG("qtaguid[%d]: left %d\n", par->hooknum, res); + return res; +} + +#ifdef DDEBUG +/* This function is not in xt_qtaguid_print.c because of locks visibility */ +static void prdebug_full_state(int indent_level, const char *fmt, ...) +{ + va_list args; + char *fmt_buff; + char *buff; + + if (!unlikely(qtaguid_debug_mask & DDEBUG_MASK)) + return; + + fmt_buff = kasprintf(GFP_ATOMIC, + "qtaguid: %s(): %s {\n", __func__, fmt); + BUG_ON(!fmt_buff); + va_start(args, fmt); + buff = kvasprintf(GFP_ATOMIC, + fmt_buff, args); + BUG_ON(!buff); + pr_debug("%s", buff); + kfree(fmt_buff); + kfree(buff); + va_end(args); + + spin_lock_bh(&sock_tag_list_lock); + prdebug_sock_tag_tree(indent_level, &sock_tag_tree); + spin_unlock_bh(&sock_tag_list_lock); + + spin_lock_bh(&sock_tag_list_lock); + spin_lock_bh(&uid_tag_data_tree_lock); + prdebug_uid_tag_data_tree(indent_level, &uid_tag_data_tree); + prdebug_proc_qtu_data_tree(indent_level, &proc_qtu_data_tree); + spin_unlock_bh(&uid_tag_data_tree_lock); + spin_unlock_bh(&sock_tag_list_lock); + + spin_lock_bh(&iface_stat_list_lock); + prdebug_iface_stat_list(indent_level, &iface_stat_list); + spin_unlock_bh(&iface_stat_list_lock); + + pr_debug("qtaguid: %s(): }\n", __func__); +} +#else +static void prdebug_full_state(int indent_level, const char *fmt, ...) 
{} +#endif + +/* + * Procfs reader to get all active socket tags using style "1)" as described in + * fs/proc/generic.c + */ +static int qtaguid_ctrl_proc_read(char *page, char **num_items_returned, + off_t items_to_skip, int char_count, int *eof, + void *data) +{ + char *outp = page; + int len; + uid_t uid; + struct rb_node *node; + struct sock_tag *sock_tag_entry; + int item_index = 0; + int indent_level = 0; + long f_count; + + if (unlikely(module_passive)) { + *eof = 1; + return 0; + } + + if (*eof) + return 0; + + CT_DEBUG("qtaguid: proc ctrl page=%p off=%ld char_count=%d *eof=%d\n", + page, items_to_skip, char_count, *eof); + + spin_lock_bh(&sock_tag_list_lock); + for (node = rb_first(&sock_tag_tree); + node; + node = rb_next(node)) { + if (item_index++ < items_to_skip) + continue; + sock_tag_entry = rb_entry(node, struct sock_tag, sock_node); + uid = get_uid_from_tag(sock_tag_entry->tag); + CT_DEBUG("qtaguid: proc_read(): sk=%p tag=0x%llx (uid=%u) " + "pid=%u\n", + sock_tag_entry->sk, + sock_tag_entry->tag, + uid, + sock_tag_entry->pid + ); + f_count = atomic_long_read( + &sock_tag_entry->socket->file->f_count); + len = snprintf(outp, char_count, + "sock=%p tag=0x%llx (uid=%u) pid=%u " + "f_count=%lu\n", + sock_tag_entry->sk, + sock_tag_entry->tag, uid, + sock_tag_entry->pid, f_count); + if (len >= char_count) { + spin_unlock_bh(&sock_tag_list_lock); + *outp = '\0'; + return outp - page; + } + outp += len; + char_count -= len; + (*num_items_returned)++; + } + spin_unlock_bh(&sock_tag_list_lock); + + if (item_index++ >= items_to_skip) { + len = snprintf(outp, char_count, + "events: sockets_tagged=%llu " + "sockets_untagged=%llu " + "counter_set_changes=%llu " + "delete_cmds=%llu " + "iface_events=%llu " + "match_calls=%llu " + "match_found_sk=%llu " + "match_found_sk_in_ct=%llu " + "match_found_no_sk_in_ct=%llu " + "match_no_sk=%llu " + "match_no_sk_file=%llu\n", + atomic64_read(&qtu_events.sockets_tagged), + atomic64_read(&qtu_events.sockets_untagged), + atomic64_read(&qtu_events.counter_set_changes), + atomic64_read(&qtu_events.delete_cmds), + atomic64_read(&qtu_events.iface_events), + atomic64_read(&qtu_events.match_calls), + atomic64_read(&qtu_events.match_found_sk), + atomic64_read(&qtu_events.match_found_sk_in_ct), + atomic64_read( + &qtu_events.match_found_no_sk_in_ct), + atomic64_read(&qtu_events.match_no_sk), + atomic64_read(&qtu_events.match_no_sk_file)); + if (len >= char_count) { + *outp = '\0'; + return outp - page; + } + outp += len; + char_count -= len; + (*num_items_returned)++; + } + + /* Count the following as part of the last item_index */ + if (item_index > items_to_skip) { + prdebug_full_state(indent_level, "proc ctrl"); + } + + *eof = 1; + return outp - page; +} + +/* + * Delete socket tags, and stat tags associated with a given + * accounting tag and uid.
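+ *
+ * Sketch of the expected ctrl input, per the sscanf() format below
+ * ("%c %llu %u"); the tag/uid values are hypothetical:
+ *   "d 4294967296 10003"  delete stats for acct_tag value 1 of uid 10003
+ *   "d 0 10003"           delete all tags and stats owned by uid 10003
+ *   "d 0"                 same, defaulting to the caller's fsuid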
+ */ +static int ctrl_cmd_delete(const char *input) +{ + char cmd; + uid_t uid; + uid_t entry_uid; + tag_t acct_tag; + tag_t tag; + int res, argc; + struct iface_stat *iface_entry; + struct rb_node *node; + struct sock_tag *st_entry; + struct rb_root st_to_free_tree = RB_ROOT; + struct tag_stat *ts_entry; + struct tag_counter_set *tcs_entry; + struct tag_ref *tr_entry; + struct uid_tag_data *utd_entry; + + argc = sscanf(input, "%c %llu %u", &cmd, &acct_tag, &uid); + CT_DEBUG("qtaguid: ctrl_delete(%s): argc=%d cmd=%c " + "user_tag=0x%llx uid=%u\n", input, argc, cmd, + acct_tag, uid); + if (argc < 2) { + res = -EINVAL; + goto err; + } + if (!valid_atag(acct_tag)) { + pr_info("qtaguid: ctrl_delete(%s): invalid tag\n", input); + res = -EINVAL; + goto err; + } + if (argc < 3) { + uid = current_fsuid(); + } else if (!can_impersonate_uid(uid)) { + pr_info("qtaguid: ctrl_delete(%s): " + "insufficient priv from pid=%u tgid=%u uid=%u\n", + input, current->pid, current->tgid, current_fsuid()); + res = -EPERM; + goto err; + } + + tag = combine_atag_with_uid(acct_tag, uid); + CT_DEBUG("qtaguid: ctrl_delete(%s): " + "looking for tag=0x%llx (uid=%u)\n", + input, tag, uid); + + /* Delete socket tags */ + spin_lock_bh(&sock_tag_list_lock); + node = rb_first(&sock_tag_tree); + while (node) { + st_entry = rb_entry(node, struct sock_tag, sock_node); + entry_uid = get_uid_from_tag(st_entry->tag); + node = rb_next(node); + if (entry_uid != uid) + continue; + + CT_DEBUG("qtaguid: ctrl_delete(%s): st tag=0x%llx (uid=%u)\n", + input, st_entry->tag, entry_uid); + + if (!acct_tag || st_entry->tag == tag) { + rb_erase(&st_entry->sock_node, &sock_tag_tree); + /* Can't sockfd_put() within spinlock, do it later. */ + sock_tag_tree_insert(st_entry, &st_to_free_tree); + tr_entry = lookup_tag_ref(st_entry->tag, NULL); + BUG_ON(tr_entry->num_sock_tags <= 0); + tr_entry->num_sock_tags--; + /* + * TODO: remove if, and start failing. + * This is a hack to work around the fact that in some + * places we have "if (IS_ERR_OR_NULL(pqd_entry))" + * and are trying to work around apps + * that didn't open the /dev/xt_qtaguid. + */ + if (st_entry->list.next && st_entry->list.prev) + list_del(&st_entry->list); + } + } + spin_unlock_bh(&sock_tag_list_lock); + + sock_tag_tree_erase(&st_to_free_tree); + + /* Delete tag counter-sets */ + spin_lock_bh(&tag_counter_set_list_lock); + /* Counter sets are only on the uid tag, not full tag */ + tcs_entry = tag_counter_set_tree_search(&tag_counter_set_tree, tag); + if (tcs_entry) { + CT_DEBUG("qtaguid: ctrl_delete(%s): " + "erase tcs: tag=0x%llx (uid=%u) set=%d\n", + input, + tcs_entry->tn.tag, + get_uid_from_tag(tcs_entry->tn.tag), + tcs_entry->active_set); + rb_erase(&tcs_entry->tn.node, &tag_counter_set_tree); + kfree(tcs_entry); + } + spin_unlock_bh(&tag_counter_set_list_lock); + + /* + * If acct_tag is 0, then all entries belonging to uid are + * erased. 
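+ * Counter sets need no such per-iface sweep: they are keyed on the
+ * plain uid_tag only, so the single tag_counter_set erase above
+ * already covered them.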
+ */ + spin_lock_bh(&iface_stat_list_lock); + list_for_each_entry(iface_entry, &iface_stat_list, list) { + spin_lock_bh(&iface_entry->tag_stat_list_lock); + node = rb_first(&iface_entry->tag_stat_tree); + while (node) { + ts_entry = rb_entry(node, struct tag_stat, tn.node); + entry_uid = get_uid_from_tag(ts_entry->tn.tag); + node = rb_next(node); + + CT_DEBUG("qtaguid: ctrl_delete(%s): " + "ts tag=0x%llx (uid=%u)\n", + input, ts_entry->tn.tag, entry_uid); + + if (entry_uid != uid) + continue; + if (!acct_tag || ts_entry->tn.tag == tag) { + CT_DEBUG("qtaguid: ctrl_delete(%s): " + "erase ts: %s 0x%llx %u\n", + input, iface_entry->ifname, + get_atag_from_tag(ts_entry->tn.tag), + entry_uid); + rb_erase(&ts_entry->tn.node, + &iface_entry->tag_stat_tree); + kfree(ts_entry); + } + } + spin_unlock_bh(&iface_entry->tag_stat_list_lock); + } + spin_unlock_bh(&iface_stat_list_lock); + + /* Cleanup the uid_tag_data */ + spin_lock_bh(&uid_tag_data_tree_lock); + node = rb_first(&uid_tag_data_tree); + while (node) { + utd_entry = rb_entry(node, struct uid_tag_data, node); + entry_uid = utd_entry->uid; + node = rb_next(node); + + CT_DEBUG("qtaguid: ctrl_delete(%s): " + "utd uid=%u\n", + input, entry_uid); + + if (entry_uid != uid) + continue; + /* + * Go over the tag_refs, and those that don't have + * sock_tags using them are freed. + */ + put_tag_ref_tree(tag, utd_entry); + put_utd_entry(utd_entry); + } + spin_unlock_bh(&uid_tag_data_tree_lock); + + atomic64_inc(&qtu_events.delete_cmds); + res = 0; + +err: + return res; +} + +static int ctrl_cmd_counter_set(const char *input) +{ + char cmd; + uid_t uid = 0; + tag_t tag; + int res, argc; + struct tag_counter_set *tcs; + int counter_set; + + argc = sscanf(input, "%c %d %u", &cmd, &counter_set, &uid); + CT_DEBUG("qtaguid: ctrl_counterset(%s): argc=%d cmd=%c " + "set=%d uid=%u\n", input, argc, cmd, + counter_set, uid); + if (argc != 3) { + res = -EINVAL; + goto err; + } + if (counter_set < 0 || counter_set >= IFS_MAX_COUNTER_SETS) { + pr_info("qtaguid: ctrl_counterset(%s): invalid counter_set range\n", + input); + res = -EINVAL; + goto err; + } + if (!can_manipulate_uids()) { + pr_info("qtaguid: ctrl_counterset(%s): " + "insufficient priv from pid=%u tgid=%u uid=%u\n", + input, current->pid, current->tgid, current_fsuid()); + res = -EPERM; + goto err; + } + + tag = make_tag_from_uid(uid); + spin_lock_bh(&tag_counter_set_list_lock); + tcs = tag_counter_set_tree_search(&tag_counter_set_tree, tag); + if (!tcs) { + tcs = kzalloc(sizeof(*tcs), GFP_ATOMIC); + if (!tcs) { + spin_unlock_bh(&tag_counter_set_list_lock); + pr_err("qtaguid: ctrl_counterset(%s): " + "failed to alloc counter set\n", + input); + res = -ENOMEM; + goto err; + } + tcs->tn.tag = tag; + tag_counter_set_tree_insert(tcs, &tag_counter_set_tree); + CT_DEBUG("qtaguid: ctrl_counterset(%s): added tcs tag=0x%llx " + "(uid=%u) set=%d\n", + input, tag, get_uid_from_tag(tag), counter_set); + } + tcs->active_set = counter_set; + spin_unlock_bh(&tag_counter_set_list_lock); + atomic64_inc(&qtu_events.counter_set_changes); + res = 0; + +err: + return res; +} + +static int ctrl_cmd_tag(const char *input) +{ + char cmd; + int sock_fd = 0; + uid_t uid = 0; + tag_t acct_tag = make_atag_from_value(0); + tag_t full_tag; + struct socket *el_socket; + int res, argc; + struct sock_tag *sock_tag_entry; + struct tag_ref *tag_ref_entry; + struct uid_tag_data *uid_tag_data_entry; + struct proc_qtu_data *pqd_entry; + + /* Unassigned args will get defaulted later. 
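+ * A sketch of accepted inputs, per the sscanf() format below
+ * ("%c %d %llu %u"); fd/uid values are hypothetical:
+ *   "t 12"                   tag fd 12 with acct_tag 0, caller's uid
+ *   "t 12 4294967296"        tag fd 12 with acct_tag value 1
+ *   "t 12 4294967296 10003"  same, on behalf of uid 10003 (requires
+ *                            can_impersonate_uid() to pass)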
*/ + argc = sscanf(input, "%c %d %llu %u", &cmd, &sock_fd, &acct_tag, &uid); + CT_DEBUG("qtaguid: ctrl_tag(%s): argc=%d cmd=%c sock_fd=%d " + "acct_tag=0x%llx uid=%u\n", input, argc, cmd, sock_fd, + acct_tag, uid); + if (argc < 2) { + res = -EINVAL; + goto err; + } + el_socket = sockfd_lookup(sock_fd, &res); /* This locks the file */ + if (!el_socket) { + pr_info("qtaguid: ctrl_tag(%s): failed to lookup" + " sock_fd=%d err=%d\n", input, sock_fd, res); + goto err; + } + CT_DEBUG("qtaguid: ctrl_tag(%s): socket->...->f_count=%ld ->sk=%p\n", + input, atomic_long_read(&el_socket->file->f_count), + el_socket->sk); + if (argc < 3) { + acct_tag = make_atag_from_value(0); + } else if (!valid_atag(acct_tag)) { + pr_info("qtaguid: ctrl_tag(%s): invalid tag\n", input); + res = -EINVAL; + goto err_put; + } + CT_DEBUG("qtaguid: ctrl_tag(%s): " + "pid=%u tgid=%u uid=%u euid=%u fsuid=%u " + "in_group=%d in_egroup=%d\n", + input, current->pid, current->tgid, current_uid(), + current_euid(), current_fsuid(), + in_group_p(proc_ctrl_write_gid), + in_egroup_p(proc_ctrl_write_gid)); + if (argc < 4) { + uid = current_fsuid(); + } else if (!can_impersonate_uid(uid)) { + pr_info("qtaguid: ctrl_tag(%s): " + "insufficient priv from pid=%u tgid=%u uid=%u\n", + input, current->pid, current->tgid, current_fsuid()); + res = -EPERM; + goto err_put; + } + full_tag = combine_atag_with_uid(acct_tag, uid); + + spin_lock_bh(&sock_tag_list_lock); + sock_tag_entry = get_sock_stat_nl(el_socket->sk); + tag_ref_entry = get_tag_ref(full_tag, &uid_tag_data_entry); + if (IS_ERR(tag_ref_entry)) { + res = PTR_ERR(tag_ref_entry); + spin_unlock_bh(&sock_tag_list_lock); + goto err_put; + } + tag_ref_entry->num_sock_tags++; + if (sock_tag_entry) { + struct tag_ref *prev_tag_ref_entry; + + CT_DEBUG("qtaguid: ctrl_tag(%s): retag for sk=%p " + "st@%p ...->f_count=%ld\n", + input, el_socket->sk, sock_tag_entry, + atomic_long_read(&el_socket->file->f_count)); + /* + * This is a re-tagging, so release the sock_fd that was + * locked at the time of the 1st tagging. + * There is still the ref from this call's sockfd_lookup() so + * it can be done within the spinlock. + */ + sockfd_put(sock_tag_entry->socket); + prev_tag_ref_entry = lookup_tag_ref(sock_tag_entry->tag, + &uid_tag_data_entry); + BUG_ON(IS_ERR_OR_NULL(prev_tag_ref_entry)); + BUG_ON(prev_tag_ref_entry->num_sock_tags <= 0); + prev_tag_ref_entry->num_sock_tags--; + sock_tag_entry->tag = full_tag; + } else { + CT_DEBUG("qtaguid: ctrl_tag(%s): newtag for sk=%p\n", + input, el_socket->sk); + sock_tag_entry = kzalloc(sizeof(*sock_tag_entry), + GFP_ATOMIC); + if (!sock_tag_entry) { + pr_err("qtaguid: ctrl_tag(%s): " + "socket tag alloc failed\n", + input); + spin_unlock_bh(&sock_tag_list_lock); + res = -ENOMEM; + goto err_tag_unref_put; + } + sock_tag_entry->sk = el_socket->sk; + sock_tag_entry->socket = el_socket; + sock_tag_entry->pid = current->tgid; + sock_tag_entry->tag = combine_atag_with_uid(acct_tag, + uid); + spin_lock_bh(&uid_tag_data_tree_lock); + pqd_entry = proc_qtu_data_tree_search( + &proc_qtu_data_tree, current->tgid); + /* + * TODO: remove if, and start failing. + * At first, we want to catch user-space code that is not + * opening the /dev/xt_qtaguid. + */ + if (IS_ERR_OR_NULL(pqd_entry)) + pr_warn_once( + "qtaguid: %s(): " + "User space forgot to open /dev/xt_qtaguid? 
" + "pid=%u tgid=%u uid=%u\n", __func__, + current->pid, current->tgid, + current_fsuid()); + else + list_add(&sock_tag_entry->list, + &pqd_entry->sock_tag_list); + spin_unlock_bh(&uid_tag_data_tree_lock); + + sock_tag_tree_insert(sock_tag_entry, &sock_tag_tree); + atomic64_inc(&qtu_events.sockets_tagged); + } + spin_unlock_bh(&sock_tag_list_lock); + /* We keep the ref to the socket (file) until it is untagged */ + CT_DEBUG("qtaguid: ctrl_tag(%s): done st@%p ...->f_count=%ld\n", + input, sock_tag_entry, + atomic_long_read(&el_socket->file->f_count)); + return 0; + +err_tag_unref_put: + BUG_ON(tag_ref_entry->num_sock_tags <= 0); + tag_ref_entry->num_sock_tags--; + free_tag_ref_from_utd_entry(tag_ref_entry, uid_tag_data_entry); +err_put: + CT_DEBUG("qtaguid: ctrl_tag(%s): done. ...->f_count=%ld\n", + input, atomic_long_read(&el_socket->file->f_count) - 1); + /* Release the sock_fd that was grabbed by sockfd_lookup(). */ + sockfd_put(el_socket); + return res; + +err: + CT_DEBUG("qtaguid: ctrl_tag(%s): done.\n", input); + return res; +} + +static int ctrl_cmd_untag(const char *input) +{ + char cmd; + int sock_fd = 0; + struct socket *el_socket; + int res, argc; + struct sock_tag *sock_tag_entry; + struct tag_ref *tag_ref_entry; + struct uid_tag_data *utd_entry; + struct proc_qtu_data *pqd_entry; + + argc = sscanf(input, "%c %d", &cmd, &sock_fd); + CT_DEBUG("qtaguid: ctrl_untag(%s): argc=%d cmd=%c sock_fd=%d\n", + input, argc, cmd, sock_fd); + if (argc < 2) { + res = -EINVAL; + goto err; + } + el_socket = sockfd_lookup(sock_fd, &res); /* This locks the file */ + if (!el_socket) { + pr_info("qtaguid: ctrl_untag(%s): failed to lookup" + " sock_fd=%d err=%d\n", input, sock_fd, res); + goto err; + } + CT_DEBUG("qtaguid: ctrl_untag(%s): socket->...->f_count=%ld ->sk=%p\n", + input, atomic_long_read(&el_socket->file->f_count), + el_socket->sk); + spin_lock_bh(&sock_tag_list_lock); + sock_tag_entry = get_sock_stat_nl(el_socket->sk); + if (!sock_tag_entry) { + spin_unlock_bh(&sock_tag_list_lock); + res = -EINVAL; + goto err_put; + } + /* + * The socket already belongs to the current process + * so it can do whatever it wants to it. + */ + rb_erase(&sock_tag_entry->sock_node, &sock_tag_tree); + + tag_ref_entry = lookup_tag_ref(sock_tag_entry->tag, &utd_entry); + BUG_ON(!tag_ref_entry); + BUG_ON(tag_ref_entry->num_sock_tags <= 0); + spin_lock_bh(&uid_tag_data_tree_lock); + pqd_entry = proc_qtu_data_tree_search( + &proc_qtu_data_tree, current->tgid); + /* + * TODO: remove if, and start failing. + * At first, we want to catch user-space code that is not + * opening the /dev/xt_qtaguid. + */ + if (IS_ERR_OR_NULL(pqd_entry)) + pr_warn_once("qtaguid: %s(): " + "User space forgot to open /dev/xt_qtaguid? " + "pid=%u tgid=%u uid=%u\n", __func__, + current->pid, current->tgid, current_fsuid()); + else + list_del(&sock_tag_entry->list); + spin_unlock_bh(&uid_tag_data_tree_lock); + /* + * We don't free tag_ref from the utd_entry here, + * only during a cmd_delete(). + */ + tag_ref_entry->num_sock_tags--; + spin_unlock_bh(&sock_tag_list_lock); + /* + * Release the sock_fd that was grabbed at tag time, + * and once more for the sockfd_lookup() here. + */ + sockfd_put(sock_tag_entry->socket); + CT_DEBUG("qtaguid: ctrl_untag(%s): done. st@%p ...->f_count=%ld\n", + input, sock_tag_entry, + atomic_long_read(&el_socket->file->f_count) - 1); + sockfd_put(el_socket); + + kfree(sock_tag_entry); + atomic64_inc(&qtu_events.sockets_untagged); + + return 0; + +err_put: + CT_DEBUG("qtaguid: ctrl_untag(%s): done. 
socket->...->f_count=%ld\n", + input, atomic_long_read(&el_socket->file->f_count) - 1); + /* Release the sock_fd that was grabbed by sockfd_lookup(). */ + sockfd_put(el_socket); + return res; + +err: + CT_DEBUG("qtaguid: ctrl_untag(%s): done.\n", input); + return res; +} + +static int qtaguid_ctrl_parse(const char *input, int count) +{ + char cmd; + int res; + + cmd = input[0]; + /* Collect params for commands */ + switch (cmd) { + case 'd': + res = ctrl_cmd_delete(input); + break; + + case 's': + res = ctrl_cmd_counter_set(input); + break; + + case 't': + res = ctrl_cmd_tag(input); + break; + + case 'u': + res = ctrl_cmd_untag(input); + break; + + default: + res = -EINVAL; + goto err; + } + if (!res) + res = count; +err: + CT_DEBUG("qtaguid: ctrl(%s): res=%d\n", input, res); + return res; +} + +#define MAX_QTAGUID_CTRL_INPUT_LEN 255 +static int qtaguid_ctrl_proc_write(struct file *file, const char __user *buffer, + unsigned long count, void *data) +{ + char input_buf[MAX_QTAGUID_CTRL_INPUT_LEN]; + + if (unlikely(module_passive)) + return count; + + if (count >= MAX_QTAGUID_CTRL_INPUT_LEN) + return -EINVAL; + + if (copy_from_user(input_buf, buffer, count)) + return -EFAULT; + + input_buf[count] = '\0'; + return qtaguid_ctrl_parse(input_buf, count); +} + +struct proc_print_info { + char *outp; + char **num_items_returned; + struct iface_stat *iface_entry; + struct tag_stat *ts_entry; + int item_index; + int items_to_skip; + int char_count; +}; + +static int pp_stats_line(struct proc_print_info *ppi, int cnt_set) +{ + int len; + struct data_counters *cnts; + + if (!ppi->item_index) { + if (ppi->item_index++ < ppi->items_to_skip) + return 0; + len = snprintf(ppi->outp, ppi->char_count, + "idx iface acct_tag_hex uid_tag_int cnt_set " + "rx_bytes rx_packets " + "tx_bytes tx_packets " + "rx_tcp_bytes rx_tcp_packets " + "rx_udp_bytes rx_udp_packets " + "rx_other_bytes rx_other_packets " + "tx_tcp_bytes tx_tcp_packets " + "tx_udp_bytes tx_udp_packets " + "tx_other_bytes tx_other_packets\n"); + } else { + tag_t tag = ppi->ts_entry->tn.tag; + uid_t stat_uid = get_uid_from_tag(tag); + + if (!can_read_other_uid_stats(stat_uid)) { + CT_DEBUG("qtaguid: stats line: " + "%s 0x%llx %u: insufficient priv " + "from pid=%u tgid=%u uid=%u\n", + ppi->iface_entry->ifname, + get_atag_from_tag(tag), stat_uid, + current->pid, current->tgid, current_fsuid()); + return 0; + } + if (ppi->item_index++ < ppi->items_to_skip) + return 0; + cnts = &ppi->ts_entry->counters; + len = snprintf( + ppi->outp, ppi->char_count, + "%d %s 0x%llx %u %u " + "%llu %llu " + "%llu %llu " + "%llu %llu " + "%llu %llu " + "%llu %llu " + "%llu %llu " + "%llu %llu " + "%llu %llu\n", + ppi->item_index, + ppi->iface_entry->ifname, + get_atag_from_tag(tag), + stat_uid, + cnt_set, + dc_sum_bytes(cnts, cnt_set, IFS_RX), + dc_sum_packets(cnts, cnt_set, IFS_RX), + dc_sum_bytes(cnts, cnt_set, IFS_TX), + dc_sum_packets(cnts, cnt_set, IFS_TX), + cnts->bpc[cnt_set][IFS_RX][IFS_TCP].bytes, + cnts->bpc[cnt_set][IFS_RX][IFS_TCP].packets, + cnts->bpc[cnt_set][IFS_RX][IFS_UDP].bytes, + cnts->bpc[cnt_set][IFS_RX][IFS_UDP].packets, + cnts->bpc[cnt_set][IFS_RX][IFS_PROTO_OTHER].bytes, + cnts->bpc[cnt_set][IFS_RX][IFS_PROTO_OTHER].packets, + cnts->bpc[cnt_set][IFS_TX][IFS_TCP].bytes, + cnts->bpc[cnt_set][IFS_TX][IFS_TCP].packets, + cnts->bpc[cnt_set][IFS_TX][IFS_UDP].bytes, + cnts->bpc[cnt_set][IFS_TX][IFS_UDP].packets, + cnts->bpc[cnt_set][IFS_TX][IFS_PROTO_OTHER].bytes, + cnts->bpc[cnt_set][IFS_TX][IFS_PROTO_OTHER].packets); + } + return len; +} + +static bool 
pp_sets(struct proc_print_info *ppi) +{ + int len; + int counter_set; + for (counter_set = 0; counter_set < IFS_MAX_COUNTER_SETS; + counter_set++) { + len = pp_stats_line(ppi, counter_set); + if (len >= ppi->char_count) { + *ppi->outp = '\0'; + return false; + } + if (len) { + ppi->outp += len; + ppi->char_count -= len; + (*ppi->num_items_returned)++; + } + } + return true; +} + +/* + * Procfs reader to get all tag stats using style "1)" as described in + * fs/proc/generic.c + * Groups all protocols tx/rx bytes. + */ +static int qtaguid_stats_proc_read(char *page, char **num_items_returned, + off_t items_to_skip, int char_count, int *eof, + void *data) +{ + struct proc_print_info ppi; + int len; + + ppi.outp = page; + ppi.item_index = 0; + ppi.char_count = char_count; + ppi.num_items_returned = num_items_returned; + ppi.items_to_skip = items_to_skip; + + if (unlikely(module_passive)) { + len = pp_stats_line(&ppi, 0); + /* The header should always be shorter than the buffer. */ + BUG_ON(len >= ppi.char_count); + (*num_items_returned)++; + *eof = 1; + return len; + } + + CT_DEBUG("qtaguid:proc stats page=%p *num_items_returned=%p off=%ld " + "char_count=%d *eof=%d\n", page, *num_items_returned, + items_to_skip, char_count, *eof); + + if (*eof) + return 0; + + /* The idx is there to help debug when things go belly up. */ + len = pp_stats_line(&ppi, 0); + /* Don't advance the outp unless the whole line was printed */ + if (len >= ppi.char_count) { + *ppi.outp = '\0'; + return ppi.outp - page; + } + if (len) { + ppi.outp += len; + ppi.char_count -= len; + (*num_items_returned)++; + } + + spin_lock_bh(&iface_stat_list_lock); + list_for_each_entry(ppi.iface_entry, &iface_stat_list, list) { + struct rb_node *node; + spin_lock_bh(&ppi.iface_entry->tag_stat_list_lock); + for (node = rb_first(&ppi.iface_entry->tag_stat_tree); + node; + node = rb_next(node)) { + ppi.ts_entry = rb_entry(node, struct tag_stat, tn.node); + if (!pp_sets(&ppi)) { + spin_unlock_bh( + &ppi.iface_entry->tag_stat_list_lock); + spin_unlock_bh(&iface_stat_list_lock); + return ppi.outp - page; + } + } + spin_unlock_bh(&ppi.iface_entry->tag_stat_list_lock); + } + spin_unlock_bh(&iface_stat_list_lock); + + *eof = 1; + return ppi.outp - page; +} + +/*------------------------------------------*/ +static int qtudev_open(struct inode *inode, struct file *file) +{ + struct uid_tag_data *utd_entry; + struct proc_qtu_data *pqd_entry; + struct proc_qtu_data *new_pqd_entry; + int res; + bool utd_entry_found; + + if (unlikely(qtu_proc_handling_passive)) + return 0; + + DR_DEBUG("qtaguid: qtudev_open(): pid=%u tgid=%u uid=%u\n", + current->pid, current->tgid, current_fsuid()); + + spin_lock_bh(&uid_tag_data_tree_lock); + + /* Look for existing uid data, or alloc one. 
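+ * get_uid_data() is expected to return the existing node or allocate
+ * a fresh one, reporting which case via utd_entry_found so that the
+ * error path below knows whether it may free it.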
*/ + utd_entry = get_uid_data(current_fsuid(), &utd_entry_found); + if (IS_ERR_OR_NULL(utd_entry)) { + res = PTR_ERR(utd_entry); + goto err; + } + + /* Look for existing PID based proc_data */ + pqd_entry = proc_qtu_data_tree_search(&proc_qtu_data_tree, + current->tgid); + if (pqd_entry) { + pr_err("qtaguid: qtudev_open(): %u/%u %u " + "%s already opened\n", + current->pid, current->tgid, current_fsuid(), + QTU_DEV_NAME); + res = -EBUSY; + goto err_unlock_free_utd; + } + + new_pqd_entry = kzalloc(sizeof(*new_pqd_entry), GFP_ATOMIC); + if (!new_pqd_entry) { + pr_err("qtaguid: qtudev_open(): %u/%u %u: " + "proc data alloc failed\n", + current->pid, current->tgid, current_fsuid()); + res = -ENOMEM; + goto err_unlock_free_utd; + } + new_pqd_entry->pid = current->tgid; + INIT_LIST_HEAD(&new_pqd_entry->sock_tag_list); + new_pqd_entry->parent_tag_data = utd_entry; + utd_entry->num_pqd++; + + proc_qtu_data_tree_insert(new_pqd_entry, + &proc_qtu_data_tree); + + spin_unlock_bh(&uid_tag_data_tree_lock); + DR_DEBUG("qtaguid: tracking data for uid=%u in pqd=%p\n", + current_fsuid(), new_pqd_entry); + file->private_data = new_pqd_entry; + return 0; + +err_unlock_free_utd: + if (!utd_entry_found) { + rb_erase(&utd_entry->node, &uid_tag_data_tree); + kfree(utd_entry); + } + spin_unlock_bh(&uid_tag_data_tree_lock); +err: + return res; +} + +static int qtudev_release(struct inode *inode, struct file *file) +{ + struct proc_qtu_data *pqd_entry = file->private_data; + struct uid_tag_data *utd_entry = pqd_entry->parent_tag_data; + struct sock_tag *st_entry; + struct rb_root st_to_free_tree = RB_ROOT; + struct list_head *entry, *next; + struct tag_ref *tr; + + if (unlikely(qtu_proc_handling_passive)) + return 0; + + /* + * Do not trust the current->pid, it might just be a kworker cleaning + * up after a dead proc. + */ + DR_DEBUG("qtaguid: qtudev_release(): " + "pid=%u tgid=%u uid=%u " + "pqd_entry=%p->pid=%u utd_entry=%p->active_tags=%d\n", + current->pid, current->tgid, pqd_entry->parent_tag_data->uid, + pqd_entry, pqd_entry->pid, utd_entry, + utd_entry->num_active_tags); + + spin_lock_bh(&sock_tag_list_lock); + spin_lock_bh(&uid_tag_data_tree_lock); + + list_for_each_safe(entry, next, &pqd_entry->sock_tag_list) { + st_entry = list_entry(entry, struct sock_tag, list); + DR_DEBUG("qtaguid: %s(): " + "erase sock_tag=%p->sk=%p pid=%u tgid=%u uid=%u\n", + __func__, + st_entry, st_entry->sk, + current->pid, current->tgid, + pqd_entry->parent_tag_data->uid); + + utd_entry = uid_tag_data_tree_search( + &uid_tag_data_tree, + get_uid_from_tag(st_entry->tag)); + BUG_ON(IS_ERR_OR_NULL(utd_entry)); + DR_DEBUG("qtaguid: %s(): " + "looking for tag=0x%llx in utd_entry=%p\n", __func__, + st_entry->tag, utd_entry); + tr = tag_ref_tree_search(&utd_entry->tag_ref_tree, + st_entry->tag); + BUG_ON(!tr); + BUG_ON(tr->num_sock_tags <= 0); + tr->num_sock_tags--; + free_tag_ref_from_utd_entry(tr, utd_entry); + + rb_erase(&st_entry->sock_node, &sock_tag_tree); + list_del(&st_entry->list); + /* Can't sockfd_put() within spinlock, do it later. */ + sock_tag_tree_insert(st_entry, &st_to_free_tree); + + /* + * Try to free the utd_entry if no other proc_qtu_data is + * using it (num_pqd is 0) and it doesn't have active tags + * (num_active_tags is 0). 
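+ * put_utd_entry() is assumed to perform that check itself; it runs
+ * once per sock_tag released here and once more below for this
+ * pqd's own parent_tag_data.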
+ */ + put_utd_entry(utd_entry); + } + + rb_erase(&pqd_entry->node, &proc_qtu_data_tree); + BUG_ON(pqd_entry->parent_tag_data->num_pqd < 1); + pqd_entry->parent_tag_data->num_pqd--; + put_utd_entry(pqd_entry->parent_tag_data); + kfree(pqd_entry); + file->private_data = NULL; + + spin_unlock_bh(&uid_tag_data_tree_lock); + spin_unlock_bh(&sock_tag_list_lock); + + + sock_tag_tree_erase(&st_to_free_tree); + + prdebug_full_state(0, "%s(): pid=%u tgid=%u", __func__, + current->pid, current->tgid); + return 0; +} + +/*------------------------------------------*/ +static const struct file_operations qtudev_fops = { + .owner = THIS_MODULE, + .open = qtudev_open, + .release = qtudev_release, +}; + +static struct miscdevice qtu_device = { + .minor = MISC_DYNAMIC_MINOR, + .name = QTU_DEV_NAME, + .fops = &qtudev_fops, + /* How sad it doesn't allow for defaults: .mode = S_IRUGO | S_IWUSR */ +}; + +/*------------------------------------------*/ +static int __init qtaguid_proc_register(struct proc_dir_entry **res_procdir) +{ + int ret; + *res_procdir = proc_mkdir(module_procdirname, init_net.proc_net); + if (!*res_procdir) { + pr_err("qtaguid: failed to create proc/.../xt_qtaguid\n"); + ret = -ENOMEM; + goto no_dir; + } + + xt_qtaguid_ctrl_file = create_proc_entry("ctrl", proc_ctrl_perms, + *res_procdir); + if (!xt_qtaguid_ctrl_file) { + pr_err("qtaguid: failed to create xt_qtaguid/ctrl " + "file\n"); + ret = -ENOMEM; + goto no_ctrl_entry; + } + xt_qtaguid_ctrl_file->read_proc = qtaguid_ctrl_proc_read; + xt_qtaguid_ctrl_file->write_proc = qtaguid_ctrl_proc_write; + + xt_qtaguid_stats_file = create_proc_entry("stats", proc_stats_perms, + *res_procdir); + if (!xt_qtaguid_stats_file) { + pr_err("qtaguid: failed to create xt_qtaguid/stats " + "file\n"); + ret = -ENOMEM; + goto no_stats_entry; + } + xt_qtaguid_stats_file->read_proc = qtaguid_stats_proc_read; + /* + * TODO: add support for counter hacking + * xt_qtaguid_stats_file->write_proc = qtaguid_stats_proc_write; + */ + return 0; + +no_stats_entry: + remove_proc_entry("ctrl", *res_procdir); +no_ctrl_entry: + remove_proc_entry("xt_qtaguid", NULL); +no_dir: + return ret; +} + +static struct xt_match qtaguid_mt_reg __read_mostly = { + /* + * This module masquerades as the "owner" module so that iptables + * tools can deal with it. + */ + .name = "owner", + .revision = 1, + .family = NFPROTO_UNSPEC, + .match = qtaguid_mt, + .matchsize = sizeof(struct xt_qtaguid_match_info), + .me = THIS_MODULE, +}; + +static int __init qtaguid_mt_init(void) +{ + if (qtaguid_proc_register(&xt_qtaguid_procdir) + || iface_stat_init(xt_qtaguid_procdir) + || xt_register_match(&qtaguid_mt_reg) + || misc_register(&qtu_device)) + return -1; + return 0; +} + +/* + * TODO: allow unloading of the module. + * For now stats are permanent. + * Kconfig forces 'y/n' and never an 'm'. + */ + +module_init(qtaguid_mt_init); +MODULE_AUTHOR("jpa <jpa@google.com>"); +MODULE_DESCRIPTION("Xtables: socket owner+tag matching and associated stats"); +MODULE_LICENSE("GPL"); +MODULE_ALIAS("ipt_owner"); +MODULE_ALIAS("ip6t_owner"); +MODULE_ALIAS("ipt_qtaguid"); +MODULE_ALIAS("ip6t_qtaguid"); diff --git a/net/netfilter/xt_qtaguid_internal.h b/net/netfilter/xt_qtaguid_internal.h new file mode 100644 index 000000000000..02479d6d317d --- /dev/null +++ b/net/netfilter/xt_qtaguid_internal.h @@ -0,0 +1,330 @@ +/* + * Kernel iptables module to track stats for packets based on user tags.
+ * + * (C) 2011 Google, Inc + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ +#ifndef __XT_QTAGUID_INTERNAL_H__ +#define __XT_QTAGUID_INTERNAL_H__ + +#include <linux/types.h> +#include <linux/rbtree.h> +#include <linux/spinlock_types.h> +#include <linux/workqueue.h> + +/* Iface handling */ +#define IDEBUG_MASK (1<<0) +/* Iptable Matching. Per packet. */ +#define MDEBUG_MASK (1<<1) +/* Red-black tree handling. Per packet. */ +#define RDEBUG_MASK (1<<2) +/* procfs ctrl/stats handling */ +#define CDEBUG_MASK (1<<3) +/* dev and resource tracking */ +#define DDEBUG_MASK (1<<4) + +/* E.g (IDEBUG_MASK | CDEBUG_MASK | DDEBUG_MASK) */ +#define DEFAULT_DEBUG_MASK 0 + +/* + * (Un)Define these *DEBUG to compile out/in the pr_debug calls. + * All undef: text size ~ 0x3030; all def: ~ 0x4404. + */ +#define IDEBUG +#define MDEBUG +#define RDEBUG +#define CDEBUG +#define DDEBUG + +#define MSK_DEBUG(mask, ...) do { \ + if (unlikely(qtaguid_debug_mask & (mask))) \ + pr_debug(__VA_ARGS__); \ + } while (0) +#ifdef IDEBUG +#define IF_DEBUG(...) MSK_DEBUG(IDEBUG_MASK, __VA_ARGS__) +#else +#define IF_DEBUG(...) no_printk(__VA_ARGS__) +#endif +#ifdef MDEBUG +#define MT_DEBUG(...) MSK_DEBUG(MDEBUG_MASK, __VA_ARGS__) +#else +#define MT_DEBUG(...) no_printk(__VA_ARGS__) +#endif +#ifdef RDEBUG +#define RB_DEBUG(...) MSK_DEBUG(RDEBUG_MASK, __VA_ARGS__) +#else +#define RB_DEBUG(...) no_printk(__VA_ARGS__) +#endif +#ifdef CDEBUG +#define CT_DEBUG(...) MSK_DEBUG(CDEBUG_MASK, __VA_ARGS__) +#else +#define CT_DEBUG(...) no_printk(__VA_ARGS__) +#endif +#ifdef DDEBUG +#define DR_DEBUG(...) MSK_DEBUG(DDEBUG_MASK, __VA_ARGS__) +#else +#define DR_DEBUG(...) no_printk(__VA_ARGS__) +#endif + +extern uint qtaguid_debug_mask; + +/*---------------------------------------------------------------------------*/ +/* + * Tags: + * + * They represent what the data usage counters will be tracked against. + * By default a tag is just based on the UID. + * The UID is used as the base for policing, and can not be ignored. + * So a tag will always at least represent a UID (uid_tag). + * + * A tag can be augmented with an "accounting tag" which is associated + * with a UID. + * User space can set the acct_tag portion of the tag which is then used + * with sockets: all data belonging to that socket will be counted against the + * tag. The policing is then based on the tag's uid_tag portion, + * and stats are collected for the acct_tag portion separately. + * + * There could be + * a: {acct_tag=1, uid_tag=10003} + * b: {acct_tag=2, uid_tag=10003} + * c: {acct_tag=3, uid_tag=10003} + * d: {acct_tag=0, uid_tag=10003} + * a, b, and c represent tags associated with specific sockets. + * d is for the totals for that uid, including all untagged traffic. + * Typically d is used with policing/quota rules. + * + * We want tag_t big enough to distinguish uid_t and acct_tag. + * It might become a struct if needed. + * Nothing should be using it as an int. + */ +typedef uint64_t tag_t; /* Only used via accessors */ + +#define TAG_UID_MASK 0xFFFFFFFFULL +#define TAG_ACCT_MASK (~0xFFFFFFFFULL) + +static inline int tag_compare(tag_t t1, tag_t t2) +{ + return t1 < t2 ? -1 : t1 == t2 ? 
0 : 1; +} + +static inline tag_t combine_atag_with_uid(tag_t acct_tag, uid_t uid) +{ + return acct_tag | uid; +} +static inline tag_t make_tag_from_uid(uid_t uid) +{ + return uid; +} +static inline uid_t get_uid_from_tag(tag_t tag) +{ + return tag & TAG_UID_MASK; +} +static inline tag_t get_utag_from_tag(tag_t tag) +{ + return tag & TAG_UID_MASK; +} +static inline tag_t get_atag_from_tag(tag_t tag) +{ + return tag & TAG_ACCT_MASK; +} + +static inline bool valid_atag(tag_t tag) +{ + return !(tag & TAG_UID_MASK); +} +static inline tag_t make_atag_from_value(uint32_t value) +{ + return (uint64_t)value << 32; +} +/*---------------------------------------------------------------------------*/ + +/* + * Maximum number of socket tags that a UID is allowed to have active. + * Multiple processes belonging to the same UID contribute towards this limit. + * Special UIDs that can impersonate a UID also contribute (e.g. download + * manager, ...) + */ +#define DEFAULT_MAX_SOCK_TAGS 1024 + +/* + * For now we only track 2 sets of counters. + * The default set is 0. + * Userspace can activate another set for a given uid being tracked. + */ +#define IFS_MAX_COUNTER_SETS 2 + +enum ifs_tx_rx { + IFS_TX, + IFS_RX, + IFS_MAX_DIRECTIONS +}; + +/* For now, TCP, UDP, the rest */ +enum ifs_proto { + IFS_TCP, + IFS_UDP, + IFS_PROTO_OTHER, + IFS_MAX_PROTOS +}; + +struct byte_packet_counters { + uint64_t bytes; + uint64_t packets; +}; + +struct data_counters { + struct byte_packet_counters bpc[IFS_MAX_COUNTER_SETS][IFS_MAX_DIRECTIONS][IFS_MAX_PROTOS]; +}; + +/* Generic tag-based nodes used as a base for rb_tree ops */ +struct tag_node { + struct rb_node node; + tag_t tag; +}; + +struct tag_stat { + struct tag_node tn; + struct data_counters counters; + /* + * If this tag is acct_tag based, we need to count against the + * matching parent uid_tag. + */ + struct data_counters *parent_counters; +}; + +struct iface_stat { + struct list_head list; /* in iface_stat_list */ + char *ifname; + bool active; + /* net_dev is only valid for active iface_stat */ + struct net_device *net_dev; + + struct byte_packet_counters totals[IFS_MAX_DIRECTIONS]; + /* + * We keep the last_known, because some devices reset their counters + * just before NETDEV_UP, while some will reset just before + * NETDEV_REGISTER (which is more normal). + * So now, if the device didn't do a NETDEV_UNREGISTER and we see + * its current dev stats smaller than what was previously known, we + * assume an UNREGISTER and just use the last_known. + */ + struct byte_packet_counters last_known[IFS_MAX_DIRECTIONS]; + /* last_known is usable when last_known_valid is true */ + bool last_known_valid; + + struct proc_dir_entry *proc_ptr; + + struct rb_root tag_stat_tree; + spinlock_t tag_stat_list_lock; +}; + +/* This is needed to create proc_dir_entries from atomic context. */ +struct iface_stat_work { + struct work_struct iface_work; + struct iface_stat *iface_entry; +}; + +/* + * Track tag that this socket is transferring data for, and not necessarily + * the uid that owns the socket. + * This is the tag against which tag_stat.counters will be billed. + * These structs need to be looked up by sock and pid.
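+ *
+ * Worked example of the tag value stored here, using the accessors
+ * above (uid and acct value hypothetical):
+ *   make_atag_from_value(1)               == 0x0000000100000000
+ *   combine_atag_with_uid(atag, 10003)    == 0x0000000100002713
+ *   get_uid_from_tag(0x0000000100002713)  == 10003
+ *   get_atag_from_tag(0x0000000100002713) == 0x0000000100000000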
+ */ +struct sock_tag { + struct rb_node sock_node; + struct sock *sk; /* Only used as a number, never dereferenced */ + /* The socket is needed for sockfd_put() */ + struct socket *socket; + /* Used to associate with a given pid */ + struct list_head list; /* in proc_qtu_data.sock_tag_list */ + pid_t pid; + + tag_t tag; +}; + +struct qtaguid_event_counts { + /* Various successful events */ + atomic64_t sockets_tagged; + atomic64_t sockets_untagged; + atomic64_t counter_set_changes; + atomic64_t delete_cmds; + atomic64_t iface_events; /* Number of NETDEV_* events handled */ + + atomic64_t match_calls; /* Number of times iptables called mt */ + /* + * match_found_sk_*: numbers related to the netfilter matching + * function finding a sock for the sk_buff. + * Total skbs processed is sum(match_found*). + */ + atomic64_t match_found_sk; /* An sk was already in the sk_buff. */ + /* The connection tracker had or didn't have the sk. */ + atomic64_t match_found_sk_in_ct; + atomic64_t match_found_no_sk_in_ct; + /* + * No sk could be found. No apparent owner. Could happen with + * unsolicited traffic. + */ + atomic64_t match_no_sk; + /* + * The file ptr in the sk_socket wasn't there. + * This might happen for traffic while the socket is being closed. + */ + atomic64_t match_no_sk_file; +}; + +/* Track the set active_set for the given tag. */ +struct tag_counter_set { + struct tag_node tn; + int active_set; +}; + +/*----------------------------------------------*/ +/* + * The qtu uid data is used to track resources that are created directly or + * indirectly by processes (uid tracked). + * It is shared by the processes with the same uid. + * Some of the resources will be counted to prevent further rogue allocations, + * some will need freeing once the owner process (uid) exits. + */ +struct uid_tag_data { + struct rb_node node; + uid_t uid; + + /* + * For the uid, how many accounting tags have been set. + */ + int num_active_tags; + /* Track the number of proc_qtu_data that reference it */ + int num_pqd; + struct rb_root tag_ref_tree; + /* No tag_node_tree_lock; use uid_tag_data_tree_lock */ +}; + +struct tag_ref { + struct tag_node tn; + + /* + * This tracks the number of active sockets that have a tag on them + * which matches this tag_ref.tn.tag. + * A tag ref can live on after the sockets are untagged. + * A tag ref can only be removed during a tag delete command. + */ + int num_sock_tags; +}; + +struct proc_qtu_data { + struct rb_node node; + pid_t pid; + + struct uid_tag_data *parent_tag_data; + + /* Tracks the sock_tags that need freeing upon this proc's death */ + struct list_head sock_tag_list; + /* No spinlock_t sock_tag_list_lock; use the global one. */ +}; + +/*----------------------------------------------*/ +#endif /* ifndef __XT_QTAGUID_INTERNAL_H__ */ diff --git a/net/netfilter/xt_qtaguid_print.c b/net/netfilter/xt_qtaguid_print.c new file mode 100644 index 000000000000..39176785c91f --- /dev/null +++ b/net/netfilter/xt_qtaguid_print.c @@ -0,0 +1,556 @@ +/* + * Pretty printing Support for iptables xt_qtaguid module. + * + * (C) 2011 Google, Inc + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +/* + * Most of the functions in this file just waste time if DEBUG is not defined. + * The matching xt_qtaguid_print.h provides empty static inline funcs if the + * needed debug flags are not defined.
+ * Those funcs that fail to allocate memory will panic as there is no need to + * hobble along just pretending to do the requested work. + */ + +#define DEBUG + +#include <linux/fs.h> +#include <linux/gfp.h> +#include <linux/net.h> +#include <linux/rbtree.h> +#include <linux/slab.h> +#include <linux/spinlock_types.h> + + +#include "xt_qtaguid_internal.h" +#include "xt_qtaguid_print.h" + +#ifdef DDEBUG + +static void _bug_on_err_or_null(void *ptr) +{ + if (IS_ERR_OR_NULL(ptr)) { + pr_err("qtaguid: kmalloc failed\n"); + BUG(); + } +} + +char *pp_tag_t(tag_t *tag) +{ + char *res; + + if (!tag) + res = kasprintf(GFP_ATOMIC, "tag_t@null{}"); + else + res = kasprintf(GFP_ATOMIC, + "tag_t@%p{tag=0x%llx, uid=%u}", + tag, *tag, get_uid_from_tag(*tag)); + _bug_on_err_or_null(res); + return res; +} + +char *pp_data_counters(struct data_counters *dc, bool showValues) +{ + char *res; + + if (!dc) + res = kasprintf(GFP_ATOMIC, "data_counters@null{}"); + else if (showValues) + res = kasprintf( + GFP_ATOMIC, "data_counters@%p{" + "set0{" + "rx{" + "tcp{b=%llu, p=%llu}, " + "udp{b=%llu, p=%llu}," + "other{b=%llu, p=%llu}}, " + "tx{" + "tcp{b=%llu, p=%llu}, " + "udp{b=%llu, p=%llu}," + "other{b=%llu, p=%llu}}}, " + "set1{" + "rx{" + "tcp{b=%llu, p=%llu}, " + "udp{b=%llu, p=%llu}," + "other{b=%llu, p=%llu}}, " + "tx{" + "tcp{b=%llu, p=%llu}, " + "udp{b=%llu, p=%llu}," + "other{b=%llu, p=%llu}}}}", + dc, + dc->bpc[0][IFS_RX][IFS_TCP].bytes, + dc->bpc[0][IFS_RX][IFS_TCP].packets, + dc->bpc[0][IFS_RX][IFS_UDP].bytes, + dc->bpc[0][IFS_RX][IFS_UDP].packets, + dc->bpc[0][IFS_RX][IFS_PROTO_OTHER].bytes, + dc->bpc[0][IFS_RX][IFS_PROTO_OTHER].packets, + dc->bpc[0][IFS_TX][IFS_TCP].bytes, + dc->bpc[0][IFS_TX][IFS_TCP].packets, + dc->bpc[0][IFS_TX][IFS_UDP].bytes, + dc->bpc[0][IFS_TX][IFS_UDP].packets, + dc->bpc[0][IFS_TX][IFS_PROTO_OTHER].bytes, + dc->bpc[0][IFS_TX][IFS_PROTO_OTHER].packets, + dc->bpc[1][IFS_RX][IFS_TCP].bytes, + dc->bpc[1][IFS_RX][IFS_TCP].packets, + dc->bpc[1][IFS_RX][IFS_UDP].bytes, + dc->bpc[1][IFS_RX][IFS_UDP].packets, + dc->bpc[1][IFS_RX][IFS_PROTO_OTHER].bytes, + dc->bpc[1][IFS_RX][IFS_PROTO_OTHER].packets, + dc->bpc[1][IFS_TX][IFS_TCP].bytes, + dc->bpc[1][IFS_TX][IFS_TCP].packets, + dc->bpc[1][IFS_TX][IFS_UDP].bytes, + dc->bpc[1][IFS_TX][IFS_UDP].packets, + dc->bpc[1][IFS_TX][IFS_PROTO_OTHER].bytes, + dc->bpc[1][IFS_TX][IFS_PROTO_OTHER].packets); + else + res = kasprintf(GFP_ATOMIC, "data_counters@%p{...}", dc); + _bug_on_err_or_null(res); + return res; +} + +char *pp_tag_node(struct tag_node *tn) +{ + char *tag_str; + char *res; + + if (!tn) { + res = kasprintf(GFP_ATOMIC, "tag_node@null{}"); + _bug_on_err_or_null(res); + return res; + } + tag_str = pp_tag_t(&tn->tag); + res = kasprintf(GFP_ATOMIC, + "tag_node@%p{tag=%s}", + tn, tag_str); + _bug_on_err_or_null(res); + kfree(tag_str); + return res; +} + +char *pp_tag_ref(struct tag_ref *tr) +{ + char *tn_str; + char *res; + + if (!tr) { + res = kasprintf(GFP_ATOMIC, "tag_ref@null{}"); + _bug_on_err_or_null(res); + return res; + } + tn_str = pp_tag_node(&tr->tn); + res = kasprintf(GFP_ATOMIC, + "tag_ref@%p{%s, num_sock_tags=%d}", + tr, tn_str, tr->num_sock_tags); + _bug_on_err_or_null(res); + kfree(tn_str); + return res; +} + +char *pp_tag_stat(struct tag_stat *ts) +{ + char *tn_str; + char *counters_str; + char *parent_counters_str; + char *res; + + if (!ts) { + res = kasprintf(GFP_ATOMIC, "tag_stat@null{}"); + _bug_on_err_or_null(res); + return res; + } + tn_str = pp_tag_node(&ts->tn); + counters_str = pp_data_counters(&ts->counters, true);
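+ /*
+  * parent_counters may be NULL here: only acct_tag based entries
+  * link a parent (see if_tag_stat_update()), and pp_data_counters()
+  * prints "data_counters@null{}" for a NULL pointer.
+  */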
+ parent_counters_str = pp_data_counters(ts->parent_counters, false); + res = kasprintf(GFP_ATOMIC, + "tag_stat@%p{%s, counters=%s, parent_counters=%s}", + ts, tn_str, counters_str, parent_counters_str); + _bug_on_err_or_null(res); + kfree(tn_str); + kfree(counters_str); + kfree(parent_counters_str); + return res; +} + +char *pp_iface_stat(struct iface_stat *is) +{ + char *res; + if (!is) + res = kasprintf(GFP_ATOMIC, "iface_stat@null{}"); + else + res = kasprintf(GFP_ATOMIC, "iface_stat@%p{" + "list=list_head{...}, " + "ifname=%s, " + "total={rx={bytes=%llu, " + "packets=%llu}, " + "tx={bytes=%llu, " + "packets=%llu}}, " + "last_known_valid=%d, " + "last_known={rx={bytes=%llu, " + "packets=%llu}, " + "tx={bytes=%llu, " + "packets=%llu}}, " + "active=%d, " + "net_dev=%p, " + "proc_ptr=%p, " + "tag_stat_tree=rb_root{...}}", + is, + is->ifname, + is->totals[IFS_RX].bytes, + is->totals[IFS_RX].packets, + is->totals[IFS_TX].bytes, + is->totals[IFS_TX].packets, + is->last_known_valid, + is->last_known[IFS_RX].bytes, + is->last_known[IFS_RX].packets, + is->last_known[IFS_TX].bytes, + is->last_known[IFS_TX].packets, + is->active, + is->net_dev, + is->proc_ptr); + _bug_on_err_or_null(res); + return res; +} + +char *pp_sock_tag(struct sock_tag *st) +{ + char *tag_str; + char *res; + + if (!st) { + res = kasprintf(GFP_ATOMIC, "sock_tag@null{}"); + _bug_on_err_or_null(res); + return res; + } + tag_str = pp_tag_t(&st->tag); + res = kasprintf(GFP_ATOMIC, "sock_tag@%p{" + "sock_node=rb_node{...}, " + "sk=%p socket=%p (f_count=%lu), list=list_head{...}, " + "pid=%u, tag=%s}", + st, st->sk, st->socket, atomic_long_read( + &st->socket->file->f_count), + st->pid, tag_str); + _bug_on_err_or_null(res); + kfree(tag_str); + return res; +} + +char *pp_uid_tag_data(struct uid_tag_data *utd) +{ + char *res; + + if (!utd) + res = kasprintf(GFP_ATOMIC, "uid_tag_data@null{}"); + else + res = kasprintf(GFP_ATOMIC, "uid_tag_data@%p{" + "uid=%u, num_active_acct_tags=%d, " + "num_pqd=%d, " + "tag_node_tree=rb_root{...}, " + "proc_qtu_data_tree=rb_root{...}}", + utd, utd->uid, + utd->num_active_tags, utd->num_pqd); + _bug_on_err_or_null(res); + return res; +} + +char *pp_proc_qtu_data(struct proc_qtu_data *pqd) +{ + char *parent_tag_data_str; + char *res; + + if (!pqd) { + res = kasprintf(GFP_ATOMIC, "proc_qtu_data@null{}"); + _bug_on_err_or_null(res); + return res; + } + parent_tag_data_str = pp_uid_tag_data(pqd->parent_tag_data); + res = kasprintf(GFP_ATOMIC, "proc_qtu_data@%p{" + "node=rb_node{...}, pid=%u, " + "parent_tag_data=%s, " + "sock_tag_list=list_head{...}}", + pqd, pqd->pid, parent_tag_data_str + ); + _bug_on_err_or_null(res); + kfree(parent_tag_data_str); + return res; +} + +/*------------------------------------------*/ +void prdebug_sock_tag_tree(int indent_level, + struct rb_root *sock_tag_tree) +{ + struct rb_node *node; + struct sock_tag *sock_tag_entry; + char *str; + + if (!unlikely(qtaguid_debug_mask & DDEBUG_MASK)) + return; + + if (RB_EMPTY_ROOT(sock_tag_tree)) { + str = "sock_tag_tree=rb_root{}"; + pr_debug("%*d: %s\n", indent_level*2, indent_level, str); + return; + } + + str = "sock_tag_tree=rb_root{"; + pr_debug("%*d: %s\n", indent_level*2, indent_level, str); + indent_level++; + for (node = rb_first(sock_tag_tree); + node; + node = rb_next(node)) { + sock_tag_entry = rb_entry(node, struct sock_tag, sock_node); + str = pp_sock_tag(sock_tag_entry); + pr_debug("%*d: %s,\n", indent_level*2, indent_level, str); + kfree(str); + } + indent_level--; + str = "}"; + pr_debug("%*d: %s\n", indent_level*2, 
indent_level, str); +} + +void prdebug_sock_tag_list(int indent_level, + struct list_head *sock_tag_list) +{ + struct sock_tag *sock_tag_entry; + char *str; + + if (!unlikely(qtaguid_debug_mask & DDEBUG_MASK)) + return; + + if (list_empty(sock_tag_list)) { + str = "sock_tag_list=list_head{}"; + pr_debug("%*d: %s\n", indent_level*2, indent_level, str); + return; + } + + str = "sock_tag_list=list_head{"; + pr_debug("%*d: %s\n", indent_level*2, indent_level, str); + indent_level++; + list_for_each_entry(sock_tag_entry, sock_tag_list, list) { + str = pp_sock_tag(sock_tag_entry); + pr_debug("%*d: %s,\n", indent_level*2, indent_level, str); + kfree(str); + } + indent_level--; + str = "}"; + pr_debug("%*d: %s\n", indent_level*2, indent_level, str); +} + +void prdebug_proc_qtu_data_tree(int indent_level, + struct rb_root *proc_qtu_data_tree) +{ + char *str; + struct rb_node *node; + struct proc_qtu_data *proc_qtu_data_entry; + + if (!unlikely(qtaguid_debug_mask & DDEBUG_MASK)) + return; + + if (RB_EMPTY_ROOT(proc_qtu_data_tree)) { + str = "proc_qtu_data_tree=rb_root{}"; + pr_debug("%*d: %s\n", indent_level*2, indent_level, str); + return; + } + + str = "proc_qtu_data_tree=rb_root{"; + pr_debug("%*d: %s\n", indent_level*2, indent_level, str); + indent_level++; + for (node = rb_first(proc_qtu_data_tree); + node; + node = rb_next(node)) { + proc_qtu_data_entry = rb_entry(node, + struct proc_qtu_data, + node); + str = pp_proc_qtu_data(proc_qtu_data_entry); + pr_debug("%*d: %s,\n", indent_level*2, indent_level, + str); + kfree(str); + indent_level++; + prdebug_sock_tag_list(indent_level, + &proc_qtu_data_entry->sock_tag_list); + indent_level--; + + } + indent_level--; + str = "}"; + pr_debug("%*d: %s\n", indent_level*2, indent_level, str); +} + +void prdebug_tag_ref_tree(int indent_level, struct rb_root *tag_ref_tree) +{ + char *str; + struct rb_node *node; + struct tag_ref *tag_ref_entry; + + if (!unlikely(qtaguid_debug_mask & DDEBUG_MASK)) + return; + + if (RB_EMPTY_ROOT(tag_ref_tree)) { + str = "tag_ref_tree{}"; + pr_debug("%*d: %s\n", indent_level*2, indent_level, str); + return; + } + + str = "tag_ref_tree{"; + pr_debug("%*d: %s\n", indent_level*2, indent_level, str); + indent_level++; + for (node = rb_first(tag_ref_tree); + node; + node = rb_next(node)) { + tag_ref_entry = rb_entry(node, + struct tag_ref, + tn.node); + str = pp_tag_ref(tag_ref_entry); + pr_debug("%*d: %s,\n", indent_level*2, indent_level, + str); + kfree(str); + } + indent_level--; + str = "}"; + pr_debug("%*d: %s\n", indent_level*2, indent_level, str); +} + +void prdebug_uid_tag_data_tree(int indent_level, + struct rb_root *uid_tag_data_tree) +{ + char *str; + struct rb_node *node; + struct uid_tag_data *uid_tag_data_entry; + + if (!unlikely(qtaguid_debug_mask & DDEBUG_MASK)) + return; + + if (RB_EMPTY_ROOT(uid_tag_data_tree)) { + str = "uid_tag_data_tree=rb_root{}"; + pr_debug("%*d: %s\n", indent_level*2, indent_level, str); + return; + } + + str = "uid_tag_data_tree=rb_root{"; + pr_debug("%*d: %s\n", indent_level*2, indent_level, str); + indent_level++; + for (node = rb_first(uid_tag_data_tree); + node; + node = rb_next(node)) { + uid_tag_data_entry = rb_entry(node, struct uid_tag_data, + node); + str = pp_uid_tag_data(uid_tag_data_entry); + pr_debug("%*d: %s,\n", indent_level*2, indent_level, str); + kfree(str); + if (!RB_EMPTY_ROOT(&uid_tag_data_entry->tag_ref_tree)) { + indent_level++; + prdebug_tag_ref_tree(indent_level, + &uid_tag_data_entry->tag_ref_tree); + indent_level--; + } + } + indent_level--; + str = "}"; + 
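+ /*
+  * As throughout this file, "%*d" indents by indent_level*2 columns
+  * and prints the level itself, so tree nesting stays visible.
+  */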
pr_debug("%*d: %s\n", indent_level*2, indent_level, str); +} + +void prdebug_tag_stat_tree(int indent_level, + struct rb_root *tag_stat_tree) +{ + char *str; + struct rb_node *node; + struct tag_stat *ts_entry; + + if (!unlikely(qtaguid_debug_mask & DDEBUG_MASK)) + return; + + if (RB_EMPTY_ROOT(tag_stat_tree)) { + str = "tag_stat_tree{}"; + pr_debug("%*d: %s\n", indent_level*2, indent_level, str); + return; + } + + str = "tag_stat_tree{"; + pr_debug("%*d: %s\n", indent_level*2, indent_level, str); + indent_level++; + for (node = rb_first(tag_stat_tree); + node; + node = rb_next(node)) { + ts_entry = rb_entry(node, struct tag_stat, tn.node); + str = pp_tag_stat(ts_entry); + pr_debug("%*d: %s\n", indent_level*2, indent_level, + str); + kfree(str); + } + indent_level--; + str = "}"; + pr_debug("%*d: %s\n", indent_level*2, indent_level, str); +} + +void prdebug_iface_stat_list(int indent_level, + struct list_head *iface_stat_list) +{ + char *str; + struct iface_stat *iface_entry; + + if (!unlikely(qtaguid_debug_mask & DDEBUG_MASK)) + return; + + if (list_empty(iface_stat_list)) { + str = "iface_stat_list=list_head{}"; + pr_debug("%*d: %s\n", indent_level*2, indent_level, str); + return; + } + + str = "iface_stat_list=list_head{"; + pr_debug("%*d: %s\n", indent_level*2, indent_level, str); + indent_level++; + list_for_each_entry(iface_entry, iface_stat_list, list) { + str = pp_iface_stat(iface_entry); + pr_debug("%*d: %s\n", indent_level*2, indent_level, str); + kfree(str); + + spin_lock_bh(&iface_entry->tag_stat_list_lock); + if (!RB_EMPTY_ROOT(&iface_entry->tag_stat_tree)) { + indent_level++; + prdebug_tag_stat_tree(indent_level, + &iface_entry->tag_stat_tree); + indent_level--; + } + spin_unlock_bh(&iface_entry->tag_stat_list_lock); + } + indent_level--; + str = "}"; + pr_debug("%*d: %s\n", indent_level*2, indent_level, str); +} + +#endif /* ifdef DDEBUG */ +/*------------------------------------------*/ +static const char * const netdev_event_strings[] = { + "netdev_unknown", + "NETDEV_UP", + "NETDEV_DOWN", + "NETDEV_REBOOT", + "NETDEV_CHANGE", + "NETDEV_REGISTER", + "NETDEV_UNREGISTER", + "NETDEV_CHANGEMTU", + "NETDEV_CHANGEADDR", + "NETDEV_GOING_DOWN", + "NETDEV_CHANGENAME", + "NETDEV_FEAT_CHANGE", + "NETDEV_BONDING_FAILOVER", + "NETDEV_PRE_UP", + "NETDEV_PRE_TYPE_CHANGE", + "NETDEV_POST_TYPE_CHANGE", + "NETDEV_POST_INIT", + "NETDEV_UNREGISTER_BATCH", + "NETDEV_RELEASE", + "NETDEV_NOTIFY_PEERS", + "NETDEV_JOIN", +}; + +const char *netdev_evt_str(int netdev_event) +{ + if (netdev_event < 0 + || netdev_event >= ARRAY_SIZE(netdev_event_strings)) + return "bad event num"; + return netdev_event_strings[netdev_event]; +} diff --git a/net/netfilter/xt_qtaguid_print.h b/net/netfilter/xt_qtaguid_print.h new file mode 100644 index 000000000000..b63871a0be5a --- /dev/null +++ b/net/netfilter/xt_qtaguid_print.h @@ -0,0 +1,120 @@ +/* + * Pretty printing Support for iptables xt_qtaguid module. + * + * (C) 2011 Google, Inc + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
diff --git a/net/netfilter/xt_qtaguid_print.h b/net/netfilter/xt_qtaguid_print.h new file mode 100644 index 000000000000..b63871a0be5a --- /dev/null +++ b/net/netfilter/xt_qtaguid_print.h @@ -0,0 +1,120 @@ +/* + * Pretty printing Support for iptables xt_qtaguid module. + * + * (C) 2011 Google, Inc + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ +#ifndef __XT_QTAGUID_PRINT_H__ +#define __XT_QTAGUID_PRINT_H__ + +#include "xt_qtaguid_internal.h" + +#ifdef DDEBUG + +char *pp_tag_t(tag_t *tag); +char *pp_data_counters(struct data_counters *dc, bool showValues); +char *pp_tag_node(struct tag_node *tn); +char *pp_tag_ref(struct tag_ref *tr); +char *pp_tag_stat(struct tag_stat *ts); +char *pp_iface_stat(struct iface_stat *is); +char *pp_sock_tag(struct sock_tag *st); +char *pp_uid_tag_data(struct uid_tag_data *qtd); +char *pp_proc_qtu_data(struct proc_qtu_data *pqd); + +/*------------------------------------------*/ +void prdebug_sock_tag_list(int indent_level, + struct list_head *sock_tag_list); +void prdebug_sock_tag_tree(int indent_level, + struct rb_root *sock_tag_tree); +void prdebug_proc_qtu_data_tree(int indent_level, + struct rb_root *proc_qtu_data_tree); +void prdebug_tag_ref_tree(int indent_level, struct rb_root *tag_ref_tree); +void prdebug_uid_tag_data_tree(int indent_level, + struct rb_root *uid_tag_data_tree); +void prdebug_tag_stat_tree(int indent_level, + struct rb_root *tag_stat_tree); +void prdebug_iface_stat_list(int indent_level, + struct list_head *iface_stat_list); + +#else + +/*------------------------------------------*/ +static inline char *pp_tag_t(tag_t *tag) +{ + return NULL; +} +static inline char *pp_data_counters(struct data_counters *dc, bool showValues) +{ + return NULL; +} +static inline char *pp_tag_node(struct tag_node *tn) +{ + return NULL; +} +static inline char *pp_tag_ref(struct tag_ref *tr) +{ + return NULL; +} +static inline char *pp_tag_stat(struct tag_stat *ts) +{ + return NULL; +} +static inline char *pp_iface_stat(struct iface_stat *is) +{ + return NULL; +} +static inline char *pp_sock_tag(struct sock_tag *st) +{ + return NULL; +} +static inline char *pp_uid_tag_data(struct uid_tag_data *qtd) +{ + return NULL; +} +static inline char *pp_proc_qtu_data(struct proc_qtu_data *pqd) +{ + return NULL; +} + +/*------------------------------------------*/ +static inline +void prdebug_sock_tag_list(int indent_level, + struct list_head *sock_tag_list) +{ +} +static inline +void prdebug_sock_tag_tree(int indent_level, + struct rb_root *sock_tag_tree) +{ +} +static inline +void prdebug_proc_qtu_data_tree(int indent_level, + struct rb_root *proc_qtu_data_tree) +{ +} +static inline +void prdebug_tag_ref_tree(int indent_level, struct rb_root *tag_ref_tree) +{ +} +static inline +void prdebug_uid_tag_data_tree(int indent_level, + struct rb_root *uid_tag_data_tree) +{ +} +static inline +void prdebug_tag_stat_tree(int indent_level, + struct rb_root *tag_stat_tree) +{ +} +static inline +void prdebug_iface_stat_list(int indent_level, + struct list_head *iface_stat_list) +{ +} +#endif +/*------------------------------------------*/ +const char *netdev_evt_str(int netdev_event); +#endif /* ifndef __XT_QTAGUID_PRINT_H__ */ diff --git a/net/netfilter/xt_quota2.c b/net/netfilter/xt_quota2.c new file mode 100644 index 000000000000..3c72bea2dd69 --- /dev/null +++ b/net/netfilter/xt_quota2.c @@ -0,0 +1,381 @@ +/* + * xt_quota2 - enhanced xt_quota that can count upwards and in packets + * as a minimal accounting match. + * by Jan Engelhardt <jengelh@medozas.de>, 2008 + * + * Originally based on xt_quota.c: + * netfilter module to enforce network quotas + * Sam Johnston <samj@samj.net> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License; either + * version 2 of the License, as published by the Free Software Foundation.
+ */ +#include <linux/list.h> +#include <linux/proc_fs.h> +#include <linux/skbuff.h> +#include <linux/spinlock.h> +#include <asm/atomic.h> + +#include <linux/netfilter/x_tables.h> +#include <linux/netfilter/xt_quota2.h> +#ifdef CONFIG_NETFILTER_XT_MATCH_QUOTA2_LOG +#include <linux/netfilter_ipv4/ipt_ULOG.h> +#endif + +/** + * @lock: lock to protect quota writers from each other + */ +struct xt_quota_counter { + u_int64_t quota; + spinlock_t lock; + struct list_head list; + atomic_t ref; + char name[sizeof(((struct xt_quota_mtinfo2 *)NULL)->name)]; + struct proc_dir_entry *procfs_entry; +}; + +#ifdef CONFIG_NETFILTER_XT_MATCH_QUOTA2_LOG +/* Harald's favorite number +1 :D From ipt_ULOG.C */ +static int qlog_nl_event = 112; +module_param_named(event_num, qlog_nl_event, uint, S_IRUGO | S_IWUSR); +MODULE_PARM_DESC(event_num, + "Event number for NETLINK_NFLOG message. 0 disables log." + "111 is what ipt_ULOG uses."); +static struct sock *nflognl; +#endif + +static LIST_HEAD(counter_list); +static DEFINE_SPINLOCK(counter_list_lock); + +static struct proc_dir_entry *proc_xt_quota; +static unsigned int quota_list_perms = S_IRUGO | S_IWUSR; +static unsigned int quota_list_uid = 0; +static unsigned int quota_list_gid = 0; +module_param_named(perms, quota_list_perms, uint, S_IRUGO | S_IWUSR); +module_param_named(uid, quota_list_uid, uint, S_IRUGO | S_IWUSR); +module_param_named(gid, quota_list_gid, uint, S_IRUGO | S_IWUSR); + + +#ifdef CONFIG_NETFILTER_XT_MATCH_QUOTA2_LOG +static void quota2_log(unsigned int hooknum, + const struct sk_buff *skb, + const struct net_device *in, + const struct net_device *out, + const char *prefix) +{ + ulog_packet_msg_t *pm; + struct sk_buff *log_skb; + size_t size; + struct nlmsghdr *nlh; + + if (!qlog_nl_event) + return; + + size = NLMSG_SPACE(sizeof(*pm)); + size = max(size, (size_t)NLMSG_GOODSIZE); + log_skb = alloc_skb(size, GFP_ATOMIC); + if (!log_skb) { + pr_err("xt_quota2: cannot alloc skb for logging\n"); + return; + } + + /* NLMSG_PUT() uses "goto nlmsg_failure" */ + nlh = NLMSG_PUT(log_skb, /*pid*/0, /*seq*/0, qlog_nl_event, + sizeof(*pm)); + pm = NLMSG_DATA(nlh); + if (skb->tstamp.tv64 == 0) + __net_timestamp((struct sk_buff *)skb); + pm->data_len = 0; + pm->hook = hooknum; + if (prefix != NULL) + strlcpy(pm->prefix, prefix, sizeof(pm->prefix)); + else + *(pm->prefix) = '\0'; + if (in) + strlcpy(pm->indev_name, in->name, sizeof(pm->indev_name)); + else + pm->indev_name[0] = '\0'; + + if (out) + strlcpy(pm->outdev_name, out->name, sizeof(pm->outdev_name)); + else + pm->outdev_name[0] = '\0'; + + NETLINK_CB(log_skb).dst_group = 1; + pr_debug("throwing 1 packets to netlink group 1\n"); + netlink_broadcast(nflognl, log_skb, 0, 1, GFP_ATOMIC); + +nlmsg_failure: /* Used within NLMSG_PUT() */ + pr_debug("xt_quota2: error during NLMSG_PUT\n"); +} +#else +static void quota2_log(unsigned int hooknum, + const struct sk_buff *skb, + const struct net_device *in, + const struct net_device *out, + const char *prefix) +{ +} +#endif /* if+else CONFIG_NETFILTER_XT_MATCH_QUOTA2_LOG */ + +static int quota_proc_read(char *page, char **start, off_t offset, + int count, int *eof, void *data) +{ + struct xt_quota_counter *e = data; + int ret; + + spin_lock_bh(&e->lock); + ret = snprintf(page, PAGE_SIZE, "%llu\n", e->quota); + spin_unlock_bh(&e->lock); + return ret; +} + +static int quota_proc_write(struct file *file, const char __user *input, + unsigned long size, void *data) +{ + struct xt_quota_counter *e = data; + char buf[sizeof("18446744073709551616")]; + + if (size > sizeof(buf)) + size = sizeof(buf); + if (copy_from_user(buf, input, size) != 0) + return -EFAULT; + buf[sizeof(buf)-1] = '\0'; + + spin_lock_bh(&e->lock); + e->quota = simple_strtoull(buf, NULL, 0); + spin_unlock_bh(&e->lock); + return size; +}
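Note: these two handlers back the per-counter files under /proc/net/xt_quota/ created by quota_mt2_init() further down; a read returns the counter value and a write replaces it (simple_strtoull() with base 0, so decimal, octal and hex all work). A userspace sketch, assuming a rule has already instantiated a named counter; the name "mycounter" and the 1 MiB value are made up:

#include <stdio.h>

int main(void)
{
	unsigned long long quota;
	FILE *f = fopen("/proc/net/xt_quota/mycounter", "r+");

	if (!f)
		return 1;
	if (fscanf(f, "%llu", &quota) == 1)	/* output of quota_proc_read() */
		printf("quota now: %llu\n", quota);
	rewind(f);
	fprintf(f, "%llu\n", 1048576ULL);	/* parsed by quota_proc_write() */
	fclose(f);
	return 0;
}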
+ +static struct xt_quota_counter * +q2_new_counter(const struct xt_quota_mtinfo2 *q, bool anon) +{ + struct xt_quota_counter *e; + unsigned int size; + + /* Do not need all the procfs things for anonymous counters. */ + size = anon ? offsetof(typeof(*e), list) : sizeof(*e); + e = kmalloc(size, GFP_KERNEL); + if (e == NULL) + return NULL; + + e->quota = q->quota; + spin_lock_init(&e->lock); + if (!anon) { + INIT_LIST_HEAD(&e->list); + atomic_set(&e->ref, 1); + strlcpy(e->name, q->name, sizeof(e->name)); + } + return e; +} + +/** + * q2_get_counter - get ref to counter or create new + * @name: name of counter + */ +static struct xt_quota_counter * +q2_get_counter(const struct xt_quota_mtinfo2 *q) +{ + struct proc_dir_entry *p; + struct xt_quota_counter *e = NULL; + struct xt_quota_counter *new_e; + + if (*q->name == '\0') + return q2_new_counter(q, true); + + /* No need to hold a lock while getting a new counter */ + new_e = q2_new_counter(q, false); + if (new_e == NULL) + goto out; + + spin_lock_bh(&counter_list_lock); + list_for_each_entry(e, &counter_list, list) + if (strcmp(e->name, q->name) == 0) { + atomic_inc(&e->ref); + spin_unlock_bh(&counter_list_lock); + kfree(new_e); + pr_debug("xt_quota2: old counter name=%s", e->name); + return e; + } + e = new_e; + pr_debug("xt_quota2: new_counter name=%s", e->name); + list_add_tail(&e->list, &counter_list); + /* The entry having a refcount of 1 is not directly destructible. + * This func has not yet returned the new entry, thus iptables + * has not references for destroying this entry. + * For another rule to try to destroy it, it would 1st need for this + * func* to be re-invoked, acquire a new ref for the same named quota. + * Nobody will access the e->procfs_entry either. + * So release the lock. */ + spin_unlock_bh(&counter_list_lock); + + /* create_proc_entry() is not spin_lock happy */ + p = e->procfs_entry = create_proc_entry(e->name, quota_list_perms, + proc_xt_quota); + + if (IS_ERR_OR_NULL(p)) { + spin_lock_bh(&counter_list_lock); + list_del(&e->list); + spin_unlock_bh(&counter_list_lock); + goto out; + } + p->data = e; + p->read_proc = quota_proc_read; + p->write_proc = quota_proc_write; + p->uid = quota_list_uid; + p->gid = quota_list_gid; + return e; + + out: + kfree(e); + return NULL; +} + +static int quota_mt2_check(const struct xt_mtchk_param *par) +{ + struct xt_quota_mtinfo2 *q = par->matchinfo; + + pr_debug("xt_quota2: check() flags=0x%04x", q->flags); + + if (q->flags & ~XT_QUOTA_MASK) + return -EINVAL; + + q->name[sizeof(q->name)-1] = '\0'; + if (*q->name == '.'
|| strchr(q->name, '/') != NULL) { + printk(KERN_ERR "xt_quota.3: illegal name\n"); + return -EINVAL; + } + + q->master = q2_get_counter(q); + if (q->master == NULL) { + printk(KERN_ERR "xt_quota.3: memory alloc failure\n"); + return -ENOMEM; + } + + return 0; +} + +static void quota_mt2_destroy(const struct xt_mtdtor_param *par) +{ + struct xt_quota_mtinfo2 *q = par->matchinfo; + struct xt_quota_counter *e = q->master; + + if (*q->name == '\0') { + kfree(e); + return; + } + + spin_lock_bh(&counter_list_lock); + if (!atomic_dec_and_test(&e->ref)) { + spin_unlock_bh(&counter_list_lock); + return; + } + + list_del(&e->list); + remove_proc_entry(e->name, proc_xt_quota); + spin_unlock_bh(&counter_list_lock); + kfree(e); +} + +static bool +quota_mt2(const struct sk_buff *skb, struct xt_action_param *par) +{ + struct xt_quota_mtinfo2 *q = (void *)par->matchinfo; + struct xt_quota_counter *e = q->master; + bool ret = q->flags & XT_QUOTA_INVERT; + + spin_lock_bh(&e->lock); + if (q->flags & XT_QUOTA_GROW) { + /* + * While no_change is pointless in "grow" mode, we will + * implement it here simply to have a consistent behavior. + */ + if (!(q->flags & XT_QUOTA_NO_CHANGE)) { + e->quota += (q->flags & XT_QUOTA_PACKET) ? 1 : skb->len; + } + ret = true; + } else { + if (e->quota >= skb->len) { + if (!(q->flags & XT_QUOTA_NO_CHANGE)) + e->quota -= (q->flags & XT_QUOTA_PACKET) ? 1 : skb->len; + ret = !ret; + } else { + /* We are transitioning, log that fact. */ + if (e->quota) { + quota2_log(par->hooknum, + skb, + par->in, + par->out, + q->name); + } + /* we do not allow even small packets from now on */ + e->quota = 0; + } + } + spin_unlock_bh(&e->lock); + return ret; +} + +static struct xt_match quota_mt2_reg[] __read_mostly = { + { + .name = "quota2", + .revision = 3, + .family = NFPROTO_IPV4, + .checkentry = quota_mt2_check, + .match = quota_mt2, + .destroy = quota_mt2_destroy, + .matchsize = sizeof(struct xt_quota_mtinfo2), + .me = THIS_MODULE, + }, + { + .name = "quota2", + .revision = 3, + .family = NFPROTO_IPV6, + .checkentry = quota_mt2_check, + .match = quota_mt2, + .destroy = quota_mt2_destroy, + .matchsize = sizeof(struct xt_quota_mtinfo2), + .me = THIS_MODULE, + }, +}; + +static int __init quota_mt2_init(void) +{ + int ret; + pr_debug("xt_quota2: init()"); + +#ifdef CONFIG_NETFILTER_XT_MATCH_QUOTA2_LOG + nflognl = netlink_kernel_create(&init_net, + NETLINK_NFLOG, 1, NULL, + NULL, THIS_MODULE); + if (!nflognl) + return -ENOMEM; +#endif + + proc_xt_quota = proc_mkdir("xt_quota", init_net.proc_net); + if (proc_xt_quota == NULL) + return -EACCES; + + ret = xt_register_matches(quota_mt2_reg, ARRAY_SIZE(quota_mt2_reg)); + if (ret < 0) + remove_proc_entry("xt_quota", init_net.proc_net); + pr_debug("xt_quota2: init() %d", ret); + return ret; +} + +static void __exit quota_mt2_exit(void) +{ + xt_unregister_matches(quota_mt2_reg, ARRAY_SIZE(quota_mt2_reg)); + remove_proc_entry("xt_quota", init_net.proc_net); +} + +module_init(quota_mt2_init); +module_exit(quota_mt2_exit); +MODULE_DESCRIPTION("Xtables: countdown quota match; up counter"); +MODULE_AUTHOR("Sam Johnston <samj@samj.net>"); +MODULE_AUTHOR("Jan Engelhardt <jengelh@medozas.de>"); +MODULE_LICENSE("GPL"); +MODULE_ALIAS("ipt_quota2"); +MODULE_ALIAS("ip6t_quota2"); diff --git a/net/netfilter/xt_socket.c b/net/netfilter/xt_socket.c index fe39f7e913df..ddf5e0507f5f 100644 --- a/net/netfilter/xt_socket.c +++ b/net/netfilter/xt_socket.c @@ -35,7 +35,7 @@ #include <net/netfilter/nf_conntrack.h> #endif -static void +void xt_socket_put_sk(struct sock 
*sk) { if (sk->sk_state == TCP_TIME_WAIT) @@ -43,6 +43,7 @@ xt_socket_put_sk(struct sock *sk) else sock_put(sk); } +EXPORT_SYMBOL(xt_socket_put_sk); static int extract_icmp4_fields(const struct sk_buff *skb, @@ -101,9 +102,8 @@ extract_icmp4_fields(const struct sk_buff *skb, return 0; } -static bool -socket_match(const struct sk_buff *skb, struct xt_action_param *par, - const struct xt_socket_mtinfo1 *info) +struct sock* +xt_socket_get4_sk(const struct sk_buff *skb, struct xt_action_param *par) { const struct iphdr *iph = ip_hdr(skb); struct udphdr _hdr, *hp = NULL; @@ -120,7 +120,7 @@ socket_match(const struct sk_buff *skb, struct xt_action_param *par, hp = skb_header_pointer(skb, ip_hdrlen(skb), sizeof(_hdr), &_hdr); if (hp == NULL) - return false; + return NULL; protocol = iph->protocol; saddr = iph->saddr; @@ -131,9 +131,9 @@ socket_match(const struct sk_buff *skb, struct xt_action_param *par, } else if (iph->protocol == IPPROTO_ICMP) { if (extract_icmp4_fields(skb, &protocol, &saddr, &daddr, &sport, &dport)) - return false; + return NULL; } else { - return false; + return NULL; } #ifdef XT_SOCKET_HAVE_CONNTRACK @@ -157,6 +157,23 @@ socket_match(const struct sk_buff *skb, struct xt_action_param *par, sk = nf_tproxy_get_sock_v4(dev_net(skb->dev), protocol, saddr, daddr, sport, dport, par->in, NFT_LOOKUP_ANY); + + pr_debug("proto %hhu %pI4:%hu -> %pI4:%hu (orig %pI4:%hu) sock %p\n", + protocol, &saddr, ntohs(sport), + &daddr, ntohs(dport), + &iph->daddr, hp ? ntohs(hp->dest) : 0, sk); + + return sk; +} +EXPORT_SYMBOL(xt_socket_get4_sk); + +static bool +socket_match(const struct sk_buff *skb, struct xt_action_param *par, + const struct xt_socket_mtinfo1 *info) +{ + struct sock *sk; + + sk = xt_socket_get4_sk(skb, par); if (sk != NULL) { bool wildcard; bool transparent = true; @@ -179,11 +196,6 @@ socket_match(const struct sk_buff *skb, struct xt_action_param *par, sk = NULL; } - pr_debug("proto %hhu %pI4:%hu -> %pI4:%hu (orig %pI4:%hu) sock %p\n", - protocol, &saddr, ntohs(sport), - &daddr, ntohs(dport), - &iph->daddr, hp ? ntohs(hp->dest) : 0, sk); - return (sk != NULL); } @@ -253,8 +265,8 @@ extract_icmp6_fields(const struct sk_buff *skb, return 0; } -static bool -socket_mt6_v1(const struct sk_buff *skb, struct xt_action_param *par) +struct sock* +xt_socket_get6_sk(const struct sk_buff *skb, struct xt_action_param *par) { struct ipv6hdr *iph = ipv6_hdr(skb); struct udphdr _hdr, *hp = NULL; @@ -262,7 +274,6 @@ socket_mt6_v1(const struct sk_buff *skb, struct xt_action_param *par) struct in6_addr *daddr, *saddr; __be16 dport, sport; int thoff, tproto; - const struct xt_socket_mtinfo1 *info = (struct xt_socket_mtinfo1 *) par->matchinfo; tproto = ipv6_find_hdr(skb, &thoff, -1, NULL); if (tproto < 0) { @@ -274,7 +285,7 @@ socket_mt6_v1(const struct sk_buff *skb, struct xt_action_param *par) hp = skb_header_pointer(skb, thoff, sizeof(_hdr), &_hdr); if (hp == NULL) - return false; + return NULL; saddr = &iph->saddr; sport = hp->source; @@ -284,13 +295,30 @@ socket_mt6_v1(const struct sk_buff *skb, struct xt_action_param *par) } else if (tproto == IPPROTO_ICMPV6) { if (extract_icmp6_fields(skb, thoff, &tproto, &saddr, &daddr, &sport, &dport)) - return false; + return NULL; } else { - return false; + return NULL; } sk = nf_tproxy_get_sock_v6(dev_net(skb->dev), tproto, saddr, daddr, sport, dport, par->in, NFT_LOOKUP_ANY); + pr_debug("proto %hhd %pI6:%hu -> %pI6:%hu " + "(orig %pI6:%hu) sock %p\n", + tproto, saddr, ntohs(sport), + daddr, ntohs(dport), + &iph->daddr, hp ? ntohs(hp->dest) : 0, sk); + return sk; +} +EXPORT_SYMBOL(xt_socket_get6_sk); + +static bool +socket_mt6_v1(const struct sk_buff *skb, struct xt_action_param *par) +{ + struct sock *sk; + const struct xt_socket_mtinfo1 *info; + + info = (struct xt_socket_mtinfo1 *) par->matchinfo; + sk = xt_socket_get6_sk(skb, par); if (sk != NULL) { bool wildcard; bool transparent = true; @@ -313,12 +341,6 @@ socket_mt6_v1(const struct sk_buff *skb, struct xt_action_param *par) sk = NULL; } - pr_debug("proto %hhd %pI6:%hu -> %pI6:%hu " - "(orig %pI6:%hu) sock %p\n", - tproto, saddr, ntohs(sport), - daddr, ntohs(dport), - &iph->daddr, hp ? ntohs(hp->dest) : 0, sk); - return (sk != NULL); } #endif
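Note: the xt_socket.c change above is a refactor rather than a behavior change: the lookup halves of socket_match() and socket_mt6_v1() become xt_socket_get4_sk() and xt_socket_get6_sk(), exported together with xt_socket_put_sk() so that other modules (xt_qtaguid in this merge) can resolve an skb to its owning socket. A sketch of such a consumer; the match function and the hard-coded uid test are invented, only the three exported symbols come from this patch:

static bool uid1000_mt(const struct sk_buff *skb, struct xt_action_param *par)
{
	struct sock *sk = xt_socket_get4_sk(skb, par);
	bool match = false;

	if (sk) {
		/* full sockets carry the owning process's credentials */
		if (sk->sk_socket && sk->sk_socket->file)
			match = sk->sk_socket->file->f_cred->fsuid == 1000;
		xt_socket_put_sk(sk);	/* also handles TIME_WAIT mini-sockets */
	}
	return match;
}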
diff --git a/net/rfkill/Kconfig b/net/rfkill/Kconfig index 78efe895b663..8e12c8a2b82b 100644 --- a/net/rfkill/Kconfig +++ b/net/rfkill/Kconfig @@ -10,6 +10,11 @@ menuconfig RFKILL To compile this driver as a module, choose M here: the module will be called rfkill. +config RFKILL_PM + bool "Power off on suspend" + depends on RFKILL && PM + default y + # LED trigger support config RFKILL_LEDS bool diff --git a/net/rfkill/core.c b/net/rfkill/core.c index be90640a2774..df2dae6b2723 100644 --- a/net/rfkill/core.c +++ b/net/rfkill/core.c @@ -769,6 +769,7 @@ void rfkill_pause_polling(struct rfkill *rfkill) } EXPORT_SYMBOL(rfkill_pause_polling); +#ifdef CONFIG_RFKILL_PM void rfkill_resume_polling(struct rfkill *rfkill) { BUG_ON(!rfkill); @@ -803,14 +804,17 @@ static int rfkill_resume(struct device *dev) return 0; } +#endif static struct class rfkill_class = { .name = "rfkill", .dev_release = rfkill_release, .dev_attrs = rfkill_dev_attrs, .dev_uevent = rfkill_dev_uevent, +#ifdef CONFIG_RFKILL_PM .suspend = rfkill_suspend, .resume = rfkill_resume, +#endif }; bool rfkill_blocked(struct rfkill *rfkill) diff --git a/net/wireless/Kconfig b/net/wireless/Kconfig index 1f1ef70f34f2..8e2a668c9230 100644 --- a/net/wireless/Kconfig +++ b/net/wireless/Kconfig @@ -159,3 +159,14 @@ config LIB80211_DEBUG from lib80211. If unsure, say N. + +config CFG80211_ALLOW_RECONNECT + bool "Allow reconnect while already connected" + depends on CFG80211 + default n + help + The cfg80211 stack doesn't allow connecting if you are already + connected. This option allows making a new connection in this case. + + Select this option ONLY for wlan drivers that are specifically + built for such purposes.
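Note: with CONFIG_RFKILL_PM disabled, the rfkill class above registers no suspend/resume hooks and rfkill_resume_polling() is compiled out along with them, so drivers that poll need the option enabled. For reference, a sketch of how a polled driver pairs the two calls around system sleep; the driver and its dev_pm callbacks are invented, while rfkill_pause_polling() and rfkill_resume_polling() are the existing rfkill API:

static int mydrv_suspend(struct device *dev)
{
	struct rfkill *rfkill = dev_get_drvdata(dev);

	rfkill_pause_polling(rfkill);	/* park the rfkill_poll() work */
	return 0;
}

static int mydrv_resume(struct device *dev)
{
	struct rfkill *rfkill = dev_get_drvdata(dev);

	rfkill_resume_polling(rfkill);	/* needs CONFIG_RFKILL_PM after this patch */
	return 0;
}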
diff --git a/net/wireless/reg.c b/net/wireless/reg.c index ca76d8b55c4a..72f7feee254a 100644 --- a/net/wireless/reg.c +++ b/net/wireless/reg.c @@ -1773,6 +1773,7 @@ static void restore_alpha2(char *alpha2, bool reset_user) static void restore_regulatory_settings(bool reset_user) { char alpha2[2]; + char world_alpha2[2]; struct reg_beacon *reg_beacon, *btmp; struct regulatory_request *reg_request, *tmp; LIST_HEAD(tmp_reg_req_list); @@ -1823,11 +1824,13 @@ static void restore_regulatory_settings(bool reset_user) /* First restore to the basic regulatory settings */ cfg80211_regdomain = cfg80211_world_regdom; + world_alpha2[0] = cfg80211_regdomain->alpha2[0]; + world_alpha2[1] = cfg80211_regdomain->alpha2[1]; mutex_unlock(&reg_mutex); mutex_unlock(&cfg80211_mutex); - regulatory_hint_core(cfg80211_regdomain->alpha2); + regulatory_hint_core(world_alpha2); /* * This restores the ieee80211_regdom module parameter diff --git a/net/wireless/scan.c b/net/wireless/scan.c index ae0c2256ba3b..cbbc92731ec8 100644 --- a/net/wireless/scan.c +++ b/net/wireless/scan.c @@ -17,7 +17,7 @@ #include "nl80211.h" #include "wext-compat.h" -#define IEEE80211_SCAN_RESULT_EXPIRE (15 * HZ) +#define IEEE80211_SCAN_RESULT_EXPIRE (3 * HZ) void ___cfg80211_scan_done(struct cfg80211_registered_device *rdev, bool leak) { diff --git a/net/wireless/sme.c b/net/wireless/sme.c index b7b6ff8be553..cf4be21236b8 100644 --- a/net/wireless/sme.c +++ b/net/wireless/sme.c @@ -659,8 +659,10 @@ void __cfg80211_disconnected(struct net_device *dev, const u8 *ie, wdev->iftype != NL80211_IFTYPE_P2P_CLIENT)) return; +#ifndef CONFIG_CFG80211_ALLOW_RECONNECT if (wdev->sme_state != CFG80211_SME_CONNECTED) return; +#endif if (wdev->current_bss) { cfg80211_unhold_bss(wdev->current_bss); @@ -758,10 +760,14 @@ int __cfg80211_connect(struct cfg80211_registered_device *rdev, ASSERT_WDEV_LOCK(wdev); +#ifndef CONFIG_CFG80211_ALLOW_RECONNECT if (wdev->sme_state != CFG80211_SME_IDLE) return -EALREADY; if (WARN_ON(wdev->connect_keys)) { +#else + if (wdev->connect_keys) { +#endif kfree(wdev->connect_keys); wdev->connect_keys = NULL; + }
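Note: the reg.c hunk is a copy-before-unlock fix. Once reg_mutex and cfg80211_mutex are dropped, cfg80211_regdomain may be replaced concurrently, so the two alpha2 bytes are snapshotted into a stack buffer while the locks are still held and only the copy is passed to regulatory_hint_core(). The same pattern in isolation, as a standalone sketch rather than the kernel code:

static char shared_alpha2[2] = { '0', '0' };	/* protected by example_lock */
static DEFINE_MUTEX(example_lock);

static void hint_current_domain(void)
{
	char alpha2[2];

	mutex_lock(&example_lock);
	alpha2[0] = shared_alpha2[0];	/* snapshot while protected */
	alpha2[1] = shared_alpha2[1];
	mutex_unlock(&example_lock);

	/* shared_alpha2 may change from here on; the copy stays valid */
	pr_info("regdomain hint: %c%c\n", alpha2[0], alpha2[1]);
}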