From 52409fae3e4b8d16b68b61902fc09075cd97b75d Mon Sep 17 00:00:00 2001 From: Dominik Sliwa Date: Sun, 2 Jul 2017 16:41:37 +0200 Subject: Backports generated from 4.11 kernel Initial commit. Signed-off-by: Dominik Sliwa --- compat/Kconfig | 174 ++++++ compat/Makefile | 47 ++ compat/backport-3.10.c | 199 +++++++ compat/backport-3.11.c | 124 ++++ compat/backport-3.12.c | 72 +++ compat/backport-3.13.c | 309 ++++++++++ compat/backport-3.14.c | 90 +++ compat/backport-3.15.c | 88 +++ compat/backport-3.17.c | 166 ++++++ compat/backport-3.18.c | 324 ++++++++++ compat/backport-3.19.c | 165 ++++++ compat/backport-3.2.c | 25 + compat/backport-4.0.c | 323 ++++++++++ compat/backport-4.1.c | 84 +++ compat/backport-4.2.c | 67 +++ compat/backport-4.3.c | 245 ++++++++ compat/backport-4.4.c | 154 +++++ compat/backport-4.5.c | 153 +++++ compat/backport-4.6.c | 77 +++ compat/backport-4.7.c | 182 ++++++ compat/backport-4.8.c | 146 +++++ compat/backports.h | 26 + compat/compat-3.0.c | 91 +++ compat/compat-3.1.c | 118 ++++ compat/compat-3.3.c | 239 ++++++++ compat/compat-3.4.c | 201 +++++++ compat/compat-3.5.c | 167 ++++++ compat/compat-3.6.c | 27 + compat/compat-3.7.c | 290 +++++++++ compat/compat-3.8.c | 455 ++++++++++++++ compat/compat-3.9.c | 238 ++++++++ compat/crypto-ccm.c | 1073 +++++++++++++++++++++++++++++++++ compat/crypto-skcipher.c | 997 +++++++++++++++++++++++++++++++ compat/drivers-base-devcoredump.c | 386 ++++++++++++ compat/hid-ids.h | 866 +++++++++++++++++++++++++++ compat/lib-cordic.c | 101 ++++ compat/lib-rhashtable.c | 1187 +++++++++++++++++++++++++++++++++++++ compat/main.c | 92 +++ compat/user_namespace.c | 68 +++ 39 files changed, 9836 insertions(+) create mode 100644 compat/Kconfig create mode 100644 compat/Makefile create mode 100644 compat/backport-3.10.c create mode 100644 compat/backport-3.11.c create mode 100644 compat/backport-3.12.c create mode 100644 compat/backport-3.13.c create mode 100644 compat/backport-3.14.c create mode 100644 compat/backport-3.15.c create mode 100644 compat/backport-3.17.c create mode 100644 compat/backport-3.18.c create mode 100644 compat/backport-3.19.c create mode 100644 compat/backport-3.2.c create mode 100644 compat/backport-4.0.c create mode 100644 compat/backport-4.1.c create mode 100644 compat/backport-4.2.c create mode 100644 compat/backport-4.3.c create mode 100644 compat/backport-4.4.c create mode 100644 compat/backport-4.5.c create mode 100644 compat/backport-4.6.c create mode 100644 compat/backport-4.7.c create mode 100644 compat/backport-4.8.c create mode 100644 compat/backports.h create mode 100644 compat/compat-3.0.c create mode 100644 compat/compat-3.1.c create mode 100644 compat/compat-3.3.c create mode 100644 compat/compat-3.4.c create mode 100644 compat/compat-3.5.c create mode 100644 compat/compat-3.6.c create mode 100644 compat/compat-3.7.c create mode 100644 compat/compat-3.8.c create mode 100644 compat/compat-3.9.c create mode 100644 compat/crypto-ccm.c create mode 100644 compat/crypto-skcipher.c create mode 100644 compat/drivers-base-devcoredump.c create mode 100644 compat/hid-ids.h create mode 100644 compat/lib-cordic.c create mode 100644 compat/lib-rhashtable.c create mode 100644 compat/main.c create mode 100644 compat/user_namespace.c (limited to 'compat') diff --git a/compat/Kconfig b/compat/Kconfig new file mode 100644 index 0000000..7bcf03d --- /dev/null +++ b/compat/Kconfig @@ -0,0 +1,174 @@ +# +# backport Kconfig +# +# Some options are user-selectable ("BPAUTO_USERSEL_*") +# +# Most options, however, follow a few different 
schemes: +# +# A) An option that is selected by drivers ("select FOO") will be +# changed to "select BPAUTO_FOO" (if the option BPAUTO_FOO +# exists). The option BPAUTO_FOO then controls setting of the +# BPAUTO_BUILD_FOO option, which is a module, like this: +# +# config BPAUTO_BUILD_FOO +# tristate +# # or bool +# +# # not possible on kernel < X.Y, build will fail if any +# # drivers are allowed to build on kernels < X.Y +# depends on KERNEL_X_Y +# +# # don't build the backport code if FOO is in the kernel +# # already, but only if the kernel version is also >= X.Z; +# # this is an example of backporting where the version of +# # the FOO subsystem that we need is only available from +# # kernel version X.Z +# depends on !FOO || KERNEL_X_Z +# +# # build if driver needs it (it selects BPAUTO_FOO) +# default m if BPAUTO_FOO +# +# # or for build-testing (BPAUTO_USERSEL_BUILD_ALL is enabled) +# default m if BPAUTO_USERSEL_BUILD_ALL +# +# config BPAUTO_FOO +# bool +# +# This only works as-is if the kernel code is usable on any version, +# otherwise the "&& !FOO" part needs to be different. +# +# +# B) An option for code always present on some kernels (e.g. KFIFO). +# This simply depends on/sets the default based on the version: +# +# config BPAUTO_BUILD_KFIFO +# def_bool y +# depends on KERNEL_2_6_36 +# +# +# C) similarly, a kconfig symbol for an option, e.g. +# BPAUTO_OPTION_SOME_FIX (no examples provided) check git log +# +# +# Variations are obviously possible. +# + +config BP_MODULES + option modules + bool + default MODULES + + help + This symbol is necessary for the newer kconf tool, it looks + for the "option modules" to control the 'm' state. + +config BPAUTO_BUILD_CORDIC + tristate + depends on m + depends on !CORDIC + depends on KERNEL_3_1 + default m if BPAUTO_CORDIC + default m if BPAUTO_USERSEL_BUILD_ALL + #module-name cordic + #c-file lib/cordic.c + +config BPAUTO_CORDIC + bool + +config BPAUTO_MII + bool + +config BPAUTO_BUILD_LEDS + bool + depends on !NEW_LEDS || LEDS_CLASS=n || !LEDS_TRIGGERS + default y if BPAUTO_NEW_LEDS + default y if BPAUTO_LEDS_CLASS + default y if BPAUTO_LEDS_TRIGGERS + +config BPAUTO_NEW_LEDS + bool + +config BPAUTO_LEDS_CLASS + bool + +config BPAUTO_LEDS_TRIGGERS + bool + +config BPAUTO_USERSEL_BUILD_ALL + bool "Build all compat code" + help + This option selects all the compat code options + that would otherwise only be selected by drivers. + + It's only really useful for compat testing, so + you probably shouldn't enable it. 
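+
+# Illustrative example (hypothetical, not generated from this tree): a
+# driver Kconfig entry that needs the backported cordic library would
+# simply select the BPAUTO_ symbol, per scheme A above:
+#
+#	config FOO_DRIVER
+#		tristate "foo driver (hypothetical)"
+#		select BPAUTO_CORDIC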
+ +config BPAUTO_CRYPTO_CCM + depends on CRYPTO_AEAD + depends on CRYPTO_CTR + bool + +config BPAUTO_BUILD_CRYPTO_CCM + bool + default n if CRYPTO_CCM + default y if BPAUTO_CRYPTO_CCM + #c-file crypto/ccm.c + +config BPAUTO_CRYPTO_SKCIPHER + tristate + depends on m + depends on KERNEL_4_3 + default y if BACKPORTED_MAC802154 + default y if BACKPORTED_LIB80211_CRYPT_WEP + default y if BACKPORTED_LIB80211_CRYPT_TKIP + default y if BACKPORTED_BT + #c-file crypto/skcipher.c + #module-name skcipher + +config BPAUTO_WANT_DEV_COREDUMP + bool + +config BPAUTO_BUILD_WANT_DEV_COREDUMP + bool + default n if DEV_COREDUMP + default n if DISABLE_DEV_COREDUMP + default y if BPAUTO_WANT_DEV_COREDUMP + #h-file linux/devcoredump.h + #c-file drivers/base/devcoredump.c + +config BPAUTO_RHASHTABLE + bool + # current API of rhashtable was introduced in version 4.9 + # (the one including rhltable) + depends on KERNEL_4_9 + # not very nice - but better than always having it + default y if BACKPORTED_MAC80211 + #h-file linux/rhashtable.h + #c-file lib/rhashtable.c + +config BPAUTO_BUILD_HDMI + depends on n + bool + # the hdmi driver got some new apis like hdmi_infoframe_unpack() in + # kernel 4.0 which are used by some drivers + depends on KERNEL_4_0 + #h-file linux/hdmi.h + #c-file drivers/video/hdmi.c + +config BPAUTO_HDMI + bool + select BPAUTO_BUILD_HDMI if KERNEL_4_0 + # these drivers are using the new features of the hdmi driver. + default y if BACKPORTED_VIDEO_ADV7511 + default y if BACKPORTED_VIDEO_ADV7604 + default y if BACKPORTED_VIDEO_ADV7842 + +config BPAUTO_FRAME_VECTOR + bool + +config BPAUTO_BUILD_FRAME_VECTOR + depends on n + bool + default n if FRAME_VECTOR + default y if BPAUTO_FRAME_VECTOR + #c-file mm/frame_vector.c diff --git a/compat/Makefile b/compat/Makefile new file mode 100644 index 0000000..07416c8 --- /dev/null +++ b/compat/Makefile @@ -0,0 +1,47 @@ +ccflags-y += -I$(src) +ifeq ($(CONFIG_BACKPORT_INTEGRATE),) +obj-m += compat.o +else +obj-y += compat.o +endif +compat-y += main.o + +# Kernel backport compatibility code +compat-$(CPTCFG_KERNEL_3_0) += compat-3.0.o +compat-$(CPTCFG_KERNEL_3_1) += compat-3.1.o +compat-$(CPTCFG_KERNEL_3_2) += backport-3.2.o +compat-$(CPTCFG_KERNEL_3_3) += compat-3.3.o +compat-$(CPTCFG_KERNEL_3_4) += compat-3.4.o +compat-$(CPTCFG_KERNEL_3_5) += compat-3.5.o user_namespace.o +compat-$(CPTCFG_KERNEL_3_6) += compat-3.6.o +compat-$(CPTCFG_KERNEL_3_7) += compat-3.7.o +compat-$(CPTCFG_KERNEL_3_8) += compat-3.8.o +compat-$(CPTCFG_KERNEL_3_9) += compat-3.9.o +compat-$(CPTCFG_KERNEL_3_10) += backport-3.10.o +compat-$(CPTCFG_KERNEL_3_11) += backport-3.11.o +compat-$(CPTCFG_KERNEL_3_12) += backport-3.12.o +compat-$(CPTCFG_KERNEL_3_13) += backport-3.13.o +compat-$(CPTCFG_KERNEL_3_14) += backport-3.14.o +compat-$(CPTCFG_KERNEL_3_15) += backport-3.15.o +compat-$(CPTCFG_KERNEL_3_17) += backport-3.17.o +compat-$(CPTCFG_KERNEL_3_18) += backport-3.18.o +compat-$(CPTCFG_KERNEL_3_19) += backport-3.19.o +compat-$(CPTCFG_KERNEL_4_0) += backport-4.0.o +compat-$(CPTCFG_KERNEL_4_1) += backport-4.1.o +compat-$(CPTCFG_KERNEL_4_2) += backport-4.2.o +compat-$(CPTCFG_KERNEL_4_3) += backport-4.3.o +compat-$(CPTCFG_KERNEL_4_4) += backport-4.4.o +compat-$(CPTCFG_KERNEL_4_5) += backport-4.5.o +compat-$(CPTCFG_KERNEL_4_6) += backport-4.6.o +compat-$(CPTCFG_KERNEL_4_7) += backport-4.7.o +compat-$(CPTCFG_KERNEL_4_8) += backport-4.8.o + +compat-$(CPTCFG_BPAUTO_BUILD_CRYPTO_CCM) += crypto-ccm.o +compat-$(CPTCFG_BPAUTO_CRYPTO_SKCIPHER) += crypto-skcipher.o +skcipher-objs += crypto-skcipher.o 
+obj-$(CPTCFG_BPAUTO_CRYPTO_SKCIPHER) += skcipher.o +compat-$(CPTCFG_BPAUTO_BUILD_WANT_DEV_COREDUMP) += drivers-base-devcoredump.o +compat-$(CPTCFG_BPAUTO_RHASHTABLE) += lib-rhashtable.o +cordic-objs += lib-cordic.o +obj-$(CPTCFG_BPAUTO_BUILD_CORDIC) += cordic.o +compat-$(CPTCFG_BPAUTO_BUILD_CRYPTO_CCM) += crypto-ccm.o diff --git a/compat/backport-3.10.c b/compat/backport-3.10.c new file mode 100644 index 0000000..5cab709 --- /dev/null +++ b/compat/backport-3.10.c @@ -0,0 +1,199 @@ +/* + * Copyright (c) 2013 Luis R. Rodriguez + * + * Linux backport symbols for kernels 3.10. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +void proc_set_size(struct proc_dir_entry *de, loff_t size) +{ + de->size = size; +} +EXPORT_SYMBOL_GPL(proc_set_size); + +void proc_set_user(struct proc_dir_entry *de, kuid_t uid, kgid_t gid) +{ + de->uid = uid; + de->gid = gid; +} +EXPORT_SYMBOL_GPL(proc_set_user); + +/* get_random_int() was not exported for module use until 3.10-rc. + Implement it here in terms of the more expensive get_random_bytes() + */ +unsigned int get_random_int(void) +{ + unsigned int r; + get_random_bytes(&r, sizeof(r)); + + return r; +} +EXPORT_SYMBOL_GPL(get_random_int); + +#ifdef CONFIG_TTY +/** + * tty_port_tty_wakeup - helper to wake up a tty + * + * @port: tty port + */ +void tty_port_tty_wakeup(struct tty_port *port) +{ + struct tty_struct *tty = tty_port_tty_get(port); + + if (tty) { + tty_wakeup(tty); + tty_kref_put(tty); + } +} +EXPORT_SYMBOL_GPL(tty_port_tty_wakeup); + +/** + * tty_port_tty_hangup - helper to hang up a tty + * + * @port: tty port + * @check_clocal: hang only ttys with CLOCAL unset? + */ +void tty_port_tty_hangup(struct tty_port *port, bool check_clocal) +{ + struct tty_struct *tty = tty_port_tty_get(port); + + if (tty && (!check_clocal || !C_CLOCAL(tty))) + tty_hangup(tty); + tty_kref_put(tty); +} +EXPORT_SYMBOL_GPL(tty_port_tty_hangup); +#endif /* CONFIG_TTY */ + +#ifdef CONFIG_PCI_IOV +/* + * pci_vfs_assigned - returns number of VFs are assigned to a guest + * @dev: the PCI device + * + * Returns number of VFs belonging to this device that are assigned to a guest. + * If device is not a physical function returns -ENODEV. + */ +int pci_vfs_assigned(struct pci_dev *dev) +{ + struct pci_dev *vfdev; + unsigned int vfs_assigned = 0; + unsigned short dev_id; + + /* only search if we are a PF */ + if (!dev->is_physfn) + return 0; + + /* + * determine the device ID for the VFs, the vendor ID will be the + * same as the PF so there is no need to check for that one + */ + pci_read_config_word(dev, dev->sriov->pos + PCI_SRIOV_VF_DID, &dev_id); + + /* loop through all the VFs to see if we own any that are assigned */ + vfdev = pci_get_device(dev->vendor, dev_id, NULL); + while (vfdev) { + /* + * It is considered assigned if it is a virtual function with + * our dev as the physical function and the assigned bit is set + */ + if (vfdev->is_virtfn && (vfdev->physfn == dev) && + (vfdev->dev_flags & PCI_DEV_FLAGS_ASSIGNED)) + vfs_assigned++; + + vfdev = pci_get_device(dev->vendor, dev_id, vfdev); + } + + return vfs_assigned; +} +EXPORT_SYMBOL_GPL(pci_vfs_assigned); +#endif /* CONFIG_PCI_IOV */ + +#ifdef CONFIG_OF +/** + * of_property_read_u32_index - Find and read a u32 from a multi-value property. 
+ * + * @np: device node from which the property value is to be read. + * @propname: name of the property to be searched. + * @index: index of the u32 in the list of values + * @out_value: pointer to return value, modified only if no error. + * + * Search for a property in a device node and read nth 32-bit value from + * it. Returns 0 on success, -EINVAL if the property does not exist, + * -ENODATA if property does not have a value, and -EOVERFLOW if the + * property data isn't large enough. + * + * The out_value is modified only if a valid u32 value can be decoded. + */ +int of_property_read_u32_index(const struct device_node *np, + const char *propname, + u32 index, u32 *out_value) +{ + const u32 *val = of_find_property_value_of_size(np, propname, + ((index + 1) * sizeof(*out_value))); + + if (IS_ERR(val)) + return PTR_ERR(val); + + *out_value = be32_to_cpup(((__be32 *)val) + index); + return 0; +} +EXPORT_SYMBOL_GPL(of_property_read_u32_index); +#endif /* CONFIG_OF */ + +static inline void set_page_count(struct page *page, int v) +{ + atomic_set(&page->_count, v); +} + +/* + * Turn a non-refcounted page (->_count == 0) into refcounted with + * a count of one. + */ +static inline void set_page_refcounted(struct page *page) +{ + VM_BUG_ON(PageTail(page)); + VM_BUG_ON(atomic_read(&page->_count)); + set_page_count(page, 1); +} + +/* + * split_page takes a non-compound higher-order page, and splits it into + * n (1< +#include + +static bool sg_miter_get_next_page(struct sg_mapping_iter *miter) +{ + if (!miter->__remaining) { + struct scatterlist *sg; + unsigned long pgoffset; + + if (!__sg_page_iter_next(&miter->piter)) + return false; + + sg = miter->piter.sg; + pgoffset = miter->piter.sg_pgoffset; + + miter->__offset = pgoffset ? 0 : sg->offset; + miter->__remaining = sg->offset + sg->length - + (pgoffset << PAGE_SHIFT) - miter->__offset; + miter->__remaining = min_t(unsigned long, miter->__remaining, + PAGE_SIZE - miter->__offset); + } + + return true; +} + +/** + * sg_miter_skip - reposition mapping iterator + * @miter: sg mapping iter to be skipped + * @offset: number of bytes to plus the current location + * + * Description: + * Sets the offset of @miter to its current location plus @offset bytes. + * If mapping iterator @miter has been proceeded by sg_miter_next(), this + * stops @miter. + * + * Context: + * Don't care if @miter is stopped, or not proceeded yet. + * Otherwise, preemption disabled if the SG_MITER_ATOMIC is set. + * + * Returns: + * true if @miter contains the valid mapping. false if end of sg + * list is reached. + */ +static bool sg_miter_skip(struct sg_mapping_iter *miter, off_t offset) +{ + sg_miter_stop(miter); + + while (offset) { + off_t consumed; + + if (!sg_miter_get_next_page(miter)) + return false; + + consumed = min_t(off_t, offset, miter->__remaining); + miter->__offset += consumed; + miter->__remaining -= consumed; + offset -= consumed; + } + + return true; +} + +/** + * sg_copy_buffer - Copy data between a linear buffer and an SG list + * @sgl: The SG list + * @nents: Number of SG entries + * @buf: Where to copy from + * @buflen: The number of bytes to copy + * @skip: Number of bytes to skip before copying + * @to_buffer: transfer direction (true == from an sg list to a + * buffer, false == from a buffer to an sg list + * + * Returns the number of copied bytes. 
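+ *
+ * Hypothetical call site (sketch only, names illustrative): copy 64
+ * bytes out of an sg list into a linear buffer, skipping the first 16
+ * bytes:
+ *
+ *	copied = sg_copy_buffer(sgl, nents, buf, 64, 16, true);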
+ * + **/ +size_t sg_copy_buffer(struct scatterlist *sgl, unsigned int nents, void *buf, + size_t buflen, off_t skip, bool to_buffer) +{ + unsigned int offset = 0; + struct sg_mapping_iter miter; + unsigned long flags; + unsigned int sg_flags = SG_MITER_ATOMIC; + + if (to_buffer) + sg_flags |= SG_MITER_FROM_SG; + else + sg_flags |= SG_MITER_TO_SG; + + sg_miter_start(&miter, sgl, nents, sg_flags); + + if (!sg_miter_skip(&miter, skip)) + return false; + + local_irq_save(flags); + + while (sg_miter_next(&miter) && offset < buflen) { + unsigned int len; + + len = min(miter.length, buflen - offset); + + if (to_buffer) + memcpy(buf + offset, miter.addr, len); + else + memcpy(miter.addr, buf + offset, len); + + offset += len; + } + + sg_miter_stop(&miter); + + local_irq_restore(flags); + return offset; +} +EXPORT_SYMBOL_GPL(sg_copy_buffer); diff --git a/compat/backport-3.12.c b/compat/backport-3.12.c new file mode 100644 index 0000000..c9b21e8 --- /dev/null +++ b/compat/backport-3.12.c @@ -0,0 +1,72 @@ +/* + * Copyright (c) 2013 Hauke Mehrtens + * + * Backport functionality introduced in Linux 3.12. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#include +#include +#include +#include + +/* + * Allocator for buffer that is going to be passed to hid_output_report() + */ +u8 *hid_alloc_report_buf(struct hid_report *report, gfp_t flags) +{ + /* + * 7 extra bytes are necessary to achieve proper functionality + * of implement() working on 8 byte chunks + */ + + int len = ((report->size - 1) >> 3) + 1 + (report->id > 0) + 7; + + return kmalloc(len, flags); +} +EXPORT_SYMBOL_GPL(hid_alloc_report_buf); + +#if BITS_PER_LONG == 32 +/** + * div64_u64_rem - unsigned 64bit divide with 64bit divisor and remainder + * @dividend: 64bit dividend + * @divisor: 64bit divisor + * @remainder: 64bit remainder + * + * This implementation is a comparable to algorithm used by div64_u64. + * But this operation, which includes math for calculating the remainder, + * is kept distinct to avoid slowing down the div64_u64 operation on 32bit + * systems. + */ +#ifndef backports_div64_u64_rem_add +u64 div64_u64_rem(u64 dividend, u64 divisor, u64 *remainder) +{ + u32 high = divisor >> 32; + u64 quot; + + if (high == 0) { + u32 rem32; + quot = div_u64_rem(dividend, divisor, &rem32); + *remainder = rem32; + } else { + int n = 1 + fls(high); + quot = div_u64(dividend >> n, divisor >> n); + + if (quot != 0) + quot--; + + *remainder = dividend - quot * divisor; + if (*remainder >= divisor) { + quot++; + *remainder -= divisor; + } + } + + return quot; +} +EXPORT_SYMBOL_GPL(div64_u64_rem); +#endif /* backports_div64_u64_rem_add */ +#endif /* BITS_PER_LONG */ diff --git a/compat/backport-3.13.c b/compat/backport-3.13.c new file mode 100644 index 0000000..496bee6 --- /dev/null +++ b/compat/backport-3.13.c @@ -0,0 +1,309 @@ +/* + * Copyright (c) 2013 Hauke Mehrtens + * Copyright (c) 2013 Hannes Frederic Sowa + * Copyright (c) 2014 Luis R. Rodriguez + * + * Backport functionality introduced in Linux 3.13. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
+ */ +#include +#include +#include +#include +#include +#include +#include + +#if LINUX_VERSION_IS_GEQ(3,5,0) +#ifdef CONFIG_REGULATOR +#include +#include +#include +#include + +static void devm_rdev_release(struct device *dev, void *res) +{ + regulator_unregister(*(struct regulator_dev **)res); +} + +/** + * devm_regulator_register - Resource managed regulator_register() + * @regulator_desc: regulator to register + * @config: runtime configuration for regulator + * + * Called by regulator drivers to register a regulator. Returns a + * valid pointer to struct regulator_dev on success or an ERR_PTR() on + * error. The regulator will automatically be released when the device + * is unbound. + */ +struct regulator_dev *devm_regulator_register(struct device *dev, + const struct regulator_desc *regulator_desc, + const struct regulator_config *config) +{ + struct regulator_dev **ptr, *rdev; + + ptr = devres_alloc(devm_rdev_release, sizeof(*ptr), + GFP_KERNEL); + if (!ptr) + return ERR_PTR(-ENOMEM); + + rdev = regulator_register(regulator_desc, config); + if (!IS_ERR(rdev)) { + *ptr = rdev; + devres_add(dev, ptr); + } else { + devres_free(ptr); + } + + return rdev; +} +EXPORT_SYMBOL_GPL(devm_regulator_register); + +static int devm_rdev_match(struct device *dev, void *res, void *data) +{ + struct regulator_dev **r = res; + if (!r || !*r) { + WARN_ON(!r || !*r); + return 0; + } + return *r == data; +} + +/** + * devm_regulator_unregister - Resource managed regulator_unregister() + * @regulator: regulator to free + * + * Unregister a regulator registered with devm_regulator_register(). + * Normally this function will not need to be called and the resource + * management code will ensure that the resource is freed. + */ +void devm_regulator_unregister(struct device *dev, struct regulator_dev *rdev) +{ + int rc; + + rc = devres_release(dev, devm_rdev_release, devm_rdev_match, rdev); + if (rc != 0) + WARN_ON(rc); +} +EXPORT_SYMBOL_GPL(devm_regulator_unregister); +#endif /* CONFIG_REGULATOR */ +#endif /* LINUX_VERSION_IS_GEQ(3,5,0) */ + +/************* generic netlink backport *****************/ +#if RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(7,0) + +#undef genl_register_family +#undef genl_unregister_family + +int __backport_genl_register_family(struct genl_family *family) +{ + int i, ret; + +#define __copy(_field) family->family._field = family->_field + __copy(id); + __copy(hdrsize); + __copy(version); + __copy(maxattr); + strncpy(family->family.name, family->name, sizeof(family->family.name)); + __copy(netnsok); + __copy(pre_doit); + __copy(post_doit); +#if LINUX_VERSION_IS_GEQ(3,10,0) + __copy(parallel_ops); +#endif +#if LINUX_VERSION_IS_GEQ(3,11,0) + __copy(module); +#endif +#undef __copy + + ret = genl_register_family(&family->family); + if (ret < 0) + return ret; + + family->attrbuf = family->family.attrbuf; + family->id = family->family.id; + + for (i = 0; i < family->n_ops; i++) { + ret = genl_register_ops(&family->family, &family->ops[i]); + if (ret < 0) + goto error; + } + + for (i = 0; i < family->n_mcgrps; i++) { + ret = genl_register_mc_group(&family->family, + &family->mcgrps[i]); + if (ret) + goto error; + } + + return 0; + + error: + backport_genl_unregister_family(family); + return ret; +} +EXPORT_SYMBOL_GPL(__backport_genl_register_family); + +int backport_genl_unregister_family(struct genl_family *family) +{ + int err; + err = genl_unregister_family(&family->family); + return err; +} +EXPORT_SYMBOL_GPL(backport_genl_unregister_family); + +#endif /* RHEL_RELEASE_CODE < 
RHEL_RELEASE_VERSION(7,0) */ + +#ifdef __BACKPORT_NET_GET_RANDOM_ONCE +struct __net_random_once_work { + struct work_struct work; + struct static_key *key; +}; + +static void __net_random_once_deferred(struct work_struct *w) +{ + struct __net_random_once_work *work = + container_of(w, struct __net_random_once_work, work); + if (!static_key_enabled(work->key)) + static_key_slow_inc(work->key); + kfree(work); +} + +static void __net_random_once_disable_jump(struct static_key *key) +{ + struct __net_random_once_work *w; + + w = kmalloc(sizeof(*w), GFP_ATOMIC); + if (!w) + return; + + INIT_WORK(&w->work, __net_random_once_deferred); + w->key = key; + schedule_work(&w->work); +} + +bool __net_get_random_once(void *buf, int nbytes, bool *done, + struct static_key *done_key) +{ + static DEFINE_SPINLOCK(lock); + unsigned long flags; + + spin_lock_irqsave(&lock, flags); + if (*done) { + spin_unlock_irqrestore(&lock, flags); + return false; + } + + get_random_bytes(buf, nbytes); + *done = true; + spin_unlock_irqrestore(&lock, flags); + + __net_random_once_disable_jump(done_key); + + return true; +} +EXPORT_SYMBOL_GPL(__net_get_random_once); +#endif /* __BACKPORT_NET_GET_RANDOM_ONCE */ + +#ifdef CONFIG_PCI +#define pci_bus_read_dev_vendor_id LINUX_BACKPORT(pci_bus_read_dev_vendor_id) +static bool pci_bus_read_dev_vendor_id(struct pci_bus *bus, int devfn, u32 *l, + int crs_timeout) +{ + int delay = 1; + + if (pci_bus_read_config_dword(bus, devfn, PCI_VENDOR_ID, l)) + return false; + + /* some broken boards return 0 or ~0 if a slot is empty: */ + if (*l == 0xffffffff || *l == 0x00000000 || + *l == 0x0000ffff || *l == 0xffff0000) + return false; + + /* + * Configuration Request Retry Status. Some root ports return the + * actual device ID instead of the synthetic ID (0xFFFF) required + * by the PCIe spec. Ignore the device ID and only check for + * (vendor id == 1). + */ + while ((*l & 0xffff) == 0x0001) { + if (!crs_timeout) + return false; + + msleep(delay); + delay *= 2; + if (pci_bus_read_config_dword(bus, devfn, PCI_VENDOR_ID, l)) + return false; + /* Card hasn't responded in 60 seconds? Must be stuck. 
*/ + if (delay > crs_timeout) { + printk(KERN_WARNING "pci %04x:%02x:%02x.%d: not responding\n", + pci_domain_nr(bus), bus->number, PCI_SLOT(devfn), + PCI_FUNC(devfn)); + return false; + } + } + + return true; +} + +bool pci_device_is_present(struct pci_dev *pdev) +{ + u32 v; + + return pci_bus_read_dev_vendor_id(pdev->bus, pdev->devfn, &v, 0); +} +EXPORT_SYMBOL_GPL(pci_device_is_present); +#endif /* CONFIG_PCI */ + +#ifdef CONFIG_HWMON +struct device* +hwmon_device_register_with_groups(struct device *dev, const char *name, + void *drvdata, + const struct attribute_group **groups) +{ + struct device *hwdev; + + hwdev = hwmon_device_register(dev); + hwdev->groups = groups; + dev_set_drvdata(hwdev, drvdata); + return hwdev; +} + +static void devm_hwmon_release(struct device *dev, void *res) +{ + struct device *hwdev = *(struct device **)res; + + hwmon_device_unregister(hwdev); +} + +struct device * +devm_hwmon_device_register_with_groups(struct device *dev, const char *name, + void *drvdata, + const struct attribute_group **groups) +{ + struct device **ptr, *hwdev; + + if (!dev) + return ERR_PTR(-EINVAL); + + ptr = devres_alloc(devm_hwmon_release, sizeof(*ptr), GFP_KERNEL); + if (!ptr) + return ERR_PTR(-ENOMEM); + + hwdev = hwmon_device_register_with_groups(dev, name, drvdata, groups); + if (IS_ERR(hwdev)) + goto error; + + *ptr = hwdev; + devres_add(dev, ptr); + return hwdev; + +error: + devres_free(ptr); + return hwdev; +} +EXPORT_SYMBOL_GPL(devm_hwmon_device_register_with_groups); +#endif diff --git a/compat/backport-3.14.c b/compat/backport-3.14.c new file mode 100644 index 0000000..aeb3004 --- /dev/null +++ b/compat/backport-3.14.c @@ -0,0 +1,90 @@ +/* + * Copyright (c) 2014 Hauke Mehrtens + * + * Backport functionality introduced in Linux 3.14. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ +#include +#include +#include + +#ifdef CONFIG_PCI_MSI +/** + * pci_enable_msi_range - configure device's MSI capability structure + * @dev: device to configure + * @minvec: minimal number of interrupts to configure + * @maxvec: maximum number of interrupts to configure + * + * This function tries to allocate a maximum possible number of interrupts in a + * range between @minvec and @maxvec. It returns a negative errno if an error + * occurs. If it succeeds, it returns the actual number of interrupts allocated + * and updates the @dev's irq member to the lowest new interrupt number; + * the other interrupt numbers allocated to this device are consecutive. 
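+ *
+ * Hypothetical usage sketch (pdev and the vector range are illustrative):
+ *
+ *	nvec = pci_enable_msi_range(pdev, 2, 8);
+ *	if (nvec < 0)
+ *		return nvec;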
+ **/ +int pci_enable_msi_range(struct pci_dev *dev, int minvec, int maxvec) +{ + int nvec = maxvec; + int rc; + + if (maxvec < minvec) + return -ERANGE; + + do { + rc = pci_enable_msi_block(dev, nvec); + if (rc < 0) { + return rc; + } else if (rc > 0) { + if (rc < minvec) + return -ENOSPC; + nvec = rc; + } + } while (rc); + + return nvec; +} +EXPORT_SYMBOL(pci_enable_msi_range); +#endif + +#ifdef CONFIG_PCI_MSI +/** + * pci_enable_msix_range - configure device's MSI-X capability structure + * @dev: pointer to the pci_dev data structure of MSI-X device function + * @entries: pointer to an array of MSI-X entries + * @minvec: minimum number of MSI-X irqs requested + * @maxvec: maximum number of MSI-X irqs requested + * + * Setup the MSI-X capability structure of device function with a maximum + * possible number of interrupts in the range between @minvec and @maxvec + * upon its software driver call to request for MSI-X mode enabled on its + * hardware device function. It returns a negative errno if an error occurs. + * If it succeeds, it returns the actual number of interrupts allocated and + * indicates the successful configuration of MSI-X capability structure + * with new allocated MSI-X interrupts. + **/ +int pci_enable_msix_range(struct pci_dev *dev, struct msix_entry *entries, + int minvec, int maxvec) +{ + int nvec = maxvec; + int rc; + + if (maxvec < minvec) + return -ERANGE; + + do { + rc = pci_enable_msix(dev, entries, nvec); + if (rc < 0) { + return rc; + } else if (rc > 0) { + if (rc < minvec) + return -ENOSPC; + nvec = rc; + } + } while (rc); + + return nvec; +} +EXPORT_SYMBOL(pci_enable_msix_range); +#endif diff --git a/compat/backport-3.15.c b/compat/backport-3.15.c new file mode 100644 index 0000000..c0023ac --- /dev/null +++ b/compat/backport-3.15.c @@ -0,0 +1,88 @@ +/* + * Copyright (c) 2014 Hauke Mehrtens + * Copyright (c) 2015 Luis R. Rodriguez + * + * Backport functionality introduced in Linux 3.15. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ +#include +#include +#include +#include +#include +#include +#include +#include +#include + +/** + * devm_kstrdup - Allocate resource managed space and + * copy an existing string into that. + * @dev: Device to allocate memory for + * @s: the string to duplicate + * @gfp: the GFP mask used in the devm_kmalloc() call when + * allocating memory + * RETURNS: + * Pointer to allocated string on success, NULL on failure. + */ +char *devm_kstrdup(struct device *dev, const char *s, gfp_t gfp) +{ + size_t size; + char *buf; + + if (!s) + return NULL; + + size = strlen(s) + 1; + buf = devm_kmalloc(dev, size, gfp); + if (buf) + memcpy(buf, s, size); + return buf; +} +EXPORT_SYMBOL_GPL(devm_kstrdup); + +#ifdef CONFIG_OF +/** + * of_property_count_elems_of_size - Count the number of elements in a property + * + * @np: device node from which the property value is to be read. + * @propname: name of the property to be searched. + * @elem_size: size of the individual element + * + * Search for a property in a device node and count the number of elements of + * size elem_size in it. Returns number of elements on sucess, -EINVAL if the + * property does not exist or its length does not match a multiple of elem_size + * and -ENODATA if the property does not have a value. 
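+ *
+ * Sketch (property name illustrative): counting u32 cells in a "reg"
+ * property:
+ *
+ *	n = of_property_count_elems_of_size(np, "reg", sizeof(u32));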
+ */ +int of_property_count_elems_of_size(const struct device_node *np, + const char *propname, int elem_size) +{ + struct property *prop = of_find_property(np, propname, NULL); + + if (!prop) + return -EINVAL; + if (!prop->value) + return -ENODATA; + + if (prop->length % elem_size != 0) { + pr_err("size of %s in node %s is not a multiple of %d\n", + propname, np->full_name, elem_size); + return -EINVAL; + } + + return prop->length / elem_size; +} +EXPORT_SYMBOL_GPL(of_property_count_elems_of_size); +#endif + +void kvfree(const void *addr) +{ + if (is_vmalloc_addr(addr)) + vfree(addr); + else + kfree(addr); +} +EXPORT_SYMBOL_GPL(kvfree); diff --git a/compat/backport-3.17.c b/compat/backport-3.17.c new file mode 100644 index 0000000..bf6027c --- /dev/null +++ b/compat/backport-3.17.c @@ -0,0 +1,166 @@ +/* + * Copyright (c) 2014 Hauke Mehrtens + * + * Backport functionality introduced in Linux 3.17. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ +#include +#include +#include +#include +#include +#include +#include + +int bit_wait(void *word) +{ + schedule(); + return 0; +} +EXPORT_SYMBOL_GPL(bit_wait); + +int bit_wait_io(void *word) +{ + io_schedule(); + return 0; +} +EXPORT_SYMBOL_GPL(bit_wait_io); + +/** + * ktime_get_raw - Returns the raw monotonic time in ktime_t format + */ +ktime_t ktime_get_raw(void) +{ + struct timespec ts; + + getrawmonotonic(&ts); + return timespec_to_ktime(ts); +} +EXPORT_SYMBOL_GPL(ktime_get_raw); + + +/** + * nsecs_to_jiffies64 - Convert nsecs in u64 to jiffies64 + * + * @n: nsecs in u64 + * + * Unlike {m,u}secs_to_jiffies, type of input is not unsigned int but u64. + * And this doesn't return MAX_JIFFY_OFFSET since this function is designed + * for scheduler, not for use in device drivers to calculate timeout value. + * + * note: + * NSEC_PER_SEC = 10^9 = (5^9 * 2^9) = (1953125 * 512) + * ULLONG_MAX ns = 18446744073.709551615 secs = about 584 years + */ +static u64 backport_nsecs_to_jiffies64(u64 n) +{ +#if (NSEC_PER_SEC % HZ) == 0 + /* Common case, HZ = 100, 128, 200, 250, 256, 500, 512, 1000 etc. */ + return div_u64(n, NSEC_PER_SEC / HZ); +#elif (HZ % 512) == 0 + /* overflow after 292 years if HZ = 1024 */ + return div_u64(n * HZ / 512, NSEC_PER_SEC / 512); +#else + /* + * Generic case - optimized for cases where HZ is a multiple of 3. + * overflow after 64.99 years, exact for HZ = 60, 72, 90, 120 etc. + */ + return div_u64(n * 9, (9ull * NSEC_PER_SEC + HZ / 2) / HZ); +#endif +} + +/** + * nsecs_to_jiffies - Convert nsecs in u64 to jiffies + * + * @n: nsecs in u64 + * + * Unlike {m,u}secs_to_jiffies, type of input is not unsigned int but u64. + * And this doesn't return MAX_JIFFY_OFFSET since this function is designed + * for scheduler, not for use in device drivers to calculate timeout value. + * + * note: + * NSEC_PER_SEC = 10^9 = (5^9 * 2^9) = (1953125 * 512) + * ULLONG_MAX ns = 18446744073.709551615 secs = about 584 years + */ +unsigned long nsecs_to_jiffies(u64 n) +{ + return (unsigned long)backport_nsecs_to_jiffies64(n); +} +EXPORT_SYMBOL_GPL(nsecs_to_jiffies); + +/** + * devm_kvasprintf - Allocate resource managed space + * for the formatted string. 
+ * @dev: Device to allocate memory for + * @gfp: the GFP mask used in the devm_kmalloc() call when + * allocating memory + * @fmt: the formatted string to duplicate + * @ap: the list of tokens to be placed in the formatted string + * RETURNS: + * Pointer to allocated string on success, NULL on failure. + */ +char *devm_kvasprintf(struct device *dev, gfp_t gfp, const char *fmt, + va_list ap) +{ + unsigned int len; + char *p; + va_list aq; + + va_copy(aq, ap); + len = vsnprintf(NULL, 0, fmt, aq); + va_end(aq); + + p = devm_kmalloc(dev, len+1, gfp); + if (!p) + return NULL; + + vsnprintf(p, len+1, fmt, ap); + + return p; +} +EXPORT_SYMBOL_GPL(devm_kvasprintf); + +/** + * devm_kasprintf - Allocate resource managed space + * and copy an existing formatted string into that + * @dev: Device to allocate memory for + * @gfp: the GFP mask used in the devm_kmalloc() call when + * allocating memory + * @fmt: the string to duplicate + * RETURNS: + * Pointer to allocated string on success, NULL on failure. + */ +char *devm_kasprintf(struct device *dev, gfp_t gfp, const char *fmt, ...) +{ + va_list ap; + char *p; + + va_start(ap, fmt); + p = devm_kvasprintf(dev, gfp, fmt, ap); + va_end(ap); + + return p; +} +EXPORT_SYMBOL_GPL(devm_kasprintf); + +#define STANDARD_PARAM_DEF(name, type, format, strtolfn) \ + int param_set_##name(const char *val, const struct kernel_param *kp) \ + { \ + return strtolfn(val, 0, (type *)kp->arg); \ + } \ + int param_get_##name(char *buffer, const struct kernel_param *kp) \ + { \ + return scnprintf(buffer, PAGE_SIZE, format, \ + *((type *)kp->arg)); \ + } \ + struct kernel_param_ops param_ops_##name = { \ + .set = param_set_##name, \ + .get = param_get_##name, \ + }; \ + EXPORT_SYMBOL(param_set_##name); \ + EXPORT_SYMBOL(param_get_##name); \ + EXPORT_SYMBOL(param_ops_##name) +STANDARD_PARAM_DEF(ullong, unsigned long long, "%llu", kstrtoull); diff --git a/compat/backport-3.18.c b/compat/backport-3.18.c new file mode 100644 index 0000000..dbef343 --- /dev/null +++ b/compat/backport-3.18.c @@ -0,0 +1,324 @@ +/* + * Copyright (c) 2014 Hauke Mehrtens + * + * Backport functionality introduced in Linux 3.18. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +/** + * eth_get_headlen - determine the the length of header for an ethernet frame + * @data: pointer to start of frame + * @len: total length of frame + * + * Make a best effort attempt to pull the length for all of the headers for + * a given frame in a linear buffer. 
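+ *
+ * Illustrative sketch (buffer names hypothetical): an rx path deciding
+ * how much of a received buffer to copy into the skb linear area might
+ * use
+ *
+ *	pull_len = eth_get_headlen(rx_buf, rx_buf_len);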
+ */ +int eth_get_headlen(unsigned char *data, unsigned int max_len) +{ + union { + unsigned char *network; + /* l2 headers */ + struct ethhdr *eth; + struct vlan_hdr *vlan; + /* l3 headers */ + struct iphdr *ipv4; + struct ipv6hdr *ipv6; + } hdr; + __be16 protocol; + u8 nexthdr = 0; /* default to not TCP */ + u8 hlen; + + /* this should never happen, but better safe than sorry */ + if (max_len < ETH_HLEN) + return max_len; + + /* initialize network frame pointer */ + hdr.network = data; + + /* set first protocol and move network header forward */ + protocol = hdr.eth->h_proto; + hdr.network += ETH_HLEN; + + /* handle any vlan tag if present */ + if (protocol == htons(ETH_P_8021Q)) { + if ((hdr.network - data) > (max_len - VLAN_HLEN)) + return max_len; + + protocol = hdr.vlan->h_vlan_encapsulated_proto; + hdr.network += VLAN_HLEN; + } + + /* handle L3 protocols */ + if (protocol == htons(ETH_P_IP)) { + if ((hdr.network - data) > (max_len - sizeof(struct iphdr))) + return max_len; + + /* access ihl as a u8 to avoid unaligned access on ia64 */ + hlen = (hdr.network[0] & 0x0F) << 2; + + /* verify hlen meets minimum size requirements */ + if (hlen < sizeof(struct iphdr)) + return hdr.network - data; + + /* record next protocol if header is present */ + if (!(hdr.ipv4->frag_off & htons(IP_OFFSET))) + nexthdr = hdr.ipv4->protocol; + } else if (protocol == htons(ETH_P_IPV6)) { + if ((hdr.network - data) > (max_len - sizeof(struct ipv6hdr))) + return max_len; + + /* record next protocol */ + nexthdr = hdr.ipv6->nexthdr; + hlen = sizeof(struct ipv6hdr); + } else if (protocol == htons(ETH_P_FCOE)) { + if ((hdr.network - data) > (max_len - FCOE_HEADER_LEN)) + return max_len; + hlen = FCOE_HEADER_LEN; + } else { + return hdr.network - data; + } + + /* relocate pointer to start of L4 header */ + hdr.network += hlen; + + /* finally sort out TCP/UDP */ + if (nexthdr == IPPROTO_TCP) { + if ((hdr.network - data) > (max_len - sizeof(struct tcphdr))) + return max_len; + + /* access doff as a u8 to avoid unaligned access on ia64 */ + hlen = (hdr.network[12] & 0xF0) >> 2; + + /* verify hlen meets minimum size requirements */ + if (hlen < sizeof(struct tcphdr)) + return hdr.network - data; + + hdr.network += hlen; + } else if (nexthdr == IPPROTO_UDP) { + if ((hdr.network - data) > (max_len - sizeof(struct udphdr))) + return max_len; + + hdr.network += sizeof(struct udphdr); + } + + /* + * If everything has gone correctly hdr.network should be the + * data section of the packet and will be the end of the header. + * If not then it probably represents the end of the last recognized + * header. + */ + if ((hdr.network - data) < max_len) + return hdr.network - data; + else + return max_len; +} +EXPORT_SYMBOL_GPL(eth_get_headlen); + +#define sock_efree LINUX_BACKPORT(sock_efree) +static void sock_efree(struct sk_buff *skb) +{ + sock_put(skb->sk); +} + +/** + * skb_clone_sk - create clone of skb, and take reference to socket + * @skb: the skb to clone + * + * This function creates a clone of a buffer that holds a reference on + * sk_refcnt. Buffers created via this function are meant to be + * returned using sock_queue_err_skb, or free via kfree_skb. + * + * When passing buffers allocated with this function to sock_queue_err_skb + * it is necessary to wrap the call with sock_hold/sock_put in order to + * prevent the socket from being released prior to being enqueued on + * the sk_error_queue. 
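+ *
+ * Minimal sketch of that pattern (error handling elided):
+ *
+ *	clone = skb_clone_sk(skb);
+ *	if (clone) {
+ *		sock_hold(skb->sk);
+ *		sock_queue_err_skb(skb->sk, clone);
+ *		sock_put(skb->sk);
+ *	}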
+ */ +struct sk_buff *skb_clone_sk(struct sk_buff *skb) +{ + struct sock *sk = skb->sk; + struct sk_buff *clone; + + if (!sk || !atomic_inc_not_zero(&sk->sk_refcnt)) + return NULL; + + clone = skb_clone(skb, GFP_ATOMIC); + if (!clone) { + sock_put(sk); + return NULL; + } + + clone->sk = sk; + clone->destructor = sock_efree; + + return clone; +} +EXPORT_SYMBOL_GPL(skb_clone_sk); + +#if LINUX_VERSION_IS_GEQ(3,3,0) +/* + * skb_complete_wifi_ack() needs to get backported, because the version from + * 3.18 added the sock_hold() and sock_put() calles missing in older versions. + */ +void skb_complete_wifi_ack(struct sk_buff *skb, bool acked) +{ + struct sock *sk = skb->sk; + struct sock_exterr_skb *serr; + int err; + +#if LINUX_VERSION_IS_GEQ(3,3,0) + skb->wifi_acked_valid = 1; + skb->wifi_acked = acked; +#endif + + serr = SKB_EXT_ERR(skb); + memset(serr, 0, sizeof(*serr)); + serr->ee.ee_errno = ENOMSG; + serr->ee.ee_origin = SO_EE_ORIGIN_TXSTATUS; + + /* take a reference to prevent skb_orphan() from freeing the socket */ + sock_hold(sk); + + err = sock_queue_err_skb(sk, skb); + if (err) + kfree_skb(skb); + + sock_put(sk); +} +EXPORT_SYMBOL_GPL(skb_complete_wifi_ack); +#endif + +#if LINUX_VERSION_IS_GEQ(3,17,0) +int __sched out_of_line_wait_on_bit_timeout( + void *word, int bit, wait_bit_action_f *action, + unsigned mode, unsigned long timeout) +{ + wait_queue_head_t *wq = bit_waitqueue(word, bit); + DEFINE_WAIT_BIT(wait, word, bit); + + wait.key.private = jiffies + timeout; + return __wait_on_bit(wq, &wait, action, mode); +} +EXPORT_SYMBOL_GPL(out_of_line_wait_on_bit_timeout); + +__sched int bit_wait_timeout(struct wait_bit_key *word) +{ + unsigned long now = ACCESS_ONCE(jiffies); + if (signal_pending_state(current->state, current)) + return 1; + if (time_after_eq(now, word->private)) + return -EAGAIN; + schedule_timeout(word->private - now); + return 0; +} +EXPORT_SYMBOL_GPL(bit_wait_timeout); +#endif + +#ifdef CONFIG_OF +/** + * of_find_property_value_of_size + * + * @np: device node from which the property value is to be read. + * @propname: name of the property to be searched. + * @len: requested length of property value + * + * Search for a property in a device node and valid the requested size. + * Returns the property value on success, -EINVAL if the property does not + * exist, -ENODATA if property does not have a value, and -EOVERFLOW if the + * property data isn't large enough. + * + */ +void *of_find_property_value_of_size(const struct device_node *np, + const char *propname, u32 len) +{ + struct property *prop = of_find_property(np, propname, NULL); + + if (!prop) + return ERR_PTR(-EINVAL); + if (!prop->value) + return ERR_PTR(-ENODATA); + if (len > prop->length) + return ERR_PTR(-EOVERFLOW); + + return prop->value; +} + +/** + * of_property_read_u64_array - Find and read an array of 64 bit integers + * from a property. + * + * @np: device node from which the property value is to be read. + * @propname: name of the property to be searched. + * @out_values: pointer to return value, modified only if return value is 0. + * @sz: number of array elements to read + * + * Search for a property in a device node and read 64-bit value(s) from + * it. Returns 0 on success, -EINVAL if the property does not exist, + * -ENODATA if property does not have a value, and -EOVERFLOW if the + * property data isn't large enough. + * + * The out_values is modified only if a valid u64 value can be decoded. 
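+ *
+ * Sketch, reading two u64 cells from a hypothetical "dma-window"
+ * property:
+ *
+ *	u64 vals[2];
+ *	err = of_property_read_u64_array(np, "dma-window", vals, 2);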
+ */ +int of_property_read_u64_array(const struct device_node *np, + const char *propname, u64 *out_values, + size_t sz) +{ + const __be32 *val = of_find_property_value_of_size(np, propname, + (sz * sizeof(*out_values))); + + if (IS_ERR(val)) + return PTR_ERR(val); + + while (sz--) { + *out_values++ = of_read_number(val, 2); + val += 2; + } + return 0; +} +EXPORT_SYMBOL_GPL(of_property_read_u64_array); +#endif /* CONFIG_OF */ + +#if !(LINUX_VERSION_IS_GEQ(3,17,3) || \ + (LINUX_VERSION_IS_GEQ(3,14,24) && \ + LINUX_VERSION_IS_LESS(3,15,0)) || \ + (LINUX_VERSION_IS_GEQ(3,12,33) && \ + LINUX_VERSION_IS_LESS(3,13,0)) || \ + (LINUX_VERSION_IS_GEQ(3,10,60) && \ + LINUX_VERSION_IS_LESS(3,11,0)) || \ + (LINUX_VERSION_IS_GEQ(3,4,106) && \ + LINUX_VERSION_IS_LESS(3,5,0)) || \ + (LINUX_VERSION_IS_GEQ(3,2,65) && \ + LINUX_VERSION_IS_LESS(3,3,0))) +/** + * memzero_explicit - Fill a region of memory (e.g. sensitive + * keying data) with 0s. + * @s: Pointer to the start of the area. + * @count: The size of the area. + * + * Note: usually using memset() is just fine (!), but in cases + * where clearing out _local_ data at the end of a scope is + * necessary, memzero_explicit() should be used instead in + * order to prevent the compiler from optimising away zeroing. + * + * memzero_explicit() doesn't need an arch-specific version as + * it just invokes the one of memset() implicitly. + */ +void memzero_explicit(void *s, size_t count) +{ + memset(s, 0, count); + barrier_data(s); +} +EXPORT_SYMBOL_GPL(memzero_explicit); +#endif diff --git a/compat/backport-3.19.c b/compat/backport-3.19.c new file mode 100644 index 0000000..019644d --- /dev/null +++ b/compat/backport-3.19.c @@ -0,0 +1,165 @@ +/* + * Copyright (c) 2014 Hauke Mehrtens + * + * Backport functionality introduced in Linux 3.19. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ +#include +#include +#include +#include +#include +#include +#include +#include + +#if LINUX_VERSION_IS_LESS(3,18,12) +static inline bool is_kthread_should_stop(void) +{ + return (current->flags & PF_KTHREAD) && kthread_should_stop(); +} + +/* + * DEFINE_WAIT_FUNC(wait, woken_wake_func); + * + * add_wait_queue(&wq, &wait); + * for (;;) { + * if (condition) + * break; + * + * p->state = mode; condition = true; + * smp_mb(); // A smp_wmb(); // C + * if (!wait->flags & WQ_FLAG_WOKEN) wait->flags |= WQ_FLAG_WOKEN; + * schedule() try_to_wake_up(); + * p->state = TASK_RUNNING; ~~~~~~~~~~~~~~~~~~ + * wait->flags &= ~WQ_FLAG_WOKEN; condition = true; + * smp_mb() // B smp_wmb(); // C + * wait->flags |= WQ_FLAG_WOKEN; + * } + * remove_wait_queue(&wq, &wait); + * + */ +long wait_woken(wait_queue_t *wait, unsigned mode, long timeout) +{ + set_current_state(mode); /* A */ + /* + * The above implies an smp_mb(), which matches with the smp_wmb() from + * woken_wake_function() such that if we observe WQ_FLAG_WOKEN we must + * also observe all state before the wakeup. + */ + if (!(wait->flags & WQ_FLAG_WOKEN) && !is_kthread_should_stop()) + timeout = schedule_timeout(timeout); + __set_current_state(TASK_RUNNING); + + /* + * The below implies an smp_mb(), it too pairs with the smp_wmb() from + * woken_wake_function() such that we must either observe the wait + * condition being true _OR_ WQ_FLAG_WOKEN such that we will not miss + * an event. 
+ */ + set_mb(wait->flags, wait->flags & ~WQ_FLAG_WOKEN); /* B */ + + return timeout; +} +EXPORT_SYMBOL(wait_woken); + +int woken_wake_function(wait_queue_t *wait, unsigned mode, int sync, void *key) +{ + /* + * Although this function is called under waitqueue lock, LOCK + * doesn't imply write barrier and the users expects write + * barrier semantics on wakeup functions. The following + * smp_wmb() is equivalent to smp_wmb() in try_to_wake_up() + * and is paired with set_mb() in wait_woken(). + */ + smp_wmb(); /* C */ + wait->flags |= WQ_FLAG_WOKEN; + + return default_wake_function(wait, mode, sync, key); +} +EXPORT_SYMBOL(woken_wake_function); +#endif + +static u8 netdev_rss_key[NETDEV_RSS_KEY_LEN]; + +void netdev_rss_key_fill(void *buffer, size_t len) +{ + BUG_ON(len > sizeof(netdev_rss_key)); +#ifdef __BACKPORT_NET_GET_RANDOM_ONCE + net_get_random_once(netdev_rss_key, sizeof(netdev_rss_key)); + memcpy(buffer, netdev_rss_key, len); +#else + get_random_bytes(buffer, len); +#endif +} +EXPORT_SYMBOL_GPL(netdev_rss_key_fill); + +#if defined(CONFIG_DEBUG_FS) +struct debugfs_devm_entry { + int (*read)(struct seq_file *seq, void *data); + struct device *dev; +}; + +static int debugfs_devm_entry_open(struct inode *inode, struct file *f) +{ + struct debugfs_devm_entry *entry = inode->i_private; + + return single_open(f, entry->read, entry->dev); +} + +static const struct file_operations debugfs_devm_entry_ops = { + .owner = THIS_MODULE, + .open = debugfs_devm_entry_open, + .release = single_release, + .read = seq_read, + .llseek = seq_lseek +}; + +/** + * debugfs_create_devm_seqfile - create a debugfs file that is bound to device. + * + * @dev: device related to this debugfs file. + * @name: name of the debugfs file. + * @parent: a pointer to the parent dentry for this file. This should be a + * directory dentry if set. If this parameter is %NULL, then the + * file will be created in the root of the debugfs filesystem. + * @read_fn: function pointer called to print the seq_file content. + */ +struct dentry *debugfs_create_devm_seqfile(struct device *dev, const char *name, + struct dentry *parent, + int (*read_fn)(struct seq_file *s, + void *data)) +{ + struct debugfs_devm_entry *entry; + + if (IS_ERR(parent)) + return ERR_PTR(-ENOENT); + + entry = devm_kzalloc(dev, sizeof(*entry), GFP_KERNEL); + if (!entry) + return ERR_PTR(-ENOMEM); + + entry->read = read_fn; + entry->dev = dev; + + return debugfs_create_file(name, S_IRUGO, parent, entry, + &debugfs_devm_entry_ops); +} +EXPORT_SYMBOL_GPL(debugfs_create_devm_seqfile); + +#endif /* CONFIG_DEBUG_FS */ + +int skb_ensure_writable(struct sk_buff *skb, int write_len) +{ + if (!pskb_may_pull(skb, write_len)) + return -ENOMEM; + + if (!skb_cloned(skb) || skb_clone_writable(skb, write_len)) + return 0; + + return pskb_expand_head(skb, 0, 0, GFP_ATOMIC); +} +EXPORT_SYMBOL_GPL(skb_ensure_writable); diff --git a/compat/backport-3.2.c b/compat/backport-3.2.c new file mode 100644 index 0000000..601a168 --- /dev/null +++ b/compat/backport-3.2.c @@ -0,0 +1,25 @@ +/* + * Linux backport symbols for kernels 3.2. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
+ */ + +#include +#include + +int hex2bin(u8 *dst, const char *src, size_t count) +{ + while (count--) { + int hi = hex_to_bin(*src++); + int lo = hex_to_bin(*src++); + + if ((hi < 0) || (lo < 0)) + return -1; + + *dst++ = (hi << 4) | lo; + } + return 0; +} +EXPORT_SYMBOL_GPL(hex2bin); diff --git a/compat/backport-4.0.c b/compat/backport-4.0.c new file mode 100644 index 0000000..8ae1611 --- /dev/null +++ b/compat/backport-4.0.c @@ -0,0 +1,323 @@ +/* + * Copyright (c) 2015 Hauke Mehrtens + * + * Backport functionality introduced in Linux 4.0. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#include +#include +#include +#include +#include +#include +#include + +static __always_inline long __get_user_pages_locked(struct task_struct *tsk, + struct mm_struct *mm, + unsigned long start, + unsigned long nr_pages, + int write, int force, + struct page **pages, + struct vm_area_struct **vmas, + int *locked, bool notify_drop, + unsigned int flags) +{ + long ret, pages_done; + bool lock_dropped; + + if (locked) { + /* if VM_FAULT_RETRY can be returned, vmas become invalid */ + BUG_ON(vmas); + /* check caller initialized locked */ + BUG_ON(*locked != 1); + } + + if (pages) + flags |= FOLL_GET; + if (write) + flags |= FOLL_WRITE; + if (force) + flags |= FOLL_FORCE; + + pages_done = 0; + lock_dropped = false; + for (;;) { + ret = __get_user_pages(tsk, mm, start, nr_pages, flags, pages, + vmas, locked); + if (!locked) + /* VM_FAULT_RETRY couldn't trigger, bypass */ + return ret; + + /* VM_FAULT_RETRY cannot return errors */ + if (!*locked) { + BUG_ON(ret < 0); + BUG_ON(ret >= nr_pages); + } + + if (!pages) + /* If it's a prefault don't insist harder */ + return ret; + + if (ret > 0) { + nr_pages -= ret; + pages_done += ret; + if (!nr_pages) + break; + } + if (*locked) { + /* VM_FAULT_RETRY didn't trigger */ + if (!pages_done) + pages_done = ret; + break; + } + /* VM_FAULT_RETRY triggered, so seek to the faulting offset */ + pages += ret; + start += ret << PAGE_SHIFT; + + /* + * Repeat on the address that fired VM_FAULT_RETRY + * without FAULT_FLAG_ALLOW_RETRY but with + * FAULT_FLAG_TRIED. + */ + *locked = 1; + lock_dropped = true; + down_read(&mm->mmap_sem); + ret = __get_user_pages(tsk, mm, start, 1, flags | FOLL_TRIED, + pages, NULL, NULL); + if (ret != 1) { + BUG_ON(ret > 1); + if (!pages_done) + pages_done = ret; + break; + } + nr_pages--; + pages_done++; + if (!nr_pages) + break; + pages++; + start += PAGE_SIZE; + } + if (notify_drop && lock_dropped && *locked) { + /* + * We must let the caller know we temporarily dropped the lock + * and so the critical section protected by it was lost. + */ + up_read(&mm->mmap_sem); + *locked = 0; + } + return pages_done; +} + +/* + * We can leverage the VM_FAULT_RETRY functionality in the page fault + * paths better by using either get_user_pages_locked() or + * get_user_pages_unlocked(). 
+ * + * get_user_pages_locked() is suitable to replace the form: + * + * down_read(&mm->mmap_sem); + * do_something() + * get_user_pages(tsk, mm, ..., pages, NULL); + * up_read(&mm->mmap_sem); + * + * to: + * + * int locked = 1; + * down_read(&mm->mmap_sem); + * do_something() + * get_user_pages_locked(tsk, mm, ..., pages, &locked); + * if (locked) + * up_read(&mm->mmap_sem); + */ +long get_user_pages_locked(unsigned long start, unsigned long nr_pages, + int write, int force, struct page **pages, + int *locked) +{ + return __get_user_pages_locked(current, current->mm, start, nr_pages, + write, force, pages, NULL, locked, true, + FOLL_TOUCH); +} +EXPORT_SYMBOL_GPL(get_user_pages_locked); + +/* + * Same as get_user_pages_unlocked(...., FOLL_TOUCH) but it allows to + * pass additional gup_flags as last parameter (like FOLL_HWPOISON). + * + * NOTE: here FOLL_TOUCH is not set implicitly and must be set by the + * caller if required (just like with __get_user_pages). "FOLL_GET", + * "FOLL_WRITE" and "FOLL_FORCE" are set implicitly as needed + * according to the parameters "pages", "write", "force" + * respectively. + */ +static __always_inline long __get_user_pages_unlocked(struct task_struct *tsk, struct mm_struct *mm, + unsigned long start, unsigned long nr_pages, + int write, int force, struct page **pages, + unsigned int gup_flags) +{ + long ret; + int locked = 1; + down_read(&mm->mmap_sem); + ret = __get_user_pages_locked(tsk, mm, start, nr_pages, write, force, + pages, NULL, &locked, false, gup_flags); + if (locked) + up_read(&mm->mmap_sem); + return ret; +} + +/* + * get_user_pages_unlocked() is suitable to replace the form: + * + * down_read(&mm->mmap_sem); + * get_user_pages(tsk, mm, ..., pages, NULL); + * up_read(&mm->mmap_sem); + * + * with: + * + * get_user_pages_unlocked(tsk, mm, ..., pages); + * + * It is functionally equivalent to get_user_pages_fast so + * get_user_pages_fast should be used instead, if the two parameters + * "tsk" and "mm" are respectively equal to current and current->mm, + * or if "force" shall be set to 1 (get_user_pages_fast misses the + * "force" parameter). + */ +long get_user_pages_unlocked(unsigned long start, unsigned long nr_pages, + int write, int force, struct page **pages) +{ + return __get_user_pages_unlocked(current, current->mm, start, nr_pages, + write, force, pages, FOLL_TOUCH); +} +EXPORT_SYMBOL_GPL(get_user_pages_unlocked); + + +/** + * hex_dump_to_buffer - convert a blob of data to "hex ASCII" in memory + * @buf: data blob to dump + * @len: number of bytes in the @buf + * @rowsize: number of bytes to print per line; must be 16 or 32 + * @groupsize: number of bytes to print at a time (1, 2, 4, 8; default = 1) + * @linebuf: where to put the converted data + * @linebuflen: total size of @linebuf, including space for terminating NUL + * @ascii: include ASCII after the hex output + * + * hex_dump_to_buffer() works on one "line" of output at a time, i.e., + * 16 or 32 bytes of input data converted to hex + ASCII output. + * + * Given a buffer of u8 data, hex_dump_to_buffer() converts the input data + * to a hex + ASCII dump at the supplied memory location. + * The converted output is always NUL-terminated. + * + * E.g.: + * hex_dump_to_buffer(frame->data, frame->len, 16, 1, + * linebuf, sizeof(linebuf), true); + * + * example output buffer: + * 40 41 42 43 44 45 46 47 48 49 4a 4b 4c 4d 4e 4f @ABCDEFGHIJKLMNO + * + * Return: + * The amount of bytes placed in the buffer without terminating NUL. 
If the + * output was truncated, then the return value is the number of bytes + * (excluding the terminating NUL) which would have been written to the final + * string if enough space had been available. + */ +int hex_dump_to_buffer(const void *buf, size_t len, int rowsize, int groupsize, + char *linebuf, size_t linebuflen, bool ascii) +{ + const u8 *ptr = buf; + int ngroups; + u8 ch; + int j, lx = 0; + int ascii_column; + int ret; + + if (rowsize != 16 && rowsize != 32) + rowsize = 16; + + if (len > rowsize) /* limit to one line at a time */ + len = rowsize; + if (!is_power_of_2(groupsize) || groupsize > 8) + groupsize = 1; + if ((len % groupsize) != 0) /* no mixed size output */ + groupsize = 1; + + ngroups = len / groupsize; + ascii_column = rowsize * 2 + rowsize / groupsize + 1; + + if (!linebuflen) + goto overflow1; + + if (!len) + goto nil; + + if (groupsize == 8) { + const u64 *ptr8 = buf; + + for (j = 0; j < ngroups; j++) { + ret = snprintf(linebuf + lx, linebuflen - lx, + "%s%16.16llx", j ? " " : "", + get_unaligned(ptr8 + j)); + if (ret >= linebuflen - lx) + goto overflow1; + lx += ret; + } + } else if (groupsize == 4) { + const u32 *ptr4 = buf; + + for (j = 0; j < ngroups; j++) { + ret = snprintf(linebuf + lx, linebuflen - lx, + "%s%8.8x", j ? " " : "", + get_unaligned(ptr4 + j)); + if (ret >= linebuflen - lx) + goto overflow1; + lx += ret; + } + } else if (groupsize == 2) { + const u16 *ptr2 = buf; + + for (j = 0; j < ngroups; j++) { + ret = snprintf(linebuf + lx, linebuflen - lx, + "%s%4.4x", j ? " " : "", + get_unaligned(ptr2 + j)); + if (ret >= linebuflen - lx) + goto overflow1; + lx += ret; + } + } else { + for (j = 0; j < len; j++) { + if (linebuflen < lx + 3) + goto overflow2; + ch = ptr[j]; + linebuf[lx++] = hex_asc_hi(ch); + linebuf[lx++] = hex_asc_lo(ch); + linebuf[lx++] = ' '; + } + if (j) + lx--; + } + if (!ascii) + goto nil; + + while (lx < ascii_column) { + if (linebuflen < lx + 2) + goto overflow2; + linebuf[lx++] = ' '; + } + for (j = 0; j < len; j++) { + if (linebuflen < lx + 2) + goto overflow2; + ch = ptr[j]; + linebuf[lx++] = (isascii(ch) && isprint(ch)) ? ch : '.'; + } +nil: + linebuf[lx] = '\0'; + return lx; +overflow2: + linebuf[lx++] = '\0'; +overflow1: + return ascii ? ascii_column + len : (groupsize * 2 + 1) * ngroups - 1; +} +EXPORT_SYMBOL_GPL(hex_dump_to_buffer); diff --git a/compat/backport-4.1.c b/compat/backport-4.1.c new file mode 100644 index 0000000..e0a3ec6 --- /dev/null +++ b/compat/backport-4.1.c @@ -0,0 +1,84 @@ +/* + * Copyright (c) 2015 Stefan Assmann + * Copyright (c) 2015 Hauke Mehrtens + * + * Backport functionality introduced in Linux 4.1. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#include +#include + +netdev_features_t passthru_features_check(struct sk_buff *skb, + struct net_device *dev, + netdev_features_t features) +{ + return features; +} +EXPORT_SYMBOL_GPL(passthru_features_check); + +#ifdef CONFIG_TTY +#if LINUX_VERSION_IS_GEQ(4,0,0) +static void unset_locked_termios(struct ktermios *termios, + struct ktermios *old, + struct ktermios *locked) +{ + int i; + +#define NOSET_MASK(x, y, z) (x = ((x) & ~(z)) | ((y) & (z))) + + if (!locked) { + printk(KERN_WARNING "Warning?!? 
termios_locked is NULL.\n"); + return; + } + + NOSET_MASK(termios->c_iflag, old->c_iflag, locked->c_iflag); + NOSET_MASK(termios->c_oflag, old->c_oflag, locked->c_oflag); + NOSET_MASK(termios->c_cflag, old->c_cflag, locked->c_cflag); + NOSET_MASK(termios->c_lflag, old->c_lflag, locked->c_lflag); + termios->c_line = locked->c_line ? old->c_line : termios->c_line; + for (i = 0; i < NCCS; i++) + termios->c_cc[i] = locked->c_cc[i] ? + old->c_cc[i] : termios->c_cc[i]; + /* FIXME: What should we do for i/ospeed */ +} + +int tty_set_termios(struct tty_struct *tty, struct ktermios *new_termios) +{ + struct ktermios old_termios; + struct tty_ldisc *ld; + + WARN_ON(tty->driver->type == TTY_DRIVER_TYPE_PTY && + tty->driver->subtype == PTY_TYPE_MASTER); + /* + * Perform the actual termios internal changes under lock. + */ + + + /* FIXME: we need to decide on some locking/ordering semantics + for the set_termios notification eventually */ + down_write(&tty->termios_rwsem); + old_termios = tty->termios; + tty->termios = *new_termios; + unset_locked_termios(&tty->termios, &old_termios, &tty->termios_locked); + + if (tty->ops->set_termios) + tty->ops->set_termios(tty, &old_termios); + else + tty_termios_copy_hw(&tty->termios, &old_termios); + + ld = tty_ldisc_ref(tty); + if (ld != NULL) { + if (ld->ops->set_termios) + ld->ops->set_termios(tty, &old_termios); + tty_ldisc_deref(ld); + } + up_write(&tty->termios_rwsem); + return 0; +} +EXPORT_SYMBOL_GPL(tty_set_termios); +#endif /* LINUX_VERSION_IS_GEQ(4,0,0) */ +#endif /* CONFIG_TTY */ diff --git a/compat/backport-4.2.c b/compat/backport-4.2.c new file mode 100644 index 0000000..e00aa49 --- /dev/null +++ b/compat/backport-4.2.c @@ -0,0 +1,67 @@ +/* + * Copyright (c) 2015 Hauke Mehrtens + * + * Backport functionality introduced in Linux 4.2. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#include +#include + +static struct scatterlist *scatterwalk_ffwd(struct scatterlist dst[2], + struct scatterlist *src, + unsigned int len) +{ + for (;;) { + if (!len) + return src; + + if (src->length > len) + break; + + len -= src->length; + src = sg_next(src); + } + + sg_init_table(dst, 2); + sg_set_page(dst, sg_page(src), src->length - len, src->offset + len); + scatterwalk_crypto_chain(dst, sg_next(src), 0, 2); + + return dst; +} + +struct aead_old_request { + struct scatterlist srcbuf[2]; + struct scatterlist dstbuf[2]; + struct aead_request subreq; +}; + +unsigned int crypto_aead_reqsize(struct crypto_aead *tfm) +{ + return crypto_aead_crt(tfm)->reqsize + sizeof(struct aead_old_request); +} +EXPORT_SYMBOL_GPL(crypto_aead_reqsize); + +struct aead_request *crypto_backport_convert(struct aead_request *req) +{ + struct aead_old_request *nreq = aead_request_ctx(req); + struct crypto_aead *aead = crypto_aead_reqtfm(req); + struct scatterlist *src, *dst; + + src = scatterwalk_ffwd(nreq->srcbuf, req->src, req->assoclen); + dst = req->src == req->dst ? 
+ src : scatterwalk_ffwd(nreq->dstbuf, req->dst, req->assoclen); + + aead_request_set_tfm(&nreq->subreq, aead); + aead_request_set_callback(&nreq->subreq, aead_request_flags(req), + req->base.complete, req->base.data); + aead_request_set_crypt(&nreq->subreq, src, dst, req->cryptlen, + req->iv); + aead_request_set_assoc(&nreq->subreq, req->src, req->assoclen); + + return &nreq->subreq; +} +EXPORT_SYMBOL_GPL(crypto_backport_convert); diff --git a/compat/backport-4.3.c b/compat/backport-4.3.c new file mode 100644 index 0000000..2d8a5e5 --- /dev/null +++ b/compat/backport-4.3.c @@ -0,0 +1,245 @@ +/* + * Copyright (c) 2015 Hauke Mehrtens + * Copyright (c) 2015 - 2016 Intel Deutschland GmbH + * + * Backport functionality introduced in Linux 4.3. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#include +#include +#include +#include +#include +#include + +#if LINUX_VERSION_IS_GEQ(3,8,0) +struct backport_thermal_ops_wrapper { + old_thermal_zone_device_ops_t ops; + struct thermal_zone_device_ops *driver_ops; +}; + +static int backport_thermal_get_temp(struct thermal_zone_device *dev, + unsigned long *temp) +{ + struct backport_thermal_ops_wrapper *wrapper = + container_of(dev->ops, struct backport_thermal_ops_wrapper, ops); + int _temp, ret; + + ret = wrapper->driver_ops->get_temp(dev, &_temp); + if (!ret) + *temp = (unsigned long)_temp; + + return ret; +} + +static int backport_thermal_get_trip_temp(struct thermal_zone_device *dev, + int i, unsigned long *temp) +{ + struct backport_thermal_ops_wrapper *wrapper = + container_of(dev->ops, struct backport_thermal_ops_wrapper, ops); + int _temp, ret; + + ret = wrapper->driver_ops->get_trip_temp(dev, i, &_temp); + if (!ret) + *temp = (unsigned long)_temp; + + return ret; +} + +static int backport_thermal_set_trip_temp(struct thermal_zone_device *dev, + int i, unsigned long temp) +{ + struct backport_thermal_ops_wrapper *wrapper = + container_of(dev->ops, struct backport_thermal_ops_wrapper, ops); + + return wrapper->driver_ops->set_trip_temp(dev, i, (int)temp); +} + +static int backport_thermal_get_trip_hyst(struct thermal_zone_device *dev, + int i, unsigned long *temp) +{ + struct backport_thermal_ops_wrapper *wrapper = + container_of(dev->ops, struct backport_thermal_ops_wrapper, ops); + int _temp, ret; + + ret = wrapper->driver_ops->get_trip_hyst(dev, i, &_temp); + if (!ret) + *temp = (unsigned long)_temp; + + return ret; +} + +static int backport_thermal_set_trip_hyst(struct thermal_zone_device *dev, + int i, unsigned long temp) +{ + struct backport_thermal_ops_wrapper *wrapper = + container_of(dev->ops, struct backport_thermal_ops_wrapper, ops); + + return wrapper->driver_ops->set_trip_hyst(dev, i, (int)temp); +} + +static int backport_thermal_get_crit_temp(struct thermal_zone_device *dev, + unsigned long *temp) +{ + struct backport_thermal_ops_wrapper *wrapper = + container_of(dev->ops, struct backport_thermal_ops_wrapper, ops); + int _temp, ret; + + ret = wrapper->driver_ops->get_crit_temp(dev, &_temp); + if (!ret) + *temp = (unsigned long)_temp; + + return ret; +} + +#if LINUX_VERSION_IS_GEQ(3, 19, 0) +static int backport_thermal_set_emul_temp(struct thermal_zone_device *dev, + unsigned long temp) +{ + struct backport_thermal_ops_wrapper *wrapper = + container_of(dev->ops, struct backport_thermal_ops_wrapper, ops); + + return wrapper->driver_ops->set_emul_temp(dev, (int)temp); +} +#endif /* 
LINUX_VERSION_IS_GEQ(3, 19, 0) */ + +struct thermal_zone_device *backport_thermal_zone_device_register( + const char *type, int trips, int mask, void *devdata, + struct thermal_zone_device_ops *ops, + const struct thermal_zone_params *tzp, + int passive_delay, int polling_delay) +{ + struct backport_thermal_ops_wrapper *wrapper = kzalloc(sizeof(*wrapper), GFP_KERNEL); + struct thermal_zone_device *ret; + + if (!wrapper) + return NULL; + + wrapper->driver_ops = ops; + +#define copy(_op) \ + wrapper->ops._op = ops->_op + + copy(bind); + copy(unbind); + copy(get_mode); + copy(set_mode); + copy(get_trip_type); + copy(get_trend); + copy(notify); + + /* Assign the backport ops to the old struct to get the + * correct types. But only assign if the registrant defined + * the ops. + */ +#define assign_ops(_op) \ + if (ops->_op) \ + wrapper->ops._op = backport_thermal_##_op + + assign_ops(get_temp); + assign_ops(get_trip_temp); + assign_ops(set_trip_temp); + assign_ops(get_trip_hyst); + assign_ops(set_trip_hyst); + assign_ops(get_crit_temp); +#if LINUX_VERSION_IS_GEQ(3, 19, 0) + assign_ops(set_emul_temp); +#endif /* LINUX_VERSION_IS_GEQ(3, 19, 0) */ +#undef assign_ops + + ret = old_thermal_zone_device_register(type, trips, mask, devdata, + &wrapper->ops, tzp, passive_delay, + polling_delay); + if (!ret) + kfree(wrapper); + return ret; +} +EXPORT_SYMBOL_GPL(backport_thermal_zone_device_register); + +void backport_thermal_zone_device_unregister(struct thermal_zone_device *dev) +{ + struct backport_thermal_ops_wrapper *wrapper = + container_of(dev->ops, struct backport_thermal_ops_wrapper, ops); + + old_thermal_zone_device_unregister(dev); + kfree(wrapper); +} +EXPORT_SYMBOL_GPL(backport_thermal_zone_device_unregister); + +#endif /* >= 3.8.0 */ + +static void seq_set_overflow(struct seq_file *m) +{ + m->count = m->size; +} + +/* A complete analogue of print_hex_dump() */ +void seq_hex_dump(struct seq_file *m, const char *prefix_str, int prefix_type, + int rowsize, int groupsize, const void *buf, size_t len, + bool ascii) +{ + const u8 *ptr = buf; + int i, linelen, remaining = len; + int ret; + + if (rowsize != 16 && rowsize != 32) + rowsize = 16; + + for (i = 0; i < len && !seq_has_overflowed(m); i += rowsize) { + linelen = min(remaining, rowsize); + remaining -= rowsize; + + switch (prefix_type) { + case DUMP_PREFIX_ADDRESS: + seq_printf(m, "%s%p: ", prefix_str, ptr + i); + break; + case DUMP_PREFIX_OFFSET: + seq_printf(m, "%s%.8x: ", prefix_str, i); + break; + default: + seq_printf(m, "%s", prefix_str); + break; + } + + ret = hex_dump_to_buffer(ptr + i, linelen, rowsize, groupsize, + m->buf + m->count, m->size - m->count, + ascii); + if (ret >= m->size - m->count) { + seq_set_overflow(m); + } else { + m->count += ret; + seq_putc(m, '\n'); + } + } +} +EXPORT_SYMBOL_GPL(seq_hex_dump); + +ssize_t strscpy(char *dest, const char *src, size_t count) +{ + long res = 0; + + if (count == 0) + return -E2BIG; + + while (count) { + char c; + + c = src[res]; + dest[res] = c; + if (!c) + return res; + res++; + count--; + } + + /* Hit buffer length without finding a NUL; force NUL-termination. */ + if (res) + dest[res-1] = '\0'; + + return -E2BIG; +} +EXPORT_SYMBOL_GPL(strscpy); diff --git a/compat/backport-4.4.c b/compat/backport-4.4.c new file mode 100644 index 0000000..1c11a20 --- /dev/null +++ b/compat/backport-4.4.c @@ -0,0 +1,154 @@ +/* + * Copyright(c) 2015 Intel Deutschland GmbH + * + * Backport functionality introduced in Linux 4.4. 
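[Editor's aside: one note on the strscpy() backport just above before the 4.4 code begins. Unlike strlcpy() it returns the number of characters copied, or -E2BIG on truncation, rather than the source length, and it always NUL-terminates for a non-zero count. A minimal caller sketch, names invented:]

	char name[8];
	ssize_t n;

	n = strscpy(name, input, sizeof(name));	/* input: hypothetical string */
	if (n == -E2BIG)
		pr_debug("input truncated to %zu bytes\n", sizeof(name) - 1);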
+ * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#ifdef CONFIG_DEBUG_FS +#if LINUX_VERSION_IS_LESS(4,3,0) +static ssize_t debugfs_read_file_bool(struct file *file, char __user *user_buf, + size_t count, loff_t *ppos) +{ + char buf[3]; + bool *val = file->private_data; + + if (*val) + buf[0] = 'Y'; + else + buf[0] = 'N'; + buf[1] = '\n'; + buf[2] = 0x00; + return simple_read_from_buffer(user_buf, count, ppos, buf, 2); +} + +static ssize_t debugfs_write_file_bool(struct file *file, + const char __user *user_buf, + size_t count, loff_t *ppos) +{ + char buf[32]; + size_t buf_size; + bool bv; + bool *val = file->private_data; + + buf_size = min(count, (sizeof(buf)-1)); + if (copy_from_user(buf, user_buf, buf_size)) + return -EFAULT; + + buf[buf_size] = '\0'; + if (strtobool(buf, &bv) == 0) + *val = bv; + + return count; +} +#endif /* < 4.3.0 */ + +static const struct file_operations fops_bool = { + .read = debugfs_read_file_bool, + .write = debugfs_write_file_bool, + .open = simple_open, + .llseek = default_llseek, +}; + +struct dentry *debugfs_create_bool(const char *name, umode_t mode, + struct dentry *parent, bool *value) +{ + return debugfs_create_file(name, mode, parent, value, &fops_bool); +} +EXPORT_SYMBOL_GPL(debugfs_create_bool); +#endif /* CONFIG_DEBUG_FS */ + +/* Calculate expected number of TX descriptors */ +int tso_count_descs(struct sk_buff *skb) +{ + /* The Marvell Way */ + return skb_shinfo(skb)->gso_segs * 2 + skb_shinfo(skb)->nr_frags; +} +EXPORT_SYMBOL(tso_count_descs); + +void tso_build_hdr(struct sk_buff *skb, char *hdr, struct tso_t *tso, + int size, bool is_last) +{ + struct tcphdr *tcph; + int hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb); + int mac_hdr_len = skb_network_offset(skb); + + memcpy(hdr, skb->data, hdr_len); + if (!tso->ipv6) { + struct iphdr *iph = (void *)(hdr + mac_hdr_len); + + iph->id = htons(tso->ip_id); + iph->tot_len = htons(size + hdr_len - mac_hdr_len); + tso->ip_id++; + } else { + struct ipv6hdr *iph = (void *)(hdr + mac_hdr_len); + + iph->payload_len = htons(size + tcp_hdrlen(skb)); + } + tcph = (struct tcphdr *)(hdr + skb_transport_offset(skb)); + put_unaligned_be32(tso->tcp_seq, &tcph->seq); + + if (!is_last) { + /* Clear all special flags for not last packet */ + tcph->psh = 0; + tcph->fin = 0; + tcph->rst = 0; + } +} +EXPORT_SYMBOL(tso_build_hdr); + +void tso_build_data(struct sk_buff *skb, struct tso_t *tso, int size) +{ + tso->tcp_seq += size; + tso->size -= size; + tso->data += size; + + if ((tso->size == 0) && + (tso->next_frag_idx < skb_shinfo(skb)->nr_frags)) { + skb_frag_t *frag = &skb_shinfo(skb)->frags[tso->next_frag_idx]; + + /* Move to next segment */ + tso->size = frag->size; + tso->data = page_address(skb_frag_page(frag)) + frag->page_offset; + tso->next_frag_idx++; + } +} +EXPORT_SYMBOL(tso_build_data); + +void tso_start(struct sk_buff *skb, struct tso_t *tso) +{ + int hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb); + + tso->ip_id = ntohs(ip_hdr(skb)->id); + tso->tcp_seq = ntohl(tcp_hdr(skb)->seq); + tso->next_frag_idx = 0; + tso->ipv6 = vlan_get_protocol(skb) == htons(ETH_P_IPV6); + + /* Build first data */ + tso->size = skb_headlen(skb) - hdr_len; + tso->data = skb->data + hdr_len; + if ((tso->size == 0) && + (tso->next_frag_idx < skb_shinfo(skb)->nr_frags)) { + 
skb_frag_t *frag = &skb_shinfo(skb)->frags[tso->next_frag_idx]; + + /* Move to next segment */ + tso->size = frag->size; + tso->data = page_address(skb_frag_page(frag)) + frag->page_offset; + tso->next_frag_idx++; + } +} +EXPORT_SYMBOL(tso_start); diff --git a/compat/backport-4.5.c b/compat/backport-4.5.c new file mode 100644 index 0000000..69751d5 --- /dev/null +++ b/compat/backport-4.5.c @@ -0,0 +1,153 @@ +/* + * Copyright(c) 2015 Hauke Mehrtens + * + * Backport functionality introduced in Linux 4.5. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#if LINUX_VERSION_IS_GEQ(3,19,0) +int led_set_brightness_sync(struct led_classdev *led_cdev, + enum led_brightness value) +{ + if (led_cdev->blink_delay_on || led_cdev->blink_delay_off) + return -EBUSY; + + led_cdev->brightness = min(value, led_cdev->max_brightness); + + if (led_cdev->flags & LED_SUSPENDED) + return 0; + + if (led_cdev->brightness_set_sync) + return led_cdev->brightness_set_sync(led_cdev, + led_cdev->brightness); + return -ENOTSUPP; +} +EXPORT_SYMBOL_GPL(led_set_brightness_sync); +#endif /* >= 3.19 */ + +#if LINUX_VERSION_IS_GEQ(3,2,0) +/** + * no_seek_end_llseek - llseek implementation for fixed-sized devices + * @file: file structure to seek on + * @offset: file offset to seek to + * @whence: type of seek + * + */ +loff_t no_seek_end_llseek(struct file *file, loff_t offset, int whence) +{ + switch (whence) { + case SEEK_SET: case SEEK_CUR: +#if LINUX_VERSION_IS_GEQ(3,6,0) + return generic_file_llseek_size(file, offset, whence, + ~0ULL, 0); +#else + return generic_file_llseek_size(file, offset, whence, + ~0ULL); +#endif + default: + return -EINVAL; + } +} +EXPORT_SYMBOL_GPL(no_seek_end_llseek); +#endif /* >= 3.2 */ + +/** + * memdup_user_nul - duplicate memory region from user space and NUL-terminate + * + * @src: source address in user space + * @len: number of bytes to copy + * + * Returns an ERR_PTR() on failure. + */ +void *memdup_user_nul(const void __user *src, size_t len) +{ + char *p; + + /* + * Always use GFP_KERNEL, since copy_from_user() can sleep and + * cause pagefault, which makes it pointless to use GFP_NOFS + * or GFP_ATOMIC. + */ + p = kmalloc(len + 1, GFP_KERNEL); + if (!p) + return ERR_PTR(-ENOMEM); + + if (copy_from_user(p, src, len)) { + kfree(p); + return ERR_PTR(-EFAULT); + } + p[len] = '\0'; + + return p; +} +EXPORT_SYMBOL_GPL(memdup_user_nul); + +void phy_attached_info(struct phy_device *phydev) +{ + phy_attached_print(phydev, NULL); +} +EXPORT_SYMBOL_GPL(phy_attached_info); + +#define ATTACHED_FMT "attached PHY driver [%s] (mii_bus:phy_addr=%s, irq=%d)" +void phy_attached_print(struct phy_device *phydev, const char *fmt, ...) 
+{ + if (!fmt) { + dev_info(&phydev->dev, ATTACHED_FMT "\n", + phydev->drv->name, phydev_name(phydev), + phydev->irq); + } else { + va_list ap; + + dev_info(&phydev->dev, ATTACHED_FMT, + phydev->drv->name, phydev_name(phydev), + phydev->irq); + + va_start(ap, fmt); + vprintk(fmt, ap); + va_end(ap); + } +} +EXPORT_SYMBOL_GPL(phy_attached_print); + +static void devm_led_trigger_release(struct device *dev, void *res) +{ + led_trigger_unregister(*(struct led_trigger **)res); +} + +int devm_led_trigger_register(struct device *dev, + struct led_trigger *trig) +{ + struct led_trigger **dr; + int rc; + + dr = devres_alloc(devm_led_trigger_release, sizeof(*dr), + GFP_KERNEL); + if (!dr) + return -ENOMEM; + + *dr = trig; + + rc = led_trigger_register(trig); + if (rc) + devres_free(dr); + else + devres_add(dev, dr); + + return rc; +} +EXPORT_SYMBOL_GPL(devm_led_trigger_register); diff --git a/compat/backport-4.6.c b/compat/backport-4.6.c new file mode 100644 index 0000000..54ff669 --- /dev/null +++ b/compat/backport-4.6.c @@ -0,0 +1,77 @@ +/* + * Copyright(c) 2016 Hauke Mehrtens + * + * Backport functionality introduced in Linux 4.6. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#include +#include +#include + +/** + * kstrtobool - convert common user inputs into boolean values + * @s: input string + * @res: result + * + * This routine returns 0 iff the first character is one of 'Yy1Nn0', or + * [oO][NnFf] for "on" and "off". Otherwise it will return -EINVAL. Value + * pointed to by res is updated upon finding a match. + */ +int kstrtobool(const char *s, bool *res) +{ + if (!s) + return -EINVAL; + + switch (s[0]) { + case 'y': + case 'Y': + case '1': + *res = true; + return 0; + case 'n': + case 'N': + case '0': + *res = false; + return 0; + case 'o': + case 'O': + switch (s[1]) { + case 'n': + case 'N': + *res = true; + return 0; + case 'f': + case 'F': + *res = false; + return 0; + default: + break; + } + default: + break; + } + + return -EINVAL; +} +EXPORT_SYMBOL_GPL(kstrtobool); + +/* + * Since "base" would be a nonsense argument, this open-codes the + * _from_user helper instead of using the helper macro below. + */ +int kstrtobool_from_user(const char __user *s, size_t count, bool *res) +{ + /* Longest string needed to differentiate, newline, terminator */ + char buf[4]; + + count = min(count, sizeof(buf) - 1); + if (copy_from_user(buf, s, count)) + return -EFAULT; + buf[count] = '\0'; + return kstrtobool(buf, res); +} +EXPORT_SYMBOL_GPL(kstrtobool_from_user); diff --git a/compat/backport-4.7.c b/compat/backport-4.7.c new file mode 100644 index 0000000..b064707 --- /dev/null +++ b/compat/backport-4.7.c @@ -0,0 +1,182 @@ +/* + * Copyright(c) 2016 Hauke Mehrtens + * + * Backport functionality introduced in Linux 4.7. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#include +#include +#include +#include +#include +#include +#include + +/** + * __nla_reserve_64bit - reserve room for attribute on the skb and align it + * @skb: socket buffer to reserve room on + * @attrtype: attribute type + * @attrlen: length of attribute payload + * + * Adds a netlink attribute header to a socket buffer and reserves + * room for the payload but does not copy it. 
It also ensures that this
+ * attribute will be 64-bit aligned.
+ *
+ * The caller is responsible for ensuring that the skb provides enough
+ * tailroom for the attribute header and payload.
+ */
+struct nlattr *__nla_reserve_64bit(struct sk_buff *skb, int attrtype,
+				   int attrlen, int padattr)
+{
+	if (nla_need_padding_for_64bit(skb))
+		nla_align_64bit(skb, padattr);
+
+	return __nla_reserve(skb, attrtype, attrlen);
+}
+EXPORT_SYMBOL_GPL(__nla_reserve_64bit);
+
+/**
+ * nla_reserve_64bit - reserve room for attribute on the skb and align it
+ * @skb: socket buffer to reserve room on
+ * @attrtype: attribute type
+ * @attrlen: length of attribute payload
+ * @padattr: attribute type for the padding
+ *
+ * Adds a netlink attribute header to a socket buffer and reserves
+ * room for the payload but does not copy it. It also ensures that this
+ * attribute will be 64-bit aligned.
+ *
+ * Returns NULL if the tailroom of the skb is insufficient to store
+ * the attribute header and payload.
+ */
+struct nlattr *nla_reserve_64bit(struct sk_buff *skb, int attrtype, int attrlen,
+				 int padattr)
+{
+	size_t len;
+
+	if (nla_need_padding_for_64bit(skb))
+		len = nla_total_size_64bit(attrlen);
+	else
+		len = nla_total_size(attrlen);
+	if (unlikely(skb_tailroom(skb) < len))
+		return NULL;
+
+	return __nla_reserve_64bit(skb, attrtype, attrlen, padattr);
+}
+EXPORT_SYMBOL_GPL(nla_reserve_64bit);
+
+/**
+ * __nla_put_64bit - Add a netlink attribute to a socket buffer and align it
+ * @skb: socket buffer to add attribute to
+ * @attrtype: attribute type
+ * @attrlen: length of attribute payload
+ * @data: head of attribute payload
+ * @padattr: attribute type for the padding
+ *
+ * The caller is responsible for ensuring that the skb provides enough
+ * tailroom for the attribute header and payload.
+ */
+void __nla_put_64bit(struct sk_buff *skb, int attrtype, int attrlen,
+		     const void *data, int padattr)
+{
+	struct nlattr *nla;
+
+	nla = __nla_reserve_64bit(skb, attrtype, attrlen, padattr);
+	memcpy(nla_data(nla), data, attrlen);
+}
+EXPORT_SYMBOL_GPL(__nla_put_64bit);
+
+/**
+ * nla_put_64bit - Add a netlink attribute to a socket buffer and align it
+ * @skb: socket buffer to add attribute to
+ * @attrtype: attribute type
+ * @attrlen: length of attribute payload
+ * @data: head of attribute payload
+ * @padattr: attribute type for the padding
+ *
+ * Returns -EMSGSIZE if the tailroom of the skb is insufficient to store
+ * the attribute header and payload.
+ */
+int nla_put_64bit(struct sk_buff *skb, int attrtype, int attrlen,
+		  const void *data, int padattr)
+{
+	size_t len;
+
+	if (nla_need_padding_for_64bit(skb))
+		len = nla_total_size_64bit(attrlen);
+	else
+		len = nla_total_size(attrlen);
+	if (unlikely(skb_tailroom(skb) < len))
+		return -EMSGSIZE;
+
+	__nla_put_64bit(skb, attrtype, attrlen, data, padattr);
+	return 0;
+}
+EXPORT_SYMBOL_GPL(nla_put_64bit);
+
+/*
+ * Below 3.18, or if the kernel has devcoredump disabled, we carry a copy
+ * of the entire devcoredump implementation, so these functions need not
+ * be defined here.
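[Editor's aside: before the devcoredump glue below, a usage illustration for the 64-bit netlink helpers above. This mirrors the upstream rtnetlink pattern; skb is the message under construction, the error label is hypothetical, and IFLA_STATS64/IFLA_PAD come from linux/if_link.h:]

	struct rtnl_link_stats64 stats;

	memset(&stats, 0, sizeof(stats));
	/* IFLA_PAD is the attribute type used for the alignment pad */
	if (nla_put_64bit(skb, IFLA_STATS64, sizeof(stats), &stats,
			  IFLA_PAD))
		goto nla_put_failure;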
+ */ +#if LINUX_VERSION_IS_GEQ(3,18,0) && \ + !defined(CPTCFG_BPAUTO_BUILD_WANT_DEV_COREDUMP) +#include +#include + +static void devcd_free_sgtable(void *data) +{ + struct scatterlist *table = data; + int i; + struct page *page; + struct scatterlist *iter; + struct scatterlist *delete_iter; + + /* free pages */ + iter = table; + for_each_sg(table, iter, sg_nents(table), i) { + page = sg_page(iter); + if (page) + __free_page(page); + } + + /* then free all chained tables */ + iter = table; + delete_iter = table; /* always points on a head of a table */ + while (!sg_is_last(iter)) { + iter++; + if (sg_is_chain(iter)) { + iter = sg_chain_ptr(iter); + kfree(delete_iter); + delete_iter = iter; + } + } + + /* free the last table */ + kfree(delete_iter); +} + +static ssize_t devcd_read_from_sgtable(char *buffer, loff_t offset, + size_t buf_len, void *data, + size_t data_len) +{ + struct scatterlist *table = data; + + if (offset > data_len) + return -EINVAL; + + if (offset + buf_len > data_len) + buf_len = data_len - offset; + return sg_pcopy_to_buffer(table, sg_nents(table), buffer, buf_len, + offset); +} + +void dev_coredumpsg(struct device *dev, struct scatterlist *table, + size_t datalen, gfp_t gfp) +{ + dev_coredumpm(dev, THIS_MODULE, table, datalen, gfp, + devcd_read_from_sgtable, devcd_free_sgtable); +} +EXPORT_SYMBOL_GPL(dev_coredumpsg); +#endif /* >= 3.18.0 */ diff --git a/compat/backport-4.8.c b/compat/backport-4.8.c new file mode 100644 index 0000000..7d05644 --- /dev/null +++ b/compat/backport-4.8.c @@ -0,0 +1,146 @@ +/* + * Copyright(c) 2017 Intel Deutschland GmbH + * + * Backport functionality introduced in Linux 4.8. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
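[Editor's aside: for reference, the expected caller side of dev_coredumpsg() looks roughly like this. crash_buf/crash_len and the cleanup label are invented, crash_len is assumed to fit in one page, and ownership of the table and its pages passes to devcoredump, which releases them through devcd_free_sgtable() above:]

	struct scatterlist *table;
	struct page *page;

	table = kmalloc(sizeof(*table), GFP_KERNEL);
	page = alloc_page(GFP_KERNEL);
	if (!table || !page)
		goto free;	/* hypothetical cleanup path */

	memcpy(page_address(page), crash_buf, crash_len);
	sg_init_table(table, 1);
	sg_set_page(table, page, crash_len, 0);

	/* table and page are now owned (and later freed) by devcoredump */
	dev_coredumpsg(dev, table, crash_len, GFP_KERNEL);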
+ */ +#include +#include + +int cdc_parse_cdc_header(struct usb_cdc_parsed_header *hdr, + struct usb_interface *intf, + u8 *buffer, int buflen) +{ + /* duplicates are ignored */ + struct usb_cdc_union_desc *union_header = NULL; + + /* duplicates are not tolerated */ + struct usb_cdc_header_desc *header = NULL; + struct usb_cdc_ether_desc *ether = NULL; + struct usb_cdc_mdlm_detail_desc *detail = NULL; + struct usb_cdc_mdlm_desc *desc = NULL; + + unsigned int elength; + int cnt = 0; + + memset(hdr, 0x00, sizeof(struct usb_cdc_parsed_header)); + hdr->phonet_magic_present = false; + while (buflen > 0) { + elength = buffer[0]; + if (!elength) { + dev_err(&intf->dev, "skipping garbage byte\n"); + elength = 1; + goto next_desc; + } + if (buffer[1] != USB_DT_CS_INTERFACE) { + dev_err(&intf->dev, "skipping garbage\n"); + goto next_desc; + } + + switch (buffer[2]) { + case USB_CDC_UNION_TYPE: /* we've found it */ + if (elength < sizeof(struct usb_cdc_union_desc)) + goto next_desc; + if (union_header) { + dev_err(&intf->dev, "More than one union descriptor, skipping ...\n"); + goto next_desc; + } + union_header = (struct usb_cdc_union_desc *)buffer; + break; + case USB_CDC_COUNTRY_TYPE: + if (elength < sizeof(struct usb_cdc_country_functional_desc)) + goto next_desc; + hdr->usb_cdc_country_functional_desc = + (struct usb_cdc_country_functional_desc *)buffer; + break; + case USB_CDC_HEADER_TYPE: + if (elength != sizeof(struct usb_cdc_header_desc)) + goto next_desc; + if (header) + return -EINVAL; + header = (struct usb_cdc_header_desc *)buffer; + break; + case USB_CDC_ACM_TYPE: + if (elength < sizeof(struct usb_cdc_acm_descriptor)) + goto next_desc; + hdr->usb_cdc_acm_descriptor = + (struct usb_cdc_acm_descriptor *)buffer; + break; + case USB_CDC_ETHERNET_TYPE: + if (elength != sizeof(struct usb_cdc_ether_desc)) + goto next_desc; + if (ether) + return -EINVAL; + ether = (struct usb_cdc_ether_desc *)buffer; + break; + case USB_CDC_CALL_MANAGEMENT_TYPE: + if (elength < sizeof(struct usb_cdc_call_mgmt_descriptor)) + goto next_desc; + hdr->usb_cdc_call_mgmt_descriptor = + (struct usb_cdc_call_mgmt_descriptor *)buffer; + break; + case USB_CDC_DMM_TYPE: + if (elength < sizeof(struct usb_cdc_dmm_desc)) + goto next_desc; + hdr->usb_cdc_dmm_desc = + (struct usb_cdc_dmm_desc *)buffer; + break; + case USB_CDC_MDLM_TYPE: + if (elength < sizeof(struct usb_cdc_mdlm_desc *)) + goto next_desc; + if (desc) + return -EINVAL; + desc = (struct usb_cdc_mdlm_desc *)buffer; + break; + case USB_CDC_MDLM_DETAIL_TYPE: + if (elength < sizeof(struct usb_cdc_mdlm_detail_desc *)) + goto next_desc; + if (detail) + return -EINVAL; + detail = (struct usb_cdc_mdlm_detail_desc *)buffer; + break; + case USB_CDC_NCM_TYPE: + if (elength < sizeof(struct usb_cdc_ncm_desc)) + goto next_desc; + hdr->usb_cdc_ncm_desc = (struct usb_cdc_ncm_desc *)buffer; + break; + case USB_CDC_MBIM_TYPE: + if (elength < sizeof(struct usb_cdc_mbim_desc)) + goto next_desc; + + hdr->usb_cdc_mbim_desc = (struct usb_cdc_mbim_desc *)buffer; + break; + case USB_CDC_MBIM_EXTENDED_TYPE: + if (elength < sizeof(struct usb_cdc_mbim_extended_desc)) + break; + hdr->usb_cdc_mbim_extended_desc = + (struct usb_cdc_mbim_extended_desc *)buffer; + break; + case CDC_PHONET_MAGIC_NUMBER: + hdr->phonet_magic_present = true; + break; + default: + /* + * there are LOTS more CDC descriptors that + * could legitimately be found here. 
+ */ + dev_dbg(&intf->dev, "Ignoring descriptor: type %02x, length %ud\n", + buffer[2], elength); + goto next_desc; + } + cnt++; +next_desc: + buflen -= elength; + buffer += elength; + } + hdr->usb_cdc_union_desc = union_header; + hdr->usb_cdc_header_desc = header; + hdr->usb_cdc_mdlm_detail_desc = detail; + hdr->usb_cdc_mdlm_desc = desc; + hdr->usb_cdc_ether_desc = ether; + return cnt; +} +EXPORT_SYMBOL_GPL(cdc_parse_cdc_header); diff --git a/compat/backports.h b/compat/backports.h new file mode 100644 index 0000000..ccc8972 --- /dev/null +++ b/compat/backports.h @@ -0,0 +1,26 @@ +#ifndef LINUX_BACKPORTS_PRIVATE_H +#define LINUX_BACKPORTS_PRIVATE_H + +#include + +#ifdef CPTCFG_BPAUTO_BUILD_CRYPTO_CCM +int crypto_ccm_module_init(void); +void crypto_ccm_module_exit(void); +#else +static inline int crypto_ccm_module_init(void) +{ return 0; } +static inline void crypto_ccm_module_exit(void) +{} +#endif + +#ifdef CPTCFG_BPAUTO_BUILD_WANT_DEV_COREDUMP +int devcoredump_init(void); +void devcoredump_exit(void); +#else +static inline int devcoredump_init(void) +{ return 0; } +static inline void devcoredump_exit(void) +{} +#endif + +#endif /* LINUX_BACKPORTS_PRIVATE_H */ diff --git a/compat/compat-3.0.c b/compat/compat-3.0.c new file mode 100644 index 0000000..1bed6a6 --- /dev/null +++ b/compat/compat-3.0.c @@ -0,0 +1,91 @@ +/* + * Copyright 2011 Hauke Mehrtens + * Copyright 2011 Alexey Dobriyan + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * Backport functionality introduced in Linux 3.0. + */ + +#include +#include + +int mac_pton(const char *s, u8 *mac) +{ + int i; + + /* XX:XX:XX:XX:XX:XX */ + if (strlen(s) < 3 * ETH_ALEN - 1) + return 0; + + /* Don't dirty result unless string is valid MAC. */ + for (i = 0; i < ETH_ALEN; i++) { + if (!strchr("0123456789abcdefABCDEF", s[i * 3])) + return 0; + if (!strchr("0123456789abcdefABCDEF", s[i * 3 + 1])) + return 0; + if (i != ETH_ALEN - 1 && s[i * 3 + 2] != ':') + return 0; + } + for (i = 0; i < ETH_ALEN; i++) { + mac[i] = (hex_to_bin(s[i * 3]) << 4) | hex_to_bin(s[i * 3 + 1]); + } + return 1; +} +EXPORT_SYMBOL_GPL(mac_pton); + +#define kstrto_from_user(f, g, type) \ +int f(const char __user *s, size_t count, unsigned int base, type *res) \ +{ \ + /* sign, base 2 representation, newline, terminator */ \ + char buf[1 + sizeof(type) * 8 + 1 + 1]; \ + \ + count = min(count, sizeof(buf) - 1); \ + if (copy_from_user(buf, s, count)) \ + return -EFAULT; \ + buf[count] = '\0'; \ + return g(buf, base, res); \ +} \ +EXPORT_SYMBOL_GPL(f) + +kstrto_from_user(kstrtoull_from_user, kstrtoull, unsigned long long); +kstrto_from_user(kstrtoll_from_user, kstrtoll, long long); +kstrto_from_user(kstrtoul_from_user, kstrtoul, unsigned long); +kstrto_from_user(kstrtol_from_user, kstrtol, long); +kstrto_from_user(kstrtouint_from_user, kstrtouint, unsigned int); +kstrto_from_user(kstrtoint_from_user, kstrtoint, int); +kstrto_from_user(kstrtou16_from_user, kstrtou16, u16); +kstrto_from_user(kstrtos16_from_user, kstrtos16, s16); +kstrto_from_user(kstrtou8_from_user, kstrtou8, u8); +kstrto_from_user(kstrtos8_from_user, kstrtos8, s8); + +/** + * strtobool - convert common user inputs into boolean values + * @s: input string + * @res: result + * + * This routine returns 0 iff the first character is one of 'Yy1Nn0'. + * Otherwise it will return -EINVAL. Value pointed to by res is + * updated upon finding a match. 
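[Editor's aside: before strtobool() itself, a typical consumer of the mac_pton() backport above is module-parameter or sysfs parsing; mac_str is a hypothetical input string:]

	u8 addr[ETH_ALEN];

	/* returns 1 on success, 0 unless the string is XX:XX:XX:XX:XX:XX */
	if (!mac_pton(mac_str, addr))
		return -EINVAL;
	memcpy(dev->dev_addr, addr, ETH_ALEN);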
+ */ +int strtobool(const char *s, bool *res) +{ + switch (s[0]) { + case 'y': + case 'Y': + case '1': + *res = true; + break; + case 'n': + case 'N': + case '0': + *res = false; + break; + default: + return -EINVAL; + } + return 0; +} +EXPORT_SYMBOL_GPL(strtobool); diff --git a/compat/compat-3.1.c b/compat/compat-3.1.c new file mode 100644 index 0000000..2618780 --- /dev/null +++ b/compat/compat-3.1.c @@ -0,0 +1,118 @@ +/* + * Copyright 2012 Hauke Mehrtens + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * Backport functionality introduced in Linux 3.1. + */ + +#include +#include +#include + +static DEFINE_SPINLOCK(compat_simple_ida_lock); + +/** + * ida_simple_get - get a new id. + * @ida: the (initialized) ida. + * @start: the minimum id (inclusive, < 0x8000000) + * @end: the maximum id (exclusive, < 0x8000000 or 0) + * @gfp_mask: memory allocation flags + * + * Allocates an id in the range start <= id < end, or returns -ENOSPC. + * On memory allocation failure, returns -ENOMEM. + * + * Use ida_simple_remove() to get rid of an id. + */ +int ida_simple_get(struct ida *ida, unsigned int start, unsigned int end, + gfp_t gfp_mask) +{ + int ret, id; + unsigned int max; + unsigned long flags; + + BUG_ON((int)start < 0); + BUG_ON((int)end < 0); + + if (end == 0) + max = 0x80000000; + else { + BUG_ON(end < start); + max = end - 1; + } + +again: + if (!ida_pre_get(ida, gfp_mask)) + return -ENOMEM; + + spin_lock_irqsave(&compat_simple_ida_lock, flags); + ret = ida_get_new_above(ida, start, &id); + if (!ret) { + if (id > max) { + ida_remove(ida, id); + ret = -ENOSPC; + } else { + ret = id; + } + } + spin_unlock_irqrestore(&compat_simple_ida_lock, flags); + + if (unlikely(ret == -EAGAIN)) + goto again; + + return ret; +} +EXPORT_SYMBOL_GPL(ida_simple_get); + +/** + * ida_simple_remove - remove an allocated id. + * @ida: the (initialized) ida. + * @id: the id returned by ida_simple_get. + */ +void ida_simple_remove(struct ida *ida, unsigned int id) +{ + unsigned long flags; + + BUG_ON((int)id < 0); + spin_lock_irqsave(&compat_simple_ida_lock, flags); + ida_remove(ida, id); + spin_unlock_irqrestore(&compat_simple_ida_lock, flags); +} +EXPORT_SYMBOL_GPL(ida_simple_remove); +/* source lib/idr.c */ + +#ifdef CONFIG_OF +/** + * of_property_read_u32_array - Find and read an array of 32 bit integers + * from a property. + * + * @np: device node from which the property value is to be read. + * @propname: name of the property to be searched. + * @out_values: pointer to return value, modified only if return value is 0. + * @sz: number of array elements to read + * + * Search for a property in a device node and read 32-bit value(s) from + * it. Returns 0 on success, -EINVAL if the property does not exist, + * -ENODATA if property does not have a value, and -EOVERFLOW if the + * property data isn't large enough. + * + * The out_values is modified only if a valid u32 value can be decoded. 
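[Editor's aside: the two IDA helpers above are normally paired like this, e.g. for minor-number allocation; the ida instance and range are invented:]

	static DEFINE_IDA(bp_minor_ida);

	int minor = ida_simple_get(&bp_minor_ida, 0, 256, GFP_KERNEL);
	if (minor < 0)
		return minor;	/* -ENOMEM or -ENOSPC */
	/* ... use minor ... */
	ida_simple_remove(&bp_minor_ida, minor);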
+ */ +int of_property_read_u32_array(const struct device_node *np, + const char *propname, u32 *out_values, + size_t sz) +{ + const __be32 *val = of_find_property_value_of_size(np, propname, + (sz * sizeof(*out_values))); + + if (IS_ERR(val)) + return PTR_ERR(val); + + while (sz--) + *out_values++ = be32_to_cpup(val++); + return 0; +} +EXPORT_SYMBOL_GPL(of_property_read_u32_array); +#endif diff --git a/compat/compat-3.3.c b/compat/compat-3.3.c new file mode 100644 index 0000000..c064f09 --- /dev/null +++ b/compat/compat-3.3.c @@ -0,0 +1,239 @@ +/* + * Copyright 2012 Luis R. Rodriguez + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * Backport functionality introduced in Linux 3.3. + */ + +#include +#include +#include +#include +#include +#include +#include + +static void __copy_skb_header(struct sk_buff *new, const struct sk_buff *old) +{ + new->tstamp = old->tstamp; + new->dev = old->dev; + new->transport_header = old->transport_header; + new->network_header = old->network_header; + new->mac_header = old->mac_header; + skb_dst_copy(new, old); + new->rxhash = old->rxhash; +#if LINUX_VERSION_IS_GEQ(3,1,0) + new->ooo_okay = old->ooo_okay; +#endif +#if LINUX_VERSION_IS_GEQ(3,2,0) + new->l4_rxhash = old->l4_rxhash; +#endif +#ifdef CONFIG_XFRM + new->sp = secpath_get(old->sp); +#endif + memcpy(new->cb, old->cb, sizeof(old->cb)); + new->csum = old->csum; + new->local_df = old->local_df; + new->pkt_type = old->pkt_type; + new->ip_summed = old->ip_summed; + skb_copy_queue_mapping(new, old); + new->priority = old->priority; +#if IS_ENABLED(CONFIG_IP_VS) + new->ipvs_property = old->ipvs_property; +#endif + new->protocol = old->protocol; + new->mark = old->mark; + new->skb_iif = old->skb_iif; + __nf_copy(new, old); +#if IS_ENABLED(CONFIG_NETFILTER_XT_TARGET_TRACE) + new->nf_trace = old->nf_trace; +#endif +#ifdef CONFIG_NET_SCHED + new->tc_index = old->tc_index; +#ifdef CONFIG_NET_CLS_ACT + new->tc_verd = old->tc_verd; +#endif +#endif + new->vlan_tci = old->vlan_tci; + + skb_copy_secmark(new, old); +} + +static void copy_skb_header(struct sk_buff *new, const struct sk_buff *old) +{ +#ifndef NET_SKBUFF_DATA_USES_OFFSET + /* + * Shift between the two data areas in bytes + */ + unsigned long offset = new->data - old->data; +#endif + + __copy_skb_header(new, old); + +#ifndef NET_SKBUFF_DATA_USES_OFFSET + /* {transport,network,mac}_header are relative to skb->head */ + new->transport_header += offset; + new->network_header += offset; + if (skb_mac_header_was_set(new)) + new->mac_header += offset; +#endif + skb_shinfo(new)->gso_size = skb_shinfo(old)->gso_size; + skb_shinfo(new)->gso_segs = skb_shinfo(old)->gso_segs; + skb_shinfo(new)->gso_type = skb_shinfo(old)->gso_type; +} + +static void skb_clone_fraglist(struct sk_buff *skb) +{ + struct sk_buff *list; + + skb_walk_frags(skb, list) + skb_get(list); +} + + +/** + * __pskb_copy - create copy of an sk_buff with private head. + * @skb: buffer to copy + * @headroom: headroom of new skb + * @gfp_mask: allocation priority + * + * Make a copy of both an &sk_buff and part of its data, located + * in header. Fragmented data remain shared. This is used when + * the caller wishes to modify only header of &sk_buff and needs + * private copy of the header to alter. Returns %NULL on failure + * or the pointer to the buffer on success. + * The returned buffer has a reference count of 1. 
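[Editor's aside: before __pskb_copy() below, a short consumer sketch for the OF helper above, assuming a device-tree node np with a hypothetical three-cell property:]

	u32 vals[3];
	int ret;

	ret = of_property_read_u32_array(np, "example-triplet",
					 vals, ARRAY_SIZE(vals));
	if (ret)	/* -EINVAL, -ENODATA or -EOVERFLOW, see above */
		return ret;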
+ */ + +struct sk_buff *__pskb_copy(struct sk_buff *skb, int headroom, gfp_t gfp_mask) +{ + unsigned int size = skb_headlen(skb) + headroom; + struct sk_buff *n = alloc_skb(size, gfp_mask); + + if (!n) + goto out; + + /* Set the data pointer */ + skb_reserve(n, headroom); + /* Set the tail pointer and length */ + skb_put(n, skb_headlen(skb)); + /* Copy the bytes */ + skb_copy_from_linear_data(skb, n->data, n->len); + + n->truesize += skb->data_len; + n->data_len = skb->data_len; + n->len = skb->len; + + if (skb_shinfo(skb)->nr_frags) { + int i; + +/* + * SKBTX_DEV_ZEROCOPY was added on 3.1 as well but requires ubuf + * stuff added to the skb which we do not have + */ +#if 0 + if (skb_shinfo(skb)->tx_flags & SKBTX_DEV_ZEROCOPY) { + if (skb_copy_ubufs(skb, gfp_mask)) { + kfree_skb(n); + n = NULL; + goto out; + } + } +#endif + for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { + skb_shinfo(n)->frags[i] = skb_shinfo(skb)->frags[i]; +#if LINUX_VERSION_IS_GEQ(3,2,0) + skb_frag_ref(skb, i); +#else + get_page(skb_shinfo(skb)->frags[i].page); +#endif + } + skb_shinfo(n)->nr_frags = i; + } + + if (skb_has_frag_list(skb)) { + skb_shinfo(n)->frag_list = skb_shinfo(skb)->frag_list; + skb_clone_fraglist(n); + } + + copy_skb_header(n, skb); +out: + return n; +} +EXPORT_SYMBOL_GPL(__pskb_copy); + +static DEFINE_SPINLOCK(wq_name_lock); +static LIST_HEAD(wq_name_list); + +struct wq_name { + struct list_head list; + struct workqueue_struct *wq; + char name[24]; +}; + +struct workqueue_struct * +backport_alloc_workqueue(const char *fmt, unsigned int flags, + int max_active, struct lock_class_key *key, + const char *lock_name, ...) +{ + struct workqueue_struct *wq; + struct wq_name *n = kzalloc(sizeof(*n), GFP_KERNEL); + va_list args; + + if (!n) + return NULL; + + va_start(args, lock_name); + vsnprintf(n->name, sizeof(n->name), fmt, args); + va_end(args); + + wq = __alloc_workqueue_key(n->name, flags, max_active, key, lock_name); + if (!wq) { + kfree(n); + return NULL; + } + + n->wq = wq; + spin_lock(&wq_name_lock); + list_add(&n->list, &wq_name_list); + spin_unlock(&wq_name_lock); + + return wq; +} +EXPORT_SYMBOL_GPL(backport_alloc_workqueue); + +void backport_destroy_workqueue(struct workqueue_struct *wq) +{ + struct wq_name *n, *tmp; + + /* call original */ +#undef destroy_workqueue + destroy_workqueue(wq); + + spin_lock(&wq_name_lock); + list_for_each_entry_safe(n, tmp, &wq_name_list, list) { + if (n->wq == wq) { + list_del(&n->list); + kfree(n); + break; + } + } + spin_unlock(&wq_name_lock); +} +EXPORT_SYMBOL_GPL(backport_destroy_workqueue); + +void genl_notify(struct sk_buff *skb, struct net *net, u32 pid, u32 group, + struct nlmsghdr *nlh, gfp_t flags) +{ + struct sock *sk = net->genl_sock; + int report = 0; + + if (nlh) + report = nlmsg_report(nlh); + + nlmsg_notify(sk, skb, pid, group, report, flags); +} +EXPORT_SYMBOL_GPL(genl_notify); diff --git a/compat/compat-3.4.c b/compat/compat-3.4.c new file mode 100644 index 0000000..5275372 --- /dev/null +++ b/compat/compat-3.4.c @@ -0,0 +1,201 @@ +/* + * Copyright 2012 Luis R. Rodriguez + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * Backport functionality introduced in Linux 3.4. 
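[Editor's aside: the workqueue wrapper in compat-3.3.c above exists because alloc_workqueue() only grew a printf-style name in 3.3, and the formatted name must outlive the workqueue, so it is kept on wq_name_list until backport_destroy_workqueue() drops it. Assuming the accompanying headers map alloc_workqueue()/destroy_workqueue() onto these wrappers, as the "#undef destroy_workqueue" above suggests, call sites keep the modern form; the queue name is invented:]

	struct workqueue_struct *wq;

	wq = alloc_workqueue("bp_example_wq/%d", WQ_UNBOUND, 1, 0);
	if (!wq)
		return -ENOMEM;
	/* ... */
	destroy_workqueue(wq);	/* also frees the tracked name */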
+ */ + +#include +#include +#include +#include +#include + +#if LINUX_VERSION_IS_GEQ(3,2,0) +#include +#include +#include +#endif /* LINUX_VERSION_IS_GEQ(3,2,0) */ + +#if LINUX_VERSION_IS_GEQ(3,2,0) + +#if defined(CONFIG_REGMAP) +static void devm_regmap_release(struct device *dev, void *res) +{ + regmap_exit(*(struct regmap **)res); +} + +#if defined(CONFIG_REGMAP_I2C) +static int regmap_i2c_write( + struct device *dev, + const void *data, + size_t count) +{ + struct i2c_client *i2c = to_i2c_client(dev); + int ret; + + ret = i2c_master_send(i2c, data, count); + if (ret == count) + return 0; + else if (ret < 0) + return ret; + else + return -EIO; +} + +static int regmap_i2c_gather_write( + struct device *dev, + const void *reg, size_t reg_size, + const void *val, size_t val_size) +{ + struct i2c_client *i2c = to_i2c_client(dev); + struct i2c_msg xfer[2]; + int ret; + + /* If the I2C controller can't do a gather tell the core, it + * will substitute in a linear write for us. + */ + if (!i2c_check_functionality(i2c->adapter, I2C_FUNC_NOSTART)) + return -ENOTSUPP; + + xfer[0].addr = i2c->addr; + xfer[0].flags = 0; + xfer[0].len = reg_size; + xfer[0].buf = (void *)reg; + + xfer[1].addr = i2c->addr; + xfer[1].flags = I2C_M_NOSTART; + xfer[1].len = val_size; + xfer[1].buf = (void *)val; + + ret = i2c_transfer(i2c->adapter, xfer, 2); + if (ret == 2) + return 0; + if (ret < 0) + return ret; + else + return -EIO; +} + +static int regmap_i2c_read( + struct device *dev, + const void *reg, size_t reg_size, + void *val, size_t val_size) +{ + struct i2c_client *i2c = to_i2c_client(dev); + struct i2c_msg xfer[2]; + int ret; + + xfer[0].addr = i2c->addr; + xfer[0].flags = 0; + xfer[0].len = reg_size; + xfer[0].buf = (void *)reg; + + xfer[1].addr = i2c->addr; + xfer[1].flags = I2C_M_RD; + xfer[1].len = val_size; + xfer[1].buf = val; + + ret = i2c_transfer(i2c->adapter, xfer, 2); + if (ret == 2) + return 0; + else if (ret < 0) + return ret; + else + return -EIO; +} + +static struct regmap_bus regmap_i2c = { + .write = regmap_i2c_write, + .gather_write = regmap_i2c_gather_write, + .read = regmap_i2c_read, +}; +#endif /* defined(CONFIG_REGMAP_I2C) */ + +/** + * devm_regmap_init(): Initialise managed register map + * + * @dev: Device that will be interacted with + * @bus: Bus-specific callbacks to use with device + * @bus_context: Data passed to bus-specific callbacks + * @config: Configuration for register map + * + * The return value will be an ERR_PTR() on error or a valid pointer + * to a struct regmap. This function should generally not be called + * directly, it should be called by bus-specific init functions. The + * map will be automatically freed by the device management code. + */ +struct regmap *devm_regmap_init(struct device *dev, + const struct regmap_bus *bus, + const struct regmap_config *config) +{ + struct regmap **ptr, *regmap; + + ptr = devres_alloc(devm_regmap_release, sizeof(*ptr), GFP_KERNEL); + if (!ptr) + return ERR_PTR(-ENOMEM); + + regmap = regmap_init(dev, + bus, + config); + if (!IS_ERR(regmap)) { + *ptr = regmap; + devres_add(dev, ptr); + } else { + devres_free(ptr); + } + + return regmap; +} +EXPORT_SYMBOL_GPL(devm_regmap_init); + +#if defined(CONFIG_REGMAP_I2C) +/** + * devm_regmap_init_i2c(): Initialise managed register map + * + * @i2c: Device that will be interacted with + * @config: Configuration for register map + * + * The return value will be an ERR_PTR() on error or a valid pointer + * to a struct regmap. 
The regmap will be automatically freed by the + * device management code. + */ +struct regmap *devm_regmap_init_i2c(struct i2c_client *i2c, + const struct regmap_config *config) +{ + return devm_regmap_init(&i2c->dev, ®map_i2c, config); +} +EXPORT_SYMBOL_GPL(devm_regmap_init_i2c); +#endif /* defined(CONFIG_REGMAP_I2C) */ + +#endif /* defined(CONFIG_REGMAP) */ +#endif /* LINUX_VERSION_IS_GEQ(3,2,0) */ + +int simple_open(struct inode *inode, struct file *file) +{ + if (inode->i_private) + file->private_data = inode->i_private; + return 0; +} +EXPORT_SYMBOL_GPL(simple_open); + +#ifdef CONFIG_COMPAT +static int __compat_put_timespec(const struct timespec *ts, struct compat_timespec __user *cts) +{ + return (!access_ok(VERIFY_WRITE, cts, sizeof(*cts)) || + __put_user(ts->tv_sec, &cts->tv_sec) || + __put_user(ts->tv_nsec, &cts->tv_nsec)) ? -EFAULT : 0; +} + +int compat_put_timespec(const struct timespec *ts, void __user *uts) +{ + if (COMPAT_USE_64BIT_TIME) + return copy_to_user(uts, ts, sizeof *ts) ? -EFAULT : 0; + else + return __compat_put_timespec(ts, uts); +} +EXPORT_SYMBOL_GPL(compat_put_timespec); +#endif diff --git a/compat/compat-3.5.c b/compat/compat-3.5.c new file mode 100644 index 0000000..de31228 --- /dev/null +++ b/compat/compat-3.5.c @@ -0,0 +1,167 @@ +/* + * Copyright 2012-2013 Luis R. Rodriguez + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * Backport functionality introduced in Linux 3.5. + */ + +#include +#include +#include +#include +#include +#include + +#if LINUX_VERSION_IS_GEQ(3,2,0) +#include + +/** + * devres_release - Find a device resource and destroy it, calling release + * @dev: Device to find resource from + * @release: Look for resources associated with this release function + * @match: Match function (optional) + * @match_data: Data for the match function + * + * Find the latest devres of @dev associated with @release and for + * which @match returns 1. If @match is NULL, it's considered to + * match all. If found, the resource is removed atomically, the + * release function called and the resource freed. + * + * RETURNS: + * 0 if devres is found and freed, -ENOENT if not found. + */ +int devres_release(struct device *dev, dr_release_t release, + dr_match_t match, void *match_data) +{ + void *res; + + res = devres_remove(dev, release, match, match_data); + if (unlikely(!res)) + return -ENOENT; + + (*release)(dev, res); + devres_free(res); + return 0; +} +EXPORT_SYMBOL_GPL(devres_release); +#endif /* LINUX_VERSION_IS_GEQ(3,2,0) */ + +/* + * Commit 7a4e7408c5cadb240e068a662251754a562355e3 + * exported overflowuid and overflowgid for all + * kernel configurations, prior to that we only + * had it exported when CONFIG_UID16 was enabled. + * We are technically redefining it here but + * nothing seems to be changing it, except + * kernel/ code does epose it via sysctl and + * proc... if required later we can add that here. 
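[Editor's aside: returning to devm_regmap_init_i2c() above, a minimal probe() sketch; the driver and register-map geometry are invented:]

	static const struct regmap_config bp_regmap_config = {
		.reg_bits = 8,
		.val_bits = 8,
	};

	static int bp_probe(struct i2c_client *i2c,
			    const struct i2c_device_id *id)
	{
		struct regmap *map;

		map = devm_regmap_init_i2c(i2c, &bp_regmap_config);
		if (IS_ERR(map))
			return PTR_ERR(map);

		/* no regmap_exit(): devm_regmap_release() runs on detach */
		return 0;
	}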
+ */ +#ifndef CONFIG_UID16 +int overflowuid = DEFAULT_OVERFLOWUID; +int overflowgid = DEFAULT_OVERFLOWGID; + +EXPORT_SYMBOL_GPL(overflowuid); +EXPORT_SYMBOL_GPL(overflowgid); +#endif + +#if IS_ENABLED(CONFIG_PTP_1588_CLOCK) +int ptp_clock_index(struct ptp_clock *ptp) +{ + return ptp->index; +} +EXPORT_SYMBOL(ptp_clock_index); +#endif /* CONFIG_PTP_1588_CLOCK */ + +#ifdef CONFIG_GPIOLIB +static void devm_gpio_release(struct device *dev, void *res) +{ + unsigned *gpio = res; + + gpio_free(*gpio); +} + +/** + * devm_gpio_request - request a GPIO for a managed device + * @dev: device to request the GPIO for + * @gpio: GPIO to allocate + * @label: the name of the requested GPIO + * + * Except for the extra @dev argument, this function takes the + * same arguments and performs the same function as + * gpio_request(). GPIOs requested with this function will be + * automatically freed on driver detach. + * + * If an GPIO allocated with this function needs to be freed + * separately, devm_gpio_free() must be used. + */ + +int devm_gpio_request(struct device *dev, unsigned gpio, const char *label) +{ + unsigned *dr; + int rc; + + dr = devres_alloc(devm_gpio_release, sizeof(unsigned), GFP_KERNEL); + if (!dr) + return -ENOMEM; + + rc = gpio_request(gpio, label); + if (rc) { + devres_free(dr); + return rc; + } + + *dr = gpio; + devres_add(dev, dr); + + return 0; +} +EXPORT_SYMBOL_GPL(devm_gpio_request); + +/** + * devm_gpio_request_one - request a single GPIO with initial setup + * @dev: device to request for + * @gpio: the GPIO number + * @flags: GPIO configuration as specified by GPIOF_* + * @label: a literal description string of this GPIO + */ +int devm_gpio_request_one(struct device *dev, unsigned gpio, + unsigned long flags, const char *label) +{ + unsigned *dr; + int rc; + + dr = devres_alloc(devm_gpio_release, sizeof(unsigned), GFP_KERNEL); + if (!dr) + return -ENOMEM; + + rc = gpio_request_one(gpio, flags, label); + if (rc) { + devres_free(dr); + return rc; + } + + *dr = gpio; + devres_add(dev, dr); + + return 0; +} +EXPORT_SYMBOL_GPL(devm_gpio_request_one); + +static int devm_gpio_match(struct device *dev, void *res, void *data) +{ + unsigned *this = res, *gpio = data; + + return *this == *gpio; +} + +void devm_gpio_free(struct device *dev, unsigned int gpio) +{ + WARN_ON(devres_destroy(dev, devm_gpio_release, devm_gpio_match, + &gpio)); + gpio_free(gpio); +} +EXPORT_SYMBOL_GPL(devm_gpio_free); +#endif /* CONFIG_GPIOLIB */ diff --git a/compat/compat-3.6.c b/compat/compat-3.6.c new file mode 100644 index 0000000..ef36485 --- /dev/null +++ b/compat/compat-3.6.c @@ -0,0 +1,27 @@ +/* + * Copyright (c) 2013 Luis R. Rodriguez + * + * Backport compatibility file for Linux for kernels 3.6. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#include +#include +#include + +/* whoopsie ! */ +#ifndef CONFIG_COMMON_CLK +int clk_enable(struct clk *clk) +{ + return 0; +} +EXPORT_SYMBOL_GPL(clk_enable); + +void clk_disable(struct clk *clk) +{ +} +EXPORT_SYMBOL_GPL(clk_disable); +#endif diff --git a/compat/compat-3.7.c b/compat/compat-3.7.c new file mode 100644 index 0000000..a70709c --- /dev/null +++ b/compat/compat-3.7.c @@ -0,0 +1,290 @@ +/* + * Copyright 2012 Luis R. 
Rodriguez + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * Backport functionality introduced in Linux 3.7. + */ + +#include +#include +#include +#include +#include +#include + +bool mod_delayed_work(struct workqueue_struct *wq, struct delayed_work *dwork, + unsigned long delay) +{ + cancel_delayed_work(dwork); + queue_delayed_work(wq, dwork, delay); + return false; +} +EXPORT_SYMBOL_GPL(mod_delayed_work); + +#ifdef CONFIG_PCI +/* + * Kernels >= 3.7 get their PCI-E Capabilities Register cached + * via the pci_dev->pcie_flags_reg so for older kernels we have + * no other option but to read this every single time we need + * it accessed. If we really cared to improve the efficiency + * of this we could try to find an unused u16 varible on the + * pci_dev but if we found it we likely would remove it from + * the kernel anyway right? Bite me. + */ +static inline u16 pcie_flags_reg(struct pci_dev *dev) +{ + int pos; + u16 reg16; + + pos = pci_find_capability(dev, PCI_CAP_ID_EXP); + if (!pos) + return 0; + + pci_read_config_word(dev, pos + PCI_EXP_FLAGS, ®16); + + return reg16; +} + +#define pci_pcie_type LINUX_BACKPORT(pci_pcie_type) +static inline int pci_pcie_type(struct pci_dev *dev) +{ + return (pcie_flags_reg(dev) & PCI_EXP_FLAGS_TYPE) >> 4; +} + +#define pcie_cap_version LINUX_BACKPORT(pcie_cap_version) +static inline int pcie_cap_version(struct pci_dev *dev) +{ + return pcie_flags_reg(dev) & PCI_EXP_FLAGS_VERS; +} + +static inline bool pcie_cap_has_lnkctl(struct pci_dev *dev) +{ + int type = pci_pcie_type(dev); + + return pcie_cap_version(dev) > 1 || + type == PCI_EXP_TYPE_ROOT_PORT || + type == PCI_EXP_TYPE_ENDPOINT || + type == PCI_EXP_TYPE_LEG_END; +} + +static inline bool pcie_cap_has_sltctl(struct pci_dev *dev) +{ + int type = pci_pcie_type(dev); + + return pcie_cap_version(dev) > 1 || + type == PCI_EXP_TYPE_ROOT_PORT || + (type == PCI_EXP_TYPE_DOWNSTREAM && + pcie_flags_reg(dev) & PCI_EXP_FLAGS_SLOT); +} + +static inline bool pcie_cap_has_rtctl(struct pci_dev *dev) +{ + int type = pci_pcie_type(dev); + + return pcie_cap_version(dev) > 1 || + type == PCI_EXP_TYPE_ROOT_PORT || + type == PCI_EXP_TYPE_RC_EC; +} + +static bool pcie_capability_reg_implemented(struct pci_dev *dev, int pos) +{ + if (!pci_is_pcie(dev)) + return false; + + switch (pos) { + case PCI_EXP_FLAGS_TYPE: + return true; + case PCI_EXP_DEVCAP: + case PCI_EXP_DEVCTL: + case PCI_EXP_DEVSTA: + return true; + case PCI_EXP_LNKCAP: + case PCI_EXP_LNKCTL: + case PCI_EXP_LNKSTA: + return pcie_cap_has_lnkctl(dev); + case PCI_EXP_SLTCAP: + case PCI_EXP_SLTCTL: + case PCI_EXP_SLTSTA: + return pcie_cap_has_sltctl(dev); + case PCI_EXP_RTCTL: + case PCI_EXP_RTCAP: + case PCI_EXP_RTSTA: + return pcie_cap_has_rtctl(dev); + case PCI_EXP_DEVCAP2: + case PCI_EXP_DEVCTL2: + case PCI_EXP_LNKCAP2: + case PCI_EXP_LNKCTL2: + case PCI_EXP_LNKSTA2: + return pcie_cap_version(dev) > 1; + default: + return false; + } +} + +/* + * Note that these accessor functions are only for the "PCI Express + * Capability" (see PCIe spec r3.0, sec 7.8). They do not apply to the + * other "PCI Express Extended Capabilities" (AER, VC, ACS, MFVC, etc.) 
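[Editor's aside, before the accessor bodies below: one caveat worth flagging for the mod_delayed_work() fallback at the top of this file. Unlike the native >= 3.7 version it is not atomic (a cancel followed by a re-queue) and it always returns false, so callers must not use the return value to decide whether the work was previously pending. The call itself is unchanged; priv is hypothetical:]

	/* re-arm a periodic job, replacing any pending expiry */
	mod_delayed_work(system_wq, &priv->refresh_work,
			 msecs_to_jiffies(500));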
+ */ +int pcie_capability_read_word(struct pci_dev *dev, int pos, u16 *val) +{ + int ret; + + *val = 0; + if (pos & 1) + return -EINVAL; + + if (pcie_capability_reg_implemented(dev, pos)) { + ret = pci_read_config_word(dev, pci_pcie_cap(dev) + pos, val); + /* + * Reset *val to 0 if pci_read_config_word() fails, it may + * have been written as 0xFFFF if hardware error happens + * during pci_read_config_word(). + */ + if (ret) + *val = 0; + return ret; + } + + /* + * For Functions that do not implement the Slot Capabilities, + * Slot Status, and Slot Control registers, these spaces must + * be hardwired to 0b, with the exception of the Presence Detect + * State bit in the Slot Status register of Downstream Ports, + * which must be hardwired to 1b. (PCIe Base Spec 3.0, sec 7.8) + */ + if (pci_is_pcie(dev) && pos == PCI_EXP_SLTSTA && + pci_pcie_type(dev) == PCI_EXP_TYPE_DOWNSTREAM) { + *val = PCI_EXP_SLTSTA_PDS; + } + + return 0; +} +EXPORT_SYMBOL_GPL(pcie_capability_read_word); + +int pcie_capability_read_dword(struct pci_dev *dev, int pos, u32 *val) +{ + int ret; + + *val = 0; + if (pos & 3) + return -EINVAL; + + if (pcie_capability_reg_implemented(dev, pos)) { + ret = pci_read_config_dword(dev, pci_pcie_cap(dev) + pos, val); + /* + * Reset *val to 0 if pci_read_config_dword() fails, it may + * have been written as 0xFFFFFFFF if hardware error happens + * during pci_read_config_dword(). + */ + if (ret) + *val = 0; + return ret; + } + + if (pci_is_pcie(dev) && pos == PCI_EXP_SLTCTL && + pci_pcie_type(dev) == PCI_EXP_TYPE_DOWNSTREAM) { + *val = PCI_EXP_SLTSTA_PDS; + } + + return 0; +} +EXPORT_SYMBOL_GPL(pcie_capability_read_dword); + +int pcie_capability_write_word(struct pci_dev *dev, int pos, u16 val) +{ + if (pos & 1) + return -EINVAL; + + if (!pcie_capability_reg_implemented(dev, pos)) + return 0; + + return pci_write_config_word(dev, pci_pcie_cap(dev) + pos, val); +} +EXPORT_SYMBOL_GPL(pcie_capability_write_word); + +int pcie_capability_write_dword(struct pci_dev *dev, int pos, u32 val) +{ + if (pos & 3) + return -EINVAL; + + if (!pcie_capability_reg_implemented(dev, pos)) + return 0; + + return pci_write_config_dword(dev, pci_pcie_cap(dev) + pos, val); +} +EXPORT_SYMBOL_GPL(pcie_capability_write_dword); + +int pcie_capability_clear_and_set_word(struct pci_dev *dev, int pos, + u16 clear, u16 set) +{ + int ret; + u16 val; + + ret = pcie_capability_read_word(dev, pos, &val); + if (!ret) { + val &= ~clear; + val |= set; + ret = pcie_capability_write_word(dev, pos, val); + } + + return ret; +} +EXPORT_SYMBOL_GPL(pcie_capability_clear_and_set_word); + +int pcie_capability_clear_and_set_dword(struct pci_dev *dev, int pos, + u32 clear, u32 set) +{ + int ret; + u32 val; + + ret = pcie_capability_read_dword(dev, pos, &val); + if (!ret) { + val &= ~clear; + val |= set; + ret = pcie_capability_write_dword(dev, pos, val); + } + + return ret; +} +EXPORT_SYMBOL_GPL(pcie_capability_clear_and_set_dword); +#endif + +#ifdef CONFIG_OF +#if LINUX_VERSION_IS_LESS(3,7,0) +/** + * of_get_child_by_name - Find the child node by name for a given parent + * @node: parent node + * @name: child name to look for. + * + * This function looks for child node for given matching name + * + * Returns a node pointer if found, with refcount incremented, use + * of_node_put() on it when done. + * Returns NULL if node is not found. 
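+ *
+ * Example (illustrative only; "codec" is a hypothetical child node name):
+ *
+ *	struct device_node *child;
+ *
+ *	child = of_get_child_by_name(dev->of_node, "codec");
+ *	if (child) {
+ *		... use the child node ...
+ *		of_node_put(child);
+ *	}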
+ */ +struct device_node *of_get_child_by_name(const struct device_node *node, + const char *name) +{ + struct device_node *child; + + for_each_child_of_node(node, child) + if (child->name && (of_node_cmp(child->name, name) == 0)) + break; + return child; +} +EXPORT_SYMBOL_GPL(of_get_child_by_name); +#endif /* LINUX_VERSION_IS_LESS(3,7,0) */ +#endif /* CONFIG_OF */ + +int sg_nents(struct scatterlist *sg) +{ + int nents; + for (nents = 0; sg; sg = sg_next(sg)) + nents++; + return nents; +} +EXPORT_SYMBOL_GPL(sg_nents); diff --git a/compat/compat-3.8.c b/compat/compat-3.8.c new file mode 100644 index 0000000..ff9cd49 --- /dev/null +++ b/compat/compat-3.8.c @@ -0,0 +1,455 @@ +/* + * Copyright (c) 1999 Andreas Gal + * Copyright (c) 2000-2005 Vojtech Pavlik + * Copyright (c) 2005 Michael Haboustak for Concept2, Inc + * Copyright (c) 2006-2012 Jiri Kosina + * Copyright (c) 2012 Luis R. Rodriguez + * + * Backport functionality introduced in Linux 3.8. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#include +#include +#include "hid-ids.h" +#include +#include +#include +#include +#include +#include + +#if LINUX_VERSION_IS_LESS(3,7,8) +void netdev_set_default_ethtool_ops(struct net_device *dev, + const struct ethtool_ops *ops) +{ + if (!dev->ethtool_ops) + dev->ethtool_ops = ops; +} +EXPORT_SYMBOL_GPL(netdev_set_default_ethtool_ops); +#endif + +/* a list of devices that shouldn't be handled by HID core at all */ +static const struct hid_device_id hid_ignore_list[] = { + { HID_USB_DEVICE(USB_VENDOR_ID_ACECAD, USB_DEVICE_ID_ACECAD_FLAIR) }, + { HID_USB_DEVICE(USB_VENDOR_ID_ACECAD, USB_DEVICE_ID_ACECAD_302) }, + { HID_USB_DEVICE(USB_VENDOR_ID_ADS_TECH, USB_DEVICE_ID_ADS_TECH_RADIO_SI470X) }, + { HID_USB_DEVICE(USB_VENDOR_ID_AIPTEK, USB_DEVICE_ID_AIPTEK_01) }, + { HID_USB_DEVICE(USB_VENDOR_ID_AIPTEK, USB_DEVICE_ID_AIPTEK_10) }, + { HID_USB_DEVICE(USB_VENDOR_ID_AIPTEK, USB_DEVICE_ID_AIPTEK_20) }, + { HID_USB_DEVICE(USB_VENDOR_ID_AIPTEK, USB_DEVICE_ID_AIPTEK_21) }, + { HID_USB_DEVICE(USB_VENDOR_ID_AIPTEK, USB_DEVICE_ID_AIPTEK_22) }, + { HID_USB_DEVICE(USB_VENDOR_ID_AIPTEK, USB_DEVICE_ID_AIPTEK_23) }, + { HID_USB_DEVICE(USB_VENDOR_ID_AIPTEK, USB_DEVICE_ID_AIPTEK_24) }, + { HID_USB_DEVICE(USB_VENDOR_ID_AIRCABLE, USB_DEVICE_ID_AIRCABLE1) }, + { HID_USB_DEVICE(USB_VENDOR_ID_ALCOR, USB_DEVICE_ID_ALCOR_USBRS232) }, + { HID_USB_DEVICE(USB_VENDOR_ID_ASUSTEK, USB_DEVICE_ID_ASUSTEK_LCM)}, + { HID_USB_DEVICE(USB_VENDOR_ID_ASUSTEK, USB_DEVICE_ID_ASUSTEK_LCM2)}, + { HID_USB_DEVICE(USB_VENDOR_ID_AVERMEDIA, USB_DEVICE_ID_AVER_FM_MR800) }, + { HID_USB_DEVICE(USB_VENDOR_ID_AXENTIA, USB_DEVICE_ID_AXENTIA_FM_RADIO) }, + { HID_USB_DEVICE(USB_VENDOR_ID_BERKSHIRE, USB_DEVICE_ID_BERKSHIRE_PCWD) }, + { HID_USB_DEVICE(USB_VENDOR_ID_CIDC, 0x0103) }, + { HID_USB_DEVICE(USB_VENDOR_ID_CYGNAL, USB_DEVICE_ID_CYGNAL_RADIO_SI470X) }, + { HID_USB_DEVICE(USB_VENDOR_ID_CMEDIA, USB_DEVICE_ID_CM109) }, + { HID_USB_DEVICE(USB_VENDOR_ID_CYPRESS, USB_DEVICE_ID_CYPRESS_HIDCOM) }, + { HID_USB_DEVICE(USB_VENDOR_ID_CYPRESS, USB_DEVICE_ID_CYPRESS_ULTRAMOUSE) }, + { HID_USB_DEVICE(USB_VENDOR_ID_DEALEXTREAME, USB_DEVICE_ID_DEALEXTREAME_RADIO_SI4701) }, + { HID_USB_DEVICE(USB_VENDOR_ID_DELORME, USB_DEVICE_ID_DELORME_EARTHMATE) }, + { HID_USB_DEVICE(USB_VENDOR_ID_DELORME, USB_DEVICE_ID_DELORME_EM_LT20) }, + { HID_USB_DEVICE(USB_VENDOR_ID_DREAM_CHEEKY, 0x0004) }, + { 
HID_USB_DEVICE(USB_VENDOR_ID_DREAM_CHEEKY, 0x000a) }, + { HID_USB_DEVICE(USB_VENDOR_ID_ESSENTIAL_REALITY, USB_DEVICE_ID_ESSENTIAL_REALITY_P5) }, + { HID_USB_DEVICE(USB_VENDOR_ID_ETT, USB_DEVICE_ID_TC5UH) }, + { HID_USB_DEVICE(USB_VENDOR_ID_ETT, USB_DEVICE_ID_TC4UM) }, + { HID_USB_DEVICE(USB_VENDOR_ID_GENERAL_TOUCH, 0x0001) }, + { HID_USB_DEVICE(USB_VENDOR_ID_GENERAL_TOUCH, 0x0002) }, + { HID_USB_DEVICE(USB_VENDOR_ID_GENERAL_TOUCH, 0x0004) }, + { HID_USB_DEVICE(USB_VENDOR_ID_GLAB, USB_DEVICE_ID_4_PHIDGETSERVO_30) }, + { HID_USB_DEVICE(USB_VENDOR_ID_GLAB, USB_DEVICE_ID_1_PHIDGETSERVO_30) }, + { HID_USB_DEVICE(USB_VENDOR_ID_GLAB, USB_DEVICE_ID_0_0_4_IF_KIT) }, + { HID_USB_DEVICE(USB_VENDOR_ID_GLAB, USB_DEVICE_ID_0_16_16_IF_KIT) }, + { HID_USB_DEVICE(USB_VENDOR_ID_GLAB, USB_DEVICE_ID_8_8_8_IF_KIT) }, + { HID_USB_DEVICE(USB_VENDOR_ID_GLAB, USB_DEVICE_ID_0_8_7_IF_KIT) }, + { HID_USB_DEVICE(USB_VENDOR_ID_GLAB, USB_DEVICE_ID_0_8_8_IF_KIT) }, + { HID_USB_DEVICE(USB_VENDOR_ID_GLAB, USB_DEVICE_ID_PHIDGET_MOTORCONTROL) }, + { HID_USB_DEVICE(USB_VENDOR_ID_GOTOP, USB_DEVICE_ID_SUPER_Q2) }, + { HID_USB_DEVICE(USB_VENDOR_ID_GOTOP, USB_DEVICE_ID_GOGOPEN) }, + { HID_USB_DEVICE(USB_VENDOR_ID_GOTOP, USB_DEVICE_ID_PENPOWER) }, + { HID_USB_DEVICE(USB_VENDOR_ID_GRETAGMACBETH, USB_DEVICE_ID_GRETAGMACBETH_HUEY) }, + { HID_USB_DEVICE(USB_VENDOR_ID_GRIFFIN, USB_DEVICE_ID_POWERMATE) }, + { HID_USB_DEVICE(USB_VENDOR_ID_GRIFFIN, USB_DEVICE_ID_SOUNDKNOB) }, + { HID_USB_DEVICE(USB_VENDOR_ID_GRIFFIN, USB_DEVICE_ID_RADIOSHARK) }, + { HID_USB_DEVICE(USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_90) }, + { HID_USB_DEVICE(USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_100) }, + { HID_USB_DEVICE(USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_101) }, + { HID_USB_DEVICE(USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_103) }, + { HID_USB_DEVICE(USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_104) }, + { HID_USB_DEVICE(USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_105) }, + { HID_USB_DEVICE(USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_106) }, + { HID_USB_DEVICE(USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_107) }, + { HID_USB_DEVICE(USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_108) }, + { HID_USB_DEVICE(USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_200) }, + { HID_USB_DEVICE(USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_201) }, + { HID_USB_DEVICE(USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_202) }, + { HID_USB_DEVICE(USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_203) }, + { HID_USB_DEVICE(USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_204) }, + { HID_USB_DEVICE(USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_205) }, + { HID_USB_DEVICE(USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_206) }, + { HID_USB_DEVICE(USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_207) }, + { HID_USB_DEVICE(USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_300) }, + { HID_USB_DEVICE(USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_301) }, + { HID_USB_DEVICE(USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_302) }, + { HID_USB_DEVICE(USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_303) }, + { HID_USB_DEVICE(USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_304) }, + { HID_USB_DEVICE(USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_305) }, + { HID_USB_DEVICE(USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_306) }, + { HID_USB_DEVICE(USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_307) }, + { HID_USB_DEVICE(USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_308) }, + { HID_USB_DEVICE(USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_309) }, + { HID_USB_DEVICE(USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_400) }, + { HID_USB_DEVICE(USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_401) }, + { HID_USB_DEVICE(USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_402) }, + { HID_USB_DEVICE(USB_VENDOR_ID_GTCO, 
USB_DEVICE_ID_GTCO_403) }, + { HID_USB_DEVICE(USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_404) }, + { HID_USB_DEVICE(USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_405) }, + { HID_USB_DEVICE(USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_500) }, + { HID_USB_DEVICE(USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_501) }, + { HID_USB_DEVICE(USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_502) }, + { HID_USB_DEVICE(USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_503) }, + { HID_USB_DEVICE(USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_504) }, + { HID_USB_DEVICE(USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_1000) }, + { HID_USB_DEVICE(USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_1001) }, + { HID_USB_DEVICE(USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_1002) }, + { HID_USB_DEVICE(USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_1003) }, + { HID_USB_DEVICE(USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_1004) }, + { HID_USB_DEVICE(USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_1005) }, + { HID_USB_DEVICE(USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_1006) }, + { HID_USB_DEVICE(USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_1007) }, + { HID_USB_DEVICE(USB_VENDOR_ID_IMATION, USB_DEVICE_ID_DISC_STAKKA) }, + { HID_USB_DEVICE(USB_VENDOR_ID_KBGEAR, USB_DEVICE_ID_KBGEAR_JAMSTUDIO) }, + { HID_USB_DEVICE(USB_VENDOR_ID_KWORLD, USB_DEVICE_ID_KWORLD_RADIO_FM700) }, + { HID_USB_DEVICE(USB_VENDOR_ID_KYE, USB_DEVICE_ID_KYE_GPEN_560) }, + { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_KYE, 0x0058) }, + { HID_USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_CASSY) }, + { HID_USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_CASSY2) }, + { HID_USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_POCKETCASSY) }, + { HID_USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_POCKETCASSY2) }, + { HID_USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_MOBILECASSY) }, + { HID_USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_MOBILECASSY2) }, + { HID_USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_MICROCASSYVOLTAGE) }, + { HID_USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_MICROCASSYCURRENT) }, + { HID_USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_MICROCASSYTIME) }, + { HID_USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_MICROCASSYTEMPERATURE) }, + { HID_USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_MICROCASSYPH) }, + { HID_USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_JWM) }, + { HID_USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_DMMP) }, + { HID_USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_UMIP) }, + { HID_USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_UMIC) }, + { HID_USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_UMIB) }, + { HID_USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_XRAY) }, + { HID_USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_XRAY2) }, + { HID_USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_VIDEOCOM) }, + { HID_USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_MOTOR) }, + { HID_USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_COM3LAB) }, + { HID_USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_TELEPORT) }, + { HID_USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_NETWORKANALYSER) }, + { HID_USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_POWERCONTROL) }, + { HID_USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_MACHINETEST) }, + { HID_USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_MOSTANALYSER) }, + { HID_USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_MOSTANALYSER2) }, + { HID_USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_ABSESP) }, + { HID_USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_AUTODATABUS) }, + { HID_USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_MCT) }, + { HID_USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_HYBRID) }, + { HID_USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_HEATCONTROL) 
}, + { HID_USB_DEVICE(USB_VENDOR_ID_MADCATZ, USB_DEVICE_ID_MADCATZ_BEATPAD) }, + { HID_USB_DEVICE(USB_VENDOR_ID_MCC, USB_DEVICE_ID_MCC_PMD1024LS) }, + { HID_USB_DEVICE(USB_VENDOR_ID_MCC, USB_DEVICE_ID_MCC_PMD1208LS) }, + { HID_USB_DEVICE(USB_VENDOR_ID_MICROCHIP, USB_DEVICE_ID_PICKIT1) }, + { HID_USB_DEVICE(USB_VENDOR_ID_MICROCHIP, USB_DEVICE_ID_PICKIT2) }, + { HID_USB_DEVICE(USB_VENDOR_ID_NATIONAL_SEMICONDUCTOR, USB_DEVICE_ID_N_S_HARMONY) }, + { HID_USB_DEVICE(USB_VENDOR_ID_ONTRAK, USB_DEVICE_ID_ONTRAK_ADU100) }, + { HID_USB_DEVICE(USB_VENDOR_ID_ONTRAK, USB_DEVICE_ID_ONTRAK_ADU100 + 20) }, + { HID_USB_DEVICE(USB_VENDOR_ID_ONTRAK, USB_DEVICE_ID_ONTRAK_ADU100 + 30) }, + { HID_USB_DEVICE(USB_VENDOR_ID_ONTRAK, USB_DEVICE_ID_ONTRAK_ADU100 + 100) }, + { HID_USB_DEVICE(USB_VENDOR_ID_ONTRAK, USB_DEVICE_ID_ONTRAK_ADU100 + 108) }, + { HID_USB_DEVICE(USB_VENDOR_ID_ONTRAK, USB_DEVICE_ID_ONTRAK_ADU100 + 118) }, + { HID_USB_DEVICE(USB_VENDOR_ID_ONTRAK, USB_DEVICE_ID_ONTRAK_ADU100 + 200) }, + { HID_USB_DEVICE(USB_VENDOR_ID_ONTRAK, USB_DEVICE_ID_ONTRAK_ADU100 + 300) }, + { HID_USB_DEVICE(USB_VENDOR_ID_ONTRAK, USB_DEVICE_ID_ONTRAK_ADU100 + 400) }, + { HID_USB_DEVICE(USB_VENDOR_ID_ONTRAK, USB_DEVICE_ID_ONTRAK_ADU100 + 500) }, + { HID_USB_DEVICE(USB_VENDOR_ID_PANJIT, 0x0001) }, + { HID_USB_DEVICE(USB_VENDOR_ID_PANJIT, 0x0002) }, + { HID_USB_DEVICE(USB_VENDOR_ID_PANJIT, 0x0003) }, + { HID_USB_DEVICE(USB_VENDOR_ID_PANJIT, 0x0004) }, + { HID_USB_DEVICE(USB_VENDOR_ID_PHILIPS, USB_DEVICE_ID_PHILIPS_IEEE802154_DONGLE) }, + { HID_USB_DEVICE(USB_VENDOR_ID_POWERCOM, USB_DEVICE_ID_POWERCOM_UPS) }, +#if defined(CONFIG_MOUSE_SYNAPTICS_USB) || defined(CONFIG_MOUSE_SYNAPTICS_USB_MODULE) + { HID_USB_DEVICE(USB_VENDOR_ID_SYNAPTICS, USB_DEVICE_ID_SYNAPTICS_TP) }, + { HID_USB_DEVICE(USB_VENDOR_ID_SYNAPTICS, USB_DEVICE_ID_SYNAPTICS_INT_TP) }, + { HID_USB_DEVICE(USB_VENDOR_ID_SYNAPTICS, USB_DEVICE_ID_SYNAPTICS_CPAD) }, + { HID_USB_DEVICE(USB_VENDOR_ID_SYNAPTICS, USB_DEVICE_ID_SYNAPTICS_STICK) }, + { HID_USB_DEVICE(USB_VENDOR_ID_SYNAPTICS, USB_DEVICE_ID_SYNAPTICS_WP) }, + { HID_USB_DEVICE(USB_VENDOR_ID_SYNAPTICS, USB_DEVICE_ID_SYNAPTICS_COMP_TP) }, + { HID_USB_DEVICE(USB_VENDOR_ID_SYNAPTICS, USB_DEVICE_ID_SYNAPTICS_WTP) }, + { HID_USB_DEVICE(USB_VENDOR_ID_SYNAPTICS, USB_DEVICE_ID_SYNAPTICS_DPAD) }, +#endif + { HID_USB_DEVICE(USB_VENDOR_ID_VERNIER, USB_DEVICE_ID_VERNIER_LABPRO) }, + { HID_USB_DEVICE(USB_VENDOR_ID_VERNIER, USB_DEVICE_ID_VERNIER_GOTEMP) }, + { HID_USB_DEVICE(USB_VENDOR_ID_VERNIER, USB_DEVICE_ID_VERNIER_SKIP) }, + { HID_USB_DEVICE(USB_VENDOR_ID_VERNIER, USB_DEVICE_ID_VERNIER_CYCLOPS) }, + { HID_USB_DEVICE(USB_VENDOR_ID_VERNIER, USB_DEVICE_ID_VERNIER_LCSPEC) }, + { HID_USB_DEVICE(USB_VENDOR_ID_WACOM, HID_ANY_ID) }, + { HID_USB_DEVICE(USB_VENDOR_ID_WISEGROUP, USB_DEVICE_ID_4_PHIDGETSERVO_20) }, + { HID_USB_DEVICE(USB_VENDOR_ID_WISEGROUP, USB_DEVICE_ID_1_PHIDGETSERVO_20) }, + { HID_USB_DEVICE(USB_VENDOR_ID_WISEGROUP, USB_DEVICE_ID_8_8_4_IF_KIT) }, + { HID_USB_DEVICE(USB_VENDOR_ID_YEALINK, USB_DEVICE_ID_YEALINK_P1K_P4K_B2K) }, + { } +}; + +/** + * hid_mouse_ignore_list - mouse devices which should not be handled by the hid layer + * + * There are composite devices for which we want to ignore only a certain + * interface. This is a list of devices for which only the mouse interface will + * be ignored. This allows a dedicated driver to take care of the interface. 
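+ *
+ * Entries are matched by hid_match_id() and are only consulted for
+ * interfaces whose hdev->type is HID_TYPE_USBMOUSE (see hid_ignore()
+ * below).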
+ */ +static const struct hid_device_id hid_mouse_ignore_list[] = { + /* appletouch driver */ + { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_FOUNTAIN_ANSI) }, + { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_FOUNTAIN_ISO) }, + { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER_ANSI) }, + { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER_ISO) }, + { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER_JIS) }, + { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER3_ANSI) }, + { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER3_ISO) }, + { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER3_JIS) }, + { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER4_ANSI) }, + { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER4_ISO) }, + { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER4_JIS) }, + { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER4_HF_ANSI) }, + { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER4_HF_ISO) }, + { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER4_HF_JIS) }, + { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING_ANSI) }, + { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING_ISO) }, + { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING_JIS) }, + { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING2_ANSI) }, + { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING2_ISO) }, + { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING2_JIS) }, + { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING3_ANSI) }, + { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING3_ISO) }, + { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING3_JIS) }, + { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING4_ANSI) }, + { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING4_ISO) }, + { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING4_JIS) }, + { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING4A_ANSI) }, + { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING4A_ISO) }, + { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING4A_JIS) }, + { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING5_ANSI) }, + { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING5_ISO) }, + { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING5_JIS) }, + { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING5A_ANSI) }, + { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING5A_ISO) }, + { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING5A_JIS) }, + { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING6_ANSI) }, + { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING6_ISO) }, + { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING6_JIS) }, + { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING6A_ANSI) }, + { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING6A_ISO) }, + { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING6A_JIS) }, + { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING7_ANSI) }, + { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING7_ISO) }, + { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, 
USB_DEVICE_ID_APPLE_WELLSPRING7_JIS) }, + { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING7A_ANSI) }, + { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING7A_ISO) }, + { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING7A_JIS) }, + { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_FOUNTAIN_TP_ONLY) }, + { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER1_TP_ONLY) }, + { } +}; + +static bool hid_match_one_id(struct hid_device *hdev, + const struct hid_device_id *id) +{ + return (id->bus == HID_BUS_ANY || id->bus == hdev->bus) && +#if LINUX_VERSION_IS_GEQ(3,8,0) + (id->group == HID_GROUP_ANY || id->group == hdev->group) && +#endif + (id->vendor == HID_ANY_ID || id->vendor == hdev->vendor) && + (id->product == HID_ANY_ID || id->product == hdev->product); +} + +#define hid_match_id LINUX_BACKPORT(hid_match_id) +static const struct hid_device_id * +hid_match_id(struct hid_device *hdev, const struct hid_device_id *id) +{ + for (; id->bus; id++) + if (hid_match_one_id(hdev, id)) + return id; + + return NULL; +} + +bool hid_ignore(struct hid_device *hdev) +{ + if (hdev->quirks & HID_QUIRK_NO_IGNORE) + return false; + if (hdev->quirks & HID_QUIRK_IGNORE) + return true; + + switch (hdev->vendor) { + case USB_VENDOR_ID_CODEMERCS: + /* ignore all Code Mercenaries IOWarrior devices */ + if (hdev->product >= USB_DEVICE_ID_CODEMERCS_IOW_FIRST && + hdev->product <= USB_DEVICE_ID_CODEMERCS_IOW_LAST) + return true; + break; + case USB_VENDOR_ID_LOGITECH: + if (hdev->product >= USB_DEVICE_ID_LOGITECH_HARMONY_FIRST && + hdev->product <= USB_DEVICE_ID_LOGITECH_HARMONY_LAST) + return true; + /* + * The Keene FM transmitter USB device has the same USB ID as + * the Logitech AudioHub Speaker, but it should ignore the hid. + * Check if the name is that of the Keene device. + * For reference: the name of the AudioHub is + * "HOLTEK AudioHub Speaker". + */ + if (hdev->product == USB_DEVICE_ID_LOGITECH_AUDIOHUB && + !strcmp(hdev->name, "HOLTEK B-LINK USB Audio ")) + return true; + break; + case USB_VENDOR_ID_SOUNDGRAPH: + if (hdev->product >= USB_DEVICE_ID_SOUNDGRAPH_IMON_FIRST && + hdev->product <= USB_DEVICE_ID_SOUNDGRAPH_IMON_LAST) + return true; + break; + case USB_VENDOR_ID_HANWANG: + if (hdev->product >= USB_DEVICE_ID_HANWANG_TABLET_FIRST && + hdev->product <= USB_DEVICE_ID_HANWANG_TABLET_LAST) + return true; + break; + case USB_VENDOR_ID_JESS: + if (hdev->product == USB_DEVICE_ID_JESS_YUREX && + hdev->type == HID_TYPE_USBNONE) + return true; + break; + case USB_VENDOR_ID_DWAV: + /* These are handled by usbtouchscreen. hdev->type is probably + * HID_TYPE_USBNONE, but we say !HID_TYPE_USBMOUSE to match + * usbtouchscreen. 
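+		 * In other words, every interface type except the mouse
+		 * one is ignored here, matching what usbtouchscreen claims.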
*/ + if ((hdev->product == USB_DEVICE_ID_EGALAX_TOUCHCONTROLLER || + hdev->product == USB_DEVICE_ID_DWAV_TOUCHCONTROLLER) && + hdev->type != HID_TYPE_USBMOUSE) + return true; + break; + } + + if (hdev->type == HID_TYPE_USBMOUSE && + hid_match_id(hdev, hid_mouse_ignore_list)) + return true; + + return !!hid_match_id(hdev, hid_ignore_list); +} +EXPORT_SYMBOL_GPL(hid_ignore); + +/** + * prandom_bytes - get the requested number of pseudo-random bytes + * @buf: where to copy the pseudo-random bytes to + * @bytes: the requested number of bytes + */ +void prandom_bytes(void *buf, int bytes) +{ + unsigned char *p = buf; + int i; + + for (i = 0; i < round_down(bytes, sizeof(u32)); i += sizeof(u32)) { + u32 random = random32(); + int j; + + for (j = 0; j < sizeof(u32); j++) { + p[i + j] = random; + random >>= BITS_PER_BYTE; + } + } + + if (i < bytes) { + u32 random = random32(); + + for (; i < bytes; i++) { + p[i] = random; + random >>= BITS_PER_BYTE; + } + } +} +EXPORT_SYMBOL_GPL(prandom_bytes); + +#ifdef CONFIG_OF +/** + * of_property_read_u8_array - Find and read an array of u8 from a property. + * + * @np: device node from which the property value is to be read. + * @propname: name of the property to be searched. + * @out_values: pointer to return value, modified only if return value is 0. + * @sz: number of array elements to read + * + * Search for a property in a device node and read 8-bit value(s) from + * it. Returns 0 on success, -EINVAL if the property does not exist, + * -ENODATA if property does not have a value, and -EOVERFLOW if the + * property data isn't large enough. + * + * dts entry of array should be like: + * property = /bits/ 8 <0x50 0x60 0x70>; + * + * The out_values is modified only if a valid u8 value can be decoded. + */ +int of_property_read_u8_array(const struct device_node *np, + const char *propname, u8 *out_values, size_t sz) +{ + const u8 *val = of_find_property_value_of_size(np, propname, + (sz * sizeof(*out_values))); + + if (IS_ERR(val)) + return PTR_ERR(val); + + while (sz--) + *out_values++ = *val++; + return 0; +} +EXPORT_SYMBOL_GPL(of_property_read_u8_array); +#endif /* CONFIG_OF */ + +#ifdef CONFIG_PCI_IOV +/** + * pci_sriov_set_totalvfs -- reduce the TotalVFs available + * @dev: the PCI PF device + * @numvfs: number that should be used for TotalVFs supported + * + * Should be called from PF driver's probe routine with + * device's mutex held. + * + * Returns 0 if PF is an SRIOV-capable device and + * value of numvfs valid. If not a PF return -ENOSYS; + * if numvfs is invalid return -EINVAL; + * if VFs already enabled, return -EBUSY. + */ +int pci_sriov_set_totalvfs(struct pci_dev *dev, u16 numvfs) +{ + if (!dev->is_physfn) + return -ENOSYS; + if (numvfs > dev->sriov->total_VFs) + return -EINVAL; + + /* Shouldn't change if VFs already enabled */ + if (dev->sriov->ctrl & PCI_SRIOV_CTRL_VFE) + return -EBUSY; + else + dev->sriov->driver_max_VFs = numvfs; + + return 0; +} +EXPORT_SYMBOL_GPL(pci_sriov_set_totalvfs); +#endif /* CONFIG_PCI_IOV */ diff --git a/compat/compat-3.9.c b/compat/compat-3.9.c new file mode 100644 index 0000000..93889b5 --- /dev/null +++ b/compat/compat-3.9.c @@ -0,0 +1,238 @@ +/* + * Copyright (c) 2013 Luis R. Rodriguez + * + * Backport functionality introduced in Linux 3.9. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
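+ *
+ * Provides, among others, devm_ioremap_resource(), the
+ * eth_{prepare,commit}_mac_addr_change() helpers, the sg_page_iter API
+ * and the sg_miter_* mapping iterators for kernels older than 3.9.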
+ */
+
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+
+void __iomem *devm_ioremap_resource(struct device *dev, struct resource *res)
+{
+	void __iomem *dest_ptr;
+
+	/* Use the pre-3.9 helper here; it returns NULL on failure. */
+	dest_ptr = devm_request_and_ioremap(dev, res);
+	if (!dest_ptr)
+		return (void __iomem *)ERR_PTR(-ENOMEM);
+	return dest_ptr;
+}
+EXPORT_SYMBOL_GPL(devm_ioremap_resource);
+
+/**
+ * eth_prepare_mac_addr_change - prepare for mac change
+ * @dev: network device
+ * @p: socket address
+ */
+int eth_prepare_mac_addr_change(struct net_device *dev, void *p)
+{
+	struct sockaddr *addr = p;
+
+	if (!(dev->priv_flags & IFF_LIVE_ADDR_CHANGE) && netif_running(dev))
+		return -EBUSY;
+	if (!is_valid_ether_addr(addr->sa_data))
+		return -EADDRNOTAVAIL;
+	return 0;
+}
+EXPORT_SYMBOL_GPL(eth_prepare_mac_addr_change);
+
+/**
+ * eth_commit_mac_addr_change - commit mac change
+ * @dev: network device
+ * @p: socket address
+ */
+void eth_commit_mac_addr_change(struct net_device *dev, void *p)
+{
+	struct sockaddr *addr = p;
+
+	memcpy(dev->dev_addr, addr->sa_data, ETH_ALEN);
+}
+EXPORT_SYMBOL_GPL(eth_commit_mac_addr_change);
+
+void inet_frag_maybe_warn_overflow(struct inet_frag_queue *q,
+				   const char *prefix)
+{
+	static const char msg[] = "inet_frag_find: Fragment hash bucket"
+		" list length grew over limit " __stringify(INETFRAGS_MAXDEPTH)
+		". Dropping fragment.\n";
+
+	if (PTR_ERR(q) == -ENOBUFS)
+		LIMIT_NETDEBUG(KERN_WARNING "%s%s", prefix, msg);
+}
+EXPORT_SYMBOL_GPL(inet_frag_maybe_warn_overflow);
+
+void __sg_page_iter_start(struct sg_page_iter *piter,
+			  struct scatterlist *sglist, unsigned int nents,
+			  unsigned long pgoffset)
+{
+	piter->__pg_advance = 0;
+	piter->__nents = nents;
+
+	piter->page = NULL;
+	piter->sg = sglist;
+	piter->sg_pgoffset = pgoffset;
+}
+EXPORT_SYMBOL_GPL(__sg_page_iter_start);
+
+static int sg_page_count(struct scatterlist *sg)
+{
+	return PAGE_ALIGN(sg->offset + sg->length) >> PAGE_SHIFT;
+}
+
+bool __sg_page_iter_next(struct sg_page_iter *piter)
+{
+	if (!piter->__nents || !piter->sg)
+		return false;
+
+	piter->sg_pgoffset += piter->__pg_advance;
+	piter->__pg_advance = 1;
+
+	while (piter->sg_pgoffset >= sg_page_count(piter->sg)) {
+		piter->sg_pgoffset -= sg_page_count(piter->sg);
+		piter->sg = sg_next(piter->sg);
+		if (!--piter->__nents || !piter->sg)
+			return false;
+	}
+	piter->page = nth_page(sg_page(piter->sg), piter->sg_pgoffset);
+
+	return true;
+}
+EXPORT_SYMBOL_GPL(__sg_page_iter_next);
+
+static bool sg_miter_get_next_page(struct sg_mapping_iter *miter)
+{
+	if (!miter->__remaining) {
+		struct scatterlist *sg;
+		unsigned long pgoffset;
+
+		if (!__sg_page_iter_next(&miter->piter))
+			return false;
+
+		sg = miter->piter.sg;
+		pgoffset = miter->piter.sg_pgoffset;
+
+		miter->__offset = pgoffset ? 0 : sg->offset;
+		miter->__remaining = sg->offset + sg->length -
+				(pgoffset << PAGE_SHIFT) - miter->__offset;
+		miter->__remaining = min_t(unsigned long, miter->__remaining,
+					   PAGE_SIZE - miter->__offset);
+	}
+
+	return true;
+}
+
+/**
+ * sg_miter_start - start mapping iteration over a sg list
+ * @miter: sg mapping iter to be started
+ * @sgl: sg list to iterate over
+ * @nents: number of sg entries
+ * @flags: sg iterator flags (SG_MITER_*)
+ *
+ * Description:
+ *   Starts mapping iterator @miter.
+ *
+ * Context:
+ *   Don't care.
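+ *
+ * Example usage (illustrative sketch only; consume() stands for any
+ * caller-supplied sink):
+ *
+ *	struct sg_mapping_iter miter;
+ *
+ *	sg_miter_start(&miter, sgl, nents, SG_MITER_FROM_SG);
+ *	while (sg_miter_next(&miter))
+ *		consume(miter.addr, miter.length);
+ *	sg_miter_stop(&miter);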
+ */ +void backport_sg_miter_start(struct sg_mapping_iter *miter, struct scatterlist *sgl, + unsigned int nents, unsigned int flags) +{ + memset(miter, 0, sizeof(struct sg_mapping_iter)); + + __sg_page_iter_start(&miter->piter, sgl, nents, 0); + WARN_ON(!(flags & (SG_MITER_TO_SG | SG_MITER_FROM_SG))); + miter->__flags = flags; +} +EXPORT_SYMBOL_GPL(backport_sg_miter_start); + +/** + * sg_miter_next - proceed mapping iterator to the next mapping + * @miter: sg mapping iter to proceed + * + * Description: + * Proceeds @miter to the next mapping. @miter should have been started + * using sg_miter_start(). On successful return, @miter->page, + * @miter->addr and @miter->length point to the current mapping. + * + * Context: + * Preemption disabled if SG_MITER_ATOMIC. Preemption must stay disabled + * till @miter is stopped. May sleep if !SG_MITER_ATOMIC. + * + * Returns: + * true if @miter contains the next mapping. false if end of sg + * list is reached. + */ +bool backport_sg_miter_next(struct sg_mapping_iter *miter) +{ + sg_miter_stop(miter); + + /* + * Get to the next page if necessary. + * __remaining, __offset is adjusted by sg_miter_stop + */ + if (!sg_miter_get_next_page(miter)) + return false; + + miter->page = sg_page_iter_page(&miter->piter); + miter->consumed = miter->length = miter->__remaining; + + if (miter->__flags & SG_MITER_ATOMIC) + miter->addr = kmap_atomic(miter->page) + miter->__offset; + else + miter->addr = kmap(miter->page) + miter->__offset; + + return true; +} +EXPORT_SYMBOL_GPL(backport_sg_miter_next); + +/** + * sg_miter_stop - stop mapping iteration + * @miter: sg mapping iter to be stopped + * + * Description: + * Stops mapping iterator @miter. @miter should have been started + * using sg_miter_start(). A stopped iteration can be resumed by + * calling sg_miter_next() on it. This is useful when resources (kmap) + * need to be released during iteration. + * + * Context: + * Preemption disabled if the SG_MITER_ATOMIC is set. Don't care + * otherwise. + */ +void backport_sg_miter_stop(struct sg_mapping_iter *miter) +{ + WARN_ON(miter->consumed > miter->length); + + /* drop resources from the last iteration */ + if (miter->addr) { + miter->__offset += miter->consumed; + miter->__remaining -= miter->consumed; + + if ((miter->__flags & SG_MITER_TO_SG) && + !PageSlab(miter->page)) + flush_kernel_dcache_page(miter->page); + + if (miter->__flags & SG_MITER_ATOMIC) { + WARN_ON_ONCE(preemptible()); + kunmap_atomic(miter->addr); + } else + kunmap(miter->page); + + miter->page = NULL; + miter->addr = NULL; + miter->length = 0; + miter->consumed = 0; + } +} +EXPORT_SYMBOL_GPL(backport_sg_miter_stop); diff --git a/compat/crypto-ccm.c b/compat/crypto-ccm.c new file mode 100644 index 0000000..f208c94 --- /dev/null +++ b/compat/crypto-ccm.c @@ -0,0 +1,1073 @@ +/* + * CCM: Counter with CBC-MAC + * + * (C) Copyright IBM Corp. 2007 - Joy Latten + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the Free + * Software Foundation; either version 2 of the License, or (at your option) + * any later version. + * + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#if LINUX_VERSION_IS_LESS(3,13,0) +/* consider properly backporting this? 
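+ *
+ * crypto_memneq() is a constant-time comparison: unlike memcmp() it does
+ * not return early at the first differing byte, so its run time leaks
+ * nothing about how much of an authentication tag matched.  The barrier()
+ * calls below stand in for OPTIMIZER_HIDE_VAR() on kernels without it.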
*/ +static int crypto_memneq(const void *a, const void *b, size_t size) +{ + unsigned long neq = 0; + +#if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) + while (size >= sizeof(unsigned long)) { + neq |= *(unsigned long *)a ^ *(unsigned long *)b; + /* OPTIMIZER_HIDE_VAR(neq); */ + barrier(); + a += sizeof(unsigned long); + b += sizeof(unsigned long); + size -= sizeof(unsigned long); + } +#endif /* CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS */ + while (size > 0) { + neq |= *(unsigned char *)a ^ *(unsigned char *)b; + /* OPTIMIZER_HIDE_VAR(neq); */ + barrier(); + a += 1; + b += 1; + size -= 1; + } + return neq != 0UL ? 1 : 0; +} +#endif + +/* from internal.h */ +struct crypto_alg *crypto_alg_mod_lookup(const char *name, u32 type, u32 mask); + +struct ccm_instance_ctx { + struct crypto_skcipher_spawn ctr; + struct crypto_ahash_spawn mac; +}; + +struct crypto_ccm_ctx { + struct crypto_ahash *mac; + struct crypto_skcipher *ctr; +}; + +struct crypto_rfc4309_ctx { + struct crypto_aead *child; + u8 nonce[3]; +}; + +struct crypto_rfc4309_req_ctx { + struct scatterlist src[3]; + struct scatterlist dst[3]; + struct aead_request subreq; +}; + +struct crypto_ccm_req_priv_ctx { + u8 odata[16]; + u8 idata[16]; + u8 auth_tag[16]; + u32 flags; + struct scatterlist src[3]; + struct scatterlist dst[3]; + struct skcipher_request skreq; +}; + +struct cbcmac_tfm_ctx { + struct crypto_cipher *child; +}; + +struct cbcmac_desc_ctx { + unsigned int len; +}; + +static inline struct crypto_ccm_req_priv_ctx *crypto_ccm_reqctx( + struct aead_request *req) +{ + unsigned long align = crypto_aead_alignmask(crypto_aead_reqtfm(req)); + + return (void *)PTR_ALIGN((u8 *)aead_request_ctx(req), align + 1); +} + +static int set_msg_len(u8 *block, unsigned int msglen, int csize) +{ + __be32 data; + + memset(block, 0, csize); + block += csize; + + if (csize >= 4) + csize = 4; + else if (msglen > (1 << (8 * csize))) + return -EOVERFLOW; + + data = cpu_to_be32(msglen); + memcpy(block - csize, (u8 *)&data + 4 - csize, csize); + + return 0; +} + +static int crypto_ccm_setkey(struct crypto_aead *aead, const u8 *key, + unsigned int keylen) +{ + struct crypto_ccm_ctx *ctx = crypto_aead_ctx(aead); + struct crypto_skcipher *ctr = ctx->ctr; + struct crypto_ahash *mac = ctx->mac; + int err = 0; + + crypto_skcipher_clear_flags(ctr, CRYPTO_TFM_REQ_MASK); + crypto_skcipher_set_flags(ctr, crypto_aead_get_flags(aead) & + CRYPTO_TFM_REQ_MASK); + err = crypto_skcipher_setkey(ctr, key, keylen); + crypto_aead_set_flags(aead, crypto_skcipher_get_flags(ctr) & + CRYPTO_TFM_RES_MASK); + if (err) + goto out; + + crypto_ahash_clear_flags(mac, CRYPTO_TFM_REQ_MASK); + crypto_ahash_set_flags(mac, crypto_aead_get_flags(aead) & + CRYPTO_TFM_REQ_MASK); + err = crypto_ahash_setkey(mac, key, keylen); + crypto_aead_set_flags(aead, crypto_ahash_get_flags(mac) & + CRYPTO_TFM_RES_MASK); + +out: + return err; +} + +static int crypto_ccm_setauthsize(struct crypto_aead *tfm, + unsigned int authsize) +{ + switch (authsize) { + case 4: + case 6: + case 8: + case 10: + case 12: + case 14: + case 16: + break; + default: + return -EINVAL; + } + + return 0; +} + +static int format_input(u8 *info, struct aead_request *req, + unsigned int cryptlen) +{ + struct crypto_aead *aead = crypto_aead_reqtfm(req); + unsigned int lp = req->iv[0]; + unsigned int l = lp + 1; + unsigned int m; + + m = crypto_aead_authsize(aead); + + memcpy(info, req->iv, 16); + + /* format control info per RFC 3610 and + * NIST Special Publication 800-38C + */ + *info |= (8 * ((m - 2) / 2)); + if 
(req->assoclen) + *info |= 64; + + return set_msg_len(info + 16 - l, cryptlen, l); +} + +static int format_adata(u8 *adata, unsigned int a) +{ + int len = 0; + + /* add control info for associated data + * RFC 3610 and NIST Special Publication 800-38C + */ + if (a < 65280) { + *(__be16 *)adata = cpu_to_be16(a); + len = 2; + } else { + *(__be16 *)adata = cpu_to_be16(0xfffe); + *(__be32 *)&adata[2] = cpu_to_be32(a); + len = 6; + } + + return len; +} + +static int crypto_ccm_auth(struct aead_request *req, struct scatterlist *plain, + unsigned int cryptlen) +{ + struct crypto_ccm_req_priv_ctx *pctx = crypto_ccm_reqctx(req); + struct crypto_aead *aead = crypto_aead_reqtfm(req); + struct crypto_ccm_ctx *ctx = crypto_aead_ctx(aead); + AHASH_REQUEST_ON_STACK(ahreq, ctx->mac); + unsigned int assoclen = req->assoclen; + struct scatterlist sg[3]; + u8 *odata = pctx->odata; + u8 *idata = pctx->idata; + int ilen, err; + + /* format control data for input */ + err = format_input(odata, req, cryptlen); + if (err) + goto out; + + sg_init_table(sg, 3); + sg_set_buf(&sg[0], odata, 16); + + /* format associated data and compute into mac */ + if (assoclen) { + ilen = format_adata(idata, assoclen); + sg_set_buf(&sg[1], idata, ilen); + sg_chain(sg, 3, req->src); + } else { + ilen = 0; + sg_chain(sg, 2, req->src); + } + + ahash_request_set_tfm(ahreq, ctx->mac); + ahash_request_set_callback(ahreq, pctx->flags, NULL, NULL); + ahash_request_set_crypt(ahreq, sg, NULL, assoclen + ilen + 16); + err = crypto_ahash_init(ahreq); + if (err) + goto out; + err = crypto_ahash_update(ahreq); + if (err) + goto out; + + /* we need to pad the MAC input to a round multiple of the block size */ + ilen = 16 - (assoclen + ilen) % 16; + if (ilen < 16) { + memset(idata, 0, ilen); + sg_init_table(sg, 2); + sg_set_buf(&sg[0], idata, ilen); + if (plain) + sg_chain(sg, 2, plain); + plain = sg; + cryptlen += ilen; + } + + ahash_request_set_crypt(ahreq, plain, pctx->odata, cryptlen); + err = crypto_ahash_finup(ahreq); +out: + return err; +} + +static void crypto_ccm_encrypt_done(struct crypto_async_request *areq, int err) +{ + struct aead_request *req = areq->data; + struct crypto_aead *aead = crypto_aead_reqtfm(req); + struct crypto_ccm_req_priv_ctx *pctx = crypto_ccm_reqctx(req); + u8 *odata = pctx->odata; + + if (!err) + scatterwalk_map_and_copy(odata, req->dst, + req->assoclen + req->cryptlen, + crypto_aead_authsize(aead), 1); + aead_request_complete(req, err); +} + +static inline int crypto_ccm_check_iv(const u8 *iv) +{ + /* 2 <= L <= 8, so 1 <= L' <= 7. */ + if (1 > iv[0] || iv[0] > 7) + return -EINVAL; + + return 0; +} + +static int crypto_ccm_init_crypt(struct aead_request *req, u8 *tag) +{ + struct crypto_ccm_req_priv_ctx *pctx = crypto_ccm_reqctx(req); + struct scatterlist *sg; + u8 *iv = req->iv; + int err; + + err = crypto_ccm_check_iv(iv); + if (err) + return err; + + pctx->flags = aead_request_flags(req); + + /* Note: rfc 3610 and NIST 800-38C require counter of + * zero to encrypt auth tag. 
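+	 * iv[0] holds L', so the memset below clears the trailing
+	 * L = iv[0] + 1 bytes of the block, i.e. the entire counter
+	 * field of A_0.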
+ */ + memset(iv + 15 - iv[0], 0, iv[0] + 1); + + sg_init_table(pctx->src, 3); + sg_set_buf(pctx->src, tag, 16); + sg = scatterwalk_ffwd(pctx->src + 1, req->src, req->assoclen); + if (sg != pctx->src + 1) + sg_chain(pctx->src, 2, sg); + + if (req->src != req->dst) { + sg_init_table(pctx->dst, 3); + sg_set_buf(pctx->dst, tag, 16); + sg = scatterwalk_ffwd(pctx->dst + 1, req->dst, req->assoclen); + if (sg != pctx->dst + 1) + sg_chain(pctx->dst, 2, sg); + } + + return 0; +} + +static int crypto_ccm_encrypt(struct aead_request *req) +{ + struct crypto_aead *aead = crypto_aead_reqtfm(req); + struct crypto_ccm_ctx *ctx = crypto_aead_ctx(aead); + struct crypto_ccm_req_priv_ctx *pctx = crypto_ccm_reqctx(req); + struct skcipher_request *skreq = &pctx->skreq; + struct scatterlist *dst; + unsigned int cryptlen = req->cryptlen; + u8 *odata = pctx->odata; + u8 *iv = req->iv; + int err; + + err = crypto_ccm_init_crypt(req, odata); + if (err) + return err; + + err = crypto_ccm_auth(req, sg_next(pctx->src), cryptlen); + if (err) + return err; + + dst = pctx->src; + if (req->src != req->dst) + dst = pctx->dst; + + skcipher_request_set_tfm(skreq, ctx->ctr); + skcipher_request_set_callback(skreq, pctx->flags, + crypto_ccm_encrypt_done, req); + skcipher_request_set_crypt(skreq, pctx->src, dst, cryptlen + 16, iv); + err = crypto_skcipher_encrypt(skreq); + if (err) + return err; + + /* copy authtag to end of dst */ + scatterwalk_map_and_copy(odata, sg_next(dst), cryptlen, + crypto_aead_authsize(aead), 1); + return err; +} + +static void crypto_ccm_decrypt_done(struct crypto_async_request *areq, + int err) +{ + struct aead_request *req = areq->data; + struct crypto_ccm_req_priv_ctx *pctx = crypto_ccm_reqctx(req); + struct crypto_aead *aead = crypto_aead_reqtfm(req); + unsigned int authsize = crypto_aead_authsize(aead); + unsigned int cryptlen = req->cryptlen - authsize; + struct scatterlist *dst; + + pctx->flags = 0; + + dst = sg_next(req->src == req->dst ? 
pctx->src : pctx->dst); + + if (!err) { + err = crypto_ccm_auth(req, dst, cryptlen); + if (!err && crypto_memneq(pctx->auth_tag, pctx->odata, authsize)) + err = -EBADMSG; + } + aead_request_complete(req, err); +} + +static int crypto_ccm_decrypt(struct aead_request *req) +{ + struct crypto_aead *aead = crypto_aead_reqtfm(req); + struct crypto_ccm_ctx *ctx = crypto_aead_ctx(aead); + struct crypto_ccm_req_priv_ctx *pctx = crypto_ccm_reqctx(req); + struct skcipher_request *skreq = &pctx->skreq; + struct scatterlist *dst; + unsigned int authsize = crypto_aead_authsize(aead); + unsigned int cryptlen = req->cryptlen; + u8 *authtag = pctx->auth_tag; + u8 *odata = pctx->odata; + u8 *iv = req->iv; + int err; + + cryptlen -= authsize; + + err = crypto_ccm_init_crypt(req, authtag); + if (err) + return err; + + scatterwalk_map_and_copy(authtag, sg_next(pctx->src), cryptlen, + authsize, 0); + + dst = pctx->src; + if (req->src != req->dst) + dst = pctx->dst; + + skcipher_request_set_tfm(skreq, ctx->ctr); + skcipher_request_set_callback(skreq, pctx->flags, + crypto_ccm_decrypt_done, req); + skcipher_request_set_crypt(skreq, pctx->src, dst, cryptlen + 16, iv); + err = crypto_skcipher_decrypt(skreq); + if (err) + return err; + + err = crypto_ccm_auth(req, sg_next(dst), cryptlen); + if (err) + return err; + + /* verify */ + if (crypto_memneq(authtag, odata, authsize)) + return -EBADMSG; + + return err; +} + +static int crypto_ccm_init_tfm(struct crypto_aead *tfm) +{ + struct aead_instance *inst = aead_alg_instance(tfm); + struct ccm_instance_ctx *ictx = aead_instance_ctx(inst); + struct crypto_ccm_ctx *ctx = crypto_aead_ctx(tfm); + struct crypto_ahash *mac; + struct crypto_skcipher *ctr; + unsigned long align; + int err; + + mac = crypto_spawn_ahash(&ictx->mac); + if (IS_ERR(mac)) + return PTR_ERR(mac); + + ctr = crypto_spawn_skcipher(&ictx->ctr); + err = PTR_ERR(ctr); + if (IS_ERR(ctr)) + goto err_free_mac; + + ctx->mac = mac; + ctx->ctr = ctr; + + align = crypto_aead_alignmask(tfm); + align &= ~(crypto_tfm_ctx_alignment() - 1); + crypto_aead_set_reqsize( + tfm, + align + sizeof(struct crypto_ccm_req_priv_ctx) + + crypto_skcipher_reqsize(ctr)); + + return 0; + +err_free_mac: + crypto_free_ahash(mac); + return err; +} + +static void crypto_ccm_exit_tfm(struct crypto_aead *tfm) +{ + struct crypto_ccm_ctx *ctx = crypto_aead_ctx(tfm); + + crypto_free_ahash(ctx->mac); + crypto_free_skcipher(ctx->ctr); +} + +static void crypto_ccm_free(struct aead_instance *inst) +{ + struct ccm_instance_ctx *ctx = aead_instance_ctx(inst); + + crypto_drop_ahash(&ctx->mac); + crypto_drop_skcipher(&ctx->ctr); + kfree(inst); +} + +static int crypto_ccm_create_common(struct crypto_template *tmpl, + struct rtattr **tb, + const char *full_name, + const char *ctr_name, + const char *mac_name) +{ + struct crypto_attr_type *algt; + struct aead_instance *inst; + struct skcipher_alg *ctr; + struct crypto_alg *mac_alg; + struct hash_alg_common *mac; + struct ccm_instance_ctx *ictx; + int err; + + algt = crypto_get_attr_type(tb); + if (IS_ERR(algt)) + return PTR_ERR(algt); + + if ((algt->type ^ CRYPTO_ALG_TYPE_AEAD) & algt->mask) + return -EINVAL; + + mac_alg = crypto_find_alg(mac_name, &crypto_ahash_type, + CRYPTO_ALG_TYPE_HASH, + CRYPTO_ALG_TYPE_AHASH_MASK | + CRYPTO_ALG_ASYNC); + if (IS_ERR(mac_alg)) + return PTR_ERR(mac_alg); + + mac = __crypto_hash_alg_common(mac_alg); + err = -EINVAL; + if (mac->digestsize != 16) + goto out_put_mac; + + inst = kzalloc(sizeof(*inst) + sizeof(*ictx), GFP_KERNEL); + err = -ENOMEM; + if (!inst) + goto 
out_put_mac; + + ictx = aead_instance_ctx(inst); + err = crypto_init_ahash_spawn(&ictx->mac, mac, + aead_crypto_instance(inst)); + if (err) + goto err_free_inst; + + crypto_set_skcipher_spawn(&ictx->ctr, aead_crypto_instance(inst)); + err = crypto_grab_skcipher(&ictx->ctr, ctr_name, 0, + crypto_requires_sync(algt->type, + algt->mask)); + if (err) + goto err_drop_mac; + + ctr = crypto_spawn_skcipher_alg(&ictx->ctr); + + /* Not a stream cipher? */ + err = -EINVAL; + if (ctr->base.cra_blocksize != 1) + goto err_drop_ctr; + + /* We want the real thing! */ + if (crypto_skcipher_alg_ivsize(ctr) != 16) + goto err_drop_ctr; + + err = -ENAMETOOLONG; + if (snprintf(inst->alg.base.cra_driver_name, CRYPTO_MAX_ALG_NAME, + "ccm_base(%s,%s)", ctr->base.cra_driver_name, + mac->base.cra_driver_name) >= CRYPTO_MAX_ALG_NAME) + goto err_drop_ctr; + + memcpy(inst->alg.base.cra_name, full_name, CRYPTO_MAX_ALG_NAME); + + inst->alg.base.cra_flags = ctr->base.cra_flags & CRYPTO_ALG_ASYNC; + inst->alg.base.cra_priority = (mac->base.cra_priority + + ctr->base.cra_priority) / 2; + inst->alg.base.cra_blocksize = 1; + inst->alg.base.cra_alignmask = mac->base.cra_alignmask | + ctr->base.cra_alignmask; + inst->alg.ivsize = 16; + inst->alg.chunksize = crypto_skcipher_alg_chunksize(ctr); + inst->alg.maxauthsize = 16; + inst->alg.base.cra_ctxsize = sizeof(struct crypto_ccm_ctx); + inst->alg.init = crypto_ccm_init_tfm; + inst->alg.exit = crypto_ccm_exit_tfm; + inst->alg.setkey = crypto_ccm_setkey; + inst->alg.setauthsize = crypto_ccm_setauthsize; + inst->alg.encrypt = crypto_ccm_encrypt; + inst->alg.decrypt = crypto_ccm_decrypt; + + inst->free = crypto_ccm_free; + + err = aead_register_instance(tmpl, inst); + if (err) + goto err_drop_ctr; + +out_put_mac: + crypto_mod_put(mac_alg); + return err; + +err_drop_ctr: + crypto_drop_skcipher(&ictx->ctr); +err_drop_mac: + crypto_drop_ahash(&ictx->mac); +err_free_inst: + kfree(inst); + goto out_put_mac; +} + +static int crypto_ccm_create(struct crypto_template *tmpl, struct rtattr **tb) +{ + const char *cipher_name; + char ctr_name[CRYPTO_MAX_ALG_NAME]; + char mac_name[CRYPTO_MAX_ALG_NAME]; + char full_name[CRYPTO_MAX_ALG_NAME]; + + cipher_name = crypto_attr_alg_name(tb[1]); + if (IS_ERR(cipher_name)) + return PTR_ERR(cipher_name); + + if (snprintf(ctr_name, CRYPTO_MAX_ALG_NAME, "ctr(%s)", + cipher_name) >= CRYPTO_MAX_ALG_NAME) + return -ENAMETOOLONG; + + if (snprintf(mac_name, CRYPTO_MAX_ALG_NAME, "cbcmac(%s)", + cipher_name) >= CRYPTO_MAX_ALG_NAME) + return -ENAMETOOLONG; + + if (snprintf(full_name, CRYPTO_MAX_ALG_NAME, "ccm(%s)", cipher_name) >= + CRYPTO_MAX_ALG_NAME) + return -ENAMETOOLONG; + + return crypto_ccm_create_common(tmpl, tb, full_name, ctr_name, + mac_name); +} + +static struct crypto_template crypto_ccm_tmpl = { + .name = "ccm", + .create = crypto_ccm_create, + .module = THIS_MODULE, +}; + +static int crypto_ccm_base_create(struct crypto_template *tmpl, + struct rtattr **tb) +{ + const char *ctr_name; + const char *cipher_name; + char full_name[CRYPTO_MAX_ALG_NAME]; + + ctr_name = crypto_attr_alg_name(tb[1]); + if (IS_ERR(ctr_name)) + return PTR_ERR(ctr_name); + + cipher_name = crypto_attr_alg_name(tb[2]); + if (IS_ERR(cipher_name)) + return PTR_ERR(cipher_name); + + if (snprintf(full_name, CRYPTO_MAX_ALG_NAME, "ccm_base(%s,%s)", + ctr_name, cipher_name) >= CRYPTO_MAX_ALG_NAME) + return -ENAMETOOLONG; + + return crypto_ccm_create_common(tmpl, tb, full_name, ctr_name, + cipher_name); +} + +static struct crypto_template crypto_ccm_base_tmpl = { + .name = "ccm_base", + 
.create = crypto_ccm_base_create, + .module = THIS_MODULE, +}; + +static int crypto_rfc4309_setkey(struct crypto_aead *parent, const u8 *key, + unsigned int keylen) +{ + struct crypto_rfc4309_ctx *ctx = crypto_aead_ctx(parent); + struct crypto_aead *child = ctx->child; + int err; + + if (keylen < 3) + return -EINVAL; + + keylen -= 3; + memcpy(ctx->nonce, key + keylen, 3); + + crypto_aead_clear_flags(child, CRYPTO_TFM_REQ_MASK); + crypto_aead_set_flags(child, crypto_aead_get_flags(parent) & + CRYPTO_TFM_REQ_MASK); + err = crypto_aead_setkey(child, key, keylen); + crypto_aead_set_flags(parent, crypto_aead_get_flags(child) & + CRYPTO_TFM_RES_MASK); + + return err; +} + +static int crypto_rfc4309_setauthsize(struct crypto_aead *parent, + unsigned int authsize) +{ + struct crypto_rfc4309_ctx *ctx = crypto_aead_ctx(parent); + + switch (authsize) { + case 8: + case 12: + case 16: + break; + default: + return -EINVAL; + } + + return crypto_aead_setauthsize(ctx->child, authsize); +} + +static struct aead_request *crypto_rfc4309_crypt(struct aead_request *req) +{ + struct crypto_rfc4309_req_ctx *rctx = aead_request_ctx(req); + struct aead_request *subreq = &rctx->subreq; + struct crypto_aead *aead = crypto_aead_reqtfm(req); + struct crypto_rfc4309_ctx *ctx = crypto_aead_ctx(aead); + struct crypto_aead *child = ctx->child; + struct scatterlist *sg; + u8 *iv = PTR_ALIGN((u8 *)(subreq + 1) + crypto_aead_reqsize(child), + crypto_aead_alignmask(child) + 1); + + /* L' */ + iv[0] = 3; + + memcpy(iv + 1, ctx->nonce, 3); + memcpy(iv + 4, req->iv, 8); + + scatterwalk_map_and_copy(iv + 16, req->src, 0, req->assoclen - 8, 0); + + sg_init_table(rctx->src, 3); + sg_set_buf(rctx->src, iv + 16, req->assoclen - 8); + sg = scatterwalk_ffwd(rctx->src + 1, req->src, req->assoclen); + if (sg != rctx->src + 1) + sg_chain(rctx->src, 2, sg); + + if (req->src != req->dst) { + sg_init_table(rctx->dst, 3); + sg_set_buf(rctx->dst, iv + 16, req->assoclen - 8); + sg = scatterwalk_ffwd(rctx->dst + 1, req->dst, req->assoclen); + if (sg != rctx->dst + 1) + sg_chain(rctx->dst, 2, sg); + } + + aead_request_set_tfm(subreq, child); + aead_request_set_callback(subreq, req->base.flags, req->base.complete, + req->base.data); + aead_request_set_crypt(subreq, rctx->src, + req->src == req->dst ? 
rctx->src : rctx->dst, + req->cryptlen, iv); + aead_request_set_ad(subreq, req->assoclen - 8); + + return subreq; +} + +static int crypto_rfc4309_encrypt(struct aead_request *req) +{ + if (req->assoclen != 16 && req->assoclen != 20) + return -EINVAL; + + req = crypto_rfc4309_crypt(req); + + return crypto_aead_encrypt(req); +} + +static int crypto_rfc4309_decrypt(struct aead_request *req) +{ + if (req->assoclen != 16 && req->assoclen != 20) + return -EINVAL; + + req = crypto_rfc4309_crypt(req); + + return crypto_aead_decrypt(req); +} + +static int crypto_rfc4309_init_tfm(struct crypto_aead *tfm) +{ + struct aead_instance *inst = aead_alg_instance(tfm); + struct crypto_aead_spawn *spawn = aead_instance_ctx(inst); + struct crypto_rfc4309_ctx *ctx = crypto_aead_ctx(tfm); + struct crypto_aead *aead; + unsigned long align; + + aead = crypto_spawn_aead(spawn); + if (IS_ERR(aead)) + return PTR_ERR(aead); + + ctx->child = aead; + + align = crypto_aead_alignmask(aead); + align &= ~(crypto_tfm_ctx_alignment() - 1); + crypto_aead_set_reqsize( + tfm, + sizeof(struct crypto_rfc4309_req_ctx) + + ALIGN(crypto_aead_reqsize(aead), crypto_tfm_ctx_alignment()) + + align + 32); + + return 0; +} + +static void crypto_rfc4309_exit_tfm(struct crypto_aead *tfm) +{ + struct crypto_rfc4309_ctx *ctx = crypto_aead_ctx(tfm); + + crypto_free_aead(ctx->child); +} + +static void crypto_rfc4309_free(struct aead_instance *inst) +{ + crypto_drop_aead(aead_instance_ctx(inst)); + kfree(inst); +} + +static int crypto_rfc4309_create(struct crypto_template *tmpl, + struct rtattr **tb) +{ + struct crypto_attr_type *algt; + struct aead_instance *inst; + struct crypto_aead_spawn *spawn; + struct aead_alg *alg; + const char *ccm_name; + int err; + + algt = crypto_get_attr_type(tb); + if (IS_ERR(algt)) + return PTR_ERR(algt); + + if ((algt->type ^ CRYPTO_ALG_TYPE_AEAD) & algt->mask) + return -EINVAL; + + ccm_name = crypto_attr_alg_name(tb[1]); + if (IS_ERR(ccm_name)) + return PTR_ERR(ccm_name); + + inst = kzalloc(sizeof(*inst) + sizeof(*spawn), GFP_KERNEL); + if (!inst) + return -ENOMEM; + + spawn = aead_instance_ctx(inst); + crypto_set_aead_spawn(spawn, aead_crypto_instance(inst)); + err = crypto_grab_aead(spawn, ccm_name, 0, + crypto_requires_sync(algt->type, algt->mask)); + if (err) + goto out_free_inst; + + alg = crypto_spawn_aead_alg(spawn); + + err = -EINVAL; + + /* We only support 16-byte blocks. */ + if (crypto_aead_alg_ivsize(alg) != 16) + goto out_drop_alg; + + /* Not a stream cipher? 
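+	 * rfc4309 wraps a CCM instance whose CTR layer must behave as
+	 * a stream cipher, i.e. the wrapped AEAD must have
+	 * cra_blocksize == 1.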
*/ + if (alg->base.cra_blocksize != 1) + goto out_drop_alg; + + err = -ENAMETOOLONG; + if (snprintf(inst->alg.base.cra_name, CRYPTO_MAX_ALG_NAME, + "rfc4309(%s)", alg->base.cra_name) >= + CRYPTO_MAX_ALG_NAME || + snprintf(inst->alg.base.cra_driver_name, CRYPTO_MAX_ALG_NAME, + "rfc4309(%s)", alg->base.cra_driver_name) >= + CRYPTO_MAX_ALG_NAME) + goto out_drop_alg; + + inst->alg.base.cra_flags = alg->base.cra_flags & CRYPTO_ALG_ASYNC; + inst->alg.base.cra_priority = alg->base.cra_priority; + inst->alg.base.cra_blocksize = 1; + inst->alg.base.cra_alignmask = alg->base.cra_alignmask; + + inst->alg.ivsize = 8; + inst->alg.chunksize = crypto_aead_alg_chunksize(alg); + inst->alg.maxauthsize = 16; + + inst->alg.base.cra_ctxsize = sizeof(struct crypto_rfc4309_ctx); + + inst->alg.init = crypto_rfc4309_init_tfm; + inst->alg.exit = crypto_rfc4309_exit_tfm; + + inst->alg.setkey = crypto_rfc4309_setkey; + inst->alg.setauthsize = crypto_rfc4309_setauthsize; + inst->alg.encrypt = crypto_rfc4309_encrypt; + inst->alg.decrypt = crypto_rfc4309_decrypt; + + inst->free = crypto_rfc4309_free; + + err = aead_register_instance(tmpl, inst); + if (err) + goto out_drop_alg; + +out: + return err; + +out_drop_alg: + crypto_drop_aead(spawn); +out_free_inst: + kfree(inst); + goto out; +} + +static struct crypto_template crypto_rfc4309_tmpl = { + .name = "rfc4309", + .create = crypto_rfc4309_create, + .module = THIS_MODULE, +}; + +static int crypto_cbcmac_digest_setkey(struct crypto_shash *parent, + const u8 *inkey, unsigned int keylen) +{ + struct cbcmac_tfm_ctx *ctx = crypto_shash_ctx(parent); + + return crypto_cipher_setkey(ctx->child, inkey, keylen); +} + +static int crypto_cbcmac_digest_init(struct shash_desc *pdesc) +{ + struct cbcmac_desc_ctx *ctx = shash_desc_ctx(pdesc); + int bs = crypto_shash_digestsize(pdesc->tfm); + u8 *dg = (u8 *)ctx + crypto_shash_descsize(pdesc->tfm) - bs; + + ctx->len = 0; + memset(dg, 0, bs); + + return 0; +} + +static int crypto_cbcmac_digest_update(struct shash_desc *pdesc, const u8 *p, + unsigned int len) +{ + struct crypto_shash *parent = pdesc->tfm; + struct cbcmac_tfm_ctx *tctx = crypto_shash_ctx(parent); + struct cbcmac_desc_ctx *ctx = shash_desc_ctx(pdesc); + struct crypto_cipher *tfm = tctx->child; + int bs = crypto_shash_digestsize(parent); + u8 *dg = (u8 *)ctx + crypto_shash_descsize(parent) - bs; + + while (len > 0) { + unsigned int l = min(len, bs - ctx->len); + + crypto_xor(dg + ctx->len, p, l); + ctx->len +=l; + len -= l; + p += l; + + if (ctx->len == bs) { + crypto_cipher_encrypt_one(tfm, dg, dg); + ctx->len = 0; + } + } + + return 0; +} + +static int crypto_cbcmac_digest_final(struct shash_desc *pdesc, u8 *out) +{ + struct crypto_shash *parent = pdesc->tfm; + struct cbcmac_tfm_ctx *tctx = crypto_shash_ctx(parent); + struct cbcmac_desc_ctx *ctx = shash_desc_ctx(pdesc); + struct crypto_cipher *tfm = tctx->child; + int bs = crypto_shash_digestsize(parent); + u8 *dg = (u8 *)ctx + crypto_shash_descsize(parent) - bs; + + if (ctx->len) + crypto_cipher_encrypt_one(tfm, dg, dg); + + memcpy(out, dg, bs); + return 0; +} + +static int cbcmac_init_tfm(struct crypto_tfm *tfm) +{ + struct crypto_cipher *cipher; + struct crypto_instance *inst = (void *)tfm->__crt_alg; + struct crypto_spawn *spawn = crypto_instance_ctx(inst); + struct cbcmac_tfm_ctx *ctx = crypto_tfm_ctx(tfm); + + cipher = crypto_spawn_cipher(spawn); + if (IS_ERR(cipher)) + return PTR_ERR(cipher); + + ctx->child = cipher; + + return 0; +}; + +static void cbcmac_exit_tfm(struct crypto_tfm *tfm) +{ + struct cbcmac_tfm_ctx 
+
+static int cbcmac_init_tfm(struct crypto_tfm *tfm)
+{
+	struct crypto_cipher *cipher;
+	struct crypto_instance *inst = (void *)tfm->__crt_alg;
+	struct crypto_spawn *spawn = crypto_instance_ctx(inst);
+	struct cbcmac_tfm_ctx *ctx = crypto_tfm_ctx(tfm);
+
+	cipher = crypto_spawn_cipher(spawn);
+	if (IS_ERR(cipher))
+		return PTR_ERR(cipher);
+
+	ctx->child = cipher;
+
+	return 0;
+}
+
+static void cbcmac_exit_tfm(struct crypto_tfm *tfm)
+{
+	struct cbcmac_tfm_ctx *ctx = crypto_tfm_ctx(tfm);
+
+	crypto_free_cipher(ctx->child);
+}
+
+static int cbcmac_create(struct crypto_template *tmpl, struct rtattr **tb)
+{
+	struct shash_instance *inst;
+	struct crypto_alg *alg;
+	int err;
+
+	err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_SHASH);
+	if (err)
+		return err;
+
+	alg = crypto_get_attr_alg(tb, CRYPTO_ALG_TYPE_CIPHER,
+				  CRYPTO_ALG_TYPE_MASK);
+	if (IS_ERR(alg))
+		return PTR_ERR(alg);
+
+	inst = shash_alloc_instance("cbcmac", alg);
+	err = PTR_ERR(inst);
+	if (IS_ERR(inst))
+		goto out_put_alg;
+
+	err = crypto_init_spawn(shash_instance_ctx(inst), alg,
+				shash_crypto_instance(inst),
+				CRYPTO_ALG_TYPE_MASK);
+	if (err)
+		goto out_free_inst;
+
+	inst->alg.base.cra_priority = alg->cra_priority;
+	inst->alg.base.cra_blocksize = 1;
+
+	inst->alg.digestsize = alg->cra_blocksize;
+	inst->alg.descsize = ALIGN(sizeof(struct cbcmac_desc_ctx),
+				   alg->cra_alignmask + 1) +
+			     alg->cra_blocksize;
+
+	inst->alg.base.cra_ctxsize = sizeof(struct cbcmac_tfm_ctx);
+	inst->alg.base.cra_init = cbcmac_init_tfm;
+	inst->alg.base.cra_exit = cbcmac_exit_tfm;
+
+	inst->alg.init = crypto_cbcmac_digest_init;
+	inst->alg.update = crypto_cbcmac_digest_update;
+	inst->alg.final = crypto_cbcmac_digest_final;
+	inst->alg.setkey = crypto_cbcmac_digest_setkey;
+
+	err = shash_register_instance(tmpl, inst);
+
+out_free_inst:
+	if (err)
+		shash_free_instance(shash_crypto_instance(inst));
+
+out_put_alg:
+	crypto_mod_put(alg);
+	return err;
+}
+
+static struct crypto_template crypto_cbcmac_tmpl = {
+	.name = "cbcmac",
+	.create = cbcmac_create,
+	.free = shash_free_instance,
+	.module = THIS_MODULE,
+};
+
+int __init crypto_ccm_module_init(void)
+{
+	int err;
+
+	err = crypto_register_template(&crypto_cbcmac_tmpl);
+	if (err)
+		goto out;
+
+	err = crypto_register_template(&crypto_ccm_base_tmpl);
+	if (err)
+		goto out_undo_cbcmac;
+
+	err = crypto_register_template(&crypto_ccm_tmpl);
+	if (err)
+		goto out_undo_base;
+
+	err = crypto_register_template(&crypto_rfc4309_tmpl);
+	if (err)
+		goto out_undo_ccm;
+
+out:
+	return err;
+
+out_undo_ccm:
+	crypto_unregister_template(&crypto_ccm_tmpl);
+out_undo_base:
+	crypto_unregister_template(&crypto_ccm_base_tmpl);
+out_undo_cbcmac:
+	crypto_unregister_template(&crypto_cbcmac_tmpl);
+	goto out;
+}
+
+void __exit crypto_ccm_module_exit(void)
+{
+	crypto_unregister_template(&crypto_rfc4309_tmpl);
+	crypto_unregister_template(&crypto_ccm_tmpl);
+	crypto_unregister_template(&crypto_ccm_base_tmpl);
+	crypto_unregister_template(&crypto_cbcmac_tmpl);
+}
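Together with the "cbcmac", "ccm_base" and "ccm" templates registered by crypto_ccm_module_init() above, the "rfc4309" wrapper provides IPsec-style CCM nonce handling. A hedged sketch, not part of the patch, of how a consumer might instantiate it once this module is loaded; per RFC 4309 the last three key bytes are the implicit nonce salt, and the function name is hypothetical:

/* Illustrative only: allocate and key rfc4309(ccm(aes)). */
static int rfc4309_demo(const u8 *key_and_salt)
{
	struct crypto_aead *tfm;
	int err;

	tfm = crypto_alloc_aead("rfc4309(ccm(aes))", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	/* 16-byte AES key followed by the 3-byte salt */
	err = crypto_aead_setkey(tfm, key_and_salt, 16 + 3);
	if (!err)
		err = crypto_aead_setauthsize(tfm, 16);

	/* ... build an aead_request with the 8-byte per-packet IV and
	 * 16 or 20 bytes of AAD (the assoclen values accepted above),
	 * then call crypto_aead_encrypt() ... */

	crypto_free_aead(tfm);
	return err;
}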
diff --git a/compat/crypto-skcipher.c b/compat/crypto-skcipher.c
new file mode 100644
index 0000000..4359a5e
--- /dev/null
+++ b/compat/crypto-skcipher.c
@@ -0,0 +1,997 @@
+/*
+ * Symmetric key cipher operations.
+ *
+ * Generic encrypt/decrypt wrapper for ciphers, handles operations across
+ * multiple page boundaries by using temporary blocks. In user context,
+ * the kernel is given a chance to schedule us once per page.
+ *
+ * Copyright (c) 2015 Herbert Xu
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option)
+ * any later version.
+ *
+ */
+
+#include <crypto/internal/aead.h>
+#include <crypto/internal/skcipher.h>
+#include <crypto/scatterwalk.h>
+#include <linux/bug.h>
+#include <linux/cryptouser.h>
+#include <linux/list.h>
+#include <linux/module.h>
+#include <linux/rtnetlink.h>
+#include <linux/seq_file.h>
+#include <linux/slab.h>
+#include <net/netlink.h>
+
+struct crypto_tfm *__crypto_alloc_tfm(struct crypto_alg *alg, u32 type,
+				      u32 mask);
+
+void *crypto_alloc_tfm(const char *alg_name,
+		       const struct crypto_type *frontend, u32 type, u32 mask);
+
+struct crypto_alg *crypto_mod_get(struct crypto_alg *alg);
+
+static inline void *crypto_skcipher_ctx(struct crypto_skcipher *tfm)
+{
+	return crypto_tfm_ctx(&tfm->base);
+}
+
+static inline void *skcipher_request_ctx(struct skcipher_request *req)
+{
+	return req->__ctx;
+}
+
+static inline u32 skcipher_request_flags(struct skcipher_request *req)
+{
+	return req->base.flags;
+}
+
+enum {
+	SKCIPHER_WALK_PHYS = 1 << 0,
+	SKCIPHER_WALK_SLOW = 1 << 1,
+	SKCIPHER_WALK_COPY = 1 << 2,
+	SKCIPHER_WALK_DIFF = 1 << 3,
+	SKCIPHER_WALK_SLEEP = 1 << 4,
+};
+
+struct skcipher_walk_buffer {
+	struct list_head entry;
+	struct scatter_walk dst;
+	unsigned int len;
+	u8 *data;
+	u8 buffer[];
+};
+
+static int skcipher_walk_next(struct skcipher_walk *walk);
+
+static inline void skcipher_unmap(struct scatter_walk *walk, void *vaddr)
+{
+	if (PageHighMem(scatterwalk_page(walk)))
+		kunmap_atomic(vaddr);
+}
+
+static inline void *skcipher_map(struct scatter_walk *walk)
+{
+	struct page *page = scatterwalk_page(walk);
+
+	return (PageHighMem(page) ? kmap_atomic(page) : page_address(page)) +
+	       offset_in_page(walk->offset);
+}
+
+static inline void skcipher_map_src(struct skcipher_walk *walk)
+{
+	walk->src.virt.addr = skcipher_map(&walk->in);
+}
+
+static inline void skcipher_map_dst(struct skcipher_walk *walk)
+{
+	walk->dst.virt.addr = skcipher_map(&walk->out);
+}
+
+static inline void skcipher_unmap_src(struct skcipher_walk *walk)
+{
+	skcipher_unmap(&walk->in, walk->src.virt.addr);
+}
+
+static inline void skcipher_unmap_dst(struct skcipher_walk *walk)
+{
+	skcipher_unmap(&walk->out, walk->dst.virt.addr);
+}
+
+static inline gfp_t skcipher_walk_gfp(struct skcipher_walk *walk)
+{
+	return walk->flags & SKCIPHER_WALK_SLEEP ? GFP_KERNEL : GFP_ATOMIC;
+}
+
+/* Get a spot of the specified length that does not straddle a page.
+ * The caller needs to ensure that there is enough space for this operation.
+ */
+static inline u8 *skcipher_get_spot(u8 *start, unsigned int len)
+{
+	u8 *end_page = (u8 *)(((unsigned long)(start + len - 1)) & PAGE_MASK);
+
+	return max(start, end_page);
+}
+
+static int skcipher_done_slow(struct skcipher_walk *walk, unsigned int bsize)
+{
+	u8 *addr;
+
+	addr = (u8 *)ALIGN((unsigned long)walk->buffer, walk->alignmask + 1);
+	addr = skcipher_get_spot(addr, bsize);
+	scatterwalk_copychunks(addr, &walk->out, bsize,
+			       (walk->flags & SKCIPHER_WALK_PHYS) ?
2 : 1); + return 0; +} + +int skcipher_walk_done(struct skcipher_walk *walk, int err) +{ + unsigned int n = walk->nbytes - err; + unsigned int nbytes; + + nbytes = walk->total - n; + + if (unlikely(err < 0)) { + nbytes = 0; + n = 0; + } else if (likely(!(walk->flags & (SKCIPHER_WALK_PHYS | + SKCIPHER_WALK_SLOW | + SKCIPHER_WALK_COPY | + SKCIPHER_WALK_DIFF)))) { +unmap_src: + skcipher_unmap_src(walk); + } else if (walk->flags & SKCIPHER_WALK_DIFF) { + skcipher_unmap_dst(walk); + goto unmap_src; + } else if (walk->flags & SKCIPHER_WALK_COPY) { + skcipher_map_dst(walk); + memcpy(walk->dst.virt.addr, walk->page, n); + skcipher_unmap_dst(walk); + } else if (unlikely(walk->flags & SKCIPHER_WALK_SLOW)) { + if (WARN_ON(err)) { + err = -EINVAL; + nbytes = 0; + } else + n = skcipher_done_slow(walk, n); + } + + if (err > 0) + err = 0; + + walk->total = nbytes; + walk->nbytes = nbytes; + + scatterwalk_advance(&walk->in, n); + scatterwalk_advance(&walk->out, n); + scatterwalk_done(&walk->in, 0, nbytes); + scatterwalk_done(&walk->out, 1, nbytes); + + if (nbytes) { + crypto_yield(walk->flags & SKCIPHER_WALK_SLEEP ? + CRYPTO_TFM_REQ_MAY_SLEEP : 0); + return skcipher_walk_next(walk); + } + + /* Short-circuit for the common/fast path. */ + if (!((unsigned long)walk->buffer | (unsigned long)walk->page)) + goto out; + + if (walk->flags & SKCIPHER_WALK_PHYS) + goto out; + + if (walk->iv != walk->oiv) + memcpy(walk->oiv, walk->iv, walk->ivsize); + if (walk->buffer != walk->page) + kfree(walk->buffer); + if (walk->page) + free_page((unsigned long)walk->page); + +out: + return err; +} +EXPORT_SYMBOL_GPL(skcipher_walk_done); + +void skcipher_walk_complete(struct skcipher_walk *walk, int err) +{ + struct skcipher_walk_buffer *p, *tmp; + + list_for_each_entry_safe(p, tmp, &walk->buffers, entry) { + u8 *data; + + if (err) + goto done; + + data = p->data; + if (!data) { + data = PTR_ALIGN(&p->buffer[0], walk->alignmask + 1); + data = skcipher_get_spot(data, walk->stride); + } + + scatterwalk_copychunks(data, &p->dst, p->len, 1); + + if (offset_in_page(p->data) + p->len + walk->stride > + PAGE_SIZE) + free_page((unsigned long)p->data); + +done: + list_del(&p->entry); + kfree(p); + } + + if (!err && walk->iv != walk->oiv) + memcpy(walk->oiv, walk->iv, walk->ivsize); + if (walk->buffer != walk->page) + kfree(walk->buffer); + if (walk->page) + free_page((unsigned long)walk->page); +} +EXPORT_SYMBOL_GPL(skcipher_walk_complete); + +static void skcipher_queue_write(struct skcipher_walk *walk, + struct skcipher_walk_buffer *p) +{ + p->dst = walk->out; + list_add_tail(&p->entry, &walk->buffers); +} + +static int skcipher_next_slow(struct skcipher_walk *walk, unsigned int bsize) +{ + bool phys = walk->flags & SKCIPHER_WALK_PHYS; + unsigned alignmask = walk->alignmask; + struct skcipher_walk_buffer *p; + unsigned a; + unsigned n; + u8 *buffer; + void *v; + + if (!phys) { + if (!walk->buffer) + walk->buffer = walk->page; + buffer = walk->buffer; + if (buffer) + goto ok; + } + + /* Start with the minimum alignment of kmalloc. */ + a = crypto_tfm_ctx_alignment() - 1; + n = bsize; + + if (phys) { + /* Calculate the minimum alignment of p->buffer. */ + a &= (sizeof(*p) ^ (sizeof(*p) - 1)) >> 1; + n += sizeof(*p); + } + + /* Minimum size to align p->buffer by alignmask. */ + n += alignmask & ~a; + + /* Minimum size to ensure p->buffer does not straddle a page. 
*/ + n += (bsize - 1) & ~(alignmask | a); + + v = kzalloc(n, skcipher_walk_gfp(walk)); + if (!v) + return skcipher_walk_done(walk, -ENOMEM); + + if (phys) { + p = v; + p->len = bsize; + skcipher_queue_write(walk, p); + buffer = p->buffer; + } else { + walk->buffer = v; + buffer = v; + } + +ok: + walk->dst.virt.addr = PTR_ALIGN(buffer, alignmask + 1); + walk->dst.virt.addr = skcipher_get_spot(walk->dst.virt.addr, bsize); + walk->src.virt.addr = walk->dst.virt.addr; + + scatterwalk_copychunks(walk->src.virt.addr, &walk->in, bsize, 0); + + walk->nbytes = bsize; + walk->flags |= SKCIPHER_WALK_SLOW; + + return 0; +} + +static int skcipher_next_copy(struct skcipher_walk *walk) +{ + struct skcipher_walk_buffer *p; + u8 *tmp = walk->page; + + skcipher_map_src(walk); + memcpy(tmp, walk->src.virt.addr, walk->nbytes); + skcipher_unmap_src(walk); + + walk->src.virt.addr = tmp; + walk->dst.virt.addr = tmp; + + if (!(walk->flags & SKCIPHER_WALK_PHYS)) + return 0; + + p = kmalloc(sizeof(*p), skcipher_walk_gfp(walk)); + if (!p) + return -ENOMEM; + + p->data = walk->page; + p->len = walk->nbytes; + skcipher_queue_write(walk, p); + + if (offset_in_page(walk->page) + walk->nbytes + walk->stride > + PAGE_SIZE) + walk->page = NULL; + else + walk->page += walk->nbytes; + + return 0; +} + +static int skcipher_next_fast(struct skcipher_walk *walk) +{ + unsigned long diff; + + walk->src.phys.page = scatterwalk_page(&walk->in); + walk->src.phys.offset = offset_in_page(walk->in.offset); + walk->dst.phys.page = scatterwalk_page(&walk->out); + walk->dst.phys.offset = offset_in_page(walk->out.offset); + + if (walk->flags & SKCIPHER_WALK_PHYS) + return 0; + + diff = walk->src.phys.offset - walk->dst.phys.offset; + diff |= walk->src.virt.page - walk->dst.virt.page; + + skcipher_map_src(walk); + walk->dst.virt.addr = walk->src.virt.addr; + + if (diff) { + walk->flags |= SKCIPHER_WALK_DIFF; + skcipher_map_dst(walk); + } + + return 0; +} + +static int skcipher_walk_next(struct skcipher_walk *walk) +{ + unsigned int bsize; + unsigned int n; + int err; + + walk->flags &= ~(SKCIPHER_WALK_SLOW | SKCIPHER_WALK_COPY | + SKCIPHER_WALK_DIFF); + + n = walk->total; + bsize = min(walk->stride, max(n, walk->blocksize)); + n = scatterwalk_clamp(&walk->in, n); + n = scatterwalk_clamp(&walk->out, n); + + if (unlikely(n < bsize)) { + if (unlikely(walk->total < walk->blocksize)) + return skcipher_walk_done(walk, -EINVAL); + +slow_path: + err = skcipher_next_slow(walk, bsize); + goto set_phys_lowmem; + } + + if (unlikely((walk->in.offset | walk->out.offset) & walk->alignmask)) { + if (!walk->page) { + gfp_t gfp = skcipher_walk_gfp(walk); + + walk->page = (void *)__get_free_page(gfp); + if (!walk->page) + goto slow_path; + } + + walk->nbytes = min_t(unsigned, n, + PAGE_SIZE - offset_in_page(walk->page)); + walk->flags |= SKCIPHER_WALK_COPY; + err = skcipher_next_copy(walk); + goto set_phys_lowmem; + } + + walk->nbytes = n; + + return skcipher_next_fast(walk); + +set_phys_lowmem: + if (!err && (walk->flags & SKCIPHER_WALK_PHYS)) { + walk->src.phys.page = virt_to_page(walk->src.virt.addr); + walk->dst.phys.page = virt_to_page(walk->dst.virt.addr); + walk->src.phys.offset &= PAGE_SIZE - 1; + walk->dst.phys.offset &= PAGE_SIZE - 1; + } + return err; +} +EXPORT_SYMBOL_GPL(skcipher_walk_next); + +static int skcipher_copy_iv(struct skcipher_walk *walk) +{ + unsigned a = crypto_tfm_ctx_alignment() - 1; + unsigned alignmask = walk->alignmask; + unsigned ivsize = walk->ivsize; + unsigned bs = walk->stride; + unsigned aligned_bs; + unsigned size; + u8 
*iv;
+
+	aligned_bs = ALIGN(bs, alignmask + 1);
+
+	/* Minimum size to align buffer by alignmask. */
+	size = alignmask & ~a;
+
+	if (walk->flags & SKCIPHER_WALK_PHYS)
+		size += ivsize;
+	else {
+		size += aligned_bs + ivsize;
+
+		/* Minimum size to ensure buffer does not straddle a page. */
+		size += (bs - 1) & ~(alignmask | a);
+	}
+
+	walk->buffer = kmalloc(size, skcipher_walk_gfp(walk));
+	if (!walk->buffer)
+		return -ENOMEM;
+
+	iv = PTR_ALIGN(walk->buffer, alignmask + 1);
+	iv = skcipher_get_spot(iv, bs) + aligned_bs;
+
+	walk->iv = memcpy(iv, walk->iv, walk->ivsize);
+	return 0;
+}
+
+static int skcipher_walk_first(struct skcipher_walk *walk)
+{
+	walk->nbytes = 0;
+
+	if (WARN_ON_ONCE(in_irq()))
+		return -EDEADLK;
+
+	if (unlikely(!walk->total))
+		return 0;
+
+	walk->buffer = NULL;
+	if (unlikely(((unsigned long)walk->iv & walk->alignmask))) {
+		int err = skcipher_copy_iv(walk);
+		if (err)
+			return err;
+	}
+
+	walk->page = NULL;
+	walk->nbytes = walk->total;
+
+	return skcipher_walk_next(walk);
+}
+
+static int skcipher_walk_skcipher(struct skcipher_walk *walk,
+				  struct skcipher_request *req)
+{
+	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+
+	scatterwalk_start(&walk->in, req->src);
+	scatterwalk_start(&walk->out, req->dst);
+
+	walk->total = req->cryptlen;
+	walk->iv = req->iv;
+	walk->oiv = req->iv;
+
+	walk->flags &= ~SKCIPHER_WALK_SLEEP;
+	walk->flags |= req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ?
+		       SKCIPHER_WALK_SLEEP : 0;
+
+	walk->blocksize = crypto_skcipher_blocksize(tfm);
+	walk->stride = crypto_skcipher_walksize(tfm);
+	walk->ivsize = crypto_skcipher_ivsize(tfm);
+	walk->alignmask = crypto_skcipher_alignmask(tfm);
+
+	return skcipher_walk_first(walk);
+}
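The helpers above fill in a struct skcipher_walk from a request; cipher implementations then consume it in a loop, transforming the linear chunk exposed by each step and reporting the unprocessed remainder to skcipher_walk_done(). A hedged sketch, not part of the patch, of that canonical consumer loop (the actual cipher step is elided and the function name is hypothetical):

/* Illustrative only: the usual shape of an skcipher implementation
 * built on skcipher_walk_virt()/skcipher_walk_done(). */
static int demo_crypt(struct skcipher_request *req)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	unsigned int bsize = crypto_skcipher_blocksize(tfm);
	struct skcipher_walk walk;
	int err;

	err = skcipher_walk_virt(&walk, req, false);

	while (walk.nbytes > 0) {
		/* round down to a whole number of blocks */
		unsigned int n = walk.nbytes - (walk.nbytes % bsize);

		/* ... transform n bytes from walk.src.virt.addr into
		 * walk.dst.virt.addr, chaining through walk.iv ... */

		err = skcipher_walk_done(&walk, walk.nbytes - n);
	}

	return err;
}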
+
+int skcipher_walk_virt(struct skcipher_walk *walk,
+		       struct skcipher_request *req, bool atomic)
+{
+	int err;
+
+	walk->flags &= ~SKCIPHER_WALK_PHYS;
+
+	err = skcipher_walk_skcipher(walk, req);
+
+	walk->flags &= atomic ? ~SKCIPHER_WALK_SLEEP : ~0;
+
+	return err;
+}
+EXPORT_SYMBOL_GPL(skcipher_walk_virt);
+
+void skcipher_walk_atomise(struct skcipher_walk *walk)
+{
+	walk->flags &= ~SKCIPHER_WALK_SLEEP;
+}
+EXPORT_SYMBOL_GPL(skcipher_walk_atomise);
+
+int skcipher_walk_async(struct skcipher_walk *walk,
+			struct skcipher_request *req)
+{
+	walk->flags |= SKCIPHER_WALK_PHYS;
+
+	INIT_LIST_HEAD(&walk->buffers);
+
+	return skcipher_walk_skcipher(walk, req);
+}
+EXPORT_SYMBOL_GPL(skcipher_walk_async);
+
+static int skcipher_walk_aead_common(struct skcipher_walk *walk,
+				     struct aead_request *req, bool atomic)
+{
+	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
+	int err;
+
+	walk->flags &= ~SKCIPHER_WALK_PHYS;
+
+	scatterwalk_start(&walk->in, req->src);
+	scatterwalk_start(&walk->out, req->dst);
+
+	scatterwalk_copychunks(NULL, &walk->in, req->assoclen, 2);
+	scatterwalk_copychunks(NULL, &walk->out, req->assoclen, 2);
+
+	walk->iv = req->iv;
+	walk->oiv = req->iv;
+
+	if (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP)
+		walk->flags |= SKCIPHER_WALK_SLEEP;
+	else
+		walk->flags &= ~SKCIPHER_WALK_SLEEP;
+
+	walk->blocksize = crypto_aead_blocksize(tfm);
+	walk->stride = crypto_aead_chunksize(tfm);
+	walk->ivsize = crypto_aead_ivsize(tfm);
+	walk->alignmask = crypto_aead_alignmask(tfm);
+
+	err = skcipher_walk_first(walk);
+
+	if (atomic)
+		walk->flags &= ~SKCIPHER_WALK_SLEEP;
+
+	return err;
+}
+
+int skcipher_walk_aead(struct skcipher_walk *walk, struct aead_request *req,
+		       bool atomic)
+{
+	walk->total = req->cryptlen;
+
+	return skcipher_walk_aead_common(walk, req, atomic);
+}
+EXPORT_SYMBOL_GPL(skcipher_walk_aead);
+
+int skcipher_walk_aead_encrypt(struct skcipher_walk *walk,
+			       struct aead_request *req, bool atomic)
+{
+	walk->total = req->cryptlen;
+
+	return skcipher_walk_aead_common(walk, req, atomic);
+}
+EXPORT_SYMBOL_GPL(skcipher_walk_aead_encrypt);
+
+int skcipher_walk_aead_decrypt(struct skcipher_walk *walk,
+			       struct aead_request *req, bool atomic)
+{
+	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
+
+	walk->total = req->cryptlen - crypto_aead_authsize(tfm);
+
+	return skcipher_walk_aead_common(walk, req, atomic);
+}
+EXPORT_SYMBOL_GPL(skcipher_walk_aead_decrypt);
+
+static unsigned int crypto_skcipher_extsize(struct crypto_alg *alg)
+{
+	if (alg->cra_type == &crypto_blkcipher_type)
+		return sizeof(struct crypto_blkcipher *);
+
+	if (alg->cra_type == &crypto_ablkcipher_type ||
+	    alg->cra_type == &crypto_givcipher_type)
+		return sizeof(struct crypto_ablkcipher *);
+
+	return crypto_alg_extsize(alg);
+}
+
+static int skcipher_setkey_blkcipher(struct crypto_skcipher *tfm,
+				     const u8 *key, unsigned int keylen)
+{
+	struct crypto_blkcipher **ctx = crypto_skcipher_ctx(tfm);
+	struct crypto_blkcipher *blkcipher = *ctx;
+	int err;
+
+	crypto_blkcipher_clear_flags(blkcipher, ~0);
+	crypto_blkcipher_set_flags(blkcipher, crypto_skcipher_get_flags(tfm) &
+					      CRYPTO_TFM_REQ_MASK);
+	err = crypto_blkcipher_setkey(blkcipher, key, keylen);
+	crypto_skcipher_set_flags(tfm, crypto_blkcipher_get_flags(blkcipher) &
+				       CRYPTO_TFM_RES_MASK);
+
+	return err;
+}
+
+static int skcipher_crypt_blkcipher(struct skcipher_request *req,
+				    int (*crypt)(struct blkcipher_desc *,
+						 struct scatterlist *,
+						 struct scatterlist *,
+						 unsigned int))
+{
+	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+	struct crypto_blkcipher **ctx = crypto_skcipher_ctx(tfm);
+	struct blkcipher_desc desc = {
+		.tfm = *ctx,
+		.info = req->iv,
+		.flags = req->base.flags,
+	};
+
+	return crypt(&desc, req->dst, req->src, req->cryptlen);
+}
+
+static int skcipher_encrypt_blkcipher(struct skcipher_request *req) +{ + struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req); + struct crypto_tfm *tfm = crypto_skcipher_tfm(skcipher); + struct blkcipher_alg *alg = &tfm->__crt_alg->cra_blkcipher; + + return skcipher_crypt_blkcipher(req, alg->encrypt); +} + +static int skcipher_decrypt_blkcipher(struct skcipher_request *req) +{ + struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req); + struct crypto_tfm *tfm = crypto_skcipher_tfm(skcipher); + struct blkcipher_alg *alg = &tfm->__crt_alg->cra_blkcipher; + + return skcipher_crypt_blkcipher(req, alg->decrypt); +} + +static void crypto_exit_skcipher_ops_blkcipher(struct crypto_tfm *tfm) +{ + struct crypto_blkcipher **ctx = crypto_tfm_ctx(tfm); + + crypto_free_blkcipher(*ctx); +} + +static int crypto_init_skcipher_ops_blkcipher(struct crypto_tfm *tfm) +{ + struct crypto_alg *calg = tfm->__crt_alg; + struct crypto_skcipher *skcipher = __crypto_skcipher_cast(tfm); + struct crypto_blkcipher **ctx = crypto_tfm_ctx(tfm); + struct crypto_blkcipher *blkcipher; + struct crypto_tfm *btfm; + + if (!crypto_mod_get(calg)) + return -EAGAIN; + + btfm = __crypto_alloc_tfm(calg, CRYPTO_ALG_TYPE_BLKCIPHER, + CRYPTO_ALG_TYPE_MASK); + if (IS_ERR(btfm)) { + crypto_mod_put(calg); + return PTR_ERR(btfm); + } + + blkcipher = __crypto_blkcipher_cast(btfm); + *ctx = blkcipher; + tfm->exit = crypto_exit_skcipher_ops_blkcipher; + + skcipher->setkey = skcipher_setkey_blkcipher; + skcipher->encrypt = skcipher_encrypt_blkcipher; + skcipher->decrypt = skcipher_decrypt_blkcipher; + + skcipher->ivsize = crypto_blkcipher_ivsize(blkcipher); + skcipher->keysize = calg->cra_blkcipher.max_keysize; + + return 0; +} + +static int skcipher_setkey_ablkcipher(struct crypto_skcipher *tfm, + const u8 *key, unsigned int keylen) +{ + struct crypto_ablkcipher **ctx = crypto_skcipher_ctx(tfm); + struct crypto_ablkcipher *ablkcipher = *ctx; + int err; + + crypto_ablkcipher_clear_flags(ablkcipher, ~0); + crypto_ablkcipher_set_flags(ablkcipher, + crypto_skcipher_get_flags(tfm) & + CRYPTO_TFM_REQ_MASK); + err = crypto_ablkcipher_setkey(ablkcipher, key, keylen); + crypto_skcipher_set_flags(tfm, + crypto_ablkcipher_get_flags(ablkcipher) & + CRYPTO_TFM_RES_MASK); + + return err; +} + +static int skcipher_crypt_ablkcipher(struct skcipher_request *req, + int (*crypt)(struct ablkcipher_request *)) +{ + struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); + struct crypto_ablkcipher **ctx = crypto_skcipher_ctx(tfm); + struct ablkcipher_request *subreq = skcipher_request_ctx(req); + + ablkcipher_request_set_tfm(subreq, *ctx); + ablkcipher_request_set_callback(subreq, skcipher_request_flags(req), + req->base.complete, req->base.data); + ablkcipher_request_set_crypt(subreq, req->src, req->dst, req->cryptlen, + req->iv); + + return crypt(subreq); +} + +static int skcipher_encrypt_ablkcipher(struct skcipher_request *req) +{ + struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req); + struct crypto_tfm *tfm = crypto_skcipher_tfm(skcipher); + struct ablkcipher_alg *alg = &tfm->__crt_alg->cra_ablkcipher; + + return skcipher_crypt_ablkcipher(req, alg->encrypt); +} + +static int skcipher_decrypt_ablkcipher(struct skcipher_request *req) +{ + struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req); + struct crypto_tfm *tfm = crypto_skcipher_tfm(skcipher); + struct ablkcipher_alg *alg = &tfm->__crt_alg->cra_ablkcipher; + + return skcipher_crypt_ablkcipher(req, alg->decrypt); +} + +static void 
crypto_exit_skcipher_ops_ablkcipher(struct crypto_tfm *tfm) +{ + struct crypto_ablkcipher **ctx = crypto_tfm_ctx(tfm); + + crypto_free_ablkcipher(*ctx); +} + +static int crypto_init_skcipher_ops_ablkcipher(struct crypto_tfm *tfm) +{ + struct crypto_alg *calg = tfm->__crt_alg; + struct crypto_skcipher *skcipher = __crypto_skcipher_cast(tfm); + struct crypto_ablkcipher **ctx = crypto_tfm_ctx(tfm); + struct crypto_ablkcipher *ablkcipher; + struct crypto_tfm *abtfm; + + if (!crypto_mod_get(calg)) + return -EAGAIN; + + abtfm = __crypto_alloc_tfm(calg, 0, 0); + if (IS_ERR(abtfm)) { + crypto_mod_put(calg); + return PTR_ERR(abtfm); + } + + ablkcipher = __crypto_ablkcipher_cast(abtfm); + *ctx = ablkcipher; + tfm->exit = crypto_exit_skcipher_ops_ablkcipher; + + skcipher->setkey = skcipher_setkey_ablkcipher; + skcipher->encrypt = skcipher_encrypt_ablkcipher; + skcipher->decrypt = skcipher_decrypt_ablkcipher; + + skcipher->ivsize = crypto_ablkcipher_ivsize(ablkcipher); + skcipher->reqsize = crypto_ablkcipher_reqsize(ablkcipher) + + sizeof(struct ablkcipher_request); + skcipher->keysize = calg->cra_ablkcipher.max_keysize; + + return 0; +} + +static void crypto_skcipher_exit_tfm(struct crypto_tfm *tfm) +{ + struct crypto_skcipher *skcipher = __crypto_skcipher_cast(tfm); + struct skcipher_alg *alg = crypto_skcipher_alg(skcipher); + + alg->exit(skcipher); +} + +static int crypto_skcipher_init_tfm(struct crypto_tfm *tfm) +{ + struct crypto_skcipher *skcipher = __crypto_skcipher_cast(tfm); + struct skcipher_alg *alg = crypto_skcipher_alg(skcipher); + + if (tfm->__crt_alg->cra_type == &crypto_blkcipher_type) + return crypto_init_skcipher_ops_blkcipher(tfm); + + if (tfm->__crt_alg->cra_type == &crypto_ablkcipher_type || + tfm->__crt_alg->cra_type == &crypto_givcipher_type) + return crypto_init_skcipher_ops_ablkcipher(tfm); + + skcipher->setkey = alg->setkey; + skcipher->encrypt = alg->encrypt; + skcipher->decrypt = alg->decrypt; + skcipher->ivsize = alg->ivsize; + skcipher->keysize = alg->max_keysize; + + if (alg->exit) + skcipher->base.exit = crypto_skcipher_exit_tfm; + + if (alg->init) + return alg->init(skcipher); + + return 0; +} + +static void crypto_skcipher_free_instance(struct crypto_instance *inst) +{ + struct skcipher_instance *skcipher = + container_of(inst, struct skcipher_instance, s.base); + + skcipher->free(skcipher); +} + +static void crypto_skcipher_show(struct seq_file *m, struct crypto_alg *alg) + __maybe_unused; +static void crypto_skcipher_show(struct seq_file *m, struct crypto_alg *alg) +{ + struct skcipher_alg *skcipher = container_of(alg, struct skcipher_alg, + base); + + seq_printf(m, "type : skcipher\n"); + seq_printf(m, "async : %s\n", + alg->cra_flags & CRYPTO_ALG_ASYNC ? 
"yes" : "no"); + seq_printf(m, "blocksize : %u\n", alg->cra_blocksize); + seq_printf(m, "min keysize : %u\n", skcipher->min_keysize); + seq_printf(m, "max keysize : %u\n", skcipher->max_keysize); + seq_printf(m, "ivsize : %u\n", skcipher->ivsize); + seq_printf(m, "chunksize : %u\n", skcipher->chunksize); + seq_printf(m, "walksize : %u\n", skcipher->walksize); +} + +#ifdef CONFIG_NET +static int crypto_skcipher_report(struct sk_buff *skb, struct crypto_alg *alg) +{ + struct crypto_report_blkcipher rblkcipher; + struct skcipher_alg *skcipher = container_of(alg, struct skcipher_alg, + base); + + strncpy(rblkcipher.type, "skcipher", sizeof(rblkcipher.type)); + strncpy(rblkcipher.geniv, "", sizeof(rblkcipher.geniv)); + + rblkcipher.blocksize = alg->cra_blocksize; + rblkcipher.min_keysize = skcipher->min_keysize; + rblkcipher.max_keysize = skcipher->max_keysize; + rblkcipher.ivsize = skcipher->ivsize; + + if (nla_put(skb, CRYPTOCFGA_REPORT_BLKCIPHER, + sizeof(struct crypto_report_blkcipher), &rblkcipher)) + goto nla_put_failure; + return 0; + +nla_put_failure: + return -EMSGSIZE; +} +#else +static int crypto_skcipher_report(struct sk_buff *skb, struct crypto_alg *alg) +{ + return -ENOSYS; +} +#endif + +static const struct crypto_type crypto_skcipher_type2 = { + .extsize = crypto_skcipher_extsize, + .init_tfm = crypto_skcipher_init_tfm, + .free = crypto_skcipher_free_instance, +#ifdef CONFIG_PROC_FS + .show = crypto_skcipher_show, +#endif + .report = crypto_skcipher_report, + .maskclear = ~CRYPTO_ALG_TYPE_MASK, + .maskset = CRYPTO_ALG_TYPE_BLKCIPHER_MASK, + .type = CRYPTO_ALG_TYPE_SKCIPHER, + .tfmsize = offsetof(struct crypto_skcipher, base), +}; + +int crypto_grab_skcipher(struct crypto_skcipher_spawn *spawn, + const char *name, u32 type, u32 mask) +{ + spawn->base.frontend = &crypto_skcipher_type2; + return crypto_grab_spawn(&spawn->base, name, type, mask); +} +EXPORT_SYMBOL_GPL(crypto_grab_skcipher); + +struct crypto_skcipher *crypto_alloc_skcipher(const char *alg_name, + u32 type, u32 mask) +{ + return crypto_alloc_tfm(alg_name, &crypto_skcipher_type2, type, mask); +} +EXPORT_SYMBOL_GPL(crypto_alloc_skcipher); + +int crypto_has_skcipher2(const char *alg_name, u32 type, u32 mask) +{ + return crypto_type_has_alg(alg_name, &crypto_skcipher_type2, + type, mask); +} +EXPORT_SYMBOL_GPL(crypto_has_skcipher2); + +static int skcipher_prepare_alg(struct skcipher_alg *alg) +{ + struct crypto_alg *base = &alg->base; + + if (alg->ivsize > PAGE_SIZE / 8 || alg->chunksize > PAGE_SIZE / 8 || + alg->walksize > PAGE_SIZE / 8) + return -EINVAL; + + if (!alg->chunksize) + alg->chunksize = base->cra_blocksize; + if (!alg->walksize) + alg->walksize = alg->chunksize; + + base->cra_type = &crypto_skcipher_type2; + base->cra_flags &= ~CRYPTO_ALG_TYPE_MASK; + base->cra_flags |= CRYPTO_ALG_TYPE_SKCIPHER; + + return 0; +} + +int crypto_register_skcipher(struct skcipher_alg *alg) +{ + struct crypto_alg *base = &alg->base; + int err; + + err = skcipher_prepare_alg(alg); + if (err) + return err; + + return crypto_register_alg(base); +} +EXPORT_SYMBOL_GPL(crypto_register_skcipher); + +void crypto_unregister_skcipher(struct skcipher_alg *alg) +{ + crypto_unregister_alg(&alg->base); +} +EXPORT_SYMBOL_GPL(crypto_unregister_skcipher); + +int crypto_register_skciphers(struct skcipher_alg *algs, int count) +{ + int i, ret; + + for (i = 0; i < count; i++) { + ret = crypto_register_skcipher(&algs[i]); + if (ret) + goto err; + } + + return 0; + +err: + for (--i; i >= 0; --i) + crypto_unregister_skcipher(&algs[i]); + + return ret; +} 
+EXPORT_SYMBOL_GPL(crypto_register_skciphers);
+
+void crypto_unregister_skciphers(struct skcipher_alg *algs, int count)
+{
+	int i;
+
+	for (i = count - 1; i >= 0; --i)
+		crypto_unregister_skcipher(&algs[i]);
+}
+EXPORT_SYMBOL_GPL(crypto_unregister_skciphers);
+
+int skcipher_register_instance(struct crypto_template *tmpl,
+			       struct skcipher_instance *inst)
+{
+	int err;
+
+	err = skcipher_prepare_alg(&inst->alg);
+	if (err)
+		return err;
+
+	return crypto_register_instance(tmpl, skcipher_crypto_instance(inst));
+}
+EXPORT_SYMBOL_GPL(skcipher_register_instance);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Symmetric key cipher type");
diff --git a/compat/drivers-base-devcoredump.c b/compat/drivers-base-devcoredump.c
new file mode 100644
index 0000000..0c7b7d3
--- /dev/null
+++ b/compat/drivers-base-devcoredump.c
@@ -0,0 +1,386 @@
+/*
+ * This file is provided under the GPLv2 license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2014 Intel Mobile Communications GmbH
+ * Copyright(c) 2015 Intel Deutschland GmbH
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * The full GNU General Public License is included in this distribution
+ * in the file called COPYING.
+ *
+ * Contact Information:
+ *  Intel Linux Wireless
+ *  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ * Author: Johannes Berg
+ */
+#include <linux/device.h>
+#include <linux/devcoredump.h>
+#include <linux/fs.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/vmalloc.h>
+#include <linux/workqueue.h>
+#include "backports.h"
+
+static struct class devcd_class;
+
+/* global disable flag, for security purposes */
+static bool devcd_disabled;
+
+/* if data isn't read by userspace after 5 minutes then delete it */
+#define DEVCD_TIMEOUT	(HZ * 60 * 5)
+
+#if LINUX_VERSION_IS_LESS(3,11,0)
+static struct bin_attribute devcd_attr_data;
+#endif
+
+struct devcd_entry {
+	struct device devcd_dev;
+	void *data;
+	size_t datalen;
+	struct module *owner;
+	ssize_t (*read)(char *buffer, loff_t offset, size_t count,
+			void *data, size_t datalen);
+	void (*free)(void *data);
+	struct delayed_work del_wk;
+	struct device *failing_dev;
+};
+
+static struct devcd_entry *dev_to_devcd(struct device *dev)
+{
+	return container_of(dev, struct devcd_entry, devcd_dev);
+}
+
+static void devcd_dev_release(struct device *dev)
+{
+	struct devcd_entry *devcd = dev_to_devcd(dev);
+
+	devcd->free(devcd->data);
+	module_put(devcd->owner);
+
+	/*
+	 * this seems racy, but I don't see a notifier or such on
+	 * a struct device to know when it goes away?
+ */ + if (devcd->failing_dev->kobj.sd) + sysfs_remove_link(&devcd->failing_dev->kobj, "devcoredump"); + + put_device(devcd->failing_dev); + kfree(devcd); +} + +static void devcd_del(struct work_struct *wk) +{ + struct devcd_entry *devcd; + + devcd = container_of(wk, struct devcd_entry, del_wk.work); + +#if LINUX_VERSION_IS_LESS(3,11,0) + device_remove_bin_file(&devcd->devcd_dev, &devcd_attr_data); +#endif + device_del(&devcd->devcd_dev); + put_device(&devcd->devcd_dev); +} + +static ssize_t devcd_data_read(struct file *filp, struct kobject *kobj, + struct bin_attribute *bin_attr, + char *buffer, loff_t offset, size_t count) +{ + struct device *dev = kobj_to_dev(kobj); + struct devcd_entry *devcd = dev_to_devcd(dev); + + return devcd->read(buffer, offset, count, devcd->data, devcd->datalen); +} + +static ssize_t devcd_data_write(struct file *filp, struct kobject *kobj, + struct bin_attribute *bin_attr, + char *buffer, loff_t offset, size_t count) +{ + struct device *dev = kobj_to_dev(kobj); + struct devcd_entry *devcd = dev_to_devcd(dev); + + mod_delayed_work(system_wq, &devcd->del_wk, 0); + + return count; +} + +static struct bin_attribute devcd_attr_data = { + .attr = { .name = "data", .mode = S_IRUSR | S_IWUSR, }, + .size = 0, + .read = devcd_data_read, + .write = devcd_data_write, +}; + +#if LINUX_VERSION_IS_GEQ(3,11,0) +static struct bin_attribute *devcd_dev_bin_attrs[] = { + &devcd_attr_data, NULL, +}; + +static const struct attribute_group devcd_dev_group = { + .bin_attrs = devcd_dev_bin_attrs, +}; + +static const struct attribute_group *devcd_dev_groups[] = { + &devcd_dev_group, NULL, +}; +#endif /* LINUX_VERSION_IS_GEQ(3,11,0) */ + +static int devcd_free(struct device *dev, void *data) +{ + struct devcd_entry *devcd = dev_to_devcd(dev); + + flush_delayed_work(&devcd->del_wk); + return 0; +} + +static ssize_t disabled_show(struct class *class, struct class_attribute *attr, + char *buf) +{ + return sprintf(buf, "%d\n", devcd_disabled); +} + +static ssize_t disabled_store(struct class *class, struct class_attribute *attr, + const char *buf, size_t count) +{ + long tmp = simple_strtol(buf, NULL, 10); + + /* + * This essentially makes the attribute write-once, since you can't + * go back to not having it disabled. This is intentional, it serves + * as a system lockdown feature. 
+ */ + if (tmp != 1) + return -EINVAL; + + devcd_disabled = true; + + class_for_each_device(&devcd_class, NULL, NULL, devcd_free); + + return count; +} +static CLASS_ATTR_RW(disabled); + +static struct attribute *devcd_class_attrs[] = { + &class_attr_disabled.attr, + NULL, +}; +ATTRIBUTE_GROUPS(devcd_class); + +static struct class devcd_class = { + .name = "devcoredump", + .owner = THIS_MODULE, + .dev_release = devcd_dev_release, +#if LINUX_VERSION_IS_GEQ(3,11,0) + .dev_groups = devcd_dev_groups, +#endif + .class_groups = devcd_class_groups, +}; + +static ssize_t devcd_readv(char *buffer, loff_t offset, size_t count, + void *data, size_t datalen) +{ + if (offset > datalen) + return -EINVAL; + + if (offset + count > datalen) + count = datalen - offset; + + if (count) + memcpy(buffer, ((u8 *)data) + offset, count); + + return count; +} + +static void devcd_freev(void *data) +{ + vfree(data); +} + +/** + * dev_coredumpv - create device coredump with vmalloc data + * @dev: the struct device for the crashed device + * @data: vmalloc data containing the device coredump + * @datalen: length of the data + * @gfp: allocation flags + * + * This function takes ownership of the vmalloc'ed data and will free + * it when it is no longer used. See dev_coredumpm() for more information. + */ +void dev_coredumpv(struct device *dev, void *data, size_t datalen, + gfp_t gfp) +{ + dev_coredumpm(dev, NULL, data, datalen, gfp, devcd_readv, devcd_freev); +} +EXPORT_SYMBOL_GPL(dev_coredumpv); + +static int devcd_match_failing(struct device *dev, const void *failing) +{ + struct devcd_entry *devcd = dev_to_devcd(dev); + + return devcd->failing_dev == failing; +} + +/** + * devcd_free_sgtable - free all the memory of the given scatterlist table + * (i.e. both pages and scatterlist instances) + * NOTE: if two tables allocated with devcd_alloc_sgtable and then chained + * using the sg_chain function then that function should be called only once + * on the chained table + * @table: pointer to sg_table to free + */ +static void devcd_free_sgtable(void *data) +{ + _devcd_free_sgtable(data); +} + +/** + * devcd_read_from_table - copy data from sg_table to a given buffer + * and return the number of bytes read + * @buffer: the buffer to copy the data to it + * @buf_len: the length of the buffer + * @data: the scatterlist table to copy from + * @offset: start copy from @offset@ bytes from the head of the data + * in the given scatterlist + * @data_len: the length of the data in the sg_table + */ +static ssize_t devcd_read_from_sgtable(char *buffer, loff_t offset, + size_t buf_len, void *data, + size_t data_len) +{ + struct scatterlist *table = data; + + if (offset > data_len) + return -EINVAL; + + if (offset + buf_len > data_len) + buf_len = data_len - offset; + return sg_pcopy_to_buffer(table, sg_nents(table), buffer, buf_len, + offset); +} + +/** + * dev_coredumpm - create device coredump with read/free methods + * @dev: the struct device for the crashed device + * @owner: the module that contains the read/free functions, use %THIS_MODULE + * @data: data cookie for the @read/@free functions + * @datalen: length of the data + * @gfp: allocation flags + * @read: function to read from the given buffer + * @free: function to free the given buffer + * + * Creates a new device coredump for the given device. If a previous one hasn't + * been read yet, the new coredump is discarded. 
+
+/**
+ * dev_coredumpm - create device coredump with read/free methods
+ * @dev: the struct device for the crashed device
+ * @owner: the module that contains the read/free functions, use %THIS_MODULE
+ * @data: data cookie for the @read/@free functions
+ * @datalen: length of the data
+ * @gfp: allocation flags
+ * @read: function to read from the given buffer
+ * @free: function to free the given buffer
+ *
+ * Creates a new device coredump for the given device. If a previous one hasn't
+ * been read yet, the new coredump is discarded. The data lifetime is determined
+ * by the device coredump framework and when it is no longer needed the @free
+ * function will be called to free the data.
+ */
+void dev_coredumpm(struct device *dev, struct module *owner,
+		   void *data, size_t datalen, gfp_t gfp,
+		   ssize_t (*read)(char *buffer, loff_t offset, size_t count,
+				   void *data, size_t datalen),
+		   void (*free)(void *data))
+{
+	static atomic_t devcd_count = ATOMIC_INIT(0);
+	struct devcd_entry *devcd;
+	struct device *existing;
+
+	if (devcd_disabled)
+		goto free;
+
+	existing = class_find_device(&devcd_class, NULL, dev,
+				     devcd_match_failing);
+	if (existing) {
+		put_device(existing);
+		goto free;
+	}
+
+	if (!try_module_get(owner))
+		goto free;
+
+	devcd = kzalloc(sizeof(*devcd), gfp);
+	if (!devcd)
+		goto put_module;
+
+	devcd->owner = owner;
+	devcd->data = data;
+	devcd->datalen = datalen;
+	devcd->read = read;
+	devcd->free = free;
+	devcd->failing_dev = get_device(dev);
+
+	device_initialize(&devcd->devcd_dev);
+
+	dev_set_name(&devcd->devcd_dev, "devcd%d",
+		     atomic_inc_return(&devcd_count));
+	devcd->devcd_dev.class = &devcd_class;
+
+	if (device_add(&devcd->devcd_dev))
+		goto put_device;
+
+#if LINUX_VERSION_IS_LESS(3,11,0)
+	if (device_create_bin_file(&devcd->devcd_dev, &devcd_attr_data))
+		goto put_device;
+#endif
+
+	if (sysfs_create_link(&devcd->devcd_dev.kobj, &dev->kobj,
+			      "failing_device"))
+		/* nothing - symlink will be missing */;
+
+	if (sysfs_create_link(&dev->kobj, &devcd->devcd_dev.kobj,
+			      "devcoredump"))
+		/* nothing - symlink will be missing */;
+
+	INIT_DELAYED_WORK(&devcd->del_wk, devcd_del);
+	schedule_delayed_work(&devcd->del_wk, DEVCD_TIMEOUT);
+
+	return;
+ put_device:
+	put_device(&devcd->devcd_dev);
+ put_module:
+	module_put(owner);
+ free:
+	free(data);
+}
+EXPORT_SYMBOL_GPL(dev_coredumpm);
+
+/**
+ * dev_coredumpsg - create device coredump that uses scatterlist as data
+ * parameter
+ * @dev: the struct device for the crashed device
+ * @table: the dump data
+ * @datalen: length of the data
+ * @gfp: allocation flags
+ *
+ * Creates a new device coredump for the given device. If a previous one hasn't
+ * been read yet, the new coredump is discarded. The data lifetime is determined
+ * by the device coredump framework and when it is no longer needed
+ * it will free the data.
+ */
+void dev_coredumpsg(struct device *dev, struct scatterlist *table,
+		    size_t datalen, gfp_t gfp)
+{
+	dev_coredumpm(dev, NULL, table, datalen, gfp, devcd_read_from_sgtable,
+		      devcd_free_sgtable);
+}
+EXPORT_SYMBOL_GPL(dev_coredumpsg);
+
+int __init devcoredump_init(void)
+{
+	return class_register(&devcd_class);
+}
+
+void __exit devcoredump_exit(void)
+{
+	class_for_each_device(&devcd_class, NULL, NULL, devcd_free);
+	class_unregister(&devcd_class);
+}
diff --git a/compat/hid-ids.h b/compat/hid-ids.h
new file mode 100644
index 0000000..c147dc0
--- /dev/null
+++ b/compat/hid-ids.h
@@ -0,0 +1,866 @@
+/*
+ * USB HID quirks support for Linux
+ *
+ * Copyright (c) 1999 Andreas Gal
+ * Copyright (c) 2000-2005 Vojtech Pavlik
+ * Copyright (c) 2005 Michael Haboustak for Concept2, Inc
+ * Copyright (c) 2006-2007 Jiri Kosina
+ */
+
+/*
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option)
+ * any later version.
+ */ + +#ifndef HID_IDS_H_FILE +#define HID_IDS_H_FILE + +#define USB_VENDOR_ID_3M 0x0596 +#define USB_DEVICE_ID_3M1968 0x0500 +#define USB_DEVICE_ID_3M2256 0x0502 +#define USB_DEVICE_ID_3M3266 0x0506 + +#define USB_VENDOR_ID_A4TECH 0x09da +#define USB_DEVICE_ID_A4TECH_WCP32PU 0x0006 +#define USB_DEVICE_ID_A4TECH_X5_005D 0x000a +#define USB_DEVICE_ID_A4TECH_RP_649 0x001a + +#define USB_VENDOR_ID_AASHIMA 0x06d6 +#define USB_DEVICE_ID_AASHIMA_GAMEPAD 0x0025 +#define USB_DEVICE_ID_AASHIMA_PREDATOR 0x0026 + +#define USB_VENDOR_ID_ACECAD 0x0460 +#define USB_DEVICE_ID_ACECAD_FLAIR 0x0004 +#define USB_DEVICE_ID_ACECAD_302 0x0008 + +#define USB_VENDOR_ID_ACRUX 0x1a34 + +#define USB_VENDOR_ID_ACTIONSTAR 0x2101 +#define USB_DEVICE_ID_ACTIONSTAR_1011 0x1011 + +#define USB_VENDOR_ID_ADS_TECH 0x06e1 +#define USB_DEVICE_ID_ADS_TECH_RADIO_SI470X 0xa155 + +#define USB_VENDOR_ID_AFATECH 0x15a4 +#define USB_DEVICE_ID_AFATECH_AF9016 0x9016 + +#define USB_VENDOR_ID_AIPTEK 0x08ca +#define USB_DEVICE_ID_AIPTEK_01 0x0001 +#define USB_DEVICE_ID_AIPTEK_10 0x0010 +#define USB_DEVICE_ID_AIPTEK_20 0x0020 +#define USB_DEVICE_ID_AIPTEK_21 0x0021 +#define USB_DEVICE_ID_AIPTEK_22 0x0022 +#define USB_DEVICE_ID_AIPTEK_23 0x0023 +#define USB_DEVICE_ID_AIPTEK_24 0x0024 + +#define USB_VENDOR_ID_AIRCABLE 0x16CA +#define USB_DEVICE_ID_AIRCABLE1 0x1502 + +#define USB_VENDOR_ID_AIREN 0x1a2c +#define USB_DEVICE_ID_AIREN_SLIMPLUS 0x0002 + +#define USB_VENDOR_ID_ALCOR 0x058f +#define USB_DEVICE_ID_ALCOR_USBRS232 0x9720 + +#define USB_VENDOR_ID_ALPS 0x0433 +#define USB_DEVICE_ID_IBM_GAMEPAD 0x1101 + +#define USB_VENDOR_ID_APPLE 0x05ac +#define USB_DEVICE_ID_APPLE_MIGHTYMOUSE 0x0304 +#define USB_DEVICE_ID_APPLE_MAGICMOUSE 0x030d +#define USB_DEVICE_ID_APPLE_MAGICTRACKPAD 0x030e +#define USB_DEVICE_ID_APPLE_FOUNTAIN_ANSI 0x020e +#define USB_DEVICE_ID_APPLE_FOUNTAIN_ISO 0x020f +#define USB_DEVICE_ID_APPLE_GEYSER_ANSI 0x0214 +#define USB_DEVICE_ID_APPLE_GEYSER_ISO 0x0215 +#define USB_DEVICE_ID_APPLE_GEYSER_JIS 0x0216 +#define USB_DEVICE_ID_APPLE_GEYSER3_ANSI 0x0217 +#define USB_DEVICE_ID_APPLE_GEYSER3_ISO 0x0218 +#define USB_DEVICE_ID_APPLE_GEYSER3_JIS 0x0219 +#define USB_DEVICE_ID_APPLE_GEYSER4_ANSI 0x021a +#define USB_DEVICE_ID_APPLE_GEYSER4_ISO 0x021b +#define USB_DEVICE_ID_APPLE_GEYSER4_JIS 0x021c +#define USB_DEVICE_ID_APPLE_ALU_MINI_ANSI 0x021d +#define USB_DEVICE_ID_APPLE_ALU_MINI_ISO 0x021e +#define USB_DEVICE_ID_APPLE_ALU_MINI_JIS 0x021f +#define USB_DEVICE_ID_APPLE_ALU_ANSI 0x0220 +#define USB_DEVICE_ID_APPLE_ALU_ISO 0x0221 +#define USB_DEVICE_ID_APPLE_ALU_JIS 0x0222 +#define USB_DEVICE_ID_APPLE_WELLSPRING_ANSI 0x0223 +#define USB_DEVICE_ID_APPLE_WELLSPRING_ISO 0x0224 +#define USB_DEVICE_ID_APPLE_WELLSPRING_JIS 0x0225 +#define USB_DEVICE_ID_APPLE_GEYSER4_HF_ANSI 0x0229 +#define USB_DEVICE_ID_APPLE_GEYSER4_HF_ISO 0x022a +#define USB_DEVICE_ID_APPLE_GEYSER4_HF_JIS 0x022b +#define USB_DEVICE_ID_APPLE_ALU_WIRELESS_ANSI 0x022c +#define USB_DEVICE_ID_APPLE_ALU_WIRELESS_ISO 0x022d +#define USB_DEVICE_ID_APPLE_ALU_WIRELESS_JIS 0x022e +#define USB_DEVICE_ID_APPLE_WELLSPRING2_ANSI 0x0230 +#define USB_DEVICE_ID_APPLE_WELLSPRING2_ISO 0x0231 +#define USB_DEVICE_ID_APPLE_WELLSPRING2_JIS 0x0232 +#define USB_DEVICE_ID_APPLE_WELLSPRING3_ANSI 0x0236 +#define USB_DEVICE_ID_APPLE_WELLSPRING3_ISO 0x0237 +#define USB_DEVICE_ID_APPLE_WELLSPRING3_JIS 0x0238 +#define USB_DEVICE_ID_APPLE_WELLSPRING4_ANSI 0x023f +#define USB_DEVICE_ID_APPLE_WELLSPRING4_ISO 0x0240 +#define USB_DEVICE_ID_APPLE_WELLSPRING4_JIS 0x0241 +#define 
USB_DEVICE_ID_APPLE_WELLSPRING4A_ANSI 0x0242 +#define USB_DEVICE_ID_APPLE_WELLSPRING4A_ISO 0x0243 +#define USB_DEVICE_ID_APPLE_WELLSPRING4A_JIS 0x0244 +#define USB_DEVICE_ID_APPLE_WELLSPRING5_ANSI 0x0245 +#define USB_DEVICE_ID_APPLE_WELLSPRING5_ISO 0x0246 +#define USB_DEVICE_ID_APPLE_WELLSPRING5_JIS 0x0247 +#define USB_DEVICE_ID_APPLE_ALU_REVB_ANSI 0x024f +#define USB_DEVICE_ID_APPLE_ALU_REVB_ISO 0x0250 +#define USB_DEVICE_ID_APPLE_ALU_REVB_JIS 0x0251 +#define USB_DEVICE_ID_APPLE_WELLSPRING5A_ANSI 0x0252 +#define USB_DEVICE_ID_APPLE_WELLSPRING5A_ISO 0x0253 +#define USB_DEVICE_ID_APPLE_WELLSPRING5A_JIS 0x0254 +#define USB_DEVICE_ID_APPLE_WELLSPRING7A_ANSI 0x0259 +#define USB_DEVICE_ID_APPLE_WELLSPRING7A_ISO 0x025a +#define USB_DEVICE_ID_APPLE_WELLSPRING7A_JIS 0x025b +#define USB_DEVICE_ID_APPLE_WELLSPRING6A_ANSI 0x0249 +#define USB_DEVICE_ID_APPLE_WELLSPRING6A_ISO 0x024a +#define USB_DEVICE_ID_APPLE_WELLSPRING6A_JIS 0x024b +#define USB_DEVICE_ID_APPLE_WELLSPRING6_ANSI 0x024c +#define USB_DEVICE_ID_APPLE_WELLSPRING6_ISO 0x024d +#define USB_DEVICE_ID_APPLE_WELLSPRING6_JIS 0x024e +#define USB_DEVICE_ID_APPLE_WELLSPRING7_ANSI 0x0262 +#define USB_DEVICE_ID_APPLE_WELLSPRING7_ISO 0x0263 +#define USB_DEVICE_ID_APPLE_WELLSPRING7_JIS 0x0264 +#define USB_DEVICE_ID_APPLE_ALU_WIRELESS_2009_ANSI 0x0239 +#define USB_DEVICE_ID_APPLE_ALU_WIRELESS_2009_ISO 0x023a +#define USB_DEVICE_ID_APPLE_ALU_WIRELESS_2009_JIS 0x023b +#define USB_DEVICE_ID_APPLE_ALU_WIRELESS_2011_ANSI 0x0255 +#define USB_DEVICE_ID_APPLE_ALU_WIRELESS_2011_ISO 0x0256 +#define USB_DEVICE_ID_APPLE_FOUNTAIN_TP_ONLY 0x030a +#define USB_DEVICE_ID_APPLE_GEYSER1_TP_ONLY 0x030b +#define USB_DEVICE_ID_APPLE_ATV_IRCONTROL 0x8241 +#define USB_DEVICE_ID_APPLE_IRCONTROL4 0x8242 + +#define USB_VENDOR_ID_ASUS 0x0486 +#define USB_DEVICE_ID_ASUS_T91MT 0x0185 +#define USB_DEVICE_ID_ASUSTEK_MULTITOUCH_YFO 0x0186 + +#define USB_VENDOR_ID_ASUSTEK 0x0b05 +#define USB_DEVICE_ID_ASUSTEK_LCM 0x1726 +#define USB_DEVICE_ID_ASUSTEK_LCM2 0x175b + +#define USB_VENDOR_ID_ATEN 0x0557 +#define USB_DEVICE_ID_ATEN_UC100KM 0x2004 +#define USB_DEVICE_ID_ATEN_CS124U 0x2202 +#define USB_DEVICE_ID_ATEN_2PORTKVM 0x2204 +#define USB_DEVICE_ID_ATEN_4PORTKVM 0x2205 +#define USB_DEVICE_ID_ATEN_4PORTKVMC 0x2208 + +#define USB_VENDOR_ID_ATMEL 0x03eb +#define USB_DEVICE_ID_ATMEL_MULTITOUCH 0x211c +#define USB_DEVICE_ID_ATMEL_MXT_DIGITIZER 0x2118 + +#define USB_VENDOR_ID_AUREAL 0x0755 +#define USB_DEVICE_ID_AUREAL_W01RN 0x2626 + +#define USB_VENDOR_ID_AVERMEDIA 0x07ca +#define USB_DEVICE_ID_AVER_FM_MR800 0xb800 + +#define USB_VENDOR_ID_AXENTIA 0x12cf +#define USB_DEVICE_ID_AXENTIA_FM_RADIO 0x7111 + +#define USB_VENDOR_ID_BAANTO 0x2453 +#define USB_DEVICE_ID_BAANTO_MT_190W2 0x0100 + +#define USB_VENDOR_ID_BELKIN 0x050d +#define USB_DEVICE_ID_FLIP_KVM 0x3201 + +#define USB_VENDOR_ID_BERKSHIRE 0x0c98 +#define USB_DEVICE_ID_BERKSHIRE_PCWD 0x1140 + +#define USB_VENDOR_ID_BTC 0x046e +#define USB_DEVICE_ID_BTC_EMPREX_REMOTE 0x5578 +#define USB_DEVICE_ID_BTC_EMPREX_REMOTE_2 0x5577 + +#define USB_VENDOR_ID_CANDO 0x2087 +#define USB_DEVICE_ID_CANDO_PIXCIR_MULTI_TOUCH 0x0703 +#define USB_DEVICE_ID_CANDO_MULTI_TOUCH 0x0a01 +#define USB_DEVICE_ID_CANDO_MULTI_TOUCH_10_1 0x0a02 +#define USB_DEVICE_ID_CANDO_MULTI_TOUCH_11_6 0x0b03 +#define USB_DEVICE_ID_CANDO_MULTI_TOUCH_15_6 0x0f01 + +#define USB_VENDOR_ID_CH 0x068e +#define USB_DEVICE_ID_CH_PRO_THROTTLE 0x00f1 +#define USB_DEVICE_ID_CH_PRO_PEDALS 0x00f2 +#define USB_DEVICE_ID_CH_FIGHTERSTICK 0x00f3 +#define USB_DEVICE_ID_CH_COMBATSTICK 0x00f4 +#define 
USB_DEVICE_ID_CH_FLIGHT_SIM_ECLIPSE_YOKE 0x0051 +#define USB_DEVICE_ID_CH_FLIGHT_SIM_YOKE 0x00ff +#define USB_DEVICE_ID_CH_3AXIS_5BUTTON_STICK 0x00d3 +#define USB_DEVICE_ID_CH_AXIS_295 0x001c + +#define USB_VENDOR_ID_CHERRY 0x046a +#define USB_DEVICE_ID_CHERRY_CYMOTION 0x0023 +#define USB_DEVICE_ID_CHERRY_CYMOTION_SOLAR 0x0027 + +#define USB_VENDOR_ID_CHIC 0x05fe +#define USB_DEVICE_ID_CHIC_GAMEPAD 0x0014 + +#define USB_VENDOR_ID_CHICONY 0x04f2 +#define USB_DEVICE_ID_CHICONY_TACTICAL_PAD 0x0418 +#define USB_DEVICE_ID_CHICONY_MULTI_TOUCH 0xb19d +#define USB_DEVICE_ID_CHICONY_WIRELESS 0x0618 +#define USB_DEVICE_ID_CHICONY_WIRELESS2 0x1123 +#define USB_DEVICE_ID_CHICONY_AK1D 0x1125 + +#define USB_VENDOR_ID_CHUNGHWAT 0x2247 +#define USB_DEVICE_ID_CHUNGHWAT_MULTITOUCH 0x0001 + +#define USB_VENDOR_ID_CIDC 0x1677 + +#define USB_VENDOR_ID_CMEDIA 0x0d8c +#define USB_DEVICE_ID_CM109 0x000e + +#define USB_VENDOR_ID_CODEMERCS 0x07c0 +#define USB_DEVICE_ID_CODEMERCS_IOW_FIRST 0x1500 +#define USB_DEVICE_ID_CODEMERCS_IOW_LAST 0x15ff + +#define USB_VENDOR_ID_CREATIVELABS 0x041e +#define USB_DEVICE_ID_PRODIKEYS_PCMIDI 0x2801 + +#define USB_VENDOR_ID_CVTOUCH 0x1ff7 +#define USB_DEVICE_ID_CVTOUCH_SCREEN 0x0013 + +#define USB_VENDOR_ID_CYGNAL 0x10c4 +#define USB_DEVICE_ID_CYGNAL_RADIO_SI470X 0x818a + +#define USB_VENDOR_ID_CYPRESS 0x04b4 +#define USB_DEVICE_ID_CYPRESS_MOUSE 0x0001 +#define USB_DEVICE_ID_CYPRESS_HIDCOM 0x5500 +#define USB_DEVICE_ID_CYPRESS_ULTRAMOUSE 0x7417 +#define USB_DEVICE_ID_CYPRESS_BARCODE_1 0xde61 +#define USB_DEVICE_ID_CYPRESS_BARCODE_2 0xde64 +#define USB_DEVICE_ID_CYPRESS_BARCODE_3 0xbca1 +#define USB_DEVICE_ID_CYPRESS_BARCODE_4 0xed81 +#define USB_DEVICE_ID_CYPRESS_TRUETOUCH 0xc001 + +#define USB_VENDOR_ID_DEALEXTREAME 0x10c5 +#define USB_DEVICE_ID_DEALEXTREAME_RADIO_SI4701 0x819a + +#define USB_VENDOR_ID_DELORME 0x1163 +#define USB_DEVICE_ID_DELORME_EARTHMATE 0x0100 +#define USB_DEVICE_ID_DELORME_EM_LT20 0x0200 + +#define USB_VENDOR_ID_DMI 0x0c0b +#define USB_DEVICE_ID_DMI_ENC 0x5fab + +#define USB_VENDOR_ID_DRAGONRISE 0x0079 + +#define USB_VENDOR_ID_DWAV 0x0eef +#define USB_DEVICE_ID_EGALAX_TOUCHCONTROLLER 0x0001 +#define USB_DEVICE_ID_DWAV_TOUCHCONTROLLER 0x0002 +#define USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH_480D 0x480d +#define USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH_480E 0x480e +#define USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH_7207 0x7207 +#define USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH_720C 0x720c +#define USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH_7224 0x7224 +#define USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH_722A 0x722A +#define USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH_725E 0x725e +#define USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH_7262 0x7262 +#define USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH_726B 0x726b +#define USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH_72AA 0x72aa +#define USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH_72A1 0x72a1 +#define USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH_72FA 0x72fa +#define USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH_7302 0x7302 +#define USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH_7349 0x7349 +#define USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH_73F7 0x73f7 +#define USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH_A001 0xa001 +#define USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH_7224 0x7224 +#define USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH_72D0 0x72d0 +#define USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH_72C4 0x72c4 + +#define USB_VENDOR_ID_ELECOM 0x056e +#define USB_DEVICE_ID_ELECOM_BM084 0x0061 + +#define USB_VENDOR_ID_DREAM_CHEEKY 0x1d34 + +#define USB_VENDOR_ID_ELO 0x04E7 +#define USB_DEVICE_ID_ELO_TS2515 0x0022 +#define USB_DEVICE_ID_ELO_TS2700 
0x0020 + +#define USB_VENDOR_ID_EMS 0x2006 +#define USB_DEVICE_ID_EMS_TRIO_LINKER_PLUS_II 0x0118 + +#define USB_VENDOR_ID_FLATFROG 0x25b5 +#define USB_DEVICE_ID_MULTITOUCH_3200 0x0002 + +#define USB_VENDOR_ID_ESSENTIAL_REALITY 0x0d7f +#define USB_DEVICE_ID_ESSENTIAL_REALITY_P5 0x0100 + +#define USB_VENDOR_ID_ETT 0x0664 +#define USB_DEVICE_ID_TC5UH 0x0309 +#define USB_DEVICE_ID_TC4UM 0x0306 + +#define USB_VENDOR_ID_ETURBOTOUCH 0x22b9 +#define USB_DEVICE_ID_ETURBOTOUCH 0x0006 + +#define USB_VENDOR_ID_EZKEY 0x0518 +#define USB_DEVICE_ID_BTC_8193 0x0002 + +#define USB_VENDOR_ID_FREESCALE 0x15A2 +#define USB_DEVICE_ID_FREESCALE_MX28 0x004F + +#define USB_VENDOR_ID_FRUCTEL 0x25B6 +#define USB_DEVICE_ID_GAMETEL_MT_MODE 0x0002 + +#define USB_VENDOR_ID_GAMERON 0x0810 +#define USB_DEVICE_ID_GAMERON_DUAL_PSX_ADAPTOR 0x0001 +#define USB_DEVICE_ID_GAMERON_DUAL_PCS_ADAPTOR 0x0002 + +#define USB_VENDOR_ID_GENERAL_TOUCH 0x0dfc +#define USB_DEVICE_ID_GENERAL_TOUCH_WIN7_TWOFINGERS 0x0003 +#define USB_DEVICE_ID_GENERAL_TOUCH_WIN8_PWT_TENFINGERS 0x0100 + +#define USB_VENDOR_ID_GLAB 0x06c2 +#define USB_DEVICE_ID_4_PHIDGETSERVO_30 0x0038 +#define USB_DEVICE_ID_1_PHIDGETSERVO_30 0x0039 +#define USB_DEVICE_ID_0_0_4_IF_KIT 0x0040 +#define USB_DEVICE_ID_0_16_16_IF_KIT 0x0044 +#define USB_DEVICE_ID_8_8_8_IF_KIT 0x0045 +#define USB_DEVICE_ID_0_8_7_IF_KIT 0x0051 +#define USB_DEVICE_ID_0_8_8_IF_KIT 0x0053 +#define USB_DEVICE_ID_PHIDGET_MOTORCONTROL 0x0058 + +#define USB_VENDOR_ID_GOODTOUCH 0x1aad +#define USB_DEVICE_ID_GOODTOUCH_000f 0x000f + +#define USB_VENDOR_ID_GOTOP 0x08f2 +#define USB_DEVICE_ID_SUPER_Q2 0x007f +#define USB_DEVICE_ID_GOGOPEN 0x00ce +#define USB_DEVICE_ID_PENPOWER 0x00f4 + +#define USB_VENDOR_ID_GREENASIA 0x0e8f +#define USB_DEVICE_ID_GREENASIA_DUAL_USB_JOYPAD 0x3013 + +#define USB_VENDOR_ID_GRETAGMACBETH 0x0971 +#define USB_DEVICE_ID_GRETAGMACBETH_HUEY 0x2005 + +#define USB_VENDOR_ID_GRIFFIN 0x077d +#define USB_DEVICE_ID_POWERMATE 0x0410 +#define USB_DEVICE_ID_SOUNDKNOB 0x04AA +#define USB_DEVICE_ID_RADIOSHARK 0x627a + +#define USB_VENDOR_ID_GTCO 0x078c +#define USB_DEVICE_ID_GTCO_90 0x0090 +#define USB_DEVICE_ID_GTCO_100 0x0100 +#define USB_DEVICE_ID_GTCO_101 0x0101 +#define USB_DEVICE_ID_GTCO_103 0x0103 +#define USB_DEVICE_ID_GTCO_104 0x0104 +#define USB_DEVICE_ID_GTCO_105 0x0105 +#define USB_DEVICE_ID_GTCO_106 0x0106 +#define USB_DEVICE_ID_GTCO_107 0x0107 +#define USB_DEVICE_ID_GTCO_108 0x0108 +#define USB_DEVICE_ID_GTCO_200 0x0200 +#define USB_DEVICE_ID_GTCO_201 0x0201 +#define USB_DEVICE_ID_GTCO_202 0x0202 +#define USB_DEVICE_ID_GTCO_203 0x0203 +#define USB_DEVICE_ID_GTCO_204 0x0204 +#define USB_DEVICE_ID_GTCO_205 0x0205 +#define USB_DEVICE_ID_GTCO_206 0x0206 +#define USB_DEVICE_ID_GTCO_207 0x0207 +#define USB_DEVICE_ID_GTCO_300 0x0300 +#define USB_DEVICE_ID_GTCO_301 0x0301 +#define USB_DEVICE_ID_GTCO_302 0x0302 +#define USB_DEVICE_ID_GTCO_303 0x0303 +#define USB_DEVICE_ID_GTCO_304 0x0304 +#define USB_DEVICE_ID_GTCO_305 0x0305 +#define USB_DEVICE_ID_GTCO_306 0x0306 +#define USB_DEVICE_ID_GTCO_307 0x0307 +#define USB_DEVICE_ID_GTCO_308 0x0308 +#define USB_DEVICE_ID_GTCO_309 0x0309 +#define USB_DEVICE_ID_GTCO_400 0x0400 +#define USB_DEVICE_ID_GTCO_401 0x0401 +#define USB_DEVICE_ID_GTCO_402 0x0402 +#define USB_DEVICE_ID_GTCO_403 0x0403 +#define USB_DEVICE_ID_GTCO_404 0x0404 +#define USB_DEVICE_ID_GTCO_405 0x0405 +#define USB_DEVICE_ID_GTCO_500 0x0500 +#define USB_DEVICE_ID_GTCO_501 0x0501 +#define USB_DEVICE_ID_GTCO_502 0x0502 +#define USB_DEVICE_ID_GTCO_503 0x0503 +#define 
USB_DEVICE_ID_GTCO_504 0x0504 +#define USB_DEVICE_ID_GTCO_1000 0x1000 +#define USB_DEVICE_ID_GTCO_1001 0x1001 +#define USB_DEVICE_ID_GTCO_1002 0x1002 +#define USB_DEVICE_ID_GTCO_1003 0x1003 +#define USB_DEVICE_ID_GTCO_1004 0x1004 +#define USB_DEVICE_ID_GTCO_1005 0x1005 +#define USB_DEVICE_ID_GTCO_1006 0x1006 +#define USB_DEVICE_ID_GTCO_1007 0x1007 + +#define USB_VENDOR_ID_GYRATION 0x0c16 +#define USB_DEVICE_ID_GYRATION_REMOTE 0x0002 +#define USB_DEVICE_ID_GYRATION_REMOTE_2 0x0003 +#define USB_DEVICE_ID_GYRATION_REMOTE_3 0x0008 + +#define USB_VENDOR_ID_HANWANG 0x0b57 +#define USB_DEVICE_ID_HANWANG_TABLET_FIRST 0x5000 +#define USB_DEVICE_ID_HANWANG_TABLET_LAST 0x8fff + +#define USB_VENDOR_ID_HANVON 0x20b3 +#define USB_DEVICE_ID_HANVON_MULTITOUCH 0x0a18 + +#define USB_VENDOR_ID_HANVON_ALT 0x22ed +#define USB_DEVICE_ID_HANVON_ALT_MULTITOUCH 0x1010 + +#define USB_VENDOR_ID_HAPP 0x078b +#define USB_DEVICE_ID_UGCI_DRIVING 0x0010 +#define USB_DEVICE_ID_UGCI_FLYING 0x0020 +#define USB_DEVICE_ID_UGCI_FIGHTING 0x0030 + +#define USB_VENDOR_ID_IDEACOM 0x1cb6 +#define USB_DEVICE_ID_IDEACOM_IDC6650 0x6650 +#define USB_DEVICE_ID_IDEACOM_IDC6651 0x6651 + +#define USB_VENDOR_ID_ILITEK 0x222a +#define USB_DEVICE_ID_ILITEK_MULTITOUCH 0x0001 + +#define USB_VENDOR_ID_ION 0x15e4 +#define USB_DEVICE_ID_ICADE 0x0132 + +#define USB_VENDOR_ID_HOLTEK 0x1241 +#define USB_DEVICE_ID_HOLTEK_ON_LINE_GRIP 0x5015 + +#define USB_VENDOR_ID_HOLTEK_ALT 0x04d9 +#define USB_DEVICE_ID_HOLTEK_ALT_KEYBOARD 0xa055 + +#define USB_VENDOR_ID_IMATION 0x0718 +#define USB_DEVICE_ID_DISC_STAKKA 0xd000 + +#define USB_VENDOR_ID_INTEL_8086 0x8086 +#define USB_VENDOR_ID_INTEL_8087 0x8087 +#define USB_DEVICE_ID_SENSOR_HUB_1020 0x1020 +#define USB_DEVICE_ID_SENSOR_HUB_09FA 0x09FA + +#define USB_VENDOR_ID_IRTOUCHSYSTEMS 0x6615 +#define USB_DEVICE_ID_IRTOUCH_INFRARED_USB 0x0070 + +#define USB_VENDOR_ID_JESS 0x0c45 +#define USB_DEVICE_ID_JESS_YUREX 0x1010 + +#define USB_VENDOR_ID_KBGEAR 0x084e +#define USB_DEVICE_ID_KBGEAR_JAMSTUDIO 0x1001 + +#define USB_VENDOR_ID_KENSINGTON 0x047d +#define USB_DEVICE_ID_KS_SLIMBLADE 0x2041 + +#define USB_VENDOR_ID_KWORLD 0x1b80 +#define USB_DEVICE_ID_KWORLD_RADIO_FM700 0xd700 + +#define USB_VENDOR_ID_KEYTOUCH 0x0926 +#define USB_DEVICE_ID_KEYTOUCH_IEC 0x3333 + +#define USB_VENDOR_ID_KYE 0x0458 +#define USB_DEVICE_ID_KYE_ERGO_525V 0x0087 +#define USB_DEVICE_ID_KYE_GPEN_560 0x5003 +#define USB_DEVICE_ID_KYE_EASYPEN_I405X 0x5010 +#define USB_DEVICE_ID_KYE_MOUSEPEN_I608X 0x5011 +#define USB_DEVICE_ID_KYE_EASYPEN_M610X 0x5013 + +#define USB_VENDOR_ID_LABTEC 0x1020 +#define USB_DEVICE_ID_LABTEC_WIRELESS_KEYBOARD 0x0006 + +#define USB_VENDOR_ID_LCPOWER 0x1241 +#define USB_DEVICE_ID_LCPOWER_LC1000 0xf767 + +#define USB_VENDOR_ID_LD 0x0f11 +#define USB_DEVICE_ID_LD_CASSY 0x1000 +#define USB_DEVICE_ID_LD_CASSY2 0x1001 +#define USB_DEVICE_ID_LD_POCKETCASSY 0x1010 +#define USB_DEVICE_ID_LD_POCKETCASSY2 0x1011 +#define USB_DEVICE_ID_LD_MOBILECASSY 0x1020 +#define USB_DEVICE_ID_LD_MOBILECASSY2 0x1021 +#define USB_DEVICE_ID_LD_MICROCASSYVOLTAGE 0x1031 +#define USB_DEVICE_ID_LD_MICROCASSYCURRENT 0x1032 +#define USB_DEVICE_ID_LD_MICROCASSYTIME 0x1033 +#define USB_DEVICE_ID_LD_MICROCASSYTEMPERATURE 0x1035 +#define USB_DEVICE_ID_LD_MICROCASSYPH 0x1038 +#define USB_DEVICE_ID_LD_JWM 0x1080 +#define USB_DEVICE_ID_LD_DMMP 0x1081 +#define USB_DEVICE_ID_LD_UMIP 0x1090 +#define USB_DEVICE_ID_LD_UMIC 0x10A0 +#define USB_DEVICE_ID_LD_UMIB 0x10B0 +#define USB_DEVICE_ID_LD_XRAY 0x1100 +#define USB_DEVICE_ID_LD_XRAY2 0x1101 +#define 
+#define USB_DEVICE_ID_LD_VIDEOCOM 0x1200
+#define USB_DEVICE_ID_LD_MOTOR 0x1210
+#define USB_DEVICE_ID_LD_COM3LAB 0x2000
+#define USB_DEVICE_ID_LD_TELEPORT 0x2010
+#define USB_DEVICE_ID_LD_NETWORKANALYSER 0x2020
+#define USB_DEVICE_ID_LD_POWERCONTROL 0x2030
+#define USB_DEVICE_ID_LD_MACHINETEST 0x2040
+#define USB_DEVICE_ID_LD_MOSTANALYSER 0x2050
+#define USB_DEVICE_ID_LD_MOSTANALYSER2 0x2051
+#define USB_DEVICE_ID_LD_ABSESP 0x2060
+#define USB_DEVICE_ID_LD_AUTODATABUS 0x2070
+#define USB_DEVICE_ID_LD_MCT 0x2080
+#define USB_DEVICE_ID_LD_HYBRID 0x2090
+#define USB_DEVICE_ID_LD_HEATCONTROL 0x20A0
+
+#define USB_VENDOR_ID_LENOVO 0x17ef
+#define USB_DEVICE_ID_LENOVO_TPKBD 0x6009
+
+#define USB_VENDOR_ID_LG 0x1fd2
+#define USB_DEVICE_ID_LG_MULTITOUCH 0x0064
+
+#define USB_VENDOR_ID_LOGITECH 0x046d
+#define USB_DEVICE_ID_LOGITECH_AUDIOHUB 0x0a0e
+#define USB_DEVICE_ID_LOGITECH_RECEIVER 0xc101
+#define USB_DEVICE_ID_LOGITECH_HARMONY_FIRST 0xc110
+#define USB_DEVICE_ID_LOGITECH_HARMONY_LAST 0xc14f
+#define USB_DEVICE_ID_LOGITECH_HARMONY_PS3 0x0306
+#define USB_DEVICE_ID_LOGITECH_RUMBLEPAD_CORD 0xc20a
+#define USB_DEVICE_ID_LOGITECH_RUMBLEPAD 0xc211
+#define USB_DEVICE_ID_LOGITECH_EXTREME_3D 0xc215
+#define USB_DEVICE_ID_LOGITECH_RUMBLEPAD2 0xc218
+#define USB_DEVICE_ID_LOGITECH_RUMBLEPAD2_2 0xc219
+#define USB_DEVICE_ID_LOGITECH_WINGMAN_F3D 0xc283
+#define USB_DEVICE_ID_LOGITECH_FORCE3D_PRO 0xc286
+#define USB_DEVICE_ID_LOGITECH_FLIGHT_SYSTEM_G940 0xc287
+#define USB_DEVICE_ID_LOGITECH_WHEEL 0xc294
+#define USB_DEVICE_ID_LOGITECH_WINGMAN_FFG 0xc293
+#define USB_DEVICE_ID_LOGITECH_MOMO_WHEEL 0xc295
+#define USB_DEVICE_ID_LOGITECH_DFP_WHEEL 0xc298
+#define USB_DEVICE_ID_LOGITECH_G25_WHEEL 0xc299
+#define USB_DEVICE_ID_LOGITECH_DFGT_WHEEL 0xc29a
+#define USB_DEVICE_ID_LOGITECH_G27_WHEEL 0xc29b
+#define USB_DEVICE_ID_LOGITECH_WII_WHEEL 0xc29c
+#define USB_DEVICE_ID_LOGITECH_ELITE_KBD 0xc30a
+#define USB_DEVICE_ID_S510_RECEIVER 0xc50c
+#define USB_DEVICE_ID_S510_RECEIVER_2 0xc517
+#define USB_DEVICE_ID_LOGITECH_CORDLESS_DESKTOP_LX500 0xc512
+#define USB_DEVICE_ID_MX3000_RECEIVER 0xc513
+#define USB_DEVICE_ID_LOGITECH_UNIFYING_RECEIVER 0xc52b
+#define USB_DEVICE_ID_LOGITECH_UNIFYING_RECEIVER_2 0xc532
+#define USB_DEVICE_ID_SPACETRAVELLER 0xc623
+#define USB_DEVICE_ID_SPACENAVIGATOR 0xc626
+#define USB_DEVICE_ID_DINOVO_DESKTOP 0xc704
+#define USB_DEVICE_ID_DINOVO_EDGE 0xc714
+#define USB_DEVICE_ID_DINOVO_MINI 0xc71f
+#define USB_DEVICE_ID_LOGITECH_MOMO_WHEEL2 0xca03
+
+#define USB_VENDOR_ID_LUMIO 0x202e
+#define USB_DEVICE_ID_CRYSTALTOUCH 0x0006
+#define USB_DEVICE_ID_CRYSTALTOUCH_DUAL 0x0007
+
+#define USB_VENDOR_ID_MADCATZ 0x0738
+#define USB_DEVICE_ID_MADCATZ_BEATPAD 0x4540
+
+#define USB_VENDOR_ID_MCC 0x09db
+#define USB_DEVICE_ID_MCC_PMD1024LS 0x0076
+#define USB_DEVICE_ID_MCC_PMD1208LS 0x007a
+
+#define USB_VENDOR_ID_MGE 0x0463
+#define USB_DEVICE_ID_MGE_UPS 0xffff
+#define USB_DEVICE_ID_MGE_UPS1 0x0001
+
+#define USB_VENDOR_ID_MICROCHIP 0x04d8
+#define USB_DEVICE_ID_PICKIT1 0x0032
+#define USB_DEVICE_ID_PICKIT2 0x0033
+#define USB_DEVICE_ID_PICOLCD 0xc002
+#define USB_DEVICE_ID_PICOLCD_BOOTLOADER 0xf002
+
+#define USB_VENDOR_ID_MICROSOFT 0x045e
+#define USB_DEVICE_ID_SIDEWINDER_GV 0x003b
+#define USB_DEVICE_ID_WIRELESS_OPTICAL_DESKTOP_3_0 0x009d
+#define USB_DEVICE_ID_MS_NE4K 0x00db
+#define USB_DEVICE_ID_MS_LK6K 0x00f9
+#define USB_DEVICE_ID_MS_PRESENTER_8K_BT 0x0701
+#define USB_DEVICE_ID_MS_PRESENTER_8K_USB 0x0713
+#define USB_DEVICE_ID_MS_DIGITAL_MEDIA_3K 0x0730
+#define USB_DEVICE_ID_MS_COMFORT_MOUSE_4500 0x076c
+
+#define USB_VENDOR_ID_MOJO 0x8282
+#define USB_DEVICE_ID_RETRO_ADAPTER 0x3201
+
+#define USB_VENDOR_ID_MONTEREY 0x0566
+#define USB_DEVICE_ID_GENIUS_KB29E 0x3004
+
+#define USB_VENDOR_ID_NATIONAL_SEMICONDUCTOR 0x0400
+#define USB_DEVICE_ID_N_S_HARMONY 0xc359
+
+#define USB_VENDOR_ID_NATSU 0x08b7
+#define USB_DEVICE_ID_NATSU_GAMEPAD 0x0001
+
+#define USB_VENDOR_ID_NCR 0x0404
+#define USB_DEVICE_ID_NCR_FIRST 0x0300
+#define USB_DEVICE_ID_NCR_LAST 0x03ff
+
+#define USB_VENDOR_ID_NEC 0x073e
+#define USB_DEVICE_ID_NEC_USB_GAME_PAD 0x0301
+
+#define USB_VENDOR_ID_NEXTWINDOW 0x1926
+#define USB_DEVICE_ID_NEXTWINDOW_TOUCHSCREEN 0x0003
+
+#define USB_VENDOR_ID_NINTENDO 0x057e
+#define USB_DEVICE_ID_NINTENDO_WIIMOTE 0x0306
+
+#define USB_VENDOR_ID_NOVATEK 0x0603
+#define USB_DEVICE_ID_NOVATEK_PCT 0x0600
+
+#define USB_VENDOR_ID_NTRIG 0x1b96
+#define USB_DEVICE_ID_NTRIG_TOUCH_SCREEN 0x0001
+#define USB_DEVICE_ID_NTRIG_TOUCH_SCREEN_1 0x0003
+#define USB_DEVICE_ID_NTRIG_TOUCH_SCREEN_2 0x0004
+#define USB_DEVICE_ID_NTRIG_TOUCH_SCREEN_3 0x0005
+#define USB_DEVICE_ID_NTRIG_TOUCH_SCREEN_4 0x0006
+#define USB_DEVICE_ID_NTRIG_TOUCH_SCREEN_5 0x0007
+#define USB_DEVICE_ID_NTRIG_TOUCH_SCREEN_6 0x0008
+#define USB_DEVICE_ID_NTRIG_TOUCH_SCREEN_7 0x0009
+#define USB_DEVICE_ID_NTRIG_TOUCH_SCREEN_8 0x000A
+#define USB_DEVICE_ID_NTRIG_TOUCH_SCREEN_9 0x000B
+#define USB_DEVICE_ID_NTRIG_TOUCH_SCREEN_10 0x000C
+#define USB_DEVICE_ID_NTRIG_TOUCH_SCREEN_11 0x000D
+#define USB_DEVICE_ID_NTRIG_TOUCH_SCREEN_12 0x000E
+#define USB_DEVICE_ID_NTRIG_TOUCH_SCREEN_13 0x000F
+#define USB_DEVICE_ID_NTRIG_TOUCH_SCREEN_14 0x0010
+#define USB_DEVICE_ID_NTRIG_TOUCH_SCREEN_15 0x0011
+#define USB_DEVICE_ID_NTRIG_TOUCH_SCREEN_16 0x0012
+#define USB_DEVICE_ID_NTRIG_TOUCH_SCREEN_17 0x0013
+#define USB_DEVICE_ID_NTRIG_TOUCH_SCREEN_18 0x0014
+
+#define USB_VENDOR_ID_ONTRAK 0x0a07
+#define USB_DEVICE_ID_ONTRAK_ADU100 0x0064
+
+#define USB_VENDOR_ID_ORTEK 0x05a4
+#define USB_DEVICE_ID_ORTEK_PKB1700 0x1700
+#define USB_DEVICE_ID_ORTEK_WKB2000 0x2000
+
+#define USB_VENDOR_ID_PANASONIC 0x04da
+#define USB_DEVICE_ID_PANABOARD_UBT780 0x1044
+#define USB_DEVICE_ID_PANABOARD_UBT880 0x104d
+
+#define USB_VENDOR_ID_PANJIT 0x134c
+
+#define USB_VENDOR_ID_PANTHERLORD 0x0810
+#define USB_DEVICE_ID_PANTHERLORD_TWIN_USB_JOYSTICK 0x0001
+
+#define USB_VENDOR_ID_PENMOUNT 0x14e1
+#define USB_DEVICE_ID_PENMOUNT_PCI 0x3500
+
+#define USB_VENDOR_ID_PETALYNX 0x18b1
+#define USB_DEVICE_ID_PETALYNX_MAXTER_REMOTE 0x0037
+
+#define USB_VENDOR_ID_PHILIPS 0x0471
+#define USB_DEVICE_ID_PHILIPS_IEEE802154_DONGLE 0x0617
+
+#define USB_VENDOR_ID_PI_ENGINEERING 0x05f3
+#define USB_DEVICE_ID_PI_ENGINEERING_VEC_USB_FOOTPEDAL 0xff
+
+#define USB_VENDOR_ID_PIXART 0x093a
+#define USB_DEVICE_ID_PIXART_OPTICAL_TOUCH_SCREEN 0x8001
+#define USB_DEVICE_ID_PIXART_OPTICAL_TOUCH_SCREEN1 0x8002
+#define USB_DEVICE_ID_PIXART_OPTICAL_TOUCH_SCREEN2 0x8003
+
+#define USB_VENDOR_ID_PLAYDOTCOM 0x0b43
+#define USB_DEVICE_ID_PLAYDOTCOM_EMS_USBII 0x0003
+
+#define USB_VENDOR_ID_POWERCOM 0x0d9f
+#define USB_DEVICE_ID_POWERCOM_UPS 0x0002
+
+#define USB_VENDOR_ID_PRODIGE 0x05af
+#define USB_DEVICE_ID_PRODIGE_CORDLESS 0x3062
+
+#define USB_VENDOR_ID_QUANTA 0x0408
+#define USB_DEVICE_ID_QUANTA_OPTICAL_TOUCH 0x3000
+#define USB_DEVICE_ID_QUANTA_OPTICAL_TOUCH_3001 0x3001
+#define USB_DEVICE_ID_QUANTA_OPTICAL_TOUCH_3008 0x3008
+
+#define USB_VENDOR_ID_ROCCAT 0x1e7d
+#define USB_DEVICE_ID_ROCCAT_ARVO 0x30d4
+#define USB_DEVICE_ID_ROCCAT_ISKU 0x319c
+#define USB_DEVICE_ID_ROCCAT_KONE 0x2ced
+#define USB_DEVICE_ID_ROCCAT_KONEPLUS 0x2d51
+#define USB_DEVICE_ID_ROCCAT_KONEXTD 0x2e22
+#define USB_DEVICE_ID_ROCCAT_KOVAPLUS 0x2d50
+#define USB_DEVICE_ID_ROCCAT_LUA 0x2c2e
+#define USB_DEVICE_ID_ROCCAT_PYRA_WIRED 0x2c24
+#define USB_DEVICE_ID_ROCCAT_PYRA_WIRELESS 0x2cf6
+#define USB_DEVICE_ID_ROCCAT_SAVU 0x2d5a
+
+#define USB_VENDOR_ID_SAITEK 0x06a3
+#define USB_DEVICE_ID_SAITEK_RUMBLEPAD 0xff17
+#define USB_DEVICE_ID_SAITEK_PS1000 0x0621
+
+#define USB_VENDOR_ID_SAMSUNG 0x0419
+#define USB_DEVICE_ID_SAMSUNG_IR_REMOTE 0x0001
+#define USB_DEVICE_ID_SAMSUNG_WIRELESS_KBD_MOUSE 0x0600
+
+#define USB_VENDOR_ID_SENNHEISER 0x1395
+#define USB_DEVICE_ID_SENNHEISER_BTD500USB 0x002c
+
+#define USB_VENDOR_ID_SIGMA_MICRO 0x1c4f
+#define USB_DEVICE_ID_SIGMA_MICRO_KEYBOARD 0x0002
+
+#define USB_VENDOR_ID_SIGMATEL 0x066F
+#define USB_DEVICE_ID_SIGMATEL_STMP3780 0x3780
+
+#define USB_VENDOR_ID_SKYCABLE 0x1223
+#define USB_DEVICE_ID_SKYCABLE_WIRELESS_PRESENTER 0x3F07
+
+#define USB_VENDOR_ID_SONY 0x054c
+#define USB_DEVICE_ID_SONY_VAIO_VGX_MOUSE 0x024b
+#define USB_DEVICE_ID_SONY_PS3_BDREMOTE 0x0306
+#define USB_DEVICE_ID_SONY_PS3_CONTROLLER 0x0268
+#define USB_DEVICE_ID_SONY_NAVIGATION_CONTROLLER 0x042f
+
+#define USB_VENDOR_ID_SOUNDGRAPH 0x15c2
+#define USB_DEVICE_ID_SOUNDGRAPH_IMON_FIRST 0x0034
+#define USB_DEVICE_ID_SOUNDGRAPH_IMON_LAST 0x0046
+
+#define USB_VENDOR_ID_STANTUM 0x1f87
+#define USB_DEVICE_ID_MTP 0x0002
+
+#define USB_VENDOR_ID_STANTUM_STM 0x0483
+#define USB_DEVICE_ID_MTP_STM 0x3261
+#define USB_DEVICE_ID_SENSOR_HUB_7014 0x7014
+
+#define USB_VENDOR_ID_STANTUM_SITRONIX 0x1403
+#define USB_DEVICE_ID_MTP_SITRONIX 0x5001
+
+#define USB_VENDOR_ID_SUN 0x0430
+#define USB_DEVICE_ID_RARITAN_KVM_DONGLE 0xcdab
+
+#define USB_VENDOR_ID_SUNPLUS 0x04fc
+#define USB_DEVICE_ID_SUNPLUS_WDESKTOP 0x05d8
+
+#define USB_VENDOR_ID_SYMBOL 0x05e0
+#define USB_DEVICE_ID_SYMBOL_SCANNER_1 0x0800
+#define USB_DEVICE_ID_SYMBOL_SCANNER_2 0x1300
+
+#define USB_VENDOR_ID_SYNAPTICS 0x06cb
+#define USB_DEVICE_ID_SYNAPTICS_TP 0x0001
+#define USB_DEVICE_ID_SYNAPTICS_INT_TP 0x0002
+#define USB_DEVICE_ID_SYNAPTICS_CPAD 0x0003
+#define USB_DEVICE_ID_SYNAPTICS_TS 0x0006
+#define USB_DEVICE_ID_SYNAPTICS_STICK 0x0007
+#define USB_DEVICE_ID_SYNAPTICS_WP 0x0008
+#define USB_DEVICE_ID_SYNAPTICS_COMP_TP 0x0009
+#define USB_DEVICE_ID_SYNAPTICS_WTP 0x0010
+#define USB_DEVICE_ID_SYNAPTICS_DPAD 0x0013
+
+#define USB_VENDOR_ID_THRUSTMASTER 0x044f
+
+#define USB_VENDOR_ID_TIVO 0x150a
+#define USB_DEVICE_ID_TIVO_SLIDE_BT 0x1200
+#define USB_DEVICE_ID_TIVO_SLIDE 0x1201
+
+#define USB_VENDOR_ID_TOPSEED 0x0766
+#define USB_DEVICE_ID_TOPSEED_CYBERLINK 0x0204
+
+#define USB_VENDOR_ID_TOPSEED2 0x1784
+#define USB_DEVICE_ID_TOPSEED2_RF_COMBO 0x0004
+#define USB_DEVICE_ID_TOPSEED2_PERIPAD_701 0x0016
+
+#define USB_VENDOR_ID_TOPMAX 0x0663
+#define USB_DEVICE_ID_TOPMAX_COBRAPAD 0x0103
+
+#define USB_VENDOR_ID_TOUCH_INTL 0x1e5e
+#define USB_DEVICE_ID_TOUCH_INTL_MULTI_TOUCH 0x0313
+
+#define USB_VENDOR_ID_TOUCHPACK 0x1bfd
+#define USB_DEVICE_ID_TOUCHPACK_RTS 0x1688
+
+#define USB_VENDOR_ID_TPV 0x25aa
+#define USB_DEVICE_ID_TPV_OPTICAL_TOUCHSCREEN 0x8883
+
+#define USB_VENDOR_ID_TURBOX 0x062a
+#define USB_DEVICE_ID_TURBOX_KEYBOARD 0x0201
+#define USB_DEVICE_ID_TURBOX_TOUCHSCREEN_MOSART 0x7100
+
+#define USB_VENDOR_ID_TWINHAN 0x6253
+#define USB_DEVICE_ID_TWINHAN_IR_REMOTE 0x0100
+
+#define USB_VENDOR_ID_UCLOGIC 0x5543
+#define USB_DEVICE_ID_UCLOGIC_TABLET_PF1209 0x0042
+#define USB_DEVICE_ID_UCLOGIC_TABLET_KNA5 0x6001
+#define USB_DEVICE_ID_UCLOGIC_TABLET_TWA60 0x0064
+#define USB_DEVICE_ID_UCLOGIC_TABLET_WP4030U 0x0003
+#define USB_DEVICE_ID_UCLOGIC_TABLET_WP5540U 0x0004
+#define USB_DEVICE_ID_UCLOGIC_TABLET_WP8060U 0x0005
+#define USB_DEVICE_ID_UCLOGIC_TABLET_WP1062 0x0064
+#define USB_DEVICE_ID_UCLOGIC_WIRELESS_TABLET_TWHL850 0x0522
+#define USB_DEVICE_ID_UCLOGIC_TABLET_TWHA60 0x0781
+
+#define USB_VENDOR_ID_UNITEC 0x227d
+#define USB_DEVICE_ID_UNITEC_USB_TOUCH_0709 0x0709
+#define USB_DEVICE_ID_UNITEC_USB_TOUCH_0A19 0x0a19
+
+#define USB_VENDOR_ID_VERNIER 0x08f7
+#define USB_DEVICE_ID_VERNIER_LABPRO 0x0001
+#define USB_DEVICE_ID_VERNIER_GOTEMP 0x0002
+#define USB_DEVICE_ID_VERNIER_SKIP 0x0003
+#define USB_DEVICE_ID_VERNIER_CYCLOPS 0x0004
+#define USB_DEVICE_ID_VERNIER_LCSPEC 0x0006
+
+#define USB_VENDOR_ID_WACOM 0x056a
+#define USB_DEVICE_ID_WACOM_GRAPHIRE_BLUETOOTH 0x81
+#define USB_DEVICE_ID_WACOM_INTUOS4_BLUETOOTH 0x00BD
+
+#define USB_VENDOR_ID_WALTOP 0x172f
+#define USB_DEVICE_ID_WALTOP_SLIM_TABLET_5_8_INCH 0x0032
+#define USB_DEVICE_ID_WALTOP_SLIM_TABLET_12_1_INCH 0x0034
+#define USB_DEVICE_ID_WALTOP_Q_PAD 0x0037
+#define USB_DEVICE_ID_WALTOP_PID_0038 0x0038
+#define USB_DEVICE_ID_WALTOP_MEDIA_TABLET_10_6_INCH 0x0501
+#define USB_DEVICE_ID_WALTOP_MEDIA_TABLET_14_1_INCH 0x0500
+#define USB_DEVICE_ID_WALTOP_SIRIUS_BATTERY_FREE_TABLET 0x0502
+
+#define USB_VENDOR_ID_WISEGROUP 0x0925
+#define USB_DEVICE_ID_SMARTJOY_PLUS 0x0005
+#define USB_DEVICE_ID_1_PHIDGETSERVO_20 0x8101
+#define USB_DEVICE_ID_4_PHIDGETSERVO_20 0x8104
+#define USB_DEVICE_ID_8_8_4_IF_KIT 0x8201
+#define USB_DEVICE_ID_SUPER_JOY_BOX_3 0x8888
+#define USB_DEVICE_ID_QUAD_USB_JOYPAD 0x8800
+#define USB_DEVICE_ID_DUAL_USB_JOYPAD 0x8866
+
+#define USB_VENDOR_ID_WISEGROUP_LTD 0x6666
+#define USB_VENDOR_ID_WISEGROUP_LTD2 0x6677
+#define USB_DEVICE_ID_SMARTJOY_DUAL_PLUS 0x8802
+#define USB_DEVICE_ID_SUPER_JOY_BOX_3_PRO 0x8801
+#define USB_DEVICE_ID_SUPER_DUAL_BOX_PRO 0x8802
+#define USB_DEVICE_ID_SUPER_JOY_BOX_5_PRO 0x8804
+
+#define USB_VENDOR_ID_X_TENSIONS 0x1ae7
+#define USB_DEVICE_ID_SPEEDLINK_VAD_CEZANNE 0x9001
+
+#define USB_VENDOR_ID_XAT 0x2505
+#define USB_DEVICE_ID_XAT_CSR 0x0220
+
+#define USB_VENDOR_ID_XIROKU 0x1477
+#define USB_DEVICE_ID_XIROKU_SPX 0x1006
+#define USB_DEVICE_ID_XIROKU_MPX 0x1007
+#define USB_DEVICE_ID_XIROKU_CSR 0x100e
+#define USB_DEVICE_ID_XIROKU_SPX1 0x1021
+#define USB_DEVICE_ID_XIROKU_CSR1 0x1022
+#define USB_DEVICE_ID_XIROKU_MPX1 0x1023
+#define USB_DEVICE_ID_XIROKU_SPX2 0x1024
+#define USB_DEVICE_ID_XIROKU_CSR2 0x1025
+#define USB_DEVICE_ID_XIROKU_MPX2 0x1026
+
+#define USB_VENDOR_ID_YEALINK 0x6993
+#define USB_DEVICE_ID_YEALINK_P1K_P4K_B2K 0xb001
+
+#define USB_VENDOR_ID_ZEROPLUS 0x0c12
+
+#define USB_VENDOR_ID_ZYDACRON 0x13EC
+#define USB_DEVICE_ID_ZYDACRON_REMOTE_CONTROL 0x0006
+
+#define USB_VENDOR_ID_ZYTRONIC 0x14c8
+#define USB_DEVICE_ID_ZYTRONIC_ZXY100 0x0005
+
+#define USB_VENDOR_ID_PRIMAX 0x0461
+#define USB_DEVICE_ID_PRIMAX_KEYBOARD 0x4e05
+
+#endif
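[Editor's note: these constants are consumed by backported HID drivers through their device match tables. A minimal sketch, assuming a hypothetical driver table name; HID_USB_DEVICE and MODULE_DEVICE_TABLE are the standard in-kernel macros, not part of this patch:

	static const struct hid_device_id example_hid_table[] = {
		/* match one of the IDs defined above */
		{ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH,
				 USB_DEVICE_ID_LOGITECH_RUMBLEPAD2) },
		{ }	/* terminating entry */
	};
	MODULE_DEVICE_TABLE(hid, example_hid_table);
]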
diff --git a/compat/lib-cordic.c b/compat/lib-cordic.c
new file mode 100644
index 0000000..6cf4778
--- /dev/null
+++ b/compat/lib-cordic.c
@@ -0,0 +1,101 @@
+/*
+ * Copyright (c) 2011 Broadcom Corporation
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
+ * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
+ * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
+ * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+#include <linux/module.h>
+#include <linux/cordic.h>
+
+#define CORDIC_ANGLE_GEN 39797
+#define CORDIC_PRECISION_SHIFT 16
+#define CORDIC_NUM_ITER (CORDIC_PRECISION_SHIFT + 2)
+
+#define FIXED(X) ((s32)((X) << CORDIC_PRECISION_SHIFT))
+#define FLOAT(X) (((X) >= 0) \
+		? ((((X) >> (CORDIC_PRECISION_SHIFT - 1)) + 1) >> 1) \
+		: -((((-(X)) >> (CORDIC_PRECISION_SHIFT - 1)) + 1) >> 1))
+
+static const s32 arctan_table[] = {
+	2949120,
+	1740967,
+	919879,
+	466945,
+	234379,
+	117304,
+	58666,
+	29335,
+	14668,
+	7334,
+	3667,
+	1833,
+	917,
+	458,
+	229,
+	115,
+	57,
+	29
+};
+
+/*
+ * cordic_calc_iq() - calculates the i/q coordinate for a given angle
+ *
+ * theta: angle in degrees for which the i/q coordinate is to be calculated
+ * coord: function output parameter holding the i/q coordinate
+ */
+struct cordic_iq cordic_calc_iq(s32 theta)
+{
+	struct cordic_iq coord;
+	s32 angle, valtmp;
+	unsigned iter;
+	int signx = 1;
+	int signtheta;
+
+	coord.i = CORDIC_ANGLE_GEN;
+	coord.q = 0;
+	angle = 0;
+
+	theta = FIXED(theta);
+	signtheta = (theta < 0) ? -1 : 1;
+	theta = ((theta + FIXED(180) * signtheta) % FIXED(360)) -
+		FIXED(180) * signtheta;
+
+	if (FLOAT(theta) > 90) {
+		theta -= FIXED(180);
+		signx = -1;
+	} else if (FLOAT(theta) < -90) {
+		theta += FIXED(180);
+		signx = -1;
+	}
+
+	for (iter = 0; iter < CORDIC_NUM_ITER; iter++) {
+		if (theta > angle) {
+			valtmp = coord.i - (coord.q >> iter);
+			coord.q += (coord.i >> iter);
+			angle += arctan_table[iter];
+		} else {
+			valtmp = coord.i + (coord.q >> iter);
+			coord.q -= (coord.i >> iter);
+			angle -= arctan_table[iter];
+		}
+		coord.i = valtmp;
+	}
+
+	coord.i *= signx;
+	coord.q *= signx;
+	return coord;
+}
+EXPORT_SYMBOL(cordic_calc_iq);
+
+MODULE_DESCRIPTION("CORDIC algorithm");
+MODULE_AUTHOR("Broadcom Corporation");
+MODULE_LICENSE("Dual BSD/GPL");
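[Editor's note: a quick usage sketch, not part of the patch. In this version of the helper the input is a plain angle in degrees and the result is scaled by 2^CORDIC_PRECISION_SHIFT:

	#include <linux/cordic.h>

	/* coord.i is roughly cos(30deg) << 16 and coord.q roughly
	 * sin(30deg) << 16, so coord.q should come out near 32768
	 * (0.5 in Q16 fixed point). */
	struct cordic_iq coord = cordic_calc_iq(30);
]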
diff --git a/compat/lib-rhashtable.c b/compat/lib-rhashtable.c
new file mode 100644
index 0000000..2e49827
--- /dev/null
+++ b/compat/lib-rhashtable.c
@@ -0,0 +1,1187 @@
+/*
+ * Resizable, Scalable, Concurrent Hash Table
+ *
+ * Copyright (c) 2015 Herbert Xu
+ * Copyright (c) 2014-2015 Thomas Graf
+ * Copyright (c) 2008-2014 Patrick McHardy
+ *
+ * Code partially derived from nft_hash
+ * Rewritten with rehash code from br_multicast plus single list
+ * pointer as suggested by Josh Triplett
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/atomic.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/log2.h>
+#include <linux/sched.h>
+#include <linux/rculist.h>
+#include <linux/slab.h>
+#include <linux/vmalloc.h>
+#include <linux/mm.h>
+#include <linux/jhash.h>
+#include <linux/random.h>
+#include <linux/rhashtable.h>
+#include <linux/err.h>
+#include <linux/export.h>
+
+#define HASH_DEFAULT_SIZE 64UL
+#define HASH_MIN_SIZE 4U
+#define BUCKET_LOCKS_PER_CPU 32UL
+
+union nested_table {
+	union nested_table __rcu *table;
+	struct rhash_head __rcu *bucket;
+};
+
+static u32 head_hashfn(struct rhashtable *ht,
+		       const struct bucket_table *tbl,
+		       const struct rhash_head *he)
+{
+	return rht_head_hashfn(ht, tbl, he, ht->p);
+}
+
+#ifdef CONFIG_PROVE_LOCKING
+#define ASSERT_RHT_MUTEX(HT) BUG_ON(!lockdep_rht_mutex_is_held(HT))
+
+int lockdep_rht_mutex_is_held(struct rhashtable *ht)
+{
+	return (debug_locks) ? lockdep_is_held(&ht->mutex) : 1;
+}
+EXPORT_SYMBOL_GPL(lockdep_rht_mutex_is_held);
+
+int lockdep_rht_bucket_is_held(const struct bucket_table *tbl, u32 hash)
+{
+	spinlock_t *lock = rht_bucket_lock(tbl, hash);
+
+	return (debug_locks) ? lockdep_is_held(lock) : 1;
+}
+EXPORT_SYMBOL_GPL(lockdep_rht_bucket_is_held);
+#else
+#define ASSERT_RHT_MUTEX(HT)
+#endif
+
+
+static int alloc_bucket_locks(struct rhashtable *ht, struct bucket_table *tbl,
+			      gfp_t gfp)
+{
+	unsigned int i, size;
+#if defined(CONFIG_PROVE_LOCKING)
+	unsigned int nr_pcpus = 2;
+#else
+	unsigned int nr_pcpus = num_possible_cpus();
+#endif
+
+	nr_pcpus = min_t(unsigned int, nr_pcpus, 64UL);
+	size = roundup_pow_of_two(nr_pcpus * ht->p.locks_mul);
+
+	/* Never allocate more than 0.5 locks per bucket */
+	size = min_t(unsigned int, size, tbl->size >> 1);
+
+	if (tbl->nest)
+		size = min(size, 1U << tbl->nest);
+
+	if (sizeof(spinlock_t) != 0) {
+#if LINUX_VERSION_IS_LESS(4,12,0)
+		tbl->locks = NULL;
+#ifdef CONFIG_NUMA
+		if (size * sizeof(spinlock_t) > PAGE_SIZE &&
+		    gfp == GFP_KERNEL)
+			tbl->locks = vmalloc(size * sizeof(spinlock_t));
+#endif
+		if (gfp != GFP_KERNEL)
+			gfp |= __GFP_NOWARN | __GFP_NORETRY;
+
+		if (!tbl->locks)
+			tbl->locks = kmalloc_array(size, sizeof(spinlock_t),
+						   gfp);
+#else
+		if (gfpflags_allow_blocking(gfp))
+			tbl->locks = kvmalloc(size * sizeof(spinlock_t), gfp);
+		else
+			tbl->locks = kmalloc_array(size, sizeof(spinlock_t),
+						   gfp);
+#endif
+		if (!tbl->locks)
+			return -ENOMEM;
+		for (i = 0; i < size; i++)
+			spin_lock_init(&tbl->locks[i]);
+	}
+	tbl->locks_mask = size - 1;
+
+	return 0;
+}
+
+static void nested_table_free(union nested_table *ntbl, unsigned int size)
+{
+	const unsigned int shift = PAGE_SHIFT - ilog2(sizeof(void *));
+	const unsigned int len = 1 << shift;
+	unsigned int i;
+
+	ntbl = rcu_dereference_raw(ntbl->table);
+	if (!ntbl)
+		return;
+
+	if (size > len) {
+		size >>= shift;
+		for (i = 0; i < len; i++)
+			nested_table_free(ntbl + i, size);
+	}
+
+	kfree(ntbl);
+}
+
+static void nested_bucket_table_free(const struct bucket_table *tbl)
+{
+	unsigned int size = tbl->size >> tbl->nest;
+	unsigned int len = 1 << tbl->nest;
+	union nested_table *ntbl;
+	unsigned int i;
+
+	ntbl = (union nested_table *)rcu_dereference_raw(tbl->buckets[0]);
+
+	for (i = 0; i < len; i++)
+		nested_table_free(ntbl + i, size);
+
+	kfree(ntbl);
+}
+
+static void bucket_table_free(const struct bucket_table *tbl)
+{
+	if (tbl->nest)
+		nested_bucket_table_free(tbl);
+
+	kvfree(tbl->locks);
+	kvfree(tbl);
+}
+
+static void bucket_table_free_rcu(struct rcu_head *head)
+{
+	bucket_table_free(container_of(head, struct bucket_table, rcu));
+}
+
+static union nested_table *nested_table_alloc(struct rhashtable *ht,
+					      union nested_table __rcu **prev,
+					      unsigned int shifted,
+					      unsigned int nhash)
+{
+	union nested_table *ntbl;
+	int i;
+
+	ntbl =
rcu_dereference(*prev); + if (ntbl) + return ntbl; + + ntbl = kzalloc(PAGE_SIZE, GFP_ATOMIC); + + if (ntbl && shifted) { + for (i = 0; i < PAGE_SIZE / sizeof(ntbl[0].bucket); i++) + INIT_RHT_NULLS_HEAD(ntbl[i].bucket, ht, + (i << shifted) | nhash); + } + + rcu_assign_pointer(*prev, ntbl); + + return ntbl; +} + +static struct bucket_table *nested_bucket_table_alloc(struct rhashtable *ht, + size_t nbuckets, + gfp_t gfp) +{ + const unsigned int shift = PAGE_SHIFT - ilog2(sizeof(void *)); + struct bucket_table *tbl; + size_t size; + + if (nbuckets < (1 << (shift + 1))) + return NULL; + + size = sizeof(*tbl) + sizeof(tbl->buckets[0]); + + tbl = kzalloc(size, gfp); + if (!tbl) + return NULL; + + if (!nested_table_alloc(ht, (union nested_table __rcu **)tbl->buckets, + 0, 0)) { + kfree(tbl); + return NULL; + } + + tbl->nest = (ilog2(nbuckets) - 1) % shift + 1; + + return tbl; +} + +static struct bucket_table *bucket_table_alloc(struct rhashtable *ht, + size_t nbuckets, + gfp_t gfp) +{ + struct bucket_table *tbl = NULL; + size_t size; + int i; + + size = sizeof(*tbl) + nbuckets * sizeof(tbl->buckets[0]); + if (size <= (PAGE_SIZE << PAGE_ALLOC_COSTLY_ORDER) || + gfp != GFP_KERNEL) + tbl = kzalloc(size, gfp | __GFP_NOWARN | __GFP_NORETRY); + if (tbl == NULL && gfp == GFP_KERNEL) + tbl = vzalloc(size); + + size = nbuckets; + + if (tbl == NULL && gfp != GFP_KERNEL) { + tbl = nested_bucket_table_alloc(ht, nbuckets, gfp); + nbuckets = 0; + } + if (tbl == NULL) + return NULL; + + tbl->size = size; + + if (alloc_bucket_locks(ht, tbl, gfp) < 0) { + bucket_table_free(tbl); + return NULL; + } + + INIT_LIST_HEAD(&tbl->walkers); + + get_random_bytes(&tbl->hash_rnd, sizeof(tbl->hash_rnd)); + + for (i = 0; i < nbuckets; i++) + INIT_RHT_NULLS_HEAD(tbl->buckets[i], ht, i); + + return tbl; +} + +static struct bucket_table *rhashtable_last_table(struct rhashtable *ht, + struct bucket_table *tbl) +{ + struct bucket_table *new_tbl; + + do { + new_tbl = tbl; + tbl = rht_dereference_rcu(tbl->future_tbl, ht); + } while (tbl); + + return new_tbl; +} + +static int rhashtable_rehash_one(struct rhashtable *ht, unsigned int old_hash) +{ + struct bucket_table *old_tbl = rht_dereference(ht->tbl, ht); + struct bucket_table *new_tbl = rhashtable_last_table(ht, + rht_dereference_rcu(old_tbl->future_tbl, ht)); + struct rhash_head __rcu **pprev = rht_bucket_var(old_tbl, old_hash); + int err = -EAGAIN; + struct rhash_head *head, *next, *entry; + spinlock_t *new_bucket_lock; + unsigned int new_hash; + + if (new_tbl->nest) + goto out; + + err = -ENOENT; + + rht_for_each(entry, old_tbl, old_hash) { + err = 0; + next = rht_dereference_bucket(entry->next, old_tbl, old_hash); + + if (rht_is_a_nulls(next)) + break; + + pprev = &entry->next; + } + + if (err) + goto out; + + new_hash = head_hashfn(ht, new_tbl, entry); + + new_bucket_lock = rht_bucket_lock(new_tbl, new_hash); + + spin_lock_nested(new_bucket_lock, SINGLE_DEPTH_NESTING); + head = rht_dereference_bucket(new_tbl->buckets[new_hash], + new_tbl, new_hash); + + RCU_INIT_POINTER(entry->next, head); + + rcu_assign_pointer(new_tbl->buckets[new_hash], entry); + spin_unlock(new_bucket_lock); + + rcu_assign_pointer(*pprev, next); + +out: + return err; +} + +static int rhashtable_rehash_chain(struct rhashtable *ht, + unsigned int old_hash) +{ + struct bucket_table *old_tbl = rht_dereference(ht->tbl, ht); + spinlock_t *old_bucket_lock; + int err; + + old_bucket_lock = rht_bucket_lock(old_tbl, old_hash); + + spin_lock_bh(old_bucket_lock); + while (!(err = rhashtable_rehash_one(ht, old_hash))) + 
		;
+
+	if (err == -ENOENT) {
+		old_tbl->rehash++;
+		err = 0;
+	}
+	spin_unlock_bh(old_bucket_lock);
+
+	return err;
+}
+
+static int rhashtable_rehash_attach(struct rhashtable *ht,
+				    struct bucket_table *old_tbl,
+				    struct bucket_table *new_tbl)
+{
+	/* Protect future_tbl using the first bucket lock. */
+	spin_lock_bh(old_tbl->locks);
+
+	/* Did somebody beat us to it? */
+	if (rcu_access_pointer(old_tbl->future_tbl)) {
+		spin_unlock_bh(old_tbl->locks);
+		return -EEXIST;
+	}
+
+	/* Make insertions go into the new, empty table right away. Deletions
+	 * and lookups will be attempted in both tables until we synchronize.
+	 */
+	rcu_assign_pointer(old_tbl->future_tbl, new_tbl);
+
+	spin_unlock_bh(old_tbl->locks);
+
+	return 0;
+}
+
+static int rhashtable_rehash_table(struct rhashtable *ht)
+{
+	struct bucket_table *old_tbl = rht_dereference(ht->tbl, ht);
+	struct bucket_table *new_tbl;
+	struct rhashtable_walker *walker;
+	unsigned int old_hash;
+	int err;
+
+	new_tbl = rht_dereference(old_tbl->future_tbl, ht);
+	if (!new_tbl)
+		return 0;
+
+	for (old_hash = 0; old_hash < old_tbl->size; old_hash++) {
+		err = rhashtable_rehash_chain(ht, old_hash);
+		if (err)
+			return err;
+	}
+
+	/* Publish the new table pointer. */
+	rcu_assign_pointer(ht->tbl, new_tbl);
+
+	spin_lock(&ht->lock);
+	list_for_each_entry(walker, &old_tbl->walkers, list)
+		walker->tbl = NULL;
+	spin_unlock(&ht->lock);
+
+	/* Wait for readers. All new readers will see the new
+	 * table, and thus no references to the old table will
+	 * remain.
+	 */
+	call_rcu(&old_tbl->rcu, bucket_table_free_rcu);
+
+	return rht_dereference(new_tbl->future_tbl, ht) ? -EAGAIN : 0;
+}
+
+static int rhashtable_rehash_alloc(struct rhashtable *ht,
+				   struct bucket_table *old_tbl,
+				   unsigned int size)
+{
+	struct bucket_table *new_tbl;
+	int err;
+
+	ASSERT_RHT_MUTEX(ht);
+
+	new_tbl = bucket_table_alloc(ht, size, GFP_KERNEL);
+	if (new_tbl == NULL)
+		return -ENOMEM;
+
+	err = rhashtable_rehash_attach(ht, old_tbl, new_tbl);
+	if (err)
+		bucket_table_free(new_tbl);
+
+	return err;
+}
+
+/**
+ * rhashtable_shrink - Shrink hash table while allowing concurrent lookups
+ * @ht: the hash table to shrink
+ *
+ * This function shrinks the hash table to the smallest size that would
+ * not cause it to expand again right away.
+ *
+ * The caller must ensure that no concurrent resizing occurs by holding
+ * ht->mutex.
+ *
+ * The caller must ensure that no concurrent table mutations take place.
+ * It is however valid to have concurrent lookups if they are RCU protected.
+ *
+ * It is valid to have concurrent insertions and deletions protected by per
+ * bucket locks or concurrent RCU protected lookups and traversals.
+ */ +static int rhashtable_shrink(struct rhashtable *ht) +{ + struct bucket_table *old_tbl = rht_dereference(ht->tbl, ht); + unsigned int nelems = atomic_read(&ht->nelems); + unsigned int size = 0; + + if (nelems) + size = roundup_pow_of_two(nelems * 3 / 2); + if (size < ht->p.min_size) + size = ht->p.min_size; + + if (old_tbl->size <= size) + return 0; + + if (rht_dereference(old_tbl->future_tbl, ht)) + return -EEXIST; + + return rhashtable_rehash_alloc(ht, old_tbl, size); +} + +static void rht_deferred_worker(struct work_struct *work) +{ + struct rhashtable *ht; + struct bucket_table *tbl; + int err = 0; + + ht = container_of(work, struct rhashtable, run_work); + mutex_lock(&ht->mutex); + + tbl = rht_dereference(ht->tbl, ht); + tbl = rhashtable_last_table(ht, tbl); + + if (rht_grow_above_75(ht, tbl)) + err = rhashtable_rehash_alloc(ht, tbl, tbl->size * 2); + else if (ht->p.automatic_shrinking && rht_shrink_below_30(ht, tbl)) + err = rhashtable_shrink(ht); + else if (tbl->nest) + err = rhashtable_rehash_alloc(ht, tbl, tbl->size); + + if (!err) + err = rhashtable_rehash_table(ht); + + mutex_unlock(&ht->mutex); + + if (err) + schedule_work(&ht->run_work); +} + +static int rhashtable_insert_rehash(struct rhashtable *ht, + struct bucket_table *tbl) +{ + struct bucket_table *old_tbl; + struct bucket_table *new_tbl; + unsigned int size; + int err; + + old_tbl = rht_dereference_rcu(ht->tbl, ht); + + size = tbl->size; + + err = -EBUSY; + + if (rht_grow_above_75(ht, tbl)) + size *= 2; + /* Do not schedule more than one rehash */ + else if (old_tbl != tbl) + goto fail; + + err = -ENOMEM; + + new_tbl = bucket_table_alloc(ht, size, GFP_ATOMIC); + if (new_tbl == NULL) + goto fail; + + err = rhashtable_rehash_attach(ht, tbl, new_tbl); + if (err) { + bucket_table_free(new_tbl); + if (err == -EEXIST) + err = 0; + } else + schedule_work(&ht->run_work); + + return err; + +fail: + /* Do not fail the insert if someone else did a rehash. */ + if (likely(rcu_dereference_raw(tbl->future_tbl))) + return 0; + + /* Schedule async rehash to retry allocation in process context. */ + if (err == -ENOMEM) + schedule_work(&ht->run_work); + + return err; +} + +static void *rhashtable_lookup_one(struct rhashtable *ht, + struct bucket_table *tbl, unsigned int hash, + const void *key, struct rhash_head *obj) +{ + struct rhashtable_compare_arg arg = { + .ht = ht, + .key = key, + }; + struct rhash_head __rcu **pprev; + struct rhash_head *head; + int elasticity; + + elasticity = ht->elasticity; + pprev = rht_bucket_var(tbl, hash); + rht_for_each_continue(head, *pprev, tbl, hash) { + struct rhlist_head *list; + struct rhlist_head *plist; + + elasticity--; + if (!key || + (ht->p.obj_cmpfn ? 
+ ht->p.obj_cmpfn(&arg, rht_obj(ht, head)) : + rhashtable_compare(&arg, rht_obj(ht, head)))) + continue; + + if (!ht->rhlist) + return rht_obj(ht, head); + + list = container_of(obj, struct rhlist_head, rhead); + plist = container_of(head, struct rhlist_head, rhead); + + RCU_INIT_POINTER(list->next, plist); + head = rht_dereference_bucket(head->next, tbl, hash); + RCU_INIT_POINTER(list->rhead.next, head); + rcu_assign_pointer(*pprev, obj); + + return NULL; + } + + if (elasticity <= 0) + return ERR_PTR(-EAGAIN); + + return ERR_PTR(-ENOENT); +} + +static struct bucket_table *rhashtable_insert_one(struct rhashtable *ht, + struct bucket_table *tbl, + unsigned int hash, + struct rhash_head *obj, + void *data) +{ + struct rhash_head __rcu **pprev; + struct bucket_table *new_tbl; + struct rhash_head *head; + + if (!IS_ERR_OR_NULL(data)) + return ERR_PTR(-EEXIST); + + if (PTR_ERR(data) != -EAGAIN && PTR_ERR(data) != -ENOENT) + return ERR_CAST(data); + + new_tbl = rcu_dereference(tbl->future_tbl); + if (new_tbl) + return new_tbl; + + if (PTR_ERR(data) != -ENOENT) + return ERR_CAST(data); + + if (unlikely(rht_grow_above_max(ht, tbl))) + return ERR_PTR(-E2BIG); + + if (unlikely(rht_grow_above_100(ht, tbl))) + return ERR_PTR(-EAGAIN); + + pprev = rht_bucket_insert(ht, tbl, hash); + if (!pprev) + return ERR_PTR(-ENOMEM); + + head = rht_dereference_bucket(*pprev, tbl, hash); + + RCU_INIT_POINTER(obj->next, head); + if (ht->rhlist) { + struct rhlist_head *list; + + list = container_of(obj, struct rhlist_head, rhead); + RCU_INIT_POINTER(list->next, NULL); + } + + rcu_assign_pointer(*pprev, obj); + + atomic_inc(&ht->nelems); + if (rht_grow_above_75(ht, tbl)) + schedule_work(&ht->run_work); + + return NULL; +} + +static void *rhashtable_try_insert(struct rhashtable *ht, const void *key, + struct rhash_head *obj) +{ + struct bucket_table *new_tbl; + struct bucket_table *tbl; + unsigned int hash; + spinlock_t *lock; + void *data; + + tbl = rcu_dereference(ht->tbl); + + /* All insertions must grab the oldest table containing + * the hashed bucket that is yet to be rehashed. + */ + for (;;) { + hash = rht_head_hashfn(ht, tbl, obj, ht->p); + lock = rht_bucket_lock(tbl, hash); + spin_lock_bh(lock); + + if (tbl->rehash <= hash) + break; + + spin_unlock_bh(lock); + tbl = rcu_dereference(tbl->future_tbl); + } + + data = rhashtable_lookup_one(ht, tbl, hash, key, obj); + new_tbl = rhashtable_insert_one(ht, tbl, hash, obj, data); + if (PTR_ERR(new_tbl) != -EEXIST) + data = ERR_CAST(new_tbl); + + while (!IS_ERR_OR_NULL(new_tbl)) { + tbl = new_tbl; + hash = rht_head_hashfn(ht, tbl, obj, ht->p); + spin_lock_nested(rht_bucket_lock(tbl, hash), + SINGLE_DEPTH_NESTING); + + data = rhashtable_lookup_one(ht, tbl, hash, key, obj); + new_tbl = rhashtable_insert_one(ht, tbl, hash, obj, data); + if (PTR_ERR(new_tbl) != -EEXIST) + data = ERR_CAST(new_tbl); + + spin_unlock(rht_bucket_lock(tbl, hash)); + } + + spin_unlock_bh(lock); + + if (PTR_ERR(data) == -EAGAIN) + data = ERR_PTR(rhashtable_insert_rehash(ht, tbl) ?: + -EAGAIN); + + return data; +} + +void *rhashtable_insert_slow(struct rhashtable *ht, const void *key, + struct rhash_head *obj) +{ + void *data; + + do { + rcu_read_lock(); + data = rhashtable_try_insert(ht, key, obj); + rcu_read_unlock(); + } while (PTR_ERR(data) == -EAGAIN); + + return data; +} +EXPORT_SYMBOL_GPL(rhashtable_insert_slow); + +/** + * rhashtable_walk_enter - Initialise an iterator + * @ht: Table to walk over + * @iter: Hash table Iterator + * + * This function prepares a hash table walk. 
+ *
+ * Note that if you restart a walk after rhashtable_walk_stop you
+ * may see the same object twice. Also, you may miss objects if
+ * there are removals in between rhashtable_walk_stop and the next
+ * call to rhashtable_walk_start.
+ *
+ * For a completely stable walk you should construct your own data
+ * structure outside the hash table.
+ *
+ * This function may sleep so you must not call it from interrupt
+ * context or with spin locks held.
+ *
+ * You must call rhashtable_walk_exit after this function returns.
+ */
+void rhashtable_walk_enter(struct rhashtable *ht, struct rhashtable_iter *iter)
+{
+	iter->ht = ht;
+	iter->p = NULL;
+	iter->slot = 0;
+	iter->skip = 0;
+
+	spin_lock(&ht->lock);
+	iter->walker.tbl =
+		rcu_dereference_protected(ht->tbl, lockdep_is_held(&ht->lock));
+	list_add(&iter->walker.list, &iter->walker.tbl->walkers);
+	spin_unlock(&ht->lock);
+}
+EXPORT_SYMBOL_GPL(rhashtable_walk_enter);
+
+/**
+ * rhashtable_walk_exit - Free an iterator
+ * @iter: Hash table Iterator
+ *
+ * This function frees resources allocated by rhashtable_walk_enter.
+ */
+void rhashtable_walk_exit(struct rhashtable_iter *iter)
+{
+	spin_lock(&iter->ht->lock);
+	if (iter->walker.tbl)
+		list_del(&iter->walker.list);
+	spin_unlock(&iter->ht->lock);
+}
+EXPORT_SYMBOL_GPL(rhashtable_walk_exit);
+
+/**
+ * rhashtable_walk_start - Start a hash table walk
+ * @iter: Hash table iterator
+ *
+ * Start a hash table walk. Note that we take the RCU lock in all
+ * cases including when we return an error. So you must always call
+ * rhashtable_walk_stop to clean up.
+ *
+ * Returns zero if successful.
+ *
+ * Returns -EAGAIN if a resize event occurred. Note that the iterator
+ * will rewind back to the beginning and you may use it immediately
+ * by calling rhashtable_walk_next.
+ */
+int rhashtable_walk_start(struct rhashtable_iter *iter)
+	__acquires(RCU)
+{
+	struct rhashtable *ht = iter->ht;
+
+	rcu_read_lock();
+
+	spin_lock(&ht->lock);
+	if (iter->walker.tbl)
+		list_del(&iter->walker.list);
+	spin_unlock(&ht->lock);
+
+	if (!iter->walker.tbl) {
+		iter->walker.tbl = rht_dereference_rcu(ht->tbl, ht);
+		return -EAGAIN;
+	}
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(rhashtable_walk_start);
+
+/**
+ * rhashtable_walk_next - Return the next object and advance the iterator
+ * @iter: Hash table iterator
+ *
+ * Note that you must call rhashtable_walk_stop when you are finished
+ * with the walk.
+ *
+ * Returns the next object or NULL when the end of the table is reached.
+ *
+ * Returns -EAGAIN if a resize event occurred. Note that the iterator
+ * will rewind back to the beginning and you may continue to use it.
+ */
+void *rhashtable_walk_next(struct rhashtable_iter *iter)
+{
+	struct bucket_table *tbl = iter->walker.tbl;
+	struct rhlist_head *list = iter->list;
+	struct rhashtable *ht = iter->ht;
+	struct rhash_head *p = iter->p;
+	bool rhlist = ht->rhlist;
+
+	if (p) {
+		if (!rhlist || !(list = rcu_dereference(list->next))) {
+			p = rcu_dereference(p->next);
+			list = container_of(p, struct rhlist_head, rhead);
+		}
+		goto next;
+	}
+
+	for (; iter->slot < tbl->size; iter->slot++) {
+		int skip = iter->skip;
+
+		rht_for_each_rcu(p, tbl, iter->slot) {
+			if (rhlist) {
+				list = container_of(p, struct rhlist_head,
+						    rhead);
+				do {
+					if (!skip)
+						goto next;
+					skip--;
+					list = rcu_dereference(list->next);
+				} while (list);
+
+				continue;
+			}
+			if (!skip)
+				break;
+			skip--;
+		}
+
+next:
+		if (!rht_is_a_nulls(p)) {
+			iter->skip++;
+			iter->p = p;
+			iter->list = list;
+			return rht_obj(ht, rhlist ?
&list->rhead : p); + } + + iter->skip = 0; + } + + iter->p = NULL; + + /* Ensure we see any new tables. */ + smp_rmb(); + + iter->walker.tbl = rht_dereference_rcu(tbl->future_tbl, ht); + if (iter->walker.tbl) { + iter->slot = 0; + iter->skip = 0; + return ERR_PTR(-EAGAIN); + } + + return NULL; +} +EXPORT_SYMBOL_GPL(rhashtable_walk_next); + +/** + * rhashtable_walk_stop - Finish a hash table walk + * @iter: Hash table iterator + * + * Finish a hash table walk. + */ +void rhashtable_walk_stop(struct rhashtable_iter *iter) + __releases(RCU) +{ + struct rhashtable *ht; + struct bucket_table *tbl = iter->walker.tbl; + + if (!tbl) + goto out; + + ht = iter->ht; + + spin_lock(&ht->lock); + if (tbl->rehash < tbl->size) + list_add(&iter->walker.list, &tbl->walkers); + else + iter->walker.tbl = NULL; + spin_unlock(&ht->lock); + + iter->p = NULL; + +out: + rcu_read_unlock(); +} +EXPORT_SYMBOL_GPL(rhashtable_walk_stop); + +static size_t rounded_hashtable_size(const struct rhashtable_params *params) +{ + return max(roundup_pow_of_two(params->nelem_hint * 4 / 3), + (unsigned long)params->min_size); +} + +static u32 rhashtable_jhash2(const void *key, u32 length, u32 seed) +{ + return jhash2(key, length, seed); +} + +/** + * rhashtable_init - initialize a new hash table + * @ht: hash table to be initialized + * @params: configuration parameters + * + * Initializes a new hash table based on the provided configuration + * parameters. A table can be configured either with a variable or + * fixed length key: + * + * Configuration Example 1: Fixed length keys + * struct test_obj { + * int key; + * void * my_member; + * struct rhash_head node; + * }; + * + * struct rhashtable_params params = { + * .head_offset = offsetof(struct test_obj, node), + * .key_offset = offsetof(struct test_obj, key), + * .key_len = sizeof(int), + * .hashfn = jhash, + * .nulls_base = (1U << RHT_BASE_SHIFT), + * }; + * + * Configuration Example 2: Variable length keys + * struct test_obj { + * [...] + * struct rhash_head node; + * }; + * + * u32 my_hash_fn(const void *data, u32 len, u32 seed) + * { + * struct test_obj *obj = data; + * + * return [... hash ...]; + * } + * + * struct rhashtable_params params = { + * .head_offset = offsetof(struct test_obj, node), + * .hashfn = jhash, + * .obj_hashfn = my_hash_fn, + * }; + */ +int rhashtable_init(struct rhashtable *ht, + const struct rhashtable_params *params) +{ + struct bucket_table *tbl; + size_t size; + + size = HASH_DEFAULT_SIZE; + + if ((!params->key_len && !params->obj_hashfn) || + (params->obj_hashfn && !params->obj_cmpfn)) + return -EINVAL; + + if (params->nulls_base && params->nulls_base < (1U << RHT_BASE_SHIFT)) + return -EINVAL; + + memset(ht, 0, sizeof(*ht)); + mutex_init(&ht->mutex); + spin_lock_init(&ht->lock); + memcpy(&ht->p, params, sizeof(*params)); + + if (params->min_size) + ht->p.min_size = roundup_pow_of_two(params->min_size); + + if (params->max_size) + ht->p.max_size = rounddown_pow_of_two(params->max_size); + + if (params->insecure_max_entries) + ht->p.insecure_max_entries = + rounddown_pow_of_two(params->insecure_max_entries); + else + ht->p.insecure_max_entries = ht->p.max_size * 2; + + ht->p.min_size = max(ht->p.min_size, HASH_MIN_SIZE); + + if (params->nelem_hint) + size = rounded_hashtable_size(&ht->p); + + /* The maximum (not average) chain length grows with the + * size of the hash table, at a rate of (log N)/(log log N). 
+	 * The value of 16 is selected so that even if the hash
+	 * table grew to 2^32 you would not expect the maximum
+	 * chain length to exceed it unless we are under attack
+	 * (or extremely unlucky).
+	 *
+	 * As this limit is only to detect attacks, we don't need
+	 * to set it to a lower value as you'd need the chain
+	 * length to vastly exceed 16 to have any real effect
+	 * on the system.
+	 */
+	if (!params->insecure_elasticity)
+		ht->elasticity = 16;
+
+	if (params->locks_mul)
+		ht->p.locks_mul = roundup_pow_of_two(params->locks_mul);
+	else
+		ht->p.locks_mul = BUCKET_LOCKS_PER_CPU;
+
+	ht->key_len = ht->p.key_len;
+	if (!params->hashfn) {
+		ht->p.hashfn = jhash;
+
+		if (!(ht->key_len & (sizeof(u32) - 1))) {
+			ht->key_len /= sizeof(u32);
+			ht->p.hashfn = rhashtable_jhash2;
+		}
+	}
+
+	tbl = bucket_table_alloc(ht, size, GFP_KERNEL);
+	if (tbl == NULL)
+		return -ENOMEM;
+
+	atomic_set(&ht->nelems, 0);
+
+	RCU_INIT_POINTER(ht->tbl, tbl);
+
+	INIT_WORK(&ht->run_work, rht_deferred_worker);
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(rhashtable_init);
+
+/**
+ * rhltable_init - initialize a new hash list table
+ * @hlt: hash list table to be initialized
+ * @params: configuration parameters
+ *
+ * Initializes a new hash list table.
+ *
+ * See documentation for rhashtable_init.
+ */
+int rhltable_init(struct rhltable *hlt, const struct rhashtable_params *params)
+{
+	int err;
+
+	/* No rhlist NULLs marking for now. */
+	if (params->nulls_base)
+		return -EINVAL;
+
+	err = rhashtable_init(&hlt->ht, params);
+	hlt->ht.rhlist = true;
+	return err;
+}
+EXPORT_SYMBOL_GPL(rhltable_init);
+
+static void rhashtable_free_one(struct rhashtable *ht, struct rhash_head *obj,
+				void (*free_fn)(void *ptr, void *arg),
+				void *arg)
+{
+	struct rhlist_head *list;
+
+	if (!ht->rhlist) {
+		free_fn(rht_obj(ht, obj), arg);
+		return;
+	}
+
+	list = container_of(obj, struct rhlist_head, rhead);
+	do {
+		obj = &list->rhead;
+		list = rht_dereference(list->next, ht);
+		free_fn(rht_obj(ht, obj), arg);
+	} while (list);
+}
+
+/**
+ * rhashtable_free_and_destroy - free elements and destroy hash table
+ * @ht: the hash table to destroy
+ * @free_fn: callback to release resources of element
+ * @arg: pointer passed to free_fn
+ *
+ * Stops any pending async resize. If defined, invokes free_fn for each
+ * element to release its resources. Please note that RCU protected
+ * readers may still be accessing the elements. Releasing of resources
+ * must occur in a compatible manner. Then frees the bucket array.
+ *
+ * This function will sleep if needed to wait for an async resize
+ * to complete. The caller is responsible for ensuring that no further
+ * write operations occur in parallel.
+ */
+void rhashtable_free_and_destroy(struct rhashtable *ht,
+				 void (*free_fn)(void *ptr, void *arg),
+				 void *arg)
+{
+	struct bucket_table *tbl;
+	unsigned int i;
+
+	cancel_work_sync(&ht->run_work);
+
+	mutex_lock(&ht->mutex);
+	tbl = rht_dereference(ht->tbl, ht);
+	if (free_fn) {
+		for (i = 0; i < tbl->size; i++) {
+			struct rhash_head *pos, *next;
+
+			for (pos = rht_dereference(*rht_bucket(tbl, i), ht),
+			     next = !rht_is_a_nulls(pos) ?
+					rht_dereference(pos->next, ht) : NULL;
+			     !rht_is_a_nulls(pos);
+			     pos = next,
+			     next = !rht_is_a_nulls(pos) ?
+					rht_dereference(pos->next, ht) : NULL)
+				rhashtable_free_one(ht, pos, free_fn, arg);
+		}
+	}
+
+	bucket_table_free(tbl);
+	mutex_unlock(&ht->mutex);
+}
+EXPORT_SYMBOL_GPL(rhashtable_free_and_destroy);
+
+void rhashtable_destroy(struct rhashtable *ht)
+{
+	return rhashtable_free_and_destroy(ht, NULL, NULL);
+}
+EXPORT_SYMBOL_GPL(rhashtable_destroy);
+
+struct rhash_head __rcu **rht_bucket_nested(const struct bucket_table *tbl,
+					    unsigned int hash)
+{
+	const unsigned int shift = PAGE_SHIFT - ilog2(sizeof(void *));
+	static struct rhash_head __rcu *rhnull =
+		(struct rhash_head __rcu *)NULLS_MARKER(0);
+	unsigned int index = hash & ((1 << tbl->nest) - 1);
+	unsigned int size = tbl->size >> tbl->nest;
+	unsigned int subhash = hash;
+	union nested_table *ntbl;
+
+	ntbl = (union nested_table *)rcu_dereference_raw(tbl->buckets[0]);
+	ntbl = rht_dereference_bucket_rcu(ntbl[index].table, tbl, hash);
+	subhash >>= tbl->nest;
+
+	while (ntbl && size > (1 << shift)) {
+		index = subhash & ((1 << shift) - 1);
+		ntbl = rht_dereference_bucket_rcu(ntbl[index].table,
+						  tbl, hash);
+		size >>= shift;
+		subhash >>= shift;
+	}
+
+	if (!ntbl)
+		return &rhnull;
+
+	return &ntbl[subhash].bucket;
+}
+EXPORT_SYMBOL_GPL(rht_bucket_nested);
+
+struct rhash_head __rcu **rht_bucket_nested_insert(struct rhashtable *ht,
+						   struct bucket_table *tbl,
+						   unsigned int hash)
+{
+	const unsigned int shift = PAGE_SHIFT - ilog2(sizeof(void *));
+	unsigned int index = hash & ((1 << tbl->nest) - 1);
+	unsigned int size = tbl->size >> tbl->nest;
+	union nested_table *ntbl;
+	unsigned int shifted;
+	unsigned int nhash;
+
+	ntbl = (union nested_table *)rcu_dereference_raw(tbl->buckets[0]);
+	hash >>= tbl->nest;
+	nhash = index;
+	shifted = tbl->nest;
+	ntbl = nested_table_alloc(ht, &ntbl[index].table,
+				  size <= (1 << shift) ? shifted : 0, nhash);
+
+	while (ntbl && size > (1 << shift)) {
+		index = hash & ((1 << shift) - 1);
+		size >>= shift;
+		hash >>= shift;
+		nhash |= index << shifted;
+		shifted += shift;
+		ntbl = nested_table_alloc(ht, &ntbl[index].table,
+					  size <= (1 << shift) ? shifted : 0,
+					  nhash);
+	}
+
+	if (!ntbl)
+		return NULL;
+
+	return &ntbl[hash].bucket;
+}
+EXPORT_SYMBOL_GPL(rht_bucket_nested_insert);
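[Editor's note: a minimal usage sketch of this backported table, assuming a hypothetical object and table name; rhashtable_insert_fast() and rhashtable_lookup_fast() are the inline fast-path helpers from the matching rhashtable.h, and error handling is abbreviated:

	struct example_obj {
		u32 key;
		struct rhash_head node;
	};

	static const struct rhashtable_params example_params = {
		.head_offset = offsetof(struct example_obj, node),
		.key_offset = offsetof(struct example_obj, key),
		.key_len = sizeof(u32),
		.automatic_shrinking = true,
	};

	static int example_use(struct example_obj *obj)
	{
		struct rhashtable ht;
		int err;

		err = rhashtable_init(&ht, &example_params);
		if (err)
			return err;

		err = rhashtable_insert_fast(&ht, &obj->node, example_params);
		if (!err) {
			/* Lookups run under RCU and return the object or NULL. */
			rcu_read_lock();
			obj = rhashtable_lookup_fast(&ht, &obj->key,
						     example_params);
			rcu_read_unlock();
		}

		rhashtable_free_and_destroy(&ht, NULL, NULL);
		return err;
	}
]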
Rodriguez"); +MODULE_DESCRIPTION("Kernel backport module"); +MODULE_LICENSE("GPL"); + +#ifndef CPTCFG_KERNEL_NAME +#error "You need a CPTCFG_KERNEL_NAME" +#endif + +#ifndef CPTCFG_KERNEL_VERSION +#error "You need a CPTCFG_KERNEL_VERSION" +#endif + +#ifndef CPTCFG_VERSION +#error "You need a CPTCFG_VERSION" +#endif + +static char *backported_kernel_name = CPTCFG_KERNEL_NAME; + +module_param(backported_kernel_name, charp, 0400); +MODULE_PARM_DESC(backported_kernel_name, + "The kernel tree name that was used for this backport (" CPTCFG_KERNEL_NAME ")"); + +#ifdef BACKPORTS_GIT_TRACKED +static char *backports_tracker_id = BACKPORTS_GIT_TRACKED; +module_param(backports_tracker_id, charp, 0400); +MODULE_PARM_DESC(backports_tracker_id, + "The version of the tree containing this backport (" BACKPORTS_GIT_TRACKED ")"); +#else +static char *backported_kernel_version = CPTCFG_KERNEL_VERSION; +static char *backports_version = CPTCFG_VERSION; + +module_param(backported_kernel_version, charp, 0400); +MODULE_PARM_DESC(backported_kernel_version, + "The kernel version that was used for this backport (" CPTCFG_KERNEL_VERSION ")"); + +module_param(backports_version, charp, 0400); +MODULE_PARM_DESC(backports_version, + "The git version of the backports tree used to generate this backport (" CPTCFG_VERSION ")"); + +#endif + +void backport_dependency_symbol(void) +{ +} +EXPORT_SYMBOL_GPL(backport_dependency_symbol); + + +static int __init backport_init(void) +{ + int ret = crypto_ccm_module_init(); + if (ret) + return ret; + + ret = devcoredump_init(); + if (ret) { + crypto_ccm_module_exit(); + return ret; + } + + printk(KERN_INFO "Loading modules backported from " CPTCFG_KERNEL_NAME +#ifndef BACKPORTS_GIT_TRACKED + " version " CPTCFG_KERNEL_VERSION +#endif + "\n"); +#ifdef BACKPORTS_GIT_TRACKED + printk(KERN_INFO BACKPORTS_GIT_TRACKED "\n"); +#else + +#ifdef CONFIG_BACKPORT_INTEGRATE + printk(KERN_INFO "Backport integrated by backports.git " CPTCFG_VERSION "\n"); +#else + printk(KERN_INFO "Backport generated by backports.git " CPTCFG_VERSION "\n"); +#endif /* CONFIG_BACKPORT_INTEGRATE */ + +#endif /* BACKPORTS_GIT_TRACKED */ + + return 0; +} +subsys_initcall(backport_init); + +static void __exit backport_exit(void) +{ + crypto_ccm_module_exit(); + devcoredump_exit(); +} +module_exit(backport_exit); diff --git a/compat/user_namespace.c b/compat/user_namespace.c new file mode 100644 index 0000000..6d01404 --- /dev/null +++ b/compat/user_namespace.c @@ -0,0 +1,68 @@ +/* + * Copyright 2012 Luis R. Rodriguez + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
+ * + * Backport functionality introduced in Linux user_namespace.c + */ + +#include +#include +#include +#include + +#ifdef CONFIG_USER_NS + +kuid_t make_kuid(struct user_namespace *ns, uid_t uid) +{ + /* Map the uid to a global kernel uid */ + return KUIDT_INIT(uid); +} +EXPORT_SYMBOL_GPL(make_kuid); + +uid_t from_kuid(struct user_namespace *targ, kuid_t kuid) +{ + /* Map the uid from a global kernel uid */ + return __kuid_val(kuid); +} +EXPORT_SYMBOL_GPL(from_kuid); + +uid_t from_kuid_munged(struct user_namespace *targ, kuid_t kuid) +{ + uid_t uid; + uid = from_kuid(targ, kuid); + + if (uid == (uid_t) -1) + uid = overflowuid; + return uid; +} +EXPORT_SYMBOL_GPL(from_kuid_munged); + +kgid_t make_kgid(struct user_namespace *ns, gid_t gid) +{ + /* Map the gid to a global kernel gid */ + return KGIDT_INIT(gid); +} +EXPORT_SYMBOL_GPL(make_kgid); + +gid_t from_kgid(struct user_namespace *targ, kgid_t kgid) +{ + /* Map the gid from a global kernel gid */ + return __kgid_val(kgid); +} +EXPORT_SYMBOL_GPL(from_kgid); + +gid_t from_kgid_munged(struct user_namespace *targ, kgid_t kgid) +{ + gid_t gid; + gid = from_kgid(targ, kgid); + + if (gid == (gid_t) -1) + gid = overflowgid; + return gid; +} +EXPORT_SYMBOL_GPL(from_kgid_munged); + +#endif /* CONFIG_USER_NS */ -- cgit v1.2.3
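[Editor's note: these fallbacks collapse kuid/kgid conversion to an identity mapping, regardless of the namespace argument. A tiny illustration with hypothetical values:

	kuid_t kuid = make_kuid(&init_user_ns, 1000);	/* KUIDT_INIT(1000) */
	uid_t uid = from_kuid_munged(&init_user_ns, kuid);	/* yields 1000 again */
]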