1173 files changed, 13573 insertions, 7077 deletions
diff --git a/Documentation/ABI/testing/sysfs-bus-rbd b/Documentation/ABI/testing/sysfs-bus-rbd index dbedafb095e2..eb781c624a78 100644 --- a/Documentation/ABI/testing/sysfs-bus-rbd +++ b/Documentation/ABI/testing/sysfs-bus-rbd @@ -51,12 +51,6 @@ current_snap The current snapshot for which the device is mapped. -create_snap - - Create a snapshot: - - $ echo <snap-name> > /sys/bus/rbd/devices/<dev-id>/snap_create - snap_* A directory per each snapshot diff --git a/Documentation/cgroups/memory.txt b/Documentation/cgroups/memory.txt index 9b1067afb224..68c5411d70a2 100644 --- a/Documentation/cgroups/memory.txt +++ b/Documentation/cgroups/memory.txt @@ -466,6 +466,10 @@ Note: 5.3 swappiness Similar to /proc/sys/vm/swappiness, but affecting a hierarchy of groups only. +Please note that unlike the global swappiness, memcg knob set to 0 +really prevents from any swapping even if there is a swap storage +available. This might lead to memcg OOM killer if there are no file +pages to reclaim. Following cgroups' swappiness can't be changed. - root cgroup (uses /proc/sys/vm/swappiness). diff --git a/Documentation/devicetree/bindings/arm/atmel-at91.txt b/Documentation/devicetree/bindings/arm/atmel-at91.txt index ecc81e368715..d187e9f7cf1c 100644 --- a/Documentation/devicetree/bindings/arm/atmel-at91.txt +++ b/Documentation/devicetree/bindings/arm/atmel-at91.txt @@ -8,7 +8,7 @@ PIT Timer required properties: shared across all System Controller members. TC/TCLIB Timer required properties: -- compatible: Should be "atmel,<chip>-pit". +- compatible: Should be "atmel,<chip>-tcb". <chip> can be "at91rm9200" or "at91sam9x5" - reg: Should contain registers location and length - interrupts: Should contain all interrupts for the TC block diff --git a/Documentation/dvb/get_dvb_firmware b/Documentation/dvb/get_dvb_firmware index d1d4a179a382..b361e08570f7 100755 --- a/Documentation/dvb/get_dvb_firmware +++ b/Documentation/dvb/get_dvb_firmware @@ -115,7 +115,7 @@ sub tda10045 { sub tda10046 { my $sourcefile = "TT_PCI_2.19h_28_11_2006.zip"; - my $url = "http://www.tt-download.com/download/updates/219/$sourcefile"; + my $url = "http://technotrend.com.ua/download/software/219/$sourcefile"; my $hash = "6a7e1e2f2644b162ff0502367553c72d"; my $outfile = "dvb-fe-tda10046.fw"; my $tmpdir = tempdir(DIR => "/tmp", CLEANUP => 1); diff --git a/Documentation/i2c/busses/i2c-i801 b/Documentation/i2c/busses/i2c-i801 index 71f55bbcefc8..99d4e442b77d 100644 --- a/Documentation/i2c/busses/i2c-i801 +++ b/Documentation/i2c/busses/i2c-i801 @@ -21,6 +21,7 @@ Supported adapters: * Intel DH89xxCC (PCH) * Intel Panther Point (PCH) * Intel Lynx Point (PCH) + * Intel Lynx Point-LP (PCH) Datasheets: Publicly available at the Intel website On Intel Patsburg and later chipsets, both the normal host SMBus controller diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt index 22ce1007b775..c0d908c1d1bc 100644 --- a/Documentation/kernel-parameters.txt +++ b/Documentation/kernel-parameters.txt @@ -566,6 +566,8 @@ bytes respectively. Such letter suffixes can also be entirely omitted. UART at the specified I/O port or MMIO address, switching to the matching ttyS device later. The options are the same as for ttyS, above. + hvc<n> Use the hypervisor console device <n>. This is for + both Xen and PowerPC hypervisors. If the device connected to the port is not a TTY but a braille device, prepend "brl," before the device type, for instance @@ -751,6 +753,7 @@ bytes respectively. 
Such letter suffixes can also be entirely omitted. earlyprintk= [X86,SH,BLACKFIN] earlyprintk=vga + earlyprintk=xen earlyprintk=serial[,ttySn[,baudrate]] earlyprintk=ttySn[,baudrate] earlyprintk=dbgp[debugController#] @@ -768,6 +771,8 @@ bytes respectively. Such letter suffixes can also be entirely omitted. The VGA output is eventually overwritten by the real console. + The xen output can only be used by Xen PV guests. + ekgdboc= [X86,KGDB] Allow early kernel console debugging ekgdboc=kbd diff --git a/Documentation/networking/ip-sysctl.txt b/Documentation/networking/ip-sysctl.txt index 1619a8c80873..797715397f43 100644 --- a/Documentation/networking/ip-sysctl.txt +++ b/Documentation/networking/ip-sysctl.txt @@ -537,6 +537,11 @@ tcp_thin_dupack - BOOLEAN Documentation/networking/tcp-thin.txt Default: 0 +tcp_challenge_ack_limit - INTEGER + Limits number of Challenge ACK sent per second, as recommended + in RFC 5961 (Improving TCP's Robustness to Blind In-Window Attacks) + Default: 100 + UDP variables: udp_mem - vector of 3 INTEGERs: min, pressure, max diff --git a/Documentation/sound/alsa/HD-Audio-Models.txt b/Documentation/sound/alsa/HD-Audio-Models.txt index 286ec04d1383..82dd174396d1 100644 --- a/Documentation/sound/alsa/HD-Audio-Models.txt +++ b/Documentation/sound/alsa/HD-Audio-Models.txt @@ -47,6 +47,7 @@ ALC882/883/885/888/889 acer-aspire-4930g Acer Aspire 4930G/5930G/6530G/6930G/7730G acer-aspire-8930g Acer Aspire 8330G/6935G acer-aspire Acer Aspire others + no-primary-hp VAIO Z workaround (for fixed speaker DAC) ALC861/660 ========== diff --git a/MAINTAINERS b/MAINTAINERS index c56bd02248d3..c5b99af4b2c2 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -2627,7 +2627,7 @@ S: Maintained F: drivers/net/ethernet/i825xx/eexpress.* ETHERNET BRIDGE -M: Stephen Hemminger <shemminger@vyatta.com> +M: Stephen Hemminger <stephen@networkplumber.org> L: bridge@lists.linux-foundation.org L: netdev@vger.kernel.org W: http://www.linuxfoundation.org/en/Net:Bridge @@ -4312,7 +4312,7 @@ S: Maintained MARVELL GIGABIT ETHERNET DRIVERS (skge/sky2) M: Mirko Lindner <mlindner@marvell.com> -M: Stephen Hemminger <shemminger@vyatta.com> +M: Stephen Hemminger <stephen@networkplumber.org> L: netdev@vger.kernel.org S: Maintained F: drivers/net/ethernet/marvell/sk* @@ -4563,7 +4563,7 @@ S: Supported F: drivers/infiniband/hw/nes/ NETEM NETWORK EMULATOR -M: Stephen Hemminger <shemminger@vyatta.com> +M: Stephen Hemminger <stephen@networkplumber.org> L: netem@lists.linux-foundation.org S: Maintained F: net/sched/sch_netem.c @@ -1,6 +1,6 @@ VERSION = 3 PATCHLEVEL = 4 -SUBLEVEL = 10 +SUBLEVEL = 35 EXTRAVERSION = NAME = Saber-toothed Squirrel diff --git a/arch/alpha/include/asm/atomic.h b/arch/alpha/include/asm/atomic.h index 3bb7ffeae3bc..c2cbe4fc391c 100644 --- a/arch/alpha/include/asm/atomic.h +++ b/arch/alpha/include/asm/atomic.h @@ -14,8 +14,8 @@ */ -#define ATOMIC_INIT(i) ( (atomic_t) { (i) } ) -#define ATOMIC64_INIT(i) ( (atomic64_t) { (i) } ) +#define ATOMIC_INIT(i) { (i) } +#define ATOMIC64_INIT(i) { (i) } #define atomic_read(v) (*(volatile int *)&(v)->counter) #define atomic64_read(v) (*(volatile long *)&(v)->counter) diff --git a/arch/alpha/include/asm/fpu.h b/arch/alpha/include/asm/fpu.h index db00f7885faa..e477bcd5b94a 100644 --- a/arch/alpha/include/asm/fpu.h +++ b/arch/alpha/include/asm/fpu.h @@ -1,7 +1,9 @@ #ifndef __ASM_ALPHA_FPU_H #define __ASM_ALPHA_FPU_H +#ifdef __KERNEL__ #include <asm/special_insns.h> +#endif /* * Alpha floating-point control register defines: diff --git 
a/arch/alpha/include/asm/socket.h b/arch/alpha/include/asm/socket.h index dcb221a4b5be..7d2f75be932e 100644 --- a/arch/alpha/include/asm/socket.h +++ b/arch/alpha/include/asm/socket.h @@ -76,9 +76,11 @@ /* Instruct lower device to use last 4-bytes of skb data as FCS */ #define SO_NOFCS 43 +#ifdef __KERNEL__ /* O_NONBLOCK clashes with the bits used for socket types. Therefore we * have to define SOCK_NONBLOCK to a different value here. */ #define SOCK_NONBLOCK 0x40000000 +#endif /* __KERNEL__ */ #endif /* _ASM_SOCKET_H */ diff --git a/arch/alpha/kernel/process.c b/arch/alpha/kernel/process.c index 153d3fce3e8e..8e3d91be76b0 100644 --- a/arch/alpha/kernel/process.c +++ b/arch/alpha/kernel/process.c @@ -28,6 +28,7 @@ #include <linux/tty.h> #include <linux/console.h> #include <linux/slab.h> +#include <linux/rcupdate.h> #include <asm/reg.h> #include <asm/uaccess.h> @@ -54,8 +55,11 @@ cpu_idle(void) /* FIXME -- EV6 and LCA45 know how to power down the CPU. */ + rcu_idle_enter(); while (!need_resched()) cpu_relax(); + + rcu_idle_exit(); schedule(); } } diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig index a3c66dd776e2..d39efbb4987e 100644 --- a/arch/arm/Kconfig +++ b/arch/arm/Kconfig @@ -592,6 +592,7 @@ config ARCH_KIRKWOOD bool "Marvell Kirkwood" select CPU_FEROCEON select PCI + select PCI_QUIRKS select ARCH_REQUIRE_GPIOLIB select GENERIC_CLOCKEVENTS select NEED_MACH_IO_H @@ -1450,6 +1451,16 @@ config PL310_ERRATA_769419 on systems with an outer cache, the store buffer is drained explicitly. +config ARM_ERRATA_775420 + bool "ARM errata: A data cache maintenance operation which aborts, might lead to deadlock" + depends on CPU_V7 + help + This option enables the workaround for the 775420 Cortex-A9 (r2p2, + r2p6,r2p8,r2p10,r3p0) erratum. In case a date cache maintenance + operation aborts with MMU exception, it might cause the processor + to deadlock. This workaround puts DSB before executing ISB if + an abort may occur on cache maintenance. + endmenu source "arch/arm/common/Kconfig" @@ -2221,6 +2232,7 @@ source "drivers/cpufreq/Kconfig" config CPU_FREQ_IMX tristate "CPUfreq driver for i.MX CPUs" depends on ARCH_MXC && CPU_FREQ + select CPU_FREQ_TABLE help This enables the CPUfreq driver for i.MX CPUs. diff --git a/arch/arm/Makefile b/arch/arm/Makefile index 3afd07659ad0..8676c911f22a 100644 --- a/arch/arm/Makefile +++ b/arch/arm/Makefile @@ -287,10 +287,10 @@ zImage Image xipImage bootpImage uImage: vmlinux zinstall uinstall install: vmlinux $(Q)$(MAKE) $(build)=$(boot) MACHINE=$(MACHINE) $@ -%.dtb: +%.dtb: scripts $(Q)$(MAKE) $(build)=$(boot) MACHINE=$(MACHINE) $(boot)/$@ -dtbs: +dtbs: scripts $(Q)$(MAKE) $(build)=$(boot) MACHINE=$(MACHINE) $(boot)/$@ # We use MRPROPER_FILES and CLEAN_FILES now diff --git a/arch/arm/boot/compressed/head.S b/arch/arm/boot/compressed/head.S index ace2dee28286..adff07672796 100644 --- a/arch/arm/boot/compressed/head.S +++ b/arch/arm/boot/compressed/head.S @@ -648,6 +648,7 @@ __armv7_mmu_cache_on: mcrne p15, 0, r0, c8, c7, 0 @ flush I,D TLBs #endif mrc p15, 0, r0, c1, c0, 0 @ read control reg + bic r0, r0, #1 << 28 @ clear SCTLR.TRE orr r0, r0, #0x5000 @ I-cache enable, RR cache replacement orr r0, r0, #0x003c @ write buffer #ifdef CONFIG_MMU diff --git a/arch/arm/include/asm/assembler.h b/arch/arm/include/asm/assembler.h index 03fb93621d0d..5c8b3bf4d825 100644 --- a/arch/arm/include/asm/assembler.h +++ b/arch/arm/include/asm/assembler.h @@ -320,4 +320,12 @@ .size \name , . 
- \name .endm + .macro check_uaccess, addr:req, size:req, limit:req, tmp:req, bad:req +#ifndef CONFIG_CPU_USE_DOMAINS + adds \tmp, \addr, #\size - 1 + sbcccs \tmp, \tmp, \limit + bcs \bad +#endif + .endm + #endif /* __ASM_ASSEMBLER_H__ */ diff --git a/arch/arm/include/asm/hwcap.h b/arch/arm/include/asm/hwcap.h index 917626128a1d..a2fe893802a6 100644 --- a/arch/arm/include/asm/hwcap.h +++ b/arch/arm/include/asm/hwcap.h @@ -18,11 +18,12 @@ #define HWCAP_THUMBEE (1 << 11) #define HWCAP_NEON (1 << 12) #define HWCAP_VFPv3 (1 << 13) -#define HWCAP_VFPv3D16 (1 << 14) +#define HWCAP_VFPv3D16 (1 << 14) /* also set for VFPv4-D16 */ #define HWCAP_TLS (1 << 15) #define HWCAP_VFPv4 (1 << 16) #define HWCAP_IDIVA (1 << 17) #define HWCAP_IDIVT (1 << 18) +#define HWCAP_VFPD32 (1 << 19) /* set if VFP has 32 regs (not 16) */ #define HWCAP_IDIV (HWCAP_IDIVA | HWCAP_IDIVT) #if defined(__KERNEL__) diff --git a/arch/arm/include/asm/mutex.h b/arch/arm/include/asm/mutex.h index 6e813d93a736..b1479fd04a95 100644 --- a/arch/arm/include/asm/mutex.h +++ b/arch/arm/include/asm/mutex.h @@ -9,13 +9,8 @@ #define _ASM_MUTEX_H /* * On pre-ARMv6 hardware this results in a swp-based implementation, - * which is the most efficient. For ARMv6+, we have exclusive memory - * accessors and use atomic_dec to avoid the extra xchg operations - * on the locking slowpaths. + * which is the most efficient. For ARMv6+, we emit a pair of exclusive + * accesses instead. */ -#if __LINUX_ARM_ARCH__ < 6 #include <asm-generic/mutex-xchg.h> -#else -#include <asm-generic/mutex-dec.h> -#endif #endif diff --git a/arch/arm/include/asm/pgtable.h b/arch/arm/include/asm/pgtable.h index 752514cb34de..3e8d23335a03 100644 --- a/arch/arm/include/asm/pgtable.h +++ b/arch/arm/include/asm/pgtable.h @@ -221,6 +221,18 @@ static inline pte_t *pmd_page_vaddr(pmd_t pmd) #define pte_clear(mm,addr,ptep) set_pte_ext(ptep, __pte(0), 0) +#define pte_none(pte) (!pte_val(pte)) +#define pte_present(pte) (pte_val(pte) & L_PTE_PRESENT) +#define pte_write(pte) (!(pte_val(pte) & L_PTE_RDONLY)) +#define pte_dirty(pte) (pte_val(pte) & L_PTE_DIRTY) +#define pte_young(pte) (pte_val(pte) & L_PTE_YOUNG) +#define pte_exec(pte) (!(pte_val(pte) & L_PTE_XN)) +#define pte_special(pte) (0) + +#define pte_present_user(pte) \ + ((pte_val(pte) & (L_PTE_PRESENT | L_PTE_USER)) == \ + (L_PTE_PRESENT | L_PTE_USER)) + #if __LINUX_ARM_ARCH__ < 6 static inline void __sync_icache_dcache(pte_t pteval) { @@ -232,25 +244,15 @@ extern void __sync_icache_dcache(pte_t pteval); static inline void set_pte_at(struct mm_struct *mm, unsigned long addr, pte_t *ptep, pte_t pteval) { - if (addr >= TASK_SIZE) - set_pte_ext(ptep, pteval, 0); - else { + unsigned long ext = 0; + + if (addr < TASK_SIZE && pte_present_user(pteval)) { __sync_icache_dcache(pteval); - set_pte_ext(ptep, pteval, PTE_EXT_NG); + ext |= PTE_EXT_NG; } -} -#define pte_none(pte) (!pte_val(pte)) -#define pte_present(pte) (pte_val(pte) & L_PTE_PRESENT) -#define pte_write(pte) (!(pte_val(pte) & L_PTE_RDONLY)) -#define pte_dirty(pte) (pte_val(pte) & L_PTE_DIRTY) -#define pte_young(pte) (pte_val(pte) & L_PTE_YOUNG) -#define pte_exec(pte) (!(pte_val(pte) & L_PTE_XN)) -#define pte_special(pte) (0) - -#define pte_present_user(pte) \ - ((pte_val(pte) & (L_PTE_PRESENT | L_PTE_USER)) == \ - (L_PTE_PRESENT | L_PTE_USER)) + set_pte_ext(ptep, pteval, ext); +} #define PTE_BIT_FUNC(fn,op) \ static inline pte_t pte_##fn(pte_t pte) { pte_val(pte) op; return pte; } @@ -277,13 +279,13 @@ static inline pte_t pte_modify(pte_t pte, pgprot_t newprot) * * 3 3 2 2 2 2 2 
2 2 2 2 2 1 1 1 1 1 1 1 1 1 1 * 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 - * <--------------- offset --------------------> <- type --> 0 0 0 + * <--------------- offset ----------------------> < type -> 0 0 0 * - * This gives us up to 63 swap files and 32GB per swap file. Note that + * This gives us up to 31 swap files and 64GB per swap file. Note that * the offset field is always non-zero. */ #define __SWP_TYPE_SHIFT 3 -#define __SWP_TYPE_BITS 6 +#define __SWP_TYPE_BITS 5 #define __SWP_TYPE_MASK ((1 << __SWP_TYPE_BITS) - 1) #define __SWP_OFFSET_SHIFT (__SWP_TYPE_BITS + __SWP_TYPE_SHIFT) diff --git a/arch/arm/include/asm/uaccess.h b/arch/arm/include/asm/uaccess.h index 71f6536d17ac..0a070e98625d 100644 --- a/arch/arm/include/asm/uaccess.h +++ b/arch/arm/include/asm/uaccess.h @@ -101,28 +101,39 @@ extern int __get_user_1(void *); extern int __get_user_2(void *); extern int __get_user_4(void *); -#define __get_user_x(__r2,__p,__e,__s,__i...) \ +#define __GUP_CLOBBER_1 "lr", "cc" +#ifdef CONFIG_CPU_USE_DOMAINS +#define __GUP_CLOBBER_2 "ip", "lr", "cc" +#else +#define __GUP_CLOBBER_2 "lr", "cc" +#endif +#define __GUP_CLOBBER_4 "lr", "cc" + +#define __get_user_x(__r2,__p,__e,__l,__s) \ __asm__ __volatile__ ( \ __asmeq("%0", "r0") __asmeq("%1", "r2") \ + __asmeq("%3", "r1") \ "bl __get_user_" #__s \ : "=&r" (__e), "=r" (__r2) \ - : "0" (__p) \ - : __i, "cc") + : "0" (__p), "r" (__l) \ + : __GUP_CLOBBER_##__s) #define get_user(x,p) \ ({ \ + unsigned long __limit = current_thread_info()->addr_limit - 1; \ register const typeof(*(p)) __user *__p asm("r0") = (p);\ register unsigned long __r2 asm("r2"); \ + register unsigned long __l asm("r1") = __limit; \ register int __e asm("r0"); \ switch (sizeof(*(__p))) { \ case 1: \ - __get_user_x(__r2, __p, __e, 1, "lr"); \ - break; \ + __get_user_x(__r2, __p, __e, __l, 1); \ + break; \ case 2: \ - __get_user_x(__r2, __p, __e, 2, "r3", "lr"); \ + __get_user_x(__r2, __p, __e, __l, 2); \ break; \ case 4: \ - __get_user_x(__r2, __p, __e, 4, "lr"); \ + __get_user_x(__r2, __p, __e, __l, 4); \ break; \ default: __e = __get_user_bad(); break; \ } \ @@ -135,31 +146,34 @@ extern int __put_user_2(void *, unsigned int); extern int __put_user_4(void *, unsigned int); extern int __put_user_8(void *, unsigned long long); -#define __put_user_x(__r2,__p,__e,__s) \ +#define __put_user_x(__r2,__p,__e,__l,__s) \ __asm__ __volatile__ ( \ __asmeq("%0", "r0") __asmeq("%2", "r2") \ + __asmeq("%3", "r1") \ "bl __put_user_" #__s \ : "=&r" (__e) \ - : "0" (__p), "r" (__r2) \ + : "0" (__p), "r" (__r2), "r" (__l) \ : "ip", "lr", "cc") #define put_user(x,p) \ ({ \ + unsigned long __limit = current_thread_info()->addr_limit - 1; \ register const typeof(*(p)) __r2 asm("r2") = (x); \ register const typeof(*(p)) __user *__p asm("r0") = (p);\ + register unsigned long __l asm("r1") = __limit; \ register int __e asm("r0"); \ switch (sizeof(*(__p))) { \ case 1: \ - __put_user_x(__r2, __p, __e, 1); \ + __put_user_x(__r2, __p, __e, __l, 1); \ break; \ case 2: \ - __put_user_x(__r2, __p, __e, 2); \ + __put_user_x(__r2, __p, __e, __l, 2); \ break; \ case 4: \ - __put_user_x(__r2, __p, __e, 4); \ + __put_user_x(__r2, __p, __e, __l, 4); \ break; \ case 8: \ - __put_user_x(__r2, __p, __e, 8); \ + __put_user_x(__r2, __p, __e, __l, 8); \ break; \ default: __e = __put_user_bad(); break; \ } \ diff --git a/arch/arm/include/asm/vfpmacros.h b/arch/arm/include/asm/vfpmacros.h index 3d5fc41ae8d3..c49c8f778b5d 100644 --- a/arch/arm/include/asm/vfpmacros.h +++ 
b/arch/arm/include/asm/vfpmacros.h @@ -27,9 +27,9 @@ #if __LINUX_ARM_ARCH__ <= 6 ldr \tmp, =elf_hwcap @ may not have MVFR regs ldr \tmp, [\tmp, #0] - tst \tmp, #HWCAP_VFPv3D16 - ldceq p11, cr0, [\base],#32*4 @ FLDMIAD \base!, {d16-d31} - addne \base, \base, #32*4 @ step over unused register space + tst \tmp, #HWCAP_VFPD32 + ldcnel p11, cr0, [\base],#32*4 @ FLDMIAD \base!, {d16-d31} + addeq \base, \base, #32*4 @ step over unused register space #else VFPFMRX \tmp, MVFR0 @ Media and VFP Feature Register 0 and \tmp, \tmp, #MVFR0_A_SIMD_MASK @ A_SIMD field @@ -51,9 +51,9 @@ #if __LINUX_ARM_ARCH__ <= 6 ldr \tmp, =elf_hwcap @ may not have MVFR regs ldr \tmp, [\tmp, #0] - tst \tmp, #HWCAP_VFPv3D16 - stceq p11, cr0, [\base],#32*4 @ FSTMIAD \base!, {d16-d31} - addne \base, \base, #32*4 @ step over unused register space + tst \tmp, #HWCAP_VFPD32 + stcnel p11, cr0, [\base],#32*4 @ FSTMIAD \base!, {d16-d31} + addeq \base, \base, #32*4 @ step over unused register space #else VFPFMRX \tmp, MVFR0 @ Media and VFP Feature Register 0 and \tmp, \tmp, #MVFR0_A_SIMD_MASK @ A_SIMD field diff --git a/arch/arm/kernel/hw_breakpoint.c b/arch/arm/kernel/hw_breakpoint.c index ba386bd94107..18d39ea4c02f 100644 --- a/arch/arm/kernel/hw_breakpoint.c +++ b/arch/arm/kernel/hw_breakpoint.c @@ -159,6 +159,12 @@ static int debug_arch_supported(void) arch >= ARM_DEBUG_ARCH_V7_1; } +/* Can we determine the watchpoint access type from the fsr? */ +static int debug_exception_updates_fsr(void) +{ + return 0; +} + /* Determine number of WRP registers available. */ static int get_num_wrp_resources(void) { @@ -619,18 +625,35 @@ int arch_validate_hwbkpt_settings(struct perf_event *bp) info->address &= ~alignment_mask; info->ctrl.len <<= offset; - /* - * Currently we rely on an overflow handler to take - * care of single-stepping the breakpoint when it fires. - * In the case of userspace breakpoints on a core with V7 debug, - * we can use the mismatch feature as a poor-man's hardware - * single-step, but this only works for per-task breakpoints. - */ - if (!bp->overflow_handler && (arch_check_bp_in_kernelspace(bp) || - !core_has_mismatch_brps() || !bp->hw.bp_target)) { - pr_warning("overflow handler required but none found\n"); - ret = -EINVAL; + if (!bp->overflow_handler) { + /* + * Mismatch breakpoints are required for single-stepping + * breakpoints. + */ + if (!core_has_mismatch_brps()) + return -EINVAL; + + /* We don't allow mismatch breakpoints in kernel space. */ + if (arch_check_bp_in_kernelspace(bp)) + return -EPERM; + + /* + * Per-cpu breakpoints are not supported by our stepping + * mechanism. + */ + if (!bp->hw.bp_target) + return -EINVAL; + + /* + * We only support specific access types if the fsr + * reports them. + */ + if (!debug_exception_updates_fsr() && + (info->ctrl.type == ARM_BREAKPOINT_LOAD || + info->ctrl.type == ARM_BREAKPOINT_STORE)) + return -EINVAL; } + out: return ret; } @@ -706,10 +729,12 @@ static void watchpoint_handler(unsigned long addr, unsigned int fsr, goto unlock; /* Check that the access type matches. */ - access = (fsr & ARM_FSR_ACCESS_MASK) ? HW_BREAKPOINT_W : - HW_BREAKPOINT_R; - if (!(access & hw_breakpoint_type(wp))) - goto unlock; + if (debug_exception_updates_fsr()) { + access = (fsr & ARM_FSR_ACCESS_MASK) ? + HW_BREAKPOINT_W : HW_BREAKPOINT_R; + if (!(access & hw_breakpoint_type(wp))) + goto unlock; + } /* We have a winner. 
*/ info->trigger = addr; diff --git a/arch/arm/kernel/sched_clock.c b/arch/arm/kernel/sched_clock.c index f4515393248d..63bc22c88507 100644 --- a/arch/arm/kernel/sched_clock.c +++ b/arch/arm/kernel/sched_clock.c @@ -89,11 +89,11 @@ static void notrace update_sched_clock(void) * detectable in cyc_to_fixed_sched_clock(). */ raw_local_irq_save(flags); - cd.epoch_cyc = cyc; + cd.epoch_cyc_copy = cyc; smp_wmb(); cd.epoch_ns = ns; smp_wmb(); - cd.epoch_cyc_copy = cyc; + cd.epoch_cyc = cyc; raw_local_irq_restore(flags); } diff --git a/arch/arm/kernel/smp.c b/arch/arm/kernel/smp.c index 429d995a7981..998d5c9682ff 100644 --- a/arch/arm/kernel/smp.c +++ b/arch/arm/kernel/smp.c @@ -345,18 +345,24 @@ static void percpu_timer_setup(void); asmlinkage void __cpuinit secondary_start_kernel(void) { struct mm_struct *mm = &init_mm; - unsigned int cpu = smp_processor_id(); + unsigned int cpu; + + /* + * The identity mapping is uncached (strongly ordered), so + * switch away from it before attempting any exclusive accesses. + */ + cpu_switch_mm(mm->pgd, mm); + enter_lazy_tlb(mm, current); + local_flush_tlb_all(); /* * All kernel threads share the same mm context; grab a * reference and switch to it. */ + cpu = smp_processor_id(); atomic_inc(&mm->mm_count); current->active_mm = mm; cpumask_set_cpu(cpu, mm_cpumask(mm)); - cpu_switch_mm(mm->pgd, mm); - enter_lazy_tlb(mm, current); - local_flush_tlb_all(); cpu_init(); preempt_disable(); diff --git a/arch/arm/kernel/swp_emulate.c b/arch/arm/kernel/swp_emulate.c index df745188f5de..ab1017bd1667 100644 --- a/arch/arm/kernel/swp_emulate.c +++ b/arch/arm/kernel/swp_emulate.c @@ -109,10 +109,12 @@ static void set_segfault(struct pt_regs *regs, unsigned long addr) { siginfo_t info; + down_read(&current->mm->mmap_sem); if (find_vma(current->mm, addr) == NULL) info.si_code = SEGV_MAPERR; else info.si_code = SEGV_ACCERR; + up_read(&current->mm->mmap_sem); info.si_signo = SIGSEGV; info.si_errno = 0; diff --git a/arch/arm/kernel/traps.c b/arch/arm/kernel/traps.c index c7cae6b9a4d9..eeb752093a56 100644 --- a/arch/arm/kernel/traps.c +++ b/arch/arm/kernel/traps.c @@ -388,20 +388,23 @@ asmlinkage void __exception do_undefinstr(struct pt_regs *regs) #endif instr = *(u32 *) pc; } else if (thumb_mode(regs)) { - get_user(instr, (u16 __user *)pc); + if (get_user(instr, (u16 __user *)pc)) + goto die_sig; if (is_wide_instruction(instr)) { unsigned int instr2; - get_user(instr2, (u16 __user *)pc+1); + if (get_user(instr2, (u16 __user *)pc+1)) + goto die_sig; instr <<= 16; instr |= instr2; } - } else { - get_user(instr, (u32 __user *)pc); + } else if (get_user(instr, (u32 __user *)pc)) { + goto die_sig; } if (call_undef_hook(regs, instr) == 0) return; +die_sig: #ifdef CONFIG_DEBUG_USER if (user_debug & UDBG_UNDEFINED) { printk(KERN_INFO "%s (%d): undefined instruction: pc=%p\n", diff --git a/arch/arm/lib/getuser.S b/arch/arm/lib/getuser.S index 11093a7c3e32..9b06bb41fca6 100644 --- a/arch/arm/lib/getuser.S +++ b/arch/arm/lib/getuser.S @@ -16,8 +16,9 @@ * __get_user_X * * Inputs: r0 contains the address + * r1 contains the address limit, which must be preserved * Outputs: r0 is the error code - * r2, r3 contains the zero-extended value + * r2 contains the zero-extended value * lr corrupted * * No other registers must be altered. (see <asm/uaccess.h> @@ -27,33 +28,39 @@ * Note also that it is intended that __get_user_bad is not global.
*/ #include <linux/linkage.h> +#include <asm/assembler.h> #include <asm/errno.h> #include <asm/domain.h> ENTRY(__get_user_1) + check_uaccess r0, 1, r1, r2, __get_user_bad 1: TUSER(ldrb) r2, [r0] mov r0, #0 mov pc, lr ENDPROC(__get_user_1) ENTRY(__get_user_2) -#ifdef CONFIG_THUMB2_KERNEL -2: TUSER(ldrb) r2, [r0] -3: TUSER(ldrb) r3, [r0, #1] + check_uaccess r0, 2, r1, r2, __get_user_bad +#ifdef CONFIG_CPU_USE_DOMAINS +rb .req ip +2: ldrbt r2, [r0], #1 +3: ldrbt rb, [r0], #0 #else -2: TUSER(ldrb) r2, [r0], #1 -3: TUSER(ldrb) r3, [r0] +rb .req r0 +2: ldrb r2, [r0] +3: ldrb rb, [r0, #1] #endif #ifndef __ARMEB__ - orr r2, r2, r3, lsl #8 + orr r2, r2, rb, lsl #8 #else - orr r2, r3, r2, lsl #8 + orr r2, rb, r2, lsl #8 #endif mov r0, #0 mov pc, lr ENDPROC(__get_user_2) ENTRY(__get_user_4) + check_uaccess r0, 4, r1, r2, __get_user_bad 4: TUSER(ldr) r2, [r0] mov r0, #0 mov pc, lr diff --git a/arch/arm/lib/putuser.S b/arch/arm/lib/putuser.S index 7db25990c589..3d73dcb959b0 100644 --- a/arch/arm/lib/putuser.S +++ b/arch/arm/lib/putuser.S @@ -16,6 +16,7 @@ * __put_user_X * * Inputs: r0 contains the address + * r1 contains the address limit, which must be preserved * r2, r3 contains the value * Outputs: r0 is the error code * lr corrupted @@ -27,16 +28,19 @@ * Note also that it is intended that __put_user_bad is not global. */ #include <linux/linkage.h> +#include <asm/assembler.h> #include <asm/errno.h> #include <asm/domain.h> ENTRY(__put_user_1) + check_uaccess r0, 1, r1, ip, __put_user_bad 1: TUSER(strb) r2, [r0] mov r0, #0 mov pc, lr ENDPROC(__put_user_1) ENTRY(__put_user_2) + check_uaccess r0, 2, r1, ip, __put_user_bad mov ip, r2, lsr #8 #ifdef CONFIG_THUMB2_KERNEL #ifndef __ARMEB__ @@ -60,12 +64,14 @@ ENTRY(__put_user_2) ENDPROC(__put_user_2) ENTRY(__put_user_4) + check_uaccess r0, 4, r1, ip, __put_user_bad 4: TUSER(str) r2, [r0] mov r0, #0 mov pc, lr ENDPROC(__put_user_4) ENTRY(__put_user_8) + check_uaccess r0, 8, r1, ip, __put_user_bad #ifdef CONFIG_THUMB2_KERNEL 5: TUSER(str) r2, [r0] 6: TUSER(str) r3, [r0, #4] diff --git a/arch/arm/mach-at91/at91rm9200_devices.c b/arch/arm/mach-at91/at91rm9200_devices.c index 05774e5b1cba..3b0c719f9fe9 100644 --- a/arch/arm/mach-at91/at91rm9200_devices.c +++ b/arch/arm/mach-at91/at91rm9200_devices.c @@ -463,7 +463,7 @@ static struct i2c_gpio_platform_data pdata = { static struct platform_device at91rm9200_twi_device = { .name = "i2c-gpio", - .id = -1, + .id = 0, .dev.platform_data = &pdata, }; diff --git a/arch/arm/mach-at91/at91sam9260_devices.c b/arch/arm/mach-at91/at91sam9260_devices.c index 5652dde4bbe2..151fec4ccf2f 100644 --- a/arch/arm/mach-at91/at91sam9260_devices.c +++ b/arch/arm/mach-at91/at91sam9260_devices.c @@ -468,7 +468,7 @@ static struct i2c_gpio_platform_data pdata = { static struct platform_device at91sam9260_twi_device = { .name = "i2c-gpio", - .id = -1, + .id = 0, .dev.platform_data = &pdata, }; diff --git a/arch/arm/mach-at91/at91sam9261_devices.c b/arch/arm/mach-at91/at91sam9261_devices.c index 4db961a93085..8a29c6cbe08c 100644 --- a/arch/arm/mach-at91/at91sam9261_devices.c +++ b/arch/arm/mach-at91/at91sam9261_devices.c @@ -285,7 +285,7 @@ static struct i2c_gpio_platform_data pdata = { static struct platform_device at91sam9261_twi_device = { .name = "i2c-gpio", - .id = -1, + .id = 0, .dev.platform_data = &pdata, }; diff --git a/arch/arm/mach-at91/at91sam9263_devices.c b/arch/arm/mach-at91/at91sam9263_devices.c index fe99206de880..8d443225266e 100644 --- a/arch/arm/mach-at91/at91sam9263_devices.c +++ 
b/arch/arm/mach-at91/at91sam9263_devices.c @@ -542,7 +542,7 @@ static struct i2c_gpio_platform_data pdata = { static struct platform_device at91sam9263_twi_device = { .name = "i2c-gpio", - .id = -1, + .id = 0, .dev.platform_data = &pdata, }; diff --git a/arch/arm/mach-at91/at91sam9rl_devices.c b/arch/arm/mach-at91/at91sam9rl_devices.c index fe4ae22e8561..b8d4114ea228 100644 --- a/arch/arm/mach-at91/at91sam9rl_devices.c +++ b/arch/arm/mach-at91/at91sam9rl_devices.c @@ -314,7 +314,7 @@ static struct i2c_gpio_platform_data pdata = { static struct platform_device at91sam9rl_twi_device = { .name = "i2c-gpio", - .id = -1, + .id = 0, .dev.platform_data = &pdata, }; diff --git a/arch/arm/mach-at91/setup.c b/arch/arm/mach-at91/setup.c index f44a2e7272e3..55293a78cfac 100644 --- a/arch/arm/mach-at91/setup.c +++ b/arch/arm/mach-at91/setup.c @@ -104,6 +104,8 @@ static void __init soc_detect(u32 dbgu_base) switch (socid) { case ARCH_ID_AT91RM9200: at91_soc_initdata.type = AT91_SOC_RM9200; + if (at91_soc_initdata.subtype == AT91_SOC_SUBTYPE_NONE) + at91_soc_initdata.subtype = AT91_SOC_RM9200_BGA; at91_boot_soc = at91rm9200_soc; break; @@ -146,7 +148,7 @@ static void __init soc_detect(u32 dbgu_base) } /* at91sam9g10 */ - if ((cidr & ~AT91_CIDR_EXT) == ARCH_ID_AT91SAM9G10) { + if ((socid & ~AT91_CIDR_EXT) == ARCH_ID_AT91SAM9G10) { at91_soc_initdata.type = AT91_SOC_SAM9G10; at91_boot_soc = at91sam9261_soc; } diff --git a/arch/arm/mach-dove/include/mach/pm.h b/arch/arm/mach-dove/include/mach/pm.h index 3ad9f946a9e8..11799c33475e 100644 --- a/arch/arm/mach-dove/include/mach/pm.h +++ b/arch/arm/mach-dove/include/mach/pm.h @@ -45,7 +45,7 @@ static inline int pmu_to_irq(int pin) static inline int irq_to_pmu(int irq) { - if (IRQ_DOVE_PMU_START < irq && irq < NR_IRQS) + if (IRQ_DOVE_PMU_START <= irq && irq < NR_IRQS) return irq - IRQ_DOVE_PMU_START; return -EINVAL; diff --git a/arch/arm/mach-dove/irq.c b/arch/arm/mach-dove/irq.c index f07fd16e0c9b..9f2fd1001746 100644 --- a/arch/arm/mach-dove/irq.c +++ b/arch/arm/mach-dove/irq.c @@ -61,8 +61,20 @@ static void pmu_irq_ack(struct irq_data *d) int pin = irq_to_pmu(d->irq); u32 u; + /* + * The PMU mask register is not RW0C: it is RW. This means that + * the bits take whatever value is written to them; if you write + * a '1', you will set the interrupt. + * + * Unfortunately this means there is NO race free way to clear + * these interrupts. + * + * So, let's structure the code so that the window is as small as + * possible. 
+ */ u = ~(1 << (pin & 31)); - writel(u, PMU_INTERRUPT_CAUSE); + u &= readl_relaxed(PMU_INTERRUPT_CAUSE); + writel_relaxed(u, PMU_INTERRUPT_CAUSE); } static struct irq_chip pmu_irq_chip = { diff --git a/arch/arm/mach-imx/hotplug.c b/arch/arm/mach-imx/hotplug.c index 692c6e22523b..0291508a2dee 100644 --- a/arch/arm/mach-imx/hotplug.c +++ b/arch/arm/mach-imx/hotplug.c @@ -42,22 +42,6 @@ static inline void cpu_enter_lowpower(void) : "cc"); } -static inline void cpu_leave_lowpower(void) -{ - unsigned int v; - - asm volatile( - "mrc p15, 0, %0, c1, c0, 0\n" - " orr %0, %0, %1\n" - " mcr p15, 0, %0, c1, c0, 0\n" - " mrc p15, 0, %0, c1, c0, 1\n" - " orr %0, %0, %2\n" - " mcr p15, 0, %0, c1, c0, 1\n" - : "=&r" (v) - : "Ir" (CR_C), "Ir" (0x40) - : "cc"); -} - /* * platform-specific code to shutdown a CPU * @@ -67,9 +51,8 @@ void imx_cpu_die(unsigned int cpu) { cpu_enter_lowpower(); imx_enable_cpu(cpu, false); - cpu_do_idle(); - cpu_leave_lowpower(); - /* We should never return from idle */ - panic("cpu %d unexpectedly exit from shutdown\n", cpu); + /* spin here until hardware takes it down */ + while (1) + ; } diff --git a/arch/arm/mach-kirkwood/pcie.c b/arch/arm/mach-kirkwood/pcie.c index f56a0118c1bb..c46d20e5f3fd 100644 --- a/arch/arm/mach-kirkwood/pcie.c +++ b/arch/arm/mach-kirkwood/pcie.c @@ -212,14 +212,19 @@ static int __init kirkwood_pcie_setup(int nr, struct pci_sys_data *sys) return 1; } +/* + * The root complex has a hardwired class of PCI_CLASS_MEMORY_OTHER, when it + * is operating as a root complex this needs to be switched to + * PCI_CLASS_BRIDGE_HOST or Linux will errantly try to process the BAR's on + * the device. Decoding setup is handled by the orion code. + */ static void __devinit rc_pci_fixup(struct pci_dev *dev) { - /* - * Prevent enumeration of root complex. 
- */ if (dev->bus->parent == NULL && dev->devfn == 0) { int i; + dev->class &= 0xff; + dev->class |= PCI_CLASS_BRIDGE_HOST << 8; for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) { dev->resource[i].start = 0; dev->resource[i].end = 0; diff --git a/arch/arm/mach-pxa/include/mach/mfp-pxa27x.h b/arch/arm/mach-pxa/include/mach/mfp-pxa27x.h index a65867209aa0..b4eb3daf037b 100644 --- a/arch/arm/mach-pxa/include/mach/mfp-pxa27x.h +++ b/arch/arm/mach-pxa/include/mach/mfp-pxa27x.h @@ -462,6 +462,9 @@ GPIO76_LCD_PCLK, \ GPIO77_LCD_BIAS +/* these enable a work-around for a hw bug in pxa27x during ac97 warm reset */ +#define GPIO113_AC97_nRESET_GPIO_HIGH MFP_CFG_OUT(GPIO113, AF0, DEFAULT) +#define GPIO95_AC97_nRESET_GPIO_HIGH MFP_CFG_OUT(GPIO95, AF0, DEFAULT) extern int keypad_set_wake(unsigned int on); #endif /* __ASM_ARCH_MFP_PXA27X_H */ diff --git a/arch/arm/mach-pxa/include/mach/smemc.h b/arch/arm/mach-pxa/include/mach/smemc.h index b7de471b273a..b802f285fe00 100644 --- a/arch/arm/mach-pxa/include/mach/smemc.h +++ b/arch/arm/mach-pxa/include/mach/smemc.h @@ -37,6 +37,7 @@ #define CSADRCFG1 (SMEMC_VIRT + 0x84) /* Address Configuration Register for CS1 */ #define CSADRCFG2 (SMEMC_VIRT + 0x88) /* Address Configuration Register for CS2 */ #define CSADRCFG3 (SMEMC_VIRT + 0x8C) /* Address Configuration Register for CS3 */ +#define CSMSADRCFG (SMEMC_VIRT + 0xA0) /* Chip Select Configuration Register */ /* * More handy macros for PCMCIA diff --git a/arch/arm/mach-pxa/pxa27x.c b/arch/arm/mach-pxa/pxa27x.c index 4726c246dcdc..a2fe795bae14 100644 --- a/arch/arm/mach-pxa/pxa27x.c +++ b/arch/arm/mach-pxa/pxa27x.c @@ -47,9 +47,9 @@ void pxa27x_clear_otgph(void) EXPORT_SYMBOL(pxa27x_clear_otgph); static unsigned long ac97_reset_config[] = { - GPIO113_GPIO, + GPIO113_AC97_nRESET_GPIO_HIGH, GPIO113_AC97_nRESET, - GPIO95_GPIO, + GPIO95_AC97_nRESET_GPIO_HIGH, GPIO95_AC97_nRESET, }; diff --git a/arch/arm/mach-pxa/smemc.c b/arch/arm/mach-pxa/smemc.c index 79923058d10f..f38aa890b2c9 100644 --- a/arch/arm/mach-pxa/smemc.c +++ b/arch/arm/mach-pxa/smemc.c @@ -40,6 +40,8 @@ static void pxa3xx_smemc_resume(void) __raw_writel(csadrcfg[1], CSADRCFG1); __raw_writel(csadrcfg[2], CSADRCFG2); __raw_writel(csadrcfg[3], CSADRCFG3); + /* CSMSADRCFG wakes up in its default state (0), so we need to set it */ + __raw_writel(0x2, CSMSADRCFG); } static struct syscore_ops smemc_syscore_ops = { @@ -49,8 +51,19 @@ static struct syscore_ops smemc_syscore_ops = { static int __init smemc_init(void) { - if (cpu_is_pxa3xx()) + if (cpu_is_pxa3xx()) { + /* + * The only documentation we have on the + * Chip Select Configuration Register (CSMSADRCFG) is that + * it must be programmed to 0x2. + * Moreover, in the bit definitions, the second bit + * (CSMSADRCFG[1]) is called "SETALWAYS". + * Other bits are reserved in this register. 
+ */ + __raw_writel(0x2, CSMSADRCFG); + register_syscore_ops(&smemc_syscore_ops); + } return 0; } diff --git a/arch/arm/mach-realview/include/mach/board-eb.h b/arch/arm/mach-realview/include/mach/board-eb.h index 124bce6b4d7b..a301e61a5554 100644 --- a/arch/arm/mach-realview/include/mach/board-eb.h +++ b/arch/arm/mach-realview/include/mach/board-eb.h @@ -47,7 +47,7 @@ #define REALVIEW_EB_USB_BASE 0x4F000000 /* USB */ #ifdef CONFIG_REALVIEW_EB_ARM11MP_REVB -#define REALVIEW_EB11MP_PRIV_MEM_BASE 0x1F000000 +#define REALVIEW_EB11MP_PRIV_MEM_BASE 0x10100000 #define REALVIEW_EB11MP_L220_BASE 0x10102000 /* L220 registers */ #define REALVIEW_EB11MP_SYS_PLD_CTRL1 0xD8 /* Register offset for MPCore sysctl */ #else diff --git a/arch/arm/mach-s3c24xx/include/mach/debug-macro.S b/arch/arm/mach-s3c24xx/include/mach/debug-macro.S index 4135de87d1f7..13ed33c69113 100644 --- a/arch/arm/mach-s3c24xx/include/mach/debug-macro.S +++ b/arch/arm/mach-s3c24xx/include/mach/debug-macro.S @@ -40,17 +40,17 @@ addeq \rd, \rx, #(S3C24XX_PA_GPIO - S3C24XX_PA_UART) addne \rd, \rx, #(S3C24XX_VA_GPIO - S3C24XX_VA_UART) bic \rd, \rd, #0xff000 - ldr \rd, [ \rd, # S3C2410_GSTATUS1 - S3C2410_GPIOREG(0) ] + ldr \rd, [\rd, # S3C2410_GSTATUS1 - S3C2410_GPIOREG(0)] and \rd, \rd, #0x00ff0000 teq \rd, #0x00440000 @ is it 2440? 1004: - ldr \rd, [ \rx, # S3C2410_UFSTAT ] + ldr \rd, [\rx, # S3C2410_UFSTAT] moveq \rd, \rd, lsr #SHIFT_2440TXF tst \rd, #S3C2410_UFSTAT_TXFULL .endm .macro fifo_full_s3c2410 rd, rx - ldr \rd, [ \rx, # S3C2410_UFSTAT ] + ldr \rd, [\rx, # S3C2410_UFSTAT] tst \rd, #S3C2410_UFSTAT_TXFULL .endm @@ -68,18 +68,18 @@ addeq \rd, \rx, #(S3C24XX_PA_GPIO - S3C24XX_PA_UART) addne \rd, \rx, #(S3C24XX_VA_GPIO - S3C24XX_VA_UART) bic \rd, \rd, #0xff000 - ldr \rd, [ \rd, # S3C2410_GSTATUS1 - S3C2410_GPIOREG(0) ] + ldr \rd, [\rd, # S3C2410_GSTATUS1 - S3C2410_GPIOREG(0)] and \rd, \rd, #0x00ff0000 teq \rd, #0x00440000 @ is it 2440? 
10000: - ldr \rd, [ \rx, # S3C2410_UFSTAT ] + ldr \rd, [\rx, # S3C2410_UFSTAT] andne \rd, \rd, #S3C2410_UFSTAT_TXMASK andeq \rd, \rd, #S3C2440_UFSTAT_TXMASK .endm .macro fifo_level_s3c2410 rd, rx - ldr \rd, [ \rx, # S3C2410_UFSTAT ] + ldr \rd, [\rx, # S3C2410_UFSTAT] and \rd, \rd, #S3C2410_UFSTAT_TXMASK .endm diff --git a/arch/arm/mach-s3c24xx/include/mach/dma.h b/arch/arm/mach-s3c24xx/include/mach/dma.h index acbdfecd4186..ccaaafc65c12 100644 --- a/arch/arm/mach-s3c24xx/include/mach/dma.h +++ b/arch/arm/mach-s3c24xx/include/mach/dma.h @@ -24,7 +24,8 @@ */ enum dma_ch { - DMACH_XD0, + DMACH_DT_PROP = -1, /* not yet supported, do not use */ + DMACH_XD0 = 0, DMACH_XD1, DMACH_SDI, DMACH_SPI0, diff --git a/arch/arm/mach-s3c24xx/include/mach/entry-macro.S b/arch/arm/mach-s3c24xx/include/mach/entry-macro.S index 7615a14773fa..6a21beeba1da 100644 --- a/arch/arm/mach-s3c24xx/include/mach/entry-macro.S +++ b/arch/arm/mach-s3c24xx/include/mach/entry-macro.S @@ -31,10 +31,10 @@ @@ try the interrupt offset register, since it is there - ldr \irqstat, [ \base, #INTPND ] + ldr \irqstat, [\base, #INTPND ] teq \irqstat, #0 beq 1002f - ldr \irqnr, [ \base, #INTOFFSET ] + ldr \irqnr, [\base, #INTOFFSET ] mov \tmp, #1 tst \irqstat, \tmp, lsl \irqnr bne 1001f diff --git a/arch/arm/mach-s3c24xx/pm-h1940.S b/arch/arm/mach-s3c24xx/pm-h1940.S index c93bf2db9f4d..6183a688012b 100644 --- a/arch/arm/mach-s3c24xx/pm-h1940.S +++ b/arch/arm/mach-s3c24xx/pm-h1940.S @@ -30,4 +30,4 @@ h1940_pm_return: mov r0, #S3C2410_PA_GPIO - ldr pc, [ r0, #S3C2410_GSTATUS3 - S3C24XX_VA_GPIO ] + ldr pc, [r0, #S3C2410_GSTATUS3 - S3C24XX_VA_GPIO] diff --git a/arch/arm/mach-s3c24xx/sleep-s3c2410.S b/arch/arm/mach-s3c24xx/sleep-s3c2410.S index dd5b6388a5a5..65200ae72c90 100644 --- a/arch/arm/mach-s3c24xx/sleep-s3c2410.S +++ b/arch/arm/mach-s3c24xx/sleep-s3c2410.S @@ -45,9 +45,9 @@ ENTRY(s3c2410_cpu_suspend) ldr r4, =S3C2410_REFRESH ldr r5, =S3C24XX_MISCCR ldr r6, =S3C2410_CLKCON - ldr r7, [ r4 ] @ get REFRESH (and ensure in TLB) - ldr r8, [ r5 ] @ get MISCCR (and ensure in TLB) - ldr r9, [ r6 ] @ get CLKCON (and ensure in TLB) + ldr r7, [r4] @ get REFRESH (and ensure in TLB) + ldr r8, [r5] @ get MISCCR (and ensure in TLB) + ldr r9, [r6] @ get CLKCON (and ensure in TLB) orr r7, r7, #S3C2410_REFRESH_SELF @ SDRAM sleep command orr r8, r8, #S3C2410_MISCCR_SDSLEEP @ SDRAM power-down signals @@ -61,8 +61,8 @@ ENTRY(s3c2410_cpu_suspend) @@ align next bit of code to cache line .align 5 s3c2410_do_sleep: - streq r7, [ r4 ] @ SDRAM sleep command - streq r8, [ r5 ] @ SDRAM power-down config - streq r9, [ r6 ] @ CPU sleep + streq r7, [r4] @ SDRAM sleep command + streq r8, [r5] @ SDRAM power-down config + streq r9, [r6] @ CPU sleep 1: beq 1b mov pc, r14 diff --git a/arch/arm/mach-s3c24xx/sleep-s3c2412.S b/arch/arm/mach-s3c24xx/sleep-s3c2412.S index c82418ed714d..5adaceb7da13 100644 --- a/arch/arm/mach-s3c24xx/sleep-s3c2412.S +++ b/arch/arm/mach-s3c24xx/sleep-s3c2412.S @@ -57,12 +57,12 @@ s3c2412_sleep_enter1: * retry, as simply returning causes the system to lock. 
*/ - ldrne r9, [ r1 ] - strne r9, [ r1 ] - ldrne r9, [ r2 ] - strne r9, [ r2 ] - ldrne r9, [ r3 ] - strne r9, [ r3 ] + ldrne r9, [r1] + strne r9, [r1] + ldrne r9, [r2] + strne r9, [r2] + ldrne r9, [r3] + strne r9, [r3] bne s3c2412_sleep_enter1 mov pc, r14 diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c index 28ca7c259815..a6571368900e 100644 --- a/arch/arm/mm/dma-mapping.c +++ b/arch/arm/mm/dma-mapping.c @@ -783,25 +783,27 @@ static void dma_cache_maint_page(struct page *page, unsigned long offset, size_t size, enum dma_data_direction dir, void (*op)(const void *, size_t, int)) { + unsigned long pfn; + size_t left = size; + + pfn = page_to_pfn(page) + offset / PAGE_SIZE; + offset %= PAGE_SIZE; + /* * A single sg entry may refer to multiple physically contiguous * pages. But we still need to process highmem pages individually. * If highmem is not configured then the bulk of this loop gets * optimized out. */ - size_t left = size; do { size_t len = left; void *vaddr; + page = pfn_to_page(pfn); + if (PageHighMem(page)) { - if (len + offset > PAGE_SIZE) { - if (offset >= PAGE_SIZE) { - page += offset / PAGE_SIZE; - offset %= PAGE_SIZE; - } + if (len + offset > PAGE_SIZE) len = PAGE_SIZE - offset; - } vaddr = kmap_high_get(page); if (vaddr) { vaddr += offset; @@ -818,7 +820,7 @@ static void dma_cache_maint_page(struct page *page, unsigned long offset, op(vaddr, len, dir); } offset = 0; - page++; + pfn++; left -= len; } while (left); } diff --git a/arch/arm/mm/flush.c b/arch/arm/mm/flush.c index 77458548e031..40ca11ed6e5f 100644 --- a/arch/arm/mm/flush.c +++ b/arch/arm/mm/flush.c @@ -231,8 +231,6 @@ void __sync_icache_dcache(pte_t pteval) struct page *page; struct address_space *mapping; - if (!pte_present_user(pteval)) - return; if (cache_is_vipt_nonaliasing() && !pte_exec(pteval)) /* only flush non-aliasing VIPT caches for exec mappings */ return; diff --git a/arch/arm/mm/mm.h b/arch/arm/mm/mm.h index d9b47a1b97af..1f53fe46183d 100644 --- a/arch/arm/mm/mm.h +++ b/arch/arm/mm/mm.h @@ -55,6 +55,9 @@ extern void __flush_dcache_page(struct address_space *mapping, struct page *page /* permanent static mappings from iotable_init() */ #define VM_ARM_STATIC_MAPPING 0x40000000 +/* empty mapping */ +#define VM_ARM_EMPTY_MAPPING 0x20000000 + /* mapping type (attributes) for permanent static mappings */ #define VM_ARM_MTYPE(mt) ((mt) << 20) #define VM_ARM_MTYPE_MASK (0x1f << 20) diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c index b40aaac684ad..0439314e681d 100644 --- a/arch/arm/mm/mmu.c +++ b/arch/arm/mm/mmu.c @@ -498,7 +498,7 @@ static void __init build_mem_type_table(void) #endif for (i = 0; i < 16; i++) { - unsigned long v = pgprot_val(protection_map[i]); + pteval_t v = pgprot_val(protection_map[i]); protection_map[i] = __pgprot(v | user_pgprot); } @@ -840,7 +840,7 @@ static void __init pmd_empty_section_gap(unsigned long addr) vm = early_alloc_aligned(sizeof(*vm), __alignof__(*vm)); vm->addr = (void *)addr; vm->size = SECTION_SIZE; - vm->flags = VM_IOREMAP | VM_ARM_STATIC_MAPPING; + vm->flags = VM_IOREMAP | VM_ARM_EMPTY_MAPPING; vm->caller = pmd_empty_section_gap; vm_area_add_early(vm); } @@ -853,7 +853,7 @@ static void __init fill_pmd_gaps(void) /* we're still single threaded hence no lock needed here */ for (vm = vmlist; vm; vm = vm->next) { - if (!(vm->flags & VM_ARM_STATIC_MAPPING)) + if (!(vm->flags & (VM_ARM_STATIC_MAPPING | VM_ARM_EMPTY_MAPPING))) continue; addr = (unsigned long)vm->addr; if (addr < next) diff --git a/arch/arm/mm/tlb-v7.S b/arch/arm/mm/tlb-v7.S index 
c2021139cb56..ea94765acf9a 100644 --- a/arch/arm/mm/tlb-v7.S +++ b/arch/arm/mm/tlb-v7.S @@ -38,10 +38,10 @@ ENTRY(v7wbi_flush_user_tlb_range) dsb mov r0, r0, lsr #PAGE_SHIFT @ align address mov r1, r1, lsr #PAGE_SHIFT -#ifdef CONFIG_ARM_ERRATA_720789 - mov r3, #0 -#else asid r3, r3 @ mask ASID +#ifdef CONFIG_ARM_ERRATA_720789 + ALT_SMP(W(mov) r3, #0 ) + ALT_UP(W(nop) ) #endif orr r0, r3, r0, lsl #PAGE_SHIFT @ Create initial MVA mov r1, r1, lsl #PAGE_SHIFT diff --git a/arch/arm/plat-omap/counter_32k.c b/arch/arm/plat-omap/counter_32k.c index 5068fe5a6910..fd433e7a25fd 100644 --- a/arch/arm/plat-omap/counter_32k.c +++ b/arch/arm/plat-omap/counter_32k.c @@ -50,22 +50,29 @@ static u32 notrace omap_32k_read_sched_clock(void) * nsecs and adds to a monotonically increasing timespec. */ static struct timespec persistent_ts; -static cycles_t cycles, last_cycles; +static cycles_t cycles; static unsigned int persistent_mult, persistent_shift; +static DEFINE_SPINLOCK(read_persistent_clock_lock); + void read_persistent_clock(struct timespec *ts) { unsigned long long nsecs; - cycles_t delta; - struct timespec *tsp = &persistent_ts; + cycles_t last_cycles; + unsigned long flags; + + spin_lock_irqsave(&read_persistent_clock_lock, flags); last_cycles = cycles; cycles = timer_32k_base ? __raw_readl(timer_32k_base) : 0; - delta = cycles - last_cycles; - nsecs = clocksource_cyc2ns(delta, persistent_mult, persistent_shift); + nsecs = clocksource_cyc2ns(cycles - last_cycles, + persistent_mult, persistent_shift); + + timespec_add_ns(&persistent_ts, nsecs); + + *ts = persistent_ts; - timespec_add_ns(tsp, nsecs); - *ts = *tsp; + spin_unlock_irqrestore(&read_persistent_clock_lock, flags); } int __init omap_init_clocksource_32k(void) diff --git a/arch/arm/plat-omap/dmtimer.c b/arch/arm/plat-omap/dmtimer.c index 652139c0339e..7ac301ee283c 100644 --- a/arch/arm/plat-omap/dmtimer.c +++ b/arch/arm/plat-omap/dmtimer.c @@ -238,7 +238,7 @@ EXPORT_SYMBOL_GPL(omap_dm_timer_enable); void omap_dm_timer_disable(struct omap_dm_timer *timer) { - pm_runtime_put(&timer->pdev->dev); + pm_runtime_put_sync(&timer->pdev->dev); } EXPORT_SYMBOL_GPL(omap_dm_timer_disable); diff --git a/arch/arm/plat-s3c24xx/dma.c b/arch/arm/plat-s3c24xx/dma.c index 28f898f75380..db98e7021f0d 100644 --- a/arch/arm/plat-s3c24xx/dma.c +++ b/arch/arm/plat-s3c24xx/dma.c @@ -430,7 +430,7 @@ s3c2410_dma_canload(struct s3c2410_dma_chan *chan) * when necessary. */ -int s3c2410_dma_enqueue(unsigned int channel, void *id, +int s3c2410_dma_enqueue(enum dma_ch channel, void *id, dma_addr_t data, int size) { struct s3c2410_dma_chan *chan = s3c_dma_lookup_channel(channel); diff --git a/arch/arm/plat-samsung/include/plat/debug-macro.S b/arch/arm/plat-samsung/include/plat/debug-macro.S index 207e275362a8..f3a9cff6d5d4 100644 --- a/arch/arm/plat-samsung/include/plat/debug-macro.S +++ b/arch/arm/plat-samsung/include/plat/debug-macro.S @@ -14,12 +14,12 @@ /* The S5PV210/S5PC110 implementations are as belows. 
*/ .macro fifo_level_s5pv210 rd, rx - ldr \rd, [ \rx, # S3C2410_UFSTAT ] + ldr \rd, [\rx, # S3C2410_UFSTAT] and \rd, \rd, #S5PV210_UFSTAT_TXMASK .endm .macro fifo_full_s5pv210 rd, rx - ldr \rd, [ \rx, # S3C2410_UFSTAT ] + ldr \rd, [\rx, # S3C2410_UFSTAT] tst \rd, #S5PV210_UFSTAT_TXFULL .endm @@ -27,7 +27,7 @@ * most widely re-used */ .macro fifo_level_s3c2440 rd, rx - ldr \rd, [ \rx, # S3C2410_UFSTAT ] + ldr \rd, [\rx, # S3C2410_UFSTAT] and \rd, \rd, #S3C2440_UFSTAT_TXMASK .endm @@ -36,7 +36,7 @@ #endif .macro fifo_full_s3c2440 rd, rx - ldr \rd, [ \rx, # S3C2410_UFSTAT ] + ldr \rd, [\rx, # S3C2410_UFSTAT] tst \rd, #S3C2440_UFSTAT_TXFULL .endm @@ -45,11 +45,11 @@ #endif .macro senduart,rd,rx - strb \rd, [\rx, # S3C2410_UTXH ] + strb \rd, [\rx, # S3C2410_UTXH] .endm .macro busyuart, rd, rx - ldr \rd, [ \rx, # S3C2410_UFCON ] + ldr \rd, [\rx, # S3C2410_UFCON] tst \rd, #S3C2410_UFCON_FIFOMODE @ fifo enabled? beq 1001f @ @ FIFO enabled... @@ -60,7 +60,7 @@ 1001: @ busy waiting for non fifo - ldr \rd, [ \rx, # S3C2410_UTRSTAT ] + ldr \rd, [\rx, # S3C2410_UTRSTAT] tst \rd, #S3C2410_UTRSTAT_TXFE beq 1001b @@ -68,7 +68,7 @@ .endm .macro waituart,rd,rx - ldr \rd, [ \rx, # S3C2410_UFCON ] + ldr \rd, [\rx, # S3C2410_UFCON] tst \rd, #S3C2410_UFCON_FIFOMODE @ fifo enabled? beq 1001f @ @ FIFO enabled... @@ -79,7 +79,7 @@ b 1002f 1001: @ idle waiting for non fifo - ldr \rd, [ \rx, # S3C2410_UTRSTAT ] + ldr \rd, [\rx, # S3C2410_UTRSTAT] tst \rd, #S3C2410_UTRSTAT_TXFE beq 1001b diff --git a/arch/arm/vfp/entry.S b/arch/arm/vfp/entry.S index 7788327044ff..50a724a8334d 100644 --- a/arch/arm/vfp/entry.S +++ b/arch/arm/vfp/entry.S @@ -22,7 +22,7 @@ @ IRQs disabled. @ ENTRY(do_vfp) -#ifdef CONFIG_PREEMPT +#ifdef CONFIG_PREEMPT_COUNT ldr r4, [r10, #TI_PREEMPT] @ get preempt count add r11, r4, #1 @ increment it str r11, [r10, #TI_PREEMPT] @@ -36,7 +36,7 @@ ENTRY(do_vfp) ENDPROC(do_vfp) ENTRY(vfp_null_entry) -#ifdef CONFIG_PREEMPT +#ifdef CONFIG_PREEMPT_COUNT get_thread_info r10 ldr r4, [r10, #TI_PREEMPT] @ get preempt count sub r11, r4, #1 @ decrement it @@ -54,7 +54,7 @@ ENDPROC(vfp_null_entry) __INIT ENTRY(vfp_testing_entry) -#ifdef CONFIG_PREEMPT +#ifdef CONFIG_PREEMPT_COUNT get_thread_info r10 ldr r4, [r10, #TI_PREEMPT] @ get preempt count sub r11, r4, #1 @ decrement it diff --git a/arch/arm/vfp/vfphw.S b/arch/arm/vfp/vfphw.S index 3a0efaad6090..6ff903e50199 100644 --- a/arch/arm/vfp/vfphw.S +++ b/arch/arm/vfp/vfphw.S @@ -167,7 +167,7 @@ vfp_hw_state_valid: @ else it's one 32-bit instruction, so @ always subtract 4 from the following @ instruction address. -#ifdef CONFIG_PREEMPT +#ifdef CONFIG_PREEMPT_COUNT get_thread_info r10 ldr r4, [r10, #TI_PREEMPT] @ get preempt count sub r11, r4, #1 @ decrement it @@ -191,7 +191,7 @@ look_for_VFP_exceptions: @ not recognised by VFP DBGSTR "not VFP" -#ifdef CONFIG_PREEMPT +#ifdef CONFIG_PREEMPT_COUNT get_thread_info r10 ldr r4, [r10, #TI_PREEMPT] @ get preempt count sub r11, r4, #1 @ decrement it diff --git a/arch/arm/vfp/vfpmodule.c b/arch/arm/vfp/vfpmodule.c index b9510fb3ca11..eea6057b9ef1 100644 --- a/arch/arm/vfp/vfpmodule.c +++ b/arch/arm/vfp/vfpmodule.c @@ -717,11 +717,14 @@ static int __init vfp_init(void) elf_hwcap |= HWCAP_VFPv3; /* - * Check for VFPv3 D16. CPUs in this configuration - * only have 16 x 64bit registers. + * Check for VFPv3 D16 and VFPv4 D16. CPUs in + * this configuration only have 16 x 64bit + * registers. 
*/ if (((fmrx(MVFR0) & MVFR0_A_SIMD_MASK)) == 1) - elf_hwcap |= HWCAP_VFPv3D16; + elf_hwcap |= HWCAP_VFPv3D16; /* also v4-D16 */ + else + elf_hwcap |= HWCAP_VFPD32; } #endif /* @@ -735,8 +738,10 @@ static int __init vfp_init(void) if ((fmrx(MVFR1) & 0x000fff00) == 0x00011100) elf_hwcap |= HWCAP_NEON; #endif +#ifdef CONFIG_VFPv3 if ((fmrx(MVFR1) & 0xf0000000) == 0x10000000) elf_hwcap |= HWCAP_VFPv4; +#endif } } return 0; diff --git a/arch/cris/include/asm/io.h b/arch/cris/include/asm/io.h index 32567bc2a421..ac12ae2b9286 100644 --- a/arch/cris/include/asm/io.h +++ b/arch/cris/include/asm/io.h @@ -133,12 +133,39 @@ static inline void writel(unsigned int b, volatile void __iomem *addr) #define insb(port,addr,count) (cris_iops ? cris_iops->read_io(port,addr,1,count) : 0) #define insw(port,addr,count) (cris_iops ? cris_iops->read_io(port,addr,2,count) : 0) #define insl(port,addr,count) (cris_iops ? cris_iops->read_io(port,addr,4,count) : 0) -#define outb(data,port) if (cris_iops) cris_iops->write_io(port,(void*)(unsigned)data,1,1) -#define outw(data,port) if (cris_iops) cris_iops->write_io(port,(void*)(unsigned)data,2,1) -#define outl(data,port) if (cris_iops) cris_iops->write_io(port,(void*)(unsigned)data,4,1) -#define outsb(port,addr,count) if(cris_iops) cris_iops->write_io(port,(void*)addr,1,count) -#define outsw(port,addr,count) if(cris_iops) cris_iops->write_io(port,(void*)addr,2,count) -#define outsl(port,addr,count) if(cris_iops) cris_iops->write_io(port,(void*)addr,3,count) +static inline void outb(unsigned char data, unsigned int port) +{ + if (cris_iops) + cris_iops->write_io(port, (void *) &data, 1, 1); +} +static inline void outw(unsigned short data, unsigned int port) +{ + if (cris_iops) + cris_iops->write_io(port, (void *) &data, 2, 1); +} +static inline void outl(unsigned int data, unsigned int port) +{ + if (cris_iops) + cris_iops->write_io(port, (void *) &data, 4, 1); +} +static inline void outsb(unsigned int port, const void *addr, + unsigned long count) +{ + if (cris_iops) + cris_iops->write_io(port, (void *)addr, 1, count); +} +static inline void outsw(unsigned int port, const void *addr, + unsigned long count) +{ + if (cris_iops) + cris_iops->write_io(port, (void *)addr, 2, count); +} +static inline void outsl(unsigned int port, const void *addr, + unsigned long count) +{ + if (cris_iops) + cris_iops->write_io(port, (void *)addr, 4, count); +} /* * Convert a physical pointer to a virtual kernel pointer for /dev/mem diff --git a/arch/cris/kernel/process.c b/arch/cris/kernel/process.c index 891dad85e8bd..c722027d69ad 100644 --- a/arch/cris/kernel/process.c +++ b/arch/cris/kernel/process.c @@ -25,6 +25,7 @@ #include <linux/elfcore.h> #include <linux/mqueue.h> #include <linux/reboot.h> +#include <linux/rcupdate.h> //#define DEBUG @@ -102,6 +103,7 @@ void cpu_idle (void) { /* endless idle loop with no priority at all */ while (1) { + rcu_idle_enter(); while (!need_resched()) { void (*idle)(void); /* @@ -114,6 +116,7 @@ void cpu_idle (void) idle = default_idle; idle(); } + rcu_idle_exit(); schedule_preempt_disabled(); } } diff --git a/arch/frv/kernel/process.c b/arch/frv/kernel/process.c index d4de48bd5efe..3941cbc91ff6 100644 --- a/arch/frv/kernel/process.c +++ b/arch/frv/kernel/process.c @@ -25,6 +25,7 @@ #include <linux/reboot.h> #include <linux/interrupt.h> #include <linux/pagemap.h> +#include <linux/rcupdate.h> #include <asm/asm-offsets.h> #include <asm/uaccess.h> @@ -84,12 +85,14 @@ void cpu_idle(void) { /* endless idle loop with no priority at all */ while (1) { + 
rcu_idle_enter(); while (!need_resched()) { check_pgt_cache(); if (!frv_dma_inprogress && idle) idle(); } + rcu_idle_exit(); schedule_preempt_disabled(); } diff --git a/arch/h8300/kernel/process.c b/arch/h8300/kernel/process.c index 0e9c315be104..f153ed1a4c08 100644 --- a/arch/h8300/kernel/process.c +++ b/arch/h8300/kernel/process.c @@ -36,6 +36,7 @@ #include <linux/reboot.h> #include <linux/fs.h> #include <linux/slab.h> +#include <linux/rcupdate.h> #include <asm/uaccess.h> #include <asm/traps.h> @@ -78,8 +79,10 @@ void (*idle)(void) = default_idle; void cpu_idle(void) { while (1) { + rcu_idle_enter(); while (!need_resched()) idle(); + rcu_idle_exit(); schedule_preempt_disabled(); } } diff --git a/arch/ia64/kernel/process.c b/arch/ia64/kernel/process.c index ce74e143aea3..86d74ab9800c 100644 --- a/arch/ia64/kernel/process.c +++ b/arch/ia64/kernel/process.c @@ -29,6 +29,7 @@ #include <linux/kdebug.h> #include <linux/utsname.h> #include <linux/tracehook.h> +#include <linux/rcupdate.h> #include <asm/cpu.h> #include <asm/delay.h> @@ -301,6 +302,7 @@ cpu_idle (void) /* endless idle loop with no priority at all */ while (1) { + rcu_idle_enter(); if (can_do_pal_halt) { current_thread_info()->status &= ~TS_POLLING; /* @@ -331,6 +333,7 @@ cpu_idle (void) normal_xtp(); #endif } + rcu_idle_exit(); schedule_preempt_disabled(); check_pgt_cache(); if (cpu_is_offline(cpu)) diff --git a/arch/m32r/kernel/process.c b/arch/m32r/kernel/process.c index 3a4a32b27208..384e63f3a4c4 100644 --- a/arch/m32r/kernel/process.c +++ b/arch/m32r/kernel/process.c @@ -26,6 +26,7 @@ #include <linux/ptrace.h> #include <linux/unistd.h> #include <linux/hardirq.h> +#include <linux/rcupdate.h> #include <asm/io.h> #include <asm/uaccess.h> @@ -82,6 +83,7 @@ void cpu_idle (void) { /* endless idle loop with no priority at all */ while (1) { + rcu_idle_enter(); while (!need_resched()) { void (*idle)(void) = pm_idle; @@ -90,6 +92,7 @@ void cpu_idle (void) idle(); } + rcu_idle_exit(); schedule_preempt_disabled(); } } diff --git a/arch/m68k/include/asm/signal.h b/arch/m68k/include/asm/signal.h index 60e88660169c..93fe83e7ffb5 100644 --- a/arch/m68k/include/asm/signal.h +++ b/arch/m68k/include/asm/signal.h @@ -156,7 +156,7 @@ typedef struct sigaltstack { static inline void sigaddset(sigset_t *set, int _sig) { asm ("bfset %0{%1,#1}" - : "+od" (*set) + : "+o" (*set) : "id" ((_sig - 1) ^ 31) : "cc"); } @@ -164,7 +164,7 @@ static inline void sigaddset(sigset_t *set, int _sig) static inline void sigdelset(sigset_t *set, int _sig) { asm ("bfclr %0{%1,#1}" - : "+od" (*set) + : "+o" (*set) : "id" ((_sig - 1) ^ 31) : "cc"); } @@ -180,7 +180,7 @@ static inline int __gen_sigismember(sigset_t *set, int _sig) int ret; asm ("bfextu %1{%2,#1},%0" : "=d" (ret) - : "od" (*set), "id" ((_sig-1) ^ 31) + : "o" (*set), "id" ((_sig-1) ^ 31) : "cc"); return ret; } diff --git a/arch/m68k/kernel/process.c b/arch/m68k/kernel/process.c index c488e3cfab53..ac2892e49c7c 100644 --- a/arch/m68k/kernel/process.c +++ b/arch/m68k/kernel/process.c @@ -25,6 +25,7 @@ #include <linux/reboot.h> #include <linux/init_task.h> #include <linux/mqueue.h> +#include <linux/rcupdate.h> #include <asm/uaccess.h> #include <asm/traps.h> @@ -75,8 +76,10 @@ void cpu_idle(void) { /* endless idle loop with no priority at all */ while (1) { + rcu_idle_enter(); while (!need_resched()) idle(); + rcu_idle_exit(); schedule_preempt_disabled(); } } diff --git a/arch/mips/Makefile b/arch/mips/Makefile index 4fedf5a51d96..5c1e75d1c41e 100644 --- a/arch/mips/Makefile +++ b/arch/mips/Makefile @@ -225,7 
+225,7 @@ KBUILD_CPPFLAGS += -D"DATAOFFSET=$(if $(dataoffset-y),$(dataoffset-y),0)" LDFLAGS += -m $(ld-emul) ifdef CONFIG_MIPS -CHECKFLAGS += $(shell $(CC) $(KBUILD_CFLAGS) -dM -E -xc /dev/null | \ +CHECKFLAGS += $(shell $(CC) $(KBUILD_CFLAGS) -dM -E -x c /dev/null | \ egrep -vw '__GNUC_(|MINOR_|PATCHLEVEL_)_' | \ sed -e "s/^\#define /-D'/" -e "s/ /'='/" -e "s/$$/'/") ifdef CONFIG_64BIT diff --git a/arch/mips/kernel/Makefile b/arch/mips/kernel/Makefile index 0c6877ea9004..d3d6fa97c09f 100644 --- a/arch/mips/kernel/Makefile +++ b/arch/mips/kernel/Makefile @@ -104,7 +104,7 @@ obj-$(CONFIG_MIPS_MACHINE) += mips_machine.o obj-$(CONFIG_OF) += prom.o -CFLAGS_cpu-bugs64.o = $(shell if $(CC) $(KBUILD_CFLAGS) -Wa,-mdaddi -c -o /dev/null -xc /dev/null >/dev/null 2>&1; then echo "-DHAVE_AS_SET_DADDI"; fi) +CFLAGS_cpu-bugs64.o = $(shell if $(CC) $(KBUILD_CFLAGS) -Wa,-mdaddi -c -o /dev/null -x c /dev/null >/dev/null 2>&1; then echo "-DHAVE_AS_SET_DADDI"; fi) obj-$(CONFIG_HAVE_STD_PC_SERIAL_PORT) += 8250-platform.o diff --git a/arch/mips/kernel/kgdb.c b/arch/mips/kernel/kgdb.c index f4546e97c60d..23817a6e32b6 100644 --- a/arch/mips/kernel/kgdb.c +++ b/arch/mips/kernel/kgdb.c @@ -283,6 +283,15 @@ static int kgdb_mips_notify(struct notifier_block *self, unsigned long cmd, struct pt_regs *regs = args->regs; int trap = (regs->cp0_cause & 0x7c) >> 2; +#ifdef CONFIG_KPROBES + /* + * Return immediately if the kprobes fault notifier has set + * DIE_PAGE_FAULT. + */ + if (cmd == DIE_PAGE_FAULT) + return NOTIFY_DONE; +#endif /* CONFIG_KPROBES */ + /* Userspace events, ignore. */ if (user_mode(regs)) return NOTIFY_DONE; diff --git a/arch/mips/kernel/process.c b/arch/mips/kernel/process.c index e9a5fd7277f4..69b17a920049 100644 --- a/arch/mips/kernel/process.c +++ b/arch/mips/kernel/process.c @@ -72,9 +72,7 @@ void __noreturn cpu_idle(void) } } #ifdef CONFIG_HOTPLUG_CPU - if (!cpu_online(cpu) && !cpu_isset(cpu, cpu_callin_map) && - (system_state == SYSTEM_RUNNING || - system_state == SYSTEM_BOOTING)) + if (!cpu_online(cpu) && !cpu_isset(cpu, cpu_callin_map)) play_dead(); #endif rcu_idle_exit(); diff --git a/arch/mips/mm/gup.c b/arch/mips/mm/gup.c index 33aadbcf170b..dcfd573871c1 100644 --- a/arch/mips/mm/gup.c +++ b/arch/mips/mm/gup.c @@ -152,6 +152,8 @@ static int gup_huge_pud(pud_t pud, unsigned long addr, unsigned long end, do { VM_BUG_ON(compound_head(page) != head); pages[*nr] = page; + if (PageTail(page)) + get_huge_page_tail(page); (*nr)++; page++; refs++; diff --git a/arch/mn10300/Makefile b/arch/mn10300/Makefile index 7120282bf0d8..3eb4a52ff9a7 100644 --- a/arch/mn10300/Makefile +++ b/arch/mn10300/Makefile @@ -26,7 +26,7 @@ CHECKFLAGS += PROCESSOR := unset UNIT := unset -KBUILD_CFLAGS += -mam33 -mmem-funcs -DCPU=AM33 +KBUILD_CFLAGS += -mam33 -DCPU=AM33 $(call cc-option,-mmem-funcs,) KBUILD_AFLAGS += -mam33 -DCPU=AM33 ifeq ($(CONFIG_MN10300_CURRENT_IN_E2),y) diff --git a/arch/mn10300/kernel/process.c b/arch/mn10300/kernel/process.c index 14707f25153b..675d8f2c32e9 100644 --- a/arch/mn10300/kernel/process.c +++ b/arch/mn10300/kernel/process.c @@ -25,6 +25,7 @@ #include <linux/err.h> #include <linux/fs.h> #include <linux/slab.h> +#include <linux/rcupdate.h> #include <asm/uaccess.h> #include <asm/pgtable.h> #include <asm/io.h> @@ -107,6 +108,7 @@ void cpu_idle(void) { /* endless idle loop with no priority at all */ for (;;) { + rcu_idle_enter(); while (!need_resched()) { void (*idle)(void); @@ -121,6 +123,7 @@ void cpu_idle(void) } idle(); } + rcu_idle_exit(); schedule_preempt_disabled(); } diff --git 
a/arch/parisc/include/asm/atomic.h b/arch/parisc/include/asm/atomic.h index 6c6defc24619..af9cf30ed474 100644 --- a/arch/parisc/include/asm/atomic.h +++ b/arch/parisc/include/asm/atomic.h @@ -141,7 +141,7 @@ static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u) #define atomic_sub_and_test(i,v) (atomic_sub_return((i),(v)) == 0) -#define ATOMIC_INIT(i) ((atomic_t) { (i) }) +#define ATOMIC_INIT(i) { (i) } #define smp_mb__before_atomic_dec() smp_mb() #define smp_mb__after_atomic_dec() smp_mb() @@ -150,7 +150,7 @@ static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u) #ifdef CONFIG_64BIT -#define ATOMIC64_INIT(i) ((atomic64_t) { (i) }) +#define ATOMIC64_INIT(i) { (i) } static __inline__ s64 __atomic64_add_return(s64 i, atomic64_t *v) diff --git a/arch/parisc/include/asm/pgtable.h b/arch/parisc/include/asm/pgtable.h index ee99f2339356..7df49fad29f9 100644 --- a/arch/parisc/include/asm/pgtable.h +++ b/arch/parisc/include/asm/pgtable.h @@ -12,11 +12,10 @@ #include <linux/bitops.h> #include <linux/spinlock.h> +#include <linux/mm_types.h> #include <asm/processor.h> #include <asm/cache.h> -struct vm_area_struct; - /* * kern_addr_valid(ADDR) tests if ADDR is pointing to valid kernel * memory. For the return value to be meaningful, ADDR must be >= @@ -40,7 +39,14 @@ struct vm_area_struct; do{ \ *(pteptr) = (pteval); \ } while(0) -#define set_pte_at(mm,addr,ptep,pteval) set_pte(ptep,pteval) + +extern void purge_tlb_entries(struct mm_struct *, unsigned long); + +#define set_pte_at(mm, addr, ptep, pteval) \ + do { \ + set_pte(ptep, pteval); \ + purge_tlb_entries(mm, addr); \ + } while (0) #endif /* !__ASSEMBLY__ */ @@ -466,6 +472,7 @@ static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr, old = pte_val(*ptep); new = pte_val(pte_wrprotect(__pte (old))); } while (cmpxchg((unsigned long *) ptep, old, new) != old); + purge_tlb_entries(mm, addr); #else pte_t old_pte = *ptep; set_pte_at(mm, addr, ptep, pte_wrprotect(old_pte)); diff --git a/arch/parisc/kernel/cache.c b/arch/parisc/kernel/cache.c index 9d181890a7e3..fa2146341dab 100644 --- a/arch/parisc/kernel/cache.c +++ b/arch/parisc/kernel/cache.c @@ -420,6 +420,24 @@ void kunmap_parisc(void *addr) EXPORT_SYMBOL(kunmap_parisc); #endif +void purge_tlb_entries(struct mm_struct *mm, unsigned long addr) +{ + unsigned long flags; + + /* Note: purge_tlb_entries can be called at startup with + no context. */ + + /* Disable preemption while we play with %sr1. 
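 * (Annotation, inferred from the surrounding code rather than the patch
 * itself: mtsp() below loads the mm's space id into the per-CPU %sr1
 * register and the pdtlb/pitlb purges are issued against that space, so
 * being preempted in between could purge entries for the wrong context.)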
*/ + preempt_disable(); + mtsp(mm->context, 1); + purge_tlb_start(flags); + pdtlb(addr); + pitlb(addr); + purge_tlb_end(flags); + preempt_enable(); +} +EXPORT_SYMBOL(purge_tlb_entries); + void __flush_tlb_range(unsigned long sid, unsigned long start, unsigned long end) { diff --git a/arch/parisc/kernel/process.c b/arch/parisc/kernel/process.c index d4b94b395c16..c54a4dbcb389 100644 --- a/arch/parisc/kernel/process.c +++ b/arch/parisc/kernel/process.c @@ -48,6 +48,7 @@ #include <linux/unistd.h> #include <linux/kallsyms.h> #include <linux/uaccess.h> +#include <linux/rcupdate.h> #include <asm/io.h> #include <asm/asm-offsets.h> @@ -69,8 +70,10 @@ void cpu_idle(void) /* endless idle loop with no priority at all */ while (1) { + rcu_idle_enter(); while (!need_resched()) barrier(); + rcu_idle_exit(); schedule_preempt_disabled(); check_pgt_cache(); } diff --git a/arch/parisc/kernel/signal32.c b/arch/parisc/kernel/signal32.c index e14132430762..d0ea054bd3d2 100644 --- a/arch/parisc/kernel/signal32.c +++ b/arch/parisc/kernel/signal32.c @@ -67,7 +67,8 @@ put_sigset32(compat_sigset_t __user *up, sigset_t *set, size_t sz) { compat_sigset_t s; - if (sz != sizeof *set) panic("put_sigset32()"); + if (sz != sizeof *set) + return -EINVAL; sigset_64to32(&s, set); return copy_to_user(up, &s, sizeof s); @@ -79,7 +80,8 @@ get_sigset32(compat_sigset_t __user *up, sigset_t *set, size_t sz) compat_sigset_t s; int r; - if (sz != sizeof *set) panic("put_sigset32()"); + if (sz != sizeof *set) + return -EINVAL; if ((r = copy_from_user(&s, up, sz)) == 0) { sigset_32to64(set, &s); diff --git a/arch/parisc/kernel/sys_parisc.c b/arch/parisc/kernel/sys_parisc.c index c9b932260f47..7ea75d14aa65 100644 --- a/arch/parisc/kernel/sys_parisc.c +++ b/arch/parisc/kernel/sys_parisc.c @@ -73,6 +73,8 @@ static unsigned long get_shared_area(struct address_space *mapping, struct vm_area_struct *vma; int offset = mapping ? get_offset(mapping) : 0; + offset = (offset + (pgoff << PAGE_SHIFT)) & 0x3FF000; + addr = DCACHE_ALIGN(addr - offset) + offset; for (vma = find_vma(current->mm, addr); ; vma = vma->vm_next) { diff --git a/arch/powerpc/include/asm/pci-bridge.h b/arch/powerpc/include/asm/pci-bridge.h index ac39e6a3b25a..2974edd999da 100644 --- a/arch/powerpc/include/asm/pci-bridge.h +++ b/arch/powerpc/include/asm/pci-bridge.h @@ -181,6 +181,14 @@ static inline int pci_device_from_OF_node(struct device_node *np, #if defined(CONFIG_EEH) static inline struct eeh_dev *of_node_to_eeh_dev(struct device_node *dn) { + /* + * For those OF nodes whose parent isn't PCI bridge, they + * don't have PCI_DN actually. So we have to skip them for + * any EEH operations. 
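 * Callers therefore have to treat a NULL return as "no EEH device"; the
 * eeh_add_device_early() hunk further down drops its own dn check and
 * relies on exactly this.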
+ */ + if (!dn || !PCI_DN(dn)) + return NULL; + return PCI_DN(dn)->edev; } #endif diff --git a/arch/powerpc/kernel/asm-offsets.c b/arch/powerpc/kernel/asm-offsets.c index 34b8afe94a50..ec0b529c3950 100644 --- a/arch/powerpc/kernel/asm-offsets.c +++ b/arch/powerpc/kernel/asm-offsets.c @@ -76,6 +76,7 @@ int main(void) DEFINE(SIGSEGV, SIGSEGV); DEFINE(NMI_MASK, NMI_MASK); DEFINE(THREAD_DSCR, offsetof(struct thread_struct, dscr)); + DEFINE(THREAD_DSCR_INHERIT, offsetof(struct thread_struct, dscr_inherit)); #else DEFINE(THREAD_INFO, offsetof(struct task_struct, stack)); #endif /* CONFIG_PPC64 */ diff --git a/arch/powerpc/kernel/dbell.c b/arch/powerpc/kernel/dbell.c index 5b25c8060fd6..a892680668d8 100644 --- a/arch/powerpc/kernel/dbell.c +++ b/arch/powerpc/kernel/dbell.c @@ -28,6 +28,8 @@ void doorbell_setup_this_cpu(void) void doorbell_cause_ipi(int cpu, unsigned long data) { + /* Order previous accesses vs. msgsnd, which is treated as a store */ + mb(); ppc_msgsnd(PPC_DBELL, 0, data); } diff --git a/arch/powerpc/kernel/entry_64.S b/arch/powerpc/kernel/entry_64.S index ef2074c3e906..e500969bea0c 100644 --- a/arch/powerpc/kernel/entry_64.S +++ b/arch/powerpc/kernel/entry_64.S @@ -373,6 +373,12 @@ _GLOBAL(ret_from_fork) li r3,0 b syscall_exit + .section ".toc","aw" +DSCR_DEFAULT: + .tc dscr_default[TC],dscr_default + + .section ".text" + /* * This routine switches between two different tasks. The process * state of one is saved on its kernel stack. Then the state @@ -512,9 +518,6 @@ END_MMU_FTR_SECTION_IFSET(MMU_FTR_1T_SEGMENT) mr r1,r8 /* start using new stack pointer */ std r7,PACAKSAVE(r13) - ld r6,_CCR(r1) - mtcrf 0xFF,r6 - #ifdef CONFIG_ALTIVEC BEGIN_FTR_SECTION ld r0,THREAD_VRSAVE(r4) @@ -523,14 +526,22 @@ END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC) #endif /* CONFIG_ALTIVEC */ #ifdef CONFIG_PPC64 BEGIN_FTR_SECTION + lwz r6,THREAD_DSCR_INHERIT(r4) + ld r7,DSCR_DEFAULT@toc(2) ld r0,THREAD_DSCR(r4) - cmpd r0,r25 - beq 1f + cmpwi r6,0 + bne 1f + ld r0,0(r7) +1: cmpd r0,r25 + beq 2f mtspr SPRN_DSCR,r0 -1: +2: END_FTR_SECTION_IFSET(CPU_FTR_DSCR) #endif + ld r6,_CCR(r1) + mtcrf 0xFF,r6 + /* r3-r13 are destroyed -- Cort */ REST_8GPRS(14, r1) REST_10GPRS(22, r1) diff --git a/arch/powerpc/kernel/head_64.S b/arch/powerpc/kernel/head_64.S index 58bddee8e1e8..9e07bd038dda 100644 --- a/arch/powerpc/kernel/head_64.S +++ b/arch/powerpc/kernel/head_64.S @@ -422,7 +422,7 @@ _STATIC(__after_prom_start) tovirt(r6,r6) /* on booke, we already run at PAGE_OFFSET */ #endif -#ifdef CONFIG_CRASH_DUMP +#ifdef CONFIG_RELOCATABLE /* * Check if the kernel has to be running as relocatable kernel based on the * variable __run_at_load, if it is set the kernel is treated as relocatable diff --git a/arch/powerpc/kernel/machine_kexec_64.c b/arch/powerpc/kernel/machine_kexec_64.c index d7f609086a99..39833e0b282f 100644 --- a/arch/powerpc/kernel/machine_kexec_64.c +++ b/arch/powerpc/kernel/machine_kexec_64.c @@ -162,6 +162,8 @@ static int kexec_all_irq_disabled = 0; static void kexec_smp_down(void *arg) { local_irq_disable(); + hard_irq_disable(); + mb(); /* make sure our irqs are disabled before we say they are */ get_paca()->kexec_state = KEXEC_STATE_IRQS_OFF; while(kexec_all_irq_disabled == 0) @@ -244,6 +246,8 @@ static void kexec_prepare_cpus(void) wake_offline_cpus(); smp_call_function(kexec_smp_down, NULL, /* wait */0); local_irq_disable(); + hard_irq_disable(); + mb(); /* make sure IRQs are disabled before we say they are */ get_paca()->kexec_state = KEXEC_STATE_IRQS_OFF; @@ -281,6 +285,7 @@ static void 
kexec_prepare_cpus(void) if (ppc_md.kexec_cpu_down) ppc_md.kexec_cpu_down(0, 0); local_irq_disable(); + hard_irq_disable(); } #endif /* SMP */ diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c index 4937c9690090..94178e55f49d 100644 --- a/arch/powerpc/kernel/process.c +++ b/arch/powerpc/kernel/process.c @@ -799,16 +799,8 @@ int copy_thread(unsigned long clone_flags, unsigned long usp, #endif /* CONFIG_PPC_STD_MMU_64 */ #ifdef CONFIG_PPC64 if (cpu_has_feature(CPU_FTR_DSCR)) { - if (current->thread.dscr_inherit) { - p->thread.dscr_inherit = 1; - p->thread.dscr = current->thread.dscr; - } else if (0 != dscr_default) { - p->thread.dscr_inherit = 1; - p->thread.dscr = dscr_default; - } else { - p->thread.dscr_inherit = 0; - p->thread.dscr = 0; - } + p->thread.dscr_inherit = current->thread.dscr_inherit; + p->thread.dscr = current->thread.dscr; } #endif diff --git a/arch/powerpc/kernel/smp.c b/arch/powerpc/kernel/smp.c index d9f94410fd7f..ab24aee19bce 100644 --- a/arch/powerpc/kernel/smp.c +++ b/arch/powerpc/kernel/smp.c @@ -215,8 +215,15 @@ void smp_muxed_ipi_message_pass(int cpu, int msg) struct cpu_messages *info = &per_cpu(ipi_message, cpu); char *message = (char *)&info->messages; + /* + * Order previous accesses before accesses in the IPI handler. + */ + smp_mb(); message[msg] = 1; - mb(); + /* + * cause_ipi functions are required to include a full barrier + * before doing whatever causes the IPI. + */ smp_ops->cause_ipi(cpu, info->data); } @@ -228,7 +235,7 @@ irqreturn_t smp_ipi_demux(void) mb(); /* order any irq clear */ do { - all = xchg_local(&info->messages, 0); + all = xchg(&info->messages, 0); #ifdef __BIG_ENDIAN if (all & (1 << (24 - 8 * PPC_MSG_CALL_FUNCTION))) diff --git a/arch/powerpc/kernel/sysfs.c b/arch/powerpc/kernel/sysfs.c index 3529446c2abd..8302af649219 100644 --- a/arch/powerpc/kernel/sysfs.c +++ b/arch/powerpc/kernel/sysfs.c @@ -194,6 +194,14 @@ static ssize_t show_dscr_default(struct device *dev, return sprintf(buf, "%lx\n", dscr_default); } +static void update_dscr(void *dummy) +{ + if (!current->thread.dscr_inherit) { + current->thread.dscr = dscr_default; + mtspr(SPRN_DSCR, dscr_default); + } +} + static ssize_t __used store_dscr_default(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) @@ -206,6 +214,8 @@ static ssize_t __used store_dscr_default(struct device *dev, return -EINVAL; dscr_default = val; + on_each_cpu(update_dscr, NULL, 1); + return count; } diff --git a/arch/powerpc/kernel/time.c b/arch/powerpc/kernel/time.c index 730e69cb7e9b..e7dba0b2a170 100644 --- a/arch/powerpc/kernel/time.c +++ b/arch/powerpc/kernel/time.c @@ -749,13 +749,8 @@ void update_vsyscall(struct timespec *wall_time, struct timespec *wtm, void update_vsyscall_tz(void) { - /* Make userspace gettimeofday spin until we're done. 
*/ - ++vdso_data->tb_update_count; - smp_mb(); vdso_data->tz_minuteswest = sys_tz.tz_minuteswest; vdso_data->tz_dsttime = sys_tz.tz_dsttime; - smp_mb(); - ++vdso_data->tb_update_count; } static void __init clocksource_init(void) diff --git a/arch/powerpc/kernel/traps.c b/arch/powerpc/kernel/traps.c index 158972341a2d..ae0843fa7a61 100644 --- a/arch/powerpc/kernel/traps.c +++ b/arch/powerpc/kernel/traps.c @@ -972,8 +972,9 @@ static int emulate_instruction(struct pt_regs *regs) cpu_has_feature(CPU_FTR_DSCR)) { PPC_WARN_EMULATED(mtdscr, regs); rd = (instword >> 21) & 0x1f; - mtspr(SPRN_DSCR, regs->gpr[rd]); + current->thread.dscr = regs->gpr[rd]; current->thread.dscr_inherit = 1; + mtspr(SPRN_DSCR, current->thread.dscr); return 0; } #endif diff --git a/arch/powerpc/kvm/44x_emulate.c b/arch/powerpc/kvm/44x_emulate.c index 549bb2c9a47a..ded8a1a04264 100644 --- a/arch/powerpc/kvm/44x_emulate.c +++ b/arch/powerpc/kvm/44x_emulate.c @@ -79,6 +79,7 @@ int kvmppc_core_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu, run->dcr.dcrn = dcrn; run->dcr.data = 0; run->dcr.is_write = 0; + vcpu->arch.dcr_is_write = 0; vcpu->arch.io_gpr = rt; vcpu->arch.dcr_needed = 1; kvmppc_account_exit(vcpu, DCR_EXITS); @@ -100,6 +101,7 @@ int kvmppc_core_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu, run->dcr.dcrn = dcrn; run->dcr.data = kvmppc_get_gpr(vcpu, rs); run->dcr.is_write = 1; + vcpu->arch.dcr_is_write = 1; vcpu->arch.dcr_needed = 1; kvmppc_account_exit(vcpu, DCR_EXITS); emulated = EMULATE_DO_DCR; diff --git a/arch/powerpc/platforms/40x/ppc40x_simple.c b/arch/powerpc/platforms/40x/ppc40x_simple.c index 97612068fae3..f0eee75ac05f 100644 --- a/arch/powerpc/platforms/40x/ppc40x_simple.c +++ b/arch/powerpc/platforms/40x/ppc40x_simple.c @@ -57,7 +57,8 @@ static const char *board[] __initdata = { "amcc,makalu", "apm,klondike", "est,hotfoot", - "plathome,obs600" + "plathome,obs600", + NULL }; static int __init ppc40x_probe(void) diff --git a/arch/powerpc/platforms/85xx/p1022_ds.c b/arch/powerpc/platforms/85xx/p1022_ds.c index 978330ccdde6..f80887f40f02 100644 --- a/arch/powerpc/platforms/85xx/p1022_ds.c +++ b/arch/powerpc/platforms/85xx/p1022_ds.c @@ -208,6 +208,7 @@ static void p1022ds_set_monitor_port(enum fsl_diu_monitor_port port) u8 __iomem *lbc_lcs0_ba = NULL; u8 __iomem *lbc_lcs1_ba = NULL; phys_addr_t cs0_addr, cs1_addr; + u32 br0, or0, br1, or1; const __be32 *iprop; unsigned int num_laws; u8 b; @@ -256,11 +257,70 @@ static void p1022ds_set_monitor_port(enum fsl_diu_monitor_port port) } num_laws = be32_to_cpup(iprop); - cs0_addr = lbc_br_to_phys(ecm, num_laws, in_be32(&lbc->bank[0].br)); - cs1_addr = lbc_br_to_phys(ecm, num_laws, in_be32(&lbc->bank[1].br)); + /* + * Indirect mode requires both BR0 and BR1 to be set to "GPCM", + * otherwise writes to these addresses won't actually appear on the + * local bus, and so the PIXIS won't see them. + * + * In FCM mode, writes go to the NAND controller, which does not pass + * them to the localbus directly. So we force BR0 and BR1 into GPCM + * mode, since we don't care about what's behind the localbus any + * more. + */ + br0 = in_be32(&lbc->bank[0].br); + br1 = in_be32(&lbc->bank[1].br); + or0 = in_be32(&lbc->bank[0].or); + or1 = in_be32(&lbc->bank[1].or); + + /* Make sure CS0 and CS1 are programmed */ + if (!(br0 & BR_V) || !(br1 & BR_V)) { + pr_err("p1022ds: CS0 and/or CS1 is not programmed\n"); + goto exit; + } + + /* + * Use the existing BRx/ORx values if it's already GPCM. 
Otherwise, + * force the values to simple 32KB GPCM windows with the most + * conservative timing. + */ + if ((br0 & BR_MSEL) != BR_MS_GPCM) { + br0 = (br0 & BR_BA) | BR_V; + or0 = 0xFFFF8000 | 0xFF7; + out_be32(&lbc->bank[0].br, br0); + out_be32(&lbc->bank[0].or, or0); + } + if ((br1 & BR_MSEL) != BR_MS_GPCM) { + br1 = (br1 & BR_BA) | BR_V; + or1 = 0xFFFF8000 | 0xFF7; + out_be32(&lbc->bank[1].br, br1); + out_be32(&lbc->bank[1].or, or1); + } + + cs0_addr = lbc_br_to_phys(ecm, num_laws, br0); + if (!cs0_addr) { + pr_err("p1022ds: could not determine physical address for CS0" + " (BR0=%08x)\n", br0); + goto exit; + } + cs1_addr = lbc_br_to_phys(ecm, num_laws, br1); + if (!cs0_addr) { + pr_err("p1022ds: could not determine physical address for CS1" + " (BR1=%08x)\n", br1); + goto exit; + } lbc_lcs0_ba = ioremap(cs0_addr, 1); + if (!lbc_lcs0_ba) { + pr_err("p1022ds: could not ioremap CS0 address %llx\n", + (unsigned long long)cs0_addr); + goto exit; + } lbc_lcs1_ba = ioremap(cs1_addr, 1); + if (!lbc_lcs1_ba) { + pr_err("p1022ds: could not ioremap CS1 address %llx\n", + (unsigned long long)cs1_addr); + goto exit; + } /* Make sure we're in indirect mode first. */ if ((in_be32(&guts->pmuxcr) & PMUXCR_ELBCDIU_MASK) != @@ -435,6 +495,8 @@ static void __init disable_one_node(struct device_node *np, struct property *new prom_update_property(np, new, old); else prom_add_property(np, new); + + pr_info("p1022ds: disabling %s node\n", np->full_name); } /* TRUE if there is a "video=fslfb" command-line parameter. */ @@ -499,28 +561,46 @@ static void __init p1022_ds_setup_arch(void) diu_ops.valid_monitor_port = p1022ds_valid_monitor_port; /* - * Disable the NOR flash node if there is video=fslfb... command-line - * parameter. When the DIU is active, NOR flash is unavailable, so we - * have to disable the node before the MTD driver loads. + * Disable the NOR and NAND flash nodes if there is video=fslfb... + * command-line parameter. When the DIU is active, the localbus is + * unavailable, so we have to disable these nodes before the MTD + * driver loads. 
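 * Only the child flash nodes are touched: as the code below shows, each one
 * just gets a status = "disabled" property via disable_one_node(), while the
 * parent eLBC node itself is left alone.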
*/ if (fslfb) { struct device_node *np = of_find_compatible_node(NULL, NULL, "fsl,p1022-elbc"); if (np) { - np = of_find_compatible_node(np, NULL, "cfi-flash"); - if (np) { + struct device_node *np2; + + of_node_get(np); + np2 = of_find_compatible_node(np, NULL, "cfi-flash"); + if (np2) { static struct property nor_status = { .name = "status", .value = "disabled", .length = sizeof("disabled"), }; - pr_info("p1022ds: disabling %s node", - np->full_name); - disable_one_node(np, &nor_status); - of_node_put(np); + disable_one_node(np2, &nor_status); + of_node_put(np2); + } + + of_node_get(np); + np2 = of_find_compatible_node(np, NULL, + "fsl,elbc-fcm-nand"); + if (np2) { + static struct property nand_status = { + .name = "status", + .value = "disabled", + .length = sizeof("disabled"), + }; + + disable_one_node(np2, &nand_status); + of_node_put(np2); } + + of_node_put(np); } } diff --git a/arch/powerpc/platforms/pseries/eeh.c b/arch/powerpc/platforms/pseries/eeh.c index a75e37dc41aa..41d4b163c063 100644 --- a/arch/powerpc/platforms/pseries/eeh.c +++ b/arch/powerpc/platforms/pseries/eeh.c @@ -1029,7 +1029,7 @@ static void eeh_add_device_early(struct device_node *dn) { struct pci_controller *phb; - if (!dn || !of_node_to_eeh_dev(dn)) + if (!of_node_to_eeh_dev(dn)) return; phb = of_node_to_eeh_dev(dn)->phb; diff --git a/arch/powerpc/platforms/pseries/eeh_driver.c b/arch/powerpc/platforms/pseries/eeh_driver.c index baf92cd9dfab..041e28d0e702 100644 --- a/arch/powerpc/platforms/pseries/eeh_driver.c +++ b/arch/powerpc/platforms/pseries/eeh_driver.c @@ -25,6 +25,7 @@ #include <linux/delay.h> #include <linux/interrupt.h> #include <linux/irq.h> +#include <linux/module.h> #include <linux/pci.h> #include <asm/eeh.h> #include <asm/eeh_event.h> @@ -47,6 +48,41 @@ static inline const char *eeh_pcid_name(struct pci_dev *pdev) return ""; } +/** + * eeh_pcid_get - Get the PCI device driver + * @pdev: PCI device + * + * The function is used to retrieve the PCI device driver for + * the indicated PCI device. Besides, we will increase the reference + * of the PCI device driver to prevent that being unloaded on + * the fly. Otherwise, kernel crash would be seen. + */ +static inline struct pci_driver *eeh_pcid_get(struct pci_dev *pdev) +{ + if (!pdev || !pdev->driver) + return NULL; + + if (!try_module_get(pdev->driver->driver.owner)) + return NULL; + + return pdev->driver; +} + +/** + * eeh_pcid_put - Dereference on the PCI device driver + * @pdev: PCI device + * + * The function is called to do dereference on the PCI device + * driver of the indicated PCI device. 
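 * Every successful eeh_pcid_get() in the report helpers below is balanced by
 * an eeh_pcid_put() on each exit path, so a driver module cannot disappear
 * while its error handlers are still being called.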
+ */ +static inline void eeh_pcid_put(struct pci_dev *pdev) +{ + if (!pdev || !pdev->driver) + return; + + module_put(pdev->driver->driver.owner); +} + #if 0 static void print_device_node_tree(struct pci_dn *pdn, int dent) { @@ -126,18 +162,20 @@ static void eeh_enable_irq(struct pci_dev *dev) static int eeh_report_error(struct pci_dev *dev, void *userdata) { enum pci_ers_result rc, *res = userdata; - struct pci_driver *driver = dev->driver; + struct pci_driver *driver; dev->error_state = pci_channel_io_frozen; - if (!driver) - return 0; + driver = eeh_pcid_get(dev); + if (!driver) return 0; eeh_disable_irq(dev); if (!driver->err_handler || - !driver->err_handler->error_detected) + !driver->err_handler->error_detected) { + eeh_pcid_put(dev); return 0; + } rc = driver->err_handler->error_detected(dev, pci_channel_io_frozen); @@ -145,6 +183,7 @@ static int eeh_report_error(struct pci_dev *dev, void *userdata) if (rc == PCI_ERS_RESULT_NEED_RESET) *res = rc; if (*res == PCI_ERS_RESULT_NONE) *res = rc; + eeh_pcid_put(dev); return 0; } @@ -160,12 +199,16 @@ static int eeh_report_error(struct pci_dev *dev, void *userdata) static int eeh_report_mmio_enabled(struct pci_dev *dev, void *userdata) { enum pci_ers_result rc, *res = userdata; - struct pci_driver *driver = dev->driver; + struct pci_driver *driver; + + driver = eeh_pcid_get(dev); + if (!driver) return 0; - if (!driver || - !driver->err_handler || - !driver->err_handler->mmio_enabled) + if (!driver->err_handler || + !driver->err_handler->mmio_enabled) { + eeh_pcid_put(dev); return 0; + } rc = driver->err_handler->mmio_enabled(dev); @@ -173,6 +216,7 @@ static int eeh_report_mmio_enabled(struct pci_dev *dev, void *userdata) if (rc == PCI_ERS_RESULT_NEED_RESET) *res = rc; if (*res == PCI_ERS_RESULT_NONE) *res = rc; + eeh_pcid_put(dev); return 0; } @@ -189,18 +233,20 @@ static int eeh_report_mmio_enabled(struct pci_dev *dev, void *userdata) static int eeh_report_reset(struct pci_dev *dev, void *userdata) { enum pci_ers_result rc, *res = userdata; - struct pci_driver *driver = dev->driver; - - if (!driver) - return 0; + struct pci_driver *driver; dev->error_state = pci_channel_io_normal; + driver = eeh_pcid_get(dev); + if (!driver) return 0; + eeh_enable_irq(dev); if (!driver->err_handler || - !driver->err_handler->slot_reset) + !driver->err_handler->slot_reset) { + eeh_pcid_put(dev); return 0; + } rc = driver->err_handler->slot_reset(dev); if ((*res == PCI_ERS_RESULT_NONE) || @@ -208,6 +254,7 @@ static int eeh_report_reset(struct pci_dev *dev, void *userdata) if (*res == PCI_ERS_RESULT_DISCONNECT && rc == PCI_ERS_RESULT_NEED_RESET) *res = rc; + eeh_pcid_put(dev); return 0; } @@ -222,21 +269,24 @@ static int eeh_report_reset(struct pci_dev *dev, void *userdata) */ static int eeh_report_resume(struct pci_dev *dev, void *userdata) { - struct pci_driver *driver = dev->driver; + struct pci_driver *driver; dev->error_state = pci_channel_io_normal; - if (!driver) - return 0; + driver = eeh_pcid_get(dev); + if (!driver) return 0; eeh_enable_irq(dev); if (!driver->err_handler || - !driver->err_handler->resume) + !driver->err_handler->resume) { + eeh_pcid_put(dev); return 0; + } driver->err_handler->resume(dev); + eeh_pcid_put(dev); return 0; } @@ -250,21 +300,24 @@ static int eeh_report_resume(struct pci_dev *dev, void *userdata) */ static int eeh_report_failure(struct pci_dev *dev, void *userdata) { - struct pci_driver *driver = dev->driver; + struct pci_driver *driver; dev->error_state = pci_channel_io_perm_failure; - if (!driver) - return 0; + driver 
= eeh_pcid_get(dev); + if (!driver) return 0; eeh_disable_irq(dev); if (!driver->err_handler || - !driver->err_handler->error_detected) + !driver->err_handler->error_detected) { + eeh_pcid_put(dev); return 0; + } driver->err_handler->error_detected(dev, pci_channel_io_perm_failure); + eeh_pcid_put(dev); return 0; } diff --git a/arch/powerpc/sysdev/xics/icp-hv.c b/arch/powerpc/sysdev/xics/icp-hv.c index 253dce98c16e..762c5ca37606 100644 --- a/arch/powerpc/sysdev/xics/icp-hv.c +++ b/arch/powerpc/sysdev/xics/icp-hv.c @@ -65,7 +65,11 @@ static inline void icp_hv_set_xirr(unsigned int value) static inline void icp_hv_set_qirr(int n_cpu , u8 value) { int hw_cpu = get_hard_smp_processor_id(n_cpu); - long rc = plpar_hcall_norets(H_IPI, hw_cpu, value); + long rc; + + /* Make sure all previous accesses are ordered before IPI sending */ + mb(); + rc = plpar_hcall_norets(H_IPI, hw_cpu, value); if (rc != H_SUCCESS) { pr_err("%s: bad return code qirr cpu=%d hw_cpu=%d mfrr=0x%x " "returned %ld\n", __func__, n_cpu, hw_cpu, value, rc); diff --git a/arch/s390/boot/compressed/vmlinux.lds.S b/arch/s390/boot/compressed/vmlinux.lds.S index d80f79d8dd9c..8e1fb8239287 100644 --- a/arch/s390/boot/compressed/vmlinux.lds.S +++ b/arch/s390/boot/compressed/vmlinux.lds.S @@ -5,7 +5,7 @@ OUTPUT_FORMAT("elf64-s390", "elf64-s390", "elf64-s390") OUTPUT_ARCH(s390:64-bit) #else OUTPUT_FORMAT("elf32-s390", "elf32-s390", "elf32-s390") -OUTPUT_ARCH(s390) +OUTPUT_ARCH(s390:31-bit) #endif ENTRY(startup) diff --git a/arch/s390/include/asm/compat.h b/arch/s390/include/asm/compat.h index 234f1d859cea..2e0a15b4359c 100644 --- a/arch/s390/include/asm/compat.h +++ b/arch/s390/include/asm/compat.h @@ -20,7 +20,7 @@ #define PSW32_MASK_CC 0x00003000UL #define PSW32_MASK_PM 0x00000f00UL -#define PSW32_MASK_USER 0x00003F00UL +#define PSW32_MASK_USER 0x0000FF00UL #define PSW32_ADDR_AMODE 0x80000000UL #define PSW32_ADDR_INSN 0x7FFFFFFFUL diff --git a/arch/s390/include/asm/ptrace.h b/arch/s390/include/asm/ptrace.h index aeb77f017985..d3750e79fcc2 100644 --- a/arch/s390/include/asm/ptrace.h +++ b/arch/s390/include/asm/ptrace.h @@ -240,7 +240,7 @@ typedef struct #define PSW_MASK_EA 0x00000000UL #define PSW_MASK_BA 0x00000000UL -#define PSW_MASK_USER 0x00003F00UL +#define PSW_MASK_USER 0x0000FF00UL #define PSW_ADDR_AMODE 0x80000000UL #define PSW_ADDR_INSN 0x7FFFFFFFUL @@ -269,7 +269,7 @@ typedef struct #define PSW_MASK_EA 0x0000000100000000UL #define PSW_MASK_BA 0x0000000080000000UL -#define PSW_MASK_USER 0x00003F0180000000UL +#define PSW_MASK_USER 0x0000FF0180000000UL #define PSW_ADDR_AMODE 0x0000000000000000UL #define PSW_ADDR_INSN 0xFFFFFFFFFFFFFFFFUL diff --git a/arch/s390/include/asm/timex.h b/arch/s390/include/asm/timex.h index c447a27a7fdb..945b7cdf4934 100644 --- a/arch/s390/include/asm/timex.h +++ b/arch/s390/include/asm/timex.h @@ -137,4 +137,32 @@ static inline unsigned long long get_clock_monotonic(void) return get_clock_xt() - sched_clock_base_cc; } +/** + * tod_to_ns - convert a TOD format value to nanoseconds + * @todval: to be converted TOD format value + * Returns: number of nanoseconds that correspond to the TOD format value + * + * Converting a 64 Bit TOD format value to nanoseconds means that the value + * must be divided by 4.096. In order to achieve that we multiply with 125 + * and divide by 512: + * + * ns = (todval * 125) >> 9; + * + * In order to avoid an overflow with the multiplication we can rewrite this. 
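 * (The plain multiplication would overflow 64 bits once todval grows past
 * roughly 2^57. As a sanity check, todval = 4096 -- one microsecond worth of
 * TOD ticks -- gives ns = (4096 * 125) >> 9 = 1000.)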
+ * With a split todval == 2^32 * th + tl (th upper 32 bits, tl lower 32 bits) + * we end up with + * + * ns = ((2^32 * th + tl) * 125 ) >> 9; + * -> ns = (2^23 * th * 125) + ((tl * 125) >> 9); + * + */ +static inline unsigned long long tod_to_ns(unsigned long long todval) +{ + unsigned long long ns; + + ns = ((todval >> 32) << 23) * 125; + ns += ((todval & 0xffffffff) * 125) >> 9; + return ns; +} + #endif diff --git a/arch/s390/kernel/compat_signal.c b/arch/s390/kernel/compat_signal.c index 28040fd5e8a2..0bdca3ac9a61 100644 --- a/arch/s390/kernel/compat_signal.c +++ b/arch/s390/kernel/compat_signal.c @@ -313,6 +313,10 @@ static int restore_sigregs32(struct pt_regs *regs,_sigregs32 __user *sregs) regs->psw.mask = (regs->psw.mask & ~PSW_MASK_USER) | (__u64)(regs32.psw.mask & PSW32_MASK_USER) << 32 | (__u64)(regs32.psw.addr & PSW32_ADDR_AMODE); + /* Check for invalid user address space control. */ + if ((regs->psw.mask & PSW_MASK_ASC) >= (psw_kernel_bits & PSW_MASK_ASC)) + regs->psw.mask = (psw_user_bits & PSW_MASK_ASC) | + (regs->psw.mask & ~PSW_MASK_ASC); regs->psw.addr = (__u64)(regs32.psw.addr & PSW32_ADDR_INSN); for (i = 0; i < NUM_GPRS; i++) regs->gprs[i] = (__u64) regs32.gprs[i]; @@ -494,7 +498,10 @@ static int setup_frame32(int sig, struct k_sigaction *ka, /* Set up registers for signal handler */ regs->gprs[15] = (__force __u64) frame; - regs->psw.mask |= PSW_MASK_BA; /* force amode 31 */ + /* Force 31 bit amode and default user address space control. */ + regs->psw.mask = PSW_MASK_BA | + (psw_user_bits & PSW_MASK_ASC) | + (regs->psw.mask & ~PSW_MASK_ASC); regs->psw.addr = (__force __u64) ka->sa.sa_handler; regs->gprs[2] = map_signal(sig); @@ -562,7 +569,10 @@ static int setup_rt_frame32(int sig, struct k_sigaction *ka, siginfo_t *info, /* Set up registers for signal handler */ regs->gprs[15] = (__force __u64) frame; - regs->psw.mask |= PSW_MASK_BA; /* force amode 31 */ + /* Force 31 bit amode and default user address space control. */ + regs->psw.mask = PSW_MASK_BA | + (psw_user_bits & PSW_MASK_ASC) | + (regs->psw.mask & ~PSW_MASK_ASC); regs->psw.addr = (__u64) ka->sa.sa_handler; regs->gprs[2] = map_signal(sig); diff --git a/arch/s390/kernel/signal.c b/arch/s390/kernel/signal.c index f7582b27f600..74f58e211ac2 100644 --- a/arch/s390/kernel/signal.c +++ b/arch/s390/kernel/signal.c @@ -148,6 +148,10 @@ static int restore_sigregs(struct pt_regs *regs, _sigregs __user *sregs) /* Use regs->psw.mask instead of psw_user_bits to preserve PER bit. */ regs->psw.mask = (regs->psw.mask & ~PSW_MASK_USER) | (user_sregs.regs.psw.mask & PSW_MASK_USER); + /* Check for invalid user address space control. */ + if ((regs->psw.mask & PSW_MASK_ASC) >= (psw_kernel_bits & PSW_MASK_ASC)) + regs->psw.mask = (psw_user_bits & PSW_MASK_ASC) | + (regs->psw.mask & ~PSW_MASK_ASC); /* Check for invalid amode */ if (regs->psw.mask & PSW_MASK_EA) regs->psw.mask |= PSW_MASK_BA; @@ -294,7 +298,10 @@ static int setup_frame(int sig, struct k_sigaction *ka, /* Set up registers for signal handler */ regs->gprs[15] = (unsigned long) frame; - regs->psw.mask |= PSW_MASK_EA | PSW_MASK_BA; /* 64 bit amode */ + /* Force default amode and default user address space control. 
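 * (Concretely, the assignment below keeps the old mask apart from its ASC
 * bits, takes the ASC from psw_user_bits, and forces the amode via
 * PSW_MASK_EA | PSW_MASK_BA, complementing the invalid-ASC check added to
 * restore_sigregs() above.)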
*/ + regs->psw.mask = PSW_MASK_EA | PSW_MASK_BA | + (psw_user_bits & PSW_MASK_ASC) | + (regs->psw.mask & ~PSW_MASK_ASC); regs->psw.addr = (unsigned long) ka->sa.sa_handler | PSW_ADDR_AMODE; regs->gprs[2] = map_signal(sig); @@ -367,7 +374,10 @@ static int setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info, /* Set up registers for signal handler */ regs->gprs[15] = (unsigned long) frame; - regs->psw.mask |= PSW_MASK_EA | PSW_MASK_BA; /* 64 bit amode */ + /* Force default amode and default user address space control. */ + regs->psw.mask = PSW_MASK_EA | PSW_MASK_BA | + (psw_user_bits & PSW_MASK_ASC) | + (regs->psw.mask & ~PSW_MASK_ASC); regs->psw.addr = (unsigned long) ka->sa.sa_handler | PSW_ADDR_AMODE; regs->gprs[2] = map_signal(sig); diff --git a/arch/s390/kernel/time.c b/arch/s390/kernel/time.c index d4e1cb1dbcd1..747ab28d0e64 100644 --- a/arch/s390/kernel/time.c +++ b/arch/s390/kernel/time.c @@ -64,7 +64,7 @@ static DEFINE_PER_CPU(struct clock_event_device, comparators); */ unsigned long long notrace __kprobes sched_clock(void) { - return (get_clock_monotonic() * 125) >> 9; + return tod_to_ns(get_clock_monotonic()); } /* @@ -121,6 +121,9 @@ static int s390_next_ktime(ktime_t expires, nsecs = ktime_to_ns(ktime_add(timespec_to_ktime(ts), expires)); do_div(nsecs, 125); S390_lowcore.clock_comparator = sched_clock_base_cc + (nsecs << 9); + /* Program the maximum value if we have an overflow (== year 2042) */ + if (unlikely(S390_lowcore.clock_comparator < sched_clock_base_cc)) + S390_lowcore.clock_comparator = -1ULL; set_clock_comparator(S390_lowcore.clock_comparator); return 0; } diff --git a/arch/s390/kernel/vmlinux.lds.S b/arch/s390/kernel/vmlinux.lds.S index 21109c63eb12..1343d7cf4443 100644 --- a/arch/s390/kernel/vmlinux.lds.S +++ b/arch/s390/kernel/vmlinux.lds.S @@ -8,7 +8,7 @@ #ifndef CONFIG_64BIT OUTPUT_FORMAT("elf32-s390", "elf32-s390", "elf32-s390") -OUTPUT_ARCH(s390) +OUTPUT_ARCH(s390:31-bit) ENTRY(startup) jiffies = jiffies_64 + 4; #else diff --git a/arch/s390/kvm/interrupt.c b/arch/s390/kvm/interrupt.c index 2d9f9a72bb81..10e13b331d38 100644 --- a/arch/s390/kvm/interrupt.c +++ b/arch/s390/kvm/interrupt.c @@ -390,7 +390,7 @@ int kvm_s390_handle_wait(struct kvm_vcpu *vcpu) return 0; } - sltime = ((vcpu->arch.sie_block->ckc - now)*125)>>9; + sltime = tod_to_ns(vcpu->arch.sie_block->ckc - now); hrtimer_start(&vcpu->arch.ckc_timer, ktime_set (0, sltime) , HRTIMER_MODE_REL); VCPU_EVENT(vcpu, 5, "enabled wait via clock comparator: %llx ns", sltime); diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c index 217ce44395a4..e00accf9523e 100644 --- a/arch/s390/kvm/kvm-s390.c +++ b/arch/s390/kvm/kvm-s390.c @@ -677,6 +677,14 @@ int kvm_s390_vcpu_store_status(struct kvm_vcpu *vcpu, unsigned long addr) } else prefix = 0; + /* + * The guest FPRS and ACRS are in the host FPRS/ACRS due to the lazy + * copying in vcpu load/put. 
Lets update our copies before we save + * it into the save area + */ + save_fp_regs(&vcpu->arch.guest_fpregs); + save_access_regs(vcpu->run->s.regs.acrs); + if (__guestcopy(vcpu, addr + offsetof(struct save_area, fp_regs), vcpu->arch.guest_fpregs.fprs, 128, prefix)) return -EFAULT; diff --git a/arch/s390/mm/gup.c b/arch/s390/mm/gup.c index 65cb06e2af4e..4ccf9f54355e 100644 --- a/arch/s390/mm/gup.c +++ b/arch/s390/mm/gup.c @@ -183,7 +183,7 @@ int get_user_pages_fast(unsigned long start, int nr_pages, int write, addr = start; len = (unsigned long) nr_pages << PAGE_SHIFT; end = start + len; - if (end < start) + if ((end < start) || (end > TASK_SIZE)) goto slow_irqon; /* diff --git a/arch/s390/oprofile/init.c b/arch/s390/oprofile/init.c index 2297be406c61..abe8722adda6 100644 --- a/arch/s390/oprofile/init.c +++ b/arch/s390/oprofile/init.c @@ -171,7 +171,7 @@ static ssize_t hw_interval_write(struct file *file, char const __user *buf, if (*offset) return -EINVAL; retval = oprofilefs_ulong_from_user(&val, buf, count); - if (retval) + if (retval <= 0) return retval; if (val < oprofile_min_interval) oprofile_hw_interval = oprofile_min_interval; @@ -214,7 +214,7 @@ static ssize_t hwsampler_zero_write(struct file *file, char const __user *buf, return -EINVAL; retval = oprofilefs_ulong_from_user(&val, buf, count); - if (retval) + if (retval <= 0) return retval; if (val != 0) return -EINVAL; @@ -245,7 +245,7 @@ static ssize_t hwsampler_kernel_write(struct file *file, char const __user *buf, return -EINVAL; retval = oprofilefs_ulong_from_user(&val, buf, count); - if (retval) + if (retval <= 0) return retval; if (val != 0 && val != 1) @@ -280,7 +280,7 @@ static ssize_t hwsampler_user_write(struct file *file, char const __user *buf, return -EINVAL; retval = oprofilefs_ulong_from_user(&val, buf, count); - if (retval) + if (retval <= 0) return retval; if (val != 0 && val != 1) @@ -319,7 +319,7 @@ static ssize_t timer_enabled_write(struct file *file, char const __user *buf, return -EINVAL; retval = oprofilefs_ulong_from_user(&val, buf, count); - if (retval) + if (retval <= 0) return retval; if (val != 0 && val != 1) diff --git a/arch/score/kernel/process.c b/arch/score/kernel/process.c index 2707023c7563..637970cfd3f4 100644 --- a/arch/score/kernel/process.c +++ b/arch/score/kernel/process.c @@ -27,6 +27,7 @@ #include <linux/reboot.h> #include <linux/elfcore.h> #include <linux/pm.h> +#include <linux/rcupdate.h> void (*pm_power_off)(void); EXPORT_SYMBOL(pm_power_off); @@ -50,9 +51,10 @@ void __noreturn cpu_idle(void) { /* endless idle loop with no priority at all */ while (1) { + rcu_idle_enter(); while (!need_resched()) barrier(); - + rcu_idle_exit(); schedule_preempt_disabled(); } } diff --git a/arch/sh/include/asm/elf.h b/arch/sh/include/asm/elf.h index f38112be67d2..978b7fd6d474 100644 --- a/arch/sh/include/asm/elf.h +++ b/arch/sh/include/asm/elf.h @@ -202,9 +202,9 @@ extern void __kernel_vsyscall; if (vdso_enabled) \ NEW_AUX_ENT(AT_SYSINFO_EHDR, VDSO_BASE); \ else \ - NEW_AUX_ENT(AT_IGNORE, 0); + NEW_AUX_ENT(AT_IGNORE, 0) #else -#define VSYSCALL_AUX_ENT +#define VSYSCALL_AUX_ENT NEW_AUX_ENT(AT_IGNORE, 0) #endif /* CONFIG_VSYSCALL */ #ifdef CONFIG_SH_FPU diff --git a/arch/sparc/include/asm/hugetlb.h b/arch/sparc/include/asm/hugetlb.h index 177061064ee6..f368cef0379a 100644 --- a/arch/sparc/include/asm/hugetlb.h +++ b/arch/sparc/include/asm/hugetlb.h @@ -58,14 +58,20 @@ static inline pte_t huge_pte_wrprotect(pte_t pte) static inline void huge_ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr, 
pte_t *ptep) { - ptep_set_wrprotect(mm, addr, ptep); + pte_t old_pte = *ptep; + set_huge_pte_at(mm, addr, ptep, pte_wrprotect(old_pte)); } static inline int huge_ptep_set_access_flags(struct vm_area_struct *vma, unsigned long addr, pte_t *ptep, pte_t pte, int dirty) { - return ptep_set_access_flags(vma, addr, ptep, pte, dirty); + int changed = !pte_same(*ptep, pte); + if (changed) { + set_huge_pte_at(vma->vm_mm, addr, ptep, pte); + flush_tlb_page(vma, addr); + } + return changed; } static inline pte_t huge_ptep_get(pte_t *ptep) diff --git a/arch/sparc/kernel/perf_event.c b/arch/sparc/kernel/perf_event.c index 28559ce5eeb5..602eca8ef8c4 100644 --- a/arch/sparc/kernel/perf_event.c +++ b/arch/sparc/kernel/perf_event.c @@ -557,11 +557,13 @@ static u64 nop_for_index(int idx) static inline void sparc_pmu_enable_event(struct cpu_hw_events *cpuc, struct hw_perf_event *hwc, int idx) { - u64 val, mask = mask_for_index(idx); + u64 enc, val, mask = mask_for_index(idx); + + enc = perf_event_get_enc(cpuc->events[idx]); val = cpuc->pcr; val &= ~mask; - val |= hwc->config; + val |= event_encoding(enc, idx); cpuc->pcr = val; pcr_ops->write(cpuc->pcr); @@ -1428,8 +1430,6 @@ static void perf_callchain_user_64(struct perf_callchain_entry *entry, { unsigned long ufp; - perf_callchain_store(entry, regs->tpc); - ufp = regs->u_regs[UREG_I6] + STACK_BIAS; do { struct sparc_stackf *usf, sf; @@ -1450,8 +1450,6 @@ static void perf_callchain_user_32(struct perf_callchain_entry *entry, { unsigned long ufp; - perf_callchain_store(entry, regs->tpc); - ufp = regs->u_regs[UREG_I6] & 0xffffffffUL; do { struct sparc_stackf32 *usf, sf; @@ -1470,6 +1468,11 @@ static void perf_callchain_user_32(struct perf_callchain_entry *entry, void perf_callchain_user(struct perf_callchain_entry *entry, struct pt_regs *regs) { + perf_callchain_store(entry, regs->tpc); + + if (!current->mm) + return; + flushw_user(); if (test_thread_flag(TIF_32BIT)) perf_callchain_user_32(entry, regs); diff --git a/arch/sparc/kernel/signal_64.c b/arch/sparc/kernel/signal_64.c index 48b0f57b65f7..40837f071432 100644 --- a/arch/sparc/kernel/signal_64.c +++ b/arch/sparc/kernel/signal_64.c @@ -309,9 +309,7 @@ void do_rt_sigreturn(struct pt_regs *regs) err |= restore_fpu_state(regs, fpu_save); err |= __copy_from_user(&set, &sf->mask, sizeof(sigset_t)); - err |= do_sigaltstack(&sf->stack, NULL, (unsigned long)sf); - - if (err) + if (err || do_sigaltstack(&sf->stack, NULL, (unsigned long)sf) == -EFAULT) goto segv; err |= __get_user(rwin_save, &sf->rwin_save); diff --git a/arch/sparc/kernel/sys_sparc_64.c b/arch/sparc/kernel/sys_sparc_64.c index 3ee51f189a55..57b7cab1e26a 100644 --- a/arch/sparc/kernel/sys_sparc_64.c +++ b/arch/sparc/kernel/sys_sparc_64.c @@ -519,12 +519,12 @@ SYSCALL_DEFINE1(sparc64_personality, unsigned long, personality) { int ret; - if (current->personality == PER_LINUX32 && - personality == PER_LINUX) - personality = PER_LINUX32; + if (personality(current->personality) == PER_LINUX32 && + personality(personality) == PER_LINUX) + personality |= PER_LINUX32; ret = sys_personality(personality); - if (ret == PER_LINUX32) - ret = PER_LINUX; + if (personality(ret) == PER_LINUX32) + ret &= ~PER_LINUX32; return ret; } diff --git a/arch/sparc/kernel/syscalls.S b/arch/sparc/kernel/syscalls.S index 1d7e274f3f2b..7f5f65d0b3fd 100644 --- a/arch/sparc/kernel/syscalls.S +++ b/arch/sparc/kernel/syscalls.S @@ -212,24 +212,20 @@ linux_sparc_syscall: 3: stx %o0, [%sp + PTREGS_OFF + PT_V9_I0] ret_sys_call: ldx [%sp + PTREGS_OFF + PT_V9_TSTATE], %g3 - ldx [%sp + 
PTREGS_OFF + PT_V9_TNPC], %l1 ! pc = npc sra %o0, 0, %o0 mov %ulo(TSTATE_XCARRY | TSTATE_ICARRY), %g2 sllx %g2, 32, %g2 - /* Check if force_successful_syscall_return() - * was invoked. - */ - ldub [%g6 + TI_SYS_NOERROR], %l2 - brnz,a,pn %l2, 80f - stb %g0, [%g6 + TI_SYS_NOERROR] - cmp %o0, -ERESTART_RESTARTBLOCK bgeu,pn %xcc, 1f - andcc %l0, (_TIF_SYSCALL_TRACE|_TIF_SECCOMP|_TIF_SYSCALL_AUDIT|_TIF_SYSCALL_TRACEPOINT), %l6 -80: + andcc %l0, (_TIF_SYSCALL_TRACE|_TIF_SECCOMP|_TIF_SYSCALL_AUDIT|_TIF_SYSCALL_TRACEPOINT), %g0 + ldx [%sp + PTREGS_OFF + PT_V9_TNPC], %l1 ! pc = npc + +2: + stb %g0, [%g6 + TI_SYS_NOERROR] /* System call success, clear Carry condition code. */ andn %g3, %g2, %g3 +3: stx %g3, [%sp + PTREGS_OFF + PT_V9_TSTATE] bne,pn %icc, linux_syscall_trace2 add %l1, 0x4, %l2 ! npc = npc+4 @@ -238,20 +234,20 @@ ret_sys_call: stx %l2, [%sp + PTREGS_OFF + PT_V9_TNPC] 1: + /* Check if force_successful_syscall_return() + * was invoked. + */ + ldub [%g6 + TI_SYS_NOERROR], %l2 + brnz,pn %l2, 2b + ldx [%sp + PTREGS_OFF + PT_V9_TNPC], %l1 ! pc = npc /* System call failure, set Carry condition code. * Also, get abs(errno) to return to the process. */ - andcc %l0, (_TIF_SYSCALL_TRACE|_TIF_SECCOMP|_TIF_SYSCALL_AUDIT|_TIF_SYSCALL_TRACEPOINT), %l6 sub %g0, %o0, %o0 - or %g3, %g2, %g3 stx %o0, [%sp + PTREGS_OFF + PT_V9_I0] - stx %g3, [%sp + PTREGS_OFF + PT_V9_TSTATE] - bne,pn %icc, linux_syscall_trace2 - add %l1, 0x4, %l2 ! npc = npc+4 - stx %l1, [%sp + PTREGS_OFF + PT_V9_TPC] + ba,pt %xcc, 3b + or %g3, %g2, %g3 - b,pt %xcc, rtrap - stx %l2, [%sp + PTREGS_OFF + PT_V9_TNPC] linux_syscall_trace2: call syscall_trace_leave add %sp, PTREGS_OFF, %o0 diff --git a/arch/sparc/mm/init_64.c b/arch/sparc/mm/init_64.c index 21faaeea85de..07916182387e 100644 --- a/arch/sparc/mm/init_64.c +++ b/arch/sparc/mm/init_64.c @@ -2099,6 +2099,9 @@ EXPORT_SYMBOL(_PAGE_CACHE); #ifdef CONFIG_SPARSEMEM_VMEMMAP unsigned long vmemmap_table[VMEMMAP_SIZE]; +static long __meminitdata addr_start, addr_end; +static int __meminitdata node_start; + int __meminit vmemmap_populate(struct page *start, unsigned long nr, int node) { unsigned long vstart = (unsigned long) start; @@ -2129,15 +2132,30 @@ int __meminit vmemmap_populate(struct page *start, unsigned long nr, int node) *vmem_pp = pte_base | __pa(block); - printk(KERN_INFO "[%p-%p] page_structs=%lu " - "node=%d entry=%lu/%lu\n", start, block, nr, - node, - addr >> VMEMMAP_CHUNK_SHIFT, - VMEMMAP_SIZE); + /* check to see if we have contiguous blocks */ + if (addr_end != addr || node_start != node) { + if (addr_start) + printk(KERN_DEBUG " [%lx-%lx] on node %d\n", + addr_start, addr_end-1, node_start); + addr_start = addr; + node_start = node; + } + addr_end = addr + VMEMMAP_CHUNK; } } return 0; } + +void __meminit vmemmap_populate_print_last(void) +{ + if (addr_start) { + printk(KERN_DEBUG " [%lx-%lx] on node %d\n", + addr_start, addr_end-1, node_start); + addr_start = 0; + addr_end = 0; + node_start = 0; + } +} #endif /* CONFIG_SPARSEMEM_VMEMMAP */ static void prot_init_common(unsigned long page_none, diff --git a/arch/tile/Makefile b/arch/tile/Makefile index 9520bc5a4b7f..99f461d8d9da 100644 --- a/arch/tile/Makefile +++ b/arch/tile/Makefile @@ -26,6 +26,10 @@ $(error Set TILERA_ROOT or CROSS_COMPILE when building $(ARCH) on $(HOST_ARCH)) endif endif +# The tile compiler may emit .eh_frame information for backtracing. +# In kernel modules, this causes load failures due to unsupported relocations. 
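# (-fno-asynchronous-unwind-tables stops gcc from emitting the .eh_frame
# section in the first place, so module objects no longer carry the
# unsupported relocations mentioned above.)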
+KBUILD_CFLAGS += -fno-asynchronous-unwind-tables + ifneq ($(CONFIG_DEBUG_EXTRA_FLAGS),"") KBUILD_CFLAGS += $(CONFIG_DEBUG_EXTRA_FLAGS) endif diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig index 7cbdfdac3c7c..29cf70ff6b43 100644 --- a/arch/x86/Kconfig +++ b/arch/x86/Kconfig @@ -1244,10 +1244,6 @@ config HAVE_ARCH_BOOTMEM def_bool y depends on X86_32 && NUMA -config HAVE_ARCH_ALLOC_REMAP - def_bool y - depends on X86_32 && NUMA - config ARCH_HAVE_MEMORY_PRESENT def_bool y depends on X86_32 && DISCONTIGMEM diff --git a/arch/x86/Makefile b/arch/x86/Makefile index b1c611e6da67..f1276aaeffdb 100644 --- a/arch/x86/Makefile +++ b/arch/x86/Makefile @@ -85,7 +85,7 @@ endif ifdef CONFIG_X86_X32 x32_ld_ok := $(call try-run,\ /bin/echo -e '1: .quad 1b' | \ - $(CC) $(KBUILD_AFLAGS) -c -xassembler -o "$$TMP" - && \ + $(CC) $(KBUILD_AFLAGS) -c -x assembler -o "$$TMP" - && \ $(OBJCOPY) -O elf32-x86-64 "$$TMP" "$$TMPO" && \ $(LD) -m elf32_x86_64 "$$TMPO" -o "$$TMP",y,n) ifeq ($(x32_ld_ok),y) diff --git a/arch/x86/boot/compressed/Makefile b/arch/x86/boot/compressed/Makefile index e398bb5d63bb..8a84501acb1b 100644 --- a/arch/x86/boot/compressed/Makefile +++ b/arch/x86/boot/compressed/Makefile @@ -28,6 +28,9 @@ VMLINUX_OBJS = $(obj)/vmlinux.lds $(obj)/head_$(BITS).o $(obj)/misc.o \ $(obj)/string.o $(obj)/cmdline.o $(obj)/early_serial_console.o \ $(obj)/piggy.o +$(obj)/eboot.o: KBUILD_CFLAGS += -fshort-wchar -mno-red-zone +$(obj)/efi_stub_$(BITS).o: KBUILD_CLFAGS += -fshort-wchar -mno-red-zone + ifeq ($(CONFIG_EFI_STUB), y) VMLINUX_OBJS += $(obj)/eboot.o $(obj)/efi_stub_$(BITS).o endif diff --git a/arch/x86/boot/compressed/eboot.c b/arch/x86/boot/compressed/eboot.c index 0cdfc0d2315e..8bb90707cb55 100644 --- a/arch/x86/boot/compressed/eboot.c +++ b/arch/x86/boot/compressed/eboot.c @@ -12,6 +12,8 @@ #include <asm/setup.h> #include <asm/desc.h> +#undef memcpy /* Use memcpy from misc.c */ + #include "eboot.h" static efi_system_table_t *sys_table; diff --git a/arch/x86/ia32/ia32entry.S b/arch/x86/ia32/ia32entry.S index e3e734005e19..66c4cae439c9 100644 --- a/arch/x86/ia32/ia32entry.S +++ b/arch/x86/ia32/ia32entry.S @@ -205,7 +205,7 @@ sysexit_from_sys_call: testl $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT),TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET) jnz ia32_ret_from_sys_call TRACE_IRQS_ON - sti + ENABLE_INTERRUPTS(CLBR_NONE) movl %eax,%esi /* second arg, syscall return value */ cmpl $-MAX_ERRNO,%eax /* is it an error ? 
*/ jbe 1f @@ -215,7 +215,7 @@ sysexit_from_sys_call: call __audit_syscall_exit movq RAX-ARGOFFSET(%rsp),%rax /* reload syscall return value */ movl $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT),%edi - cli + DISABLE_INTERRUPTS(CLBR_NONE) TRACE_IRQS_OFF testl %edi,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET) jz \exit diff --git a/arch/x86/include/asm/efi.h b/arch/x86/include/asm/efi.h index c9dcc181d4d1..da37433e3628 100644 --- a/arch/x86/include/asm/efi.h +++ b/arch/x86/include/asm/efi.h @@ -94,10 +94,12 @@ extern void __iomem *efi_ioremap(unsigned long addr, unsigned long size, #endif /* CONFIG_X86_32 */ extern int add_efi_memmap; +extern unsigned long x86_efi_facility; extern void efi_set_executable(efi_memory_desc_t *md, bool executable); extern int efi_memblock_x86_reserve_range(void); extern void efi_call_phys_prelog(void); extern void efi_call_phys_epilog(void); +extern void efi_unmap_memmap(void); #ifndef CONFIG_EFI /* diff --git a/arch/x86/include/asm/fpu-internal.h b/arch/x86/include/asm/fpu-internal.h index 4fa88154e4de..92e05b6c84f8 100644 --- a/arch/x86/include/asm/fpu-internal.h +++ b/arch/x86/include/asm/fpu-internal.h @@ -334,14 +334,17 @@ static inline void __thread_fpu_begin(struct task_struct *tsk) typedef struct { int preload; } fpu_switch_t; /* - * FIXME! We could do a totally lazy restore, but we need to - * add a per-cpu "this was the task that last touched the FPU - * on this CPU" variable, and the task needs to have a "I last - * touched the FPU on this CPU" and check them. + * Must be run with preemption disabled: this clears the fpu_owner_task, + * on this CPU. * - * We don't do that yet, so "fpu_lazy_restore()" always returns - * false, but some day.. + * This will disable any lazy FPU state restore of the current FPU state, + * but if the current thread owns the FPU, it will still be saved by. 
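 * (fpu_lazy_restore() below checks the incoming task against this per-CPU
 * owner, so clearing it simply forces a full FPU state restore on the next
 * switch into that task.)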
*/ +static inline void __cpu_disable_lazy_restore(unsigned int cpu) +{ + per_cpu(fpu_owner_task, cpu) = NULL; +} + static inline int fpu_lazy_restore(struct task_struct *new, unsigned int cpu) { return new == percpu_read_stable(fpu_owner_task) && diff --git a/arch/x86/include/asm/mmzone_32.h b/arch/x86/include/asm/mmzone_32.h index 55728e121473..5e0286f8081a 100644 --- a/arch/x86/include/asm/mmzone_32.h +++ b/arch/x86/include/asm/mmzone_32.h @@ -14,12 +14,6 @@ extern struct pglist_data *node_data[]; #include <asm/numaq.h> -extern void resume_map_numa_kva(pgd_t *pgd); - -#else /* !CONFIG_NUMA */ - -static inline void resume_map_numa_kva(pgd_t *pgd) {} - #endif /* CONFIG_NUMA */ #ifdef CONFIG_DISCONTIGMEM diff --git a/arch/x86/include/asm/pgtable.h b/arch/x86/include/asm/pgtable.h index 49afb3f41eb6..3f3dd526b3b8 100644 --- a/arch/x86/include/asm/pgtable.h +++ b/arch/x86/include/asm/pgtable.h @@ -142,12 +142,16 @@ static inline unsigned long pmd_pfn(pmd_t pmd) return (pmd_val(pmd) & PTE_PFN_MASK) >> PAGE_SHIFT; } +static inline unsigned long pud_pfn(pud_t pud) +{ + return (pud_val(pud) & PTE_PFN_MASK) >> PAGE_SHIFT; +} + #define pte_page(pte) pfn_to_page(pte_pfn(pte)) static inline int pmd_large(pmd_t pte) { - return (pmd_flags(pte) & (_PAGE_PSE | _PAGE_PRESENT)) == - (_PAGE_PSE | _PAGE_PRESENT); + return pmd_flags(pte) & _PAGE_PSE; } #ifdef CONFIG_TRANSPARENT_HUGEPAGE @@ -415,7 +419,13 @@ static inline int pte_hidden(pte_t pte) static inline int pmd_present(pmd_t pmd) { - return pmd_flags(pmd) & _PAGE_PRESENT; + /* + * Checking for _PAGE_PSE is needed too because + * split_huge_page will temporarily clear the present bit (but + * the _PAGE_PSE flag will remain set at all times while the + * _PAGE_PRESENT bit is clear). + */ + return pmd_flags(pmd) & (_PAGE_PRESENT | _PAGE_PROTNONE | _PAGE_PSE); } static inline int pmd_none(pmd_t pmd) diff --git a/arch/x86/include/asm/ptrace.h b/arch/x86/include/asm/ptrace.h index dcfde52979c3..19f16ebaf4fa 100644 --- a/arch/x86/include/asm/ptrace.h +++ b/arch/x86/include/asm/ptrace.h @@ -205,21 +205,14 @@ static inline bool user_64bit_mode(struct pt_regs *regs) } #endif -/* - * X86_32 CPUs don't save ss and esp if the CPU is already in kernel mode - * when it traps. The previous stack will be directly underneath the saved - * registers, and 'sp/ss' won't even have been saved. Thus the '®s->sp'. - * - * This is valid only for kernel mode traps. 
- */ -static inline unsigned long kernel_stack_pointer(struct pt_regs *regs) -{ #ifdef CONFIG_X86_32 - return (unsigned long)(®s->sp); +extern unsigned long kernel_stack_pointer(struct pt_regs *regs); #else +static inline unsigned long kernel_stack_pointer(struct pt_regs *regs) +{ return regs->sp; -#endif } +#endif #define GET_IP(regs) ((regs)->ip) #define GET_FP(regs) ((regs)->bp) diff --git a/arch/x86/include/asm/xen/page.h b/arch/x86/include/asm/xen/page.h index c34f96c2f7a0..1bd321d00f26 100644 --- a/arch/x86/include/asm/xen/page.h +++ b/arch/x86/include/asm/xen/page.h @@ -50,7 +50,8 @@ extern unsigned long set_phys_range_identity(unsigned long pfn_s, extern int m2p_add_override(unsigned long mfn, struct page *page, struct gnttab_map_grant_ref *kmap_op); -extern int m2p_remove_override(struct page *page, bool clear_pte); +extern int m2p_remove_override(struct page *page, + struct gnttab_map_grant_ref *kmap_op); extern struct page *m2p_find_override(unsigned long mfn); extern unsigned long m2p_find_override_pfn(unsigned long mfn, unsigned long pfn); diff --git a/arch/x86/kernel/alternative.c b/arch/x86/kernel/alternative.c index 73ef56c5a8b3..bda833c5f6ce 100644 --- a/arch/x86/kernel/alternative.c +++ b/arch/x86/kernel/alternative.c @@ -160,7 +160,7 @@ static const unsigned char * const k7_nops[ASM_NOP_MAX+2] = #endif #ifdef P6_NOP1 -static const unsigned char __initconst_or_module p6nops[] = +static const unsigned char p6nops[] = { P6_NOP1, P6_NOP2, diff --git a/arch/x86/kernel/apic/x2apic_phys.c b/arch/x86/kernel/apic/x2apic_phys.c index 991e315f4227..db31a2cfc9a9 100644 --- a/arch/x86/kernel/apic/x2apic_phys.c +++ b/arch/x86/kernel/apic/x2apic_phys.c @@ -20,18 +20,19 @@ static int set_x2apic_phys_mode(char *arg) } early_param("x2apic_phys", set_x2apic_phys_mode); -static int x2apic_acpi_madt_oem_check(char *oem_id, char *oem_table_id) +static bool x2apic_fadt_phys(void) { - if (x2apic_phys) - return x2apic_enabled(); - else if ((acpi_gbl_FADT.header.revision >= FADT2_REVISION_ID) && - (acpi_gbl_FADT.flags & ACPI_FADT_APIC_PHYSICAL) && - x2apic_enabled()) { + if ((acpi_gbl_FADT.header.revision >= FADT2_REVISION_ID) && + (acpi_gbl_FADT.flags & ACPI_FADT_APIC_PHYSICAL)) { printk(KERN_DEBUG "System requires x2apic physical mode\n"); - return 1; + return true; } - else - return 0; + return false; +} + +static int x2apic_acpi_madt_oem_check(char *oem_id, char *oem_table_id) +{ + return x2apic_enabled() && (x2apic_phys || x2apic_fadt_phys()); } static void @@ -114,7 +115,7 @@ static void init_x2apic_ldr(void) static int x2apic_phys_probe(void) { - if (x2apic_mode && x2apic_phys) + if (x2apic_mode && (x2apic_phys || x2apic_fadt_phys())) return 1; return apic == &apic_x2apic_phys; diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c index 146bb6218eec..a9c8a46dd7d3 100644 --- a/arch/x86/kernel/cpu/amd.c +++ b/arch/x86/kernel/cpu/amd.c @@ -598,6 +598,34 @@ static void __cpuinit init_amd(struct cpuinfo_x86 *c) } } + /* + * The way access filter has a performance penalty on some workloads. + * Disable it on the affected CPUs. + */ + if ((c->x86 == 0x15) && + (c->x86_model >= 0x02) && (c->x86_model < 0x20)) { + u64 val; + + if (!rdmsrl_safe(0xc0011021, &val) && !(val & 0x1E)) { + val |= 0x1E; + checking_wrmsrl(0xc0011021, val); + } + } + + /* + * The way access filter has a performance penalty on some workloads. + * Disable it on the affected CPUs. 
+ */ + if ((c->x86 == 0x15) && + (c->x86_model >= 0x02) && (c->x86_model < 0x20)) { + u64 val; + + if (!rdmsrl_safe(0xc0011021, &val) && !(val & 0x1E)) { + val |= 0x1E; + checking_wrmsrl(0xc0011021, val); + } + } + cpu_detect_cache_sizes(c); /* Multi core CPU? */ diff --git a/arch/x86/kernel/cpu/mshyperv.c b/arch/x86/kernel/cpu/mshyperv.c index 0a630dd4b620..646d192b18a2 100644 --- a/arch/x86/kernel/cpu/mshyperv.c +++ b/arch/x86/kernel/cpu/mshyperv.c @@ -68,7 +68,8 @@ static void __init ms_hyperv_init_platform(void) printk(KERN_INFO "HyperV: features 0x%x, hints 0x%x\n", ms_hyperv.features, ms_hyperv.hints); - clocksource_register_hz(&hyperv_cs, NSEC_PER_SEC/100); + if (ms_hyperv.features & HV_X64_MSR_TIME_REF_COUNT_AVAILABLE) + clocksource_register_hz(&hyperv_cs, NSEC_PER_SEC/100); } const __refconst struct hypervisor_x86 x86_hyper_ms_hyperv = { diff --git a/arch/x86/kernel/e820.c b/arch/x86/kernel/e820.c index 62d61e9976eb..298dc000378a 100644 --- a/arch/x86/kernel/e820.c +++ b/arch/x86/kernel/e820.c @@ -1076,6 +1076,9 @@ void __init memblock_x86_fill(void) memblock_add(ei->addr, ei->size); } + /* throw away partial pages */ + memblock_trim_memory(PAGE_SIZE); + memblock_dump_all(); } diff --git a/arch/x86/kernel/entry_32.S b/arch/x86/kernel/entry_32.S index 7b784f4ef1e4..2af4ccd88d16 100644 --- a/arch/x86/kernel/entry_32.S +++ b/arch/x86/kernel/entry_32.S @@ -1025,7 +1025,7 @@ ENTRY(xen_sysenter_target) ENTRY(xen_hypervisor_callback) CFI_STARTPROC - pushl_cfi $0 + pushl_cfi $-1 /* orig_ax = -1 => not a system call */ SAVE_ALL TRACE_IRQS_OFF @@ -1067,14 +1067,15 @@ ENTRY(xen_failsafe_callback) 2: mov 8(%esp),%es 3: mov 12(%esp),%fs 4: mov 16(%esp),%gs + /* EAX == 0 => Category 1 (Bad segment) + EAX != 0 => Category 2 (Bad IRET) */ testl %eax,%eax popl_cfi %eax lea 16(%esp),%esp CFI_ADJUST_CFA_OFFSET -16 jz 5f - addl $16,%esp - jmp iret_exc # EAX != 0 => Category 2 (Bad IRET) -5: pushl_cfi $0 # EAX == 0 => Category 1 (Bad segment) + jmp iret_exc +5: pushl_cfi $-1 /* orig_ax = -1 => not a system call */ SAVE_ALL jmp ret_from_exception CFI_ENDPROC diff --git a/arch/x86/kernel/entry_64.S b/arch/x86/kernel/entry_64.S index cdc79b5cfcd9..bd6f59203b5f 100644 --- a/arch/x86/kernel/entry_64.S +++ b/arch/x86/kernel/entry_64.S @@ -1351,7 +1351,7 @@ ENTRY(xen_failsafe_callback) CFI_RESTORE r11 addq $0x30,%rsp CFI_ADJUST_CFA_OFFSET -0x30 - pushq_cfi $0 + pushq_cfi $-1 /* orig_ax = -1 => not a system call */ SAVE_ALL jmp error_exit CFI_ENDPROC diff --git a/arch/x86/kernel/head.c b/arch/x86/kernel/head.c index 48d9d4ea1020..992f442ca155 100644 --- a/arch/x86/kernel/head.c +++ b/arch/x86/kernel/head.c @@ -5,8 +5,6 @@ #include <asm/setup.h> #include <asm/bios_ebda.h> -#define BIOS_LOWMEM_KILOBYTES 0x413 - /* * The BIOS places the EBDA/XBDA at the top of conventional * memory, and usually decreases the reported amount of @@ -16,17 +14,30 @@ * chipset: reserve a page before VGA to prevent PCI prefetch * into it (errata #56). Usually the page is reserved anyways, * unless you have no PS/2 mouse plugged in. + * + * This functions is deliberately very conservative. Losing + * memory in the bottom megabyte is rarely a problem, as long + * as we have enough memory to install the trampoline. Using + * memory that is in use by the BIOS or by some DMA device + * the BIOS didn't shut down *is* a big problem. 
*/ + +#define BIOS_LOWMEM_KILOBYTES 0x413 +#define LOWMEM_CAP 0x9f000U /* Absolute maximum */ +#define INSANE_CUTOFF 0x20000U /* Less than this = insane */ + void __init reserve_ebda_region(void) { unsigned int lowmem, ebda_addr; - /* To determine the position of the EBDA and the */ - /* end of conventional memory, we need to look at */ - /* the BIOS data area. In a paravirtual environment */ - /* that area is absent. We'll just have to assume */ - /* that the paravirt case can handle memory setup */ - /* correctly, without our help. */ + /* + * To determine the position of the EBDA and the + * end of conventional memory, we need to look at + * the BIOS data area. In a paravirtual environment + * that area is absent. We'll just have to assume + * that the paravirt case can handle memory setup + * correctly, without our help. + */ if (paravirt_enabled()) return; @@ -37,19 +48,23 @@ void __init reserve_ebda_region(void) /* start of EBDA area */ ebda_addr = get_bios_ebda(); - /* Fixup: bios puts an EBDA in the top 64K segment */ - /* of conventional memory, but does not adjust lowmem. */ - if ((lowmem - ebda_addr) <= 0x10000) - lowmem = ebda_addr; + /* + * Note: some old Dells seem to need 4k EBDA without + * reporting so, so just consider the memory above 0x9f000 + * to be off limits (bugzilla 2990). + */ + + /* If the EBDA address is below 128K, assume it is bogus */ + if (ebda_addr < INSANE_CUTOFF) + ebda_addr = LOWMEM_CAP; - /* Fixup: bios does not report an EBDA at all. */ - /* Some old Dells seem to need 4k anyhow (bugzilla 2990) */ - if ((ebda_addr == 0) && (lowmem >= 0x9f000)) - lowmem = 0x9f000; + /* If lowmem is less than 128K, assume it is bogus */ + if (lowmem < INSANE_CUTOFF) + lowmem = LOWMEM_CAP; - /* Paranoia: should never happen, but... 
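The reworked reserve_ebda_region() in this hunk amounts to: distrust any BIOS value below 128K, then take the lowest of the reported lowmem, the reported EBDA start, and the 0x9f000 hard cap. A small sketch of just that clamping arithmetic (constants from the patch; the example inputs are made up):

#include <stdio.h>

#define LOWMEM_CAP     0x9f000u   /* absolute maximum */
#define INSANE_CUTOFF  0x20000u   /* below this the BIOS value is ignored */

static unsigned int lowmem_cutoff(unsigned int lowmem, unsigned int ebda_addr)
{
        if (ebda_addr < INSANE_CUTOFF)
                ebda_addr = LOWMEM_CAP;
        if (lowmem < INSANE_CUTOFF)
                lowmem = LOWMEM_CAP;

        lowmem = lowmem < ebda_addr ? lowmem : ebda_addr;
        return lowmem < LOWMEM_CAP ? lowmem : LOWMEM_CAP;
}

int main(void)
{
        /* BIOS reports 639K of lowmem and an EBDA at 0x9fc00 */
        printf("cutoff = %#x\n", lowmem_cutoff(639 * 1024, 0x9fc00));
        /* bogus zero EBDA pointer: fall back to the cap */
        printf("cutoff = %#x\n", lowmem_cutoff(639 * 1024, 0));
        return 0;
}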
*/ - if ((lowmem == 0) || (lowmem >= 0x100000)) - lowmem = 0x9f000; + /* Use the lower of the lowmem and EBDA markers as the cutoff */ + lowmem = min(lowmem, ebda_addr); + lowmem = min(lowmem, LOWMEM_CAP); /* Absolute cap */ /* reserve all memory between lowmem and the 1MB mark */ memblock_reserve(lowmem, 0x100000 - lowmem); diff --git a/arch/x86/kernel/hpet.c b/arch/x86/kernel/hpet.c index ad0de0c2714e..2861b4cafd2e 100644 --- a/arch/x86/kernel/hpet.c +++ b/arch/x86/kernel/hpet.c @@ -431,7 +431,7 @@ void hpet_msi_unmask(struct irq_data *data) /* unmask it */ cfg = hpet_readl(HPET_Tn_CFG(hdev->num)); - cfg |= HPET_TN_FSB; + cfg |= HPET_TN_ENABLE | HPET_TN_FSB; hpet_writel(cfg, HPET_Tn_CFG(hdev->num)); } @@ -442,7 +442,7 @@ void hpet_msi_mask(struct irq_data *data) /* mask it */ cfg = hpet_readl(HPET_Tn_CFG(hdev->num)); - cfg &= ~HPET_TN_FSB; + cfg &= ~(HPET_TN_ENABLE | HPET_TN_FSB); hpet_writel(cfg, HPET_Tn_CFG(hdev->num)); } diff --git a/arch/x86/kernel/microcode_amd.c b/arch/x86/kernel/microcode_amd.c index 8a2ce8fd41c0..5d8cf0d6796c 100644 --- a/arch/x86/kernel/microcode_amd.c +++ b/arch/x86/kernel/microcode_amd.c @@ -97,6 +97,7 @@ static unsigned int verify_ucode_size(int cpu, u32 patch_size, #define F1XH_MPB_MAX_SIZE 2048 #define F14H_MPB_MAX_SIZE 1824 #define F15H_MPB_MAX_SIZE 4096 +#define F16H_MPB_MAX_SIZE 3458 switch (c->x86) { case 0x14: @@ -105,6 +106,9 @@ static unsigned int verify_ucode_size(int cpu, u32 patch_size, case 0x15: max_size = F15H_MPB_MAX_SIZE; break; + case 0x16: + max_size = F16H_MPB_MAX_SIZE; + break; default: max_size = F1XH_MPB_MAX_SIZE; break; @@ -143,11 +147,12 @@ static int get_matching_microcode(int cpu, const u8 *ucode_ptr, unsigned int *current_size) { struct microcode_header_amd *mc_hdr; - unsigned int actual_size; + unsigned int actual_size, patch_size; u16 equiv_cpu_id; /* size of the current patch we're staring at */ - *current_size = *(u32 *)(ucode_ptr + 4) + SECTION_HDR_SIZE; + patch_size = *(u32 *)(ucode_ptr + 4); + *current_size = patch_size + SECTION_HDR_SIZE; equiv_cpu_id = find_equiv_id(); if (!equiv_cpu_id) @@ -174,7 +179,7 @@ static int get_matching_microcode(int cpu, const u8 *ucode_ptr, /* * now that the header looks sane, verify its size */ - actual_size = verify_ucode_size(cpu, *current_size, leftover_size); + actual_size = verify_ucode_size(cpu, patch_size, leftover_size); if (!actual_size) return 0; diff --git a/arch/x86/kernel/msr.c b/arch/x86/kernel/msr.c index eb113693f043..8563b64e18cf 100644 --- a/arch/x86/kernel/msr.c +++ b/arch/x86/kernel/msr.c @@ -174,6 +174,9 @@ static int msr_open(struct inode *inode, struct file *file) unsigned int cpu; struct cpuinfo_x86 *c; + if (!capable(CAP_SYS_RAWIO)) + return -EPERM; + cpu = iminor(file->f_path.dentry->d_inode); if (cpu >= nr_cpu_ids || !cpu_online(cpu)) return -ENXIO; /* No such CPU */ diff --git a/arch/x86/kernel/ptrace.c b/arch/x86/kernel/ptrace.c index cf1178332bc0..c4410fb4938f 100644 --- a/arch/x86/kernel/ptrace.c +++ b/arch/x86/kernel/ptrace.c @@ -21,6 +21,7 @@ #include <linux/signal.h> #include <linux/perf_event.h> #include <linux/hw_breakpoint.h> +#include <linux/module.h> #include <asm/uaccess.h> #include <asm/pgtable.h> @@ -165,6 +166,35 @@ static inline bool invalid_selector(u16 value) #define FLAG_MASK FLAG_MASK_32 +/* + * X86_32 CPUs don't save ss and esp if the CPU is already in kernel mode + * when it traps. The previous stack will be directly underneath the saved + * registers, and 'sp/ss' won't even have been saved. Thus the '®s->sp'. 
+ * + * Now, if the stack is empty, '®s->sp' is out of range. In this + * case we try to take the previous stack. To always return a non-null + * stack pointer we fall back to regs as stack if no previous stack + * exists. + * + * This is valid only for kernel mode traps. + */ +unsigned long kernel_stack_pointer(struct pt_regs *regs) +{ + unsigned long context = (unsigned long)regs & ~(THREAD_SIZE - 1); + unsigned long sp = (unsigned long)®s->sp; + struct thread_info *tinfo; + + if (context == (sp & ~(THREAD_SIZE - 1))) + return sp; + + tinfo = (struct thread_info *)context; + if (tinfo->previous_esp) + return tinfo->previous_esp; + + return (unsigned long)regs; +} +EXPORT_SYMBOL_GPL(kernel_stack_pointer); + static unsigned long *pt_regs_access(struct pt_regs *regs, unsigned long regno) { BUILD_BUG_ON(offsetof(struct pt_regs, bx) != 0); diff --git a/arch/x86/kernel/reboot.c b/arch/x86/kernel/reboot.c index 3034ee5afb0f..df1b604c0c1f 100644 --- a/arch/x86/kernel/reboot.c +++ b/arch/x86/kernel/reboot.c @@ -619,7 +619,7 @@ static void native_machine_emergency_restart(void) break; case BOOT_EFI: - if (efi_enabled) + if (efi_enabled(EFI_RUNTIME_SERVICES)) efi.reset_system(reboot_mode ? EFI_RESET_WARM : EFI_RESET_COLD, diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c index d6c956e674cc..dc29333e1bb9 100644 --- a/arch/x86/kernel/setup.c +++ b/arch/x86/kernel/setup.c @@ -621,6 +621,83 @@ static __init void reserve_ibft_region(void) static unsigned reserve_low = CONFIG_X86_RESERVE_LOW << 10; +static bool __init snb_gfx_workaround_needed(void) +{ +#ifdef CONFIG_PCI + int i; + u16 vendor, devid; + static const u16 snb_ids[] = { + 0x0102, + 0x0112, + 0x0122, + 0x0106, + 0x0116, + 0x0126, + 0x010a, + }; + + /* Assume no if something weird is going on with PCI */ + if (!early_pci_allowed()) + return false; + + vendor = read_pci_config_16(0, 2, 0, PCI_VENDOR_ID); + if (vendor != 0x8086) + return false; + + devid = read_pci_config_16(0, 2, 0, PCI_DEVICE_ID); + for (i = 0; i < ARRAY_SIZE(snb_ids); i++) + if (devid == snb_ids[i]) + return true; +#endif + + return false; +} + +/* + * Sandy Bridge graphics has trouble with certain ranges, exclude + * them from allocation. + */ +static void __init trim_snb_memory(void) +{ + static const unsigned long bad_pages[] = { + 0x20050000, + 0x20110000, + 0x20130000, + 0x20138000, + 0x40004000, + }; + int i; + + if (!snb_gfx_workaround_needed()) + return; + + printk(KERN_DEBUG "reserving inaccessible SNB gfx pages\n"); + + /* + * Reserve all memory below the 1 MB mark that has not + * already been reserved. + */ + memblock_reserve(0, 1<<20); + + for (i = 0; i < ARRAY_SIZE(bad_pages); i++) { + if (memblock_reserve(bad_pages[i], PAGE_SIZE)) + printk(KERN_WARNING "failed to reserve 0x%08lx\n", + bad_pages[i]); + } +} + +/* + * Here we put platform-specific memory range workarounds, i.e. + * memory known to be corrupt or otherwise in need to be reserved on + * specific platforms. + * + * If this gets used more widely it could use a real dispatch mechanism. + */ +static void __init trim_platform_memory_ranges(void) +{ + trim_snb_memory(); +} + static void __init trim_bios_range(void) { /* @@ -641,6 +718,7 @@ static void __init trim_bios_range(void) * take them out. 
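Referring back to the kernel_stack_pointer() rework above: on 32-bit, &regs->sp is only the previous stack pointer if it still falls inside the current THREAD_SIZE-aligned stack, otherwise the code falls back. A rough userspace model of that address check (THREAD_SIZE and the register layout are stand-ins, and the previous_esp fallback is omitted):

#include <stdio.h>

#define THREAD_SIZE 8192UL              /* typical x86_32 value; assumption */

struct fake_regs {                      /* loose stand-in for pt_regs */
        unsigned long bx, cx, dx, si, di, bp, ax, ip, cs, flags;
        unsigned long sp;               /* only saved when the CPU switched stacks */
};

static unsigned long sketch_stack_pointer(struct fake_regs *regs)
{
        unsigned long context = (unsigned long)regs & ~(THREAD_SIZE - 1);
        unsigned long sp = (unsigned long)&regs->sp;

        if (context == (sp & ~(THREAD_SIZE - 1)))
                return sp;              /* &regs->sp is still on this stack */
        return (unsigned long)regs;     /* out of range: fall back */
}

int main(void)
{
        static unsigned char stack[2 * THREAD_SIZE];
        unsigned long base = ((unsigned long)stack + THREAD_SIZE) & ~(THREAD_SIZE - 1);
        struct fake_regs *regs =
                (struct fake_regs *)(base + THREAD_SIZE - sizeof(*regs) - 64);

        printf("regs %#lx -> sp %#lx\n", (unsigned long)regs,
               sketch_stack_pointer(regs));
        return 0;
}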
*/ e820_remove_range(BIOS_BEGIN, BIOS_END - BIOS_BEGIN, E820_RAM, 1); + sanitize_e820_map(e820.map, ARRAY_SIZE(e820.map), &e820.nr_map); } @@ -741,15 +819,15 @@ void __init setup_arch(char **cmdline_p) #ifdef CONFIG_EFI if (!strncmp((char *)&boot_params.efi_info.efi_loader_signature, "EL32", 4)) { - efi_enabled = 1; - efi_64bit = false; + set_bit(EFI_BOOT, &x86_efi_facility); } else if (!strncmp((char *)&boot_params.efi_info.efi_loader_signature, "EL64", 4)) { - efi_enabled = 1; - efi_64bit = true; + set_bit(EFI_BOOT, &x86_efi_facility); + set_bit(EFI_64BIT, &x86_efi_facility); } - if (efi_enabled && efi_memblock_x86_reserve_range()) - efi_enabled = 0; + + if (efi_enabled(EFI_BOOT)) + efi_memblock_x86_reserve_range(); #endif x86_init.oem.arch_setup(); @@ -822,7 +900,7 @@ void __init setup_arch(char **cmdline_p) finish_e820_parsing(); - if (efi_enabled) + if (efi_enabled(EFI_BOOT)) efi_init(); dmi_scan_machine(); @@ -905,7 +983,7 @@ void __init setup_arch(char **cmdline_p) * The EFI specification says that boot service code won't be called * after ExitBootServices(). This is, in fact, a lie. */ - if (efi_enabled) + if (efi_enabled(EFI_MEMMAP)) efi_reserve_boot_services(); /* preallocate 4k for mptable mpc */ @@ -920,6 +998,8 @@ void __init setup_arch(char **cmdline_p) setup_trampolines(); + trim_platform_memory_ranges(); + init_gbpages(); /* max_pfn_mapped is updated here */ @@ -928,8 +1008,22 @@ void __init setup_arch(char **cmdline_p) #ifdef CONFIG_X86_64 if (max_pfn > max_low_pfn) { - max_pfn_mapped = init_memory_mapping(1UL<<32, - max_pfn<<PAGE_SHIFT); + int i; + unsigned long start, end; + unsigned long start_pfn, end_pfn; + + for_each_mem_pfn_range(i, MAX_NUMNODES, &start_pfn, &end_pfn, + NULL) { + + end = PFN_PHYS(end_pfn); + if (end <= (1UL<<32)) + continue; + + start = PFN_PHYS(start_pfn); + max_pfn_mapped = init_memory_mapping( + max((1UL<<32), start), end); + } + /* can we preseve max_low_pfn ?*/ max_low_pfn = max_pfn; } @@ -1027,7 +1121,7 @@ void __init setup_arch(char **cmdline_p) #ifdef CONFIG_VT #if defined(CONFIG_VGA_CONSOLE) - if (!efi_enabled || (efi_mem_type(0xa0000) != EFI_CONVENTIONAL_MEMORY)) + if (!efi_enabled(EFI_BOOT) || (efi_mem_type(0xa0000) != EFI_CONVENTIONAL_MEMORY)) conswitchp = &vga_con; #elif defined(CONFIG_DUMMY_CONSOLE) conswitchp = &dummy_con; @@ -1042,6 +1136,18 @@ void __init setup_arch(char **cmdline_p) mcheck_init(); arch_init_ideal_nops(); + +#ifdef CONFIG_EFI + /* Once setup is done above, unmap the EFI memory map on + * mismatched firmware/kernel archtectures since there is no + * support for runtime services. 
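The single efi_enabled flag is being replaced by a small facility bitmask, and the mismatch test just below is what decides whether runtime services stay off. A sketch of the idea (the bit numbering and the kernel_is_64bit stand-in are arbitrary here):

#include <stdbool.h>
#include <stdio.h>

enum { EFI_BOOT, EFI_64BIT, EFI_SYSTEM_TABLES, EFI_CONFIG_TABLES,
       EFI_RUNTIME_SERVICES, EFI_MEMMAP };

static unsigned long facility;

static void set_facility(int bit)     { facility |= 1UL << bit; }
static bool facility_enabled(int bit) { return facility & (1UL << bit); }

int main(void)
{
        bool kernel_is_64bit = true;    /* stand-in for IS_ENABLED(CONFIG_X86_64) */

        set_facility(EFI_BOOT);         /* loader signature said "EL32" */

        if (facility_enabled(EFI_BOOT) &&
            kernel_is_64bit != facility_enabled(EFI_64BIT))
                printf("32/64-bit mismatch: unmap the memmap, no runtime services\n");
        return 0;
}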
+ */ + if (efi_enabled(EFI_BOOT) && + IS_ENABLED(CONFIG_X86_64) != efi_enabled(EFI_64BIT)) { + pr_info("efi: Setup done, disabling due to 32/64-bit mismatch\n"); + efi_unmap_memmap(); + } +#endif } #ifdef CONFIG_X86_32 diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c index 6e1e406038c2..849cdcf2e76a 100644 --- a/arch/x86/kernel/smpboot.c +++ b/arch/x86/kernel/smpboot.c @@ -66,6 +66,8 @@ #include <asm/mwait.h> #include <asm/apic.h> #include <asm/io_apic.h> +#include <asm/i387.h> +#include <asm/fpu-internal.h> #include <asm/setup.h> #include <asm/uv/uv.h> #include <linux/mc146818rtc.h> @@ -851,6 +853,9 @@ int __cpuinit native_cpu_up(unsigned int cpu) per_cpu(cpu_state, cpu) = CPU_UP_PREPARE; + /* the FPU context is blank, nobody can own it */ + __cpu_disable_lazy_restore(cpu); + err = do_boot_cpu(apicid, cpu); if (err) { pr_debug("do_boot_cpu failed %d\n", err); diff --git a/arch/x86/kvm/cpuid.h b/arch/x86/kvm/cpuid.h index 26d1fb437eb5..638de3efd7a3 100644 --- a/arch/x86/kvm/cpuid.h +++ b/arch/x86/kvm/cpuid.h @@ -23,6 +23,9 @@ static inline bool guest_cpuid_has_xsave(struct kvm_vcpu *vcpu) { struct kvm_cpuid_entry2 *best; + if (!static_cpu_has(X86_FEATURE_XSAVE)) + return 0; + best = kvm_find_cpuid_entry(vcpu, 1, 0); return best && (best->ecx & bit(X86_FEATURE_XSAVE)); } diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index 185a2b823a2d..e28fb97a1a88 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -5697,6 +5697,9 @@ int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu, int pending_vec, max_bits, idx; struct desc_ptr dt; + if (!guest_cpuid_has_xsave(vcpu) && (sregs->cr4 & X86_CR4_OSXSAVE)) + return -EINVAL; + dt.size = sregs->idt.limit; dt.address = sregs->idt.base; kvm_x86_ops->set_idt(vcpu, &dt); diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c index 3ecfd1aaf214..e922e01868fb 100644 --- a/arch/x86/mm/fault.c +++ b/arch/x86/mm/fault.c @@ -747,13 +747,15 @@ __bad_area_nosemaphore(struct pt_regs *regs, unsigned long error_code, return; } #endif + /* Kernel addresses are always protection faults: */ + if (address >= TASK_SIZE) + error_code |= PF_PROT; - if (unlikely(show_unhandled_signals)) + if (likely(show_unhandled_signals)) show_signal_msg(regs, error_code, address, tsk); - /* Kernel addresses are always protection faults: */ tsk->thread.cr2 = address; - tsk->thread.error_code = error_code | (address >= TASK_SIZE); + tsk->thread.error_code = error_code; tsk->thread.trap_nr = X86_TRAP_PF; force_sig_info_fault(SIGSEGV, si_code, address, tsk, 0); diff --git a/arch/x86/mm/hugetlbpage.c b/arch/x86/mm/hugetlbpage.c index f6679a7fb8ca..b91e48512425 100644 --- a/arch/x86/mm/hugetlbpage.c +++ b/arch/x86/mm/hugetlbpage.c @@ -56,9 +56,16 @@ static int vma_shareable(struct vm_area_struct *vma, unsigned long addr) } /* - * search for a shareable pmd page for hugetlb. + * Search for a shareable pmd page for hugetlb. In any case calls pmd_alloc() + * and returns the corresponding pte. While this is not necessary for the + * !shared pmd case because we can allocate the pmd later as well, it makes the + * code much cleaner. pmd allocation is essential for the shared case because + * pud has to be populated inside the same i_mmap_mutex section - otherwise + * racing tasks could either miss the sharing (see huge_pte_offset) or select a + * bad pmd for sharing. 
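The rule spelled out in this comment, that the lookup of an existing shared pmd and the allocation that makes it visible must happen in the same locked section, can be modelled with an ordinary mutex. A toy illustration only, not the hugetlb code:

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t map_lock = PTHREAD_MUTEX_INITIALIZER;
static long shared_entry;               /* 0 == not populated yet */

/* Look up or install under one lock, so a racing caller can never miss the
 * sharing or install a second entry (the role i_mmap_mutex plays above). */
static long get_or_install(long mine)
{
        long ret;

        pthread_mutex_lock(&map_lock);
        if (!shared_entry)
                shared_entry = mine;    /* analogous to populating the pud */
        ret = shared_entry;             /* analogous to pmd_alloc()'s result */
        pthread_mutex_unlock(&map_lock);
        return ret;
}

int main(void)
{
        printf("%ld %ld\n", get_or_install(1), get_or_install(2));  /* 1 1 */
        return 0;
}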
*/ -static void huge_pmd_share(struct mm_struct *mm, unsigned long addr, pud_t *pud) +static pte_t * +huge_pmd_share(struct mm_struct *mm, unsigned long addr, pud_t *pud) { struct vm_area_struct *vma = find_vma(mm, addr); struct address_space *mapping = vma->vm_file->f_mapping; @@ -68,9 +75,10 @@ static void huge_pmd_share(struct mm_struct *mm, unsigned long addr, pud_t *pud) struct vm_area_struct *svma; unsigned long saddr; pte_t *spte = NULL; + pte_t *pte; if (!vma_shareable(vma, addr)) - return; + return (pte_t *)pmd_alloc(mm, pud, addr); mutex_lock(&mapping->i_mmap_mutex); vma_prio_tree_foreach(svma, &iter, &mapping->i_mmap, idx, idx) { @@ -97,7 +105,9 @@ static void huge_pmd_share(struct mm_struct *mm, unsigned long addr, pud_t *pud) put_page(virt_to_page(spte)); spin_unlock(&mm->page_table_lock); out: + pte = (pte_t *)pmd_alloc(mm, pud, addr); mutex_unlock(&mapping->i_mmap_mutex); + return pte; } /* @@ -142,8 +152,9 @@ pte_t *huge_pte_alloc(struct mm_struct *mm, } else { BUG_ON(sz != PMD_SIZE); if (pud_none(*pud)) - huge_pmd_share(mm, addr, pud); - pte = (pte_t *) pmd_alloc(mm, pud, addr); + pte = huge_pmd_share(mm, addr, pud); + else + pte = (pte_t *)pmd_alloc(mm, pud, addr); } } BUG_ON(pte && !pte_none(*pte) && !pte_huge(*pte)); diff --git a/arch/x86/mm/init.c b/arch/x86/mm/init.c index 4f0cec7e4ffb..fae70904ed08 100644 --- a/arch/x86/mm/init.c +++ b/arch/x86/mm/init.c @@ -29,36 +29,50 @@ int direct_gbpages #endif ; -static void __init find_early_table_space(unsigned long end, int use_pse, - int use_gbpages) +struct map_range { + unsigned long start; + unsigned long end; + unsigned page_size_mask; +}; + +/* + * First calculate space needed for kernel direct mapping page tables to cover + * mr[0].start to mr[nr_range - 1].end, while accounting for possible 2M and 1GB + * pages. Then find enough contiguous space for those page tables. 
+ */ +static void __init find_early_table_space(struct map_range *mr, int nr_range) { - unsigned long puds, pmds, ptes, tables, start = 0, good_end = end; + int i; + unsigned long puds = 0, pmds = 0, ptes = 0, tables; + unsigned long start = 0, good_end; phys_addr_t base; - puds = (end + PUD_SIZE - 1) >> PUD_SHIFT; - tables = roundup(puds * sizeof(pud_t), PAGE_SIZE); + for (i = 0; i < nr_range; i++) { + unsigned long range, extra; - if (use_gbpages) { - unsigned long extra; + range = mr[i].end - mr[i].start; + puds += (range + PUD_SIZE - 1) >> PUD_SHIFT; - extra = end - ((end>>PUD_SHIFT) << PUD_SHIFT); - pmds = (extra + PMD_SIZE - 1) >> PMD_SHIFT; - } else - pmds = (end + PMD_SIZE - 1) >> PMD_SHIFT; - - tables += roundup(pmds * sizeof(pmd_t), PAGE_SIZE); + if (mr[i].page_size_mask & (1 << PG_LEVEL_1G)) { + extra = range - ((range >> PUD_SHIFT) << PUD_SHIFT); + pmds += (extra + PMD_SIZE - 1) >> PMD_SHIFT; + } else { + pmds += (range + PMD_SIZE - 1) >> PMD_SHIFT; + } - if (use_pse) { - unsigned long extra; - - extra = end - ((end>>PMD_SHIFT) << PMD_SHIFT); + if (mr[i].page_size_mask & (1 << PG_LEVEL_2M)) { + extra = range - ((range >> PMD_SHIFT) << PMD_SHIFT); #ifdef CONFIG_X86_32 - extra += PMD_SIZE; + extra += PMD_SIZE; #endif - ptes = (extra + PAGE_SIZE - 1) >> PAGE_SHIFT; - } else - ptes = (end + PAGE_SIZE - 1) >> PAGE_SHIFT; + ptes += (extra + PAGE_SIZE - 1) >> PAGE_SHIFT; + } else { + ptes += (range + PAGE_SIZE - 1) >> PAGE_SHIFT; + } + } + tables = roundup(puds * sizeof(pud_t), PAGE_SIZE); + tables += roundup(pmds * sizeof(pmd_t), PAGE_SIZE); tables += roundup(ptes * sizeof(pte_t), PAGE_SIZE); #ifdef CONFIG_X86_32 @@ -75,8 +89,9 @@ static void __init find_early_table_space(unsigned long end, int use_pse, pgt_buf_end = pgt_buf_start; pgt_buf_top = pgt_buf_start + (tables >> PAGE_SHIFT); - printk(KERN_DEBUG "kernel direct mapping tables up to %lx @ %lx-%lx\n", - end, pgt_buf_start << PAGE_SHIFT, pgt_buf_top << PAGE_SHIFT); + printk(KERN_DEBUG "kernel direct mapping tables up to %#lx @ [mem %#010lx-%#010lx]\n", + mr[nr_range - 1].end - 1, pgt_buf_start << PAGE_SHIFT, + (pgt_buf_top << PAGE_SHIFT) - 1); } void __init native_pagetable_reserve(u64 start, u64 end) @@ -84,12 +99,6 @@ void __init native_pagetable_reserve(u64 start, u64 end) memblock_reserve(start, end - start); } -struct map_range { - unsigned long start; - unsigned long end; - unsigned page_size_mask; -}; - #ifdef CONFIG_X86_32 #define NR_RANGE_MR 3 #else /* CONFIG_X86_64 */ @@ -261,7 +270,7 @@ unsigned long __init_refok init_memory_mapping(unsigned long start, * nodes are discovered. 
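The per-range accounting that find_early_table_space() now does can be tried out with plain arithmetic: one pud entry per 1G of range, pmd entries only for what 1G pages do not cover, pte entries only for what 2M pages do not cover. A rough model (8-byte entries assumed, the 32-bit PMD_SIZE fudge left out):

#include <stdio.h>

#define PAGE_SHIFT 12
#define PMD_SHIFT  21                   /* 2M */
#define PUD_SHIFT  30                   /* 1G */
#define PAGE_SIZE  (1ULL << PAGE_SHIFT)

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))
#define ROUNDUP(x, y)      (DIV_ROUND_UP(x, y) * (y))

static unsigned long long tables_for_range(unsigned long long bytes,
                                           int use_1g, int use_2m)
{
        unsigned long long puds = DIV_ROUND_UP(bytes, 1ULL << PUD_SHIFT);
        unsigned long long pmds, ptes;

        pmds = use_1g ? DIV_ROUND_UP(bytes % (1ULL << PUD_SHIFT), 1ULL << PMD_SHIFT)
                      : DIV_ROUND_UP(bytes, 1ULL << PMD_SHIFT);
        ptes = use_2m ? DIV_ROUND_UP(bytes % (1ULL << PMD_SHIFT), PAGE_SIZE)
                      : DIV_ROUND_UP(bytes, PAGE_SIZE);

        return ROUNDUP(puds * 8, PAGE_SIZE) + ROUNDUP(pmds * 8, PAGE_SIZE) +
               ROUNDUP(ptes * 8, PAGE_SIZE);
}

int main(void)
{
        printf("4G mapped with 2M pages: %llu KiB of page tables\n",
               tables_for_range(4ULL << 30, 0, 1) >> 10);
        return 0;
}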
*/ if (!after_bootmem) - find_early_table_space(end, use_pse, use_gbpages); + find_early_table_space(mr, nr_range); for (i = 0; i < nr_range; i++) ret = kernel_physical_mapping_init(mr[i].start, mr[i].end, diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c index fc18be0f6f29..faf7a6830c0b 100644 --- a/arch/x86/mm/init_64.c +++ b/arch/x86/mm/init_64.c @@ -821,6 +821,9 @@ int kern_addr_valid(unsigned long addr) if (pud_none(*pud)) return 0; + if (pud_large(*pud)) + return pfn_valid(pud_pfn(*pud)); + pmd = pmd_offset(pud, addr); if (pmd_none(*pmd)) return 0; diff --git a/arch/x86/mm/numa.c b/arch/x86/mm/numa.c index 19d3fa08b119..c1e83944abfe 100644 --- a/arch/x86/mm/numa.c +++ b/arch/x86/mm/numa.c @@ -193,7 +193,6 @@ int __init numa_add_memblk(int nid, u64 start, u64 end) static void __init setup_node_data(int nid, u64 start, u64 end) { const size_t nd_size = roundup(sizeof(pg_data_t), PAGE_SIZE); - bool remapped = false; u64 nd_pa; void *nd; int tnid; @@ -205,37 +204,28 @@ static void __init setup_node_data(int nid, u64 start, u64 end) if (end && (end - start) < NODE_MIN_SIZE) return; - /* initialize remap allocator before aligning to ZONE_ALIGN */ - init_alloc_remap(nid, start, end); - start = roundup(start, ZONE_ALIGN); printk(KERN_INFO "Initmem setup node %d %016Lx-%016Lx\n", nid, start, end); /* - * Allocate node data. Try remap allocator first, node-local - * memory and then any node. Never allocate in DMA zone. + * Allocate node data. Try node-local memory and then any node. + * Never allocate in DMA zone. */ - nd = alloc_remap(nid, nd_size); - if (nd) { - nd_pa = __pa(nd); - remapped = true; - } else { - nd_pa = memblock_alloc_nid(nd_size, SMP_CACHE_BYTES, nid); - if (!nd_pa) { - pr_err("Cannot find %zu bytes in node %d\n", - nd_size, nid); - return; - } - nd = __va(nd_pa); + nd_pa = memblock_alloc_nid(nd_size, SMP_CACHE_BYTES, nid); + if (!nd_pa) { + pr_err("Cannot find %zu bytes in node %d\n", + nd_size, nid); + return; } + nd = __va(nd_pa); /* report and initialize */ - printk(KERN_INFO " NODE_DATA [%016Lx - %016Lx]%s\n", - nd_pa, nd_pa + nd_size - 1, remapped ? " (remapped)" : ""); + printk(KERN_INFO " NODE_DATA [mem %#010Lx-%#010Lx]\n", + nd_pa, nd_pa + nd_size - 1); tnid = early_pfn_to_nid(nd_pa >> PAGE_SHIFT); - if (!remapped && tnid != nid) + if (tnid != nid) printk(KERN_INFO " NODE_DATA(%d) on node %d\n", nid, tnid); node_data[nid] = nd; diff --git a/arch/x86/mm/numa_32.c b/arch/x86/mm/numa_32.c index 534255a36b6b..73a6d7395bd3 100644 --- a/arch/x86/mm/numa_32.c +++ b/arch/x86/mm/numa_32.c @@ -73,167 +73,6 @@ unsigned long node_memmap_size_bytes(int nid, unsigned long start_pfn, extern unsigned long highend_pfn, highstart_pfn; -#define LARGE_PAGE_BYTES (PTRS_PER_PTE * PAGE_SIZE) - -static void *node_remap_start_vaddr[MAX_NUMNODES]; -void set_pmd_pfn(unsigned long vaddr, unsigned long pfn, pgprot_t flags); - -/* - * Remap memory allocator - */ -static unsigned long node_remap_start_pfn[MAX_NUMNODES]; -static void *node_remap_end_vaddr[MAX_NUMNODES]; -static void *node_remap_alloc_vaddr[MAX_NUMNODES]; - -/** - * alloc_remap - Allocate remapped memory - * @nid: NUMA node to allocate memory from - * @size: The size of allocation - * - * Allocate @size bytes from the remap area of NUMA node @nid. The - * size of the remap area is predetermined by init_alloc_remap() and - * only the callers considered there should call this function. For - * more info, please read the comment on top of init_alloc_remap(). 
- * - * The caller must be ready to handle allocation failure from this - * function and fall back to regular memory allocator in such cases. - * - * CONTEXT: - * Single CPU early boot context. - * - * RETURNS: - * Pointer to the allocated memory on success, %NULL on failure. - */ -void *alloc_remap(int nid, unsigned long size) -{ - void *allocation = node_remap_alloc_vaddr[nid]; - - size = ALIGN(size, L1_CACHE_BYTES); - - if (!allocation || (allocation + size) > node_remap_end_vaddr[nid]) - return NULL; - - node_remap_alloc_vaddr[nid] += size; - memset(allocation, 0, size); - - return allocation; -} - -#ifdef CONFIG_HIBERNATION -/** - * resume_map_numa_kva - add KVA mapping to the temporary page tables created - * during resume from hibernation - * @pgd_base - temporary resume page directory - */ -void resume_map_numa_kva(pgd_t *pgd_base) -{ - int node; - - for_each_online_node(node) { - unsigned long start_va, start_pfn, nr_pages, pfn; - - start_va = (unsigned long)node_remap_start_vaddr[node]; - start_pfn = node_remap_start_pfn[node]; - nr_pages = (node_remap_end_vaddr[node] - - node_remap_start_vaddr[node]) >> PAGE_SHIFT; - - printk(KERN_DEBUG "%s: node %d\n", __func__, node); - - for (pfn = 0; pfn < nr_pages; pfn += PTRS_PER_PTE) { - unsigned long vaddr = start_va + (pfn << PAGE_SHIFT); - pgd_t *pgd = pgd_base + pgd_index(vaddr); - pud_t *pud = pud_offset(pgd, vaddr); - pmd_t *pmd = pmd_offset(pud, vaddr); - - set_pmd(pmd, pfn_pmd(start_pfn + pfn, - PAGE_KERNEL_LARGE_EXEC)); - - printk(KERN_DEBUG "%s: %08lx -> pfn %08lx\n", - __func__, vaddr, start_pfn + pfn); - } - } -} -#endif - -/** - * init_alloc_remap - Initialize remap allocator for a NUMA node - * @nid: NUMA node to initizlie remap allocator for - * - * NUMA nodes may end up without any lowmem. As allocating pgdat and - * memmap on a different node with lowmem is inefficient, a special - * remap allocator is implemented which can be used by alloc_remap(). - * - * For each node, the amount of memory which will be necessary for - * pgdat and memmap is calculated and two memory areas of the size are - * allocated - one in the node and the other in lowmem; then, the area - * in the node is remapped to the lowmem area. - * - * As pgdat and memmap must be allocated in lowmem anyway, this - * doesn't waste lowmem address space; however, the actual lowmem - * which gets remapped over is wasted. The amount shouldn't be - * problematic on machines this feature will be used. - * - * Initialization failure isn't fatal. alloc_remap() is used - * opportunistically and the callers will fall back to other memory - * allocation mechanisms on failure. - */ -void __init init_alloc_remap(int nid, u64 start, u64 end) -{ - unsigned long start_pfn = start >> PAGE_SHIFT; - unsigned long end_pfn = end >> PAGE_SHIFT; - unsigned long size, pfn; - u64 node_pa, remap_pa; - void *remap_va; - - /* - * The acpi/srat node info can show hot-add memroy zones where - * memory could be added but not currently present. 
- */ - printk(KERN_DEBUG "node %d pfn: [%lx - %lx]\n", - nid, start_pfn, end_pfn); - - /* calculate the necessary space aligned to large page size */ - size = node_memmap_size_bytes(nid, start_pfn, end_pfn); - size += ALIGN(sizeof(pg_data_t), PAGE_SIZE); - size = ALIGN(size, LARGE_PAGE_BYTES); - - /* allocate node memory and the lowmem remap area */ - node_pa = memblock_find_in_range(start, end, size, LARGE_PAGE_BYTES); - if (!node_pa) { - pr_warning("remap_alloc: failed to allocate %lu bytes for node %d\n", - size, nid); - return; - } - memblock_reserve(node_pa, size); - - remap_pa = memblock_find_in_range(min_low_pfn << PAGE_SHIFT, - max_low_pfn << PAGE_SHIFT, - size, LARGE_PAGE_BYTES); - if (!remap_pa) { - pr_warning("remap_alloc: failed to allocate %lu bytes remap area for node %d\n", - size, nid); - memblock_free(node_pa, size); - return; - } - memblock_reserve(remap_pa, size); - remap_va = phys_to_virt(remap_pa); - - /* perform actual remap */ - for (pfn = 0; pfn < size >> PAGE_SHIFT; pfn += PTRS_PER_PTE) - set_pmd_pfn((unsigned long)remap_va + (pfn << PAGE_SHIFT), - (node_pa >> PAGE_SHIFT) + pfn, - PAGE_KERNEL_LARGE); - - /* initialize remap allocator parameters */ - node_remap_start_pfn[nid] = node_pa >> PAGE_SHIFT; - node_remap_start_vaddr[nid] = remap_va; - node_remap_end_vaddr[nid] = remap_va + size; - node_remap_alloc_vaddr[nid] = remap_va; - - printk(KERN_DEBUG "remap_alloc: node %d [%08llx-%08llx) -> [%p-%p)\n", - nid, node_pa, node_pa + size, remap_va, remap_va + size); -} - void __init initmem_init(void) { x86_numa_init(); diff --git a/arch/x86/mm/numa_internal.h b/arch/x86/mm/numa_internal.h index 7178c3afe05e..ad86ec91e640 100644 --- a/arch/x86/mm/numa_internal.h +++ b/arch/x86/mm/numa_internal.h @@ -21,12 +21,6 @@ void __init numa_reset_distance(void); void __init x86_numa_init(void); -#ifdef CONFIG_X86_64 -static inline void init_alloc_remap(int nid, u64 start, u64 end) { } -#else -void __init init_alloc_remap(int nid, u64 start, u64 end); -#endif - #ifdef CONFIG_NUMA_EMU void __init numa_emulation(struct numa_meminfo *numa_meminfo, int numa_dist_cnt); diff --git a/arch/x86/oprofile/nmi_int.c b/arch/x86/oprofile/nmi_int.c index 26b8a8514ee5..48768df2471a 100644 --- a/arch/x86/oprofile/nmi_int.c +++ b/arch/x86/oprofile/nmi_int.c @@ -55,7 +55,7 @@ u64 op_x86_get_ctrl(struct op_x86_model_spec const *model, val |= counter_config->extra; event &= model->event_mask ? model->event_mask : 0xFF; val |= event & 0xFF; - val |= (event & 0x0F00) << 24; + val |= (u64)(event & 0x0F00) << 24; return val; } diff --git a/arch/x86/pci/fixup.c b/arch/x86/pci/fixup.c index d0e6e403b4f6..5dd467bd6121 100644 --- a/arch/x86/pci/fixup.c +++ b/arch/x86/pci/fixup.c @@ -519,3 +519,20 @@ static void sb600_disable_hpet_bar(struct pci_dev *dev) } } DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_ATI, 0x4385, sb600_disable_hpet_bar); + +/* + * Twinhead H12Y needs us to block out a region otherwise we map devices + * there and any access kills the box. 
+ * + * See: https://bugzilla.kernel.org/show_bug.cgi?id=10231 + * + * Match off the LPC and svid/sdid (older kernels lose the bridge subvendor) + */ +static void __devinit twinhead_reserve_killing_zone(struct pci_dev *dev) +{ + if (dev->subsystem_vendor == 0x14FF && dev->subsystem_device == 0xA003) { + pr_info("Reserving memory on Twinhead H12Y\n"); + request_mem_region(0xFFB00000, 0x100000, "twinhead"); + } +} +DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x27B9, twinhead_reserve_killing_zone); diff --git a/arch/x86/platform/efi/efi.c b/arch/x86/platform/efi/efi.c index 92660edaa1e7..1e406371a683 100644 --- a/arch/x86/platform/efi/efi.c +++ b/arch/x86/platform/efi/efi.c @@ -50,9 +50,6 @@ #define EFI_DEBUG 1 -int efi_enabled; -EXPORT_SYMBOL(efi_enabled); - struct efi __read_mostly efi = { .mps = EFI_INVALID_TABLE_ADDR, .acpi = EFI_INVALID_TABLE_ADDR, @@ -68,15 +65,29 @@ EXPORT_SYMBOL(efi); struct efi_memory_map memmap; -bool efi_64bit; -static bool efi_native; - static struct efi efi_phys __initdata; static efi_system_table_t efi_systab __initdata; +static inline bool efi_is_native(void) +{ + return IS_ENABLED(CONFIG_X86_64) == efi_enabled(EFI_64BIT); +} + +unsigned long x86_efi_facility; + +/* + * Returns 1 if 'facility' is enabled, 0 otherwise. + */ +int efi_enabled(int facility) +{ + return test_bit(facility, &x86_efi_facility) != 0; +} +EXPORT_SYMBOL(efi_enabled); + +static bool disable_runtime = false; static int __init setup_noefi(char *arg) { - efi_enabled = 0; + disable_runtime = true; return 0; } early_param("noefi", setup_noefi); @@ -419,10 +430,22 @@ void __init efi_reserve_boot_services(void) } } -static void __init efi_free_boot_services(void) +void __init efi_unmap_memmap(void) +{ + clear_bit(EFI_MEMMAP, &x86_efi_facility); + if (memmap.map) { + early_iounmap(memmap.map, memmap.nr_map * memmap.desc_size); + memmap.map = NULL; + } +} + +void __init efi_free_boot_services(void) { void *p; + if (!efi_is_native()) + return; + for (p = memmap.map; p < memmap.map_end; p += memmap.desc_size) { efi_memory_desc_t *md = p; unsigned long long start = md->phys_addr; @@ -438,11 +461,13 @@ static void __init efi_free_boot_services(void) free_bootmem_late(start, size); } + + efi_unmap_memmap(); } static int __init efi_systab_init(void *phys) { - if (efi_64bit) { + if (efi_enabled(EFI_64BIT)) { efi_system_table_64_t *systab64; u64 tmp = 0; @@ -534,7 +559,7 @@ static int __init efi_config_init(u64 tables, int nr_tables) void *config_tables, *tablep; int i, sz; - if (efi_64bit) + if (efi_enabled(EFI_64BIT)) sz = sizeof(efi_config_table_64_t); else sz = sizeof(efi_config_table_32_t); @@ -554,7 +579,7 @@ static int __init efi_config_init(u64 tables, int nr_tables) efi_guid_t guid; unsigned long table; - if (efi_64bit) { + if (efi_enabled(EFI_64BIT)) { u64 table64; guid = ((efi_config_table_64_t *)tablep)->guid; table64 = ((efi_config_table_64_t *)tablep)->table; @@ -666,22 +691,19 @@ void __init efi_init(void) if (boot_params.efi_info.efi_systab_hi || boot_params.efi_info.efi_memmap_hi) { pr_info("Table located above 4GB, disabling EFI.\n"); - efi_enabled = 0; return; } efi_phys.systab = (efi_system_table_t *)boot_params.efi_info.efi_systab; - efi_native = !efi_64bit; #else efi_phys.systab = (efi_system_table_t *) (boot_params.efi_info.efi_systab | ((__u64)boot_params.efi_info.efi_systab_hi<<32)); - efi_native = efi_64bit; #endif - if (efi_systab_init(efi_phys.systab)) { - efi_enabled = 0; + if (efi_systab_init(efi_phys.systab)) return; - } + + set_bit(EFI_SYSTEM_TABLES, &x86_efi_facility); /* 
* Show what we know for posterity @@ -699,29 +721,31 @@ void __init efi_init(void) efi.systab->hdr.revision >> 16, efi.systab->hdr.revision & 0xffff, vendor); - if (efi_config_init(efi.systab->tables, efi.systab->nr_tables)) { - efi_enabled = 0; + if (efi_config_init(efi.systab->tables, efi.systab->nr_tables)) return; - } + + set_bit(EFI_CONFIG_TABLES, &x86_efi_facility); /* * Note: We currently don't support runtime services on an EFI * that doesn't match the kernel 32/64-bit mode. */ - if (!efi_native) + if (!efi_is_native()) pr_info("No EFI runtime due to 32/64-bit mismatch with kernel\n"); - else if (efi_runtime_init()) { - efi_enabled = 0; - return; + else { + if (disable_runtime || efi_runtime_init()) + return; + set_bit(EFI_RUNTIME_SERVICES, &x86_efi_facility); } - if (efi_memmap_init()) { - efi_enabled = 0; + if (efi_memmap_init()) return; - } + + set_bit(EFI_MEMMAP, &x86_efi_facility); + #ifdef CONFIG_X86_32 - if (efi_native) { + if (efi_is_native()) { x86_platform.get_wallclock = efi_get_time; x86_platform.set_wallclock = efi_set_rtc_mmss; } @@ -787,8 +811,10 @@ void __init efi_enter_virtual_mode(void) * non-native EFI */ - if (!efi_native) - goto out; + if (!efi_is_native()) { + efi_unmap_memmap(); + return; + } /* Merge contiguous regions of the same type and attribute */ for (p = memmap.map; p < memmap.map_end; p += memmap.desc_size) { @@ -878,18 +904,12 @@ void __init efi_enter_virtual_mode(void) } /* - * Thankfully, it does seem that no runtime services other than - * SetVirtualAddressMap() will touch boot services code, so we can - * get rid of it all at this point - */ - efi_free_boot_services(); - - /* * Now that EFI is in virtual mode, update the function * pointers in the runtime service table to the new virtual addresses. * * Call EFI services through wrapper functions. 
*/ + efi.runtime_version = efi_systab.hdr.revision; efi.get_time = virt_efi_get_time; efi.set_time = virt_efi_set_time; efi.get_wakeup_time = virt_efi_get_wakeup_time; @@ -906,9 +926,6 @@ void __init efi_enter_virtual_mode(void) if (__supported_pte_mask & _PAGE_NX) runtime_code_page_mkexec(); -out: - early_iounmap(memmap.map, memmap.nr_map * memmap.desc_size); - memmap.map = NULL; kfree(new_memmap); } @@ -935,6 +952,9 @@ u64 efi_mem_attributes(unsigned long phys_addr) efi_memory_desc_t *md; void *p; + if (!efi_enabled(EFI_MEMMAP)) + return 0; + for (p = memmap.map; p < memmap.map_end; p += memmap.desc_size) { md = p; if ((md->phys_addr <= phys_addr) && diff --git a/arch/x86/platform/efi/efi_64.c b/arch/x86/platform/efi/efi_64.c index ac3aa54e2654..0fba86da5895 100644 --- a/arch/x86/platform/efi/efi_64.c +++ b/arch/x86/platform/efi/efi_64.c @@ -38,7 +38,7 @@ #include <asm/cacheflush.h> #include <asm/fixmap.h> -static pgd_t save_pgd __initdata; +static pgd_t *save_pgd __initdata; static unsigned long efi_flags __initdata; static void __init early_code_mapping_set_exec(int executable) @@ -61,12 +61,20 @@ static void __init early_code_mapping_set_exec(int executable) void __init efi_call_phys_prelog(void) { unsigned long vaddress; + int pgd; + int n_pgds; early_code_mapping_set_exec(1); local_irq_save(efi_flags); - vaddress = (unsigned long)__va(0x0UL); - save_pgd = *pgd_offset_k(0x0UL); - set_pgd(pgd_offset_k(0x0UL), *pgd_offset_k(vaddress)); + + n_pgds = DIV_ROUND_UP((max_pfn << PAGE_SHIFT), PGDIR_SIZE); + save_pgd = kmalloc(n_pgds * sizeof(pgd_t), GFP_KERNEL); + + for (pgd = 0; pgd < n_pgds; pgd++) { + save_pgd[pgd] = *pgd_offset_k(pgd * PGDIR_SIZE); + vaddress = (unsigned long)__va(pgd * PGDIR_SIZE); + set_pgd(pgd_offset_k(pgd * PGDIR_SIZE), *pgd_offset_k(vaddress)); + } __flush_tlb_all(); } @@ -75,7 +83,11 @@ void __init efi_call_phys_epilog(void) /* * After the lock is released, the original page table is restored. 
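The prelog above no longer saves a single pgd entry but enough of them to cover all of physical memory; the count is simple arithmetic. A sketch of it (PGDIR_SHIFT value assumed for x86_64):

#include <stdio.h>

#define PAGE_SHIFT  12
#define PGDIR_SHIFT 39                          /* each pgd entry maps 512G */
#define PGDIR_SIZE  (1ULL << PGDIR_SHIFT)
#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
        unsigned long long max_pfn = (64ULL << 30) >> PAGE_SHIFT;  /* say, 64G */
        unsigned long long n_pgds = DIV_ROUND_UP(max_pfn << PAGE_SHIFT, PGDIR_SIZE);

        /* the epilog restores the same n_pgds entries and frees the array */
        printf("n_pgds = %llu\n", n_pgds);      /* 1 here; >1 only above 512G */
        return 0;
}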
*/ - set_pgd(pgd_offset_k(0x0UL), save_pgd); + int pgd; + int n_pgds = DIV_ROUND_UP((max_pfn << PAGE_SHIFT) , PGDIR_SIZE); + for (pgd = 0; pgd < n_pgds; pgd++) + set_pgd(pgd_offset_k(pgd * PGDIR_SIZE), save_pgd[pgd]); + kfree(save_pgd); __flush_tlb_all(); local_irq_restore(efi_flags); early_code_mapping_set_exec(0); diff --git a/arch/x86/power/hibernate_32.c b/arch/x86/power/hibernate_32.c index 74202c1910cd..7d28c885d238 100644 --- a/arch/x86/power/hibernate_32.c +++ b/arch/x86/power/hibernate_32.c @@ -129,8 +129,6 @@ static int resume_physical_mapping_init(pgd_t *pgd_base) } } - resume_map_numa_kva(pgd_base); - return 0; } diff --git a/arch/x86/syscalls/syscall_64.tbl b/arch/x86/syscalls/syscall_64.tbl index dd29a9ea27c5..fd1f10348130 100644 --- a/arch/x86/syscalls/syscall_64.tbl +++ b/arch/x86/syscalls/syscall_64.tbl @@ -60,8 +60,8 @@ 51 common getsockname sys_getsockname 52 common getpeername sys_getpeername 53 common socketpair sys_socketpair -54 common setsockopt sys_setsockopt -55 common getsockopt sys_getsockopt +54 64 setsockopt sys_setsockopt +55 64 getsockopt sys_getsockopt 56 common clone stub_clone 57 common fork stub_fork 58 common vfork stub_vfork @@ -351,3 +351,5 @@ 538 x32 sendmmsg compat_sys_sendmmsg 539 x32 process_vm_readv compat_sys_process_vm_readv 540 x32 process_vm_writev compat_sys_process_vm_writev +541 x32 setsockopt compat_sys_setsockopt +542 x32 getsockopt compat_sys_getsockopt diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c index 40edfc37a6fd..59100548cbdb 100644 --- a/arch/x86/xen/enlighten.c +++ b/arch/x86/xen/enlighten.c @@ -942,7 +942,16 @@ static void xen_write_cr4(unsigned long cr4) native_write_cr4(cr4); } - +#ifdef CONFIG_X86_64 +static inline unsigned long xen_read_cr8(void) +{ + return 0; +} +static inline void xen_write_cr8(unsigned long val) +{ + BUG_ON(val); +} +#endif static int xen_write_msr_safe(unsigned int msr, unsigned low, unsigned high) { int ret; @@ -1111,6 +1120,11 @@ static const struct pv_cpu_ops xen_cpu_ops __initconst = { .read_cr4_safe = native_read_cr4_safe, .write_cr4 = xen_write_cr4, +#ifdef CONFIG_X86_64 + .read_cr8 = xen_read_cr8, + .write_cr8 = xen_write_cr8, +#endif + .wbinvd = native_wbinvd, .read_msr = native_read_msr_safe, @@ -1121,6 +1135,8 @@ static const struct pv_cpu_ops xen_cpu_ops __initconst = { .read_tsc = native_read_tsc, .read_pmc = native_read_pmc, + .read_tscp = native_read_tscp, + .iret = xen_iret, .irq_enable_sysexit = xen_sysexit, #ifdef CONFIG_X86_64 diff --git a/arch/x86/xen/mmu.c b/arch/x86/xen/mmu.c index 69f5857660ac..5cb8e27d43d2 100644 --- a/arch/x86/xen/mmu.c +++ b/arch/x86/xen/mmu.c @@ -1203,6 +1203,25 @@ unsigned long xen_read_cr2_direct(void) return this_cpu_read(xen_vcpu_info.arch.cr2); } +void xen_flush_tlb_all(void) +{ + struct mmuext_op *op; + struct multicall_space mcs; + + trace_xen_mmu_flush_tlb_all(0); + + preempt_disable(); + + mcs = xen_mc_entry(sizeof(*op)); + + op = mcs.args; + op->cmd = MMUEXT_TLB_FLUSH_ALL; + MULTI_mmuext_op(mcs.mc, op, 1, NULL, DOMID_SELF); + + xen_mc_issue(PARAVIRT_LAZY_MMU); + + preempt_enable(); +} static void xen_flush_tlb(void) { struct mmuext_op *op; @@ -2364,7 +2383,7 @@ int xen_remap_domain_mfn_range(struct vm_area_struct *vma, err = 0; out: - flush_tlb_all(); + xen_flush_tlb_all(); return err; } diff --git a/arch/x86/xen/p2m.c b/arch/x86/xen/p2m.c index 00a038540c8e..3ace81769478 100644 --- a/arch/x86/xen/p2m.c +++ b/arch/x86/xen/p2m.c @@ -714,9 +714,6 @@ int m2p_add_override(unsigned long mfn, struct page *page, 
xen_mc_issue(PARAVIRT_LAZY_MMU); } - /* let's use dev_bus_addr to record the old mfn instead */ - kmap_op->dev_bus_addr = page->index; - page->index = (unsigned long) kmap_op; } spin_lock_irqsave(&m2p_override_lock, flags); list_add(&page->lru, &m2p_overrides[mfn_hash(mfn)]); @@ -743,7 +740,8 @@ int m2p_add_override(unsigned long mfn, struct page *page, return 0; } EXPORT_SYMBOL_GPL(m2p_add_override); -int m2p_remove_override(struct page *page, bool clear_pte) +int m2p_remove_override(struct page *page, + struct gnttab_map_grant_ref *kmap_op) { unsigned long flags; unsigned long mfn; @@ -773,10 +771,8 @@ int m2p_remove_override(struct page *page, bool clear_pte) WARN_ON(!PagePrivate(page)); ClearPagePrivate(page); - if (clear_pte) { - struct gnttab_map_grant_ref *map_op = - (struct gnttab_map_grant_ref *) page->index; - set_phys_to_machine(pfn, map_op->dev_bus_addr); + set_phys_to_machine(pfn, page->index); + if (kmap_op != NULL) { if (!PageHighMem(page)) { struct multicall_space mcs; struct gnttab_unmap_grant_ref *unmap_op; @@ -788,13 +784,13 @@ int m2p_remove_override(struct page *page, bool clear_pte) * issued. In this case handle is going to -1 because * it hasn't been modified yet. */ - if (map_op->handle == -1) + if (kmap_op->handle == -1) xen_mc_flush(); /* - * Now if map_op->handle is negative it means that the + * Now if kmap_op->handle is negative it means that the * hypercall actually returned an error. */ - if (map_op->handle == GNTST_general_error) { + if (kmap_op->handle == GNTST_general_error) { printk(KERN_WARNING "m2p_remove_override: " "pfn %lx mfn %lx, failed to modify kernel mappings", pfn, mfn); @@ -804,8 +800,8 @@ int m2p_remove_override(struct page *page, bool clear_pte) mcs = xen_mc_entry( sizeof(struct gnttab_unmap_grant_ref)); unmap_op = mcs.args; - unmap_op->host_addr = map_op->host_addr; - unmap_op->handle = map_op->handle; + unmap_op->host_addr = kmap_op->host_addr; + unmap_op->handle = kmap_op->handle; unmap_op->dev_bus_addr = 0; MULTI_grant_table_op(mcs.mc, @@ -816,10 +812,9 @@ int m2p_remove_override(struct page *page, bool clear_pte) set_pte_at(&init_mm, address, ptep, pfn_pte(pfn, PAGE_KERNEL)); __flush_tlb_single(address); - map_op->host_addr = 0; + kmap_op->host_addr = 0; } - } else - set_phys_to_machine(pfn, page->index); + } /* p2m(m2p(mfn)) == FOREIGN_FRAME(mfn): the mfn is already present * somewhere in this domain, even before being added to the diff --git a/arch/x86/xen/setup.c b/arch/x86/xen/setup.c index 1ba8dff26753..017d48a26a02 100644 --- a/arch/x86/xen/setup.c +++ b/arch/x86/xen/setup.c @@ -17,6 +17,7 @@ #include <asm/e820.h> #include <asm/setup.h> #include <asm/acpi.h> +#include <asm/numa.h> #include <asm/xen/hypervisor.h> #include <asm/xen/hypercall.h> @@ -79,9 +80,16 @@ static void __init xen_add_extra_mem(u64 start, u64 size) memblock_reserve(start, size); xen_max_p2m_pfn = PFN_DOWN(start + size); + for (pfn = PFN_DOWN(start); pfn < xen_max_p2m_pfn; pfn++) { + unsigned long mfn = pfn_to_mfn(pfn); + + if (WARN(mfn == pfn, "Trying to over-write 1-1 mapping (pfn: %lx)\n", pfn)) + continue; + WARN(mfn != INVALID_P2M_ENTRY, "Trying to remove %lx which has %lx mfn!\n", + pfn, mfn); - for (pfn = PFN_DOWN(start); pfn <= xen_max_p2m_pfn; pfn++) __set_phys_to_machine(pfn, INVALID_P2M_ENTRY); + } } static unsigned long __init xen_release_chunk(unsigned long start, @@ -424,4 +432,7 @@ void __init xen_arch_setup(void) disable_cpufreq(); WARN_ON(set_pm_idle_to_default()); fiddle_vdso(); +#ifdef CONFIG_NUMA + numa_off = 1; +#endif } diff --git 
a/arch/x86/xen/spinlock.c b/arch/x86/xen/spinlock.c index d69cc6c3f808..67bc7bae746e 100644 --- a/arch/x86/xen/spinlock.c +++ b/arch/x86/xen/spinlock.c @@ -328,7 +328,6 @@ static noinline void xen_spin_unlock_slow(struct xen_spinlock *xl) if (per_cpu(lock_spinners, cpu) == xl) { ADD_STATS(released_slow_kicked, 1); xen_send_IPI_one(cpu, XEN_SPIN_UNLOCK_VECTOR); - break; } } } diff --git a/arch/x86/xen/xen-asm_32.S b/arch/x86/xen/xen-asm_32.S index b040b0e518ca..7328f71651e1 100644 --- a/arch/x86/xen/xen-asm_32.S +++ b/arch/x86/xen/xen-asm_32.S @@ -88,11 +88,11 @@ ENTRY(xen_iret) */ #ifdef CONFIG_SMP GET_THREAD_INFO(%eax) - movl TI_cpu(%eax), %eax - movl __per_cpu_offset(,%eax,4), %eax - mov xen_vcpu(%eax), %eax + movl %ss:TI_cpu(%eax), %eax + movl %ss:__per_cpu_offset(,%eax,4), %eax + mov %ss:xen_vcpu(%eax), %eax #else - movl xen_vcpu, %eax + movl %ss:xen_vcpu, %eax #endif /* check IF state we're restoring */ @@ -105,11 +105,11 @@ ENTRY(xen_iret) * resuming the code, so we don't have to be worried about * being preempted to another CPU. */ - setz XEN_vcpu_info_mask(%eax) + setz %ss:XEN_vcpu_info_mask(%eax) xen_iret_start_crit: /* check for unmasked and pending */ - cmpw $0x0001, XEN_vcpu_info_pending(%eax) + cmpw $0x0001, %ss:XEN_vcpu_info_pending(%eax) /* * If there's something pending, mask events again so we can @@ -117,7 +117,7 @@ xen_iret_start_crit: * touch XEN_vcpu_info_mask. */ jne 1f - movb $1, XEN_vcpu_info_mask(%eax) + movb $1, %ss:XEN_vcpu_info_mask(%eax) 1: popl %eax diff --git a/arch/xtensa/kernel/process.c b/arch/xtensa/kernel/process.c index 6a2d6edf8f72..7a41d9ec8ac7 100644 --- a/arch/xtensa/kernel/process.c +++ b/arch/xtensa/kernel/process.c @@ -31,6 +31,7 @@ #include <linux/mqueue.h> #include <linux/fs.h> #include <linux/slab.h> +#include <linux/rcupdate.h> #include <asm/pgtable.h> #include <asm/uaccess.h> @@ -110,8 +111,10 @@ void cpu_idle(void) /* endless idle loop with no priority at all */ while (1) { + rcu_idle_enter(); while (!need_resched()) platform_idle(); + rcu_idle_exit(); schedule_preempt_disabled(); } } diff --git a/block/blk-core.c b/block/blk-core.c index 1f61b74867e4..85fd41003434 100644 --- a/block/blk-core.c +++ b/block/blk-core.c @@ -601,7 +601,7 @@ blk_init_allocated_queue(struct request_queue *q, request_fn_proc *rfn, q->request_fn = rfn; q->prep_rq_fn = NULL; q->unprep_rq_fn = NULL; - q->queue_flags = QUEUE_FLAG_DEFAULT; + q->queue_flags |= QUEUE_FLAG_DEFAULT; /* Override internal queue lock with supplied lock pointer */ if (lock) diff --git a/block/genhd.c b/block/genhd.c index 53c3f7e212a7..2ade7561436c 100644 --- a/block/genhd.c +++ b/block/genhd.c @@ -25,7 +25,7 @@ static DEFINE_MUTEX(block_class_lock); struct kobject *block_depr; /* for extended dynamic devt allocation, currently only one major is used */ -#define MAX_EXT_DEVT (1 << MINORBITS) +#define NR_EXT_DEVT (1 << MINORBITS) /* For extended devt allocation. ext_devt_mutex prevents look up * results from going away underneath its user. 
@@ -420,17 +420,18 @@ int blk_alloc_devt(struct hd_struct *part, dev_t *devt) do { if (!idr_pre_get(&ext_devt_idr, GFP_KERNEL)) return -ENOMEM; + mutex_lock(&ext_devt_mutex); rc = idr_get_new(&ext_devt_idr, part, &idx); + if (!rc && idx >= NR_EXT_DEVT) { + idr_remove(&ext_devt_idr, idx); + rc = -EBUSY; + } + mutex_unlock(&ext_devt_mutex); } while (rc == -EAGAIN); if (rc) return rc; - if (idx > MAX_EXT_DEVT) { - idr_remove(&ext_devt_idr, idx); - return -EBUSY; - } - *devt = MKDEV(BLOCK_EXT_MAJOR, blk_mangle_minor(idx)); return 0; } @@ -644,7 +645,6 @@ void del_gendisk(struct gendisk *disk) disk_part_iter_exit(&piter); invalidate_partition(disk, 0); - blk_free_devt(disk_to_dev(disk)->devt); set_capacity(disk, 0); disk->flags &= ~GENHD_FL_UP; @@ -662,6 +662,7 @@ void del_gendisk(struct gendisk *disk) if (!sysfs_deprecated) sysfs_remove_link(block_depr, dev_name(disk_to_dev(disk))); device_del(disk_to_dev(disk)); + blk_free_devt(disk_to_dev(disk)->devt); } EXPORT_SYMBOL(del_gendisk); diff --git a/block/partition-generic.c b/block/partition-generic.c index 803d1513f4c6..264028c3b8c8 100644 --- a/block/partition-generic.c +++ b/block/partition-generic.c @@ -260,11 +260,11 @@ void delete_partition(struct gendisk *disk, int partno) if (!part) return; - blk_free_devt(part_devt(part)); rcu_assign_pointer(ptbl->part[partno], NULL); rcu_assign_pointer(ptbl->last_lookup, NULL); kobject_put(part->holder_dir); device_del(part_to_dev(part)); + blk_free_devt(part_devt(part)); hd_struct_put(part); } diff --git a/block/scsi_ioctl.c b/block/scsi_ioctl.c index 260fa80ef575..9a87daa6f4fb 100644 --- a/block/scsi_ioctl.c +++ b/block/scsi_ioctl.c @@ -721,11 +721,14 @@ int scsi_verify_blk_ioctl(struct block_device *bd, unsigned int cmd) break; } + if (capable(CAP_SYS_RAWIO)) + return 0; + /* In particular, rule out all resets and host-specific ioctls. */ printk_ratelimited(KERN_WARNING "%s: sending ioctl %x to a partition!\n", current->comm, cmd); - return capable(CAP_SYS_RAWIO) ? 0 : -ENOIOCTLCMD; + return -ENOIOCTLCMD; } EXPORT_SYMBOL(scsi_verify_blk_ioctl); diff --git a/crypto/cryptd.c b/crypto/cryptd.c index 671d4d6d14df..7bdd61b867c8 100644 --- a/crypto/cryptd.c +++ b/crypto/cryptd.c @@ -137,13 +137,18 @@ static void cryptd_queue_worker(struct work_struct *work) struct crypto_async_request *req, *backlog; cpu_queue = container_of(work, struct cryptd_cpu_queue, work); - /* Only handle one request at a time to avoid hogging crypto - * workqueue. preempt_disable/enable is used to prevent - * being preempted by cryptd_enqueue_request() */ + /* + * Only handle one request at a time to avoid hogging crypto workqueue. + * preempt_disable/enable is used to prevent being preempted by + * cryptd_enqueue_request(). local_bh_disable/enable is used to prevent + * cryptd_enqueue_request() being accessed from software interrupts. 
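The blk_alloc_devt() change above moves the range check inside the mutex-protected region, so an id past NR_EXT_DEVT is removed again before anyone can look it up. The pattern, stripped of the idr details (toy counter instead of an idr):

#include <pthread.h>
#include <stdio.h>

#define NR_EXT_DEVT (1 << 20)           /* stand-in for (1 << MINORBITS) */

static pthread_mutex_t ext_devt_mutex = PTHREAD_MUTEX_INITIALIZER;
static int next_idx;                    /* toy replacement for the idr */

static int alloc_ext_devt(int *idx)
{
        int rc = 0;

        pthread_mutex_lock(&ext_devt_mutex);
        *idx = next_idx++;
        if (*idx >= NR_EXT_DEVT) {
                next_idx--;             /* analogous to idr_remove() */
                rc = -1;                /* -EBUSY in the kernel */
        }
        pthread_mutex_unlock(&ext_devt_mutex);
        return rc;
}

int main(void)
{
        int idx;

        printf("%d\n", alloc_ext_devt(&idx) ? -1 : idx);
        return 0;
}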
+ */ + local_bh_disable(); preempt_disable(); backlog = crypto_get_backlog(&cpu_queue->queue); req = crypto_dequeue_request(&cpu_queue->queue); preempt_enable(); + local_bh_enable(); if (!req) return; diff --git a/drivers/acpi/acpica/tbxface.c b/drivers/acpi/acpica/tbxface.c index abcc6412c244..33214d778881 100644 --- a/drivers/acpi/acpica/tbxface.c +++ b/drivers/acpi/acpica/tbxface.c @@ -436,6 +436,7 @@ acpi_get_table_with_size(char *signature, return (AE_NOT_FOUND); } +ACPI_EXPORT_SYMBOL(acpi_get_table_with_size) acpi_status acpi_get_table(char *signature, diff --git a/drivers/acpi/battery.c b/drivers/acpi/battery.c index 7dd3f9fb9f3f..6ea287e22296 100644 --- a/drivers/acpi/battery.c +++ b/drivers/acpi/battery.c @@ -34,6 +34,7 @@ #include <linux/dmi.h> #include <linux/slab.h> #include <linux/suspend.h> +#include <asm/unaligned.h> #ifdef CONFIG_ACPI_PROCFS_POWER #include <linux/proc_fs.h> @@ -95,6 +96,18 @@ enum { ACPI_BATTERY_ALARM_PRESENT, ACPI_BATTERY_XINFO_PRESENT, ACPI_BATTERY_QUIRK_PERCENTAGE_CAPACITY, + /* On Lenovo Thinkpad models from 2010 and 2011, the power unit + switches between mWh and mAh depending on whether the system + is running on battery or not. When mAh is the unit, most + reported values are incorrect and need to be adjusted by + 10000/design_voltage. Verified on x201, t410, t410s, and x220. + Pre-2010 and 2012 models appear to always report in mWh and + are thus unaffected (tested with t42, t61, t500, x200, x300, + and x230). Also, in mid-2012 Lenovo issued a BIOS update for + the 2011 models that fixes the issue (tested on x220 with a + post-1.29 BIOS), but as of Nov. 2012, no such update is + available for the 2010 models. */ + ACPI_BATTERY_QUIRK_THINKPAD_MAH, }; struct acpi_battery { @@ -429,6 +442,21 @@ static int acpi_battery_get_info(struct acpi_battery *battery) kfree(buffer.pointer); if (test_bit(ACPI_BATTERY_QUIRK_PERCENTAGE_CAPACITY, &battery->flags)) battery->full_charge_capacity = battery->design_capacity; + if (test_bit(ACPI_BATTERY_QUIRK_THINKPAD_MAH, &battery->flags) && + battery->power_unit && battery->design_voltage) { + battery->design_capacity = battery->design_capacity * + 10000 / battery->design_voltage; + battery->full_charge_capacity = battery->full_charge_capacity * + 10000 / battery->design_voltage; + battery->design_capacity_warning = + battery->design_capacity_warning * + 10000 / battery->design_voltage; + /* Curiously, design_capacity_low, unlike the rest of them, + is correct. */ + /* capacity_granularity_* equal 1 on the systems tested, so + it's impossible to tell if they would need an adjustment + or not if their values were higher. */ + } return result; } @@ -477,6 +505,11 @@ static int acpi_battery_get_state(struct acpi_battery *battery) && battery->capacity_now >= 0 && battery->capacity_now <= 100) battery->capacity_now = (battery->capacity_now * battery->full_charge_capacity) / 100; + if (test_bit(ACPI_BATTERY_QUIRK_THINKPAD_MAH, &battery->flags) && + battery->power_unit && battery->design_voltage) { + battery->capacity_now = battery->capacity_now * + 10000 / battery->design_voltage; + } return result; } @@ -586,6 +619,24 @@ static void sysfs_remove_battery(struct acpi_battery *battery) mutex_unlock(&battery->sysfs_lock); } +static void find_battery(const struct dmi_header *dm, void *private) +{ + struct acpi_battery *battery = (struct acpi_battery *)private; + /* Note: the hardcoded offsets below have been extracted from + the source code of dmidecode. 
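The ThinkPad quirk above rescales every capacity by 10000/design_voltage once the wrong unit is detected; the detection itself (in find_battery(), just below) compares the ACPI value against the DMI battery entry. Worked through with made-up numbers:

#include <stdio.h>

int main(void)
{
        unsigned int design_voltage = 11100;    /* mV */
        unsigned int acpi_design_cap = 5616;    /* what the firmware reported */
        unsigned int dmi_capacity = 56160;      /* mWh, from the DMI battery record */

        /* unit is wrong if converting as mAh does not match DMI but *10 does */
        int unit_is_wrong =
                acpi_design_cap * design_voltage / 1000 != dmi_capacity &&
                acpi_design_cap * 10 == dmi_capacity;

        if (unit_is_wrong)
                printf("adjusted design capacity: %u mAh\n",
                       acpi_design_cap * 10000 / design_voltage);
        return 0;
}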
*/ + if (dm->type == DMI_ENTRY_PORTABLE_BATTERY && dm->length >= 8) { + const u8 *dmi_data = (const u8 *)(dm + 1); + int dmi_capacity = get_unaligned((const u16 *)(dmi_data + 6)); + if (dm->length >= 18) + dmi_capacity *= dmi_data[17]; + if (battery->design_capacity * battery->design_voltage / 1000 + != dmi_capacity && + battery->design_capacity * 10 == dmi_capacity) + set_bit(ACPI_BATTERY_QUIRK_THINKPAD_MAH, + &battery->flags); + } +} + /* * According to the ACPI spec, some kinds of primary batteries can * report percentage battery remaining capacity directly to OS. @@ -611,6 +662,32 @@ static void acpi_battery_quirks(struct acpi_battery *battery) battery->capacity_now = (battery->capacity_now * battery->full_charge_capacity) / 100; } + + if (test_bit(ACPI_BATTERY_QUIRK_THINKPAD_MAH, &battery->flags)) + return ; + + if (battery->power_unit && dmi_name_in_vendors("LENOVO")) { + const char *s; + s = dmi_get_system_info(DMI_PRODUCT_VERSION); + if (s && !strnicmp(s, "ThinkPad", 8)) { + dmi_walk(find_battery, battery); + if (test_bit(ACPI_BATTERY_QUIRK_THINKPAD_MAH, + &battery->flags) && + battery->design_voltage) { + battery->design_capacity = + battery->design_capacity * + 10000 / battery->design_voltage; + battery->full_charge_capacity = + battery->full_charge_capacity * + 10000 / battery->design_voltage; + battery->design_capacity_warning = + battery->design_capacity_warning * + 10000 / battery->design_voltage; + battery->capacity_now = battery->capacity_now * + 10000 / battery->design_voltage; + } + } + } } static int acpi_battery_update(struct acpi_battery *battery) diff --git a/drivers/acpi/bus.c b/drivers/acpi/bus.c index 3188da3df8da..cf02e971467f 100644 --- a/drivers/acpi/bus.c +++ b/drivers/acpi/bus.c @@ -954,8 +954,6 @@ static int __init acpi_bus_init(void) status = acpi_ec_ecdt_probe(); /* Ignore result. Not having an ECDT is not fatal. */ - acpi_bus_osc_support(); - status = acpi_initialize_objects(ACPI_FULL_INITIALIZATION); if (ACPI_FAILURE(status)) { printk(KERN_ERR PREFIX "Unable to initialize ACPI objects\n"); @@ -963,6 +961,12 @@ static int __init acpi_bus_init(void) } /* + * _OSC method may exist in module level code, + * so it must be run after ACPI_FULL_INITIALIZATION + */ + acpi_bus_osc_support(); + + /* * _PDC control method may load dynamic SSDT tables, * and we need to install the table handler before that. */ diff --git a/drivers/acpi/ec.c b/drivers/acpi/ec.c index 7edaccce6640..a51df9681319 100644 --- a/drivers/acpi/ec.c +++ b/drivers/acpi/ec.c @@ -71,9 +71,6 @@ enum ec_command { #define ACPI_EC_UDELAY_GLK 1000 /* Wait 1ms max. to get global lock */ #define ACPI_EC_MSI_UDELAY 550 /* Wait 550us for MSI EC */ -#define ACPI_EC_STORM_THRESHOLD 8 /* number of false interrupts - per one transaction */ - enum { EC_FLAGS_QUERY_PENDING, /* Query is pending */ EC_FLAGS_GPE_STORM, /* GPE storm detected */ @@ -87,6 +84,15 @@ static unsigned int ec_delay __read_mostly = ACPI_EC_DELAY; module_param(ec_delay, uint, 0644); MODULE_PARM_DESC(ec_delay, "Timeout(ms) waited until an EC command completes"); +/* + * If the number of false interrupts per one transaction exceeds + * this threshold, will think there is a GPE storm happened and + * will disable the GPE for normal transaction. 
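The hardcoded storm threshold becomes a module parameter here; the decision it feeds is just a per-transaction counter comparison. A toy model (the count of false interrupts is invented):

#include <stdio.h>

static unsigned int ec_storm_threshold = 8;     /* old hardcoded default; the
                                                   Clevo quirk below raises it to 20 */

int main(void)
{
        unsigned int irq_count = 12;    /* false GPEs seen in one transaction */

        if (irq_count > ec_storm_threshold)
                printf("GPE storm detected, transactions will use polling mode\n");
        return 0;
}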
+ */ +static unsigned int ec_storm_threshold __read_mostly = 8; +module_param(ec_storm_threshold, uint, 0644); +MODULE_PARM_DESC(ec_storm_threshold, "Maxim false GPE numbers not considered as GPE storm"); + /* If we find an EC via the ECDT, we need to keep a ptr to its context */ /* External interfaces use first EC only, so remember */ typedef int (*acpi_ec_query_func) (void *data); @@ -319,7 +325,7 @@ static int acpi_ec_transaction(struct acpi_ec *ec, struct transaction *t) msleep(1); /* It is safe to enable the GPE outside of the transaction. */ acpi_enable_gpe(NULL, ec->gpe); - } else if (t->irq_count > ACPI_EC_STORM_THRESHOLD) { + } else if (t->irq_count > ec_storm_threshold) { pr_info(PREFIX "GPE storm detected, " "transactions will use polling mode\n"); set_bit(EC_FLAGS_GPE_STORM, &ec->flags); @@ -924,6 +930,17 @@ static int ec_flag_msi(const struct dmi_system_id *id) return 0; } +/* + * Clevo M720 notebook actually works ok with IRQ mode, if we lifted + * the GPE storm threshold back to 20 + */ +static int ec_enlarge_storm_threshold(const struct dmi_system_id *id) +{ + pr_debug("Setting the EC GPE storm threshold to 20\n"); + ec_storm_threshold = 20; + return 0; +} + static struct dmi_system_id __initdata ec_dmi_table[] = { { ec_skip_dsdt_scan, "Compal JFL92", { @@ -955,10 +972,13 @@ static struct dmi_system_id __initdata ec_dmi_table[] = { { ec_validate_ecdt, "ASUS hardware", { DMI_MATCH(DMI_BOARD_VENDOR, "ASUSTeK Computer Inc.") }, NULL}, + { + ec_enlarge_storm_threshold, "CLEVO hardware", { + DMI_MATCH(DMI_SYS_VENDOR, "CLEVO Co."), + DMI_MATCH(DMI_PRODUCT_NAME, "M720T/M730T"),}, NULL}, {}, }; - int __init acpi_ec_ecdt_probe(void) { acpi_status status; diff --git a/drivers/acpi/osl.c b/drivers/acpi/osl.c index c3881b2eb8b2..f48720cf96ff 100644 --- a/drivers/acpi/osl.c +++ b/drivers/acpi/osl.c @@ -250,7 +250,7 @@ acpi_physical_address __init acpi_os_get_root_pointer(void) return acpi_rsdp; #endif - if (efi_enabled) { + if (efi_enabled(EFI_CONFIG_TABLES)) { if (efi.acpi20 != EFI_INVALID_TABLE_ADDR) return efi.acpi20; else if (efi.acpi != EFI_INVALID_TABLE_ADDR) diff --git a/drivers/acpi/power.c b/drivers/acpi/power.c index 0500f719f63e..2adef535f15e 100644 --- a/drivers/acpi/power.c +++ b/drivers/acpi/power.c @@ -103,6 +103,7 @@ struct acpi_power_resource { /* List of devices relying on this power resource */ struct acpi_power_resource_device *devices; + struct mutex devices_lock; }; static struct list_head acpi_power_resource_list; @@ -221,7 +222,6 @@ static void acpi_power_on_device(struct acpi_power_managed_device *device) static int __acpi_power_on(struct acpi_power_resource *resource) { - struct acpi_power_resource_device *device_list = resource->devices; acpi_status status = AE_OK; status = acpi_evaluate_object(resource->device->handle, "_ON", NULL, NULL); @@ -234,19 +234,15 @@ static int __acpi_power_on(struct acpi_power_resource *resource) ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Power resource [%s] turned on\n", resource->name)); - while (device_list) { - acpi_power_on_device(device_list->device); - - device_list = device_list->next; - } - return 0; } static int acpi_power_on(acpi_handle handle) { int result = 0; + bool resume_device = false; struct acpi_power_resource *resource = NULL; + struct acpi_power_resource_device *device_list; result = acpi_power_get_context(handle, &resource); if (result) @@ -262,10 +258,25 @@ static int acpi_power_on(acpi_handle handle) result = __acpi_power_on(resource); if (result) resource->ref_count--; + else + resume_device = true; } 
mutex_unlock(&resource->resource_lock); + if (!resume_device) + return result; + + mutex_lock(&resource->devices_lock); + + device_list = resource->devices; + while (device_list) { + acpi_power_on_device(device_list->device); + device_list = device_list->next; + } + + mutex_unlock(&resource->devices_lock); + return result; } @@ -351,7 +362,7 @@ static void __acpi_power_resource_unregister_device(struct device *dev, if (acpi_power_get_context(res_handle, &resource)) return; - mutex_lock(&resource->resource_lock); + mutex_lock(&resource->devices_lock); prev = NULL; curr = resource->devices; while (curr) { @@ -368,7 +379,7 @@ static void __acpi_power_resource_unregister_device(struct device *dev, prev = curr; curr = curr->next; } - mutex_unlock(&resource->resource_lock); + mutex_unlock(&resource->devices_lock); } /* Unlink dev from all power resources in _PR0 */ @@ -409,10 +420,10 @@ static int __acpi_power_resource_register_device( power_resource_device->device = powered_device; - mutex_lock(&resource->resource_lock); + mutex_lock(&resource->devices_lock); power_resource_device->next = resource->devices; resource->devices = power_resource_device; - mutex_unlock(&resource->resource_lock); + mutex_unlock(&resource->devices_lock); return 0; } @@ -457,7 +468,7 @@ int acpi_power_resource_register_device(struct device *dev, acpi_handle handle) return ret; no_power_resource: - printk(KERN_WARNING PREFIX "Invalid Power Resource to register!"); + printk(KERN_DEBUG PREFIX "Invalid Power Resource to register!"); return -ENODEV; } @@ -715,6 +726,7 @@ static int acpi_power_add(struct acpi_device *device) resource->device = device; mutex_init(&resource->resource_lock); + mutex_init(&resource->devices_lock); strcpy(resource->name, device->pnp.bus_id); strcpy(acpi_device_name(device), ACPI_POWER_DEVICE_NAME); strcpy(acpi_device_class(device), ACPI_POWER_CLASS); diff --git a/drivers/acpi/processor_driver.c b/drivers/acpi/processor_driver.c index bbac51e79d1f..4a2c131bf086 100644 --- a/drivers/acpi/processor_driver.c +++ b/drivers/acpi/processor_driver.c @@ -407,6 +407,7 @@ static void acpi_processor_notify(struct acpi_device *device, u32 event) acpi_bus_generate_proc_event(device, event, 0); acpi_bus_generate_netlink_event(device->pnp.device_class, dev_name(&device->dev), event, 0); + break; default: ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Unsupported event [0x%x]\n", event)); diff --git a/drivers/acpi/processor_idle.c b/drivers/acpi/processor_idle.c index 5bc828bcfdbf..01fd7df65c4f 100644 --- a/drivers/acpi/processor_idle.c +++ b/drivers/acpi/processor_idle.c @@ -1011,6 +1011,9 @@ static int acpi_processor_setup_cpuidle(struct acpi_processor *pr) return -EINVAL; } + if (!dev) + return -EINVAL; + dev->cpu = pr->id; dev->safe_state_index = -1; for (i = 0; i < CPUIDLE_STATE_MAX; i++) { @@ -1110,6 +1113,67 @@ int acpi_processor_cst_has_changed(struct acpi_processor *pr) return ret; } +int acpi_processor_cst_has_changed(struct acpi_processor *pr) +{ + int cpu; + struct acpi_processor *_pr; + + if (disabled_by_idle_boot_param()) + return 0; + + if (!pr) + return -EINVAL; + + if (nocst) + return -ENODEV; + + if (!pr->flags.power_setup_done) + return -ENODEV; + + /* + * FIXME: Design the ACPI notification to make it once per + * system instead of once per-cpu. This condition is a hack + * to make the code that updates C-States be called once. 
+ */ + + if (pr->id == 0 && cpuidle_get_driver() == &acpi_idle_driver) { + + cpuidle_pause_and_lock(); + /* Protect against cpu-hotplug */ + get_online_cpus(); + + /* Disable all cpuidle devices */ + for_each_online_cpu(cpu) { + _pr = per_cpu(processors, cpu); + if (!_pr || !_pr->flags.power_setup_done) + continue; + cpuidle_disable_device(&_pr->power.dev); + } + + /* Populate Updated C-state information */ + acpi_processor_get_power_info(pr); + acpi_processor_setup_cpuidle_states(pr); + + /* Enable all cpuidle devices */ + for_each_online_cpu(cpu) { + _pr = per_cpu(processors, cpu); + if (!_pr || !_pr->flags.power_setup_done) + continue; + acpi_processor_get_power_info(_pr); + if (_pr->flags.power) { + acpi_processor_setup_cpuidle_cx(_pr); + cpuidle_enable_device(&_pr->power.dev); + } + } + put_online_cpus(); + cpuidle_resume_and_unlock(); + } + + return 0; +} + +static int acpi_processor_registered; + int __cpuinit acpi_processor_power_init(struct acpi_processor *pr, struct acpi_device *device) { diff --git a/drivers/acpi/scan.c b/drivers/acpi/scan.c index 85cbfdccc97c..619a2e4402df 100644 --- a/drivers/acpi/scan.c +++ b/drivers/acpi/scan.c @@ -789,8 +789,8 @@ acpi_bus_extract_wakeup_device_power_package(acpi_handle handle, static void acpi_bus_set_run_wake_flags(struct acpi_device *device) { struct acpi_device_id button_device_ids[] = { - {"PNP0C0D", 0}, {"PNP0C0C", 0}, + {"PNP0C0D", 0}, {"PNP0C0E", 0}, {"", 0}, }; @@ -802,6 +802,11 @@ static void acpi_bus_set_run_wake_flags(struct acpi_device *device) /* Power button, Lid switch always enable wakeup */ if (!acpi_match_device_ids(device, button_device_ids)) { device->wakeup.flags.run_wake = 1; + if (!acpi_match_device_ids(device, &button_device_ids[1])) { + /* Do not use Lid/sleep button for S5 wakeup */ + if (device->wakeup.sleep_state == ACPI_STATE_S5) + device->wakeup.sleep_state = ACPI_STATE_S4; + } device_set_wakeup_capable(&device->dev, true); return; } @@ -1157,7 +1162,7 @@ static void acpi_device_set_id(struct acpi_device *device) acpi_add_id(device, ACPI_DOCK_HID); else if (!acpi_ibm_smbus_match(device)) acpi_add_id(device, ACPI_SMBUS_IBM_HID); - else if (!acpi_device_hid(device) && + else if (list_empty(&device->pnp.ids) && ACPI_IS_ROOT_DEVICE(device->parent)) { acpi_add_id(device, ACPI_BUS_HID); /* \_SB, LNXSYBUS */ strcpy(device->pnp.device_name, ACPI_BUS_DEVICE_NAME); diff --git a/drivers/acpi/sleep.c b/drivers/acpi/sleep.c index 2377445f4a3d..189c704a950f 100644 --- a/drivers/acpi/sleep.c +++ b/drivers/acpi/sleep.c @@ -138,6 +138,188 @@ void __init acpi_old_suspend_ordering(void) old_suspend_ordering = true; } +static int __init init_old_suspend_ordering(const struct dmi_system_id *d) +{ + acpi_old_suspend_ordering(); + return 0; +} + +static int __init init_nvs_nosave(const struct dmi_system_id *d) +{ + acpi_nvs_nosave(); + return 0; +} + +static struct dmi_system_id __initdata acpisleep_dmi_table[] = { + { + .callback = init_old_suspend_ordering, + .ident = "Abit KN9 (nForce4 variant)", + .matches = { + DMI_MATCH(DMI_BOARD_VENDOR, "http://www.abit.com.tw/"), + DMI_MATCH(DMI_BOARD_NAME, "KN9 Series(NF-CK804)"), + }, + }, + { + .callback = init_old_suspend_ordering, + .ident = "HP xw4600 Workstation", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"), + DMI_MATCH(DMI_PRODUCT_NAME, "HP xw4600 Workstation"), + }, + }, + { + .callback = init_old_suspend_ordering, + .ident = "Asus Pundit P1-AH2 (M2N8L motherboard)", + .matches = { + DMI_MATCH(DMI_BOARD_VENDOR, "ASUSTek Computer INC."), + DMI_MATCH(DMI_BOARD_NAME, 
"M2N8L"), + }, + }, + { + .callback = init_old_suspend_ordering, + .ident = "Panasonic CF51-2L", + .matches = { + DMI_MATCH(DMI_BOARD_VENDOR, + "Matsushita Electric Industrial Co.,Ltd."), + DMI_MATCH(DMI_BOARD_NAME, "CF51-2L"), + }, + }, + { + .callback = init_nvs_nosave, + .ident = "Sony Vaio VGN-FW41E_H", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"), + DMI_MATCH(DMI_PRODUCT_NAME, "VGN-FW41E_H"), + }, + }, + { + .callback = init_nvs_nosave, + .ident = "Sony Vaio VGN-FW21E", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"), + DMI_MATCH(DMI_PRODUCT_NAME, "VGN-FW21E"), + }, + }, + { + .callback = init_nvs_nosave, + .ident = "Sony Vaio VPCEB17FX", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"), + DMI_MATCH(DMI_PRODUCT_NAME, "VPCEB17FX"), + }, + }, + { + .callback = init_nvs_nosave, + .ident = "Sony Vaio VGN-SR11M", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"), + DMI_MATCH(DMI_PRODUCT_NAME, "VGN-SR11M"), + }, + }, + { + .callback = init_nvs_nosave, + .ident = "Everex StepNote Series", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Everex Systems, Inc."), + DMI_MATCH(DMI_PRODUCT_NAME, "Everex StepNote Series"), + }, + }, + { + .callback = init_nvs_nosave, + .ident = "Sony Vaio VPCEB1Z1E", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"), + DMI_MATCH(DMI_PRODUCT_NAME, "VPCEB1Z1E"), + }, + }, + { + .callback = init_nvs_nosave, + .ident = "Sony Vaio VGN-NW130D", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"), + DMI_MATCH(DMI_PRODUCT_NAME, "VGN-NW130D"), + }, + }, + { + .callback = init_nvs_nosave, + .ident = "Sony Vaio VPCCW29FX", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"), + DMI_MATCH(DMI_PRODUCT_NAME, "VPCCW29FX"), + }, + }, + { + .callback = init_nvs_nosave, + .ident = "Averatec AV1020-ED2", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "AVERATEC"), + DMI_MATCH(DMI_PRODUCT_NAME, "1000 Series"), + }, + }, + { + .callback = init_old_suspend_ordering, + .ident = "Asus A8N-SLI DELUXE", + .matches = { + DMI_MATCH(DMI_BOARD_VENDOR, "ASUSTeK Computer INC."), + DMI_MATCH(DMI_BOARD_NAME, "A8N-SLI DELUXE"), + }, + }, + { + .callback = init_old_suspend_ordering, + .ident = "Asus A8N-SLI Premium", + .matches = { + DMI_MATCH(DMI_BOARD_VENDOR, "ASUSTeK Computer INC."), + DMI_MATCH(DMI_BOARD_NAME, "A8N-SLI Premium"), + }, + }, + { + .callback = init_nvs_nosave, + .ident = "Sony Vaio VGN-SR26GN_P", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"), + DMI_MATCH(DMI_PRODUCT_NAME, "VGN-SR26GN_P"), + }, + }, + { + .callback = init_nvs_nosave, + .ident = "Sony Vaio VPCEB1S1E", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"), + DMI_MATCH(DMI_PRODUCT_NAME, "VPCEB1S1E"), + }, + }, + { + .callback = init_nvs_nosave, + .ident = "Sony Vaio VGN-FW520F", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"), + DMI_MATCH(DMI_PRODUCT_NAME, "VGN-FW520F"), + }, + }, + { + .callback = init_nvs_nosave, + .ident = "Asus K54C", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK Computer Inc."), + DMI_MATCH(DMI_PRODUCT_NAME, "K54C"), + }, + }, + { + .callback = init_nvs_nosave, + .ident = "Asus K54HR", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK Computer Inc."), + DMI_MATCH(DMI_PRODUCT_NAME, "K54HR"), + }, + }, + {}, +}; + +static void acpi_sleep_dmi_check(void) +{ + dmi_check_system(acpisleep_dmi_table); +} + /** * acpi_pm_freeze - Disable the GPEs and suspend EC transactions. 
*/ @@ -227,6 +409,7 @@ static void acpi_pm_end(void) } #else /* !CONFIG_ACPI_SLEEP */ #define acpi_target_sleep_state ACPI_STATE_S0 +static inline void acpi_sleep_dmi_check(void) {} #endif /* CONFIG_ACPI_SLEEP */ #ifdef CONFIG_SUSPEND @@ -371,167 +554,6 @@ static const struct platform_suspend_ops acpi_suspend_ops_old = { .end = acpi_pm_end, .recover = acpi_pm_finish, }; - -static int __init init_old_suspend_ordering(const struct dmi_system_id *d) -{ - old_suspend_ordering = true; - return 0; -} - -static int __init init_nvs_nosave(const struct dmi_system_id *d) -{ - acpi_nvs_nosave(); - return 0; -} - -static struct dmi_system_id __initdata acpisleep_dmi_table[] = { - { - .callback = init_old_suspend_ordering, - .ident = "Abit KN9 (nForce4 variant)", - .matches = { - DMI_MATCH(DMI_BOARD_VENDOR, "http://www.abit.com.tw/"), - DMI_MATCH(DMI_BOARD_NAME, "KN9 Series(NF-CK804)"), - }, - }, - { - .callback = init_old_suspend_ordering, - .ident = "HP xw4600 Workstation", - .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"), - DMI_MATCH(DMI_PRODUCT_NAME, "HP xw4600 Workstation"), - }, - }, - { - .callback = init_old_suspend_ordering, - .ident = "Asus Pundit P1-AH2 (M2N8L motherboard)", - .matches = { - DMI_MATCH(DMI_BOARD_VENDOR, "ASUSTek Computer INC."), - DMI_MATCH(DMI_BOARD_NAME, "M2N8L"), - }, - }, - { - .callback = init_old_suspend_ordering, - .ident = "Panasonic CF51-2L", - .matches = { - DMI_MATCH(DMI_BOARD_VENDOR, - "Matsushita Electric Industrial Co.,Ltd."), - DMI_MATCH(DMI_BOARD_NAME, "CF51-2L"), - }, - }, - { - .callback = init_nvs_nosave, - .ident = "Sony Vaio VGN-FW21E", - .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"), - DMI_MATCH(DMI_PRODUCT_NAME, "VGN-FW21E"), - }, - }, - { - .callback = init_nvs_nosave, - .ident = "Sony Vaio VPCEB17FX", - .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"), - DMI_MATCH(DMI_PRODUCT_NAME, "VPCEB17FX"), - }, - }, - { - .callback = init_nvs_nosave, - .ident = "Sony Vaio VGN-SR11M", - .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"), - DMI_MATCH(DMI_PRODUCT_NAME, "VGN-SR11M"), - }, - }, - { - .callback = init_nvs_nosave, - .ident = "Everex StepNote Series", - .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "Everex Systems, Inc."), - DMI_MATCH(DMI_PRODUCT_NAME, "Everex StepNote Series"), - }, - }, - { - .callback = init_nvs_nosave, - .ident = "Sony Vaio VPCEB1Z1E", - .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"), - DMI_MATCH(DMI_PRODUCT_NAME, "VPCEB1Z1E"), - }, - }, - { - .callback = init_nvs_nosave, - .ident = "Sony Vaio VGN-NW130D", - .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"), - DMI_MATCH(DMI_PRODUCT_NAME, "VGN-NW130D"), - }, - }, - { - .callback = init_nvs_nosave, - .ident = "Sony Vaio VPCCW29FX", - .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"), - DMI_MATCH(DMI_PRODUCT_NAME, "VPCCW29FX"), - }, - }, - { - .callback = init_nvs_nosave, - .ident = "Averatec AV1020-ED2", - .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "AVERATEC"), - DMI_MATCH(DMI_PRODUCT_NAME, "1000 Series"), - }, - }, - { - .callback = init_old_suspend_ordering, - .ident = "Asus A8N-SLI DELUXE", - .matches = { - DMI_MATCH(DMI_BOARD_VENDOR, "ASUSTeK Computer INC."), - DMI_MATCH(DMI_BOARD_NAME, "A8N-SLI DELUXE"), - }, - }, - { - .callback = init_old_suspend_ordering, - .ident = "Asus A8N-SLI Premium", - .matches = { - DMI_MATCH(DMI_BOARD_VENDOR, "ASUSTeK Computer INC."), - DMI_MATCH(DMI_BOARD_NAME, "A8N-SLI Premium"), - }, - }, - { - .callback = init_nvs_nosave, - .ident = "Sony Vaio VGN-SR26GN_P", - .matches = 
{ - DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"), - DMI_MATCH(DMI_PRODUCT_NAME, "VGN-SR26GN_P"), - }, - }, - { - .callback = init_nvs_nosave, - .ident = "Sony Vaio VGN-FW520F", - .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"), - DMI_MATCH(DMI_PRODUCT_NAME, "VGN-FW520F"), - }, - }, - { - .callback = init_nvs_nosave, - .ident = "Asus K54C", - .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK Computer Inc."), - DMI_MATCH(DMI_PRODUCT_NAME, "K54C"), - }, - }, - { - .callback = init_nvs_nosave, - .ident = "Asus K54HR", - .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK Computer Inc."), - DMI_MATCH(DMI_PRODUCT_NAME, "K54HR"), - }, - }, - {}, -}; #endif /* CONFIG_SUSPEND */ #ifdef CONFIG_HIBERNATION @@ -872,13 +894,13 @@ int __init acpi_sleep_init(void) u8 type_a, type_b; #ifdef CONFIG_SUSPEND int i = 0; - - dmi_check_system(acpisleep_dmi_table); #endif if (acpi_disabled) return 0; + acpi_sleep_dmi_check(); + sleep_states[ACPI_STATE_S0] = 1; printk(KERN_INFO PREFIX "(supports S0"); diff --git a/drivers/acpi/video.c b/drivers/acpi/video.c index 48b5a3cda379..91357e1a1a88 100644 --- a/drivers/acpi/video.c +++ b/drivers/acpi/video.c @@ -389,6 +389,12 @@ static int __init video_set_bqc_offset(const struct dmi_system_id *d) return 0; } +static int video_ignore_initial_backlight(const struct dmi_system_id *d) +{ + use_bios_initial_backlight = 0; + return 0; +} + static struct dmi_system_id video_dmi_table[] __initdata = { /* * Broken _BQC workaround http://bugzilla.kernel.org/show_bug.cgi?id=13121 @@ -433,6 +439,14 @@ static struct dmi_system_id video_dmi_table[] __initdata = { DMI_MATCH(DMI_PRODUCT_NAME, "Aspire 7720"), }, }, + { + .callback = video_ignore_initial_backlight, + .ident = "HP Folio 13-2000", + .matches = { + DMI_MATCH(DMI_BOARD_VENDOR, "Hewlett-Packard"), + DMI_MATCH(DMI_PRODUCT_NAME, "HP Folio 13 - 2000 Notebook PC"), + }, + }, {} }; @@ -1345,12 +1359,15 @@ static int acpi_video_bus_get_devices(struct acpi_video_bus *video, struct acpi_device *device) { - int status; + int status = 0; struct acpi_device *dev; - status = acpi_video_device_enumerate(video); - if (status) - return status; + /* + * There are systems where video module known to work fine regardless + * of broken _DOD and ignoring returned value here doesn't cause + * any issues later. 
+ */ + acpi_video_device_enumerate(video); list_for_each_entry(dev, &device->children, node) { diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c index ebaf67e4b2bc..71a4d040f140 100644 --- a/drivers/ata/ahci.c +++ b/drivers/ata/ahci.c @@ -53,6 +53,7 @@ enum { AHCI_PCI_BAR_STA2X11 = 0, + AHCI_PCI_BAR_ENMOTUS = 2, AHCI_PCI_BAR_STANDARD = 5, }; @@ -396,6 +397,8 @@ static const struct pci_device_id ahci_pci_tbl[] = { .driver_data = board_ahci_yes_fbs }, /* 88se9125 */ { PCI_DEVICE(0x1b4b, 0x917a), .driver_data = board_ahci_yes_fbs }, /* 88se9172 */ + { PCI_DEVICE(0x1b4b, 0x9192), + .driver_data = board_ahci_yes_fbs }, /* 88se9172 on some Gigabyte */ { PCI_DEVICE(0x1b4b, 0x91a3), .driver_data = board_ahci_yes_fbs }, @@ -403,7 +406,13 @@ static const struct pci_device_id ahci_pci_tbl[] = { { PCI_VDEVICE(PROMISE, 0x3f20), board_ahci }, /* PDC42819 */ /* Asmedia */ - { PCI_VDEVICE(ASMEDIA, 0x0612), board_ahci }, /* ASM1061 */ + { PCI_VDEVICE(ASMEDIA, 0x0601), board_ahci }, /* ASM1060 */ + { PCI_VDEVICE(ASMEDIA, 0x0602), board_ahci }, /* ASM1060 */ + { PCI_VDEVICE(ASMEDIA, 0x0611), board_ahci }, /* ASM1061 */ + { PCI_VDEVICE(ASMEDIA, 0x0612), board_ahci }, /* ASM1062 */ + + /* Enmotus */ + { PCI_DEVICE(0x1c44, 0x8000), board_ahci }, /* Generic, PCI class code for AHCI */ { PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID, @@ -1077,9 +1086,11 @@ static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) dev_info(&pdev->dev, "PDC42819 can only drive SATA devices with this driver\n"); - /* The Connext uses non-standard BAR */ + /* Both Connext and Enmotus devices use non-standard BARs */ if (pdev->vendor == PCI_VENDOR_ID_STMICRO && pdev->device == 0xCC06) ahci_pci_bar = AHCI_PCI_BAR_STA2X11; + else if (pdev->vendor == 0x1c44 && pdev->device == 0x8000) + ahci_pci_bar = AHCI_PCI_BAR_ENMOTUS; /* acquire resources */ rc = pcim_enable_device(pdev); diff --git a/drivers/ata/ata_piix.c b/drivers/ata/ata_piix.c index 3c809bfbccf5..88f6908c31fe 100644 --- a/drivers/ata/ata_piix.c +++ b/drivers/ata/ata_piix.c @@ -331,6 +331,23 @@ static const struct pci_device_id piix_pci_tbl[] = { { 0x8086, 0x8c09, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata }, /* SATA Controller IDE (DH89xxCC) */ { 0x8086, 0x2326, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata }, + /* SATA Controller IDE (Avoton) */ + { 0x8086, 0x1f20, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_sata_snb }, + /* SATA Controller IDE (Avoton) */ + { 0x8086, 0x1f21, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_sata_snb }, + /* SATA Controller IDE (Avoton) */ + { 0x8086, 0x1f30, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata }, + /* SATA Controller IDE (Avoton) */ + { 0x8086, 0x1f31, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata }, + /* SATA Controller IDE (Wellsburg) */ + { 0x8086, 0x8d00, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_sata_snb }, + /* SATA Controller IDE (Wellsburg) */ + { 0x8086, 0x8d08, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata }, + /* SATA Controller IDE (Wellsburg) */ + { 0x8086, 0x8d60, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_sata_snb }, + /* SATA Controller IDE (Wellsburg) */ + { 0x8086, 0x8d68, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata }, + { } /* terminate list */ }; diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c index d31ee557b395..4b4caa3db6ca 100644 --- a/drivers/ata/libata-core.c +++ b/drivers/ata/libata-core.c @@ -2536,6 +2536,7 @@ int ata_bus_probe(struct ata_port *ap) * bus as we may be talking too fast. 
*/ dev->pio_mode = XFER_PIO_0; + dev->dma_mode = 0xff; /* If the controller has a pio mode setup function * then use it to set the chipset to rights. Don't @@ -4125,6 +4126,7 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = { /* Devices which aren't very happy with higher link speeds */ { "WD My Book", NULL, ATA_HORKAGE_1_5_GBPS, }, + { "Seagate FreeAgent GoFlex", NULL, ATA_HORKAGE_1_5_GBPS, }, /* * Devices which choke on SETXFER. Applies only if both the diff --git a/drivers/ata/libata-eh.c b/drivers/ata/libata-eh.c index d1fbd59ead16..e47c224d7c28 100644 --- a/drivers/ata/libata-eh.c +++ b/drivers/ata/libata-eh.c @@ -2600,6 +2600,7 @@ int ata_eh_reset(struct ata_link *link, int classify, * bus as we may be talking too fast. */ dev->pio_mode = XFER_PIO_0; + dev->dma_mode = 0xff; /* If the controller has a pio mode setup function * then use it to set the chipset to rights. Don't diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c index 22226350cd0c..15863a4b6190 100644 --- a/drivers/ata/libata-scsi.c +++ b/drivers/ata/libata-scsi.c @@ -309,7 +309,8 @@ ata_scsi_activity_show(struct device *dev, struct device_attribute *attr, struct ata_port *ap = ata_shost_to_port(sdev->host); struct ata_device *atadev = ata_scsi_find_dev(ap, sdev); - if (ap->ops->sw_activity_show && (ap->flags & ATA_FLAG_SW_ACTIVITY)) + if (atadev && ap->ops->sw_activity_show && + (ap->flags & ATA_FLAG_SW_ACTIVITY)) return ap->ops->sw_activity_show(atadev, buf); return -EINVAL; } @@ -324,7 +325,8 @@ ata_scsi_activity_store(struct device *dev, struct device_attribute *attr, enum sw_activity val; int rc; - if (ap->ops->sw_activity_store && (ap->flags & ATA_FLAG_SW_ACTIVITY)) { + if (atadev && ap->ops->sw_activity_store && + (ap->flags & ATA_FLAG_SW_ACTIVITY)) { val = simple_strtoul(buf, NULL, 0); switch (val) { case OFF: case BLINK_ON: case BLINK_OFF: diff --git a/drivers/ata/sata_promise.c b/drivers/ata/sata_promise.c index 000fcc99e01d..ef6e328784e9 100644 --- a/drivers/ata/sata_promise.c +++ b/drivers/ata/sata_promise.c @@ -147,6 +147,10 @@ struct pdc_port_priv { dma_addr_t pkt_dma; }; +struct pdc_host_priv { + spinlock_t hard_reset_lock; +}; + static int pdc_sata_scr_read(struct ata_link *link, unsigned int sc_reg, u32 *val); static int pdc_sata_scr_write(struct ata_link *link, unsigned int sc_reg, u32 val); static int pdc_ata_init_one(struct pci_dev *pdev, const struct pci_device_id *ent); @@ -801,9 +805,10 @@ static void pdc_hard_reset_port(struct ata_port *ap) void __iomem *host_mmio = ap->host->iomap[PDC_MMIO_BAR]; void __iomem *pcictl_b1_mmio = host_mmio + PDC_PCI_CTL + 1; unsigned int ata_no = pdc_ata_port_to_ata_no(ap); + struct pdc_host_priv *hpriv = ap->host->private_data; u8 tmp; - spin_lock(&ap->host->lock); + spin_lock(&hpriv->hard_reset_lock); tmp = readb(pcictl_b1_mmio); tmp &= ~(0x10 << ata_no); @@ -814,7 +819,7 @@ static void pdc_hard_reset_port(struct ata_port *ap) writeb(tmp, pcictl_b1_mmio); readb(pcictl_b1_mmio); /* flush */ - spin_unlock(&ap->host->lock); + spin_unlock(&hpriv->hard_reset_lock); } static int pdc_sata_hardreset(struct ata_link *link, unsigned int *class, @@ -1182,6 +1187,7 @@ static int pdc_ata_init_one(struct pci_dev *pdev, const struct ata_port_info *pi = &pdc_port_info[ent->driver_data]; const struct ata_port_info *ppi[PDC_MAX_PORTS]; struct ata_host *host; + struct pdc_host_priv *hpriv; void __iomem *host_mmio; int n_ports, i, rc; int is_sataii_tx4; @@ -1218,6 +1224,11 @@ static int pdc_ata_init_one(struct pci_dev *pdev, dev_err(&pdev->dev, 
"failed to allocate host\n"); return -ENOMEM; } + hpriv = devm_kzalloc(&pdev->dev, sizeof *hpriv, GFP_KERNEL); + if (!hpriv) + return -ENOMEM; + spin_lock_init(&hpriv->hard_reset_lock); + host->private_data = hpriv; host->iomap = pcim_iomap_table(pdev); is_sataii_tx4 = pdc_is_sataii_tx4(pi->flags); diff --git a/drivers/ata/sata_svw.c b/drivers/ata/sata_svw.c index c646118943ff..833607f5ae43 100644 --- a/drivers/ata/sata_svw.c +++ b/drivers/ata/sata_svw.c @@ -142,6 +142,39 @@ static int k2_sata_scr_write(struct ata_link *link, return 0; } +static int k2_sata_softreset(struct ata_link *link, + unsigned int *class, unsigned long deadline) +{ + u8 dmactl; + void __iomem *mmio = link->ap->ioaddr.bmdma_addr; + + dmactl = readb(mmio + ATA_DMA_CMD); + + /* Clear the start bit */ + if (dmactl & ATA_DMA_START) { + dmactl &= ~ATA_DMA_START; + writeb(dmactl, mmio + ATA_DMA_CMD); + } + + return ata_sff_softreset(link, class, deadline); +} + +static int k2_sata_hardreset(struct ata_link *link, + unsigned int *class, unsigned long deadline) +{ + u8 dmactl; + void __iomem *mmio = link->ap->ioaddr.bmdma_addr; + + dmactl = readb(mmio + ATA_DMA_CMD); + + /* Clear the start bit */ + if (dmactl & ATA_DMA_START) { + dmactl &= ~ATA_DMA_START; + writeb(dmactl, mmio + ATA_DMA_CMD); + } + + return sata_sff_hardreset(link, class, deadline); +} static void k2_sata_tf_load(struct ata_port *ap, const struct ata_taskfile *tf) { @@ -346,6 +379,8 @@ static struct scsi_host_template k2_sata_sht = { static struct ata_port_operations k2_sata_ops = { .inherits = &ata_bmdma_port_ops, + .softreset = k2_sata_softreset, + .hardreset = k2_sata_hardreset, .sff_tf_load = k2_sata_tf_load, .sff_tf_read = k2_sata_tf_read, .sff_check_status = k2_stat_check_status, diff --git a/drivers/atm/iphase.h b/drivers/atm/iphase.h index 6a0955e6d4fc..53ecac5a2161 100644 --- a/drivers/atm/iphase.h +++ b/drivers/atm/iphase.h @@ -636,82 +636,82 @@ struct rx_buf_desc { #define SEG_BASE IPHASE5575_FRAG_CONTROL_REG_BASE #define REASS_BASE IPHASE5575_REASS_CONTROL_REG_BASE -typedef volatile u_int freg_t; +typedef volatile u_int ffreg_t; typedef u_int rreg_t; typedef struct _ffredn_t { - freg_t idlehead_high; /* Idle cell header (high) */ - freg_t idlehead_low; /* Idle cell header (low) */ - freg_t maxrate; /* Maximum rate */ - freg_t stparms; /* Traffic Management Parameters */ - freg_t abrubr_abr; /* ABRUBR Priority Byte 1, TCR Byte 0 */ - freg_t rm_type; /* */ - u_int filler5[0x17 - 0x06]; - freg_t cmd_reg; /* Command register */ - u_int filler18[0x20 - 0x18]; - freg_t cbr_base; /* CBR Pointer Base */ - freg_t vbr_base; /* VBR Pointer Base */ - freg_t abr_base; /* ABR Pointer Base */ - freg_t ubr_base; /* UBR Pointer Base */ - u_int filler24; - freg_t vbrwq_base; /* VBR Wait Queue Base */ - freg_t abrwq_base; /* ABR Wait Queue Base */ - freg_t ubrwq_base; /* UBR Wait Queue Base */ - freg_t vct_base; /* Main VC Table Base */ - freg_t vcte_base; /* Extended Main VC Table Base */ - u_int filler2a[0x2C - 0x2A]; - freg_t cbr_tab_beg; /* CBR Table Begin */ - freg_t cbr_tab_end; /* CBR Table End */ - freg_t cbr_pointer; /* CBR Pointer */ - u_int filler2f[0x30 - 0x2F]; - freg_t prq_st_adr; /* Packet Ready Queue Start Address */ - freg_t prq_ed_adr; /* Packet Ready Queue End Address */ - freg_t prq_rd_ptr; /* Packet Ready Queue read pointer */ - freg_t prq_wr_ptr; /* Packet Ready Queue write pointer */ - freg_t tcq_st_adr; /* Transmit Complete Queue Start Address*/ - freg_t tcq_ed_adr; /* Transmit Complete Queue End Address */ - freg_t tcq_rd_ptr; /* Transmit 
Complete Queue read pointer */ - freg_t tcq_wr_ptr; /* Transmit Complete Queue write pointer*/ - u_int filler38[0x40 - 0x38]; - freg_t queue_base; /* Base address for PRQ and TCQ */ - freg_t desc_base; /* Base address of descriptor table */ - u_int filler42[0x45 - 0x42]; - freg_t mode_reg_0; /* Mode register 0 */ - freg_t mode_reg_1; /* Mode register 1 */ - freg_t intr_status_reg;/* Interrupt Status register */ - freg_t mask_reg; /* Mask Register */ - freg_t cell_ctr_high1; /* Total cell transfer count (high) */ - freg_t cell_ctr_lo1; /* Total cell transfer count (low) */ - freg_t state_reg; /* Status register */ - u_int filler4c[0x58 - 0x4c]; - freg_t curr_desc_num; /* Contains the current descriptor num */ - freg_t next_desc; /* Next descriptor */ - freg_t next_vc; /* Next VC */ - u_int filler5b[0x5d - 0x5b]; - freg_t present_slot_cnt;/* Present slot count */ - u_int filler5e[0x6a - 0x5e]; - freg_t new_desc_num; /* New descriptor number */ - freg_t new_vc; /* New VC */ - freg_t sched_tbl_ptr; /* Schedule table pointer */ - freg_t vbrwq_wptr; /* VBR wait queue write pointer */ - freg_t vbrwq_rptr; /* VBR wait queue read pointer */ - freg_t abrwq_wptr; /* ABR wait queue write pointer */ - freg_t abrwq_rptr; /* ABR wait queue read pointer */ - freg_t ubrwq_wptr; /* UBR wait queue write pointer */ - freg_t ubrwq_rptr; /* UBR wait queue read pointer */ - freg_t cbr_vc; /* CBR VC */ - freg_t vbr_sb_vc; /* VBR SB VC */ - freg_t abr_sb_vc; /* ABR SB VC */ - freg_t ubr_sb_vc; /* UBR SB VC */ - freg_t vbr_next_link; /* VBR next link */ - freg_t abr_next_link; /* ABR next link */ - freg_t ubr_next_link; /* UBR next link */ - u_int filler7a[0x7c-0x7a]; - freg_t out_rate_head; /* Out of rate head */ - u_int filler7d[0xca-0x7d]; /* pad out to full address space */ - freg_t cell_ctr_high1_nc;/* Total cell transfer count (high) */ - freg_t cell_ctr_lo1_nc;/* Total cell transfer count (low) */ - u_int fillercc[0x100-0xcc]; /* pad out to full address space */ + ffreg_t idlehead_high; /* Idle cell header (high) */ + ffreg_t idlehead_low; /* Idle cell header (low) */ + ffreg_t maxrate; /* Maximum rate */ + ffreg_t stparms; /* Traffic Management Parameters */ + ffreg_t abrubr_abr; /* ABRUBR Priority Byte 1, TCR Byte 0 */ + ffreg_t rm_type; /* */ + u_int filler5[0x17 - 0x06]; + ffreg_t cmd_reg; /* Command register */ + u_int filler18[0x20 - 0x18]; + ffreg_t cbr_base; /* CBR Pointer Base */ + ffreg_t vbr_base; /* VBR Pointer Base */ + ffreg_t abr_base; /* ABR Pointer Base */ + ffreg_t ubr_base; /* UBR Pointer Base */ + u_int filler24; + ffreg_t vbrwq_base; /* VBR Wait Queue Base */ + ffreg_t abrwq_base; /* ABR Wait Queue Base */ + ffreg_t ubrwq_base; /* UBR Wait Queue Base */ + ffreg_t vct_base; /* Main VC Table Base */ + ffreg_t vcte_base; /* Extended Main VC Table Base */ + u_int filler2a[0x2C - 0x2A]; + ffreg_t cbr_tab_beg; /* CBR Table Begin */ + ffreg_t cbr_tab_end; /* CBR Table End */ + ffreg_t cbr_pointer; /* CBR Pointer */ + u_int filler2f[0x30 - 0x2F]; + ffreg_t prq_st_adr; /* Packet Ready Queue Start Address */ + ffreg_t prq_ed_adr; /* Packet Ready Queue End Address */ + ffreg_t prq_rd_ptr; /* Packet Ready Queue read pointer */ + ffreg_t prq_wr_ptr; /* Packet Ready Queue write pointer */ + ffreg_t tcq_st_adr; /* Transmit Complete Queue Start Address*/ + ffreg_t tcq_ed_adr; /* Transmit Complete Queue End Address */ + ffreg_t tcq_rd_ptr; /* Transmit Complete Queue read pointer */ + ffreg_t tcq_wr_ptr; /* Transmit Complete Queue write pointer*/ + u_int filler38[0x40 - 0x38]; + ffreg_t queue_base; /* 
Base address for PRQ and TCQ */ + ffreg_t desc_base; /* Base address of descriptor table */ + u_int filler42[0x45 - 0x42]; + ffreg_t mode_reg_0; /* Mode register 0 */ + ffreg_t mode_reg_1; /* Mode register 1 */ + ffreg_t intr_status_reg;/* Interrupt Status register */ + ffreg_t mask_reg; /* Mask Register */ + ffreg_t cell_ctr_high1; /* Total cell transfer count (high) */ + ffreg_t cell_ctr_lo1; /* Total cell transfer count (low) */ + ffreg_t state_reg; /* Status register */ + u_int filler4c[0x58 - 0x4c]; + ffreg_t curr_desc_num; /* Contains the current descriptor num */ + ffreg_t next_desc; /* Next descriptor */ + ffreg_t next_vc; /* Next VC */ + u_int filler5b[0x5d - 0x5b]; + ffreg_t present_slot_cnt;/* Present slot count */ + u_int filler5e[0x6a - 0x5e]; + ffreg_t new_desc_num; /* New descriptor number */ + ffreg_t new_vc; /* New VC */ + ffreg_t sched_tbl_ptr; /* Schedule table pointer */ + ffreg_t vbrwq_wptr; /* VBR wait queue write pointer */ + ffreg_t vbrwq_rptr; /* VBR wait queue read pointer */ + ffreg_t abrwq_wptr; /* ABR wait queue write pointer */ + ffreg_t abrwq_rptr; /* ABR wait queue read pointer */ + ffreg_t ubrwq_wptr; /* UBR wait queue write pointer */ + ffreg_t ubrwq_rptr; /* UBR wait queue read pointer */ + ffreg_t cbr_vc; /* CBR VC */ + ffreg_t vbr_sb_vc; /* VBR SB VC */ + ffreg_t abr_sb_vc; /* ABR SB VC */ + ffreg_t ubr_sb_vc; /* UBR SB VC */ + ffreg_t vbr_next_link; /* VBR next link */ + ffreg_t abr_next_link; /* ABR next link */ + ffreg_t ubr_next_link; /* UBR next link */ + u_int filler7a[0x7c-0x7a]; + ffreg_t out_rate_head; /* Out of rate head */ + u_int filler7d[0xca-0x7d]; /* pad out to full address space */ + ffreg_t cell_ctr_high1_nc;/* Total cell transfer count (high) */ + ffreg_t cell_ctr_lo1_nc;/* Total cell transfer count (low) */ + u_int fillercc[0x100-0xcc]; /* pad out to full address space */ } ffredn_t; typedef struct _rfredn_t { diff --git a/drivers/atm/solos-pci.c b/drivers/atm/solos-pci.c index 98510931c815..1853a45c668c 100644 --- a/drivers/atm/solos-pci.c +++ b/drivers/atm/solos-pci.c @@ -967,10 +967,11 @@ static uint32_t fpga_tx(struct solos_card *card) for (port = 0; tx_pending; tx_pending >>= 1, port++) { if (tx_pending & 1) { struct sk_buff *oldskb = card->tx_skb[port]; - if (oldskb) + if (oldskb) { pci_unmap_single(card->dev, SKB_CB(oldskb)->dma_addr, oldskb->len, PCI_DMA_TODEVICE); - + card->tx_skb[port] = NULL; + } spin_lock(&card->tx_queue_lock); skb = skb_dequeue(&card->tx_queue[port]); if (!skb) diff --git a/drivers/base/bus.c b/drivers/base/bus.c index 26a06b801b5b..b850cecb147b 100644 --- a/drivers/base/bus.c +++ b/drivers/base/bus.c @@ -294,7 +294,7 @@ int bus_for_each_dev(struct bus_type *bus, struct device *start, struct device *dev; int error = 0; - if (!bus) + if (!bus || !bus->p) return -EINVAL; klist_iter_init_node(&bus->p->klist_devices, &i, @@ -328,7 +328,7 @@ struct device *bus_find_device(struct bus_type *bus, struct klist_iter i; struct device *dev; - if (!bus) + if (!bus || !bus->p) return NULL; klist_iter_init_node(&bus->p->klist_devices, &i, diff --git a/drivers/base/dd.c b/drivers/base/dd.c index 1b1cbb571d38..97fc774e6bc2 100644 --- a/drivers/base/dd.c +++ b/drivers/base/dd.c @@ -160,6 +160,8 @@ static int deferred_probe_initcall(void) driver_deferred_probe_enable = true; driver_deferred_probe_trigger(); + /* Sort as many dependencies as possible before exiting initcalls */ + flush_workqueue(deferred_wq); return 0; } late_initcall(deferred_probe_initcall); diff --git a/drivers/base/power/main.c 
b/drivers/base/power/main.c index ab7f180bb919..77e3b97b9e8c 100644 --- a/drivers/base/power/main.c +++ b/drivers/base/power/main.c @@ -1054,7 +1054,7 @@ int dpm_suspend_end(pm_message_t state) error = dpm_suspend_noirq(state); if (error) { - dpm_resume_early(state); + dpm_resume_early(resume_event(state)); return error; } diff --git a/drivers/base/power/qos.c b/drivers/base/power/qos.c index fd849a2c4fa8..b95ebf2d56ec 100644 --- a/drivers/base/power/qos.c +++ b/drivers/base/power/qos.c @@ -451,7 +451,7 @@ int dev_pm_qos_add_ancestor_request(struct device *dev, if (ancestor) error = dev_pm_qos_add_request(ancestor, req, value); - if (error) + if (error < 0) req->dev = NULL; return error; diff --git a/drivers/base/regmap/regmap-debugfs.c b/drivers/base/regmap/regmap-debugfs.c index bb1ff175b962..c39404167dc8 100644 --- a/drivers/base/regmap/regmap-debugfs.c +++ b/drivers/base/regmap/regmap-debugfs.c @@ -90,7 +90,7 @@ static ssize_t regmap_map_read_file(struct file *file, char __user *user_buf, /* If we're in the region the user is trying to read */ if (p >= *ppos) { /* ...but not beyond it */ - if (buf_pos >= count - 1 - tot_len) + if (buf_pos + 1 + tot_len >= count) break; /* Format the register */ diff --git a/drivers/bcma/driver_mips.c b/drivers/bcma/driver_mips.c index c3e9dff4224e..041fddfd01a0 100644 --- a/drivers/bcma/driver_mips.c +++ b/drivers/bcma/driver_mips.c @@ -115,7 +115,7 @@ static void bcma_core_mips_set_irq(struct bcma_device *dev, unsigned int irq) bcma_read32(mdev, BCMA_MIPS_MIPS74K_INTMASK(0)) & ~(1 << irqflag)); else - bcma_write32(mdev, BCMA_MIPS_MIPS74K_INTMASK(irq), 0); + bcma_write32(mdev, BCMA_MIPS_MIPS74K_INTMASK(oldirq), 0); /* assign the new one */ if (irq == 0) { diff --git a/drivers/bcma/main.c b/drivers/bcma/main.c index 7e138ec21357..edb9233b26d2 100644 --- a/drivers/bcma/main.c +++ b/drivers/bcma/main.c @@ -131,9 +131,10 @@ static int bcma_register_cores(struct bcma_bus *bus) static void bcma_unregister_cores(struct bcma_bus *bus) { - struct bcma_device *core; + struct bcma_device *core, *tmp; - list_for_each_entry(core, &bus->cores, list) { + list_for_each_entry_safe(core, tmp, &bus->cores, list) { + list_del(&core->list); if (core->dev_registered) device_unregister(&core->dev); } diff --git a/drivers/block/aoe/aoe.h b/drivers/block/aoe/aoe.h index db195abad698..e49ddd0aea12 100644 --- a/drivers/block/aoe/aoe.h +++ b/drivers/block/aoe/aoe.h @@ -1,5 +1,5 @@ /* Copyright (c) 2007 Coraid, Inc. See COPYING for GPL terms. 
*/ -#define VERSION "47" +#define VERSION "47q" #define AOE_MAJOR 152 #define DEVICE_NAME "aoe" diff --git a/drivers/block/aoe/aoeblk.c b/drivers/block/aoe/aoeblk.c index 321de7b6c442..7eca46349c4c 100644 --- a/drivers/block/aoe/aoeblk.c +++ b/drivers/block/aoe/aoeblk.c @@ -276,8 +276,6 @@ aoeblk_gdalloc(void *vp) goto err_mempool; blk_queue_make_request(d->blkq, aoeblk_make_request); d->blkq->backing_dev_info.name = "aoe"; - if (bdi_init(&d->blkq->backing_dev_info)) - goto err_blkq; spin_lock_irqsave(&d->lock, flags); gd->major = AOE_MAJOR; gd->first_minor = d->sysminor * AOE_PARTITIONS; @@ -298,9 +296,6 @@ aoeblk_gdalloc(void *vp) aoedisk_add_sysfs(d); return; -err_blkq: - blk_cleanup_queue(d->blkq); - d->blkq = NULL; err_mempool: mempool_destroy(d->bufpool); err_disk: diff --git a/drivers/block/aoe/aoecmd.c b/drivers/block/aoe/aoecmd.c index de0435e63b02..887f68f6d79a 100644 --- a/drivers/block/aoe/aoecmd.c +++ b/drivers/block/aoe/aoecmd.c @@ -35,6 +35,7 @@ new_skb(ulong len) skb_reset_mac_header(skb); skb_reset_network_header(skb); skb->protocol = __constant_htons(ETH_P_AOE); + skb_checksum_none_assert(skb); } return skb; } diff --git a/drivers/block/cciss_scsi.c b/drivers/block/cciss_scsi.c index acda773b3720..da3311129a0c 100644 --- a/drivers/block/cciss_scsi.c +++ b/drivers/block/cciss_scsi.c @@ -763,16 +763,7 @@ static void complete_scsi_command(CommandList_struct *c, int timeout, { case CMD_TARGET_STATUS: /* Pass it up to the upper layers... */ - if( ei->ScsiStatus) - { -#if 0 - printk(KERN_WARNING "cciss: cmd %p " - "has SCSI Status = %x\n", - c, ei->ScsiStatus); -#endif - cmd->result |= (ei->ScsiStatus << 1); - } - else { /* scsi status is zero??? How??? */ + if (!ei->ScsiStatus) { /* Ordinarily, this case should never happen, but there is a bug in some released firmware revisions that allows it to happen @@ -804,6 +795,7 @@ static void complete_scsi_command(CommandList_struct *c, int timeout, } break; case CMD_PROTOCOL_ERR: + cmd->result = DID_ERROR << 16; dev_warn(&h->pdev->dev, "%p has protocol error\n", c); break; diff --git a/drivers/block/drbd/drbd_req.c b/drivers/block/drbd/drbd_req.c index 4a0f314086e5..be984e073666 100644 --- a/drivers/block/drbd/drbd_req.c +++ b/drivers/block/drbd/drbd_req.c @@ -37,6 +37,7 @@ static void _drbd_start_io_acct(struct drbd_conf *mdev, struct drbd_request *req const int rw = bio_data_dir(bio); int cpu; cpu = part_stat_lock(); + part_round_stats(cpu, &mdev->vdisk->part0); part_stat_inc(cpu, &mdev->vdisk->part0, ios[rw]); part_stat_add(cpu, &mdev->vdisk->part0, sectors[rw], bio_sectors(bio)); part_inc_in_flight(&mdev->vdisk->part0, rw); diff --git a/drivers/block/floppy.c b/drivers/block/floppy.c index b0b00d70c166..c82f06e639f8 100644 --- a/drivers/block/floppy.c +++ b/drivers/block/floppy.c @@ -4161,6 +4161,7 @@ static int __init floppy_init(void) disks[dr]->queue = blk_init_queue(do_fd_request, &floppy_lock); if (!disks[dr]->queue) { + put_disk(disks[dr]); err = -ENOMEM; goto out_put_disk; } diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c index 061427a75d37..3c4c225d1981 100644 --- a/drivers/block/nbd.c +++ b/drivers/block/nbd.c @@ -445,6 +445,14 @@ static void nbd_clear_que(struct nbd_device *nbd) req->errors++; nbd_end_request(req); } + + while (!list_empty(&nbd->waiting_queue)) { + req = list_entry(nbd->waiting_queue.next, struct request, + queuelist); + list_del_init(&req->queuelist); + req->errors++; + nbd_end_request(req); + } } @@ -594,6 +602,7 @@ static int __nbd_ioctl(struct block_device *bdev, struct nbd_device *nbd, 
nbd->file = NULL; nbd_clear_que(nbd); BUG_ON(!list_empty(&nbd->queue_head)); + BUG_ON(!list_empty(&nbd->waiting_queue)); if (file) fput(file); return 0; diff --git a/drivers/block/nvme.c b/drivers/block/nvme.c index 38a2d0631882..9782340b6692 100644 --- a/drivers/block/nvme.c +++ b/drivers/block/nvme.c @@ -1153,7 +1153,7 @@ static int nvme_user_admin_cmd(struct nvme_ns *ns, struct nvme_admin_cmd cmd; struct nvme_command c; int status, length; - struct nvme_iod *iod; + struct nvme_iod *uninitialized_var(iod); if (!capable(CAP_SYS_ADMIN)) return -EACCES; diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c index 013c7a549fb6..cba3d0278b86 100644 --- a/drivers/block/rbd.c +++ b/drivers/block/rbd.c @@ -71,7 +71,7 @@ #define DEV_NAME_LEN 32 #define MAX_INT_FORMAT_WIDTH ((5 * sizeof (int)) / 2 + 1) -#define RBD_NOTIFY_TIMEOUT_DEFAULT 10 +#define RBD_READ_ONLY_DEFAULT false /* * block device image metadata (in-memory version) @@ -94,7 +94,7 @@ struct rbd_image_header { }; struct rbd_options { - int notify_timeout; + bool read_only; }; /* @@ -174,10 +174,13 @@ struct rbd_device { /* protects updating the header */ struct rw_semaphore header_rwsem; + /* name of the snapshot this device reads from */ char snap_name[RBD_MAX_SNAP_NAME_LEN]; - u32 cur_snap; /* index+1 of current snapshot within snap context - 0 - for the head */ - int read_only; + /* id of the snapshot this device reads from */ + u64 snap_id; /* current snapshot id */ + /* whether the snap_id this device reads from still exists */ + bool snap_exists; + bool read_only; struct list_head node; @@ -186,6 +189,7 @@ struct rbd_device { /* sysfs related */ struct device dev; + unsigned long open_count; }; static DEFINE_MUTEX(ctl_mutex); /* Serialize open/close/setup/teardown */ @@ -198,10 +202,6 @@ static DEFINE_SPINLOCK(rbd_client_list_lock); static int __rbd_init_snaps_header(struct rbd_device *rbd_dev); static void rbd_dev_release(struct device *dev); -static ssize_t rbd_snap_add(struct device *dev, - struct device_attribute *attr, - const char *buf, - size_t count); static void __rbd_remove_snap_dev(struct rbd_device *rbd_dev, struct rbd_snap *snap); @@ -247,13 +247,15 @@ static int rbd_open(struct block_device *bdev, fmode_t mode) { struct rbd_device *rbd_dev = bdev->bd_disk->private_data; - rbd_get_dev(rbd_dev); - - set_device_ro(bdev, rbd_dev->read_only); - if ((mode & FMODE_WRITE) && rbd_dev->read_only) return -EROFS; + mutex_lock_nested(&ctl_mutex, SINGLE_DEPTH_NESTING); + rbd_get_dev(rbd_dev); + set_device_ro(bdev, rbd_dev->read_only); + rbd_dev->open_count++; + mutex_unlock(&ctl_mutex); + return 0; } @@ -261,7 +263,11 @@ static int rbd_release(struct gendisk *disk, fmode_t mode) { struct rbd_device *rbd_dev = disk->private_data; + mutex_lock_nested(&ctl_mutex, SINGLE_DEPTH_NESTING); + BUG_ON(!rbd_dev->open_count); + rbd_dev->open_count--; rbd_put_dev(rbd_dev); + mutex_unlock(&ctl_mutex); return 0; } @@ -343,17 +349,24 @@ static struct rbd_client *__rbd_client_find(struct ceph_options *opt) * mount options */ enum { - Opt_notify_timeout, Opt_last_int, /* int args above */ Opt_last_string, /* string args above */ + Opt_read_only, + Opt_read_write, + /* Boolean args above */ + Opt_last_bool, }; static match_table_t rbdopt_tokens = { - {Opt_notify_timeout, "notify_timeout=%d"}, /* int args above */ /* string args above */ + {Opt_read_only, "read_only"}, + {Opt_read_only, "ro"}, /* Alternate spelling */ + {Opt_read_write, "read_write"}, + {Opt_read_write, "rw"}, /* Alternate spelling */ + /* Boolean args above */ {-1, NULL} }; @@ 
-378,13 +391,18 @@ static int parse_rbd_opts_token(char *c, void *private) } else if (token > Opt_last_int && token < Opt_last_string) { dout("got string token %d val %s\n", token, argstr[0].from); + } else if (token > Opt_last_string && token < Opt_last_bool) { + dout("got Boolean token %d\n", token); } else { dout("got token %d\n", token); } switch (token) { - case Opt_notify_timeout: - rbdopt->notify_timeout = intval; + case Opt_read_only: + rbdopt->read_only = true; + break; + case Opt_read_write: + rbdopt->read_only = false; break; default: BUG_ON(token); @@ -408,7 +426,7 @@ static struct rbd_client *rbd_get_client(const char *mon_addr, if (!rbd_opts) return ERR_PTR(-ENOMEM); - rbd_opts->notify_timeout = RBD_NOTIFY_TIMEOUT_DEFAULT; + rbd_opts->read_only = RBD_READ_ONLY_DEFAULT; opt = ceph_parse_options(options, mon_addr, mon_addr + mon_addr_len, @@ -450,7 +468,9 @@ static void rbd_client_release(struct kref *kref) struct rbd_client *rbdc = container_of(kref, struct rbd_client, kref); dout("rbd_release_client %p\n", rbdc); + spin_lock(&rbd_client_list_lock); list_del(&rbdc->node); + spin_unlock(&rbd_client_list_lock); ceph_destroy_client(rbdc->client); kfree(rbdc->rbd_opts); @@ -463,9 +483,7 @@ static void rbd_client_release(struct kref *kref) */ static void rbd_put_client(struct rbd_device *rbd_dev) { - spin_lock(&rbd_client_list_lock); kref_put(&rbd_dev->rbd_client->kref, rbd_client_release); - spin_unlock(&rbd_client_list_lock); rbd_dev->rbd_client = NULL; } @@ -498,7 +516,7 @@ static int rbd_header_from_disk(struct rbd_image_header *header, snap_count = le32_to_cpu(ondisk->snap_count); header->snapc = kmalloc(sizeof(struct ceph_snap_context) + - snap_count * sizeof (*ondisk), + snap_count * sizeof(u64), gfp_flags); if (!header->snapc) return -ENOMEM; @@ -552,21 +570,6 @@ err_snapc: return -ENOMEM; } -static int snap_index(struct rbd_image_header *header, int snap_num) -{ - return header->total_snaps - snap_num; -} - -static u64 cur_snap_id(struct rbd_device *rbd_dev) -{ - struct rbd_image_header *header = &rbd_dev->header; - - if (!rbd_dev->cur_snap) - return 0; - - return header->snapc->snaps[snap_index(header, rbd_dev->cur_snap)]; -} - static int snap_by_name(struct rbd_image_header *header, const char *snap_name, u64 *seq, u64 *size) { @@ -605,17 +608,18 @@ static int rbd_header_set_snap(struct rbd_device *dev, u64 *size) snapc->seq = header->snap_seq; else snapc->seq = 0; - dev->cur_snap = 0; - dev->read_only = 0; + dev->snap_id = CEPH_NOSNAP; + dev->snap_exists = false; + dev->read_only = dev->rbd_client->rbd_opts->read_only; if (size) *size = header->image_size; } else { ret = snap_by_name(header, dev->snap_name, &snapc->seq, size); if (ret < 0) goto done; - - dev->cur_snap = header->total_snaps - ret; - dev->read_only = 1; + dev->snap_id = snapc->seq; + dev->snap_exists = true; + dev->read_only = true; /* No choice for snapshots */ } ret = 0; @@ -626,7 +630,7 @@ done: static void rbd_header_free(struct rbd_image_header *header) { - kfree(header->snapc); + ceph_put_snap_context(header->snapc); kfree(header->snap_names); kfree(header->snap_sizes); } @@ -904,13 +908,10 @@ static int rbd_do_request(struct request *rq, dout("rbd_do_request obj=%s ofs=%lld len=%lld\n", obj, len, ofs); - down_read(&dev->header_rwsem); - osdc = &dev->rbd_client->client->osdc; req = ceph_osdc_alloc_request(osdc, flags, snapc, ops, false, GFP_NOIO, pages, bio); if (!req) { - up_read(&dev->header_rwsem); ret = -ENOMEM; goto done_pages; } @@ -937,15 +938,15 @@ static int rbd_do_request(struct request 
*rq, layout->fl_object_size = cpu_to_le32(1 << RBD_MAX_OBJ_ORDER); layout->fl_pg_preferred = cpu_to_le32(-1); layout->fl_pg_pool = cpu_to_le32(dev->poolid); - ceph_calc_raw_layout(osdc, layout, snapid, ofs, &len, &bno, - req, ops); + ret = ceph_calc_raw_layout(osdc, layout, snapid, ofs, &len, &bno, + req, ops); + BUG_ON(ret != 0); ceph_osdc_build_request(req, ofs, &len, ops, snapc, &mtime, req->r_oid, req->r_oid_len); - up_read(&dev->header_rwsem); if (linger_req) { ceph_osdc_set_request_linger(osdc, req); @@ -1210,7 +1211,7 @@ static int rbd_req_sync_notify_ack(struct rbd_device *dev, if (ret < 0) return ret; - ops[0].watch.ver = cpu_to_le64(dev->header.obj_version); + ops[0].watch.ver = cpu_to_le64(ver); ops[0].watch.cookie = notify_id; ops[0].watch.flag = 0; @@ -1230,6 +1231,7 @@ static int rbd_req_sync_notify_ack(struct rbd_device *dev, static void rbd_watch_cb(u64 ver, u64 notify_id, u8 opcode, void *data) { struct rbd_device *dev = (struct rbd_device *)data; + u64 hver; int rc; if (!dev) @@ -1239,12 +1241,13 @@ static void rbd_watch_cb(u64 ver, u64 notify_id, u8 opcode, void *data) notify_id, (int)opcode); mutex_lock_nested(&ctl_mutex, SINGLE_DEPTH_NESTING); rc = __rbd_update_snaps(dev); + hver = dev->header.obj_version; mutex_unlock(&ctl_mutex); if (rc) pr_warning(RBD_DRV_NAME "%d got notification but failed to " " update snaps: %d\n", dev->major, rc); - rbd_req_sync_notify_ack(dev, ver, notify_id, dev->obj_md_name); + rbd_req_sync_notify_ack(dev, hver, notify_id, dev->obj_md_name); } /* @@ -1321,71 +1324,7 @@ static int rbd_req_sync_unwatch(struct rbd_device *dev, return ret; } -struct rbd_notify_info { - struct rbd_device *dev; -}; - -static void rbd_notify_cb(u64 ver, u64 notify_id, u8 opcode, void *data) -{ - struct rbd_device *dev = (struct rbd_device *)data; - if (!dev) - return; - - dout("rbd_notify_cb %s notify_id=%lld opcode=%d\n", dev->obj_md_name, - notify_id, (int)opcode); -} - -/* - * Request sync osd notify - */ -static int rbd_req_sync_notify(struct rbd_device *dev, - const char *obj) -{ - struct ceph_osd_req_op *ops; - struct ceph_osd_client *osdc = &dev->rbd_client->client->osdc; - struct ceph_osd_event *event; - struct rbd_notify_info info; - int payload_len = sizeof(u32) + sizeof(u32); - int ret; - - ret = rbd_create_rw_ops(&ops, 1, CEPH_OSD_OP_NOTIFY, payload_len); - if (ret < 0) - return ret; - - info.dev = dev; - - ret = ceph_osdc_create_event(osdc, rbd_notify_cb, 1, - (void *)&info, &event); - if (ret < 0) - goto fail; - - ops[0].watch.ver = 1; - ops[0].watch.flag = 1; - ops[0].watch.cookie = event->cookie; - ops[0].watch.prot_ver = RADOS_NOTIFY_VER; - ops[0].watch.timeout = 12; - - ret = rbd_req_sync_op(dev, NULL, - CEPH_NOSNAP, - 0, - CEPH_OSD_FLAG_WRITE | CEPH_OSD_FLAG_ONDISK, - ops, - 1, obj, 0, 0, NULL, NULL, NULL); - if (ret < 0) - goto fail_event; - - ret = ceph_osdc_wait_event(event, CEPH_OSD_TIMEOUT_DEFAULT); - dout("ceph_osdc_wait_event returned %d\n", ret); - rbd_destroy_ops(ops); - return 0; - -fail_event: - ceph_osdc_cancel_event(event); -fail: - rbd_destroy_ops(ops); - return ret; -} - +#if 0 /* * Request sync osd read */ @@ -1425,6 +1364,7 @@ static int rbd_req_sync_exec(struct rbd_device *dev, dout("cls_exec returned %d\n", ret); return ret; } +#endif static struct rbd_req_coll *rbd_alloc_coll(int num_reqs) { @@ -1457,6 +1397,7 @@ static void rbd_rq_fn(struct request_queue *q) u64 ofs; int num_segs, cur_seg = 0; struct rbd_req_coll *coll; + struct ceph_snap_context *snapc; /* peek at request from block layer */ if (!rq) @@ -1483,6 +1424,20 @@ 
static void rbd_rq_fn(struct request_queue *q) spin_unlock_irq(q->queue_lock); + down_read(&rbd_dev->header_rwsem); + + if (rbd_dev->snap_id != CEPH_NOSNAP && !rbd_dev->snap_exists) { + up_read(&rbd_dev->header_rwsem); + dout("request for non-existent snapshot"); + spin_lock_irq(q->queue_lock); + __blk_end_request_all(rq, -ENXIO); + continue; + } + + snapc = ceph_get_snap_context(rbd_dev->header.snapc); + + up_read(&rbd_dev->header_rwsem); + dout("%s 0x%x bytes at 0x%llx\n", do_write ? "write" : "read", size, blk_rq_pos(rq) * SECTOR_SIZE); @@ -1492,6 +1447,7 @@ static void rbd_rq_fn(struct request_queue *q) if (!coll) { spin_lock_irq(q->queue_lock); __blk_end_request_all(rq, -ENOMEM); + ceph_put_snap_context(snapc); continue; } @@ -1515,13 +1471,13 @@ static void rbd_rq_fn(struct request_queue *q) /* init OSD command: write or read */ if (do_write) rbd_req_write(rq, rbd_dev, - rbd_dev->header.snapc, + snapc, ofs, op_size, bio, coll, cur_seg); else rbd_req_read(rq, rbd_dev, - cur_snap_id(rbd_dev), + rbd_dev->snap_id, ofs, op_size, bio, coll, cur_seg); @@ -1538,6 +1494,8 @@ next_seg: if (bp) bio_pair_release(bp); spin_lock_irq(q->queue_lock); + + ceph_put_snap_context(snapc); } } @@ -1641,55 +1599,6 @@ out_dh: return rc; } -/* - * create a snapshot - */ -static int rbd_header_add_snap(struct rbd_device *dev, - const char *snap_name, - gfp_t gfp_flags) -{ - int name_len = strlen(snap_name); - u64 new_snapid; - int ret; - void *data, *p, *e; - u64 ver; - struct ceph_mon_client *monc; - - /* we should create a snapshot only if we're pointing at the head */ - if (dev->cur_snap) - return -EINVAL; - - monc = &dev->rbd_client->client->monc; - ret = ceph_monc_create_snapid(monc, dev->poolid, &new_snapid); - dout("created snapid=%lld\n", new_snapid); - if (ret < 0) - return ret; - - data = kmalloc(name_len + 16, gfp_flags); - if (!data) - return -ENOMEM; - - p = data; - e = data + name_len + 16; - - ceph_encode_string_safe(&p, e, snap_name, name_len, bad); - ceph_encode_64_safe(&p, e, new_snapid, bad); - - ret = rbd_req_sync_exec(dev, dev->obj_md_name, "rbd", "snap_add", - data, p - data, &ver); - - kfree(data); - - if (ret < 0) - return ret; - - dev->header.snapc->seq = new_snapid; - - return 0; -bad: - return -ERANGE; -} - static void __rbd_remove_all_snaps(struct rbd_device *rbd_dev) { struct rbd_snap *snap; @@ -1714,10 +1623,15 @@ static int __rbd_update_snaps(struct rbd_device *rbd_dev) if (ret < 0) return ret; + down_write(&rbd_dev->header_rwsem); + /* resized? 
*/ - set_capacity(rbd_dev->disk, h.image_size / SECTOR_SIZE); + if (rbd_dev->snap_id == CEPH_NOSNAP) { + sector_t size = (sector_t) h.image_size / SECTOR_SIZE; - down_write(&rbd_dev->header_rwsem); + dout("setting size to %llu sectors", (unsigned long long) size); + set_capacity(rbd_dev->disk, size); + } snap_seq = rbd_dev->header.snapc->seq; if (rbd_dev->header.total_snaps && @@ -1726,10 +1640,12 @@ static int __rbd_update_snaps(struct rbd_device *rbd_dev) if head moves */ follow_seq = 1; - kfree(rbd_dev->header.snapc); + ceph_put_snap_context(rbd_dev->header.snapc); kfree(rbd_dev->header.snap_names); kfree(rbd_dev->header.snap_sizes); + rbd_dev->header.obj_version = h.obj_version; + rbd_dev->header.image_size = h.image_size; rbd_dev->header.total_snaps = h.total_snaps; rbd_dev->header.snapc = h.snapc; rbd_dev->header.snap_names = h.snap_names; @@ -1833,8 +1749,13 @@ static ssize_t rbd_size_show(struct device *dev, struct device_attribute *attr, char *buf) { struct rbd_device *rbd_dev = dev_to_rbd_dev(dev); + sector_t size; + + down_read(&rbd_dev->header_rwsem); + size = get_capacity(rbd_dev->disk); + up_read(&rbd_dev->header_rwsem); - return sprintf(buf, "%llu\n", (unsigned long long)rbd_dev->header.image_size); + return sprintf(buf, "%llu\n", (unsigned long long) size * SECTOR_SIZE); } static ssize_t rbd_major_show(struct device *dev, @@ -1905,7 +1826,6 @@ static DEVICE_ATTR(pool, S_IRUGO, rbd_pool_show, NULL); static DEVICE_ATTR(name, S_IRUGO, rbd_name_show, NULL); static DEVICE_ATTR(refresh, S_IWUSR, NULL, rbd_image_refresh); static DEVICE_ATTR(current_snap, S_IRUGO, rbd_snap_show, NULL); -static DEVICE_ATTR(create_snap, S_IWUSR, NULL, rbd_snap_add); static struct attribute *rbd_attrs[] = { &dev_attr_size.attr, @@ -1915,7 +1835,6 @@ static struct attribute *rbd_attrs[] = { &dev_attr_name.attr, &dev_attr_current_snap.attr, &dev_attr_refresh.attr, - &dev_attr_create_snap.attr, NULL }; @@ -2084,7 +2003,14 @@ static int __rbd_init_snaps_header(struct rbd_device *rbd_dev) cur_id = rbd_dev->header.snapc->snaps[i - 1]; if (!i || old_snap->id < cur_id) { - /* old_snap->id was skipped, thus was removed */ + /* + * old_snap->id was skipped, thus was + * removed. If this rbd_dev is mapped to + * the removed snapshot, record that it no + * longer exists, to prevent further I/O. + */ + if (rbd_dev->snap_id == old_snap->id) + rbd_dev->snap_exists = false; __rbd_remove_snap_dev(rbd_dev, old_snap); continue; } @@ -2232,8 +2158,8 @@ static void rbd_id_put(struct rbd_device *rbd_dev) struct rbd_device *rbd_dev; rbd_dev = list_entry(tmp, struct rbd_device, node); - if (rbd_id > max_id) - max_id = rbd_id; + if (rbd_dev->id > max_id) + max_id = rbd_dev->id; } spin_unlock(&rbd_dev_list_lock); @@ -2530,6 +2456,11 @@ static ssize_t rbd_remove(struct bus_type *bus, goto done; } + if (rbd_dev->open_count) { + ret = -EBUSY; + goto done; + } + __rbd_remove_all_snaps(rbd_dev); rbd_bus_del_dev(rbd_dev); @@ -2538,47 +2469,6 @@ done: return ret; } -static ssize_t rbd_snap_add(struct device *dev, - struct device_attribute *attr, - const char *buf, - size_t count) -{ - struct rbd_device *rbd_dev = dev_to_rbd_dev(dev); - int ret; - char *name = kmalloc(count + 1, GFP_KERNEL); - if (!name) - return -ENOMEM; - - snprintf(name, count, "%s", buf); - - mutex_lock_nested(&ctl_mutex, SINGLE_DEPTH_NESTING); - - ret = rbd_header_add_snap(rbd_dev, - name, GFP_KERNEL); - if (ret < 0) - goto err_unlock; - - ret = __rbd_update_snaps(rbd_dev); - if (ret < 0) - goto err_unlock; - - /* shouldn't hold ctl_mutex when notifying.. 
notify might - trigger a watch callback that would need to get that mutex */ - mutex_unlock(&ctl_mutex); - - /* make a best effort, don't error if failed */ - rbd_req_sync_notify(rbd_dev, rbd_dev->obj_md_name); - - ret = count; - kfree(name); - return ret; - -err_unlock: - mutex_unlock(&ctl_mutex); - kfree(name); - return ret; -} - /* * create control files in sysfs * /sys/bus/rbd/... diff --git a/drivers/block/sunvdc.c b/drivers/block/sunvdc.c index 9dcf76a10bb6..31dd45177a7a 100644 --- a/drivers/block/sunvdc.c +++ b/drivers/block/sunvdc.c @@ -461,7 +461,7 @@ static int generic_request(struct vdc_port *port, u8 op, void *buf, int len) int op_len, err; void *req_buf; - if (!(((u64)1 << ((u64)op - 1)) & port->operations)) + if (!(((u64)1 << (u64)op) & port->operations)) return -EOPNOTSUPP; switch (op) { diff --git a/drivers/block/xen-blkback/blkback.c b/drivers/block/xen-blkback/blkback.c index 73f196ca713f..73d8c9276b9b 100644 --- a/drivers/block/xen-blkback/blkback.c +++ b/drivers/block/xen-blkback/blkback.c @@ -337,7 +337,7 @@ static void xen_blkbk_unmap(struct pending_req *req) invcount++; } - ret = gnttab_unmap_refs(unmap, pages, invcount, false); + ret = gnttab_unmap_refs(unmap, NULL, pages, invcount); BUG_ON(ret); } @@ -623,7 +623,6 @@ static int dispatch_rw_block_io(struct xen_blkif *blkif, goto fail_response; } - preq.dev = req->u.rw.handle; preq.sector_number = req->u.rw.sector_number; preq.nr_sects = 0; diff --git a/drivers/block/xen-blkback/xenbus.c b/drivers/block/xen-blkback/xenbus.c index 4f66171c6683..a155254f1338 100644 --- a/drivers/block/xen-blkback/xenbus.c +++ b/drivers/block/xen-blkback/xenbus.c @@ -367,6 +367,7 @@ static int xen_blkbk_remove(struct xenbus_device *dev) be->blkif = NULL; } + kfree(be->mode); kfree(be); dev_set_drvdata(&dev->dev, NULL); return 0; @@ -502,6 +503,7 @@ static void backend_changed(struct xenbus_watch *watch, = container_of(watch, struct backend_info, backend_watch); struct xenbus_device *dev = be->dev; int cdrom = 0; + unsigned long handle; char *device_type; DPRINTK(""); @@ -521,10 +523,10 @@ static void backend_changed(struct xenbus_watch *watch, return; } - if ((be->major || be->minor) && - ((be->major != major) || (be->minor != minor))) { - pr_warn(DRV_PFX "changing physical device (from %x:%x to %x:%x) not supported.\n", - be->major, be->minor, major, minor); + if (be->major | be->minor) { + if (be->major != major || be->minor != minor) + pr_warn(DRV_PFX "changing physical device (from %x:%x to %x:%x) not supported.\n", + be->major, be->minor, major, minor); return; } @@ -542,36 +544,33 @@ static void backend_changed(struct xenbus_watch *watch, kfree(device_type); } - if (be->major == 0 && be->minor == 0) { - /* Front end dir is a number, which is used as the handle. */ - - char *p = strrchr(dev->otherend, '/') + 1; - long handle; - err = strict_strtoul(p, 0, &handle); - if (err) - return; + /* Front end dir is a number, which is used as the handle. 
*/ + err = strict_strtoul(strrchr(dev->otherend, '/') + 1, 0, &handle); + if (err) + return; - be->major = major; - be->minor = minor; + be->major = major; + be->minor = minor; - err = xen_vbd_create(be->blkif, handle, major, minor, - (NULL == strchr(be->mode, 'w')), cdrom); - if (err) { - be->major = 0; - be->minor = 0; - xenbus_dev_fatal(dev, err, "creating vbd structure"); - return; - } + err = xen_vbd_create(be->blkif, handle, major, minor, + !strchr(be->mode, 'w'), cdrom); + if (err) + xenbus_dev_fatal(dev, err, "creating vbd structure"); + else { err = xenvbd_sysfs_addif(dev); if (err) { xen_vbd_free(&be->blkif->vbd); - be->major = 0; - be->minor = 0; xenbus_dev_fatal(dev, err, "creating sysfs entries"); - return; } + } + if (err) { + kfree(be->mode); + be->mode = NULL; + be->major = 0; + be->minor = 0; + } else { /* We're potentially connected now */ xen_update_blkif_status(be->blkif); } diff --git a/drivers/bluetooth/ath3k.c b/drivers/bluetooth/ath3k.c index 57fd867553d7..1ae7039f067a 100644 --- a/drivers/bluetooth/ath3k.c +++ b/drivers/bluetooth/ath3k.c @@ -66,6 +66,7 @@ static struct usb_device_id ath3k_table[] = { { USB_DEVICE(0x13d3, 0x3304) }, { USB_DEVICE(0x0930, 0x0215) }, { USB_DEVICE(0x0489, 0xE03D) }, + { USB_DEVICE(0x0489, 0xE027) }, /* Atheros AR9285 Malbec with sflash firmware */ { USB_DEVICE(0x03F0, 0x311D) }, diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c index 9217121362e1..27f9d9fc5a06 100644 --- a/drivers/bluetooth/btusb.c +++ b/drivers/bluetooth/btusb.c @@ -60,6 +60,9 @@ static struct usb_device_id btusb_table[] = { /* Generic Bluetooth USB device */ { USB_DEVICE_INFO(0xe0, 0x01, 0x01) }, + /* Apple-specific (Broadcom) devices */ + { USB_VENDOR_AND_INTERFACE_INFO(0x05ac, 0xff, 0x01, 0x01) }, + /* Broadcom SoftSailing reporting vendor specific */ { USB_DEVICE(0x0a5c, 0x21e1) }, @@ -102,15 +105,14 @@ static struct usb_device_id btusb_table[] = { /* Broadcom BCM20702A0 */ { USB_DEVICE(0x0489, 0xe042) }, - { USB_DEVICE(0x0a5c, 0x21e3) }, - { USB_DEVICE(0x0a5c, 0x21e6) }, - { USB_DEVICE(0x0a5c, 0x21e8) }, - { USB_DEVICE(0x0a5c, 0x21f3) }, { USB_DEVICE(0x413c, 0x8197) }, /* Foxconn - Hon Hai */ { USB_DEVICE(0x0489, 0xe033) }, + /*Broadcom devices with vendor specific id */ + { USB_VENDOR_AND_INTERFACE_INFO(0x0a5c, 0xff, 0x01, 0x01) }, + { } /* Terminating entry */ }; @@ -128,6 +130,7 @@ static struct usb_device_id blacklist_table[] = { { USB_DEVICE(0x13d3, 0x3304), .driver_info = BTUSB_IGNORE }, { USB_DEVICE(0x0930, 0x0215), .driver_info = BTUSB_IGNORE }, { USB_DEVICE(0x0489, 0xe03d), .driver_info = BTUSB_IGNORE }, + { USB_DEVICE(0x0489, 0xe027), .driver_info = BTUSB_IGNORE }, /* Atheros AR9285 Malbec with sflash firmware */ { USB_DEVICE(0x03f0, 0x311d), .driver_info = BTUSB_IGNORE }, diff --git a/drivers/char/tpm/tpm.c b/drivers/char/tpm/tpm.c index 08427abf5fa5..27f8ddf09d4f 100644 --- a/drivers/char/tpm/tpm.c +++ b/drivers/char/tpm/tpm.c @@ -1186,17 +1186,20 @@ ssize_t tpm_write(struct file *file, const char __user *buf, size_t size, loff_t *off) { struct tpm_chip *chip = file->private_data; - size_t in_size = size, out_size; + size_t in_size = size; + ssize_t out_size; /* cannot perform a write until the read has cleared - either via tpm_read or a user_read_timer timeout */ - while (atomic_read(&chip->data_pending) != 0) - msleep(TPM_TIMEOUT); - - mutex_lock(&chip->buffer_mutex); + either via tpm_read or a user_read_timer timeout. + This also prevents splitted buffered writes from blocking here. 
+ */ + if (atomic_read(&chip->data_pending) != 0) + return -EBUSY; if (in_size > TPM_BUFSIZE) - in_size = TPM_BUFSIZE; + return -E2BIG; + + mutex_lock(&chip->buffer_mutex); if (copy_from_user (chip->data_buffer, (void __user *) buf, in_size)) { @@ -1206,6 +1209,10 @@ ssize_t tpm_write(struct file *file, const char __user *buf, /* atomic tpm command send and result receive */ out_size = tpm_transmit(chip, chip->data_buffer, TPM_BUFSIZE); + if (out_size < 0) { + mutex_unlock(&chip->buffer_mutex); + return out_size; + } atomic_set(&chip->data_pending, out_size); mutex_unlock(&chip->buffer_mutex); diff --git a/drivers/char/ttyprintk.c b/drivers/char/ttyprintk.c index 46b77ede84c0..a7c6d6a07d3c 100644 --- a/drivers/char/ttyprintk.c +++ b/drivers/char/ttyprintk.c @@ -67,7 +67,7 @@ static int tpk_printk(const unsigned char *buf, int count) tmp[tpk_curr + 1] = '\0'; printk(KERN_INFO "%s%s", tpk_tag, tmp); tpk_curr = 0; - if (buf[i + 1] == '\n') + if ((i + 1) < count && buf[i + 1] == '\n') i++; break; case '\n': diff --git a/drivers/char/virtio_console.c b/drivers/char/virtio_console.c index cdf2f5451c76..f77e341631d7 100644 --- a/drivers/char/virtio_console.c +++ b/drivers/char/virtio_console.c @@ -1808,7 +1808,8 @@ static void virtcons_remove(struct virtio_device *vdev) /* Disable interrupts for vqs */ vdev->config->reset(vdev); /* Finish up work that's lined up */ - cancel_work_sync(&portdev->control_work); + if (use_multiport(portdev)) + cancel_work_sync(&portdev->control_work); list_for_each_entry_safe(port, port2, &portdev->ports, list) unplug_port(port); diff --git a/drivers/cpufreq/powernow-k8.c b/drivers/cpufreq/powernow-k8.c index c0e816468e30..c671369a05bf 100644 --- a/drivers/cpufreq/powernow-k8.c +++ b/drivers/cpufreq/powernow-k8.c @@ -35,7 +35,6 @@ #include <linux/slab.h> #include <linux/string.h> #include <linux/cpumask.h> -#include <linux/sched.h> /* for current / set_cpus_allowed() */ #include <linux/io.h> #include <linux/delay.h> @@ -1139,16 +1138,23 @@ static int transition_frequency_pstate(struct powernow_k8_data *data, return res; } -/* Driver entry point to switch to the target frequency */ -static int powernowk8_target(struct cpufreq_policy *pol, - unsigned targfreq, unsigned relation) +struct powernowk8_target_arg { + struct cpufreq_policy *pol; + unsigned targfreq; + unsigned relation; +}; + +static long powernowk8_target_fn(void *arg) { - cpumask_var_t oldmask; + struct powernowk8_target_arg *pta = arg; + struct cpufreq_policy *pol = pta->pol; + unsigned targfreq = pta->targfreq; + unsigned relation = pta->relation; struct powernow_k8_data *data = per_cpu(powernow_data, pol->cpu); u32 checkfid; u32 checkvid; unsigned int newstate; - int ret = -EIO; + int ret; if (!data) return -EINVAL; @@ -1156,29 +1162,16 @@ static int powernowk8_target(struct cpufreq_policy *pol, checkfid = data->currfid; checkvid = data->currvid; - /* only run on specific CPU from here on. 
*/ - /* This is poor form: use a workqueue or smp_call_function_single */ - if (!alloc_cpumask_var(&oldmask, GFP_KERNEL)) - return -ENOMEM; - - cpumask_copy(oldmask, tsk_cpus_allowed(current)); - set_cpus_allowed_ptr(current, cpumask_of(pol->cpu)); - - if (smp_processor_id() != pol->cpu) { - printk(KERN_ERR PFX "limiting to cpu %u failed\n", pol->cpu); - goto err_out; - } - if (pending_bit_stuck()) { printk(KERN_ERR PFX "failing targ, change pending bit set\n"); - goto err_out; + return -EIO; } pr_debug("targ: cpu %d, %d kHz, min %d, max %d, relation %d\n", pol->cpu, targfreq, pol->min, pol->max, relation); if (query_current_values_with_pending_wait(data)) - goto err_out; + return -EIO; if (cpu_family != CPU_HW_PSTATE) { pr_debug("targ: curr fid 0x%x, vid 0x%x\n", @@ -1196,7 +1189,7 @@ static int powernowk8_target(struct cpufreq_policy *pol, if (cpufreq_frequency_table_target(pol, data->powernow_table, targfreq, relation, &newstate)) - goto err_out; + return -EIO; mutex_lock(&fidvid_mutex); @@ -1209,9 +1202,8 @@ static int powernowk8_target(struct cpufreq_policy *pol, ret = transition_frequency_fidvid(data, newstate); if (ret) { printk(KERN_ERR PFX "transition frequency failed\n"); - ret = 1; mutex_unlock(&fidvid_mutex); - goto err_out; + return 1; } mutex_unlock(&fidvid_mutex); @@ -1220,12 +1212,18 @@ static int powernowk8_target(struct cpufreq_policy *pol, data->powernow_table[newstate].index); else pol->cur = find_khz_freq_from_fid(data->currfid); - ret = 0; -err_out: - set_cpus_allowed_ptr(current, oldmask); - free_cpumask_var(oldmask); - return ret; + return 0; +} + +/* Driver entry point to switch to the target frequency */ +static int powernowk8_target(struct cpufreq_policy *pol, + unsigned targfreq, unsigned relation) +{ + struct powernowk8_target_arg pta = { .pol = pol, .targfreq = targfreq, + .relation = relation }; + + return work_on_cpu(pol->cpu, powernowk8_target_fn, &pta); } /* Driver entry point to verify the policy and range of frequencies */ diff --git a/drivers/dca/dca-core.c b/drivers/dca/dca-core.c index bc6f5faa1e9e..819dfda88236 100644 --- a/drivers/dca/dca-core.c +++ b/drivers/dca/dca-core.c @@ -420,6 +420,11 @@ void unregister_dca_provider(struct dca_provider *dca, struct device *dev) raw_spin_lock_irqsave(&dca_lock, flags); + if (list_empty(&dca_domains)) { + raw_spin_unlock_irqrestore(&dca_lock, flags); + return; + } + list_del(&dca->node); pci_rc = dca_pci_rc_from_dev(dev); diff --git a/drivers/dma/at_hdmac.c b/drivers/dma/at_hdmac.c index bf0d7e4e345b..9ec3943b1232 100644 --- a/drivers/dma/at_hdmac.c +++ b/drivers/dma/at_hdmac.c @@ -664,7 +664,7 @@ atc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl, flags); if (unlikely(!atslave || !sg_len)) { - dev_dbg(chan2dev(chan), "prep_dma_memcpy: length is zero!\n"); + dev_dbg(chan2dev(chan), "prep_slave_sg: sg length is zero!\n"); return NULL; } @@ -691,6 +691,11 @@ atc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl, mem = sg_dma_address(sg); len = sg_dma_len(sg); + if (unlikely(!len)) { + dev_dbg(chan2dev(chan), + "prep_slave_sg: sg(%d) data length is zero\n", i); + goto err; + } mem_width = 2; if (unlikely(mem & 3 || len & 3)) mem_width = 0; @@ -726,6 +731,11 @@ atc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl, mem = sg_dma_address(sg); len = sg_dma_len(sg); + if (unlikely(!len)) { + dev_dbg(chan2dev(chan), + "prep_slave_sg: sg(%d) data length is zero\n", i); + goto err; + } mem_width = 2; if (unlikely(mem & 3 || len & 3)) mem_width = 0; @@ -759,6 +769,7 @@ 
atc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl, err_desc_get: dev_err(chan2dev(chan), "not enough descriptors available\n"); +err: atc_desc_put(atchan, first); return NULL; } diff --git a/drivers/dma/dmaengine.c b/drivers/dma/dmaengine.c index 2397f6f451b1..6c87d676c62d 100644 --- a/drivers/dma/dmaengine.c +++ b/drivers/dma/dmaengine.c @@ -578,7 +578,7 @@ void dmaengine_get(void) list_del_rcu(&device->global_node); break; } else if (err) - pr_err("%s: failed to get %s: (%d)\n", + pr_debug("%s: failed to get %s: (%d)\n", __func__, dma_chan_name(chan), err); } } diff --git a/drivers/dma/imx-dma.c b/drivers/dma/imx-dma.c index 2ca8d3f779fe..180f5235acc2 100644 --- a/drivers/dma/imx-dma.c +++ b/drivers/dma/imx-dma.c @@ -172,8 +172,7 @@ struct imxdma_engine { struct device_dma_parameters dma_parms; struct dma_device dma_device; void __iomem *base; - struct clk *dma_ahb; - struct clk *dma_ipg; + struct clk *dma_clk; spinlock_t lock; struct imx_dma_2d_config slots_2d[IMX_DMA_2D_SLOTS]; struct imxdma_channel channel[IMX_DMA_CHANNELS]; @@ -474,8 +473,10 @@ static int imxdma_xfer_desc(struct imxdma_desc *d) slot = i; break; } - if (slot < 0) + if (slot < 0) { + spin_unlock_irqrestore(&imxdma->lock, flags); return -EBUSY; + } imxdma->slots_2d[slot].xsr = d->x; imxdma->slots_2d[slot].ysr = d->y; @@ -977,20 +978,10 @@ static int __init imxdma_probe(struct platform_device *pdev) return 0; } - imxdma->dma_ipg = devm_clk_get(&pdev->dev, "ipg"); - if (IS_ERR(imxdma->dma_ipg)) { - ret = PTR_ERR(imxdma->dma_ipg); - goto err_clk; - } - - imxdma->dma_ahb = devm_clk_get(&pdev->dev, "ahb"); - if (IS_ERR(imxdma->dma_ahb)) { - ret = PTR_ERR(imxdma->dma_ahb); - goto err_clk; - } - - clk_prepare_enable(imxdma->dma_ipg); - clk_prepare_enable(imxdma->dma_ahb); + imxdma->dma_clk = clk_get(NULL, "dma"); + if (IS_ERR(imxdma->dma_clk)) + return PTR_ERR(imxdma->dma_clk); + clk_enable(imxdma->dma_clk); /* reset DMA module */ imx_dmav1_writel(imxdma, DCR_DRST, DMA_DCR); @@ -999,14 +990,16 @@ static int __init imxdma_probe(struct platform_device *pdev) ret = request_irq(MX1_DMA_INT, dma_irq_handler, 0, "DMA", imxdma); if (ret) { dev_warn(imxdma->dev, "Can't register IRQ for DMA\n"); - goto err_enable; + kfree(imxdma); + return ret; } ret = request_irq(MX1_DMA_ERR, imxdma_err_handler, 0, "DMA", imxdma); if (ret) { dev_warn(imxdma->dev, "Can't register ERRIRQ for DMA\n"); free_irq(MX1_DMA_INT, NULL); - goto err_enable; + kfree(imxdma); + return ret; } } @@ -1103,10 +1096,7 @@ err_init: free_irq(MX1_DMA_INT, NULL); free_irq(MX1_DMA_ERR, NULL); } -err_enable: - clk_disable_unprepare(imxdma->dma_ipg); - clk_disable_unprepare(imxdma->dma_ahb); -err_clk: + kfree(imxdma); return ret; } @@ -1126,9 +1116,7 @@ static int __exit imxdma_remove(struct platform_device *pdev) free_irq(MX1_DMA_ERR, NULL); } - clk_disable_unprepare(imxdma->dma_ipg); - clk_disable_unprepare(imxdma->dma_ahb); - kfree(imxdma); + kfree(imxdma); return 0; } diff --git a/drivers/dma/ioat/dma_v3.c b/drivers/dma/ioat/dma_v3.c index f7f1dc62c15c..ed0e8b796a93 100644 --- a/drivers/dma/ioat/dma_v3.c +++ b/drivers/dma/ioat/dma_v3.c @@ -951,7 +951,7 @@ static int __devinit ioat_xor_val_self_test(struct ioatdma_device *device) goto free_resources; } } - dma_sync_single_for_device(dev, dest_dma, PAGE_SIZE, DMA_TO_DEVICE); + dma_sync_single_for_device(dev, dest_dma, PAGE_SIZE, DMA_FROM_DEVICE); /* skip validate if the capability is not present */ if (!dma_has_cap(DMA_XOR_VAL, dma_chan->device->cap_mask)) diff --git a/drivers/dma/pl330.c b/drivers/dma/pl330.c 
index 8c44f17a99e4..758122f33a92 100644 --- a/drivers/dma/pl330.c +++ b/drivers/dma/pl330.c @@ -1568,17 +1568,19 @@ static int pl330_submit_req(void *ch_id, struct pl330_req *r) goto xfer_exit; } - /* Prefer Secure Channel */ - if (!_manager_ns(thrd)) - r->cfg->nonsecure = 0; - else - r->cfg->nonsecure = 1; /* Use last settings, if not provided */ - if (r->cfg) + if (r->cfg) { + /* Prefer Secure Channel */ + if (!_manager_ns(thrd)) + r->cfg->nonsecure = 0; + else + r->cfg->nonsecure = 1; + ccr = _prepare_ccr(r->cfg); - else + } else { ccr = readl(regs + CC(thrd->id)); + } /* If this req doesn't have valid xfer settings */ if (!_is_valid(ccr)) { @@ -2935,6 +2937,11 @@ pl330_probe(struct amba_device *adev, const struct amba_id *id) num_chan = max_t(int, pi->pcfg.num_peri, pi->pcfg.num_chan); pdmac->peripherals = kzalloc(num_chan * sizeof(*pch), GFP_KERNEL); + if (!pdmac->peripherals) { + ret = -ENOMEM; + dev_err(&adev->dev, "unable to allocate pdmac->peripherals\n"); + goto probe_err5; + } for (i = 0; i < num_chan; i++) { pch = &pdmac->peripherals[i]; diff --git a/drivers/dma/sirf-dma.c b/drivers/dma/sirf-dma.c index 434ad31174f2..c4394892edab 100644 --- a/drivers/dma/sirf-dma.c +++ b/drivers/dma/sirf-dma.c @@ -109,7 +109,7 @@ static void sirfsoc_dma_execute(struct sirfsoc_dma_chan *schan) sdesc = list_first_entry(&schan->queued, struct sirfsoc_dma_desc, node); /* Move the first queued descriptor to active list */ - list_move_tail(&schan->queued, &schan->active); + list_move_tail(&sdesc->node, &schan->active); /* Start the DMA transfer */ writel_relaxed(sdesc->width, sdma->base + SIRFSOC_DMA_WIDTH_0 + @@ -428,7 +428,7 @@ static struct dma_async_tx_descriptor *sirfsoc_dma_prep_interleaved( unsigned long iflags; int ret; - if ((xt->dir != DMA_MEM_TO_DEV) || (xt->dir != DMA_DEV_TO_MEM)) { + if ((xt->dir != DMA_MEM_TO_DEV) && (xt->dir != DMA_DEV_TO_MEM)) { ret = -EINVAL; goto err_dir; } diff --git a/drivers/edac/amd64_edac.c b/drivers/edac/amd64_edac.c index 7ef73c919c5d..a8bfe1c5378c 100644 --- a/drivers/edac/amd64_edac.c +++ b/drivers/edac/amd64_edac.c @@ -170,8 +170,11 @@ static int __amd64_set_scrub_rate(struct pci_dev *ctl, u32 new_bw, u32 min_rate) * memory controller and apply to register. Search for the first * bandwidth entry that is greater or equal than the setting requested * and program that. If at last entry, turn off DRAM scrubbing. + * + * If no suitable bandwidth is found, turn off DRAM scrubbing entirely + * by falling back to the last element in scrubrates[]. */ - for (i = 0; i < ARRAY_SIZE(scrubrates); i++) { + for (i = 0; i < ARRAY_SIZE(scrubrates) - 1; i++) { /* * skip scrub rates which aren't recommended * (see F10 BKDG, F3x58) @@ -181,12 +184,6 @@ static int __amd64_set_scrub_rate(struct pci_dev *ctl, u32 new_bw, u32 min_rate) if (scrubrates[i].bandwidth <= new_bw) break; - - /* - * if no suitable bandwidth found, turn off DRAM scrubbing - * entirely by falling back to the last element in the - * scrubrates array. 
- */ } scrubval = scrubrates[i].scrubval; diff --git a/drivers/edac/edac_pci_sysfs.c b/drivers/edac/edac_pci_sysfs.c index 97f5064e3992..686ac03de130 100644 --- a/drivers/edac/edac_pci_sysfs.c +++ b/drivers/edac/edac_pci_sysfs.c @@ -257,7 +257,7 @@ static ssize_t edac_pci_dev_store(struct kobject *kobj, struct edac_pci_dev_attribute *edac_pci_dev; edac_pci_dev = (struct edac_pci_dev_attribute *)attr; - if (edac_pci_dev->show) + if (edac_pci_dev->store) return edac_pci_dev->store(edac_pci_dev->value, buffer, count); return -EIO; } diff --git a/drivers/edac/i7300_edac.c b/drivers/edac/i7300_edac.c index 3bafa3bca148..f4059e9da301 100644 --- a/drivers/edac/i7300_edac.c +++ b/drivers/edac/i7300_edac.c @@ -215,8 +215,8 @@ static const char *ferr_fat_fbd_name[] = { [0] = "Memory Write error on non-redundant retry or " "FBD configuration Write error on retry", }; -#define GET_FBD_FAT_IDX(fbderr) (fbderr & (3 << 28)) -#define FERR_FAT_FBD_ERR_MASK ((1 << 0) | (1 << 1) | (1 << 2) | (1 << 3)) +#define GET_FBD_FAT_IDX(fbderr) (((fbderr) >> 28) & 3) +#define FERR_FAT_FBD_ERR_MASK ((1 << 0) | (1 << 1) | (1 << 2) | (1 << 22)) #define FERR_NF_FBD 0xa0 static const char *ferr_nf_fbd_name[] = { @@ -243,7 +243,7 @@ static const char *ferr_nf_fbd_name[] = { [1] = "Aliased Uncorrectable Non-Mirrored Demand Data ECC", [0] = "Uncorrectable Data ECC on Replay", }; -#define GET_FBD_NF_IDX(fbderr) (fbderr & (3 << 28)) +#define GET_FBD_NF_IDX(fbderr) (((fbderr) >> 28) & 3) #define FERR_NF_FBD_ERR_MASK ((1 << 24) | (1 << 23) | (1 << 22) | (1 << 21) |\ (1 << 18) | (1 << 17) | (1 << 16) | (1 << 15) |\ (1 << 14) | (1 << 13) | (1 << 11) | (1 << 10) |\ @@ -485,7 +485,7 @@ static void i7300_process_fbd_error(struct mem_ctl_info *mci) errnum = find_first_bit(&errors, ARRAY_SIZE(ferr_nf_fbd_name)); specific = GET_ERR_FROM_TABLE(ferr_nf_fbd_name, errnum); - branch = (GET_FBD_FAT_IDX(error_reg) == 2) ? 1 : 0; + branch = (GET_FBD_NF_IDX(error_reg) == 2) ? 1 : 0; pci_read_config_dword(pvt->pci_dev_16_1_fsb_addr_map, REDMEMA, &syndrome); diff --git a/drivers/firewire/core-cdev.c b/drivers/firewire/core-cdev.c index 2e6b24547e2a..b8e4809cae0e 100644 --- a/drivers/firewire/core-cdev.c +++ b/drivers/firewire/core-cdev.c @@ -471,8 +471,8 @@ static int ioctl_get_info(struct client *client, union ioctl_arg *arg) client->bus_reset_closure = a->bus_reset_closure; if (a->bus_reset != 0) { fill_bus_reset_event(&bus_reset, client); - ret = copy_to_user(u64_to_uptr(a->bus_reset), - &bus_reset, sizeof(bus_reset)); + /* unaligned size of bus_reset is 36 bytes */ + ret = copy_to_user(u64_to_uptr(a->bus_reset), &bus_reset, 36); } if (ret == 0 && list_empty(&client->link)) list_add_tail(&client->link, &client->device->client_list); diff --git a/drivers/firewire/core-device.c b/drivers/firewire/core-device.c index 68109e9bb04e..04ebeaf8cffe 100644 --- a/drivers/firewire/core-device.c +++ b/drivers/firewire/core-device.c @@ -999,6 +999,10 @@ static void fw_device_init(struct work_struct *work) ret = idr_pre_get(&fw_device_idr, GFP_KERNEL) ? 
idr_get_new(&fw_device_idr, device, &minor) : -ENOMEM; + if (minor >= 1 << MINORBITS) { + idr_remove(&fw_device_idr, minor); + minor = -ENOSPC; + } up_write(&fw_device_rwsem); if (ret < 0) diff --git a/drivers/firewire/net.c b/drivers/firewire/net.c index 08c674957af8..638e1f71284a 100644 --- a/drivers/firewire/net.c +++ b/drivers/firewire/net.c @@ -861,8 +861,8 @@ static void fwnet_receive_broadcast(struct fw_iso_context *context, if (specifier_id == IANA_SPECIFIER_ID && ver == RFC2734_SW_VERSION) { buf_ptr += 2; length -= IEEE1394_GASP_HDR_SIZE; - fwnet_incoming_packet(dev, buf_ptr, length, - source_node_id, -1, true); + fwnet_incoming_packet(dev, buf_ptr, length, source_node_id, + context->card->generation, true); } packet.payload_length = dev->rcv_buffer_size; @@ -958,7 +958,12 @@ static void fwnet_transmit_packet_done(struct fwnet_packet_task *ptask) break; } - skb_pull(skb, ptask->max_payload); + if (ptask->dest_node == IEEE1394_ALL_NODES) { + skb_pull(skb, + ptask->max_payload + IEEE1394_GASP_HDR_SIZE); + } else { + skb_pull(skb, ptask->max_payload); + } if (ptask->outstanding_pkts > 1) { fwnet_make_sf_hdr(&ptask->hdr, RFC2374_HDR_INTFRAG, dg_size, fg_off, datagram_label); @@ -1062,7 +1067,7 @@ static int fwnet_send_packet(struct fwnet_packet_task *ptask) smp_rmb(); node_id = dev->card->node_id; - p = skb_push(ptask->skb, 8); + p = skb_push(ptask->skb, IEEE1394_GASP_HDR_SIZE); put_unaligned_be32(node_id << 16 | IANA_SPECIFIER_ID >> 8, p); put_unaligned_be32((IANA_SPECIFIER_ID & 0xff) << 24 | RFC2734_SW_VERSION, &p[4]); diff --git a/drivers/firmware/dmi_scan.c b/drivers/firmware/dmi_scan.c index b298158cb922..982f1f5f5742 100644 --- a/drivers/firmware/dmi_scan.c +++ b/drivers/firmware/dmi_scan.c @@ -16,6 +16,7 @@ */ static char dmi_empty_string[] = " "; +static u16 __initdata dmi_ver; /* * Catch too early calls to dmi_check_system(): */ @@ -118,12 +119,12 @@ static int __init dmi_walk_early(void (*decode)(const struct dmi_header *, return 0; } -static int __init dmi_checksum(const u8 *buf) +static int __init dmi_checksum(const u8 *buf, u8 len) { u8 sum = 0; int a; - for (a = 0; a < 15; a++) + for (a = 0; a < len; a++) sum += buf[a]; return sum == 0; @@ -161,8 +162,10 @@ static void __init dmi_save_uuid(const struct dmi_header *dm, int slot, int inde return; for (i = 0; i < 16 && (is_ff || is_00); i++) { - if(d[i] != 0x00) is_ff = 0; - if(d[i] != 0xFF) is_00 = 0; + if (d[i] != 0x00) + is_00 = 0; + if (d[i] != 0xFF) + is_ff = 0; } if (is_ff || is_00) @@ -172,7 +175,15 @@ static void __init dmi_save_uuid(const struct dmi_header *dm, int slot, int inde if (!s) return; - sprintf(s, "%pUB", d); + /* + * As of version 2.6 of the SMBIOS specification, the first 3 fields of + * the UUID are supposed to be little-endian encoded. The specification + * says that this is the defacto standard. + */ + if (dmi_ver >= 0x0206) + sprintf(s, "%pUL", d); + else + sprintf(s, "%pUB", d); dmi_ident[slot] = s; } @@ -404,35 +415,63 @@ static int __init dmi_present(const char __iomem *p) u8 buf[15]; memcpy_fromio(buf, p, 15); - if ((memcmp(buf, "_DMI_", 5) == 0) && dmi_checksum(buf)) { + if (dmi_checksum(buf, 15)) { dmi_num = (buf[13] << 8) | buf[12]; dmi_len = (buf[7] << 8) | buf[6]; dmi_base = (buf[11] << 24) | (buf[10] << 16) | (buf[9] << 8) | buf[8]; - /* - * DMI version 0.0 means that the real version is taken from - * the SMBIOS version, which we don't know at this point. 
- */ - if (buf[14] != 0) - printk(KERN_INFO "DMI %d.%d present.\n", - buf[14] >> 4, buf[14] & 0xF); - else - printk(KERN_INFO "DMI present.\n"); if (dmi_walk_early(dmi_decode) == 0) { + if (dmi_ver) + pr_info("SMBIOS %d.%d present.\n", + dmi_ver >> 8, dmi_ver & 0xFF); + else { + dmi_ver = (buf[14] & 0xF0) << 4 | + (buf[14] & 0x0F); + pr_info("Legacy DMI %d.%d present.\n", + dmi_ver >> 8, dmi_ver & 0xFF); + } dmi_dump_ids(); return 0; } } + dmi_ver = 0; return 1; } +static int __init smbios_present(const char __iomem *p) +{ + u8 buf[32]; + int offset = 0; + + memcpy_fromio(buf, p, 32); + if ((buf[5] < 32) && dmi_checksum(buf, buf[5])) { + dmi_ver = (buf[6] << 8) + buf[7]; + + /* Some BIOS report weird SMBIOS version, fix that up */ + switch (dmi_ver) { + case 0x021F: + case 0x0221: + pr_debug("SMBIOS version fixup(2.%d->2.%d)\n", + dmi_ver & 0xFF, 3); + dmi_ver = 0x0203; + break; + case 0x0233: + pr_debug("SMBIOS version fixup(2.%d->2.%d)\n", 51, 6); + dmi_ver = 0x0206; + break; + } + offset = 16; + } + return dmi_present(buf + offset); +} + void __init dmi_scan_machine(void) { char __iomem *p, *q; int rc; - if (efi_enabled) { + if (efi_enabled(EFI_CONFIG_TABLES)) { if (efi.smbios == EFI_INVALID_TABLE_ADDR) goto error; @@ -444,7 +483,7 @@ void __init dmi_scan_machine(void) if (p == NULL) goto error; - rc = dmi_present(p + 0x10); /* offset of _DMI_ string */ + rc = smbios_present(p); dmi_iounmap(p, 32); if (!rc) { dmi_available = 1; @@ -462,7 +501,12 @@ void __init dmi_scan_machine(void) goto error; for (q = p; q < p + 0x10000; q += 16) { - rc = dmi_present(q); + if (memcmp(q, "_SM_", 4) == 0 && q - p <= 0xFFE0) + rc = smbios_present(q); + else if (memcmp(q, "_DMI_", 5) == 0) + rc = dmi_present(q); + else + continue; if (!rc) { dmi_available = 1; dmi_iounmap(p, 0x10000); diff --git a/drivers/firmware/efivars.c b/drivers/firmware/efivars.c index 47408e802ab6..bfd8f4392492 100644 --- a/drivers/firmware/efivars.c +++ b/drivers/firmware/efivars.c @@ -435,12 +435,23 @@ efivar_attr_read(struct efivar_entry *entry, char *buf) if (status != EFI_SUCCESS) return -EIO; - if (var->Attributes & 0x1) + if (var->Attributes & EFI_VARIABLE_NON_VOLATILE) str += sprintf(str, "EFI_VARIABLE_NON_VOLATILE\n"); - if (var->Attributes & 0x2) + if (var->Attributes & EFI_VARIABLE_BOOTSERVICE_ACCESS) str += sprintf(str, "EFI_VARIABLE_BOOTSERVICE_ACCESS\n"); - if (var->Attributes & 0x4) + if (var->Attributes & EFI_VARIABLE_RUNTIME_ACCESS) str += sprintf(str, "EFI_VARIABLE_RUNTIME_ACCESS\n"); + if (var->Attributes & EFI_VARIABLE_HARDWARE_ERROR_RECORD) + str += sprintf(str, "EFI_VARIABLE_HARDWARE_ERROR_RECORD\n"); + if (var->Attributes & EFI_VARIABLE_AUTHENTICATED_WRITE_ACCESS) + str += sprintf(str, + "EFI_VARIABLE_AUTHENTICATED_WRITE_ACCESS\n"); + if (var->Attributes & + EFI_VARIABLE_TIME_BASED_AUTHENTICATED_WRITE_ACCESS) + str += sprintf(str, + "EFI_VARIABLE_TIME_BASED_AUTHENTICATED_WRITE_ACCESS\n"); + if (var->Attributes & EFI_VARIABLE_APPEND_WRITE) + str += sprintf(str, "EFI_VARIABLE_APPEND_WRITE\n"); return str - buf; } @@ -1213,7 +1224,7 @@ efivars_init(void) printk(KERN_INFO "EFI Variables Facility v%s %s\n", EFIVARS_VERSION, EFIVARS_DATE); - if (!efi_enabled) + if (!efi_enabled(EFI_RUNTIME_SERVICES)) return 0; /* For now we'll register the efi directory at /sys/firmware/efi */ @@ -1251,7 +1262,7 @@ err_put: static void __exit efivars_exit(void) { - if (efi_enabled) { + if (efi_enabled(EFI_RUNTIME_SERVICES)) { unregister_efivars(&__efivars); kobject_put(efi_kobj); } diff --git a/drivers/firmware/iscsi_ibft_find.c 
b/drivers/firmware/iscsi_ibft_find.c index 4da4eb9ae926..2224f1dc074b 100644 --- a/drivers/firmware/iscsi_ibft_find.c +++ b/drivers/firmware/iscsi_ibft_find.c @@ -99,7 +99,7 @@ unsigned long __init find_ibft_region(unsigned long *sizep) /* iBFT 1.03 section 1.4.3.1 mandates that UEFI machines will * only use ACPI for this */ - if (!efi_enabled) + if (!efi_enabled(EFI_BOOT)) find_ibft_in_mem(); if (ibft_addr) { diff --git a/drivers/gpio/gpio-lpc32xx.c b/drivers/gpio/gpio-lpc32xx.c index 61c2d08d37b6..e42e4b84016e 100644 --- a/drivers/gpio/gpio-lpc32xx.c +++ b/drivers/gpio/gpio-lpc32xx.c @@ -304,6 +304,7 @@ static int lpc32xx_gpio_dir_output_p012(struct gpio_chip *chip, unsigned pin, { struct lpc32xx_gpio_chip *group = to_lpc32xx_gpio(chip); + __set_gpio_level_p012(group, pin, value); __set_gpio_dir_p012(group, pin, 0); return 0; @@ -314,6 +315,7 @@ static int lpc32xx_gpio_dir_output_p3(struct gpio_chip *chip, unsigned pin, { struct lpc32xx_gpio_chip *group = to_lpc32xx_gpio(chip); + __set_gpio_level_p3(group, pin, value); __set_gpio_dir_p3(group, pin, 0); return 0; @@ -322,6 +324,9 @@ static int lpc32xx_gpio_dir_output_p3(struct gpio_chip *chip, unsigned pin, static int lpc32xx_gpio_dir_out_always(struct gpio_chip *chip, unsigned pin, int value) { + struct lpc32xx_gpio_chip *group = to_lpc32xx_gpio(chip); + + __set_gpo_level_p3(group, pin, value); return 0; } diff --git a/drivers/gpio/gpio-timberdale.c b/drivers/gpio/gpio-timberdale.c index 031c6adf5b65..1a3e2b9b4772 100644 --- a/drivers/gpio/gpio-timberdale.c +++ b/drivers/gpio/gpio-timberdale.c @@ -116,7 +116,7 @@ static void timbgpio_irq_disable(struct irq_data *d) unsigned long flags; spin_lock_irqsave(&tgpio->lock, flags); - tgpio->last_ier &= ~(1 << offset); + tgpio->last_ier &= ~(1UL << offset); iowrite32(tgpio->last_ier, tgpio->membase + TGPIO_IER); spin_unlock_irqrestore(&tgpio->lock, flags); } @@ -128,7 +128,7 @@ static void timbgpio_irq_enable(struct irq_data *d) unsigned long flags; spin_lock_irqsave(&tgpio->lock, flags); - tgpio->last_ier |= 1 << offset; + tgpio->last_ier |= 1UL << offset; iowrite32(tgpio->last_ier, tgpio->membase + TGPIO_IER); spin_unlock_irqrestore(&tgpio->lock, flags); } diff --git a/drivers/gpio/gpiolib.c b/drivers/gpio/gpiolib.c index 5a75510d66bb..112c16e08471 100644 --- a/drivers/gpio/gpiolib.c +++ b/drivers/gpio/gpiolib.c @@ -623,9 +623,11 @@ static ssize_t export_store(struct class *class, */ status = gpio_request(gpio, "sysfs"); - if (status < 0) + if (status < 0) { + if (status == -EPROBE_DEFER) + status = -ENODEV; goto done; - + } status = gpio_export(gpio, true); if (status < 0) gpio_free(gpio); @@ -1191,8 +1193,10 @@ int gpio_request(unsigned gpio, const char *label) spin_lock_irqsave(&gpio_lock, flags); - if (!gpio_is_valid(gpio)) + if (!gpio_is_valid(gpio)) { + status = -EINVAL; goto done; + } desc = &gpio_desc[gpio]; chip = desc->chip; if (chip == NULL) diff --git a/drivers/gpu/drm/drm_crtc.c b/drivers/gpu/drm/drm_crtc.c index c79870a75c2f..c61e67222160 100644 --- a/drivers/gpu/drm/drm_crtc.c +++ b/drivers/gpu/drm/drm_crtc.c @@ -1028,15 +1028,15 @@ void drm_mode_config_cleanup(struct drm_device *dev) fb->funcs->destroy(fb); } - list_for_each_entry_safe(crtc, ct, &dev->mode_config.crtc_list, head) { - crtc->funcs->destroy(crtc); - } - list_for_each_entry_safe(plane, plt, &dev->mode_config.plane_list, head) { plane->funcs->destroy(plane); } + list_for_each_entry_safe(crtc, ct, &dev->mode_config.crtc_list, head) { + crtc->funcs->destroy(crtc); + } + idr_remove_all(&dev->mode_config.crtc_idr); 
idr_destroy(&dev->mode_config.crtc_idr); } @@ -1981,7 +1981,7 @@ int drm_mode_cursor_ioctl(struct drm_device *dev, if (!drm_core_check_feature(dev, DRIVER_MODESET)) return -EINVAL; - if (!req->flags) + if (!req->flags || (~DRM_MODE_CURSOR_FLAGS & req->flags)) return -EINVAL; mutex_lock(&dev->mode_config.mutex); @@ -2023,7 +2023,7 @@ uint32_t drm_mode_legacy_fb_format(uint32_t bpp, uint32_t depth) switch (bpp) { case 8: - fmt = DRM_FORMAT_RGB332; + fmt = DRM_FORMAT_C8; break; case 16: if (depth == 15) @@ -3409,6 +3409,7 @@ void drm_fb_get_bpp_depth(uint32_t format, unsigned int *depth, int *bpp) { switch (format) { + case DRM_FORMAT_C8: case DRM_FORMAT_RGB332: case DRM_FORMAT_BGR233: *depth = 8; diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c index 6e38325d04a6..384edc677625 100644 --- a/drivers/gpu/drm/drm_edid.c +++ b/drivers/gpu/drm/drm_edid.c @@ -66,6 +66,8 @@ #define EDID_QUIRK_FIRST_DETAILED_PREFERRED (1 << 5) /* use +hsync +vsync for detailed mode */ #define EDID_QUIRK_DETAILED_SYNC_PP (1 << 6) +/* Force reduced-blanking timings for detailed modes */ +#define EDID_QUIRK_FORCE_REDUCED_BLANKING (1 << 7) struct detailed_mode_closure { struct drm_connector *connector; @@ -120,6 +122,9 @@ static struct edid_quirk { /* Samsung SyncMaster 22[5-6]BW */ { "SAM", 596, EDID_QUIRK_PREFER_LARGE_60 }, { "SAM", 638, EDID_QUIRK_PREFER_LARGE_60 }, + + /* ViewSonic VA2026w */ + { "VSC", 5020, EDID_QUIRK_FORCE_REDUCED_BLANKING }, }; /*** DDC fetch and block validation ***/ @@ -852,12 +857,19 @@ static struct drm_display_mode *drm_mode_detailed(struct drm_device *dev, "Wrong Hsync/Vsync pulse width\n"); return NULL; } + + if (quirks & EDID_QUIRK_FORCE_REDUCED_BLANKING) { + mode = drm_cvt_mode(dev, hactive, vactive, 60, true, false, false); + if (!mode) + return NULL; + + goto set_size; + } + mode = drm_mode_create(dev); if (!mode) return NULL; - mode->type = DRM_MODE_TYPE_DRIVER; - if (quirks & EDID_QUIRK_135_CLOCK_TOO_HIGH) timing->pixel_clock = cpu_to_le16(1088); @@ -881,8 +893,6 @@ static struct drm_display_mode *drm_mode_detailed(struct drm_device *dev, drm_mode_do_interlace_quirk(mode, pt); - drm_mode_set_name(mode); - if (quirks & EDID_QUIRK_DETAILED_SYNC_PP) { pt->misc |= DRM_EDID_PT_HSYNC_POSITIVE | DRM_EDID_PT_VSYNC_POSITIVE; } @@ -892,6 +902,7 @@ static struct drm_display_mode *drm_mode_detailed(struct drm_device *dev, mode->flags |= (pt->misc & DRM_EDID_PT_VSYNC_POSITIVE) ? 
DRM_MODE_FLAG_PVSYNC : DRM_MODE_FLAG_NVSYNC; +set_size: mode->width_mm = pt->width_mm_lo | (pt->width_height_mm_hi & 0xf0) << 4; mode->height_mm = pt->height_mm_lo | (pt->width_height_mm_hi & 0xf) << 8; @@ -905,6 +916,9 @@ static struct drm_display_mode *drm_mode_detailed(struct drm_device *dev, mode->height_mm = edid->height_cm * 10; } + mode->type = DRM_MODE_TYPE_DRIVER; + drm_mode_set_name(mode); + return mode; } @@ -1755,7 +1769,8 @@ int drm_add_edid_modes(struct drm_connector *connector, struct edid *edid) num_modes += add_cvt_modes(connector, edid); num_modes += add_standard_modes(connector, edid); num_modes += add_established_modes(connector, edid); - num_modes += add_inferred_modes(connector, edid); + if (edid->features & DRM_EDID_FEATURE_DEFAULT_GTF) + num_modes += add_inferred_modes(connector, edid); num_modes += add_cea_modes(connector, edid); if (quirks & (EDID_QUIRK_PREFER_LARGE_60 | EDID_QUIRK_PREFER_LARGE_75)) diff --git a/drivers/gpu/drm/drm_fops.c b/drivers/gpu/drm/drm_fops.c index 123de28f94ef..b90abff19341 100644 --- a/drivers/gpu/drm/drm_fops.c +++ b/drivers/gpu/drm/drm_fops.c @@ -139,8 +139,11 @@ int drm_open(struct inode *inode, struct file *filp) retcode = drm_open_helper(inode, filp, dev); if (!retcode) { atomic_inc(&dev->counts[_DRM_STAT_OPENS]); - if (!dev->open_count++) + if (!dev->open_count++) { retcode = drm_setup(dev); + if (retcode) + dev->open_count--; + } } if (!retcode) { mutex_lock(&dev->struct_mutex); diff --git a/drivers/gpu/drm/drm_usb.c b/drivers/gpu/drm/drm_usb.c index 37c9a523dd1c..767782a96c3a 100644 --- a/drivers/gpu/drm/drm_usb.c +++ b/drivers/gpu/drm/drm_usb.c @@ -18,7 +18,7 @@ int drm_get_usb_dev(struct usb_interface *interface, usbdev = interface_to_usbdev(interface); dev->usbdev = usbdev; - dev->dev = &usbdev->dev; + dev->dev = &interface->dev; mutex_lock(&drm_global_mutex); diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c index ae8a64f9f845..89f3d4aed664 100644 --- a/drivers/gpu/drm/i915/i915_drv.c +++ b/drivers/gpu/drm/i915/i915_drv.c @@ -224,6 +224,7 @@ static const struct intel_device_info intel_sandybridge_d_info = { .has_bsd_ring = 1, .has_blt_ring = 1, .has_llc = 1, + .has_force_wake = 1, }; static const struct intel_device_info intel_sandybridge_m_info = { @@ -233,6 +234,7 @@ static const struct intel_device_info intel_sandybridge_m_info = { .has_bsd_ring = 1, .has_blt_ring = 1, .has_llc = 1, + .has_force_wake = 1, }; static const struct intel_device_info intel_ivybridge_d_info = { @@ -241,6 +243,7 @@ static const struct intel_device_info intel_ivybridge_d_info = { .has_bsd_ring = 1, .has_blt_ring = 1, .has_llc = 1, + .has_force_wake = 1, }; static const struct intel_device_info intel_ivybridge_m_info = { @@ -250,6 +253,7 @@ static const struct intel_device_info intel_ivybridge_m_info = { .has_bsd_ring = 1, .has_blt_ring = 1, .has_llc = 1, + .has_force_wake = 1, }; static const struct pci_device_id pciidlist[] = { /* aka */ @@ -993,6 +997,12 @@ MODULE_AUTHOR(DRIVER_AUTHOR); MODULE_DESCRIPTION(DRIVER_DESC); MODULE_LICENSE("GPL and additional rights"); +/* We give fast paths for the really cool registers */ +#define NEEDS_FORCE_WAKE(dev_priv, reg) \ + ((HAS_FORCE_WAKE((dev_priv)->dev)) && \ + ((reg) < 0x40000) && \ + ((reg) != FORCEWAKE)) + #define __i915_read(x, y) \ u##x i915_read##x(struct drm_i915_private *dev_priv, u32 reg) { \ u##x val = 0; \ diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index 5fabc6c31fec..45c5cf83fae2 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ 
b/drivers/gpu/drm/i915/i915_drv.h @@ -255,6 +255,7 @@ struct intel_device_info { u8 is_broadwater:1; u8 is_crestline:1; u8 is_ivybridge:1; + u8 has_force_wake:1; u8 has_fbc:1; u8 has_pipe_cxsr:1; u8 has_hotplug:1; @@ -405,6 +406,8 @@ typedef struct drm_i915_private { unsigned int lvds_use_ssc:1; unsigned int display_clock_mode:1; int lvds_ssc_freq; + unsigned int bios_lvds_val; /* initial [PCH_]LVDS reg val in VBIOS */ + unsigned int lvds_val; /* used for checking LVDS channel mode */ struct { int rate; int lanes; @@ -1051,6 +1054,8 @@ struct drm_i915_file_private { #define HAS_PCH_CPT(dev) (INTEL_PCH_TYPE(dev) == PCH_CPT) #define HAS_PCH_IBX(dev) (INTEL_PCH_TYPE(dev) == PCH_IBX) +#define HAS_FORCE_WAKE(dev) (INTEL_INFO(dev)->has_force_wake) + #include "i915_trace.h" /** @@ -1450,12 +1455,6 @@ void gen6_gt_force_wake_get(struct drm_i915_private *dev_priv); void gen6_gt_force_wake_put(struct drm_i915_private *dev_priv); int __gen6_gt_wait_for_fifo(struct drm_i915_private *dev_priv); -/* We give fast paths for the really cool registers */ -#define NEEDS_FORCE_WAKE(dev_priv, reg) \ - (((dev_priv)->info->gen >= 6) && \ - ((reg) < 0x40000) && \ - ((reg) != FORCEWAKE)) - #define __i915_read(x, y) \ u##x i915_read##x(struct drm_i915_private *dev_priv, u32 reg); diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index 0d1e4b7b4b99..a230a93935bd 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c @@ -928,6 +928,7 @@ i915_gem_pwrite_ioctl(struct drm_device *dev, void *data, } if (obj->gtt_space && + obj->tiling_mode == I915_TILING_NONE && obj->base.write_domain != I915_GEM_DOMAIN_CPU) { ret = i915_gem_object_pin(obj, 0, true); if (ret) @@ -3317,7 +3318,8 @@ i915_gem_object_pin(struct drm_i915_gem_object *obj, struct drm_i915_private *dev_priv = dev->dev_private; int ret; - BUG_ON(obj->pin_count == DRM_I915_GEM_OBJECT_MAX_PIN_COUNT); + if (WARN_ON(obj->pin_count == DRM_I915_GEM_OBJECT_MAX_PIN_COUNT)) + return -EBUSY; WARN_ON(i915_verify_lists(dev)); if (obj->gtt_space != NULL) { diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c index de431942ded4..d4417e3cc3da 100644 --- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c +++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c @@ -707,6 +707,8 @@ i915_gem_execbuffer_relocate_slow(struct drm_device *dev, total = 0; for (i = 0; i < count; i++) { struct drm_i915_gem_relocation_entry __user *user_relocs; + u64 invalid_offset = (u64)-1; + int j; user_relocs = (void __user *)(uintptr_t)exec[i].relocs_ptr; @@ -717,6 +719,25 @@ i915_gem_execbuffer_relocate_slow(struct drm_device *dev, goto err; } + /* As we do not update the known relocation offsets after + * relocating (due to the complexities in lock handling), + * we need to mark them as invalid now so that we force the + * relocation processing next time. Just in case the target + * object is evicted and then rebound into its old + * presumed_offset before the next execbuffer - if that + * happened we would make the mistake of assuming that the + * relocations were valid. 
+ */ + for (j = 0; j < exec[i].relocation_count; j++) { + if (copy_to_user(&user_relocs[j].presumed_offset, + &invalid_offset, + sizeof(invalid_offset))) { + ret = -EFAULT; + mutex_lock(&dev->struct_mutex); + goto err; + } + } + reloc_offset[i] = total; total += exec[i].relocation_count; } diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c index 26c67a788770..8bca2d2d804d 100644 --- a/drivers/gpu/drm/i915/i915_irq.c +++ b/drivers/gpu/drm/i915/i915_irq.c @@ -530,6 +530,12 @@ static irqreturn_t ivybridge_irq_handler(DRM_IRQ_ARGS) if (de_iir & DE_GSE_IVB) intel_opregion_gse_intr(dev); + if (de_iir & DE_PIPEA_VBLANK_IVB) + drm_handle_vblank(dev, 0); + + if (de_iir & DE_PIPEB_VBLANK_IVB) + drm_handle_vblank(dev, 1); + if (de_iir & DE_PLANEA_FLIP_DONE_IVB) { intel_prepare_page_flip(dev, 0); intel_finish_page_flip_plane(dev, 0); @@ -540,12 +546,6 @@ static irqreturn_t ivybridge_irq_handler(DRM_IRQ_ARGS) intel_finish_page_flip_plane(dev, 1); } - if (de_iir & DE_PIPEA_VBLANK_IVB) - drm_handle_vblank(dev, 0); - - if (de_iir & DE_PIPEB_VBLANK_IVB) - drm_handle_vblank(dev, 1); - /* check event from PCH */ if (de_iir & DE_PCH_EVENT_IVB) { if (pch_iir & SDE_HOTPLUG_MASK_CPT) @@ -622,6 +622,12 @@ static irqreturn_t ironlake_irq_handler(DRM_IRQ_ARGS) if (de_iir & DE_GSE) intel_opregion_gse_intr(dev); + if (de_iir & DE_PIPEA_VBLANK) + drm_handle_vblank(dev, 0); + + if (de_iir & DE_PIPEB_VBLANK) + drm_handle_vblank(dev, 1); + if (de_iir & DE_PLANEA_FLIP_DONE) { intel_prepare_page_flip(dev, 0); intel_finish_page_flip_plane(dev, 0); @@ -632,12 +638,6 @@ static irqreturn_t ironlake_irq_handler(DRM_IRQ_ARGS) intel_finish_page_flip_plane(dev, 1); } - if (de_iir & DE_PIPEA_VBLANK) - drm_handle_vblank(dev, 0); - - if (de_iir & DE_PIPEB_VBLANK) - drm_handle_vblank(dev, 1); - /* check event from PCH */ if (de_iir & DE_PCH_EVENT) { if (pch_iir & hotplug_mask) @@ -2051,10 +2051,22 @@ static int i915_driver_irq_postinstall(struct drm_device *dev) hotplug_en |= HDMIC_HOTPLUG_INT_EN; if (dev_priv->hotplug_supported_mask & HDMID_HOTPLUG_INT_STATUS) hotplug_en |= HDMID_HOTPLUG_INT_EN; - if (dev_priv->hotplug_supported_mask & SDVOC_HOTPLUG_INT_STATUS) - hotplug_en |= SDVOC_HOTPLUG_INT_EN; - if (dev_priv->hotplug_supported_mask & SDVOB_HOTPLUG_INT_STATUS) - hotplug_en |= SDVOB_HOTPLUG_INT_EN; + if (IS_G4X(dev)) { + if (dev_priv->hotplug_supported_mask & SDVOC_HOTPLUG_INT_STATUS_G4X) + hotplug_en |= SDVOC_HOTPLUG_INT_EN; + if (dev_priv->hotplug_supported_mask & SDVOB_HOTPLUG_INT_STATUS_G4X) + hotplug_en |= SDVOB_HOTPLUG_INT_EN; + } else if (IS_GEN4(dev)) { + if (dev_priv->hotplug_supported_mask & SDVOC_HOTPLUG_INT_STATUS_I965) + hotplug_en |= SDVOC_HOTPLUG_INT_EN; + if (dev_priv->hotplug_supported_mask & SDVOB_HOTPLUG_INT_STATUS_I965) + hotplug_en |= SDVOB_HOTPLUG_INT_EN; + } else { + if (dev_priv->hotplug_supported_mask & SDVOC_HOTPLUG_INT_STATUS_I915) + hotplug_en |= SDVOC_HOTPLUG_INT_EN; + if (dev_priv->hotplug_supported_mask & SDVOB_HOTPLUG_INT_STATUS_I915) + hotplug_en |= SDVOB_HOTPLUG_INT_EN; + } if (dev_priv->hotplug_supported_mask & CRT_HOTPLUG_INT_STATUS) { hotplug_en |= CRT_HOTPLUG_INT_EN; diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h index 29bfd8995738..dde62bf3b1cf 100644 --- a/drivers/gpu/drm/i915/i915_reg.h +++ b/drivers/gpu/drm/i915/i915_reg.h @@ -27,6 +27,8 @@ #define _PIPE(pipe, a, b) ((a) + (pipe)*((b)-(a))) +#define _MASKED_BIT_ENABLE(a) (((a) << 16) | (a)) + /* * The Bridge device's PCI config space has information about the * fb aperture size 
and the amount of pre-reserved memory. @@ -197,6 +199,14 @@ #define MI_DISPLAY_FLIP MI_INSTR(0x14, 2) #define MI_DISPLAY_FLIP_I915 MI_INSTR(0x14, 1) #define MI_DISPLAY_FLIP_PLANE(n) ((n) << 20) +/* IVB has funny definitions for which plane to flip. */ +#define MI_DISPLAY_FLIP_IVB_PLANE_A (0 << 19) +#define MI_DISPLAY_FLIP_IVB_PLANE_B (1 << 19) +#define MI_DISPLAY_FLIP_IVB_SPRITE_A (2 << 19) +#define MI_DISPLAY_FLIP_IVB_SPRITE_B (3 << 19) +#define MI_DISPLAY_FLIP_IVB_PLANE_C (4 << 19) +#define MI_DISPLAY_FLIP_IVB_SPRITE_C (5 << 19) + #define MI_SET_CONTEXT MI_INSTR(0x18, 0) #define MI_MM_SPACE_GTT (1<<8) #define MI_MM_SPACE_PHYSICAL (0<<8) @@ -425,6 +435,7 @@ * the enables for writing to the corresponding low bit. */ #define _3D_CHICKEN 0x02084 +#define _3D_CHICKEN_HIZ_PLANE_DISABLE_MSAA_4X_SNB (1 << 10) #define _3D_CHICKEN2 0x0208c /* Disables pipelining of read flushes past the SF-WIZ interface. * Required on all Ironlake steppings according to the B-Spec, but the @@ -437,6 +448,9 @@ # define VS_TIMER_DISPATCH (1 << 6) # define MI_FLUSH_ENABLE (1 << 12) +#define GEN6_GT_MODE 0x20d0 +#define GEN6_GT_MODE_HI (1 << 9) + #define GFX_MODE 0x02520 #define GFX_MODE_GEN7 0x0229c #define RING_MODE_GEN7(ring) ((ring)->mmio_base+0x29c) @@ -1483,14 +1497,20 @@ #define DPC_HOTPLUG_INT_STATUS (1 << 28) #define HDMID_HOTPLUG_INT_STATUS (1 << 27) #define DPD_HOTPLUG_INT_STATUS (1 << 27) +/* CRT/TV common between gen3+ */ #define CRT_HOTPLUG_INT_STATUS (1 << 11) #define TV_HOTPLUG_INT_STATUS (1 << 10) #define CRT_HOTPLUG_MONITOR_MASK (3 << 8) #define CRT_HOTPLUG_MONITOR_COLOR (3 << 8) #define CRT_HOTPLUG_MONITOR_MONO (2 << 8) #define CRT_HOTPLUG_MONITOR_NONE (0 << 8) -#define SDVOC_HOTPLUG_INT_STATUS (1 << 7) -#define SDVOB_HOTPLUG_INT_STATUS (1 << 6) +/* SDVO is different across gen3/4 */ +#define SDVOC_HOTPLUG_INT_STATUS_G4X (1 << 3) +#define SDVOB_HOTPLUG_INT_STATUS_G4X (1 << 2) +#define SDVOC_HOTPLUG_INT_STATUS_I965 (3 << 4) +#define SDVOB_HOTPLUG_INT_STATUS_I965 (3 << 2) +#define SDVOC_HOTPLUG_INT_STATUS_I915 (1 << 7) +#define SDVOB_HOTPLUG_INT_STATUS_I915 (1 << 6) /* SDVO port control */ #define SDVOB 0x61140 @@ -3015,6 +3035,8 @@ #define _PFA_CTL_1 0x68080 #define _PFB_CTL_1 0x68880 #define PF_ENABLE (1<<31) +#define PF_PIPE_SEL_MASK_IVB (3<<29) +#define PF_PIPE_SEL_IVB(pipe) ((pipe)<<29) #define PF_FILTER_MASK (3<<23) #define PF_FILTER_PROGRAMMED (0<<23) #define PF_FILTER_MED_3x3 (1<<23) diff --git a/drivers/gpu/drm/i915/intel_bios.c b/drivers/gpu/drm/i915/intel_bios.c index b48fc2a8410c..a2c9e56c0c1a 100644 --- a/drivers/gpu/drm/i915/intel_bios.c +++ b/drivers/gpu/drm/i915/intel_bios.c @@ -174,6 +174,28 @@ get_lvds_dvo_timing(const struct bdb_lvds_lfp_data *lvds_lfp_data, return (struct lvds_dvo_timing *)(entry + dvo_timing_offset); } +/* get lvds_fp_timing entry + * this function may return NULL if the corresponding entry is invalid + */ +static const struct lvds_fp_timing * +get_lvds_fp_timing(const struct bdb_header *bdb, + const struct bdb_lvds_lfp_data *data, + const struct bdb_lvds_lfp_data_ptrs *ptrs, + int index) +{ + size_t data_ofs = (const u8 *)data - (const u8 *)bdb; + u16 data_size = ((const u16 *)data)[-1]; /* stored in header */ + size_t ofs; + + if (index >= ARRAY_SIZE(ptrs->ptr)) + return NULL; + ofs = ptrs->ptr[index].fp_timing_offset; + if (ofs < data_ofs || + ofs + sizeof(struct lvds_fp_timing) > data_ofs + data_size) + return NULL; + return (const struct lvds_fp_timing *)((const u8 *)bdb + ofs); +} + /* Try to find integrated panel data */ static void 
parse_lfp_panel_data(struct drm_i915_private *dev_priv, @@ -183,6 +205,7 @@ parse_lfp_panel_data(struct drm_i915_private *dev_priv, const struct bdb_lvds_lfp_data *lvds_lfp_data; const struct bdb_lvds_lfp_data_ptrs *lvds_lfp_data_ptrs; const struct lvds_dvo_timing *panel_dvo_timing; + const struct lvds_fp_timing *fp_timing; struct drm_display_mode *panel_fixed_mode; int i, downclock; @@ -244,6 +267,19 @@ parse_lfp_panel_data(struct drm_i915_private *dev_priv, "Normal Clock %dKHz, downclock %dKHz\n", panel_fixed_mode->clock, 10*downclock); } + + fp_timing = get_lvds_fp_timing(bdb, lvds_lfp_data, + lvds_lfp_data_ptrs, + lvds_options->panel_type); + if (fp_timing) { + /* check the resolution, just to be sure */ + if (fp_timing->x_res == panel_fixed_mode->hdisplay && + fp_timing->y_res == panel_fixed_mode->vdisplay) { + dev_priv->bios_lvds_val = fp_timing->lvds_reg_val; + DRM_DEBUG_KMS("VBT initial LVDS value %x\n", + dev_priv->bios_lvds_val); + } + } } /* Try to find sdvo panel data */ @@ -459,12 +495,8 @@ parse_edp(struct drm_i915_private *dev_priv, struct bdb_header *bdb) edp = find_section(bdb, BDB_EDP); if (!edp) { - if (SUPPORTS_EDP(dev_priv->dev) && dev_priv->edp.support) { - DRM_DEBUG_KMS("No eDP BDB found but eDP panel " - "supported, assume %dbpp panel color " - "depth.\n", - dev_priv->edp.bpp); - } + if (SUPPORTS_EDP(dev_priv->dev) && dev_priv->edp.support) + DRM_DEBUG_KMS("No eDP BDB found but eDP panel supported.\n"); return; } @@ -617,9 +649,6 @@ init_vbt_defaults(struct drm_i915_private *dev_priv) dev_priv->lvds_use_ssc = 1; dev_priv->lvds_ssc_freq = intel_bios_ssc_frequency(dev, 1); DRM_DEBUG_KMS("Set default to SSC at %dMHz\n", dev_priv->lvds_ssc_freq); - - /* eDP data */ - dev_priv->edp.bpp = 18; } static int __init intel_no_opregion_vbt_callback(const struct dmi_system_id *id) diff --git a/drivers/gpu/drm/i915/intel_crt.c b/drivers/gpu/drm/i915/intel_crt.c index 90b9793fd5da..342ffb7ec3d2 100644 --- a/drivers/gpu/drm/i915/intel_crt.c +++ b/drivers/gpu/drm/i915/intel_crt.c @@ -266,6 +266,36 @@ static bool intel_crt_detect_hotplug(struct drm_connector *connector) return ret; } +static struct edid *intel_crt_get_edid(struct drm_connector *connector, + struct i2c_adapter *i2c) +{ + struct edid *edid; + + edid = drm_get_edid(connector, i2c); + + if (!edid && !intel_gmbus_is_forced_bit(i2c)) { + DRM_DEBUG_KMS("CRT GMBUS EDID read failed, retry using GPIO bit-banging\n"); + intel_gmbus_force_bit(i2c, true); + edid = drm_get_edid(connector, i2c); + intel_gmbus_force_bit(i2c, false); + } + + return edid; +} + +/* local version of intel_ddc_get_modes() to use intel_crt_get_edid() */ +static int intel_crt_ddc_get_modes(struct drm_connector *connector, + struct i2c_adapter *adapter) +{ + struct edid *edid; + + edid = intel_crt_get_edid(connector, adapter); + if (!edid) + return 0; + + return intel_connector_update_modes(connector, edid); +} + static bool intel_crt_detect_ddc(struct drm_connector *connector) { struct intel_crt *crt = intel_attached_crt(connector); @@ -279,7 +309,7 @@ static bool intel_crt_detect_ddc(struct drm_connector *connector) struct edid *edid; bool is_digital = false; - edid = drm_get_edid(connector, + edid = intel_crt_get_edid(connector, &dev_priv->gmbus[dev_priv->crt_ddc_pin].adapter); /* * This may be a DVI-I connector with a shared DDC @@ -477,13 +507,13 @@ static int intel_crt_get_modes(struct drm_connector *connector) struct drm_i915_private *dev_priv = dev->dev_private; int ret; - ret = intel_ddc_get_modes(connector, + ret = 
intel_crt_ddc_get_modes(connector, &dev_priv->gmbus[dev_priv->crt_ddc_pin].adapter); if (ret || !IS_G4X(dev)) return ret; /* Try to probe digital port for output in DVI-I -> VGA mode. */ - return intel_ddc_get_modes(connector, + return intel_crt_ddc_get_modes(connector, &dev_priv->gmbus[GMBUS_PORT_DPB].adapter); } diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index 3de3d9b670bb..67f6db534090 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c @@ -142,8 +142,8 @@ static const intel_limit_t intel_limits_i9xx_sdvo = { .vco = { .min = 1400000, .max = 2800000 }, .n = { .min = 1, .max = 6 }, .m = { .min = 70, .max = 120 }, - .m1 = { .min = 10, .max = 22 }, - .m2 = { .min = 5, .max = 9 }, + .m1 = { .min = 8, .max = 18 }, + .m2 = { .min = 3, .max = 7 }, .p = { .min = 5, .max = 80 }, .p1 = { .min = 1, .max = 8 }, .p2 = { .dot_limit = 200000, @@ -360,6 +360,27 @@ static const intel_limit_t intel_limits_ironlake_display_port = { .find_pll = intel_find_pll_ironlake_dp, }; +static bool is_dual_link_lvds(struct drm_i915_private *dev_priv, + unsigned int reg) +{ + unsigned int val; + + if (dev_priv->lvds_val) + val = dev_priv->lvds_val; + else { + /* BIOS should set the proper LVDS register value at boot, but + * in reality, it doesn't set the value when the lid is closed; + * we need to check "the value to be set" in VBT when LVDS + * register is uninitialized. + */ + val = I915_READ(reg); + if (!(val & ~LVDS_DETECTED)) + val = dev_priv->bios_lvds_val; + dev_priv->lvds_val = val; + } + return (val & LVDS_CLKB_POWER_MASK) == LVDS_CLKB_POWER_UP; +} + static const intel_limit_t *intel_ironlake_limit(struct drm_crtc *crtc, int refclk) { @@ -368,8 +389,7 @@ static const intel_limit_t *intel_ironlake_limit(struct drm_crtc *crtc, const intel_limit_t *limit; if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) { - if ((I915_READ(PCH_LVDS) & LVDS_CLKB_POWER_MASK) == - LVDS_CLKB_POWER_UP) { + if (is_dual_link_lvds(dev_priv, PCH_LVDS)) { /* LVDS dual channel */ if (refclk == 100000) limit = &intel_limits_ironlake_dual_lvds_100m; @@ -397,8 +417,7 @@ static const intel_limit_t *intel_g4x_limit(struct drm_crtc *crtc) const intel_limit_t *limit; if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) { - if ((I915_READ(LVDS) & LVDS_CLKB_POWER_MASK) == - LVDS_CLKB_POWER_UP) + if (is_dual_link_lvds(dev_priv, LVDS)) /* LVDS with dual channel */ limit = &intel_limits_g4x_dual_channel_lvds; else @@ -536,8 +555,7 @@ intel_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc, * reliably set up different single/dual channel state, if we * even can. 
*/ - if ((I915_READ(LVDS) & LVDS_CLKB_POWER_MASK) == - LVDS_CLKB_POWER_UP) + if (is_dual_link_lvds(dev_priv, LVDS)) clock.p2 = limit->p2.p2_fast; else clock.p2 = limit->p2.p2_slow; @@ -1099,7 +1117,7 @@ static void assert_pch_hdmi_disabled(struct drm_i915_private *dev_priv, enum pipe pipe, int reg) { u32 val = I915_READ(reg); - WARN(hdmi_pipe_enabled(dev_priv, val, pipe), + WARN(hdmi_pipe_enabled(dev_priv, pipe, val), "PCH HDMI (0x%08x) enabled on transcoder %c, should be disabled\n", reg, pipe_name(pipe)); } @@ -1116,13 +1134,13 @@ static void assert_pch_ports_disabled(struct drm_i915_private *dev_priv, reg = PCH_ADPA; val = I915_READ(reg); - WARN(adpa_pipe_enabled(dev_priv, val, pipe), + WARN(adpa_pipe_enabled(dev_priv, pipe, val), "PCH VGA enabled on transcoder %c, should be disabled\n", pipe_name(pipe)); reg = PCH_LVDS; val = I915_READ(reg); - WARN(lvds_pipe_enabled(dev_priv, val, pipe), + WARN(lvds_pipe_enabled(dev_priv, pipe, val), "PCH LVDS enabled on transcoder %c, should be disabled\n", pipe_name(pipe)); @@ -1487,7 +1505,7 @@ static void disable_pch_hdmi(struct drm_i915_private *dev_priv, enum pipe pipe, int reg) { u32 val = I915_READ(reg); - if (hdmi_pipe_enabled(dev_priv, val, pipe)) { + if (hdmi_pipe_enabled(dev_priv, pipe, val)) { DRM_DEBUG_KMS("Disabling pch HDMI %x on pipe %d\n", reg, pipe); I915_WRITE(reg, val & ~PORT_ENABLE); @@ -1509,12 +1527,12 @@ static void intel_disable_pch_ports(struct drm_i915_private *dev_priv, reg = PCH_ADPA; val = I915_READ(reg); - if (adpa_pipe_enabled(dev_priv, val, pipe)) + if (adpa_pipe_enabled(dev_priv, pipe, val)) I915_WRITE(reg, val & ~ADPA_DAC_ENABLE); reg = PCH_LVDS; val = I915_READ(reg); - if (lvds_pipe_enabled(dev_priv, val, pipe)) { + if (lvds_pipe_enabled(dev_priv, pipe, val)) { DRM_DEBUG_KMS("disable lvds on pipe %d val 0x%08x\n", pipe, val); I915_WRITE(reg, val & ~LVDS_PORT_EN); POSTING_READ(reg); @@ -2424,18 +2442,6 @@ static void intel_fdi_normal_train(struct drm_crtc *crtc) FDI_FE_ERRC_ENABLE); } -static void cpt_phase_pointer_enable(struct drm_device *dev, int pipe) -{ - struct drm_i915_private *dev_priv = dev->dev_private; - u32 flags = I915_READ(SOUTH_CHICKEN1); - - flags |= FDI_PHASE_SYNC_OVR(pipe); - I915_WRITE(SOUTH_CHICKEN1, flags); /* once to unlock... */ - flags |= FDI_PHASE_SYNC_EN(pipe); - I915_WRITE(SOUTH_CHICKEN1, flags); /* then again to enable */ - POSTING_READ(SOUTH_CHICKEN1); -} - /* The FDI link training functions for ILK/Ibexpeak. */ static void ironlake_fdi_link_train(struct drm_crtc *crtc) { @@ -2586,9 +2592,6 @@ static void gen6_fdi_link_train(struct drm_crtc *crtc) POSTING_READ(reg); udelay(150); - if (HAS_PCH_CPT(dev)) - cpt_phase_pointer_enable(dev, pipe); - for (i = 0; i < 4; i++) { reg = FDI_TX_CTL(pipe); temp = I915_READ(reg); @@ -2707,9 +2710,6 @@ static void ivb_manual_fdi_link_train(struct drm_crtc *crtc) POSTING_READ(reg); udelay(150); - if (HAS_PCH_CPT(dev)) - cpt_phase_pointer_enable(dev, pipe); - for (i = 0; i < 4; i++) { reg = FDI_TX_CTL(pipe); temp = I915_READ(reg); @@ -2819,17 +2819,6 @@ static void ironlake_fdi_pll_enable(struct drm_crtc *crtc) } } -static void cpt_phase_pointer_disable(struct drm_device *dev, int pipe) -{ - struct drm_i915_private *dev_priv = dev->dev_private; - u32 flags = I915_READ(SOUTH_CHICKEN1); - - flags &= ~(FDI_PHASE_SYNC_EN(pipe)); - I915_WRITE(SOUTH_CHICKEN1, flags); /* once to disable... 
*/ - flags &= ~(FDI_PHASE_SYNC_OVR(pipe)); - I915_WRITE(SOUTH_CHICKEN1, flags); /* then again to lock */ - POSTING_READ(SOUTH_CHICKEN1); -} static void ironlake_fdi_disable(struct drm_crtc *crtc) { struct drm_device *dev = crtc->dev; @@ -2859,8 +2848,6 @@ static void ironlake_fdi_disable(struct drm_crtc *crtc) I915_WRITE(FDI_RX_CHICKEN(pipe), I915_READ(FDI_RX_CHICKEN(pipe) & ~FDI_RX_PHASE_SYNC_POINTER_EN)); - } else if (HAS_PCH_CPT(dev)) { - cpt_phase_pointer_disable(dev, pipe); } /* still set train pattern 1 */ @@ -2908,18 +2895,37 @@ static void intel_clear_scanline_wait(struct drm_device *dev) I915_WRITE_CTL(ring, tmp); } +static bool intel_crtc_has_pending_flip(struct drm_crtc *crtc) +{ + struct drm_device *dev = crtc->dev; + struct drm_i915_private *dev_priv = dev->dev_private; + unsigned long flags; + bool pending; + + if (atomic_read(&dev_priv->mm.wedged)) + return false; + + spin_lock_irqsave(&dev->event_lock, flags); + pending = to_intel_crtc(crtc)->unpin_work != NULL; + spin_unlock_irqrestore(&dev->event_lock, flags); + + return pending; +} + static void intel_crtc_wait_for_pending_flips(struct drm_crtc *crtc) { - struct drm_i915_gem_object *obj; - struct drm_i915_private *dev_priv; + struct drm_device *dev = crtc->dev; + struct drm_i915_private *dev_priv = dev->dev_private; if (crtc->fb == NULL) return; - obj = to_intel_framebuffer(crtc->fb)->obj; - dev_priv = crtc->dev->dev_private; wait_event(dev_priv->pending_flip_queue, - atomic_read(&obj->pending_flip) == 0); + !intel_crtc_has_pending_flip(crtc)); + + mutex_lock(&dev->struct_mutex); + intel_finish_fb(crtc->fb); + mutex_unlock(&dev->struct_mutex); } static bool intel_crtc_driving_pch(struct drm_crtc *crtc) @@ -3095,7 +3101,11 @@ static void ironlake_crtc_enable(struct drm_crtc *crtc) * as some pre-programmed values are broken, * e.g. x201. */ - I915_WRITE(PF_CTL(pipe), PF_ENABLE | PF_FILTER_MED_3x3); + if (IS_IVYBRIDGE(dev)) + I915_WRITE(PF_CTL(pipe), PF_ENABLE | PF_FILTER_MED_3x3 | + PF_PIPE_SEL_IVB(pipe)); + else + I915_WRITE(PF_CTL(pipe), PF_ENABLE | PF_FILTER_MED_3x3); I915_WRITE(PF_WIN_POS(pipe), dev_priv->pch_pf_pos); I915_WRITE(PF_WIN_SZ(pipe), dev_priv->pch_pf_size); } @@ -3293,6 +3303,7 @@ static void i9xx_crtc_disable(struct drm_crtc *crtc) struct intel_crtc *intel_crtc = to_intel_crtc(crtc); int pipe = intel_crtc->pipe; int plane = intel_crtc->plane; + u32 pctl; if (!intel_crtc->active) return; @@ -3308,6 +3319,13 @@ static void i9xx_crtc_disable(struct drm_crtc *crtc) intel_disable_plane(dev_priv, plane, pipe); intel_disable_pipe(dev_priv, pipe); + + /* Disable pannel fitter if it is on this pipe. */ + pctl = I915_READ(PFIT_CONTROL); + if ((pctl & PFIT_ENABLE) && + ((pctl & PFIT_PIPE_MASK) >> PFIT_PIPE_SHIFT) == pipe) + I915_WRITE(PFIT_CONTROL, 0); + intel_disable_pll(dev_priv, pipe); intel_crtc->active = false; @@ -3381,23 +3399,6 @@ static void intel_crtc_disable(struct drm_crtc *crtc) struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private; struct drm_device *dev = crtc->dev; - /* Flush any pending WAITs before we disable the pipe. Note that - * we need to drop the struct_mutex in order to acquire it again - * during the lowlevel dpms routines around a couple of the - * operations. It does not look trivial nor desirable to move - * that locking higher. So instead we leave a window for the - * submission of further commands on the fb before we can actually - * disable it. 
This race with userspace exists anyway, and we can - * only rely on the pipe being disabled by userspace after it - * receives the hotplug notification and has flushed any pending - * batches. - */ - if (crtc->fb) { - mutex_lock(&dev->struct_mutex); - intel_finish_fb(crtc->fb); - mutex_unlock(&dev->struct_mutex); - } - crtc_funcs->dpms(crtc, DRM_MODE_DPMS_OFF); assert_plane_disabled(dev->dev_private, to_intel_crtc(crtc)->plane); assert_pipe_disabled(dev->dev_private, to_intel_crtc(crtc)->pipe); @@ -4996,6 +4997,17 @@ static bool intel_choose_pipe_bpp_dither(struct drm_crtc *crtc, } } + if (intel_encoder->type == INTEL_OUTPUT_EDP) { + /* Use VBT settings if we have an eDP panel */ + unsigned int edp_bpc = dev_priv->edp.bpp / 3; + + if (edp_bpc && edp_bpc < display_bpc) { + DRM_DEBUG_KMS("clamping display bpc (was %d) to eDP (%d)\n", display_bpc, edp_bpc); + display_bpc = edp_bpc; + } + continue; + } + /* * HDMI is either 12 or 8, so if the display lets 10bpc sneak * through, clamp it down. (Note: >12bpc will be caught below.) @@ -5318,7 +5330,7 @@ static int i9xx_crtc_mode_set(struct drm_crtc *crtc, /* default to 8bpc */ pipeconf &= ~(PIPECONF_BPP_MASK | PIPECONF_DITHER_EN); if (is_dp) { - if (mode->private_flags & INTEL_MODE_DP_FORCE_6BPC) { + if (adjusted_mode->private_flags & INTEL_MODE_DP_FORCE_6BPC) { pipeconf |= PIPECONF_BPP_6 | PIPECONF_DITHER_EN | PIPECONF_DITHER_TYPE_SP; @@ -5782,7 +5794,7 @@ static int ironlake_crtc_mode_set(struct drm_crtc *crtc, /* determine panel color depth */ temp = I915_READ(PIPECONF(pipe)); temp &= ~PIPE_BPC_MASK; - dither = intel_choose_pipe_bpp_dither(crtc, &pipe_bpp, mode); + dither = intel_choose_pipe_bpp_dither(crtc, &pipe_bpp, adjusted_mode); switch (pipe_bpp) { case 18: temp |= PIPE_6BPC; @@ -7275,9 +7287,8 @@ static void do_intel_finish_page_flip(struct drm_device *dev, atomic_clear_mask(1 << intel_crtc->plane, &obj->pending_flip.counter); - if (atomic_read(&obj->pending_flip) == 0) - wake_up(&dev_priv->pending_flip_queue); + wake_up(&dev_priv->pending_flip_queue); schedule_work(&work->work); trace_i915_flip_complete(intel_crtc->plane, work->pending_flip_obj); @@ -7329,14 +7340,14 @@ static int intel_gen2_queue_flip(struct drm_device *dev, ret = intel_pin_and_fence_fb_obj(dev, obj, LP_RING(dev_priv)); if (ret) - goto out; + goto err; /* Offset into the new buffer for cases of shared fbs between CRTCs */ offset = crtc->y * fb->pitches[0] + crtc->x * fb->bits_per_pixel/8; ret = BEGIN_LP_RING(6); if (ret) - goto out; + goto err_unpin; /* Can't queue multiple flips, so wait for the previous * one to finish before executing the next. 
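The queue-flip hunks that follow replace the single "out:" label with separate "err" and "err_unpin" labels so that a framebuffer object pinned by the first step is unpinned again when a later step (ring allocation or command emission) fails. A generic, self-contained sketch of that goto-unwind idiom, using hypothetical acquire/release helpers rather than the driver's real pin/ring functions:

#include <errno.h>
#include <stdio.h>

/* Hypothetical helpers standing in for pin_and_fence / ring_begin / unpin. */
static int  acquire_first(void)  { return 0; }
static void release_first(void)  { }
static int  acquire_second(void) { return -ENOMEM; /* pretend this step fails */ }

static int do_operation(void)
{
	int ret;

	ret = acquire_first();
	if (ret)
		goto err;          /* nothing acquired yet, nothing to undo */

	ret = acquire_second();
	if (ret)
		goto err_release;  /* undo the first acquisition before returning */

	/* ... emit commands; on success fall through to return 0 ... */
	return 0;

err_release:
	release_first();
err:
	return ret;
}

int main(void)
{
	printf("do_operation() = %d\n", do_operation());
	return 0;
}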
@@ -7353,7 +7364,11 @@ static int intel_gen2_queue_flip(struct drm_device *dev, OUT_RING(obj->gtt_offset + offset); OUT_RING(0); /* aux display base address, unused */ ADVANCE_LP_RING(); -out: + return 0; + +err_unpin: + intel_unpin_fb_obj(obj); +err: return ret; } @@ -7370,14 +7385,14 @@ static int intel_gen3_queue_flip(struct drm_device *dev, ret = intel_pin_and_fence_fb_obj(dev, obj, LP_RING(dev_priv)); if (ret) - goto out; + goto err; /* Offset into the new buffer for cases of shared fbs between CRTCs */ offset = crtc->y * fb->pitches[0] + crtc->x * fb->bits_per_pixel/8; ret = BEGIN_LP_RING(6); if (ret) - goto out; + goto err_unpin; if (intel_crtc->plane) flip_mask = MI_WAIT_FOR_PLANE_B_FLIP; @@ -7392,7 +7407,11 @@ static int intel_gen3_queue_flip(struct drm_device *dev, OUT_RING(MI_NOOP); ADVANCE_LP_RING(); -out: + return 0; + +err_unpin: + intel_unpin_fb_obj(obj); +err: return ret; } @@ -7408,11 +7427,11 @@ static int intel_gen4_queue_flip(struct drm_device *dev, ret = intel_pin_and_fence_fb_obj(dev, obj, LP_RING(dev_priv)); if (ret) - goto out; + goto err; ret = BEGIN_LP_RING(4); if (ret) - goto out; + goto err_unpin; /* i965+ uses the linear or tiled offsets from the * Display Registers (which do not change across a page-flip) @@ -7431,7 +7450,11 @@ static int intel_gen4_queue_flip(struct drm_device *dev, pipesrc = I915_READ(PIPESRC(intel_crtc->pipe)) & 0x0fff0fff; OUT_RING(pf | pipesrc); ADVANCE_LP_RING(); -out: + return 0; + +err_unpin: + intel_unpin_fb_obj(obj); +err: return ret; } @@ -7447,11 +7470,11 @@ static int intel_gen6_queue_flip(struct drm_device *dev, ret = intel_pin_and_fence_fb_obj(dev, obj, LP_RING(dev_priv)); if (ret) - goto out; + goto err; ret = BEGIN_LP_RING(4); if (ret) - goto out; + goto err_unpin; OUT_RING(MI_DISPLAY_FLIP | MI_DISPLAY_FLIP_PLANE(intel_crtc->plane)); @@ -7468,7 +7491,11 @@ static int intel_gen6_queue_flip(struct drm_device *dev, pipesrc = I915_READ(PIPESRC(intel_crtc->pipe)) & 0x0fff0fff; OUT_RING(pf | pipesrc); ADVANCE_LP_RING(); -out: + return 0; + +err_unpin: + intel_unpin_fb_obj(obj); +err: return ret; } @@ -7486,22 +7513,43 @@ static int intel_gen7_queue_flip(struct drm_device *dev, struct drm_i915_private *dev_priv = dev->dev_private; struct intel_crtc *intel_crtc = to_intel_crtc(crtc); struct intel_ring_buffer *ring = &dev_priv->ring[BCS]; + uint32_t plane_bit = 0; int ret; ret = intel_pin_and_fence_fb_obj(dev, obj, ring); if (ret) - goto out; + goto err; + + switch(intel_crtc->plane) { + case PLANE_A: + plane_bit = MI_DISPLAY_FLIP_IVB_PLANE_A; + break; + case PLANE_B: + plane_bit = MI_DISPLAY_FLIP_IVB_PLANE_B; + break; + case PLANE_C: + plane_bit = MI_DISPLAY_FLIP_IVB_PLANE_C; + break; + default: + WARN_ONCE(1, "unknown plane in flip command\n"); + ret = -ENODEV; + goto err_unpin; + } ret = intel_ring_begin(ring, 4); if (ret) - goto out; + goto err_unpin; - intel_ring_emit(ring, MI_DISPLAY_FLIP_I915 | (intel_crtc->plane << 19)); + intel_ring_emit(ring, MI_DISPLAY_FLIP_I915 | plane_bit); intel_ring_emit(ring, (fb->pitches[0] | obj->tiling_mode)); intel_ring_emit(ring, (obj->gtt_offset)); intel_ring_emit(ring, (MI_NOOP)); intel_ring_advance(ring); -out: + return 0; + +err_unpin: + intel_unpin_fb_obj(obj); +err: return ret; } @@ -8552,6 +8600,10 @@ static void gen6_init_clock_gating(struct drm_device *dev) I915_READ(ILK_DISPLAY_CHICKEN2) | ILK_ELPIN_409_SELECT); + /* WaDisableHiZPlanesWhenMSAAEnabled */ + I915_WRITE(_3D_CHICKEN, + _MASKED_BIT_ENABLE(_3D_CHICKEN_HIZ_PLANE_DISABLE_MSAA_4X_SNB)); + I915_WRITE(WM3_LP_ILK, 0); 
I915_WRITE(WM2_LP_ILK, 0); I915_WRITE(WM1_LP_ILK, 0); @@ -8600,6 +8652,11 @@ static void gen6_init_clock_gating(struct drm_device *dev) DISPPLANE_TRICKLE_FEED_DISABLE); intel_flush_display_plane(dev_priv, pipe); } + + /* The default value should be 0x200 according to docs, but the two + * platforms I checked have a 0 for this. (Maybe BIOS overrides?) */ + I915_WRITE(GEN6_GT_MODE, 0xffff << 16); + I915_WRITE(GEN6_GT_MODE, GEN6_GT_MODE_HI << 16 | GEN6_GT_MODE_HI); } static void gen7_setup_fixed_func_scheduler(struct drm_i915_private *dev_priv) diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h index 715afa153025..cd623e899f58 100644 --- a/drivers/gpu/drm/i915/intel_drv.h +++ b/drivers/gpu/drm/i915/intel_drv.h @@ -247,12 +247,12 @@ struct dip_infoframe { uint16_t bottom_bar_start; uint16_t left_bar_end; uint16_t right_bar_start; - } avi; + } __attribute__ ((packed)) avi; struct { uint8_t vn[8]; uint8_t pd[16]; uint8_t sdi; - } spd; + } __attribute__ ((packed)) spd; uint8_t payload[27]; } __attribute__ ((packed)) body; } __attribute__((packed)); @@ -288,6 +288,8 @@ struct intel_fbc_work { int interval; }; +int intel_connector_update_modes(struct drm_connector *connector, + struct edid *edid); int intel_ddc_get_modes(struct drm_connector *c, struct i2c_adapter *adapter); extern bool intel_ddc_probe(struct intel_encoder *intel_encoder, int ddc_bus); diff --git a/drivers/gpu/drm/i915/intel_hdmi.c b/drivers/gpu/drm/i915/intel_hdmi.c index 2d7f47b56b6a..c60100d48ceb 100644 --- a/drivers/gpu/drm/i915/intel_hdmi.c +++ b/drivers/gpu/drm/i915/intel_hdmi.c @@ -138,14 +138,17 @@ static void i9xx_write_infoframe(struct drm_encoder *encoder, I915_WRITE(VIDEO_DIP_CTL, VIDEO_DIP_ENABLE | val | port | flags); + mmiowb(); for (i = 0; i < len; i += 4) { I915_WRITE(VIDEO_DIP_DATA, *data); data++; } + mmiowb(); flags |= intel_infoframe_flags(frame); I915_WRITE(VIDEO_DIP_CTL, VIDEO_DIP_ENABLE | val | port | flags); + POSTING_READ(VIDEO_DIP_CTL); } static void ironlake_write_infoframe(struct drm_encoder *encoder, @@ -168,14 +171,17 @@ static void ironlake_write_infoframe(struct drm_encoder *encoder, I915_WRITE(reg, VIDEO_DIP_ENABLE | val | flags); + mmiowb(); for (i = 0; i < len; i += 4) { I915_WRITE(TVIDEO_DIP_DATA(intel_crtc->pipe), *data); data++; } + mmiowb(); flags |= intel_infoframe_flags(frame); I915_WRITE(reg, VIDEO_DIP_ENABLE | val | flags); + POSTING_READ(reg); } static void intel_set_infoframe(struct drm_encoder *encoder, struct dip_infoframe *frame) @@ -271,7 +277,7 @@ static void intel_hdmi_dpms(struct drm_encoder *encoder, int mode) u32 temp; u32 enable_bits = SDVO_ENABLE; - if (intel_hdmi->has_audio) + if (intel_hdmi->has_audio || mode != DRM_MODE_DPMS_ON) enable_bits |= SDVO_AUDIO_ENABLE; temp = I915_READ(intel_hdmi->sdvox_reg); @@ -549,10 +555,13 @@ void intel_hdmi_init(struct drm_device *dev, int sdvox_reg) if (!HAS_PCH_SPLIT(dev)) { intel_hdmi->write_infoframe = i9xx_write_infoframe; I915_WRITE(VIDEO_DIP_CTL, 0); + POSTING_READ(VIDEO_DIP_CTL); } else { intel_hdmi->write_infoframe = ironlake_write_infoframe; - for_each_pipe(i) + for_each_pipe(i) { I915_WRITE(TVIDEO_DIP_CTL(i), 0); + POSTING_READ(TVIDEO_DIP_CTL(i)); + } } drm_encoder_helper_add(&intel_encoder->base, &intel_hdmi_helper_funcs); diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c index 9fadd6431815..a8b28c493a85 100644 --- a/drivers/gpu/drm/i915/intel_lvds.c +++ b/drivers/gpu/drm/i915/intel_lvds.c @@ -777,6 +777,22 @@ static const struct dmi_system_id intel_no_lvds[] = { 
DMI_MATCH(DMI_BOARD_NAME, "MS-7469"), }, }, + { + .callback = intel_no_lvds_dmi_callback, + .ident = "Gigabyte GA-D525TUD", + .matches = { + DMI_MATCH(DMI_BOARD_VENDOR, "Gigabyte Technology Co., Ltd."), + DMI_MATCH(DMI_BOARD_NAME, "D525TUD"), + }, + }, + { + .callback = intel_no_lvds_dmi_callback, + .ident = "Supermicro X7SPA-H", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Supermicro"), + DMI_MATCH(DMI_PRODUCT_NAME, "X7SPA-H"), + }, + }, { } /* terminating entry */ }; diff --git a/drivers/gpu/drm/i915/intel_modes.c b/drivers/gpu/drm/i915/intel_modes.c index d1928e79d9b6..9a2b27031a4d 100644 --- a/drivers/gpu/drm/i915/intel_modes.c +++ b/drivers/gpu/drm/i915/intel_modes.c @@ -60,6 +60,25 @@ bool intel_ddc_probe(struct intel_encoder *intel_encoder, int ddc_bus) } /** + * intel_connector_update_modes - update connector from edid + * @connector: DRM connector device to use + * @edid: previously read EDID information + */ +int intel_connector_update_modes(struct drm_connector *connector, + struct edid *edid) +{ + int ret; + + drm_mode_connector_update_edid_property(connector, edid); + ret = drm_add_edid_modes(connector, edid); + drm_edid_to_eld(connector, edid); + connector->display_info.raw_edid = NULL; + kfree(edid); + + return ret; +} + +/** * intel_ddc_get_modes - get modelist from monitor * @connector: DRM connector device to use * @adapter: i2c adapter @@ -70,18 +89,12 @@ int intel_ddc_get_modes(struct drm_connector *connector, struct i2c_adapter *adapter) { struct edid *edid; - int ret = 0; edid = drm_get_edid(connector, adapter); - if (edid) { - drm_mode_connector_update_edid_property(connector, edid); - ret = drm_add_edid_modes(connector, edid); - drm_edid_to_eld(connector, edid); - connector->display_info.raw_edid = NULL; - kfree(edid); - } + if (!edid) + return 0; - return ret; + return intel_connector_update_modes(connector, edid); } static const struct drm_prop_enum_list force_audio_names[] = { diff --git a/drivers/gpu/drm/i915/intel_overlay.c b/drivers/gpu/drm/i915/intel_overlay.c index 80b331c322fb..5ba5e66fa070 100644 --- a/drivers/gpu/drm/i915/intel_overlay.c +++ b/drivers/gpu/drm/i915/intel_overlay.c @@ -427,9 +427,17 @@ static int intel_overlay_off(struct intel_overlay *overlay) OUT_RING(flip_addr); OUT_RING(MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP); /* turn overlay off */ - OUT_RING(MI_OVERLAY_FLIP | MI_OVERLAY_OFF); - OUT_RING(flip_addr); - OUT_RING(MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP); + if (IS_I830(dev)) { + /* Workaround: Don't disable the overlay fully, since otherwise + * it dies on the next OVERLAY_ON cmd. 
*/ + OUT_RING(MI_NOOP); + OUT_RING(MI_NOOP); + OUT_RING(MI_NOOP); + } else { + OUT_RING(MI_OVERLAY_FLIP | MI_OVERLAY_OFF); + OUT_RING(flip_addr); + OUT_RING(MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP); + } ADVANCE_LP_RING(); return intel_overlay_do_wait_request(overlay, request, diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c index 12a9e5fd9252..c17325ce3799 100644 --- a/drivers/gpu/drm/i915/intel_ringbuffer.c +++ b/drivers/gpu/drm/i915/intel_ringbuffer.c @@ -249,15 +249,22 @@ u32 intel_ring_get_active_head(struct intel_ring_buffer *ring) static int init_ring_common(struct intel_ring_buffer *ring) { - drm_i915_private_t *dev_priv = ring->dev->dev_private; + struct drm_device *dev = ring->dev; + drm_i915_private_t *dev_priv = dev->dev_private; struct drm_i915_gem_object *obj = ring->obj; + int ret = 0; u32 head; + if (HAS_FORCE_WAKE(dev)) + gen6_gt_force_wake_get(dev_priv); + /* Stop the ring if it's running. */ I915_WRITE_CTL(ring, 0); I915_WRITE_HEAD(ring, 0); ring->write_tail(ring, 0); + /* Initialize the ring. */ + I915_WRITE_START(ring, obj->gtt_offset); head = I915_READ_HEAD(ring) & HEAD_ADDR; /* G45 ring initialization fails to reset head to zero */ @@ -283,19 +290,14 @@ static int init_ring_common(struct intel_ring_buffer *ring) } } - /* Initialize the ring. This must happen _after_ we've cleared the ring - * registers with the above sequence (the readback of the HEAD registers - * also enforces ordering), otherwise the hw might lose the new ring - * register values. */ - I915_WRITE_START(ring, obj->gtt_offset); I915_WRITE_CTL(ring, ((ring->size - PAGE_SIZE) & RING_NR_PAGES) | RING_VALID); /* If the head is still not zero, the ring is dead */ - if ((I915_READ_CTL(ring) & RING_VALID) == 0 || - I915_READ_START(ring) != obj->gtt_offset || - (I915_READ_HEAD(ring) & HEAD_ADDR) != 0) { + if (wait_for((I915_READ_CTL(ring) & RING_VALID) != 0 && + I915_READ_START(ring) == obj->gtt_offset && + (I915_READ_HEAD(ring) & HEAD_ADDR) == 0, 50)) { DRM_ERROR("%s initialization failed " "ctl %08x head %08x tail %08x start %08x\n", ring->name, @@ -303,7 +305,8 @@ static int init_ring_common(struct intel_ring_buffer *ring) I915_READ_HEAD(ring), I915_READ_TAIL(ring), I915_READ_START(ring)); - return -EIO; + ret = -EIO; + goto out; } if (!drm_core_check_feature(ring->dev, DRIVER_MODESET)) @@ -315,7 +318,11 @@ static int init_ring_common(struct intel_ring_buffer *ring) ring->last_retired_head = -1; } - return 0; +out: + if (HAS_FORCE_WAKE(dev)) + gen6_gt_force_wake_put(dev_priv); + + return ret; } static int diff --git a/drivers/gpu/drm/i915/intel_sdvo.c b/drivers/gpu/drm/i915/intel_sdvo.c index eea58c6cb05b..1b6b1570f4c2 100644 --- a/drivers/gpu/drm/i915/intel_sdvo.c +++ b/drivers/gpu/drm/i915/intel_sdvo.c @@ -868,31 +868,38 @@ static void intel_sdvo_dump_hdmi_buf(struct intel_sdvo *intel_sdvo) } #endif -static bool intel_sdvo_set_avi_infoframe(struct intel_sdvo *intel_sdvo) +static bool intel_sdvo_write_infoframe(struct intel_sdvo *intel_sdvo, + unsigned if_index, uint8_t tx_rate, + uint8_t *data, unsigned length) { - struct dip_infoframe avi_if = { - .type = DIP_TYPE_AVI, - .ver = DIP_VERSION_AVI, - .len = DIP_LEN_AVI, - }; - uint8_t tx_rate = SDVO_HBUF_TX_VSYNC; - uint8_t set_buf_index[2] = { 1, 0 }; - uint64_t *data = (uint64_t *)&avi_if; - unsigned i; - - intel_dip_infoframe_csum(&avi_if); + uint8_t set_buf_index[2] = { if_index, 0 }; + uint8_t hbuf_size, tmp[8]; + int i; if (!intel_sdvo_set_value(intel_sdvo, SDVO_CMD_SET_HBUF_INDEX, set_buf_index, 2)) 
return false; - for (i = 0; i < sizeof(avi_if); i += 8) { + if (!intel_sdvo_get_value(intel_sdvo, SDVO_CMD_GET_HBUF_INFO, + &hbuf_size, 1)) + return false; + + /* Buffer size is 0 based, hooray! */ + hbuf_size++; + + DRM_DEBUG_KMS("writing sdvo hbuf: %i, hbuf_size %i, hbuf_size: %i\n", + if_index, length, hbuf_size); + + for (i = 0; i < hbuf_size; i += 8) { + memset(tmp, 0, 8); + if (i < length) + memcpy(tmp, data + i, min_t(unsigned, 8, length - i)); + if (!intel_sdvo_set_value(intel_sdvo, SDVO_CMD_SET_HBUF_DATA, - data, 8)) + tmp, 8)) return false; - data++; } return intel_sdvo_set_value(intel_sdvo, @@ -900,6 +907,28 @@ static bool intel_sdvo_set_avi_infoframe(struct intel_sdvo *intel_sdvo) &tx_rate, 1); } +static bool intel_sdvo_set_avi_infoframe(struct intel_sdvo *intel_sdvo) +{ + struct dip_infoframe avi_if = { + .type = DIP_TYPE_AVI, + .ver = DIP_VERSION_AVI, + .len = DIP_LEN_AVI, + }; + uint8_t sdvo_data[4 + sizeof(avi_if.body.avi)]; + + intel_dip_infoframe_csum(&avi_if); + + /* sdvo spec says that the ecc is handled by the hw, and it looks like + * we must not send the ecc field, either. */ + memcpy(sdvo_data, &avi_if, 3); + sdvo_data[3] = avi_if.checksum; + memcpy(&sdvo_data[4], &avi_if.body, sizeof(avi_if.body.avi)); + + return intel_sdvo_write_infoframe(intel_sdvo, SDVO_HBUF_INDEX_AVI_IF, + SDVO_HBUF_TX_VSYNC, + sdvo_data, sizeof(sdvo_data)); +} + static bool intel_sdvo_set_tv_format(struct intel_sdvo *intel_sdvo) { struct intel_sdvo_tv_format format; @@ -2499,6 +2528,7 @@ bool intel_sdvo_init(struct drm_device *dev, int sdvo_reg) struct drm_i915_private *dev_priv = dev->dev_private; struct intel_encoder *intel_encoder; struct intel_sdvo *intel_sdvo; + u32 hotplug_mask; int i; intel_sdvo = kzalloc(sizeof(struct intel_sdvo), GFP_KERNEL); @@ -2529,10 +2559,17 @@ bool intel_sdvo_init(struct drm_device *dev, int sdvo_reg) } } - if (IS_SDVOB(sdvo_reg)) - dev_priv->hotplug_supported_mask |= SDVOB_HOTPLUG_INT_STATUS; - else - dev_priv->hotplug_supported_mask |= SDVOC_HOTPLUG_INT_STATUS; + hotplug_mask = 0; + if (IS_G4X(dev)) { + hotplug_mask = IS_SDVOB(sdvo_reg) ? + SDVOB_HOTPLUG_INT_STATUS_G4X : SDVOC_HOTPLUG_INT_STATUS_G4X; + } else if (IS_GEN4(dev)) { + hotplug_mask = IS_SDVOB(sdvo_reg) ? + SDVOB_HOTPLUG_INT_STATUS_I965 : SDVOC_HOTPLUG_INT_STATUS_I965; + } else { + hotplug_mask = IS_SDVOB(sdvo_reg) ? + SDVOB_HOTPLUG_INT_STATUS_I915 : SDVOC_HOTPLUG_INT_STATUS_I915; + } drm_encoder_helper_add(&intel_encoder->base, &intel_sdvo_helper_funcs); @@ -2540,14 +2577,6 @@ bool intel_sdvo_init(struct drm_device *dev, int sdvo_reg) if (!intel_sdvo_get_capabilities(intel_sdvo, &intel_sdvo->caps)) goto err; - /* Set up hotplug command - note paranoia about contents of reply. - * We assume that the hardware is in a sane state, and only touch - * the bits we think we understand. - */ - intel_sdvo_get_value(intel_sdvo, SDVO_CMD_GET_ACTIVE_HOT_PLUG, - &intel_sdvo->hotplug_active, 2); - intel_sdvo->hotplug_active[0] &= ~0x3; - if (intel_sdvo_output_setup(intel_sdvo, intel_sdvo->caps.output_flags) != true) { DRM_DEBUG_KMS("SDVO output failed to setup on SDVO%c\n", @@ -2555,6 +2584,12 @@ bool intel_sdvo_init(struct drm_device *dev, int sdvo_reg) goto err; } + /* Only enable the hotplug irq if we need it, to work around noisy + * hotplug lines. + */ + if (intel_sdvo->hotplug_active[0]) + dev_priv->hotplug_supported_mask |= hotplug_mask; + intel_sdvo_select_ddc_bus(dev_priv, intel_sdvo, sdvo_reg); /* Set the input timing to the screen. Assume always input 0. 
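The reworked intel_sdvo_write_infoframe() above first queries the hardware for the buffer size (SDVO_CMD_GET_HBUF_INFO, which reports size minus one) and then pushes the payload in 8-byte blocks, zero-padding the final block and any unused tail of the buffer. A small standalone sketch of that chunked, zero-padded copy; write_block() is a hypothetical stand-in for the SDVO_CMD_SET_HBUF_DATA command and is not part of the patch.

#include <stddef.h>
#include <stdio.h>
#include <string.h>

#define BLOCK 8

/* Hypothetical device hook: consumes exactly BLOCK bytes; returns 0 on success. */
static int write_block(const unsigned char blk[BLOCK])
{
	int i;

	for (i = 0; i < BLOCK; i++)
		printf("%02x ", blk[i]);
	printf("\n");
	return 0;
}

/* Copy 'len' bytes into a hardware buffer of 'hw_buf_size' bytes, BLOCK bytes
 * at a time, zero-padding the last partial block and any trailing blocks. */
static int write_chunked(const unsigned char *data, size_t len, size_t hw_buf_size)
{
	unsigned char tmp[BLOCK];
	size_t i, n;

	for (i = 0; i < hw_buf_size; i += BLOCK) {
		memset(tmp, 0, sizeof(tmp));
		if (i < len) {
			n = len - i;
			memcpy(tmp, data + i, n > BLOCK ? BLOCK : n);
		}
		if (write_block(tmp))
			return -1;
	}
	return 0;
}

int main(void)
{
	const unsigned char payload[] = "AVI-infoframe-payload";

	return write_chunked(payload, sizeof(payload), 32);
}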
*/ diff --git a/drivers/gpu/drm/i915/intel_sdvo_regs.h b/drivers/gpu/drm/i915/intel_sdvo_regs.h index 9d030142ee43..770bdd6ecd9f 100644 --- a/drivers/gpu/drm/i915/intel_sdvo_regs.h +++ b/drivers/gpu/drm/i915/intel_sdvo_regs.h @@ -708,6 +708,8 @@ struct intel_sdvo_enhancements_arg { #define SDVO_CMD_SET_AUDIO_STAT 0x91 #define SDVO_CMD_GET_AUDIO_STAT 0x92 #define SDVO_CMD_SET_HBUF_INDEX 0x93 + #define SDVO_HBUF_INDEX_ELD 0 + #define SDVO_HBUF_INDEX_AVI_IF 1 #define SDVO_CMD_GET_HBUF_INDEX 0x94 #define SDVO_CMD_GET_HBUF_INFO 0x95 #define SDVO_CMD_SET_HBUF_AV_SPLIT 0x96 diff --git a/drivers/gpu/drm/nouveau/nouveau_display.c b/drivers/gpu/drm/nouveau/nouveau_display.c index a85e112863d1..f233b8fe2dad 100644 --- a/drivers/gpu/drm/nouveau/nouveau_display.c +++ b/drivers/gpu/drm/nouveau/nouveau_display.c @@ -586,7 +586,7 @@ nouveau_display_dumb_create(struct drm_file *file_priv, struct drm_device *dev, args->size = args->pitch * args->height; args->size = roundup(args->size, PAGE_SIZE); - ret = nouveau_gem_new(dev, args->size, 0, TTM_PL_FLAG_VRAM, 0, 0, &bo); + ret = nouveau_gem_new(dev, args->size, 0, NOUVEAU_GEM_DOMAIN_VRAM, 0, 0, &bo); if (ret) return ret; diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.c b/drivers/gpu/drm/nouveau/nouveau_drv.c index 4f2030bd5676..05091c22a037 100644 --- a/drivers/gpu/drm/nouveau/nouveau_drv.c +++ b/drivers/gpu/drm/nouveau/nouveau_drv.c @@ -186,11 +186,13 @@ nouveau_pci_suspend(struct pci_dev *pdev, pm_message_t pm_state) if (dev->switch_power_state == DRM_SWITCH_POWER_OFF) return 0; - NV_INFO(dev, "Disabling display...\n"); - nouveau_display_fini(dev); + if (dev->mode_config.num_crtc) { + NV_INFO(dev, "Disabling display...\n"); + nouveau_display_fini(dev); - NV_INFO(dev, "Disabling fbcon...\n"); - nouveau_fbcon_set_suspend(dev, 1); + NV_INFO(dev, "Disabling fbcon...\n"); + nouveau_fbcon_set_suspend(dev, 1); + } NV_INFO(dev, "Unpinning framebuffer(s)...\n"); list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { @@ -363,10 +365,12 @@ nouveau_pci_resume(struct pci_dev *pdev) NV_ERROR(dev, "Could not pin/map cursor.\n"); } - nouveau_fbcon_set_suspend(dev, 0); - nouveau_fbcon_zfill_all(dev); + if (dev->mode_config.num_crtc) { + nouveau_fbcon_set_suspend(dev, 0); + nouveau_fbcon_zfill_all(dev); - nouveau_display_init(dev); + nouveau_display_init(dev); + } /* Force CLUT to get re-loaded during modeset */ list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { @@ -468,9 +472,7 @@ static int __init nouveau_init(void) #ifdef CONFIG_VGA_CONSOLE if (vgacon_text_force()) nouveau_modeset = 0; - else #endif - nouveau_modeset = 1; } if (!nouveau_modeset) diff --git a/drivers/gpu/drm/nouveau/nouveau_state.c b/drivers/gpu/drm/nouveau/nouveau_state.c index c2a8511e855a..b096cf28213a 100644 --- a/drivers/gpu/drm/nouveau/nouveau_state.c +++ b/drivers/gpu/drm/nouveau/nouveau_state.c @@ -47,6 +47,7 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev) { struct drm_nouveau_private *dev_priv = dev->dev_private; struct nouveau_engine *engine = &dev_priv->engine; + u32 pclass = dev->pdev->class >> 8; switch (dev_priv->chipset & 0xf0) { case 0x00: @@ -526,7 +527,8 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev) } /* headless mode */ - if (nouveau_modeset == 2) { + if (nouveau_modeset == 2 || + (nouveau_modeset < 0 && pclass != PCI_CLASS_DISPLAY_VGA)) { engine->display.early_init = nouveau_stub_init; engine->display.late_takedown = nouveau_stub_takedown; engine->display.create = nouveau_stub_init; diff --git a/drivers/gpu/drm/nouveau/nv04_dac.c 
b/drivers/gpu/drm/nouveau/nv04_dac.c index 8300266ffaea..f180dcfb12fd 100644 --- a/drivers/gpu/drm/nouveau/nv04_dac.c +++ b/drivers/gpu/drm/nouveau/nv04_dac.c @@ -210,7 +210,7 @@ out: NVWriteVgaCrtc(dev, 0, NV_CIO_CR_MODE_INDEX, saved_cr_mode); if (blue == 0x18) { - NV_INFO(dev, "Load detected on head A\n"); + NV_DEBUG(dev, "Load detected on head A\n"); return connector_status_connected; } @@ -323,7 +323,7 @@ nv17_dac_detect(struct drm_encoder *encoder, struct drm_connector *connector) if (nv17_dac_sample_load(encoder) & NV_PRAMDAC_TEST_CONTROL_SENSEB_ALLHI) { - NV_INFO(dev, "Load detected on output %c\n", + NV_DEBUG(dev, "Load detected on output %c\n", '@' + ffs(dcb->or)); return connector_status_connected; } else { @@ -398,7 +398,7 @@ static void nv04_dac_commit(struct drm_encoder *encoder) helper->dpms(encoder, DRM_MODE_DPMS_ON); - NV_INFO(dev, "Output %s is running on CRTC %d using output %c\n", + NV_DEBUG(dev, "Output %s is running on CRTC %d using output %c\n", drm_get_connector_name(&nouveau_encoder_connector_get(nv_encoder)->base), nv_crtc->index, '@' + ffs(nv_encoder->dcb->or)); } @@ -447,7 +447,7 @@ static void nv04_dac_dpms(struct drm_encoder *encoder, int mode) return; nv_encoder->last_dpms = mode; - NV_INFO(dev, "Setting dpms mode %d on vga encoder (output %d)\n", + NV_DEBUG(dev, "Setting dpms mode %d on vga encoder (output %d)\n", mode, nv_encoder->dcb->index); nv04_dac_update_dacclk(encoder, mode == DRM_MODE_DPMS_ON); diff --git a/drivers/gpu/drm/nouveau/nv04_dfp.c b/drivers/gpu/drm/nouveau/nv04_dfp.c index 2258746016f8..71b62352315f 100644 --- a/drivers/gpu/drm/nouveau/nv04_dfp.c +++ b/drivers/gpu/drm/nouveau/nv04_dfp.c @@ -476,7 +476,7 @@ static void nv04_dfp_commit(struct drm_encoder *encoder) helper->dpms(encoder, DRM_MODE_DPMS_ON); - NV_INFO(dev, "Output %s is running on CRTC %d using output %c\n", + NV_DEBUG(dev, "Output %s is running on CRTC %d using output %c\n", drm_get_connector_name(&nouveau_encoder_connector_get(nv_encoder)->base), nv_crtc->index, '@' + ffs(nv_encoder->dcb->or)); } @@ -504,7 +504,7 @@ static void nv04_dfp_update_backlight(struct drm_encoder *encoder, int mode) static inline bool is_powersaving_dpms(int mode) { - return (mode != DRM_MODE_DPMS_ON); + return mode != DRM_MODE_DPMS_ON && mode != NV_DPMS_CLEARED; } static void nv04_lvds_dpms(struct drm_encoder *encoder, int mode) @@ -519,7 +519,7 @@ static void nv04_lvds_dpms(struct drm_encoder *encoder, int mode) return; nv_encoder->last_dpms = mode; - NV_INFO(dev, "Setting dpms mode %d on lvds encoder (output %d)\n", + NV_DEBUG(dev, "Setting dpms mode %d on lvds encoder (output %d)\n", mode, nv_encoder->dcb->index); if (was_powersaving && is_powersaving_dpms(mode)) @@ -564,7 +564,7 @@ static void nv04_tmds_dpms(struct drm_encoder *encoder, int mode) return; nv_encoder->last_dpms = mode; - NV_INFO(dev, "Setting dpms mode %d on tmds encoder (output %d)\n", + NV_DEBUG(dev, "Setting dpms mode %d on tmds encoder (output %d)\n", mode, nv_encoder->dcb->index); nv04_dfp_update_backlight(encoder, mode); diff --git a/drivers/gpu/drm/nouveau/nv04_tv.c b/drivers/gpu/drm/nouveau/nv04_tv.c index 3eb605ddfd03..4de1fbeb8497 100644 --- a/drivers/gpu/drm/nouveau/nv04_tv.c +++ b/drivers/gpu/drm/nouveau/nv04_tv.c @@ -69,7 +69,7 @@ static void nv04_tv_dpms(struct drm_encoder *encoder, int mode) struct nv04_mode_state *state = &dev_priv->mode_reg; uint8_t crtc1A; - NV_INFO(dev, "Setting dpms mode %d on TV encoder (output %d)\n", + NV_DEBUG(dev, "Setting dpms mode %d on TV encoder (output %d)\n", mode, 
nv_encoder->dcb->index); state->pllsel &= ~(PLLSEL_TV_CRTC1_MASK | PLLSEL_TV_CRTC2_MASK); @@ -162,7 +162,7 @@ static void nv04_tv_commit(struct drm_encoder *encoder) helper->dpms(encoder, DRM_MODE_DPMS_ON); - NV_INFO(dev, "Output %s is running on CRTC %d using output %c\n", + NV_DEBUG(dev, "Output %s is running on CRTC %d using output %c\n", drm_get_connector_name(&nouveau_encoder_connector_get(nv_encoder)->base), nv_crtc->index, '@' + ffs(nv_encoder->dcb->or)); } diff --git a/drivers/gpu/drm/radeon/atombios_crtc.c b/drivers/gpu/drm/radeon/atombios_crtc.c index a53ca30d05e2..ebbfbd201739 100644 --- a/drivers/gpu/drm/radeon/atombios_crtc.c +++ b/drivers/gpu/drm/radeon/atombios_crtc.c @@ -258,9 +258,6 @@ void atombios_crtc_dpms(struct drm_crtc *crtc, int mode) radeon_crtc->enabled = true; /* adjust pm to dpms changes BEFORE enabling crtcs */ radeon_pm_compute_clocks(rdev); - /* disable crtc pair power gating before programming */ - if (ASIC_IS_DCE6(rdev) && !radeon_crtc->in_mode_set) - atombios_powergate_crtc(crtc, ATOM_DISABLE); atombios_enable_crtc(crtc, ATOM_ENABLE); if (ASIC_IS_DCE3(rdev) && !ASIC_IS_DCE6(rdev)) atombios_enable_crtc_memreq(crtc, ATOM_ENABLE); @@ -278,25 +275,6 @@ void atombios_crtc_dpms(struct drm_crtc *crtc, int mode) atombios_enable_crtc_memreq(crtc, ATOM_DISABLE); atombios_enable_crtc(crtc, ATOM_DISABLE); radeon_crtc->enabled = false; - /* power gating is per-pair */ - if (ASIC_IS_DCE6(rdev) && !radeon_crtc->in_mode_set) { - struct drm_crtc *other_crtc; - struct radeon_crtc *other_radeon_crtc; - list_for_each_entry(other_crtc, &rdev->ddev->mode_config.crtc_list, head) { - other_radeon_crtc = to_radeon_crtc(other_crtc); - if (((radeon_crtc->crtc_id == 0) && (other_radeon_crtc->crtc_id == 1)) || - ((radeon_crtc->crtc_id == 1) && (other_radeon_crtc->crtc_id == 0)) || - ((radeon_crtc->crtc_id == 2) && (other_radeon_crtc->crtc_id == 3)) || - ((radeon_crtc->crtc_id == 3) && (other_radeon_crtc->crtc_id == 2)) || - ((radeon_crtc->crtc_id == 4) && (other_radeon_crtc->crtc_id == 5)) || - ((radeon_crtc->crtc_id == 5) && (other_radeon_crtc->crtc_id == 4))) { - /* if both crtcs in the pair are off, enable power gating */ - if (other_radeon_crtc->enabled == false) - atombios_powergate_crtc(crtc, ATOM_ENABLE); - break; - } - } - } /* adjust pm to dpms changes AFTER disabling crtcs */ radeon_pm_compute_clocks(rdev); break; @@ -444,11 +422,28 @@ union atom_enable_ss { static void atombios_crtc_program_ss(struct radeon_device *rdev, int enable, int pll_id, + int crtc_id, struct radeon_atom_ss *ss) { + unsigned i; int index = GetIndexIntoMasterTable(COMMAND, EnableSpreadSpectrumOnPPLL); union atom_enable_ss args; + if (!enable) { + for (i = 0; i < rdev->num_crtc; i++) { + if (rdev->mode_info.crtcs[i] && + rdev->mode_info.crtcs[i]->enabled && + i != crtc_id && + pll_id == rdev->mode_info.crtcs[i]->pll_id) { + /* one other crtc is using this pll don't turn + * off spread spectrum as it might turn off + * display on active crtc + */ + return; + } + } + } + memset(&args, 0, sizeof(args)); if (ASIC_IS_DCE5(rdev)) { @@ -1039,7 +1034,7 @@ static void atombios_crtc_set_pll(struct drm_crtc *crtc, struct drm_display_mode radeon_compute_pll_legacy(pll, adjusted_clock, &pll_clock, &fb_div, &frac_fb_div, &ref_div, &post_div); - atombios_crtc_program_ss(rdev, ATOM_DISABLE, radeon_crtc->pll_id, &ss); + atombios_crtc_program_ss(rdev, ATOM_DISABLE, radeon_crtc->pll_id, radeon_crtc->crtc_id, &ss); atombios_crtc_program_pll(crtc, radeon_crtc->crtc_id, radeon_crtc->pll_id, encoder_mode, 
radeon_encoder->encoder_id, mode->clock, @@ -1062,7 +1057,7 @@ static void atombios_crtc_set_pll(struct drm_crtc *crtc, struct drm_display_mode ss.step = step_size; } - atombios_crtc_program_ss(rdev, ATOM_ENABLE, radeon_crtc->pll_id, &ss); + atombios_crtc_program_ss(rdev, ATOM_ENABLE, radeon_crtc->pll_id, radeon_crtc->crtc_id, &ss); } } @@ -1534,8 +1529,12 @@ static int radeon_atom_pick_pll(struct drm_crtc *crtc) * crtc virtual pixel clock. */ if (ENCODER_MODE_IS_DP(atombios_get_encoder_mode(test_encoder))) { - if (ASIC_IS_DCE5(rdev) || rdev->clock.dp_extclk) + if (rdev->clock.dp_extclk) return ATOM_PPLL_INVALID; + else if (ASIC_IS_DCE6(rdev)) + return ATOM_PPLL0; + else if (ASIC_IS_DCE5(rdev)) + return ATOM_DCPLL; } } } @@ -1571,11 +1570,11 @@ void radeon_atom_disp_eng_pll_init(struct radeon_device *rdev) ASIC_INTERNAL_SS_ON_DCPLL, rdev->clock.default_dispclk); if (ss_enabled) - atombios_crtc_program_ss(rdev, ATOM_DISABLE, ATOM_DCPLL, &ss); + atombios_crtc_program_ss(rdev, ATOM_DISABLE, ATOM_DCPLL, -1, &ss); /* XXX: DCE5, make sure voltage, dispclk is high enough */ atombios_crtc_set_disp_eng_pll(rdev, rdev->clock.default_dispclk); if (ss_enabled) - atombios_crtc_program_ss(rdev, ATOM_ENABLE, ATOM_DCPLL, &ss); + atombios_crtc_program_ss(rdev, ATOM_ENABLE, ATOM_DCPLL, -1, &ss); } } @@ -1664,8 +1663,23 @@ static void atombios_crtc_disable(struct drm_crtc *crtc) struct drm_device *dev = crtc->dev; struct radeon_device *rdev = dev->dev_private; struct radeon_atom_ss ss; + int i; atombios_crtc_dpms(crtc, DRM_MODE_DPMS_OFF); + if (ASIC_IS_DCE6(rdev)) + atombios_powergate_crtc(crtc, ATOM_ENABLE); + + for (i = 0; i < rdev->num_crtc; i++) { + if (rdev->mode_info.crtcs[i] && + rdev->mode_info.crtcs[i]->enabled && + i != radeon_crtc->crtc_id && + radeon_crtc->pll_id == rdev->mode_info.crtcs[i]->pll_id) { + /* one other crtc is using this pll don't turn + * off the pll + */ + goto done; + } + } switch (radeon_crtc->pll_id) { case ATOM_PPLL1: @@ -1683,6 +1697,7 @@ static void atombios_crtc_disable(struct drm_crtc *crtc) default: break; } +done: radeon_crtc->pll_id = -1; } diff --git a/drivers/gpu/drm/radeon/atombios_encoders.c b/drivers/gpu/drm/radeon/atombios_encoders.c index a3ae788a2af2..23e3ea6ba148 100644 --- a/drivers/gpu/drm/radeon/atombios_encoders.c +++ b/drivers/gpu/drm/radeon/atombios_encoders.c @@ -95,7 +95,7 @@ static bool radeon_atom_mode_fixup(struct drm_encoder *encoder, ((radeon_encoder->active_device & (ATOM_DEVICE_DFP_SUPPORT | ATOM_DEVICE_LCD_SUPPORT)) || (radeon_encoder_get_dp_bridge_encoder_id(encoder) != ENCODER_OBJECT_ID_NONE))) { struct drm_connector *connector = radeon_get_connector_for_encoder(encoder); - radeon_dp_set_link_config(connector, mode); + radeon_dp_set_link_config(connector, adjusted_mode); } return true; @@ -1379,6 +1379,8 @@ radeon_atom_encoder_dpms_dig(struct drm_encoder *encoder, int mode) struct drm_device *dev = encoder->dev; struct radeon_device *rdev = dev->dev_private; struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); + struct drm_encoder *ext_encoder = radeon_get_external_encoder(encoder); + struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv; struct drm_connector *connector = radeon_get_connector_for_encoder(encoder); struct radeon_connector *radeon_connector = NULL; struct radeon_connector_atom_dig *radeon_dig_connector = NULL; @@ -1390,19 +1392,37 @@ radeon_atom_encoder_dpms_dig(struct drm_encoder *encoder, int mode) switch (mode) { case DRM_MODE_DPMS_ON: - /* some early dce3.2 boards have a bug in their transmitter 
control table */ - if ((rdev->family == CHIP_RV710) || (rdev->family == CHIP_RV730) || - ASIC_IS_DCE41(rdev) || ASIC_IS_DCE5(rdev)) { - if (ASIC_IS_DCE6(rdev)) { - /* It seems we need to call ATOM_ENCODER_CMD_SETUP again - * before reenabling encoder on DPMS ON, otherwise we never - * get picture - */ - atombios_dig_encoder_setup(encoder, ATOM_ENCODER_CMD_SETUP, 0); + if (ASIC_IS_DCE41(rdev) || ASIC_IS_DCE5(rdev)) { + if (!connector) + dig->panel_mode = DP_PANEL_MODE_EXTERNAL_DP_MODE; + else + dig->panel_mode = radeon_dp_get_panel_mode(encoder, connector); + + /* setup and enable the encoder */ + atombios_dig_encoder_setup(encoder, ATOM_ENCODER_CMD_SETUP, 0); + atombios_dig_encoder_setup(encoder, + ATOM_ENCODER_CMD_SETUP_PANEL_MODE, + dig->panel_mode); + if (ext_encoder) { + if (ASIC_IS_DCE41(rdev) || ASIC_IS_DCE61(rdev)) + atombios_external_encoder_setup(encoder, ext_encoder, + EXTERNAL_ENCODER_ACTION_V3_ENCODER_SETUP); } atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_ENABLE, 0, 0); - } else { + } else if (ASIC_IS_DCE4(rdev)) { + /* setup and enable the encoder */ + atombios_dig_encoder_setup(encoder, ATOM_ENCODER_CMD_SETUP, 0); + /* enable the transmitter */ + atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_ENABLE, 0, 0); atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_ENABLE_OUTPUT, 0, 0); + } else { + /* setup and enable the encoder and transmitter */ + atombios_dig_encoder_setup(encoder, ATOM_ENABLE, 0); + atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_SETUP, 0, 0); + atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_ENABLE, 0, 0); + /* some early dce3.2 boards have a bug in their transmitter control table */ + if ((rdev->family != CHIP_RV710) && (rdev->family != CHIP_RV730)) + atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_ENABLE_OUTPUT, 0, 0); } if (ENCODER_MODE_IS_DP(atombios_get_encoder_mode(encoder)) && connector) { if (connector->connector_type == DRM_MODE_CONNECTOR_eDP) { @@ -1420,10 +1440,19 @@ radeon_atom_encoder_dpms_dig(struct drm_encoder *encoder, int mode) case DRM_MODE_DPMS_STANDBY: case DRM_MODE_DPMS_SUSPEND: case DRM_MODE_DPMS_OFF: - if (ASIC_IS_DCE41(rdev) || ASIC_IS_DCE5(rdev)) + if (ASIC_IS_DCE41(rdev) || ASIC_IS_DCE5(rdev)) { + /* disable the transmitter */ atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_DISABLE, 0, 0); - else + } else if (ASIC_IS_DCE4(rdev)) { + /* disable the transmitter */ + atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_DISABLE_OUTPUT, 0, 0); + atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_DISABLE, 0, 0); + } else { + /* disable the encoder and transmitter */ atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_DISABLE_OUTPUT, 0, 0); + atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_DISABLE, 0, 0); + atombios_dig_encoder_setup(encoder, ATOM_DISABLE, 0); + } if (ENCODER_MODE_IS_DP(atombios_get_encoder_mode(encoder)) && connector) { if (ASIC_IS_DCE4(rdev)) atombios_dig_encoder_setup(encoder, ATOM_ENCODER_CMD_DP_VIDEO_OFF, 0); @@ -1740,13 +1769,34 @@ static int radeon_atom_pick_dig_encoder(struct drm_encoder *encoder) struct radeon_crtc *radeon_crtc = to_radeon_crtc(encoder->crtc); struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); struct drm_encoder *test_encoder; - struct radeon_encoder_atom_dig *dig; + struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv; uint32_t dig_enc_in_use = 0; - /* DCE4/5 */ - if (ASIC_IS_DCE4(rdev)) { - dig = 
radeon_encoder->enc_priv; - if (ASIC_IS_DCE41(rdev)) { + if (ASIC_IS_DCE6(rdev)) { + /* DCE6 */ + switch (radeon_encoder->encoder_id) { + case ENCODER_OBJECT_ID_INTERNAL_UNIPHY: + if (dig->linkb) + return 1; + else + return 0; + break; + case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1: + if (dig->linkb) + return 3; + else + return 2; + break; + case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2: + if (dig->linkb) + return 5; + else + return 4; + break; + } + } else if (ASIC_IS_DCE4(rdev)) { + /* DCE4/5 */ + if (ASIC_IS_DCE41(rdev) && !ASIC_IS_DCE61(rdev)) { /* ontario follows DCE4 */ if (rdev->family == CHIP_PALM) { if (dig->linkb) @@ -1848,10 +1898,12 @@ radeon_atom_encoder_mode_set(struct drm_encoder *encoder, struct drm_device *dev = encoder->dev; struct radeon_device *rdev = dev->dev_private; struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); - struct drm_encoder *ext_encoder = radeon_get_external_encoder(encoder); radeon_encoder->pixel_clock = adjusted_mode->clock; + /* need to call this here rather than in prepare() since we need some crtc info */ + radeon_atom_encoder_dpms(encoder, DRM_MODE_DPMS_OFF); + if (ASIC_IS_AVIVO(rdev) && !ASIC_IS_DCE4(rdev)) { if (radeon_encoder->active_device & (ATOM_DEVICE_CV_SUPPORT | ATOM_DEVICE_TV_SUPPORT)) atombios_yuv_setup(encoder, true); @@ -1870,38 +1922,7 @@ radeon_atom_encoder_mode_set(struct drm_encoder *encoder, case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1: case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2: case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA: - if (ASIC_IS_DCE41(rdev) || ASIC_IS_DCE5(rdev)) { - struct drm_connector *connector = radeon_get_connector_for_encoder(encoder); - struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv; - - if (!connector) - dig->panel_mode = DP_PANEL_MODE_EXTERNAL_DP_MODE; - else - dig->panel_mode = radeon_dp_get_panel_mode(encoder, connector); - - /* setup and enable the encoder */ - atombios_dig_encoder_setup(encoder, ATOM_ENCODER_CMD_SETUP, 0); - atombios_dig_encoder_setup(encoder, - ATOM_ENCODER_CMD_SETUP_PANEL_MODE, - dig->panel_mode); - } else if (ASIC_IS_DCE4(rdev)) { - /* disable the transmitter */ - atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_DISABLE, 0, 0); - /* setup and enable the encoder */ - atombios_dig_encoder_setup(encoder, ATOM_ENCODER_CMD_SETUP, 0); - - /* enable the transmitter */ - atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_ENABLE, 0, 0); - } else { - /* disable the encoder and transmitter */ - atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_DISABLE, 0, 0); - atombios_dig_encoder_setup(encoder, ATOM_DISABLE, 0); - - /* setup and enable the encoder and transmitter */ - atombios_dig_encoder_setup(encoder, ATOM_ENABLE, 0); - atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_SETUP, 0, 0); - atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_ENABLE, 0, 0); - } + /* handled in dpms */ break; case ENCODER_OBJECT_ID_INTERNAL_DDI: case ENCODER_OBJECT_ID_INTERNAL_DVO1: @@ -1922,14 +1943,6 @@ radeon_atom_encoder_mode_set(struct drm_encoder *encoder, break; } - if (ext_encoder) { - if (ASIC_IS_DCE41(rdev) || ASIC_IS_DCE61(rdev)) - atombios_external_encoder_setup(encoder, ext_encoder, - EXTERNAL_ENCODER_ACTION_V3_ENCODER_SETUP); - else - atombios_external_encoder_setup(encoder, ext_encoder, ATOM_ENABLE); - } - atombios_apply_encoder_quirks(encoder, adjusted_mode); if (atombios_get_encoder_mode(encoder) == ATOM_ENCODER_MODE_HDMI) { @@ -2102,7 +2115,6 @@ static void radeon_atom_encoder_prepare(struct drm_encoder *encoder) } 
radeon_atom_output_lock(encoder, true); - radeon_atom_encoder_dpms(encoder, DRM_MODE_DPMS_OFF); if (connector) { struct radeon_connector *radeon_connector = to_radeon_connector(connector); @@ -2123,6 +2135,7 @@ static void radeon_atom_encoder_prepare(struct drm_encoder *encoder) static void radeon_atom_encoder_commit(struct drm_encoder *encoder) { + /* need to call this here as we need the crtc set up */ radeon_atom_encoder_dpms(encoder, DRM_MODE_DPMS_ON); radeon_atom_output_lock(encoder, false); } @@ -2163,14 +2176,7 @@ static void radeon_atom_encoder_disable(struct drm_encoder *encoder) case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1: case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2: case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA: - if (ASIC_IS_DCE4(rdev)) - /* disable the transmitter */ - atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_DISABLE, 0, 0); - else { - /* disable the encoder and transmitter */ - atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_DISABLE, 0, 0); - atombios_dig_encoder_setup(encoder, ATOM_DISABLE, 0); - } + /* handled in dpms */ break; case ENCODER_OBJECT_ID_INTERNAL_DDI: case ENCODER_OBJECT_ID_INTERNAL_DVO1: diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c index e5328da70f84..c62132cecaf8 100644 --- a/drivers/gpu/drm/radeon/evergreen.c +++ b/drivers/gpu/drm/radeon/evergreen.c @@ -37,6 +37,16 @@ #define EVERGREEN_PFP_UCODE_SIZE 1120 #define EVERGREEN_PM4_UCODE_SIZE 1376 +static const u32 crtc_offsets[6] = +{ + EVERGREEN_CRTC0_REGISTER_OFFSET, + EVERGREEN_CRTC1_REGISTER_OFFSET, + EVERGREEN_CRTC2_REGISTER_OFFSET, + EVERGREEN_CRTC3_REGISTER_OFFSET, + EVERGREEN_CRTC4_REGISTER_OFFSET, + EVERGREEN_CRTC5_REGISTER_OFFSET +}; + static void evergreen_gpu_init(struct radeon_device *rdev); void evergreen_fini(struct radeon_device *rdev); void evergreen_pcie_gen2_enable(struct radeon_device *rdev); @@ -101,17 +111,19 @@ void evergreen_fix_pci_max_read_req_size(struct radeon_device *rdev) void dce4_wait_for_vblank(struct radeon_device *rdev, int crtc) { - struct radeon_crtc *radeon_crtc = rdev->mode_info.crtcs[crtc]; int i; - if (RREG32(EVERGREEN_CRTC_CONTROL + radeon_crtc->crtc_offset) & EVERGREEN_CRTC_MASTER_EN) { + if (crtc >= rdev->num_crtc) + return; + + if (RREG32(EVERGREEN_CRTC_CONTROL + crtc_offsets[crtc]) & EVERGREEN_CRTC_MASTER_EN) { for (i = 0; i < rdev->usec_timeout; i++) { - if (!(RREG32(EVERGREEN_CRTC_STATUS + radeon_crtc->crtc_offset) & EVERGREEN_CRTC_V_BLANK)) + if (!(RREG32(EVERGREEN_CRTC_STATUS + crtc_offsets[crtc]) & EVERGREEN_CRTC_V_BLANK)) break; udelay(1); } for (i = 0; i < rdev->usec_timeout; i++) { - if (RREG32(EVERGREEN_CRTC_STATUS + radeon_crtc->crtc_offset) & EVERGREEN_CRTC_V_BLANK) + if (RREG32(EVERGREEN_CRTC_STATUS + crtc_offsets[crtc]) & EVERGREEN_CRTC_V_BLANK) break; udelay(1); } @@ -1117,116 +1129,115 @@ void evergreen_agp_enable(struct radeon_device *rdev) void evergreen_mc_stop(struct radeon_device *rdev, struct evergreen_mc_save *save) { + u32 crtc_enabled, tmp, frame_count, blackout; + int i, j; + save->vga_render_control = RREG32(VGA_RENDER_CONTROL); save->vga_hdp_control = RREG32(VGA_HDP_CONTROL); - /* Stop all video */ + /* disable VGA render */ WREG32(VGA_RENDER_CONTROL, 0); - WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC0_REGISTER_OFFSET, 1); - WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC1_REGISTER_OFFSET, 1); - if (rdev->num_crtc >= 4) { - WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC2_REGISTER_OFFSET, 1); - WREG32(EVERGREEN_CRTC_UPDATE_LOCK + 
EVERGREEN_CRTC3_REGISTER_OFFSET, 1); - } - if (rdev->num_crtc >= 6) { - WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC4_REGISTER_OFFSET, 1); - WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC5_REGISTER_OFFSET, 1); - } - WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET, 0); - WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET, 0); - if (rdev->num_crtc >= 4) { - WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET, 0); - WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET, 0); - } - if (rdev->num_crtc >= 6) { - WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET, 0); - WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET, 0); - } - WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC0_REGISTER_OFFSET, 0); - WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC1_REGISTER_OFFSET, 0); - if (rdev->num_crtc >= 4) { - WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC2_REGISTER_OFFSET, 0); - WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC3_REGISTER_OFFSET, 0); - } - if (rdev->num_crtc >= 6) { - WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC4_REGISTER_OFFSET, 0); - WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC5_REGISTER_OFFSET, 0); + /* blank the display controllers */ + for (i = 0; i < rdev->num_crtc; i++) { + crtc_enabled = RREG32(EVERGREEN_CRTC_CONTROL + crtc_offsets[i]) & EVERGREEN_CRTC_MASTER_EN; + if (crtc_enabled) { + save->crtc_enabled[i] = true; + if (ASIC_IS_DCE6(rdev)) { + tmp = RREG32(EVERGREEN_CRTC_BLANK_CONTROL + crtc_offsets[i]); + if (!(tmp & EVERGREEN_CRTC_BLANK_DATA_EN)) { + radeon_wait_for_vblank(rdev, i); + tmp |= EVERGREEN_CRTC_BLANK_DATA_EN; + WREG32(EVERGREEN_CRTC_UPDATE_LOCK + crtc_offsets[i], 1); + WREG32(EVERGREEN_CRTC_BLANK_CONTROL + crtc_offsets[i], tmp); + WREG32(EVERGREEN_CRTC_UPDATE_LOCK + crtc_offsets[i], 0); + } + } else { + tmp = RREG32(EVERGREEN_CRTC_CONTROL + crtc_offsets[i]); + if (!(tmp & EVERGREEN_CRTC_DISP_READ_REQUEST_DISABLE)) { + radeon_wait_for_vblank(rdev, i); + tmp |= EVERGREEN_CRTC_DISP_READ_REQUEST_DISABLE; + WREG32(EVERGREEN_CRTC_UPDATE_LOCK + crtc_offsets[i], 1); + WREG32(EVERGREEN_CRTC_CONTROL + crtc_offsets[i], tmp); + WREG32(EVERGREEN_CRTC_UPDATE_LOCK + crtc_offsets[i], 0); + } + } + /* wait for the next frame */ + frame_count = radeon_get_vblank_counter(rdev, i); + for (j = 0; j < rdev->usec_timeout; j++) { + if (radeon_get_vblank_counter(rdev, i) != frame_count) + break; + udelay(1); + } + } else { + save->crtc_enabled[i] = false; + } } - WREG32(D1VGA_CONTROL, 0); - WREG32(D2VGA_CONTROL, 0); - if (rdev->num_crtc >= 4) { - WREG32(EVERGREEN_D3VGA_CONTROL, 0); - WREG32(EVERGREEN_D4VGA_CONTROL, 0); - } - if (rdev->num_crtc >= 6) { - WREG32(EVERGREEN_D5VGA_CONTROL, 0); - WREG32(EVERGREEN_D6VGA_CONTROL, 0); + radeon_mc_wait_for_idle(rdev); + + blackout = RREG32(MC_SHARED_BLACKOUT_CNTL); + if ((blackout & BLACKOUT_MODE_MASK) != 1) { + /* Block CPU access */ + WREG32(BIF_FB_EN, 0); + /* blackout the MC */ + blackout &= ~BLACKOUT_MODE_MASK; + WREG32(MC_SHARED_BLACKOUT_CNTL, blackout | 1); } + /* wait for the MC to settle */ + udelay(100); } void evergreen_mc_resume(struct radeon_device *rdev, struct evergreen_mc_save *save) { - WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS_HIGH + EVERGREEN_CRTC0_REGISTER_OFFSET, - upper_32_bits(rdev->mc.vram_start)); - WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS_HIGH + EVERGREEN_CRTC0_REGISTER_OFFSET, - upper_32_bits(rdev->mc.vram_start)); - WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS + EVERGREEN_CRTC0_REGISTER_OFFSET, - 
(u32)rdev->mc.vram_start); - WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS + EVERGREEN_CRTC0_REGISTER_OFFSET, - (u32)rdev->mc.vram_start); - - WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS_HIGH + EVERGREEN_CRTC1_REGISTER_OFFSET, - upper_32_bits(rdev->mc.vram_start)); - WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS_HIGH + EVERGREEN_CRTC1_REGISTER_OFFSET, - upper_32_bits(rdev->mc.vram_start)); - WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS + EVERGREEN_CRTC1_REGISTER_OFFSET, - (u32)rdev->mc.vram_start); - WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS + EVERGREEN_CRTC1_REGISTER_OFFSET, - (u32)rdev->mc.vram_start); - - if (rdev->num_crtc >= 4) { - WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS_HIGH + EVERGREEN_CRTC2_REGISTER_OFFSET, - upper_32_bits(rdev->mc.vram_start)); - WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS_HIGH + EVERGREEN_CRTC2_REGISTER_OFFSET, - upper_32_bits(rdev->mc.vram_start)); - WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS + EVERGREEN_CRTC2_REGISTER_OFFSET, - (u32)rdev->mc.vram_start); - WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS + EVERGREEN_CRTC2_REGISTER_OFFSET, - (u32)rdev->mc.vram_start); - - WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS_HIGH + EVERGREEN_CRTC3_REGISTER_OFFSET, - upper_32_bits(rdev->mc.vram_start)); - WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS_HIGH + EVERGREEN_CRTC3_REGISTER_OFFSET, - upper_32_bits(rdev->mc.vram_start)); - WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS + EVERGREEN_CRTC3_REGISTER_OFFSET, - (u32)rdev->mc.vram_start); - WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS + EVERGREEN_CRTC3_REGISTER_OFFSET, - (u32)rdev->mc.vram_start); - } - if (rdev->num_crtc >= 6) { - WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS_HIGH + EVERGREEN_CRTC4_REGISTER_OFFSET, - upper_32_bits(rdev->mc.vram_start)); - WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS_HIGH + EVERGREEN_CRTC4_REGISTER_OFFSET, - upper_32_bits(rdev->mc.vram_start)); - WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS + EVERGREEN_CRTC4_REGISTER_OFFSET, - (u32)rdev->mc.vram_start); - WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS + EVERGREEN_CRTC4_REGISTER_OFFSET, - (u32)rdev->mc.vram_start); + u32 tmp, frame_count; + int i, j; - WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS_HIGH + EVERGREEN_CRTC5_REGISTER_OFFSET, + /* update crtc base addresses */ + for (i = 0; i < rdev->num_crtc; i++) { + WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS_HIGH + crtc_offsets[i], upper_32_bits(rdev->mc.vram_start)); - WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS_HIGH + EVERGREEN_CRTC5_REGISTER_OFFSET, + WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS_HIGH + crtc_offsets[i], upper_32_bits(rdev->mc.vram_start)); - WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS + EVERGREEN_CRTC5_REGISTER_OFFSET, + WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS + crtc_offsets[i], (u32)rdev->mc.vram_start); - WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS + EVERGREEN_CRTC5_REGISTER_OFFSET, + WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS + crtc_offsets[i], (u32)rdev->mc.vram_start); } - WREG32(EVERGREEN_VGA_MEMORY_BASE_ADDRESS_HIGH, upper_32_bits(rdev->mc.vram_start)); WREG32(EVERGREEN_VGA_MEMORY_BASE_ADDRESS, (u32)rdev->mc.vram_start); - /* Unlock host access */ + + /* unblackout the MC */ + tmp = RREG32(MC_SHARED_BLACKOUT_CNTL); + tmp &= ~BLACKOUT_MODE_MASK; + WREG32(MC_SHARED_BLACKOUT_CNTL, tmp); + /* allow CPU access */ + WREG32(BIF_FB_EN, FB_READ_EN | FB_WRITE_EN); + + for (i = 0; i < rdev->num_crtc; i++) { + if (save->crtc_enabled) { + if (ASIC_IS_DCE6(rdev)) { + tmp = RREG32(EVERGREEN_CRTC_BLANK_CONTROL + 
crtc_offsets[i]); + tmp |= EVERGREEN_CRTC_BLANK_DATA_EN; + WREG32(EVERGREEN_CRTC_UPDATE_LOCK + crtc_offsets[i], 1); + WREG32(EVERGREEN_CRTC_BLANK_CONTROL + crtc_offsets[i], tmp); + WREG32(EVERGREEN_CRTC_UPDATE_LOCK + crtc_offsets[i], 0); + } else { + tmp = RREG32(EVERGREEN_CRTC_CONTROL + crtc_offsets[i]); + tmp &= ~EVERGREEN_CRTC_DISP_READ_REQUEST_DISABLE; + WREG32(EVERGREEN_CRTC_UPDATE_LOCK + crtc_offsets[i], 1); + WREG32(EVERGREEN_CRTC_CONTROL + crtc_offsets[i], tmp); + WREG32(EVERGREEN_CRTC_UPDATE_LOCK + crtc_offsets[i], 0); + } + /* wait for the next frame */ + frame_count = radeon_get_vblank_counter(rdev, i); + for (j = 0; j < rdev->usec_timeout; j++) { + if (radeon_get_vblank_counter(rdev, i) != frame_count) + break; + udelay(1); + } + } + } + /* Unlock vga access */ WREG32(VGA_HDP_CONTROL, save->vga_hdp_control); mdelay(1); WREG32(VGA_RENDER_CONTROL, save->vga_render_control); diff --git a/drivers/gpu/drm/radeon/evergreen_cs.c b/drivers/gpu/drm/radeon/evergreen_cs.c index ea69daeffac1..2cbd369e0fcb 100644 --- a/drivers/gpu/drm/radeon/evergreen_cs.c +++ b/drivers/gpu/drm/radeon/evergreen_cs.c @@ -2670,7 +2670,11 @@ static bool evergreen_vm_reg_valid(u32 reg) /* check config regs */ switch (reg) { + case WAIT_UNTIL: case GRBM_GFX_INDEX: + case CP_STRMOUT_CNTL: + case CP_COHER_CNTL: + case CP_COHER_SIZE: case VGT_VTX_VECT_EJECT_REG: case VGT_CACHE_INVALIDATION: case VGT_GS_VERTEX_REUSE: @@ -2775,6 +2779,7 @@ static bool evergreen_vm_reg_valid(u32 reg) case CAYMAN_SQ_EX_ALLOC_TABLE_SLOTS: return true; default: + DRM_ERROR("Invalid register 0x%x in CS\n", reg); return false; } } diff --git a/drivers/gpu/drm/radeon/evergreen_reg.h b/drivers/gpu/drm/radeon/evergreen_reg.h index 96c10b3991aa..34a0e856069f 100644 --- a/drivers/gpu/drm/radeon/evergreen_reg.h +++ b/drivers/gpu/drm/radeon/evergreen_reg.h @@ -218,6 +218,8 @@ #define EVERGREEN_CRTC_CONTROL 0x6e70 # define EVERGREEN_CRTC_MASTER_EN (1 << 0) # define EVERGREEN_CRTC_DISP_READ_REQUEST_DISABLE (1 << 24) +#define EVERGREEN_CRTC_BLANK_CONTROL 0x6e74 +# define EVERGREEN_CRTC_BLANK_DATA_EN (1 << 8) #define EVERGREEN_CRTC_STATUS 0x6e8c # define EVERGREEN_CRTC_V_BLANK (1 << 0) #define EVERGREEN_CRTC_STATUS_POSITION 0x6e90 diff --git a/drivers/gpu/drm/radeon/evergreend.h b/drivers/gpu/drm/radeon/evergreend.h index f62ccd3555d8..81e744fab31a 100644 --- a/drivers/gpu/drm/radeon/evergreend.h +++ b/drivers/gpu/drm/radeon/evergreend.h @@ -77,6 +77,14 @@ #define CONFIG_MEMSIZE 0x5428 +#define BIF_FB_EN 0x5490 +#define FB_READ_EN (1 << 0) +#define FB_WRITE_EN (1 << 1) + +#define CP_STRMOUT_CNTL 0x84FC + +#define CP_COHER_CNTL 0x85F0 +#define CP_COHER_SIZE 0x85F4 #define CP_COHER_BASE 0x85F8 #define CP_ME_CNTL 0x86D8 #define CP_ME_HALT (1 << 28) @@ -196,6 +204,9 @@ #define NOOFCHAN_MASK 0x00003000 #define MC_SHARED_CHREMAP 0x2008 +#define MC_SHARED_BLACKOUT_CNTL 0x20ac +#define BLACKOUT_MODE_MASK 0x00000007 + #define MC_ARB_RAMCFG 0x2760 #define NOOFBANK_SHIFT 0 #define NOOFBANK_MASK 0x00000003 diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h index 138b95216d8d..66150f0ffc73 100644 --- a/drivers/gpu/drm/radeon/radeon.h +++ b/drivers/gpu/drm/radeon/radeon.h @@ -138,21 +138,6 @@ struct radeon_device; /* * BIOS. 
*/ -#define ATRM_BIOS_PAGE 4096 - -#if defined(CONFIG_VGA_SWITCHEROO) -bool radeon_atrm_supported(struct pci_dev *pdev); -int radeon_atrm_get_bios_chunk(uint8_t *bios, int offset, int len); -#else -static inline bool radeon_atrm_supported(struct pci_dev *pdev) -{ - return false; -} - -static inline int radeon_atrm_get_bios_chunk(uint8_t *bios, int offset, int len){ - return -EINVAL; -} -#endif bool radeon_get_bios(struct radeon_device *rdev); diff --git a/drivers/gpu/drm/radeon/radeon_agp.c b/drivers/gpu/drm/radeon/radeon_agp.c index bd2f33e5c91a..bc6b64fe0e09 100644 --- a/drivers/gpu/drm/radeon/radeon_agp.c +++ b/drivers/gpu/drm/radeon/radeon_agp.c @@ -70,9 +70,12 @@ static struct radeon_agpmode_quirk radeon_agpmode_quirk_list[] = { /* Intel 82830 830 Chipset Host Bridge / Mobility M6 LY Needs AGPMode 2 (fdo #17360)*/ { PCI_VENDOR_ID_INTEL, 0x3575, PCI_VENDOR_ID_ATI, 0x4c59, PCI_VENDOR_ID_DELL, 0x00e3, 2}, - /* Intel 82852/82855 host bridge / Mobility FireGL 9000 R250 Needs AGPMode 1 (lp #296617) */ + /* Intel 82852/82855 host bridge / Mobility FireGL 9000 RV250 Needs AGPMode 1 (lp #296617) */ { PCI_VENDOR_ID_INTEL, 0x3580, PCI_VENDOR_ID_ATI, 0x4c66, PCI_VENDOR_ID_DELL, 0x0149, 1}, + /* Intel 82855PM host bridge / Mobility FireGL 9000 RV250 Needs AGPMode 1 for suspend/resume */ + { PCI_VENDOR_ID_INTEL, 0x3340, PCI_VENDOR_ID_ATI, 0x4c66, + PCI_VENDOR_ID_IBM, 0x0531, 1}, /* Intel 82852/82855 host bridge / Mobility 9600 M10 RV350 Needs AGPMode 1 (deb #467460) */ { PCI_VENDOR_ID_INTEL, 0x3580, PCI_VENDOR_ID_ATI, 0x4e50, 0x1025, 0x0061, 1}, diff --git a/drivers/gpu/drm/radeon/radeon_asic.h b/drivers/gpu/drm/radeon/radeon_asic.h index 665df8730c80..917e49cb490b 100644 --- a/drivers/gpu/drm/radeon/radeon_asic.h +++ b/drivers/gpu/drm/radeon/radeon_asic.h @@ -400,6 +400,7 @@ void r700_cp_fini(struct radeon_device *rdev); struct evergreen_mc_save { u32 vga_render_control; u32 vga_hdp_control; + bool crtc_enabled[RADEON_MAX_CRTCS]; }; void evergreen_pcie_gart_tlb_flush(struct radeon_device *rdev); diff --git a/drivers/gpu/drm/radeon/radeon_atombios.c b/drivers/gpu/drm/radeon/radeon_atombios.c index b1e3820df363..5e30e126bfe1 100644 --- a/drivers/gpu/drm/radeon/radeon_atombios.c +++ b/drivers/gpu/drm/radeon/radeon_atombios.c @@ -452,7 +452,7 @@ static bool radeon_atom_apply_quirks(struct drm_device *dev, } /* Fujitsu D3003-S2 board lists DVI-I as DVI-D and VGA */ - if ((dev->pdev->device == 0x9802) && + if (((dev->pdev->device == 0x9802) || (dev->pdev->device == 0x9806)) && (dev->pdev->subsystem_vendor == 0x1734) && (dev->pdev->subsystem_device == 0x11bd)) { if (*connector_type == DRM_MODE_CONNECTOR_VGA) { diff --git a/drivers/gpu/drm/radeon/radeon_atpx_handler.c b/drivers/gpu/drm/radeon/radeon_atpx_handler.c index 98724fcb0088..2a2cf0b88a28 100644 --- a/drivers/gpu/drm/radeon/radeon_atpx_handler.c +++ b/drivers/gpu/drm/radeon/radeon_atpx_handler.c @@ -30,57 +30,8 @@ static struct radeon_atpx_priv { /* handle for device - and atpx */ acpi_handle dhandle; acpi_handle atpx_handle; - acpi_handle atrm_handle; } radeon_atpx_priv; -/* retrieve the ROM in 4k blocks */ -static int radeon_atrm_call(acpi_handle atrm_handle, uint8_t *bios, - int offset, int len) -{ - acpi_status status; - union acpi_object atrm_arg_elements[2], *obj; - struct acpi_object_list atrm_arg; - struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL}; - - atrm_arg.count = 2; - atrm_arg.pointer = &atrm_arg_elements[0]; - - atrm_arg_elements[0].type = ACPI_TYPE_INTEGER; - atrm_arg_elements[0].integer.value = offset; - - 
atrm_arg_elements[1].type = ACPI_TYPE_INTEGER; - atrm_arg_elements[1].integer.value = len; - - status = acpi_evaluate_object(atrm_handle, NULL, &atrm_arg, &buffer); - if (ACPI_FAILURE(status)) { - printk("failed to evaluate ATRM got %s\n", acpi_format_exception(status)); - return -ENODEV; - } - - obj = (union acpi_object *)buffer.pointer; - memcpy(bios+offset, obj->buffer.pointer, obj->buffer.length); - len = obj->buffer.length; - kfree(buffer.pointer); - return len; -} - -bool radeon_atrm_supported(struct pci_dev *pdev) -{ - /* get the discrete ROM only via ATRM */ - if (!radeon_atpx_priv.atpx_detected) - return false; - - if (radeon_atpx_priv.dhandle == DEVICE_ACPI_HANDLE(&pdev->dev)) - return false; - return true; -} - - -int radeon_atrm_get_bios_chunk(uint8_t *bios, int offset, int len) -{ - return radeon_atrm_call(radeon_atpx_priv.atrm_handle, bios, offset, len); -} - static int radeon_atpx_get_version(acpi_handle handle) { acpi_status status; @@ -198,7 +149,7 @@ static int radeon_atpx_power_state(enum vga_switcheroo_client_id id, static bool radeon_atpx_pci_probe_handle(struct pci_dev *pdev) { - acpi_handle dhandle, atpx_handle, atrm_handle; + acpi_handle dhandle, atpx_handle; acpi_status status; dhandle = DEVICE_ACPI_HANDLE(&pdev->dev); @@ -209,13 +160,8 @@ static bool radeon_atpx_pci_probe_handle(struct pci_dev *pdev) if (ACPI_FAILURE(status)) return false; - status = acpi_get_handle(dhandle, "ATRM", &atrm_handle); - if (ACPI_FAILURE(status)) - return false; - radeon_atpx_priv.dhandle = dhandle; radeon_atpx_priv.atpx_handle = atpx_handle; - radeon_atpx_priv.atrm_handle = atrm_handle; return true; } diff --git a/drivers/gpu/drm/radeon/radeon_bios.c b/drivers/gpu/drm/radeon/radeon_bios.c index 501f4881e5aa..d306cc8fdeaa 100644 --- a/drivers/gpu/drm/radeon/radeon_bios.c +++ b/drivers/gpu/drm/radeon/radeon_bios.c @@ -32,6 +32,7 @@ #include <linux/vga_switcheroo.h> #include <linux/slab.h> +#include <linux/acpi.h> /* * BIOS. */ @@ -98,16 +99,81 @@ static bool radeon_read_bios(struct radeon_device *rdev) return true; } +#ifdef CONFIG_ACPI /* ATRM is used to get the BIOS on the discrete cards in * dual-gpu systems. */ +/* retrieve the ROM in 4k blocks */ +#define ATRM_BIOS_PAGE 4096 +/** + * radeon_atrm_call - fetch a chunk of the vbios + * + * @atrm_handle: acpi ATRM handle + * @bios: vbios image pointer + * @offset: offset of vbios image data to fetch + * @len: length of vbios image data to fetch + * + * Executes ATRM to fetch a chunk of the discrete + * vbios image on PX systems (all asics). + * Returns the length of the buffer fetched. 
+ */ +static int radeon_atrm_call(acpi_handle atrm_handle, uint8_t *bios, + int offset, int len) +{ + acpi_status status; + union acpi_object atrm_arg_elements[2], *obj; + struct acpi_object_list atrm_arg; + struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL}; + + atrm_arg.count = 2; + atrm_arg.pointer = &atrm_arg_elements[0]; + + atrm_arg_elements[0].type = ACPI_TYPE_INTEGER; + atrm_arg_elements[0].integer.value = offset; + + atrm_arg_elements[1].type = ACPI_TYPE_INTEGER; + atrm_arg_elements[1].integer.value = len; + + status = acpi_evaluate_object(atrm_handle, NULL, &atrm_arg, &buffer); + if (ACPI_FAILURE(status)) { + printk("failed to evaluate ATRM got %s\n", acpi_format_exception(status)); + return -ENODEV; + } + + obj = (union acpi_object *)buffer.pointer; + memcpy(bios+offset, obj->buffer.pointer, obj->buffer.length); + len = obj->buffer.length; + kfree(buffer.pointer); + return len; +} + static bool radeon_atrm_get_bios(struct radeon_device *rdev) { int ret; int size = 256 * 1024; int i; + struct pci_dev *pdev = NULL; + acpi_handle dhandle, atrm_handle; + acpi_status status; + bool found = false; + + /* ATRM is for the discrete card only */ + if (rdev->flags & RADEON_IS_IGP) + return false; + + while ((pdev = pci_get_class(PCI_CLASS_DISPLAY_VGA << 8, pdev)) != NULL) { + dhandle = DEVICE_ACPI_HANDLE(&pdev->dev); + if (!dhandle) + continue; + + status = acpi_get_handle(dhandle, "ATRM", &atrm_handle); + if (!ACPI_FAILURE(status)) { + found = true; + break; + } + } - if (!radeon_atrm_supported(rdev->pdev)) + if (!found) return false; rdev->bios = kmalloc(size, GFP_KERNEL); @@ -117,9 +183,10 @@ static bool radeon_atrm_get_bios(struct radeon_device *rdev) } for (i = 0; i < size / ATRM_BIOS_PAGE; i++) { - ret = radeon_atrm_get_bios_chunk(rdev->bios, - (i * ATRM_BIOS_PAGE), - ATRM_BIOS_PAGE); + ret = radeon_atrm_call(atrm_handle, + rdev->bios, + (i * ATRM_BIOS_PAGE), + ATRM_BIOS_PAGE); if (ret < ATRM_BIOS_PAGE) break; } @@ -130,6 +197,12 @@ static bool radeon_atrm_get_bios(struct radeon_device *rdev) } return true; } +#else +static inline bool radeon_atrm_get_bios(struct radeon_device *rdev) +{ + return false; +} +#endif static bool ni_read_disabled_bios(struct radeon_device *rdev) { @@ -476,6 +549,61 @@ static bool radeon_read_disabled_bios(struct radeon_device *rdev) return legacy_read_disabled_bios(rdev); } +#ifdef CONFIG_ACPI +static bool radeon_acpi_vfct_bios(struct radeon_device *rdev) +{ + bool ret = false; + struct acpi_table_header *hdr; + acpi_size tbl_size; + UEFI_ACPI_VFCT *vfct; + GOP_VBIOS_CONTENT *vbios; + VFCT_IMAGE_HEADER *vhdr; + + if (!ACPI_SUCCESS(acpi_get_table_with_size("VFCT", 1, &hdr, &tbl_size))) + return false; + if (tbl_size < sizeof(UEFI_ACPI_VFCT)) { + DRM_ERROR("ACPI VFCT table present but broken (too short #1)\n"); + goto out_unmap; + } + + vfct = (UEFI_ACPI_VFCT *)hdr; + if (vfct->VBIOSImageOffset + sizeof(VFCT_IMAGE_HEADER) > tbl_size) { + DRM_ERROR("ACPI VFCT table present but broken (too short #2)\n"); + goto out_unmap; + } + + vbios = (GOP_VBIOS_CONTENT *)((char *)hdr + vfct->VBIOSImageOffset); + vhdr = &vbios->VbiosHeader; + DRM_INFO("ACPI VFCT contains a BIOS for %02x:%02x.%d %04x:%04x, size %d\n", + vhdr->PCIBus, vhdr->PCIDevice, vhdr->PCIFunction, + vhdr->VendorID, vhdr->DeviceID, vhdr->ImageLength); + + if (vhdr->PCIBus != rdev->pdev->bus->number || + vhdr->PCIDevice != PCI_SLOT(rdev->pdev->devfn) || + vhdr->PCIFunction != PCI_FUNC(rdev->pdev->devfn) || + vhdr->VendorID != rdev->pdev->vendor || + vhdr->DeviceID != rdev->pdev->device) { + 
DRM_INFO("ACPI VFCT table is not for this card\n"); + goto out_unmap; + }; + + if (vfct->VBIOSImageOffset + sizeof(VFCT_IMAGE_HEADER) + vhdr->ImageLength > tbl_size) { + DRM_ERROR("ACPI VFCT image truncated\n"); + goto out_unmap; + } + + rdev->bios = kmemdup(&vbios->VbiosContent, vhdr->ImageLength, GFP_KERNEL); + ret = !!rdev->bios; + +out_unmap: + return ret; +} +#else +static inline bool radeon_acpi_vfct_bios(struct radeon_device *rdev) +{ + return false; +} +#endif bool radeon_get_bios(struct radeon_device *rdev) { @@ -484,6 +612,8 @@ bool radeon_get_bios(struct radeon_device *rdev) r = radeon_atrm_get_bios(rdev); if (r == false) + r = radeon_acpi_vfct_bios(rdev); + if (r == false) r = igp_read_bios_from_vram(rdev); if (r == false) r = radeon_read_bios(rdev); diff --git a/drivers/gpu/drm/radeon/radeon_combios.c b/drivers/gpu/drm/radeon/radeon_combios.c index 2cad9fde92fc..a2470d96deb6 100644 --- a/drivers/gpu/drm/radeon/radeon_combios.c +++ b/drivers/gpu/drm/radeon/radeon_combios.c @@ -2338,6 +2338,14 @@ bool radeon_get_legacy_connector_info_from_bios(struct drm_device *dev) 1), ATOM_DEVICE_CRT1_SUPPORT); } + /* RV100 board with external TDMS bit mis-set. + * Actually uses internal TMDS, clear the bit. + */ + if (dev->pdev->device == 0x5159 && + dev->pdev->subsystem_vendor == 0x1014 && + dev->pdev->subsystem_device == 0x029A) { + tmp &= ~(1 << 4); + } if ((tmp >> 4) & 0x1) { devices |= ATOM_DEVICE_DFP2_SUPPORT; radeon_add_legacy_encoder(dev, diff --git a/drivers/gpu/drm/radeon/radeon_connectors.c b/drivers/gpu/drm/radeon/radeon_connectors.c index 3fb7ca922069..ab63bcd17277 100644 --- a/drivers/gpu/drm/radeon/radeon_connectors.c +++ b/drivers/gpu/drm/radeon/radeon_connectors.c @@ -689,7 +689,7 @@ radeon_vga_detect(struct drm_connector *connector, bool force) ret = connector_status_disconnected; if (radeon_connector->ddc_bus) - dret = radeon_ddc_probe(radeon_connector); + dret = radeon_ddc_probe(radeon_connector, false); if (dret) { radeon_connector->detected_by_load = false; if (radeon_connector->edid) { @@ -895,7 +895,7 @@ radeon_dvi_detect(struct drm_connector *connector, bool force) return connector->status; if (radeon_connector->ddc_bus) - dret = radeon_ddc_probe(radeon_connector); + dret = radeon_ddc_probe(radeon_connector, false); if (dret) { radeon_connector->detected_by_load = false; if (radeon_connector->edid) { @@ -1335,7 +1335,8 @@ radeon_dp_detect(struct drm_connector *connector, bool force) if (encoder) { /* setup ddc on the bridge */ radeon_atom_ext_encoder_setup_ddc(encoder); - if (radeon_ddc_probe(radeon_connector)) /* try DDC */ + /* bridge chips are always aux */ + if (radeon_ddc_probe(radeon_connector, true)) /* try DDC */ ret = connector_status_connected; else if (radeon_connector->dac_load_detect) { /* try load detection */ struct drm_encoder_helper_funcs *encoder_funcs = encoder->helper_private; @@ -1353,7 +1354,8 @@ radeon_dp_detect(struct drm_connector *connector, bool force) if (radeon_dp_getdpcd(radeon_connector)) ret = connector_status_connected; } else { - if (radeon_ddc_probe(radeon_connector)) + /* try non-aux ddc (DP to DVI/HMDI/etc. 
adapter) */ + if (radeon_ddc_probe(radeon_connector, false)) ret = connector_status_connected; } } diff --git a/drivers/gpu/drm/radeon/radeon_cursor.c b/drivers/gpu/drm/radeon/radeon_cursor.c index 711e95ad39bf..8fb6f41bd1f2 100644 --- a/drivers/gpu/drm/radeon/radeon_cursor.c +++ b/drivers/gpu/drm/radeon/radeon_cursor.c @@ -238,7 +238,8 @@ int radeon_crtc_cursor_move(struct drm_crtc *crtc, y = 0; } - if (ASIC_IS_AVIVO(rdev)) { + /* fixed on DCE6 and newer */ + if (ASIC_IS_AVIVO(rdev) && !ASIC_IS_DCE6(rdev)) { int i = 0; struct drm_crtc *crtc_p; diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c index 5992502a3448..68c89dbfd976 100644 --- a/drivers/gpu/drm/radeon/radeon_device.c +++ b/drivers/gpu/drm/radeon/radeon_device.c @@ -358,7 +358,8 @@ bool radeon_card_posted(struct radeon_device *rdev) { uint32_t reg; - if (efi_enabled && rdev->pdev->subsystem_vendor == PCI_VENDOR_ID_APPLE) + if (efi_enabled(EFI_BOOT) && + rdev->pdev->subsystem_vendor == PCI_VENDOR_ID_APPLE) return false; /* first check CRTCs */ @@ -772,7 +773,7 @@ int radeon_device_init(struct radeon_device *rdev, if (rdev->flags & RADEON_IS_AGP) rdev->need_dma32 = true; if ((rdev->flags & RADEON_IS_PCI) && - (rdev->family < CHIP_RS400)) + (rdev->family <= CHIP_RS740)) rdev->need_dma32 = true; dma_bits = rdev->need_dma32 ? 32 : 40; diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c index 0a1d4bd65edc..00d9cac69f93 100644 --- a/drivers/gpu/drm/radeon/radeon_display.c +++ b/drivers/gpu/drm/radeon/radeon_display.c @@ -713,10 +713,15 @@ int radeon_ddc_get_modes(struct radeon_connector *radeon_connector) if (radeon_connector->router.ddc_valid) radeon_router_select_ddc_port(radeon_connector); - if ((radeon_connector->base.connector_type == DRM_MODE_CONNECTOR_DisplayPort) || - (radeon_connector->base.connector_type == DRM_MODE_CONNECTOR_eDP) || - (radeon_connector_encoder_get_dp_bridge_encoder_id(&radeon_connector->base) != - ENCODER_OBJECT_ID_NONE)) { + if (radeon_connector_encoder_get_dp_bridge_encoder_id(&radeon_connector->base) != + ENCODER_OBJECT_ID_NONE) { + struct radeon_connector_atom_dig *dig = radeon_connector->con_priv; + + if (dig->dp_i2c_bus) + radeon_connector->edid = drm_get_edid(&radeon_connector->base, + &dig->dp_i2c_bus->adapter); + } else if ((radeon_connector->base.connector_type == DRM_MODE_CONNECTOR_DisplayPort) || + (radeon_connector->base.connector_type == DRM_MODE_CONNECTOR_eDP)) { struct radeon_connector_atom_dig *dig = radeon_connector->con_priv; if ((dig->dp_sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT || @@ -1124,14 +1129,16 @@ radeon_user_framebuffer_create(struct drm_device *dev, } radeon_fb = kzalloc(sizeof(*radeon_fb), GFP_KERNEL); - if (radeon_fb == NULL) + if (radeon_fb == NULL) { + drm_gem_object_unreference_unlocked(obj); return ERR_PTR(-ENOMEM); + } ret = radeon_framebuffer_init(dev, radeon_fb, mode_cmd, obj); if (ret) { kfree(radeon_fb); drm_gem_object_unreference_unlocked(obj); - return NULL; + return ERR_PTR(ret); } return &radeon_fb->base; diff --git a/drivers/gpu/drm/radeon/radeon_i2c.c b/drivers/gpu/drm/radeon/radeon_i2c.c index 3edec1c198e3..6076e85aa5da 100644 --- a/drivers/gpu/drm/radeon/radeon_i2c.c +++ b/drivers/gpu/drm/radeon/radeon_i2c.c @@ -39,7 +39,7 @@ extern u32 radeon_atom_hw_i2c_func(struct i2c_adapter *adap); * radeon_ddc_probe * */ -bool radeon_ddc_probe(struct radeon_connector *radeon_connector) +bool radeon_ddc_probe(struct radeon_connector *radeon_connector, bool use_aux) { u8 out = 0x0; u8 
buf[8]; @@ -63,7 +63,13 @@ bool radeon_ddc_probe(struct radeon_connector *radeon_connector) if (radeon_connector->router.ddc_valid) radeon_router_select_ddc_port(radeon_connector); - ret = i2c_transfer(&radeon_connector->ddc_bus->adapter, msgs, 2); + if (use_aux) { + struct radeon_connector_atom_dig *dig = radeon_connector->con_priv; + ret = i2c_transfer(&dig->dp_i2c_bus->adapter, msgs, 2); + } else { + ret = i2c_transfer(&radeon_connector->ddc_bus->adapter, msgs, 2); + } + if (ret != 2) /* Couldn't find an accessible DDC on this connector */ return false; diff --git a/drivers/gpu/drm/radeon/radeon_irq_kms.c b/drivers/gpu/drm/radeon/radeon_irq_kms.c index 65060b77c805..645dcbf6490b 100644 --- a/drivers/gpu/drm/radeon/radeon_irq_kms.c +++ b/drivers/gpu/drm/radeon/radeon_irq_kms.c @@ -147,6 +147,16 @@ static bool radeon_msi_ok(struct radeon_device *rdev) (rdev->pdev->subsystem_device == 0x01fd)) return true; + /* Gateway RS690 only seems to work with MSIs. */ + if ((rdev->pdev->device == 0x791f) && + (rdev->pdev->subsystem_vendor == 0x107b) && + (rdev->pdev->subsystem_device == 0x0185)) + return true; + + /* try and enable MSIs by default on all RS690s */ + if (rdev->family == CHIP_RS690) + return true; + /* RV515 seems to have MSI issues where it loses * MSI rearms occasionally. This leads to lockups and freezes. * disable it by default. diff --git a/drivers/gpu/drm/radeon/radeon_legacy_encoders.c b/drivers/gpu/drm/radeon/radeon_legacy_encoders.c index 42db254f6bb0..53b07a3a8bb3 100644 --- a/drivers/gpu/drm/radeon/radeon_legacy_encoders.c +++ b/drivers/gpu/drm/radeon/radeon_legacy_encoders.c @@ -617,6 +617,14 @@ static enum drm_connector_status radeon_legacy_primary_dac_detect(struct drm_enc enum drm_connector_status found = connector_status_disconnected; bool color = true; + /* just don't bother on RN50 those chip are often connected to remoting + * console hw and often we get failure to load detect those. So to make + * everyone happy report the encoder as always connected. 
+ */ + if (ASIC_IS_RN50(rdev)) { + return connector_status_connected; + } + /* save the regs we need */ vclk_ecp_cntl = RREG32_PLL(RADEON_VCLK_ECP_CNTL); crtc_ext_cntl = RREG32(RADEON_CRTC_EXT_CNTL); @@ -650,6 +658,7 @@ static enum drm_connector_status radeon_legacy_primary_dac_detect(struct drm_enc tmp |= RADEON_DAC_RANGE_CNTL_PS2 | RADEON_DAC_CMP_EN; WREG32(RADEON_DAC_CNTL, tmp); + tmp = dac_macro_cntl; tmp &= ~(RADEON_DAC_PDWN_R | RADEON_DAC_PDWN_G | RADEON_DAC_PDWN_B); @@ -973,11 +982,7 @@ static void radeon_legacy_tmds_ext_mode_set(struct drm_encoder *encoder, static void radeon_ext_tmds_enc_destroy(struct drm_encoder *encoder) { struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); - struct radeon_encoder_ext_tmds *tmds = radeon_encoder->enc_priv; - if (tmds) { - if (tmds->i2c_bus) - radeon_i2c_destroy(tmds->i2c_bus); - } + /* don't destroy the i2c bus record here, this will be done in radeon_i2c_fini */ kfree(radeon_encoder->enc_priv); drm_encoder_cleanup(encoder); kfree(radeon_encoder); diff --git a/drivers/gpu/drm/radeon/radeon_mode.h b/drivers/gpu/drm/radeon/radeon_mode.h index 778c1f0d7b81..dabfefda8f55 100644 --- a/drivers/gpu/drm/radeon/radeon_mode.h +++ b/drivers/gpu/drm/radeon/radeon_mode.h @@ -527,7 +527,7 @@ extern void radeon_i2c_put_byte(struct radeon_i2c_chan *i2c, u8 val); extern void radeon_router_select_ddc_port(struct radeon_connector *radeon_connector); extern void radeon_router_select_cd_port(struct radeon_connector *radeon_connector); -extern bool radeon_ddc_probe(struct radeon_connector *radeon_connector); +extern bool radeon_ddc_probe(struct radeon_connector *radeon_connector, bool use_aux); extern int radeon_ddc_get_modes(struct radeon_connector *radeon_connector); extern struct drm_encoder *radeon_best_encoder(struct drm_connector *connector); diff --git a/drivers/gpu/drm/radeon/radeon_object.c b/drivers/gpu/drm/radeon/radeon_object.c index 80c6e8bad656..df6a4dbd93f8 100644 --- a/drivers/gpu/drm/radeon/radeon_object.c +++ b/drivers/gpu/drm/radeon/radeon_object.c @@ -136,6 +136,7 @@ int radeon_bo_create(struct radeon_device *rdev, acc_size = ttm_bo_dma_acc_size(&rdev->mman.bdev, size, sizeof(struct radeon_bo)); +retry: bo = kzalloc(sizeof(struct radeon_bo), GFP_KERNEL); if (bo == NULL) return -ENOMEM; @@ -149,8 +150,6 @@ int radeon_bo_create(struct radeon_device *rdev, bo->surface_reg = -1; INIT_LIST_HEAD(&bo->list); INIT_LIST_HEAD(&bo->va); - -retry: radeon_ttm_placement_from_domain(bo, domain); /* Kernel allocation are uninterruptible */ mutex_lock(&rdev->vram_mutex); diff --git a/drivers/gpu/drm/radeon/radeon_pm.c b/drivers/gpu/drm/radeon/radeon_pm.c index caa55d68f319..b8459bdce42e 100644 --- a/drivers/gpu/drm/radeon/radeon_pm.c +++ b/drivers/gpu/drm/radeon/radeon_pm.c @@ -567,7 +567,9 @@ void radeon_pm_suspend(struct radeon_device *rdev) void radeon_pm_resume(struct radeon_device *rdev) { /* set up the default clocks if the MC ucode is loaded */ - if (ASIC_IS_DCE5(rdev) && rdev->mc_fw) { + if ((rdev->family >= CHIP_BARTS) && + (rdev->family <= CHIP_CAYMAN) && + rdev->mc_fw) { if (rdev->pm.default_vddc) radeon_atom_set_voltage(rdev, rdev->pm.default_vddc, SET_VOLTAGE_TYPE_ASIC_VDDC); @@ -622,7 +624,9 @@ int radeon_pm_init(struct radeon_device *rdev) radeon_pm_print_states(rdev); radeon_pm_init_profile(rdev); /* set up the default clocks if the MC ucode is loaded */ - if (ASIC_IS_DCE5(rdev) && rdev->mc_fw) { + if ((rdev->family >= CHIP_BARTS) && + (rdev->family <= CHIP_CAYMAN) && + rdev->mc_fw) { if (rdev->pm.default_vddc) 
radeon_atom_set_voltage(rdev, rdev->pm.default_vddc, SET_VOLTAGE_TYPE_ASIC_VDDC); diff --git a/drivers/gpu/drm/radeon/radeon_ring.c b/drivers/gpu/drm/radeon/radeon_ring.c index cc33b3d7c33b..33eff8bcabeb 100644 --- a/drivers/gpu/drm/radeon/radeon_ring.c +++ b/drivers/gpu/drm/radeon/radeon_ring.c @@ -311,6 +311,9 @@ int radeon_ring_alloc(struct radeon_device *rdev, struct radeon_ring *ring, unsi { int r; + /* make sure we aren't trying to allocate more space than there is on the ring */ + if (ndw > (ring->ring_size / 4)) + return -ENOMEM; /* Align requested size with padding so unlock_commit can * pad safely */ ndw = (ndw + ring->align_mask) & ~ring->align_mask; diff --git a/drivers/gpu/drm/radeon/reg_srcs/cayman b/drivers/gpu/drm/radeon/reg_srcs/cayman index 0f656b111c15..a072fa8c46b0 100644 --- a/drivers/gpu/drm/radeon/reg_srcs/cayman +++ b/drivers/gpu/drm/radeon/reg_srcs/cayman @@ -1,5 +1,6 @@ cayman 0x9400 0x0000802C GRBM_GFX_INDEX +0x00008040 WAIT_UNTIL 0x000084FC CP_STRMOUT_CNTL 0x000085F0 CP_COHER_CNTL 0x000085F4 CP_COHER_SIZE diff --git a/drivers/gpu/drm/radeon/si.c b/drivers/gpu/drm/radeon/si.c index 2af1ce69e6e7..1197f2187d73 100644 --- a/drivers/gpu/drm/radeon/si.c +++ b/drivers/gpu/drm/radeon/si.c @@ -2593,6 +2593,7 @@ static bool si_vm_reg_valid(u32 reg) /* check config regs */ switch (reg) { case GRBM_GFX_INDEX: + case CP_STRMOUT_CNTL: case VGT_VTX_VECT_EJECT_REG: case VGT_CACHE_INVALIDATION: case VGT_ESGS_RING_SIZE: diff --git a/drivers/gpu/drm/radeon/sid.h b/drivers/gpu/drm/radeon/sid.h index 53ea2c42dbd6..2c2bc63d6da6 100644 --- a/drivers/gpu/drm/radeon/sid.h +++ b/drivers/gpu/drm/radeon/sid.h @@ -413,6 +413,7 @@ # define RDERR_INT_ENABLE (1 << 0) # define GUI_IDLE_INT_ENABLE (1 << 19) +#define CP_STRMOUT_CNTL 0x84FC #define SCRATCH_REG0 0x8500 #define SCRATCH_REG1 0x8504 #define SCRATCH_REG2 0x8508 diff --git a/drivers/gpu/drm/savage/savage_bci.c b/drivers/gpu/drm/savage/savage_bci.c index cb1ee4e0050a..2a258887e419 100644 --- a/drivers/gpu/drm/savage/savage_bci.c +++ b/drivers/gpu/drm/savage/savage_bci.c @@ -547,6 +547,8 @@ int savage_driver_load(struct drm_device *dev, unsigned long chipset) dev_priv->chipset = (enum savage_family)chipset; + pci_set_master(dev->pdev); + return 0; } diff --git a/drivers/gpu/drm/ttm/ttm_page_alloc.c b/drivers/gpu/drm/ttm/ttm_page_alloc.c index ebc6fac96e36..578207ecc43f 100644 --- a/drivers/gpu/drm/ttm/ttm_page_alloc.c +++ b/drivers/gpu/drm/ttm/ttm_page_alloc.c @@ -749,7 +749,10 @@ static int ttm_get_pages(struct page **pages, unsigned npages, int flags, /* clear the pages coming from the pool if requested */ if (flags & TTM_PAGE_FLAG_ZERO_ALLOC) { list_for_each_entry(p, &plist, lru) { - clear_page(page_address(p)); + if (PageHighMem(p)) + clear_highpage(p); + else + clear_page(page_address(p)); } } diff --git a/drivers/gpu/drm/udl/udl_connector.c b/drivers/gpu/drm/udl/udl_connector.c index ba055e9ca007..32342247d91b 100644 --- a/drivers/gpu/drm/udl/udl_connector.c +++ b/drivers/gpu/drm/udl/udl_connector.c @@ -22,13 +22,17 @@ static u8 *udl_get_edid(struct udl_device *udl) { u8 *block; - char rbuf[3]; + char *rbuf; int ret, i; block = kmalloc(EDID_LENGTH, GFP_KERNEL); if (block == NULL) return NULL; + rbuf = kmalloc(2, GFP_KERNEL); + if (rbuf == NULL) + goto error; + for (i = 0; i < EDID_LENGTH; i++) { ret = usb_control_msg(udl->ddev->usbdev, usb_rcvctrlpipe(udl->ddev->usbdev, 0), (0x02), @@ -36,16 +40,17 @@ static u8 *udl_get_edid(struct udl_device *udl) HZ); if (ret < 1) { DRM_ERROR("Read EDID byte %d failed err %x\n", i, ret); - i--; 
goto error; } block[i] = rbuf[1]; } + kfree(rbuf); return block; error: kfree(block); + kfree(rbuf); return NULL; } @@ -59,6 +64,14 @@ static int udl_get_modes(struct drm_connector *connector) connector->display_info.raw_edid = (char *)edid; + /* + * We only read the main block, but if the monitor reports extension + * blocks then the drm edid code expects them to be present, so patch + * the extension count to 0. + */ + edid->checksum += edid->extensions; + edid->extensions = 0; + drm_mode_connector_update_edid_property(connector, edid); ret = drm_add_edid_modes(connector, edid); connector->display_info.raw_edid = NULL; @@ -69,6 +82,13 @@ static int udl_get_modes(struct drm_connector *connector) static int udl_mode_valid(struct drm_connector *connector, struct drm_display_mode *mode) { + struct udl_device *udl = connector->dev->dev_private; + if (!udl->sku_pixel_limit) + return 0; + + if (mode->vdisplay * mode->hdisplay > udl->sku_pixel_limit) + return MODE_VIRTUAL_Y; + return 0; } diff --git a/drivers/gpu/drm/udl/udl_drv.h b/drivers/gpu/drm/udl/udl_drv.h index 96820d03a303..2b8c4fdda975 100644 --- a/drivers/gpu/drm/udl/udl_drv.h +++ b/drivers/gpu/drm/udl/udl_drv.h @@ -74,6 +74,8 @@ struct udl_framebuffer { struct drm_framebuffer base; struct udl_gem_object *obj; bool active_16; /* active on the 16-bit channel */ + int x1, y1, x2, y2; /* dirty rect */ + spinlock_t dirty_lock; }; #define to_udl_fb(x) container_of(x, struct udl_framebuffer, base) @@ -103,7 +105,7 @@ udl_fb_user_fb_create(struct drm_device *dev, int udl_render_hline(struct drm_device *dev, int bpp, struct urb **urb_ptr, const char *front, char **urb_buf_ptr, - u32 byte_offset, u32 byte_width, + u32 byte_offset, u32 device_byte_offset, u32 byte_width, int *ident_ptr, int *sent_ptr); int udl_dumb_create(struct drm_file *file_priv, diff --git a/drivers/gpu/drm/udl/udl_fb.c b/drivers/gpu/drm/udl/udl_fb.c index 4d9c3a5d8a45..f02d223d3947 100644 --- a/drivers/gpu/drm/udl/udl_fb.c +++ b/drivers/gpu/drm/udl/udl_fb.c @@ -22,9 +22,9 @@ #include "drm_fb_helper.h" -#define DL_DEFIO_WRITE_DELAY 5 /* fb_deferred_io.delay in jiffies */ +#define DL_DEFIO_WRITE_DELAY (HZ/20) /* fb_deferred_io.delay in jiffies */ -static int fb_defio = 1; /* Optionally enable experimental fb_defio mmap support */ +static int fb_defio = 0; /* Optionally enable experimental fb_defio mmap support */ static int fb_bpp = 16; module_param(fb_bpp, int, S_IWUSR | S_IRUSR | S_IWGRP | S_IRGRP); @@ -114,9 +114,10 @@ static void udlfb_dpy_deferred_io(struct fb_info *info, list_for_each_entry(cur, &fbdefio->pagelist, lru) { if (udl_render_hline(dev, (ufbdev->ufb.base.bits_per_pixel / 8), - &urb, (char *) info->fix.smem_start, - &cmd, cur->index << PAGE_SHIFT, - PAGE_SIZE, &bytes_identical, &bytes_sent)) + &urb, (char *) info->fix.smem_start, + &cmd, cur->index << PAGE_SHIFT, + cur->index << PAGE_SHIFT, + PAGE_SIZE, &bytes_identical, &bytes_sent)) goto error; bytes_rendered += PAGE_SIZE; } @@ -152,6 +153,9 @@ int udl_handle_damage(struct udl_framebuffer *fb, int x, int y, struct urb *urb; int aligned_x; int bpp = (fb->base.bits_per_pixel / 8); + int x2, y2; + bool store_for_later = false; + unsigned long flags; if (!fb->active_16) return 0; @@ -159,8 +163,6 @@ int udl_handle_damage(struct udl_framebuffer *fb, int x, int y, if (!fb->obj->vmapping) udl_gem_vmap(fb->obj); - start_cycles = get_cycles(); - aligned_x = DL_ALIGN_DOWN(x, sizeof(unsigned long)); width = DL_ALIGN_UP(width + (x-aligned_x), sizeof(unsigned long)); x = aligned_x; @@ -170,18 +172,53 @@ int 
udl_handle_damage(struct udl_framebuffer *fb, int x, int y, (y + height > fb->base.height)) return -EINVAL; + /* if we are in atomic just store the info + can't test inside spin lock */ + if (in_atomic()) + store_for_later = true; + + x2 = x + width - 1; + y2 = y + height - 1; + + spin_lock_irqsave(&fb->dirty_lock, flags); + + if (fb->y1 < y) + y = fb->y1; + if (fb->y2 > y2) + y2 = fb->y2; + if (fb->x1 < x) + x = fb->x1; + if (fb->x2 > x2) + x2 = fb->x2; + + if (store_for_later) { + fb->x1 = x; + fb->x2 = x2; + fb->y1 = y; + fb->y2 = y2; + spin_unlock_irqrestore(&fb->dirty_lock, flags); + return 0; + } + + fb->x1 = fb->y1 = INT_MAX; + fb->x2 = fb->y2 = 0; + + spin_unlock_irqrestore(&fb->dirty_lock, flags); + start_cycles = get_cycles(); + urb = udl_get_urb(dev); if (!urb) return 0; cmd = urb->transfer_buffer; - for (i = y; i < y + height ; i++) { + for (i = y; i <= y2 ; i++) { const int line_offset = fb->base.pitches[0] * i; const int byte_offset = line_offset + (x * bpp); - + const int dev_byte_offset = (fb->base.width * bpp * i) + (x * bpp); if (udl_render_hline(dev, bpp, &urb, (char *) fb->obj->vmapping, - &cmd, byte_offset, width * bpp, + &cmd, byte_offset, dev_byte_offset, + (x2 - x + 1) * bpp, &bytes_identical, &bytes_sent)) goto error; } @@ -406,6 +443,7 @@ udl_framebuffer_init(struct drm_device *dev, { int ret; + spin_lock_init(&ufb->dirty_lock); ufb->obj = obj; ret = drm_framebuffer_init(dev, &ufb->base, &udlfb_funcs); drm_helper_mode_fill_fb_struct(&ufb->base, mode_cmd); diff --git a/drivers/gpu/drm/udl/udl_transfer.c b/drivers/gpu/drm/udl/udl_transfer.c index b9320e2608dd..fc1134416bcc 100644 --- a/drivers/gpu/drm/udl/udl_transfer.c +++ b/drivers/gpu/drm/udl/udl_transfer.c @@ -213,11 +213,12 @@ static void udl_compress_hline16( */ int udl_render_hline(struct drm_device *dev, int bpp, struct urb **urb_ptr, const char *front, char **urb_buf_ptr, - u32 byte_offset, u32 byte_width, + u32 byte_offset, u32 device_byte_offset, + u32 byte_width, int *ident_ptr, int *sent_ptr) { const u8 *line_start, *line_end, *next_pixel; - u32 base16 = 0 + (byte_offset / bpp) * 2; + u32 base16 = 0 + (device_byte_offset / bpp) * 2; struct urb *urb = *urb_ptr; u8 *cmd = *urb_buf_ptr; u8 *cmd_end = (u8 *) urb->transfer_buffer + urb->transfer_buffer_length; diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_dmabuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_dmabuf.c index 3fa884db08ab..27151f74fec8 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_dmabuf.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_dmabuf.c @@ -306,7 +306,7 @@ void vmw_bo_pin(struct ttm_buffer_object *bo, bool pin) BUG_ON(!atomic_read(&bo->reserved)); BUG_ON(old_mem_type != TTM_PL_VRAM && - old_mem_type != VMW_PL_FLAG_GMR); + old_mem_type != VMW_PL_GMR); pl_flags = TTM_PL_FLAG_VRAM | VMW_PL_FLAG_GMR | TTM_PL_FLAG_CACHED; if (pin) diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c index ee24d216aa85..db50604ac216 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c @@ -182,6 +182,7 @@ static struct pci_device_id vmw_pci_id_list[] = { {0x15ad, 0x0405, PCI_ANY_ID, PCI_ANY_ID, 0, 0, VMWGFX_CHIP_SVGAII}, {0, 0, 0} }; +MODULE_DEVICE_TABLE(pci, vmw_pci_id_list); static int enable_fbdev; @@ -1101,6 +1102,11 @@ static void vmw_pm_complete(struct device *kdev) struct drm_device *dev = pci_get_drvdata(pdev); struct vmw_private *dev_priv = vmw_priv(dev); + mutex_lock(&dev_priv->hw_mutex); + vmw_write(dev_priv, SVGA_REG_ID, SVGA_ID_2); + (void) vmw_read(dev_priv, SVGA_REG_ID); + 
mutex_unlock(&dev_priv->hw_mutex); + /** * Reclaim 3d reference held by fbdev and potentially * start fifo. @@ -1158,6 +1164,11 @@ static struct drm_driver driver = { .open = vmw_driver_open, .preclose = vmw_preclose, .postclose = vmw_postclose, + + .dumb_create = vmw_dumb_create, + .dumb_map_offset = vmw_dumb_map_offset, + .dumb_destroy = vmw_dumb_destroy, + .fops = &vmwgfx_driver_fops, .name = VMWGFX_DRIVER_NAME, .desc = VMWGFX_DRIVER_DESC, diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h index d0f2c079ee27..29c984ff7f23 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h @@ -645,6 +645,16 @@ int vmw_kms_readback(struct vmw_private *dev_priv, int vmw_kms_update_layout_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv); +int vmw_dumb_create(struct drm_file *file_priv, + struct drm_device *dev, + struct drm_mode_create_dumb *args); + +int vmw_dumb_map_offset(struct drm_file *file_priv, + struct drm_device *dev, uint32_t handle, + uint64_t *offset); +int vmw_dumb_destroy(struct drm_file *file_priv, + struct drm_device *dev, + uint32_t handle); /** * Overlay control - vmwgfx_overlay.c */ diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c index f2fb8f15e2f1..7e0743358dff 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c @@ -1018,7 +1018,7 @@ int vmw_event_fence_action_create(struct drm_file *file_priv, } - event = kzalloc(sizeof(event->event), GFP_KERNEL); + event = kzalloc(sizeof(*event), GFP_KERNEL); if (unlikely(event == NULL)) { DRM_ERROR("Failed to allocate an event.\n"); ret = -ENOMEM; diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c index 2286d47e5022..00fb5aa2bf77 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c @@ -1688,15 +1688,19 @@ int vmw_du_page_flip(struct drm_crtc *crtc, struct vmw_private *dev_priv = vmw_priv(crtc->dev); struct drm_framebuffer *old_fb = crtc->fb; struct vmw_framebuffer *vfb = vmw_framebuffer_to_vfb(fb); - struct drm_file *file_priv = event->base.file_priv; + struct drm_file *file_priv ; struct vmw_fence_obj *fence = NULL; struct drm_clip_rect clips; int ret; + if (event == NULL) + return -EINVAL; + /* require ScreenObject support for page flipping */ if (!dev_priv->sou_priv) return -ENOSYS; + file_priv = event->base.file_priv; if (!vmw_kms_screen_object_flippable(dev_priv, crtc)) return -EINVAL; diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c index a37abb581cbb..059b32c6f22b 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c @@ -1917,3 +1917,76 @@ err_ref: vmw_resource_unreference(&res); return ret; } + + +int vmw_dumb_create(struct drm_file *file_priv, + struct drm_device *dev, + struct drm_mode_create_dumb *args) +{ + struct vmw_private *dev_priv = vmw_priv(dev); + struct vmw_master *vmaster = vmw_master(file_priv->master); + struct vmw_user_dma_buffer *vmw_user_bo; + struct ttm_buffer_object *tmp; + int ret; + + args->pitch = args->width * ((args->bpp + 7) / 8); + args->size = args->pitch * args->height; + + vmw_user_bo = kzalloc(sizeof(*vmw_user_bo), GFP_KERNEL); + if (vmw_user_bo == NULL) + return -ENOMEM; + + ret = ttm_read_lock(&vmaster->lock, true); + if (ret != 0) { + kfree(vmw_user_bo); + return ret; + } + + ret = vmw_dmabuf_init(dev_priv, &vmw_user_bo->dma, args->size, + &vmw_vram_sys_placement, 
true, + &vmw_user_dmabuf_destroy); + if (ret != 0) + goto out_no_dmabuf; + + tmp = ttm_bo_reference(&vmw_user_bo->dma.base); + ret = ttm_base_object_init(vmw_fpriv(file_priv)->tfile, + &vmw_user_bo->base, + false, + ttm_buffer_type, + &vmw_user_dmabuf_release, NULL); + if (unlikely(ret != 0)) + goto out_no_base_object; + + args->handle = vmw_user_bo->base.hash.key; + +out_no_base_object: + ttm_bo_unref(&tmp); +out_no_dmabuf: + ttm_read_unlock(&vmaster->lock); + return ret; +} + +int vmw_dumb_map_offset(struct drm_file *file_priv, + struct drm_device *dev, uint32_t handle, + uint64_t *offset) +{ + struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile; + struct vmw_dma_buffer *out_buf; + int ret; + + ret = vmw_user_dmabuf_lookup(tfile, handle, &out_buf); + if (ret != 0) + return -EINVAL; + + *offset = out_buf->base.addr_space_offset; + vmw_dmabuf_unreference(&out_buf); + return 0; +} + +int vmw_dumb_destroy(struct drm_file *file_priv, + struct drm_device *dev, + uint32_t handle) +{ + return ttm_ref_object_base_unref(vmw_fpriv(file_priv)->tfile, + handle, TTM_REF_USAGE); +} diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c index 41d4437411c3..ff73d60cfc7f 100644 --- a/drivers/hid/hid-core.c +++ b/drivers/hid/hid-core.c @@ -1385,6 +1385,7 @@ static const struct hid_device_id hid_have_special_driver[] = { { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_2009_ANSI) }, { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_2009_ISO) }, { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_2009_JIS) }, + { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_2011_ANSI) }, { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_2011_ISO) }, { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_FOUNTAIN_TP_ONLY) }, { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER1_TP_ONLY) }, @@ -1921,6 +1922,7 @@ static const struct hid_device_id hid_ignore_list[] = { { HID_USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_HYBRID) }, { HID_USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_HEATCONTROL) }, { HID_USB_DEVICE(USB_VENDOR_ID_MADCATZ, USB_DEVICE_ID_MADCATZ_BEATPAD) }, + { HID_USB_DEVICE(USB_VENDOR_ID_MASTERKIT, USB_DEVICE_ID_MASTERKIT_MA901RADIO) }, { HID_USB_DEVICE(USB_VENDOR_ID_MCC, USB_DEVICE_ID_MCC_PMD1024LS) }, { HID_USB_DEVICE(USB_VENDOR_ID_MCC, USB_DEVICE_ID_MCC_PMD1208LS) }, { HID_USB_DEVICE(USB_VENDOR_ID_MICROCHIP, USB_DEVICE_ID_PICKIT1) }, diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h index 41ad6ff548ce..02f4664478bd 100644 --- a/drivers/hid/hid-ids.h +++ b/drivers/hid/hid-ids.h @@ -520,6 +520,9 @@ #define USB_VENDOR_ID_MADCATZ 0x0738 #define USB_DEVICE_ID_MADCATZ_BEATPAD 0x4540 +#define USB_VENDOR_ID_MASTERKIT 0x16c0 +#define USB_DEVICE_ID_MASTERKIT_MA901RADIO 0x05df + #define USB_VENDOR_ID_MCC 0x09db #define USB_DEVICE_ID_MCC_PMD1024LS 0x0076 #define USB_DEVICE_ID_MCC_PMD1208LS 0x007a @@ -664,6 +667,9 @@ #define USB_VENDOR_ID_SIGMA_MICRO 0x1c4f #define USB_DEVICE_ID_SIGMA_MICRO_KEYBOARD 0x0002 +#define USB_VENDOR_ID_SIGMATEL 0x066F +#define USB_DEVICE_ID_SIGMATEL_STMP3780 0x3780 + #define USB_VENDOR_ID_SKYCABLE 0x1223 #define USB_DEVICE_ID_SKYCABLE_WIRELESS_PRESENTER 0x3F07 diff --git a/drivers/hid/hid-logitech-dj.c b/drivers/hid/hid-logitech-dj.c index d44ea58c597e..88d20101f85b 100644 --- a/drivers/hid/hid-logitech-dj.c +++ b/drivers/hid/hid-logitech-dj.c @@ -185,6 +185,7 @@ static struct hid_ll_driver logi_dj_ll_driver; static int 
logi_dj_output_hidraw_report(struct hid_device *hid, u8 * buf, size_t count, unsigned char report_type); +static int logi_dj_recv_query_paired_devices(struct dj_receiver_dev *djrcv_dev); static void logi_dj_recv_destroy_djhid_device(struct dj_receiver_dev *djrcv_dev, struct dj_report *dj_report) @@ -225,6 +226,7 @@ static void logi_dj_recv_add_djhid_device(struct dj_receiver_dev *djrcv_dev, if (dj_report->report_params[DEVICE_PAIRED_PARAM_SPFUNCTION] & SPFUNCTION_DEVICE_LIST_EMPTY) { dbg_hid("%s: device list is empty\n", __func__); + djrcv_dev->querying_devices = false; return; } @@ -235,6 +237,12 @@ static void logi_dj_recv_add_djhid_device(struct dj_receiver_dev *djrcv_dev, return; } + if (djrcv_dev->paired_dj_devices[dj_report->device_index]) { + /* The device is already known. No need to reallocate it. */ + dbg_hid("%s: device is already known\n", __func__); + return; + } + dj_hiddev = hid_allocate_device(); if (IS_ERR(dj_hiddev)) { dev_err(&djrcv_hdev->dev, "%s: hid_allocate_device failed\n", @@ -298,6 +306,7 @@ static void delayedwork_callback(struct work_struct *work) struct dj_report dj_report; unsigned long flags; int count; + int retval; dbg_hid("%s\n", __func__); @@ -330,6 +339,25 @@ static void delayedwork_callback(struct work_struct *work) logi_dj_recv_destroy_djhid_device(djrcv_dev, &dj_report); break; default: + /* A normal report (i. e. not belonging to a pair/unpair notification) + * arriving here, means that the report arrived but we did not have a + * paired dj_device associated to the report's device_index, this + * means that the original "device paired" notification corresponding + * to this dj_device never arrived to this driver. The reason is that + * hid-core discards all packets coming from a device while probe() is + * executing. */ + if (!djrcv_dev->paired_dj_devices[dj_report.device_index]) { + /* ok, we don't know the device, just re-ask the + * receiver for the list of connected devices. 
*/ + retval = logi_dj_recv_query_paired_devices(djrcv_dev); + if (!retval) { + /* everything went fine, so just leave */ + break; + } + dev_err(&djrcv_dev->hdev->dev, + "%s:logi_dj_recv_query_paired_devices " + "error:%d\n", __func__, retval); + } dbg_hid("%s: unexpected report type\n", __func__); } } @@ -360,6 +388,12 @@ static void logi_dj_recv_forward_null_report(struct dj_receiver_dev *djrcv_dev, if (!djdev) { dbg_hid("djrcv_dev->paired_dj_devices[dj_report->device_index]" " is NULL, index %d\n", dj_report->device_index); + kfifo_in(&djrcv_dev->notif_fifo, dj_report, sizeof(struct dj_report)); + + if (schedule_work(&djrcv_dev->work) == 0) { + dbg_hid("%s: did not schedule the work item, was already " + "queued\n", __func__); + } return; } @@ -390,6 +424,12 @@ static void logi_dj_recv_forward_report(struct dj_receiver_dev *djrcv_dev, if (dj_device == NULL) { dbg_hid("djrcv_dev->paired_dj_devices[dj_report->device_index]" " is NULL, index %d\n", dj_report->device_index); + kfifo_in(&djrcv_dev->notif_fifo, dj_report, sizeof(struct dj_report)); + + if (schedule_work(&djrcv_dev->work) == 0) { + dbg_hid("%s: did not schedule the work item, was already " + "queued\n", __func__); + } return; } @@ -437,6 +477,7 @@ static int logi_dj_recv_query_paired_devices(struct dj_receiver_dev *djrcv_dev) return logi_dj_recv_send_report(djrcv_dev, &dj_report); } + static int logi_dj_recv_switch_to_dj_mode(struct dj_receiver_dev *djrcv_dev, unsigned timeout) { diff --git a/drivers/hid/hid-logitech-dj.h b/drivers/hid/hid-logitech-dj.h index fd28a5e0ca3b..4a4000340ce1 100644 --- a/drivers/hid/hid-logitech-dj.h +++ b/drivers/hid/hid-logitech-dj.h @@ -101,6 +101,7 @@ struct dj_receiver_dev { struct work_struct work; struct kfifo notif_fifo; spinlock_t lock; + bool querying_devices; }; struct dj_device { diff --git a/drivers/hid/hid-microsoft.c b/drivers/hid/hid-microsoft.c index e5c699b6c6f3..38999898d041 100644 --- a/drivers/hid/hid-microsoft.c +++ b/drivers/hid/hid-microsoft.c @@ -29,22 +29,30 @@ #define MS_RDESC 0x08 #define MS_NOGET 0x10 #define MS_DUPLICATE_USAGES 0x20 +#define MS_RDESC_3K 0x40 -/* - * Microsoft Wireless Desktop Receiver (Model 1028) has - * 'Usage Min/Max' where it ought to have 'Physical Min/Max' - */ static __u8 *ms_report_fixup(struct hid_device *hdev, __u8 *rdesc, unsigned int *rsize) { unsigned long quirks = (unsigned long)hid_get_drvdata(hdev); + /* + * Microsoft Wireless Desktop Receiver (Model 1028) has + * 'Usage Min/Max' where it ought to have 'Physical Min/Max' + */ if ((quirks & MS_RDESC) && *rsize == 571 && rdesc[557] == 0x19 && rdesc[559] == 0x29) { hid_info(hdev, "fixing up Microsoft Wireless Receiver Model 1028 report descriptor\n"); rdesc[557] = 0x35; rdesc[559] = 0x45; } + /* the same as above (s/usage/physical/) */ + if ((quirks & MS_RDESC_3K) && *rsize == 106 && + !memcmp((char []){ 0x19, 0x00, 0x29, 0xff }, + &rdesc[94], 4)) { + rdesc[94] = 0x35; + rdesc[96] = 0x45; + } return rdesc; } @@ -193,7 +201,7 @@ static const struct hid_device_id ms_devices[] = { { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_PRESENTER_8K_USB), .driver_data = MS_PRESENTER }, { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_DIGITAL_MEDIA_3K), - .driver_data = MS_ERGONOMY }, + .driver_data = MS_ERGONOMY | MS_RDESC_3K }, { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_WIRELESS_OPTICAL_DESKTOP_3_0), .driver_data = MS_NOGET }, { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_COMFORT_MOUSE_4500), diff --git a/drivers/hid/hid-wiimote-ext.c 
b/drivers/hid/hid-wiimote-ext.c index aa958706c0e5..9e5728502017 100644 --- a/drivers/hid/hid-wiimote-ext.c +++ b/drivers/hid/hid-wiimote-ext.c @@ -378,14 +378,14 @@ static void handler_nunchuck(struct wiimote_ext *ext, const __u8 *payload) if (ext->motionp) { input_report_key(ext->input, - wiiext_keymap[WIIEXT_KEY_Z], !!(payload[5] & 0x04)); + wiiext_keymap[WIIEXT_KEY_Z], !(payload[5] & 0x04)); input_report_key(ext->input, - wiiext_keymap[WIIEXT_KEY_C], !!(payload[5] & 0x08)); + wiiext_keymap[WIIEXT_KEY_C], !(payload[5] & 0x08)); } else { input_report_key(ext->input, - wiiext_keymap[WIIEXT_KEY_Z], !!(payload[5] & 0x01)); + wiiext_keymap[WIIEXT_KEY_Z], !(payload[5] & 0x01)); input_report_key(ext->input, - wiiext_keymap[WIIEXT_KEY_C], !!(payload[5] & 0x02)); + wiiext_keymap[WIIEXT_KEY_C], !(payload[5] & 0x02)); } input_sync(ext->input); diff --git a/drivers/hid/usbhid/hid-quirks.c b/drivers/hid/usbhid/hid-quirks.c index 82f61ee632b5..9fea98f7c374 100644 --- a/drivers/hid/usbhid/hid-quirks.c +++ b/drivers/hid/usbhid/hid-quirks.c @@ -70,6 +70,7 @@ static const struct hid_blacklist { { USB_VENDOR_ID_CH, USB_DEVICE_ID_CH_AXIS_295, HID_QUIRK_NOGET }, { USB_VENDOR_ID_DMI, USB_DEVICE_ID_DMI_ENC, HID_QUIRK_NOGET }, { USB_VENDOR_ID_ELO, USB_DEVICE_ID_ELO_TS2700, HID_QUIRK_NOGET }, + { USB_VENDOR_ID_MGE, USB_DEVICE_ID_MGE_UPS, HID_QUIRK_NOGET }, { USB_VENDOR_ID_PIXART, USB_DEVICE_ID_PIXART_OPTICAL_TOUCH_SCREEN, HID_QUIRK_NO_INIT_REPORTS }, { USB_VENDOR_ID_PIXART, USB_DEVICE_ID_PIXART_OPTICAL_TOUCH_SCREEN1, HID_QUIRK_NO_INIT_REPORTS }, { USB_VENDOR_ID_PIXART, USB_DEVICE_ID_PIXART_OPTICAL_TOUCH_SCREEN2, HID_QUIRK_NO_INIT_REPORTS }, @@ -77,6 +78,7 @@ static const struct hid_blacklist { { USB_VENDOR_ID_QUANTA, USB_DEVICE_ID_PIXART_IMAGING_INC_OPTICAL_TOUCH_SCREEN, HID_QUIRK_NOGET }, { USB_VENDOR_ID_QUANTA, USB_DEVICE_ID_QUANTA_OPTICAL_TOUCH_3008, HID_QUIRK_NOGET }, { USB_VENDOR_ID_SENNHEISER, USB_DEVICE_ID_SENNHEISER_BTD500USB, HID_QUIRK_NOGET }, + { USB_VENDOR_ID_SIGMATEL, USB_DEVICE_ID_SIGMATEL_STMP3780, HID_QUIRK_NOGET }, { USB_VENDOR_ID_SUN, USB_DEVICE_ID_RARITAN_KVM_DONGLE, HID_QUIRK_NOGET }, { USB_VENDOR_ID_SYMBOL, USB_DEVICE_ID_SYMBOL_SCANNER_1, HID_QUIRK_NOGET }, { USB_VENDOR_ID_SYMBOL, USB_DEVICE_ID_SYMBOL_SCANNER_2, HID_QUIRK_NOGET }, diff --git a/drivers/hv/channel.c b/drivers/hv/channel.c index 406537420fff..f4c3d28cd1fc 100644 --- a/drivers/hv/channel.c +++ b/drivers/hv/channel.c @@ -146,14 +146,14 @@ int vmbus_open(struct vmbus_channel *newchannel, u32 send_ringbuffer_size, if (ret != 0) { err = ret; - goto errorout; + goto error0; } ret = hv_ringbuffer_init( &newchannel->inbound, in, recv_ringbuffer_size); if (ret != 0) { err = ret; - goto errorout; + goto error0; } @@ -168,7 +168,7 @@ int vmbus_open(struct vmbus_channel *newchannel, u32 send_ringbuffer_size, if (ret != 0) { err = ret; - goto errorout; + goto error0; } /* Create and init the channel open message */ @@ -177,7 +177,7 @@ int vmbus_open(struct vmbus_channel *newchannel, u32 send_ringbuffer_size, GFP_KERNEL); if (!open_info) { err = -ENOMEM; - goto errorout; + goto error0; } init_completion(&open_info->waitevent); @@ -193,7 +193,7 @@ int vmbus_open(struct vmbus_channel *newchannel, u32 send_ringbuffer_size, if (userdatalen > MAX_USER_DEFINED_BYTES) { err = -EINVAL; - goto errorout; + goto error0; } if (userdatalen) @@ -208,19 +208,18 @@ int vmbus_open(struct vmbus_channel *newchannel, u32 send_ringbuffer_size, sizeof(struct vmbus_channel_open_channel)); if (ret != 0) - goto cleanup; + goto error1; t = 
wait_for_completion_timeout(&open_info->waitevent, 5*HZ); if (t == 0) { err = -ETIMEDOUT; - goto errorout; + goto error1; } if (open_info->response.open_result.status) err = open_info->response.open_result.status; -cleanup: spin_lock_irqsave(&vmbus_connection.channelmsg_lock, flags); list_del(&open_info->msglistentry); spin_unlock_irqrestore(&vmbus_connection.channelmsg_lock, flags); @@ -228,9 +227,12 @@ cleanup: kfree(open_info); return err; -errorout: - hv_ringbuffer_cleanup(&newchannel->outbound); - hv_ringbuffer_cleanup(&newchannel->inbound); +error1: + spin_lock_irqsave(&vmbus_connection.channelmsg_lock, flags); + list_del(&open_info->msglistentry); + spin_unlock_irqrestore(&vmbus_connection.channelmsg_lock, flags); + +error0: free_pages((unsigned long)out, get_order(send_ringbuffer_size + recv_ringbuffer_size)); kfree(open_info); diff --git a/drivers/hwmon/ad7314.c b/drivers/hwmon/ad7314.c index f85ce70d9677..9815f9cc8f7a 100644 --- a/drivers/hwmon/ad7314.c +++ b/drivers/hwmon/ad7314.c @@ -94,10 +94,18 @@ static ssize_t ad7314_show_temperature(struct device *dev, } } +static ssize_t ad7314_show_name(struct device *dev, + struct device_attribute *devattr, char *buf) +{ + return sprintf(buf, "%s\n", to_spi_device(dev)->modalias); +} + +static DEVICE_ATTR(name, S_IRUGO, ad7314_show_name, NULL); static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, ad7314_show_temperature, NULL, 0); static struct attribute *ad7314_attributes[] = { + &dev_attr_name.attr, &sensor_dev_attr_temp1_input.dev_attr.attr, NULL, }; diff --git a/drivers/hwmon/ads7871.c b/drivers/hwmon/ads7871.c index e65c6e45d36b..7bf4ce3d405e 100644 --- a/drivers/hwmon/ads7871.c +++ b/drivers/hwmon/ads7871.c @@ -139,6 +139,12 @@ static ssize_t show_voltage(struct device *dev, } } +static ssize_t ads7871_show_name(struct device *dev, + struct device_attribute *devattr, char *buf) +{ + return sprintf(buf, "%s\n", to_spi_device(dev)->modalias); +} + static SENSOR_DEVICE_ATTR(in0_input, S_IRUGO, show_voltage, NULL, 0); static SENSOR_DEVICE_ATTR(in1_input, S_IRUGO, show_voltage, NULL, 1); static SENSOR_DEVICE_ATTR(in2_input, S_IRUGO, show_voltage, NULL, 2); @@ -148,6 +154,8 @@ static SENSOR_DEVICE_ATTR(in5_input, S_IRUGO, show_voltage, NULL, 5); static SENSOR_DEVICE_ATTR(in6_input, S_IRUGO, show_voltage, NULL, 6); static SENSOR_DEVICE_ATTR(in7_input, S_IRUGO, show_voltage, NULL, 7); +static DEVICE_ATTR(name, S_IRUGO, ads7871_show_name, NULL); + static struct attribute *ads7871_attributes[] = { &sensor_dev_attr_in0_input.dev_attr.attr, &sensor_dev_attr_in1_input.dev_attr.attr, @@ -157,6 +165,7 @@ static struct attribute *ads7871_attributes[] = { &sensor_dev_attr_in5_input.dev_attr.attr, &sensor_dev_attr_in6_input.dev_attr.attr, &sensor_dev_attr_in7_input.dev_attr.attr, + &dev_attr_name.attr, NULL }; diff --git a/drivers/hwmon/asus_atk0110.c b/drivers/hwmon/asus_atk0110.c index 351d1f4593e7..4ee578948723 100644 --- a/drivers/hwmon/asus_atk0110.c +++ b/drivers/hwmon/asus_atk0110.c @@ -34,6 +34,12 @@ static const struct dmi_system_id __initconst atk_force_new_if[] = { .matches = { DMI_MATCH(DMI_BOARD_NAME, "SABERTOOTH X58") } + }, { + /* Old interface reads the same sensor for fan0 and fan1 */ + .ident = "Asus M5A78L", + .matches = { + DMI_MATCH(DMI_BOARD_NAME, "M5A78L") + } }, { } }; diff --git a/drivers/hwmon/fam15h_power.c b/drivers/hwmon/fam15h_power.c index e8e18cab1fb8..770e95951a56 100644 --- a/drivers/hwmon/fam15h_power.c +++ b/drivers/hwmon/fam15h_power.c @@ -31,6 +31,9 @@ MODULE_DESCRIPTION("AMD Family 15h CPU processor power 
monitor"); MODULE_AUTHOR("Andreas Herrmann <andreas.herrmann3@amd.com>"); MODULE_LICENSE("GPL"); +/* Family 16h Northbridge's function 4 PCI ID */ +#define PCI_DEVICE_ID_AMD_16H_NB_F4 0x1534 + /* D18F3 */ #define REG_NORTHBRIDGE_CAP 0xe8 @@ -128,12 +131,12 @@ static bool __devinit fam15h_power_is_internal_node0(struct pci_dev *f4) * counter saturations resulting in bogus power readings. * We correct this value ourselves to cope with older BIOSes. */ -static DEFINE_PCI_DEVICE_TABLE(affected_device) = { +static const struct pci_device_id affected_device[] = { { PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_15H_NB_F4) }, { 0 } }; -static void __devinit tweak_runavg_range(struct pci_dev *pdev) +static void tweak_runavg_range(struct pci_dev *pdev) { u32 val; @@ -157,6 +160,16 @@ static void __devinit tweak_runavg_range(struct pci_dev *pdev) REG_TDP_RUNNING_AVERAGE, val); } +#ifdef CONFIG_PM +static int fam15h_power_resume(struct pci_dev *pdev) +{ + tweak_runavg_range(pdev); + return 0; +} +#else +#define fam15h_power_resume NULL +#endif + static void __devinit fam15h_power_init_data(struct pci_dev *f4, struct fam15h_power_data *data) { @@ -246,6 +259,7 @@ static void __devexit fam15h_power_remove(struct pci_dev *pdev) static DEFINE_PCI_DEVICE_TABLE(fam15h_power_id_table) = { { PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_15H_NB_F4) }, + { PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_16H_NB_F4) }, {} }; MODULE_DEVICE_TABLE(pci, fam15h_power_id_table); @@ -255,6 +269,7 @@ static struct pci_driver fam15h_power_driver = { .id_table = fam15h_power_id_table, .probe = fam15h_power_probe, .remove = __devexit_p(fam15h_power_remove), + .resume = fam15h_power_resume, }; static int __init fam15h_power_init(void) diff --git a/drivers/hwmon/lm73.c b/drivers/hwmon/lm73.c index 8fa2632cbbaf..7272176a9ec7 100644 --- a/drivers/hwmon/lm73.c +++ b/drivers/hwmon/lm73.c @@ -49,6 +49,7 @@ static ssize_t set_temp(struct device *dev, struct device_attribute *da, struct i2c_client *client = to_i2c_client(dev); long temp; short value; + s32 err; int status = kstrtol(buf, 10, &temp); if (status < 0) @@ -57,8 +58,8 @@ static ssize_t set_temp(struct device *dev, struct device_attribute *da, /* Write value */ value = (short) SENSORS_LIMIT(temp/250, (LM73_TEMP_MIN*4), (LM73_TEMP_MAX*4)) << 5; - i2c_smbus_write_word_swapped(client, attr->index, value); - return count; + err = i2c_smbus_write_word_swapped(client, attr->index, value); + return (err < 0) ? 
err : count; } static ssize_t show_temp(struct device *dev, struct device_attribute *da, @@ -66,11 +67,16 @@ static ssize_t show_temp(struct device *dev, struct device_attribute *da, { struct sensor_device_attribute *attr = to_sensor_dev_attr(da); struct i2c_client *client = to_i2c_client(dev); + int temp; + + s32 err = i2c_smbus_read_word_swapped(client, attr->index); + if (err < 0) + return err; + /* use integer division instead of equivalent right shift to guarantee arithmetic shift and preserve the sign */ - int temp = ((s16) (i2c_smbus_read_word_swapped(client, - attr->index))*250) / 32; - return sprintf(buf, "%d\n", temp); + temp = (((s16) err) * 250) / 32; + return scnprintf(buf, PAGE_SIZE, "%d\n", temp); } diff --git a/drivers/hwmon/twl4030-madc-hwmon.c b/drivers/hwmon/twl4030-madc-hwmon.c index 0018c7dd0097..1a174f0a3cde 100644 --- a/drivers/hwmon/twl4030-madc-hwmon.c +++ b/drivers/hwmon/twl4030-madc-hwmon.c @@ -44,12 +44,13 @@ static ssize_t madc_read(struct device *dev, struct device_attribute *devattr, char *buf) { struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr); - struct twl4030_madc_request req; + struct twl4030_madc_request req = { + .channels = 1 << attr->index, + .method = TWL4030_MADC_SW2, + .type = TWL4030_MADC_WAIT, + }; long val; - req.channels = (1 << attr->index); - req.method = TWL4030_MADC_SW2; - req.func_cb = NULL; val = twl4030_madc_conversion(&req); if (val < 0) return val; diff --git a/drivers/hwmon/w83627ehf.c b/drivers/hwmon/w83627ehf.c index 54922ed12978..88effda19198 100644 --- a/drivers/hwmon/w83627ehf.c +++ b/drivers/hwmon/w83627ehf.c @@ -2082,6 +2082,7 @@ static int __devinit w83627ehf_probe(struct platform_device *pdev) mutex_init(&data->lock); mutex_init(&data->update_lock); data->name = w83627ehf_device_names[sio_data->kind]; + data->bank = 0xff; /* Force initial bank selection */ platform_set_drvdata(pdev, data); /* 627EHG and 627EHF have 10 voltage inputs; 627DHG and 667HG have 9 */ diff --git a/drivers/i2c/busses/Kconfig b/drivers/i2c/busses/Kconfig index e5f21ceeba51..0a104644bcbb 100644 --- a/drivers/i2c/busses/Kconfig +++ b/drivers/i2c/busses/Kconfig @@ -104,6 +104,7 @@ config I2C_I801 DH89xxCC (PCH) Panther Point (PCH) Lynx Point (PCH) + Lynx Point-LP (PCH) This driver can also be built as a module. If so, the module will be called i2c-i801. @@ -350,9 +351,13 @@ config I2C_DAVINCI devices such as DaVinci NIC. For details please see http://www.ti.com/davinci +config I2C_DESIGNWARE_CORE + tristate + config I2C_DESIGNWARE_PLATFORM tristate "Synopsys DesignWare Platfrom" depends on HAVE_CLK + select I2C_DESIGNWARE_CORE help If you say yes to this option, support will be included for the Synopsys DesignWare I2C adapter. Only master mode is supported. @@ -363,6 +368,7 @@ config I2C_DESIGNWARE_PLATFORM config I2C_DESIGNWARE_PCI tristate "Synopsys DesignWare PCI" depends on PCI + select I2C_DESIGNWARE_CORE help If you say yes to this option, support will be included for the Synopsys DesignWare I2C adapter. Only master mode is supported. 
diff --git a/drivers/i2c/busses/Makefile b/drivers/i2c/busses/Makefile index 2cbfd854a1d1..46ea2a1db7ff 100644 --- a/drivers/i2c/busses/Makefile +++ b/drivers/i2c/busses/Makefile @@ -34,10 +34,11 @@ obj-$(CONFIG_I2C_AU1550) += i2c-au1550.o obj-$(CONFIG_I2C_BLACKFIN_TWI) += i2c-bfin-twi.o obj-$(CONFIG_I2C_CPM) += i2c-cpm.o obj-$(CONFIG_I2C_DAVINCI) += i2c-davinci.o +obj-$(CONFIG_I2C_DESIGNWARE_CORE) += i2c-designware-core.o obj-$(CONFIG_I2C_DESIGNWARE_PLATFORM) += i2c-designware-platform.o -i2c-designware-platform-objs := i2c-designware-platdrv.o i2c-designware-core.o +i2c-designware-platform-objs := i2c-designware-platdrv.o obj-$(CONFIG_I2C_DESIGNWARE_PCI) += i2c-designware-pci.o -i2c-designware-pci-objs := i2c-designware-pcidrv.o i2c-designware-core.o +i2c-designware-pci-objs := i2c-designware-pcidrv.o obj-$(CONFIG_I2C_EG20T) += i2c-eg20t.o obj-$(CONFIG_I2C_GPIO) += i2c-gpio.o obj-$(CONFIG_I2C_HIGHLANDER) += i2c-highlander.o diff --git a/drivers/i2c/busses/i2c-designware-core.c b/drivers/i2c/busses/i2c-designware-core.c index df8799241009..619334994df9 100644 --- a/drivers/i2c/busses/i2c-designware-core.c +++ b/drivers/i2c/busses/i2c-designware-core.c @@ -25,6 +25,7 @@ * ---------------------------------------------------------------------------- * */ +#include <linux/export.h> #include <linux/clk.h> #include <linux/errno.h> #include <linux/err.h> @@ -305,6 +306,7 @@ int i2c_dw_init(struct dw_i2c_dev *dev) dw_writel(dev, dev->master_cfg , DW_IC_CON); return 0; } +EXPORT_SYMBOL_GPL(i2c_dw_init); /* * Waiting for bus not busy @@ -557,12 +559,14 @@ done: return ret; } +EXPORT_SYMBOL_GPL(i2c_dw_xfer); u32 i2c_dw_func(struct i2c_adapter *adap) { struct dw_i2c_dev *dev = i2c_get_adapdata(adap); return dev->functionality; } +EXPORT_SYMBOL_GPL(i2c_dw_func); static u32 i2c_dw_read_clear_intrbits(struct dw_i2c_dev *dev) { @@ -667,17 +671,20 @@ tx_aborted: return IRQ_HANDLED; } +EXPORT_SYMBOL_GPL(i2c_dw_isr); void i2c_dw_enable(struct dw_i2c_dev *dev) { /* Enable the adapter */ dw_writel(dev, 1, DW_IC_ENABLE); } +EXPORT_SYMBOL_GPL(i2c_dw_enable); u32 i2c_dw_is_enabled(struct dw_i2c_dev *dev) { return dw_readl(dev, DW_IC_ENABLE); } +EXPORT_SYMBOL_GPL(i2c_dw_is_enabled); void i2c_dw_disable(struct dw_i2c_dev *dev) { @@ -688,18 +695,22 @@ void i2c_dw_disable(struct dw_i2c_dev *dev) dw_writel(dev, 0, DW_IC_INTR_MASK); dw_readl(dev, DW_IC_CLR_INTR); } +EXPORT_SYMBOL_GPL(i2c_dw_disable); void i2c_dw_clear_int(struct dw_i2c_dev *dev) { dw_readl(dev, DW_IC_CLR_INTR); } +EXPORT_SYMBOL_GPL(i2c_dw_clear_int); void i2c_dw_disable_int(struct dw_i2c_dev *dev) { dw_writel(dev, 0, DW_IC_INTR_MASK); } +EXPORT_SYMBOL_GPL(i2c_dw_disable_int); u32 i2c_dw_read_comp_param(struct dw_i2c_dev *dev) { return dw_readl(dev, DW_IC_COMP_PARAM_1); } +EXPORT_SYMBOL_GPL(i2c_dw_read_comp_param); diff --git a/drivers/i2c/busses/i2c-i801.c b/drivers/i2c/busses/i2c-i801.c index ae2945a5e007..d88ec812160e 100644 --- a/drivers/i2c/busses/i2c-i801.c +++ b/drivers/i2c/busses/i2c-i801.c @@ -52,6 +52,7 @@ DH89xxCC (PCH) 0x2330 32 hard yes yes yes Panther Point (PCH) 0x1e22 32 hard yes yes yes Lynx Point (PCH) 0x8c22 32 hard yes yes yes + Lynx Point-LP (PCH) 0x9c22 32 hard yes yes yes Features supported by this driver: Software PEC no @@ -147,6 +148,7 @@ #define PCI_DEVICE_ID_INTEL_DH89XXCC_SMBUS 0x2330 #define PCI_DEVICE_ID_INTEL_5_3400_SERIES_SMBUS 0x3b30 #define PCI_DEVICE_ID_INTEL_LYNXPOINT_SMBUS 0x8c22 +#define PCI_DEVICE_ID_INTEL_LYNXPOINT_LP_SMBUS 0x9c22 struct i801_priv { struct i2c_adapter adapter; @@ -636,6 +638,7 @@ static 
DEFINE_PCI_DEVICE_TABLE(i801_ids) = { { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_DH89XXCC_SMBUS) }, { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PANTHERPOINT_SMBUS) }, { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_LYNXPOINT_SMBUS) }, + { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_LYNXPOINT_LP_SMBUS) }, { 0, } }; diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c index e3e470fecaa9..67432e200c63 100644 --- a/drivers/infiniband/core/cma.c +++ b/drivers/infiniband/core/cma.c @@ -3451,7 +3451,8 @@ out: } static const struct ibnl_client_cbs cma_cb_table[] = { - [RDMA_NL_RDMA_CM_ID_STATS] = { .dump = cma_get_id_stats }, + [RDMA_NL_RDMA_CM_ID_STATS] = { .dump = cma_get_id_stats, + .module = THIS_MODULE }, }; static int __init cma_init(void) diff --git a/drivers/infiniband/core/netlink.c b/drivers/infiniband/core/netlink.c index 396e29370304..8c3b08ac496d 100644 --- a/drivers/infiniband/core/netlink.c +++ b/drivers/infiniband/core/netlink.c @@ -151,6 +151,7 @@ static int ibnl_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh) { struct netlink_dump_control c = { .dump = client->cb_table[op].dump, + .module = client->cb_table[op].module, }; return netlink_dump_start(nls, skb, nlh, &c); } diff --git a/drivers/infiniband/hw/nes/nes.h b/drivers/infiniband/hw/nes/nes.h index c438e4691b3c..3f41d9f5f703 100644 --- a/drivers/infiniband/hw/nes/nes.h +++ b/drivers/infiniband/hw/nes/nes.h @@ -524,6 +524,7 @@ void nes_iwarp_ce_handler(struct nes_device *, struct nes_hw_cq *); int nes_destroy_cqp(struct nes_device *); int nes_nic_cm_xmit(struct sk_buff *, struct net_device *); void nes_recheck_link_status(struct work_struct *work); +void nes_terminate_timeout(unsigned long context); /* nes_nic.c */ struct net_device *nes_netdev_init(struct nes_device *, void __iomem *); diff --git a/drivers/infiniband/hw/nes/nes_hw.c b/drivers/infiniband/hw/nes/nes_hw.c index d42c9f435b1b..96801c3a1fe8 100644 --- a/drivers/infiniband/hw/nes/nes_hw.c +++ b/drivers/infiniband/hw/nes/nes_hw.c @@ -75,7 +75,6 @@ static void nes_process_iwarp_aeqe(struct nes_device *nesdev, static void process_critical_error(struct nes_device *nesdev); static void nes_process_mac_intr(struct nes_device *nesdev, u32 mac_number); static unsigned int nes_reset_adapter_ne020(struct nes_device *nesdev, u8 *OneG_Mode); -static void nes_terminate_timeout(unsigned long context); static void nes_terminate_start_timer(struct nes_qp *nesqp); #ifdef CONFIG_INFINIBAND_NES_DEBUG @@ -3522,7 +3521,7 @@ static void nes_terminate_received(struct nes_device *nesdev, } /* Timeout routine in case terminate fails to complete */ -static void nes_terminate_timeout(unsigned long context) +void nes_terminate_timeout(unsigned long context) { struct nes_qp *nesqp = (struct nes_qp *)(unsigned long)context; @@ -3532,11 +3531,7 @@ static void nes_terminate_timeout(unsigned long context) /* Set a timer in case hw cannot complete the terminate sequence */ static void nes_terminate_start_timer(struct nes_qp *nesqp) { - init_timer(&nesqp->terminate_timer); - nesqp->terminate_timer.function = nes_terminate_timeout; - nesqp->terminate_timer.expires = jiffies + HZ; - nesqp->terminate_timer.data = (unsigned long)nesqp; - add_timer(&nesqp->terminate_timer); + mod_timer(&nesqp->terminate_timer, (jiffies + HZ)); } /** diff --git a/drivers/infiniband/hw/nes/nes_verbs.c b/drivers/infiniband/hw/nes/nes_verbs.c index 8b8812de4b5c..da84ea383f93 100644 --- a/drivers/infiniband/hw/nes/nes_verbs.c +++ 
b/drivers/infiniband/hw/nes/nes_verbs.c @@ -1404,6 +1404,9 @@ static struct ib_qp *nes_create_qp(struct ib_pd *ibpd, } nesqp->sig_all = (init_attr->sq_sig_type == IB_SIGNAL_ALL_WR); + init_timer(&nesqp->terminate_timer); + nesqp->terminate_timer.function = nes_terminate_timeout; + nesqp->terminate_timer.data = (unsigned long)nesqp; /* update the QP table */ nesdev->nesadapter->qp_table[nesqp->hwqp.qp_id-NES_FIRST_QPN] = nesqp; @@ -1413,7 +1416,6 @@ static struct ib_qp *nes_create_qp(struct ib_pd *ibpd, return &nesqp->ibqp; } - /** * nes_clean_cq */ @@ -2559,6 +2561,11 @@ static struct ib_mr *nes_reg_user_mr(struct ib_pd *pd, u64 start, u64 length, return ibmr; case IWNES_MEMREG_TYPE_QP: case IWNES_MEMREG_TYPE_CQ: + if (!region->length) { + nes_debug(NES_DBG_MR, "Unable to register zero length region for CQ\n"); + ib_umem_release(region); + return ERR_PTR(-EINVAL); + } nespbl = kzalloc(sizeof(*nespbl), GFP_KERNEL); if (!nespbl) { nes_debug(NES_DBG_MR, "Unable to allocate PBL\n"); diff --git a/drivers/infiniband/ulp/ipoib/ipoib_main.c b/drivers/infiniband/ulp/ipoib/ipoib_main.c index 3974c290b667..69b23c22d4e9 100644 --- a/drivers/infiniband/ulp/ipoib/ipoib_main.c +++ b/drivers/infiniband/ulp/ipoib/ipoib_main.c @@ -148,7 +148,7 @@ static int ipoib_stop(struct net_device *dev) netif_stop_queue(dev); - ipoib_ib_dev_down(dev, 0); + ipoib_ib_dev_down(dev, 1); ipoib_ib_dev_stop(dev, 0); if (!test_bit(IPOIB_FLAG_SUBINTERFACE, &priv->flags)) { diff --git a/drivers/infiniband/ulp/ipoib/ipoib_multicast.c b/drivers/infiniband/ulp/ipoib/ipoib_multicast.c index 20ebc6fd1bb9..213965d5bc8b 100644 --- a/drivers/infiniband/ulp/ipoib/ipoib_multicast.c +++ b/drivers/infiniband/ulp/ipoib/ipoib_multicast.c @@ -190,7 +190,9 @@ static int ipoib_mcast_join_finish(struct ipoib_mcast *mcast, mcast->mcmember = *mcmember; - /* Set the cached Q_Key before we attach if it's the broadcast group */ + /* Set the multicast MTU and cached Q_Key before we attach if it's + * the broadcast group. 
+ */ if (!memcmp(mcast->mcmember.mgid.raw, priv->dev->broadcast + 4, sizeof (union ib_gid))) { spin_lock_irq(&priv->lock); @@ -198,10 +200,17 @@ static int ipoib_mcast_join_finish(struct ipoib_mcast *mcast, spin_unlock_irq(&priv->lock); return -EAGAIN; } + priv->mcast_mtu = IPOIB_UD_MTU(ib_mtu_enum_to_int(priv->broadcast->mcmember.mtu)); priv->qkey = be32_to_cpu(priv->broadcast->mcmember.qkey); spin_unlock_irq(&priv->lock); priv->tx_wr.wr.ud.remote_qkey = priv->qkey; set_qkey = 1; + + if (!ipoib_cm_admin_enabled(dev)) { + rtnl_lock(); + dev_set_mtu(dev, min(priv->mcast_mtu, priv->admin_mtu)); + rtnl_unlock(); + } } if (!test_bit(IPOIB_MCAST_FLAG_SENDONLY, &mcast->flags)) { @@ -589,14 +598,6 @@ void ipoib_mcast_join_task(struct work_struct *work) return; } - priv->mcast_mtu = IPOIB_UD_MTU(ib_mtu_enum_to_int(priv->broadcast->mcmember.mtu)); - - if (!ipoib_cm_admin_enabled(dev)) { - rtnl_lock(); - dev_set_mtu(dev, min(priv->mcast_mtu, priv->admin_mtu)); - rtnl_unlock(); - } - ipoib_dbg_mcast(priv, "successfully joined all multicast groups\n"); clear_bit(IPOIB_MCAST_RUN, &priv->flags); diff --git a/drivers/infiniband/ulp/srp/ib_srp.c b/drivers/infiniband/ulp/srp/ib_srp.c index 1b5b0c730054..922d845f76b0 100644 --- a/drivers/infiniband/ulp/srp/ib_srp.c +++ b/drivers/infiniband/ulp/srp/ib_srp.c @@ -638,9 +638,9 @@ static void srp_reset_req(struct srp_target_port *target, struct srp_request *re struct scsi_cmnd *scmnd = srp_claim_req(target, req, NULL); if (scmnd) { + srp_free_req(target, req, scmnd, 0); scmnd->result = DID_RESET << 16; scmnd->scsi_done(scmnd); - srp_free_req(target, req, scmnd, 0); } } @@ -1687,6 +1687,7 @@ static int srp_abort(struct scsi_cmnd *scmnd) SRP_TSK_ABORT_TASK); srp_free_req(target, req, scmnd, 0); scmnd->result = DID_ABORT << 16; + scmnd->scsi_done(scmnd); return SUCCESS; } diff --git a/drivers/input/joystick/walkera0701.c b/drivers/input/joystick/walkera0701.c index 4dfa1eed4b7c..f8f892b076e8 100644 --- a/drivers/input/joystick/walkera0701.c +++ b/drivers/input/joystick/walkera0701.c @@ -196,6 +196,7 @@ static void walkera0701_close(struct input_dev *dev) struct walkera_dev *w = input_get_drvdata(dev); parport_disable_irq(w->parport); + hrtimer_cancel(&w->timer); } static int walkera0701_connect(struct walkera_dev *w, int parport) @@ -224,6 +225,9 @@ static int walkera0701_connect(struct walkera_dev *w, int parport) if (parport_claim(w->pardevice)) goto init_err1; + hrtimer_init(&w->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); + w->timer.function = timer_handler; + w->input_dev = input_allocate_device(); if (!w->input_dev) goto init_err2; @@ -254,8 +258,6 @@ static int walkera0701_connect(struct walkera_dev *w, int parport) if (err) goto init_err3; - hrtimer_init(&w->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); - w->timer.function = timer_handler; return 0; init_err3: @@ -271,7 +273,6 @@ static int walkera0701_connect(struct walkera_dev *w, int parport) static void walkera0701_disconnect(struct walkera_dev *w) { - hrtimer_cancel(&w->timer); input_unregister_device(w->input_dev); parport_release(w->pardevice); parport_unregister_device(w->pardevice); diff --git a/drivers/input/mouse/sentelic.c b/drivers/input/mouse/sentelic.c index 661a0ca3b3d6..1257ce8147fc 100644 --- a/drivers/input/mouse/sentelic.c +++ b/drivers/input/mouse/sentelic.c @@ -759,7 +759,7 @@ static psmouse_ret_t fsp_process_byte(struct psmouse *psmouse) fsp_set_slot(dev, 0, fgrs > 0, abs_x, abs_y); fsp_set_slot(dev, 1, false, 0, 0); } - if (fgrs > 0) { + if (fgrs == 1 || (fgrs == 2 && !(packet[0] & 
FSP_PB0_MFMC_FGR2))) { input_report_abs(dev, ABS_X, abs_x); input_report_abs(dev, ABS_Y, abs_y); } diff --git a/drivers/input/serio/i8042-x86ia64io.h b/drivers/input/serio/i8042-x86ia64io.h index 5ec774d6c82b..5f306f79da0c 100644 --- a/drivers/input/serio/i8042-x86ia64io.h +++ b/drivers/input/serio/i8042-x86ia64io.h @@ -177,6 +177,20 @@ static const struct dmi_system_id __initconst i8042_dmi_noloop_table[] = { }, }, { + /* Gigabyte T1005 - defines wrong chassis type ("Other") */ + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "GIGABYTE"), + DMI_MATCH(DMI_PRODUCT_NAME, "T1005"), + }, + }, + { + /* Gigabyte T1005M/P - defines wrong chassis type ("Other") */ + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "GIGABYTE"), + DMI_MATCH(DMI_PRODUCT_NAME, "T1005M/P"), + }, + }, + { .matches = { DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"), DMI_MATCH(DMI_PRODUCT_NAME, "HP Pavilion dv9700"), @@ -321,6 +335,12 @@ static const struct dmi_system_id __initconst i8042_dmi_nomux_table[] = { }, { .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"), + DMI_MATCH(DMI_PRODUCT_NAME, "SATELLITE C850D"), + }, + }, + { + .matches = { DMI_MATCH(DMI_SYS_VENDOR, "ALIENWARE"), DMI_MATCH(DMI_PRODUCT_NAME, "Sentia"), }, @@ -901,6 +921,7 @@ static int __init i8042_platform_init(void) int retval; #ifdef CONFIG_X86 + u8 a20_on = 0xdf; /* Just return if pre-detection shows no i8042 controller exist */ if (!x86_platform.i8042_detect()) return -ENODEV; @@ -940,6 +961,14 @@ static int __init i8042_platform_init(void) if (dmi_check_system(i8042_dmi_dritek_table)) i8042_dritek = true; + + /* + * A20 was already enabled during early kernel init. But some buggy + * BIOSes (in MSI Laptops) require A20 to be enabled using 8042 to + * resume from S3. So we do it here and hope that nothing breaks. + */ + i8042_command(&a20_on, 0x10d1); + i8042_command(NULL, 0x00ff); /* Null command for SMM firmware */ #endif /* CONFIG_X86 */ return retval; diff --git a/drivers/input/touchscreen/tsc40.c b/drivers/input/touchscreen/tsc40.c index 29d5ed4dd31c..80d46100d37e 100644 --- a/drivers/input/touchscreen/tsc40.c +++ b/drivers/input/touchscreen/tsc40.c @@ -107,7 +107,6 @@ static int tsc_connect(struct serio *serio, struct serio_driver *drv) __set_bit(BTN_TOUCH, input_dev->keybit); input_set_abs_params(ptsc->dev, ABS_X, 0, 0x3ff, 0, 0); input_set_abs_params(ptsc->dev, ABS_Y, 0, 0x3ff, 0, 0); - input_set_abs_params(ptsc->dev, ABS_PRESSURE, 0, 0, 0, 0); serio_set_drvdata(serio, ptsc); diff --git a/drivers/iommu/amd_iommu_init.c b/drivers/iommu/amd_iommu_init.c index c04ddca7f12f..b573f803d84b 100644 --- a/drivers/iommu/amd_iommu_init.c +++ b/drivers/iommu/amd_iommu_init.c @@ -1002,6 +1002,38 @@ static void __init free_iommu_all(void) } /* + * Family15h Model 10h-1fh erratum 746 (IOMMU Logging May Stall Translations) + * Workaround: + * BIOS should disable L2B micellaneous clock gating by setting + * L2_L2B_CK_GATE_CONTROL[CKGateL2BMiscDisable](D0F2xF4_x90[2]) = 1b + */ +static void __init amd_iommu_erratum_746_workaround(struct amd_iommu *iommu) +{ + u32 value; + + if ((boot_cpu_data.x86 != 0x15) || + (boot_cpu_data.x86_model < 0x10) || + (boot_cpu_data.x86_model > 0x1f)) + return; + + pci_write_config_dword(iommu->dev, 0xf0, 0x90); + pci_read_config_dword(iommu->dev, 0xf4, &value); + + if (value & BIT(2)) + return; + + /* Select NB indirect register 0x90 and enable writing */ + pci_write_config_dword(iommu->dev, 0xf0, 0x90 | (1 << 8)); + + pci_write_config_dword(iommu->dev, 0xf4, value | 0x4); + pr_info("AMD-Vi: Applying erratum 746 workaround for IOMMU at %s\n", 
+ dev_name(&iommu->dev->dev)); + + /* Clear the enable writing bit */ + pci_write_config_dword(iommu->dev, 0xf0, 0x90); +} + +/* * This function clues the initialization function for one IOMMU * together and also allocates the command buffer and programs the * hardware. It does NOT enable the IOMMU. This is done afterwards. @@ -1062,6 +1094,8 @@ static int __init init_iommu_one(struct amd_iommu *iommu, struct ivhd_header *h) if (iommu->cap & (1UL << IOMMU_CAP_NPCACHE)) amd_iommu_np_cache = true; + amd_iommu_erratum_746_workaround(iommu); + return pci_enable_device(iommu->dev); } @@ -1538,8 +1572,6 @@ int __init amd_iommu_init_hardware(void) if (amd_iommu_pd_alloc_bitmap == NULL) goto free; - /* init the device table */ - init_device_table(); /* * let all alias entries point to itself @@ -1621,6 +1653,7 @@ out: */ static int __init amd_iommu_init(void) { + struct amd_iommu *iommu; int ret = 0; ret = amd_iommu_init_hardware(); @@ -1639,6 +1672,12 @@ static int __init amd_iommu_init(void) if (ret) goto free; + /* init the device table */ + init_device_table(); + + for_each_iommu(iommu) + iommu_flush_all_caches(iommu); + amd_iommu_init_api(); x86_platform.iommu_shutdown = disable_iommus; diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c index 5fda348702c1..17119247ac4c 100644 --- a/drivers/iommu/intel-iommu.c +++ b/drivers/iommu/intel-iommu.c @@ -588,7 +588,9 @@ static void domain_update_iommu_coherency(struct dmar_domain *domain) { int i; - domain->iommu_coherency = 1; + i = find_first_bit(domain->iommu_bmp, g_num_of_iommus); + + domain->iommu_coherency = i < g_num_of_iommus ? 1 : 0; for_each_set_bit(i, domain->iommu_bmp, g_num_of_iommus) { if (!ecap_coherent(g_iommus[i]->ecap)) { @@ -1824,10 +1826,17 @@ static int __domain_mapping(struct dmar_domain *domain, unsigned long iov_pfn, if (!pte) return -ENOMEM; /* It is large page*/ - if (largepage_lvl > 1) + if (largepage_lvl > 1) { pteval |= DMA_PTE_LARGE_PAGE; - else + /* Ensure that old small page tables are removed to make room + for superpage, if they exist. */ + dma_pte_clear_range(domain, iov_pfn, + iov_pfn + lvl_to_nr_pages(largepage_lvl) - 1); + dma_pte_free_pagetable(domain, iov_pfn, + iov_pfn + lvl_to_nr_pages(largepage_lvl) - 1); + } else { pteval &= ~(uint64_t)DMA_PTE_LARGE_PAGE; + } } /* We don't need lock here, nobody else @@ -2312,8 +2321,39 @@ static int domain_add_dev_info(struct dmar_domain *domain, return 0; } +static bool device_has_rmrr(struct pci_dev *dev) +{ + struct dmar_rmrr_unit *rmrr; + int i; + + for_each_rmrr_units(rmrr) { + for (i = 0; i < rmrr->devices_cnt; i++) { + /* + * Return TRUE if this RMRR contains the device that + * is passed in. + */ + if (rmrr->devices[i] == dev) + return true; + } + } + return false; +} + static int iommu_should_identity_map(struct pci_dev *pdev, int startup) { + + /* + * We want to prevent any device associated with an RMRR from + * getting placed into the SI Domain. This is done because + * problems exist when devices are moved in and out of domains + * and their respective RMRR info is lost. We exempt USB devices + * from this process due to their usage of RMRRs that are known + * to not be needed after BIOS hand-off to OS. 
+ */ + if (device_has_rmrr(pdev) && + (pdev->class >> 8) != PCI_CLASS_SERIAL_USB) + return 0; + if ((iommu_identity_mapping & IDENTMAP_AZALIA) && IS_AZALIA(pdev)) return 1; @@ -4153,23 +4193,38 @@ static struct iommu_ops intel_iommu_ops = { .pgsize_bitmap = INTEL_IOMMU_PGSIZES, }; +static void __devinit quirk_iommu_g4x_gfx(struct pci_dev *dev) +{ + /* G4x/GM45 integrated gfx dmar support is totally busted. */ + printk(KERN_INFO "DMAR: Disabling IOMMU for graphics on this chipset\n"); + dmar_map_gfx = 0; +} + +DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2a40, quirk_iommu_g4x_gfx); +DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e00, quirk_iommu_g4x_gfx); +DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e10, quirk_iommu_g4x_gfx); +DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e20, quirk_iommu_g4x_gfx); +DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e30, quirk_iommu_g4x_gfx); +DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e40, quirk_iommu_g4x_gfx); +DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e90, quirk_iommu_g4x_gfx); + static void __devinit quirk_iommu_rwbf(struct pci_dev *dev) { /* * Mobile 4 Series Chipset neglects to set RWBF capability, - * but needs it: + * but needs it. Same seems to hold for the desktop versions. */ printk(KERN_INFO "DMAR: Forcing write-buffer flush capability\n"); rwbf_quirk = 1; - - /* https://bugzilla.redhat.com/show_bug.cgi?id=538163 */ - if (dev->revision == 0x07) { - printk(KERN_INFO "DMAR: Disabling IOMMU for graphics on this chipset\n"); - dmar_map_gfx = 0; - } } DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2a40, quirk_iommu_rwbf); +DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e00, quirk_iommu_rwbf); +DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e10, quirk_iommu_rwbf); +DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e20, quirk_iommu_rwbf); +DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e30, quirk_iommu_rwbf); +DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e40, quirk_iommu_rwbf); +DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e90, quirk_iommu_rwbf); #define GGC 0x52 #define GGC_MEMORY_SIZE_MASK (0xf << 8) diff --git a/drivers/iommu/intr_remapping.c b/drivers/iommu/intr_remapping.c index 6777ca049471..73ca321f330a 100644 --- a/drivers/iommu/intr_remapping.c +++ b/drivers/iommu/intr_remapping.c @@ -752,6 +752,7 @@ int __init parse_ioapics_under_ir(void) { struct dmar_drhd_unit *drhd; int ir_supported = 0; + int ioapic_idx; for_each_drhd_unit(drhd) { struct intel_iommu *iommu = drhd->iommu; @@ -764,13 +765,20 @@ int __init parse_ioapics_under_ir(void) } } - if (ir_supported && ir_ioapic_num != nr_ioapics) { - printk(KERN_WARNING - "Not all IO-APIC's listed under remapping hardware\n"); - return -1; + if (!ir_supported) + return 0; + + for (ioapic_idx = 0; ioapic_idx < nr_ioapics; ioapic_idx++) { + int ioapic_id = mpc_ioapic_id(ioapic_idx); + if (!map_ioapic_to_ir(ioapic_id)) { + pr_err(FW_BUG "ioapic %d has no mapping iommu, " + "interrupt remapping will be disabled\n", + ioapic_id); + return -1; + } } - return ir_supported; + return 1; } int __init ir_dev_scope_init(void) diff --git a/drivers/isdn/gigaset/bas-gigaset.c b/drivers/isdn/gigaset/bas-gigaset.c index afa080258bfa..5e1bf61e80e0 100644 --- a/drivers/isdn/gigaset/bas-gigaset.c +++ b/drivers/isdn/gigaset/bas-gigaset.c @@ -616,7 +616,13 @@ static void int_in_work(struct work_struct *work) if (rc == 0) /* success, resubmit interrupt read URB */ rc = usb_submit_urb(urb, GFP_ATOMIC); - if (rc != 0 && rc != -ENODEV) { + + switch (rc) { + case 0: /* success */ + case 
-ENODEV: /* device gone */ + case -EINVAL: /* URB already resubmitted, or terminal badness */ + break; + default: /* failure: try to recover by resetting the device */ dev_err(cs->dev, "clear halt failed: %s\n", get_usb_rcmsg(rc)); rc = usb_lock_device_for_reset(ucs->udev, ucs->interface); if (rc == 0) { @@ -2437,7 +2443,9 @@ static void gigaset_disconnect(struct usb_interface *interface) } /* gigaset_suspend - * This function is called before the USB connection is suspended. + * This function is called before the USB connection is suspended + * or before the USB device is reset. + * In the latter case, message == PMSG_ON. */ static int gigaset_suspend(struct usb_interface *intf, pm_message_t message) { @@ -2493,7 +2501,12 @@ static int gigaset_suspend(struct usb_interface *intf, pm_message_t message) del_timer_sync(&ucs->timer_atrdy); del_timer_sync(&ucs->timer_cmd_in); del_timer_sync(&ucs->timer_int_in); - cancel_work_sync(&ucs->int_in_wq); + + /* don't try to cancel int_in_wq from within reset as it + * might be the one requesting the reset + */ + if (message.event != PM_EVENT_ON) + cancel_work_sync(&ucs->int_in_wq); gig_dbg(DEBUG_SUSPEND, "suspend complete"); return 0; diff --git a/drivers/isdn/gigaset/capi.c b/drivers/isdn/gigaset/capi.c index 579aa021a659..be22d5ee8e0d 100644 --- a/drivers/isdn/gigaset/capi.c +++ b/drivers/isdn/gigaset/capi.c @@ -264,6 +264,8 @@ static inline void dump_rawmsg(enum debuglevel level, const char *tag, CAPIMSG_APPID(data), CAPIMSG_MSGID(data), l, CAPIMSG_CONTROL(data)); l -= 12; + if (l <= 0) + return; dbgline = kmalloc(3 * l, GFP_ATOMIC); if (!dbgline) return; diff --git a/drivers/isdn/isdnloop/isdnloop.c b/drivers/isdn/isdnloop/isdnloop.c index 5405ec644db3..baf2686aa8eb 100644 --- a/drivers/isdn/isdnloop/isdnloop.c +++ b/drivers/isdn/isdnloop/isdnloop.c @@ -16,7 +16,6 @@ #include <linux/sched.h> #include "isdnloop.h" -static char *revision = "$Revision: 1.11.6.7 $"; static char *isdnloop_id = "loop0"; MODULE_DESCRIPTION("ISDN4Linux: Pseudo Driver that simulates an ISDN card"); @@ -1494,17 +1493,6 @@ isdnloop_addcard(char *id1) static int __init isdnloop_init(void) { - char *p; - char rev[10]; - - if ((p = strchr(revision, ':'))) { - strcpy(rev, p + 1); - p = strchr(rev, '$'); - *p = 0; - } else - strcpy(rev, " ??? "); - printk(KERN_NOTICE "isdnloop-ISDN-driver Rev%s\n", rev); - if (isdnloop_id) return (isdnloop_addcard(isdnloop_id)); diff --git a/drivers/leds/leds-lp5521.c b/drivers/leds/leds-lp5521.c index 410a723b8691..23815624f35e 100644 --- a/drivers/leds/leds-lp5521.c +++ b/drivers/leds/leds-lp5521.c @@ -193,9 +193,14 @@ static int lp5521_load_program(struct lp5521_engine *eng, const u8 *pattern) /* move current engine to direct mode and remember the state */ ret = lp5521_set_engine_mode(eng, LP5521_CMD_DIRECT); + if (ret) + return ret; + /* Mode change requires min 500 us delay. 
1 - 2 ms with margin */ usleep_range(1000, 2000); - ret |= lp5521_read(client, LP5521_REG_OP_MODE, &mode); + ret = lp5521_read(client, LP5521_REG_OP_MODE, &mode); + if (ret) + return ret; /* For loading, all the engines to load mode */ lp5521_write(client, LP5521_REG_OP_MODE, LP5521_CMD_DIRECT); @@ -211,8 +216,7 @@ static int lp5521_load_program(struct lp5521_engine *eng, const u8 *pattern) LP5521_PROG_MEM_SIZE, pattern); - ret |= lp5521_write(client, LP5521_REG_OP_MODE, mode); - return ret; + return lp5521_write(client, LP5521_REG_OP_MODE, mode); } static int lp5521_set_led_current(struct lp5521_chip *chip, int led, u8 curr) @@ -785,7 +789,7 @@ static int __devinit lp5521_probe(struct i2c_client *client, * LP5521_REG_ENABLE register will not have any effect - strange! */ ret = lp5521_read(client, LP5521_REG_R_CURRENT, &buf); - if (buf != LP5521_REG_R_CURR_DEFAULT) { + if (ret || buf != LP5521_REG_R_CURR_DEFAULT) { dev_err(&client->dev, "error in resetting chip\n"); goto fail2; } diff --git a/drivers/md/dm-ioctl.c b/drivers/md/dm-ioctl.c index a1a3e6df17b8..f011d4b3c139 100644 --- a/drivers/md/dm-ioctl.c +++ b/drivers/md/dm-ioctl.c @@ -1563,6 +1563,14 @@ static int copy_params(struct dm_ioctl __user *user, struct dm_ioctl **param) if (copy_from_user(dmi, user, tmp.data_size)) goto bad; + /* + * Abort if something changed the ioctl data while it was being copied. + */ + if (dmi->data_size != tmp.data_size) { + DMERR("rejecting ioctl: data size modified while processing parameters"); + goto bad; + } + /* Wipe the user buffer so we do not return it to userspace */ if (secure_data && clear_user(user, tmp.data_size)) goto bad; diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c index 2e227fbf1622..f220a695a4b1 100644 --- a/drivers/md/dm-table.c +++ b/drivers/md/dm-table.c @@ -1351,17 +1351,25 @@ static int device_is_nonrot(struct dm_target *ti, struct dm_dev *dev, return q && blk_queue_nonrot(q); } -static bool dm_table_is_nonrot(struct dm_table *t) +static int device_is_not_random(struct dm_target *ti, struct dm_dev *dev, + sector_t start, sector_t len, void *data) +{ + struct request_queue *q = bdev_get_queue(dev->bdev); + + return q && !blk_queue_add_random(q); +} + +static bool dm_table_all_devices_attribute(struct dm_table *t, + iterate_devices_callout_fn func) { struct dm_target *ti; unsigned i = 0; - /* Ensure that all underlying device are non-rotational. */ while (i < dm_table_get_num_targets(t)) { ti = dm_table_get_target(t, i++); if (!ti->type->iterate_devices || - !ti->type->iterate_devices(ti, device_is_nonrot, NULL)) + !ti->type->iterate_devices(ti, func, NULL)) return 0; } @@ -1393,7 +1401,8 @@ void dm_table_set_restrictions(struct dm_table *t, struct request_queue *q, if (!dm_table_discard_zeroes_data(t)) q->limits.discard_zeroes_data = 0; - if (dm_table_is_nonrot(t)) + /* Ensure that all underlying devices are non-rotational. */ + if (dm_table_all_devices_attribute(t, device_is_nonrot)) queue_flag_set_unlocked(QUEUE_FLAG_NONROT, q); else queue_flag_clear_unlocked(QUEUE_FLAG_NONROT, q); @@ -1401,6 +1410,15 @@ void dm_table_set_restrictions(struct dm_table *t, struct request_queue *q, dm_table_set_integrity(t); /* + * Determine whether or not this queue's I/O timings contribute + * to the entropy pool, Only request-based targets use this. + * Clear QUEUE_FLAG_ADD_RANDOM if any underlying device does not + * have it set. 
+ */ + if (blk_queue_add_random(q) && dm_table_all_devices_attribute(t, device_is_not_random)) + queue_flag_clear_unlocked(QUEUE_FLAG_ADD_RANDOM, q); + + /* * QUEUE_FLAG_STACKABLE must be set after all queue settings are * visible to other CPUs because, once the flag is set, incoming bios * are processed by request-based dm, which refers to the queue diff --git a/drivers/md/dm-verity.c b/drivers/md/dm-verity.c index fa365d39b612..68bf5c37c3fe 100644 --- a/drivers/md/dm-verity.c +++ b/drivers/md/dm-verity.c @@ -718,8 +718,8 @@ static int verity_ctr(struct dm_target *ti, unsigned argc, char **argv) v->hash_dev_block_bits = ffs(num) - 1; if (sscanf(argv[5], "%llu%c", &num_ll, &dummy) != 1 || - num_ll << (v->data_dev_block_bits - SECTOR_SHIFT) != - (sector_t)num_ll << (v->data_dev_block_bits - SECTOR_SHIFT)) { + (sector_t)(num_ll << (v->data_dev_block_bits - SECTOR_SHIFT)) + >> (v->data_dev_block_bits - SECTOR_SHIFT) != num_ll) { ti->error = "Invalid data blocks"; r = -EINVAL; goto bad; @@ -733,8 +733,8 @@ static int verity_ctr(struct dm_target *ti, unsigned argc, char **argv) } if (sscanf(argv[6], "%llu%c", &num_ll, &dummy) != 1 || - num_ll << (v->hash_dev_block_bits - SECTOR_SHIFT) != - (sector_t)num_ll << (v->hash_dev_block_bits - SECTOR_SHIFT)) { + (sector_t)(num_ll << (v->hash_dev_block_bits - SECTOR_SHIFT)) + >> (v->hash_dev_block_bits - SECTOR_SHIFT) != num_ll) { ti->error = "Invalid hash start"; r = -EINVAL; goto bad; diff --git a/drivers/md/dm.c b/drivers/md/dm.c index e24143cc2040..32370ea32e22 100644 --- a/drivers/md/dm.c +++ b/drivers/md/dm.c @@ -754,8 +754,14 @@ static void rq_completed(struct mapped_device *md, int rw, int run_queue) if (!md_in_flight(md)) wake_up(&md->wait); + /* + * Run this off this callpath, as drivers could invoke end_io while + * inside their request_fn (and holding the queue lock). Calling + * back into ->request_fn() could deadlock attempting to grab the + * queue lock again. + */ if (run_queue) - blk_run_queue(md->queue); + blk_run_queue_async(md->queue); /* * dm_put() must be at the end of this function. See the comment above @@ -865,10 +871,14 @@ static void dm_done(struct request *clone, int error, bool mapped) { int r = error; struct dm_rq_target_io *tio = clone->end_io_data; - dm_request_endio_fn rq_end_io = tio->ti->type->rq_end_io; + dm_request_endio_fn rq_end_io = NULL; - if (mapped && rq_end_io) - r = rq_end_io(tio->ti, clone, error, &tio->info); + if (tio->ti) { + rq_end_io = tio->ti->type->rq_end_io; + + if (mapped && rq_end_io) + r = rq_end_io(tio->ti, clone, error, &tio->info); + } if (r <= 0) /* The target wants to complete the I/O */ @@ -1566,15 +1576,6 @@ static int map_request(struct dm_target *ti, struct request *clone, int r, requeued = 0; struct dm_rq_target_io *tio = clone->end_io_data; - /* - * Hold the md reference here for the in-flight I/O. - * We can't rely on the reference count by device opener, - * because the device may be closed during the request completion - * when all bios are completed. - * See the comment in rq_completed() too. - */ - dm_get(md); - tio->ti = ti; r = ti->type->map_rq(ti, clone, &tio->info); switch (r) { @@ -1606,6 +1607,26 @@ static int map_request(struct dm_target *ti, struct request *clone, return requeued; } +static struct request *dm_start_request(struct mapped_device *md, struct request *orig) +{ + struct request *clone; + + blk_start_request(orig); + clone = orig->special; + atomic_inc(&md->pending[rq_data_dir(clone)]); + + /* + * Hold the md reference here for the in-flight I/O. 
+ * We can't rely on the reference count by device opener, + * because the device may be closed during the request completion + * when all bios are completed. + * See the comment in rq_completed() too. + */ + dm_get(md); + + return clone; +} + /* * q->request_fn for request-based dm. * Called with the queue lock held. @@ -1635,14 +1656,21 @@ static void dm_request_fn(struct request_queue *q) pos = blk_rq_pos(rq); ti = dm_table_find_target(map, pos); - BUG_ON(!dm_target_is_valid(ti)); + if (!dm_target_is_valid(ti)) { + /* + * Must perform setup, that dm_done() requires, + * before calling dm_kill_unmapped_request + */ + DMERR_LIMIT("request attempted access beyond the end of device"); + clone = dm_start_request(md, rq); + dm_kill_unmapped_request(clone, -EIO); + continue; + } if (ti->type->busy && ti->type->busy(ti)) goto delay_and_out; - blk_start_request(rq); - clone = rq->special; - atomic_inc(&md->pending[rq_data_dir(clone)]); + clone = dm_start_request(md, rq); spin_unlock(q->queue_lock); if (map_request(ti, clone, md)) @@ -1662,8 +1690,6 @@ delay_and_out: blk_delay_queue(q, HZ / 10); out: dm_table_put(map); - - return; } int dm_underlying_device_busy(struct request_queue *q) diff --git a/drivers/md/md.c b/drivers/md/md.c index 9ee8ce3e9650..0a447a1be2ca 100644 --- a/drivers/md/md.c +++ b/drivers/md/md.c @@ -1143,8 +1143,11 @@ static int super_90_load(struct md_rdev *rdev, struct md_rdev *refdev, int minor ret = 0; } rdev->sectors = rdev->sb_start; - /* Limit to 4TB as metadata cannot record more than that */ - if (rdev->sectors >= (2ULL << 32)) + /* Limit to 4TB as metadata cannot record more than that. + * (not needed for Linear and RAID0 as metadata doesn't + * record this size) + */ + if (rdev->sectors >= (2ULL << 32) && sb->level >= 1) rdev->sectors = (2ULL << 32) - 2; if (rdev->sectors < ((sector_t)sb->size) * 2 && sb->level >= 1) @@ -1426,7 +1429,7 @@ super_90_rdev_size_change(struct md_rdev *rdev, sector_t num_sectors) /* Limit to 4TB as metadata cannot record more than that. * 4TB == 2^32 KB, or 2*2^32 sectors. 
*/ - if (num_sectors >= (2ULL << 32)) + if (num_sectors >= (2ULL << 32) && rdev->mddev->level >= 1) num_sectors = (2ULL << 32) - 2; md_super_write(rdev->mddev, rdev, rdev->sb_start, rdev->sb_size, rdev->sb_page); @@ -1802,10 +1805,10 @@ retry: memset(bbp, 0xff, PAGE_SIZE); for (i = 0 ; i < bb->count ; i++) { - u64 internal_bb = *p++; + u64 internal_bb = p[i]; u64 store_bb = ((BB_OFFSET(internal_bb) << 10) | BB_LEN(internal_bb)); - *bbp++ = cpu_to_le64(store_bb); + bbp[i] = cpu_to_le64(store_bb); } bb->changed = 0; if (read_seqretry(&bb->lock, seq)) @@ -7417,6 +7420,8 @@ static int remove_and_add_spares(struct mddev *mddev) } } } + if (removed) + set_bit(MD_CHANGE_DEVS, &mddev->flags); return spares; } @@ -7430,9 +7435,11 @@ static void reap_sync_thread(struct mddev *mddev) !test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery)) { /* success...*/ /* activate any spares */ - if (mddev->pers->spare_active(mddev)) + if (mddev->pers->spare_active(mddev)) { sysfs_notify(&mddev->kobj, NULL, "degraded"); + set_bit(MD_CHANGE_DEVS, &mddev->flags); + } } if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) && mddev->pers->finish_reshape) @@ -7687,9 +7694,9 @@ int md_is_badblock(struct badblocks *bb, sector_t s, int sectors, sector_t *first_bad, int *bad_sectors) { int hi; - int lo = 0; + int lo; u64 *p = bb->page; - int rv = 0; + int rv; sector_t target = s + sectors; unsigned seq; @@ -7704,7 +7711,8 @@ int md_is_badblock(struct badblocks *bb, sector_t s, int sectors, retry: seq = read_seqbegin(&bb->lock); - + lo = 0; + rv = 0; hi = bb->count; /* Binary search between lo and hi for 'target' diff --git a/drivers/md/persistent-data/dm-btree-internal.h b/drivers/md/persistent-data/dm-btree-internal.h index 5709bfeab1e8..accbb05f17b6 100644 --- a/drivers/md/persistent-data/dm-btree-internal.h +++ b/drivers/md/persistent-data/dm-btree-internal.h @@ -36,13 +36,13 @@ struct node_header { __le32 padding; } __packed; -struct node { +struct btree_node { struct node_header header; __le64 keys[0]; } __packed; -void inc_children(struct dm_transaction_manager *tm, struct node *n, +void inc_children(struct dm_transaction_manager *tm, struct btree_node *n, struct dm_btree_value_type *vt); int new_block(struct dm_btree_info *info, struct dm_block **result); @@ -64,7 +64,7 @@ struct ro_spine { void init_ro_spine(struct ro_spine *s, struct dm_btree_info *info); int exit_ro_spine(struct ro_spine *s); int ro_step(struct ro_spine *s, dm_block_t new_child); -struct node *ro_node(struct ro_spine *s); +struct btree_node *ro_node(struct ro_spine *s); struct shadow_spine { struct dm_btree_info *info; @@ -98,17 +98,17 @@ int shadow_root(struct shadow_spine *s); /* * Some inlines. */ -static inline __le64 *key_ptr(struct node *n, uint32_t index) +static inline __le64 *key_ptr(struct btree_node *n, uint32_t index) { return n->keys + index; } -static inline void *value_base(struct node *n) +static inline void *value_base(struct btree_node *n) { return &n->keys[le32_to_cpu(n->header.max_entries)]; } -static inline void *value_ptr(struct node *n, uint32_t index) +static inline void *value_ptr(struct btree_node *n, uint32_t index) { uint32_t value_size = le32_to_cpu(n->header.value_size); return value_base(n) + (value_size * index); @@ -117,7 +117,7 @@ static inline void *value_ptr(struct node *n, uint32_t index) /* * Assumes the values are suitably-aligned and converts to core format. 
*/ -static inline uint64_t value64(struct node *n, uint32_t index) +static inline uint64_t value64(struct btree_node *n, uint32_t index) { __le64 *values_le = value_base(n); @@ -127,7 +127,7 @@ static inline uint64_t value64(struct node *n, uint32_t index) /* * Searching for a key within a single node. */ -int lower_bound(struct node *n, uint64_t key); +int lower_bound(struct btree_node *n, uint64_t key); extern struct dm_block_validator btree_node_validator; diff --git a/drivers/md/persistent-data/dm-btree-remove.c b/drivers/md/persistent-data/dm-btree-remove.c index aa71e2359a07..c4f28133ef82 100644 --- a/drivers/md/persistent-data/dm-btree-remove.c +++ b/drivers/md/persistent-data/dm-btree-remove.c @@ -53,7 +53,7 @@ /* * Some little utilities for moving node data around. */ -static void node_shift(struct node *n, int shift) +static void node_shift(struct btree_node *n, int shift) { uint32_t nr_entries = le32_to_cpu(n->header.nr_entries); uint32_t value_size = le32_to_cpu(n->header.value_size); @@ -79,7 +79,7 @@ static void node_shift(struct node *n, int shift) } } -static void node_copy(struct node *left, struct node *right, int shift) +static void node_copy(struct btree_node *left, struct btree_node *right, int shift) { uint32_t nr_left = le32_to_cpu(left->header.nr_entries); uint32_t value_size = le32_to_cpu(left->header.value_size); @@ -108,7 +108,7 @@ static void node_copy(struct node *left, struct node *right, int shift) /* * Delete a specific entry from a leaf node. */ -static void delete_at(struct node *n, unsigned index) +static void delete_at(struct btree_node *n, unsigned index) { unsigned nr_entries = le32_to_cpu(n->header.nr_entries); unsigned nr_to_copy = nr_entries - (index + 1); @@ -128,7 +128,7 @@ static void delete_at(struct node *n, unsigned index) n->header.nr_entries = cpu_to_le32(nr_entries - 1); } -static unsigned merge_threshold(struct node *n) +static unsigned merge_threshold(struct btree_node *n) { return le32_to_cpu(n->header.max_entries) / 3; } @@ -136,7 +136,7 @@ static unsigned merge_threshold(struct node *n) struct child { unsigned index; struct dm_block *block; - struct node *n; + struct btree_node *n; }; static struct dm_btree_value_type le64_type = { @@ -147,7 +147,7 @@ static struct dm_btree_value_type le64_type = { .equal = NULL }; -static int init_child(struct dm_btree_info *info, struct node *parent, +static int init_child(struct dm_btree_info *info, struct btree_node *parent, unsigned index, struct child *result) { int r, inc; @@ -177,7 +177,7 @@ static int exit_child(struct dm_btree_info *info, struct child *c) return dm_tm_unlock(info->tm, c->block); } -static void shift(struct node *left, struct node *right, int count) +static void shift(struct btree_node *left, struct btree_node *right, int count) { uint32_t nr_left = le32_to_cpu(left->header.nr_entries); uint32_t nr_right = le32_to_cpu(right->header.nr_entries); @@ -203,11 +203,11 @@ static void shift(struct node *left, struct node *right, int count) right->header.nr_entries = cpu_to_le32(nr_right + count); } -static void __rebalance2(struct dm_btree_info *info, struct node *parent, +static void __rebalance2(struct dm_btree_info *info, struct btree_node *parent, struct child *l, struct child *r) { - struct node *left = l->n; - struct node *right = r->n; + struct btree_node *left = l->n; + struct btree_node *right = r->n; uint32_t nr_left = le32_to_cpu(left->header.nr_entries); uint32_t nr_right = le32_to_cpu(right->header.nr_entries); unsigned threshold = 2 * merge_threshold(left) + 1; @@ 
-239,7 +239,7 @@ static int rebalance2(struct shadow_spine *s, struct dm_btree_info *info, unsigned left_index) { int r; - struct node *parent; + struct btree_node *parent; struct child left, right; parent = dm_block_data(shadow_current(s)); @@ -270,9 +270,9 @@ static int rebalance2(struct shadow_spine *s, struct dm_btree_info *info, * in right, then rebalance2. This wastes some cpu, but I want something * simple atm. */ -static void delete_center_node(struct dm_btree_info *info, struct node *parent, +static void delete_center_node(struct dm_btree_info *info, struct btree_node *parent, struct child *l, struct child *c, struct child *r, - struct node *left, struct node *center, struct node *right, + struct btree_node *left, struct btree_node *center, struct btree_node *right, uint32_t nr_left, uint32_t nr_center, uint32_t nr_right) { uint32_t max_entries = le32_to_cpu(left->header.max_entries); @@ -301,9 +301,9 @@ static void delete_center_node(struct dm_btree_info *info, struct node *parent, /* * Redistributes entries among 3 sibling nodes. */ -static void redistribute3(struct dm_btree_info *info, struct node *parent, +static void redistribute3(struct dm_btree_info *info, struct btree_node *parent, struct child *l, struct child *c, struct child *r, - struct node *left, struct node *center, struct node *right, + struct btree_node *left, struct btree_node *center, struct btree_node *right, uint32_t nr_left, uint32_t nr_center, uint32_t nr_right) { int s; @@ -343,12 +343,12 @@ static void redistribute3(struct dm_btree_info *info, struct node *parent, *key_ptr(parent, r->index) = right->keys[0]; } -static void __rebalance3(struct dm_btree_info *info, struct node *parent, +static void __rebalance3(struct dm_btree_info *info, struct btree_node *parent, struct child *l, struct child *c, struct child *r) { - struct node *left = l->n; - struct node *center = c->n; - struct node *right = r->n; + struct btree_node *left = l->n; + struct btree_node *center = c->n; + struct btree_node *right = r->n; uint32_t nr_left = le32_to_cpu(left->header.nr_entries); uint32_t nr_center = le32_to_cpu(center->header.nr_entries); @@ -371,7 +371,7 @@ static int rebalance3(struct shadow_spine *s, struct dm_btree_info *info, unsigned left_index) { int r; - struct node *parent = dm_block_data(shadow_current(s)); + struct btree_node *parent = dm_block_data(shadow_current(s)); struct child left, center, right; /* @@ -421,7 +421,7 @@ static int get_nr_entries(struct dm_transaction_manager *tm, { int r; struct dm_block *block; - struct node *n; + struct btree_node *n; r = dm_tm_read_lock(tm, b, &btree_node_validator, &block); if (r) @@ -438,7 +438,7 @@ static int rebalance_children(struct shadow_spine *s, { int i, r, has_left_sibling, has_right_sibling; uint32_t child_entries; - struct node *n; + struct btree_node *n; n = dm_block_data(shadow_current(s)); @@ -483,7 +483,7 @@ static int rebalance_children(struct shadow_spine *s, return r; } -static int do_leaf(struct node *n, uint64_t key, unsigned *index) +static int do_leaf(struct btree_node *n, uint64_t key, unsigned *index) { int i = lower_bound(n, key); @@ -506,7 +506,7 @@ static int remove_raw(struct shadow_spine *s, struct dm_btree_info *info, uint64_t key, unsigned *index) { int i = *index, r; - struct node *n; + struct btree_node *n; for (;;) { r = shadow_step(s, root, vt); @@ -556,7 +556,7 @@ int dm_btree_remove(struct dm_btree_info *info, dm_block_t root, unsigned level, last_level = info->levels - 1; int index = 0, r = 0; struct shadow_spine spine; - struct node 
*n; + struct btree_node *n; init_shadow_spine(&spine, info); for (level = 0; level < info->levels; level++) { diff --git a/drivers/md/persistent-data/dm-btree-spine.c b/drivers/md/persistent-data/dm-btree-spine.c index d9a7912ee8ee..2f0805c3263e 100644 --- a/drivers/md/persistent-data/dm-btree-spine.c +++ b/drivers/md/persistent-data/dm-btree-spine.c @@ -23,7 +23,7 @@ static void node_prepare_for_write(struct dm_block_validator *v, struct dm_block *b, size_t block_size) { - struct node *n = dm_block_data(b); + struct btree_node *n = dm_block_data(b); struct node_header *h = &n->header; h->blocknr = cpu_to_le64(dm_block_location(b)); @@ -38,7 +38,7 @@ static int node_check(struct dm_block_validator *v, struct dm_block *b, size_t block_size) { - struct node *n = dm_block_data(b); + struct btree_node *n = dm_block_data(b); struct node_header *h = &n->header; size_t value_size; __le32 csum_disk; @@ -164,7 +164,7 @@ int ro_step(struct ro_spine *s, dm_block_t new_child) return r; } -struct node *ro_node(struct ro_spine *s) +struct btree_node *ro_node(struct ro_spine *s) { struct dm_block *block; diff --git a/drivers/md/persistent-data/dm-btree.c b/drivers/md/persistent-data/dm-btree.c index d12b2cc51f1a..371f3d49d18e 100644 --- a/drivers/md/persistent-data/dm-btree.c +++ b/drivers/md/persistent-data/dm-btree.c @@ -38,7 +38,7 @@ static void array_insert(void *base, size_t elt_size, unsigned nr_elts, /*----------------------------------------------------------------*/ /* makes the assumption that no two keys are the same. */ -static int bsearch(struct node *n, uint64_t key, int want_hi) +static int bsearch(struct btree_node *n, uint64_t key, int want_hi) { int lo = -1, hi = le32_to_cpu(n->header.nr_entries); @@ -58,12 +58,12 @@ static int bsearch(struct node *n, uint64_t key, int want_hi) return want_hi ? 
hi : lo; } -int lower_bound(struct node *n, uint64_t key) +int lower_bound(struct btree_node *n, uint64_t key) { return bsearch(n, key, 0); } -void inc_children(struct dm_transaction_manager *tm, struct node *n, +void inc_children(struct dm_transaction_manager *tm, struct btree_node *n, struct dm_btree_value_type *vt) { unsigned i; @@ -77,7 +77,7 @@ void inc_children(struct dm_transaction_manager *tm, struct node *n, vt->inc(vt->context, value_ptr(n, i)); } -static int insert_at(size_t value_size, struct node *node, unsigned index, +static int insert_at(size_t value_size, struct btree_node *node, unsigned index, uint64_t key, void *value) __dm_written_to_disk(value) { @@ -122,7 +122,7 @@ int dm_btree_empty(struct dm_btree_info *info, dm_block_t *root) { int r; struct dm_block *b; - struct node *n; + struct btree_node *n; size_t block_size; uint32_t max_entries; @@ -154,7 +154,7 @@ EXPORT_SYMBOL_GPL(dm_btree_empty); #define MAX_SPINE_DEPTH 64 struct frame { struct dm_block *b; - struct node *n; + struct btree_node *n; unsigned level; unsigned nr_children; unsigned current_child; @@ -295,7 +295,7 @@ EXPORT_SYMBOL_GPL(dm_btree_del); /*----------------------------------------------------------------*/ static int btree_lookup_raw(struct ro_spine *s, dm_block_t block, uint64_t key, - int (*search_fn)(struct node *, uint64_t), + int (*search_fn)(struct btree_node *, uint64_t), uint64_t *result_key, void *v, size_t value_size) { int i, r; @@ -406,7 +406,7 @@ static int btree_split_sibling(struct shadow_spine *s, dm_block_t root, size_t size; unsigned nr_left, nr_right; struct dm_block *left, *right, *parent; - struct node *ln, *rn, *pn; + struct btree_node *ln, *rn, *pn; __le64 location; left = shadow_current(s); @@ -491,7 +491,7 @@ static int btree_split_beneath(struct shadow_spine *s, uint64_t key) size_t size; unsigned nr_left, nr_right; struct dm_block *left, *right, *new_parent; - struct node *pn, *ln, *rn; + struct btree_node *pn, *ln, *rn; __le64 val; new_parent = shadow_current(s); @@ -576,7 +576,7 @@ static int btree_insert_raw(struct shadow_spine *s, dm_block_t root, uint64_t key, unsigned *index) { int r, i = *index, top = 1; - struct node *node; + struct btree_node *node; for (;;) { r = shadow_step(s, root, vt); @@ -643,7 +643,7 @@ static int insert(struct dm_btree_info *info, dm_block_t root, unsigned level, index = -1, last_level = info->levels - 1; dm_block_t block = root; struct shadow_spine spine; - struct node *n; + struct btree_node *n; struct dm_btree_value_type le64_type; le64_type.context = NULL; diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c index 23904d233735..df445091384a 100644 --- a/drivers/md/raid1.c +++ b/drivers/md/raid1.c @@ -2564,7 +2564,7 @@ static struct r1conf *setup_conf(struct mddev *mddev) || disk_idx < 0) continue; if (test_bit(Replacement, &rdev->flags)) - disk = conf->mirrors + conf->raid_disks + disk_idx; + disk = conf->mirrors + mddev->raid_disks + disk_idx; else disk = conf->mirrors + disk_idx; diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c index a954c95d7c92..6137d0041d4d 100644 --- a/drivers/md/raid10.c +++ b/drivers/md/raid10.c @@ -476,7 +476,7 @@ static void raid10_end_write_request(struct bio *bio, int error) */ one_write_done(r10_bio); if (dec_rdev) - rdev_dec_pending(conf->mirrors[dev].rdev, conf->mddev); + rdev_dec_pending(rdev, conf->mddev); } /* @@ -612,20 +612,24 @@ static int raid10_mergeable_bvec(struct request_queue *q, max = biovec->bv_len; if (mddev->merge_check_needed) { - struct r10bio r10_bio; + struct { + struct r10bio 
r10_bio; + struct r10dev devs[conf->copies]; + } on_stack; + struct r10bio *r10_bio = &on_stack.r10_bio; int s; - r10_bio.sector = sector; - raid10_find_phys(conf, &r10_bio); + r10_bio->sector = sector; + raid10_find_phys(conf, r10_bio); rcu_read_lock(); for (s = 0; s < conf->copies; s++) { - int disk = r10_bio.devs[s].devnum; + int disk = r10_bio->devs[s].devnum; struct md_rdev *rdev = rcu_dereference( conf->mirrors[disk].rdev); if (rdev && !test_bit(Faulty, &rdev->flags)) { struct request_queue *q = bdev_get_queue(rdev->bdev); if (q->merge_bvec_fn) { - bvm->bi_sector = r10_bio.devs[s].addr + bvm->bi_sector = r10_bio->devs[s].addr + rdev->data_offset; bvm->bi_bdev = rdev->bdev; max = min(max, q->merge_bvec_fn( @@ -637,7 +641,7 @@ static int raid10_mergeable_bvec(struct request_queue *q, struct request_queue *q = bdev_get_queue(rdev->bdev); if (q->merge_bvec_fn) { - bvm->bi_sector = r10_bio.devs[s].addr + bvm->bi_sector = r10_bio->devs[s].addr + rdev->data_offset; bvm->bi_bdev = rdev->bdev; max = min(max, q->merge_bvec_fn( @@ -1178,18 +1182,21 @@ retry_write: blocked_rdev = rrdev; break; } + if (rdev && (test_bit(Faulty, &rdev->flags) + || test_bit(Unmerged, &rdev->flags))) + rdev = NULL; if (rrdev && (test_bit(Faulty, &rrdev->flags) || test_bit(Unmerged, &rrdev->flags))) rrdev = NULL; r10_bio->devs[i].bio = NULL; r10_bio->devs[i].repl_bio = NULL; - if (!rdev || test_bit(Faulty, &rdev->flags) || - test_bit(Unmerged, &rdev->flags)) { + + if (!rdev && !rrdev) { set_bit(R10BIO_Degraded, &r10_bio->state); continue; } - if (test_bit(WriteErrorSeen, &rdev->flags)) { + if (rdev && test_bit(WriteErrorSeen, &rdev->flags)) { sector_t first_bad; sector_t dev_sector = r10_bio->devs[i].addr; int bad_sectors; @@ -1231,8 +1238,10 @@ retry_write: max_sectors = good_sectors; } } - r10_bio->devs[i].bio = bio; - atomic_inc(&rdev->nr_pending); + if (rdev) { + r10_bio->devs[i].bio = bio; + atomic_inc(&rdev->nr_pending); + } if (rrdev) { r10_bio->devs[i].repl_bio = bio; atomic_inc(&rrdev->nr_pending); @@ -1288,51 +1297,52 @@ retry_write: for (i = 0; i < conf->copies; i++) { struct bio *mbio; int d = r10_bio->devs[i].devnum; - if (!r10_bio->devs[i].bio) - continue; + if (r10_bio->devs[i].bio) { + struct md_rdev *rdev = conf->mirrors[d].rdev; + mbio = bio_clone_mddev(bio, GFP_NOIO, mddev); + md_trim_bio(mbio, r10_bio->sector - bio->bi_sector, + max_sectors); + r10_bio->devs[i].bio = mbio; + + mbio->bi_sector = (r10_bio->devs[i].addr+ + rdev->data_offset); + mbio->bi_bdev = rdev->bdev; + mbio->bi_end_io = raid10_end_write_request; + mbio->bi_rw = WRITE | do_sync | do_fua; + mbio->bi_private = r10_bio; - mbio = bio_clone_mddev(bio, GFP_NOIO, mddev); - md_trim_bio(mbio, r10_bio->sector - bio->bi_sector, - max_sectors); - r10_bio->devs[i].bio = mbio; - - mbio->bi_sector = (r10_bio->devs[i].addr+ - conf->mirrors[d].rdev->data_offset); - mbio->bi_bdev = conf->mirrors[d].rdev->bdev; - mbio->bi_end_io = raid10_end_write_request; - mbio->bi_rw = WRITE | do_sync | do_fua; - mbio->bi_private = r10_bio; - - atomic_inc(&r10_bio->remaining); - spin_lock_irqsave(&conf->device_lock, flags); - bio_list_add(&conf->pending_bio_list, mbio); - conf->pending_count++; - spin_unlock_irqrestore(&conf->device_lock, flags); - - if (!r10_bio->devs[i].repl_bio) - continue; - - mbio = bio_clone_mddev(bio, GFP_NOIO, mddev); - md_trim_bio(mbio, r10_bio->sector - bio->bi_sector, - max_sectors); - r10_bio->devs[i].repl_bio = mbio; + atomic_inc(&r10_bio->remaining); + spin_lock_irqsave(&conf->device_lock, flags); + 
bio_list_add(&conf->pending_bio_list, mbio); + conf->pending_count++; + spin_unlock_irqrestore(&conf->device_lock, flags); + } - /* We are actively writing to the original device - * so it cannot disappear, so the replacement cannot - * become NULL here - */ - mbio->bi_sector = (r10_bio->devs[i].addr+ - conf->mirrors[d].replacement->data_offset); - mbio->bi_bdev = conf->mirrors[d].replacement->bdev; - mbio->bi_end_io = raid10_end_write_request; - mbio->bi_rw = WRITE | do_sync | do_fua; - mbio->bi_private = r10_bio; + if (r10_bio->devs[i].repl_bio) { + struct md_rdev *rdev = conf->mirrors[d].replacement; + if (rdev == NULL) { + /* Replacement just got moved to main 'rdev' */ + smp_mb(); + rdev = conf->mirrors[d].rdev; + } + mbio = bio_clone_mddev(bio, GFP_NOIO, mddev); + md_trim_bio(mbio, r10_bio->sector - bio->bi_sector, + max_sectors); + r10_bio->devs[i].repl_bio = mbio; + + mbio->bi_sector = (r10_bio->devs[i].addr+ + rdev->data_offset); + mbio->bi_bdev = rdev->bdev; + mbio->bi_end_io = raid10_end_write_request; + mbio->bi_rw = WRITE | do_sync | do_fua; + mbio->bi_private = r10_bio; - atomic_inc(&r10_bio->remaining); - spin_lock_irqsave(&conf->device_lock, flags); - bio_list_add(&conf->pending_bio_list, mbio); - conf->pending_count++; - spin_unlock_irqrestore(&conf->device_lock, flags); + atomic_inc(&r10_bio->remaining); + spin_lock_irqsave(&conf->device_lock, flags); + bio_list_add(&conf->pending_bio_list, mbio); + conf->pending_count++; + spin_unlock_irqrestore(&conf->device_lock, flags); + } } /* Don't remove the bias on 'remaining' (one_write_done) until @@ -3015,7 +3025,7 @@ static sector_t sync_request(struct mddev *mddev, sector_t sector_nr, else { bad_sectors -= (sector - first_bad); if (max_sync > bad_sectors) - max_sync = max_sync; + max_sync = bad_sectors; continue; } } diff --git a/drivers/md/raid10.h b/drivers/md/raid10.h index 7c615613c381..24d45b8af5c9 100644 --- a/drivers/md/raid10.h +++ b/drivers/md/raid10.h @@ -104,7 +104,7 @@ struct r10bio { * We choose the number when they are allocated. * We sometimes need an extra bio to write to the replacement. 
*/ - struct { + struct r10dev { struct bio *bio; union { struct bio *repl_bio; /* used for resync and diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c index 73a58007eb99..0240576564dc 100644 --- a/drivers/md/raid5.c +++ b/drivers/md/raid5.c @@ -380,6 +380,8 @@ static int calc_degraded(struct r5conf *conf) degraded = 0; for (i = 0; i < conf->previous_raid_disks; i++) { struct md_rdev *rdev = rcu_dereference(conf->disks[i].rdev); + if (rdev && test_bit(Faulty, &rdev->flags)) + rdev = rcu_dereference(conf->disks[i].replacement); if (!rdev || test_bit(Faulty, &rdev->flags)) degraded++; else if (test_bit(In_sync, &rdev->flags)) @@ -404,6 +406,8 @@ static int calc_degraded(struct r5conf *conf) degraded2 = 0; for (i = 0; i < conf->raid_disks; i++) { struct md_rdev *rdev = rcu_dereference(conf->disks[i].rdev); + if (rdev && test_bit(Faulty, &rdev->flags)) + rdev = rcu_dereference(conf->disks[i].replacement); if (!rdev || test_bit(Faulty, &rdev->flags)) degraded2++; else if (test_bit(In_sync, &rdev->flags)) diff --git a/drivers/media/dvb/siano/smsusb.c b/drivers/media/dvb/siano/smsusb.c index 664e460f247b..aac622200e99 100644 --- a/drivers/media/dvb/siano/smsusb.c +++ b/drivers/media/dvb/siano/smsusb.c @@ -481,7 +481,7 @@ static int smsusb_resume(struct usb_interface *intf) return 0; } -static const struct usb_device_id smsusb_id_table[] __devinitconst = { +static const struct usb_device_id smsusb_id_table[] = { { USB_DEVICE(0x187f, 0x0010), .driver_info = SMS1XXX_BOARD_SIANO_STELLAR }, { USB_DEVICE(0x187f, 0x0100), diff --git a/drivers/media/rc/ite-cir.c b/drivers/media/rc/ite-cir.c index 0e49c99abf68..c06992e1320d 100644 --- a/drivers/media/rc/ite-cir.c +++ b/drivers/media/rc/ite-cir.c @@ -1473,6 +1473,7 @@ static int ite_probe(struct pnp_dev *pdev, const struct pnp_device_id rdev = rc_allocate_device(); if (!rdev) goto failure; + itdev->rdev = rdev; ret = -ENODEV; @@ -1604,7 +1605,6 @@ static int ite_probe(struct pnp_dev *pdev, const struct pnp_device_id if (ret) goto failure; - itdev->rdev = rdev; ite_pr(KERN_NOTICE, "driver has been successfully loaded\n"); return 0; diff --git a/drivers/media/rc/rc-main.c b/drivers/media/rc/rc-main.c index 6e16b09c24a9..cec1f8c05e67 100644 --- a/drivers/media/rc/rc-main.c +++ b/drivers/media/rc/rc-main.c @@ -775,9 +775,12 @@ static ssize_t show_protocols(struct device *device, if (dev->driver_type == RC_DRIVER_SCANCODE) { enabled = dev->rc_map.rc_type; allowed = dev->allowed_protos; - } else { + } else if (dev->raw) { enabled = dev->raw->enabled_protocols; allowed = ir_raw_get_allowed_protocols(); + } else { + mutex_unlock(&dev->lock); + return -ENODEV; } IR_dprintk(1, "allowed - 0x%llx, enabled - 0x%llx\n", diff --git a/drivers/media/video/au0828/au0828-video.c b/drivers/media/video/au0828/au0828-video.c index 0b3e481ffe8c..eab06419473e 100644 --- a/drivers/media/video/au0828/au0828-video.c +++ b/drivers/media/video/au0828/au0828-video.c @@ -1692,14 +1692,18 @@ static int vidioc_streamoff(struct file *file, void *priv, (AUVI_INPUT(i).audio_setup)(dev, 0); } - videobuf_streamoff(&fh->vb_vidq); - res_free(fh, AU0828_RESOURCE_VIDEO); + if (res_check(fh, AU0828_RESOURCE_VIDEO)) { + videobuf_streamoff(&fh->vb_vidq); + res_free(fh, AU0828_RESOURCE_VIDEO); + } } else if (fh->type == V4L2_BUF_TYPE_VBI_CAPTURE) { dev->vbi_timeout_running = 0; del_timer_sync(&dev->vbi_timeout); - videobuf_streamoff(&fh->vb_vbiq); - res_free(fh, AU0828_RESOURCE_VBI); + if (res_check(fh, AU0828_RESOURCE_VBI)) { + videobuf_streamoff(&fh->vb_vbiq); + res_free(fh, AU0828_RESOURCE_VBI); 
+ } } return 0; diff --git a/drivers/media/video/cx25821/cx25821-core.c b/drivers/media/video/cx25821/cx25821-core.c index 7930ca58349f..235bf7da6c04 100644 --- a/drivers/media/video/cx25821/cx25821-core.c +++ b/drivers/media/video/cx25821/cx25821-core.c @@ -912,9 +912,6 @@ static int cx25821_dev_setup(struct cx25821_dev *dev) list_add_tail(&dev->devlist, &cx25821_devlist); mutex_unlock(&cx25821_devlist_mutex); - strcpy(cx25821_boards[UNKNOWN_BOARD].name, "unknown"); - strcpy(cx25821_boards[CX25821_BOARD].name, "cx25821"); - if (dev->pci->device != 0x8210) { pr_info("%s(): Exiting. Incorrect Hardware device = 0x%02x\n", __func__, dev->pci->device); diff --git a/drivers/media/video/cx25821/cx25821.h b/drivers/media/video/cx25821/cx25821.h index b9aa801b00a7..029f2934a6d8 100644 --- a/drivers/media/video/cx25821/cx25821.h +++ b/drivers/media/video/cx25821/cx25821.h @@ -187,7 +187,7 @@ enum port { }; struct cx25821_board { - char *name; + const char *name; enum port porta; enum port portb; enum port portc; diff --git a/drivers/media/video/gspca/jl2005bcd.c b/drivers/media/video/gspca/jl2005bcd.c index 53f58ef367cf..3264d878cfde 100644 --- a/drivers/media/video/gspca/jl2005bcd.c +++ b/drivers/media/video/gspca/jl2005bcd.c @@ -510,7 +510,7 @@ static const struct sd_desc sd_desc = { }; /* -- module initialisation -- */ -static const __devinitdata struct usb_device_id device_table[] = { +static const struct usb_device_id device_table[] = { {USB_DEVICE(0x0979, 0x0227)}, {} }; diff --git a/drivers/media/video/gspca/pac7302.c b/drivers/media/video/gspca/pac7302.c index 30662fccb0cf..63f571b7cc22 100644 --- a/drivers/media/video/gspca/pac7302.c +++ b/drivers/media/video/gspca/pac7302.c @@ -945,6 +945,7 @@ static const struct usb_device_id device_table[] = { {USB_DEVICE(0x093a, 0x262a)}, {USB_DEVICE(0x093a, 0x262c)}, {USB_DEVICE(0x145f, 0x013c)}, + {USB_DEVICE(0x1ae7, 0x2001)}, /* SpeedLink Snappy Mic SL-6825-SBK */ {} }; MODULE_DEVICE_TABLE(usb, device_table); diff --git a/drivers/media/video/gspca/spca506.c b/drivers/media/video/gspca/spca506.c index 54eed87672d2..21e6a37bc7a8 100644 --- a/drivers/media/video/gspca/spca506.c +++ b/drivers/media/video/gspca/spca506.c @@ -685,7 +685,7 @@ static const struct sd_desc sd_desc = { }; /* -- module initialisation -- */ -static const struct usb_device_id device_table[] __devinitconst = { +static const struct usb_device_id device_table[] = { {USB_DEVICE(0x06e1, 0xa190)}, /*fixme: may be IntelPCCameraPro BRIDGE_SPCA505 {USB_DEVICE(0x0733, 0x0430)}, */ diff --git a/drivers/media/video/uvc/uvc_queue.c b/drivers/media/video/uvc/uvc_queue.c index 8f54e24e3f35..4261bf231932 100644 --- a/drivers/media/video/uvc/uvc_queue.c +++ b/drivers/media/video/uvc/uvc_queue.c @@ -355,6 +355,7 @@ struct uvc_buffer *uvc_queue_next_buffer(struct uvc_video_queue *queue, if ((queue->flags & UVC_QUEUE_DROP_CORRUPTED) && buf->error) { buf->error = 0; buf->state = UVC_BUF_STATE_QUEUED; + buf->bytesused = 0; vb2_set_plane_payload(&buf->buf, 0, 0); return buf; } diff --git a/drivers/mfd/max8925-core.c b/drivers/mfd/max8925-core.c index ca881efedf75..746a59c29fed 100644 --- a/drivers/mfd/max8925-core.c +++ b/drivers/mfd/max8925-core.c @@ -18,12 +18,19 @@ #include <linux/mfd/core.h> #include <linux/mfd/max8925.h> +static struct resource io_parent = { + .start = 0, + .end = 0xffffffff, + .flags = IORESOURCE_IO, +}; + static struct resource backlight_resources[] = { { .name = "max8925-backlight", .start = MAX8925_WLED_MODE_CNTL, .end = MAX8925_WLED_CNTL, .flags = IORESOURCE_IO, + .parent = 
&io_parent, }, }; @@ -42,6 +49,7 @@ static struct resource touch_resources[] = { .start = MAX8925_TSC_IRQ, .end = MAX8925_ADC_RES_END, .flags = IORESOURCE_IO, + .parent = &io_parent, }, }; @@ -60,6 +68,7 @@ static struct resource power_supply_resources[] = { .start = MAX8925_CHG_IRQ1, .end = MAX8925_CHG_IRQ1_MASK, .flags = IORESOURCE_IO, + .parent = &io_parent, }, }; @@ -118,6 +127,7 @@ static struct mfd_cell onkey_devs[] = { .start = MAX8925_##_start, \ .end = MAX8925_##_end, \ .flags = IORESOURCE_IO, \ + .parent = &io_parent, \ } static struct resource regulator_resources[] = { diff --git a/drivers/mfd/mfd-core.c b/drivers/mfd/mfd-core.c index ffc3d48676ae..ef80da67eb53 100644 --- a/drivers/mfd/mfd-core.c +++ b/drivers/mfd/mfd-core.c @@ -19,6 +19,10 @@ #include <linux/slab.h> #include <linux/module.h> +static struct device_type mfd_dev_type = { + .name = "mfd_device", +}; + int mfd_cell_enable(struct platform_device *pdev) { const struct mfd_cell *cell = mfd_get_cell(pdev); @@ -88,6 +92,7 @@ static int mfd_add_device(struct device *parent, int id, goto fail_device; pdev->dev.parent = parent; + pdev->dev.type = &mfd_dev_type; if (cell->pdata_size) { ret = platform_device_add_data(pdev, @@ -183,10 +188,16 @@ EXPORT_SYMBOL(mfd_add_devices); static int mfd_remove_devices_fn(struct device *dev, void *c) { - struct platform_device *pdev = to_platform_device(dev); - const struct mfd_cell *cell = mfd_get_cell(pdev); + struct platform_device *pdev; + const struct mfd_cell *cell; atomic_t **usage_count = c; + if (dev->type != &mfd_dev_type) + return 0; + + pdev = to_platform_device(dev); + cell = mfd_get_cell(pdev); + /* find the base address of usage_count pointers (for freeing) */ if (!*usage_count || (cell->usage_count < *usage_count)) *usage_count = cell->usage_count; diff --git a/drivers/mfd/wm8994-core.c b/drivers/mfd/wm8994-core.c index 9d7ca1e978fa..8d46e68cb178 100644 --- a/drivers/mfd/wm8994-core.c +++ b/drivers/mfd/wm8994-core.c @@ -541,6 +541,7 @@ static __devinit int wm8994_device_init(struct wm8994 *wm8994, int irq) case 1: case 2: case 3: + case 4: regmap_patch = wm1811_reva_patch; patch_regs = ARRAY_SIZE(wm1811_reva_patch); break; diff --git a/drivers/misc/sgi-xp/xpc_main.c b/drivers/misc/sgi-xp/xpc_main.c index 8d082b46426b..d971817182f7 100644 --- a/drivers/misc/sgi-xp/xpc_main.c +++ b/drivers/misc/sgi-xp/xpc_main.c @@ -53,6 +53,10 @@ #include <linux/kthread.h> #include "xpc.h" +#ifdef CONFIG_X86_64 +#include <asm/traps.h> +#endif + /* define two XPC debug device structures to be used with dev_dbg() et al */ struct device_driver xpc_dbg_name = { @@ -1079,6 +1083,9 @@ xpc_system_reboot(struct notifier_block *nb, unsigned long event, void *unused) return NOTIFY_DONE; } +/* Used to only allow one cpu to complete disconnect */ +static unsigned int xpc_die_disconnecting; + /* * Notify other partitions to deactivate from us by first disengaging from all * references to our memory. @@ -1092,6 +1099,9 @@ xpc_die_deactivate(void) long keep_waiting; long wait_to_print; + if (cmpxchg(&xpc_die_disconnecting, 0, 1)) + return; + /* keep xpc_hb_checker thread from doing anything (just in case) */ xpc_exiting = 1; @@ -1159,7 +1169,7 @@ xpc_die_deactivate(void) * about the lack of a heartbeat. */ static int -xpc_system_die(struct notifier_block *nb, unsigned long event, void *unused) +xpc_system_die(struct notifier_block *nb, unsigned long event, void *_die_args) { #ifdef CONFIG_IA64 /* !!! 
temporary kludge */ switch (event) { @@ -1191,7 +1201,27 @@ xpc_system_die(struct notifier_block *nb, unsigned long event, void *unused) break; } #else - xpc_die_deactivate(); + struct die_args *die_args = _die_args; + + switch (event) { + case DIE_TRAP: + if (die_args->trapnr == X86_TRAP_DF) + xpc_die_deactivate(); + + if (((die_args->trapnr == X86_TRAP_MF) || + (die_args->trapnr == X86_TRAP_XF)) && + !user_mode_vm(die_args->regs)) + xpc_die_deactivate(); + + break; + case DIE_INT3: + case DIE_DEBUG: + break; + case DIE_OOPS: + case DIE_GPF: + default: + xpc_die_deactivate(); + } #endif return NOTIFY_DONE; diff --git a/drivers/misc/sgi-xp/xpc_uv.c b/drivers/misc/sgi-xp/xpc_uv.c index 17bbacb1b4b1..cc2ae7ec0d22 100644 --- a/drivers/misc/sgi-xp/xpc_uv.c +++ b/drivers/misc/sgi-xp/xpc_uv.c @@ -18,6 +18,8 @@ #include <linux/interrupt.h> #include <linux/delay.h> #include <linux/device.h> +#include <linux/cpu.h> +#include <linux/module.h> #include <linux/err.h> #include <linux/slab.h> #include <asm/uv/uv_hub.h> @@ -59,6 +61,8 @@ static struct xpc_heartbeat_uv *xpc_heartbeat_uv; XPC_NOTIFY_MSG_SIZE_UV) #define XPC_NOTIFY_IRQ_NAME "xpc_notify" +static int xpc_mq_node = -1; + static struct xpc_gru_mq_uv *xpc_activate_mq_uv; static struct xpc_gru_mq_uv *xpc_notify_mq_uv; @@ -109,11 +113,8 @@ xpc_get_gru_mq_irq_uv(struct xpc_gru_mq_uv *mq, int cpu, char *irq_name) #if defined CONFIG_X86_64 mq->irq = uv_setup_irq(irq_name, cpu, mq->mmr_blade, mq->mmr_offset, UV_AFFINITY_CPU); - if (mq->irq < 0) { - dev_err(xpc_part, "uv_setup_irq() returned error=%d\n", - -mq->irq); + if (mq->irq < 0) return mq->irq; - } mq->mmr_value = uv_read_global_mmr64(mmr_pnode, mq->mmr_offset); @@ -238,8 +239,9 @@ xpc_create_gru_mq_uv(unsigned int mq_size, int cpu, char *irq_name, mq->mmr_blade = uv_cpu_to_blade_id(cpu); nid = cpu_to_node(cpu); - page = alloc_pages_exact_node(nid, GFP_KERNEL | __GFP_ZERO | GFP_THISNODE, - pg_order); + page = alloc_pages_exact_node(nid, + GFP_KERNEL | __GFP_ZERO | GFP_THISNODE, + pg_order); if (page == NULL) { dev_err(xpc_part, "xpc_create_gru_mq_uv() failed to alloc %d " "bytes of memory on nid=%d for GRU mq\n", mq_size, nid); @@ -1731,9 +1733,50 @@ static struct xpc_arch_operations xpc_arch_ops_uv = { .notify_senders_of_disconnect = xpc_notify_senders_of_disconnect_uv, }; +static int +xpc_init_mq_node(int nid) +{ + int cpu; + + get_online_cpus(); + + for_each_cpu(cpu, cpumask_of_node(nid)) { + xpc_activate_mq_uv = + xpc_create_gru_mq_uv(XPC_ACTIVATE_MQ_SIZE_UV, nid, + XPC_ACTIVATE_IRQ_NAME, + xpc_handle_activate_IRQ_uv); + if (!IS_ERR(xpc_activate_mq_uv)) + break; + } + if (IS_ERR(xpc_activate_mq_uv)) { + put_online_cpus(); + return PTR_ERR(xpc_activate_mq_uv); + } + + for_each_cpu(cpu, cpumask_of_node(nid)) { + xpc_notify_mq_uv = + xpc_create_gru_mq_uv(XPC_NOTIFY_MQ_SIZE_UV, nid, + XPC_NOTIFY_IRQ_NAME, + xpc_handle_notify_IRQ_uv); + if (!IS_ERR(xpc_notify_mq_uv)) + break; + } + if (IS_ERR(xpc_notify_mq_uv)) { + xpc_destroy_gru_mq_uv(xpc_activate_mq_uv); + put_online_cpus(); + return PTR_ERR(xpc_notify_mq_uv); + } + + put_online_cpus(); + return 0; +} + int xpc_init_uv(void) { + int nid; + int ret = 0; + xpc_arch_ops = xpc_arch_ops_uv; if (sizeof(struct xpc_notify_mq_msghdr_uv) > XPC_MSG_HDR_MAX_SIZE) { @@ -1742,21 +1785,21 @@ xpc_init_uv(void) return -E2BIG; } - xpc_activate_mq_uv = xpc_create_gru_mq_uv(XPC_ACTIVATE_MQ_SIZE_UV, 0, - XPC_ACTIVATE_IRQ_NAME, - xpc_handle_activate_IRQ_uv); - if (IS_ERR(xpc_activate_mq_uv)) - return PTR_ERR(xpc_activate_mq_uv); + if (xpc_mq_node < 0) + 
for_each_online_node(nid) { + ret = xpc_init_mq_node(nid); - xpc_notify_mq_uv = xpc_create_gru_mq_uv(XPC_NOTIFY_MQ_SIZE_UV, 0, - XPC_NOTIFY_IRQ_NAME, - xpc_handle_notify_IRQ_uv); - if (IS_ERR(xpc_notify_mq_uv)) { - xpc_destroy_gru_mq_uv(xpc_activate_mq_uv); - return PTR_ERR(xpc_notify_mq_uv); - } + if (!ret) + break; + } + else + ret = xpc_init_mq_node(xpc_mq_node); - return 0; + if (ret < 0) + dev_err(xpc_part, "xpc_init_mq_node() returned error=%d\n", + -ret); + + return ret; } void @@ -1765,3 +1808,6 @@ xpc_exit_uv(void) xpc_destroy_gru_mq_uv(xpc_notify_mq_uv); xpc_destroy_gru_mq_uv(xpc_activate_mq_uv); } + +module_param(xpc_mq_node, int, 0); +MODULE_PARM_DESC(xpc_mq_node, "Node number on which to allocate message queues."); diff --git a/drivers/mmc/card/block.c b/drivers/mmc/card/block.c index 480f7178782f..cc34388941aa 100644 --- a/drivers/mmc/card/block.c +++ b/drivers/mmc/card/block.c @@ -1843,7 +1843,8 @@ static int mmc_blk_issue_rq(struct mmc_queue *mq, struct request *req) /* complete ongoing async transfer before issuing discard */ if (card->host->areq) mmc_blk_issue_rw_rq(mq, NULL); - if (req->cmd_flags & REQ_SECURE) + if (req->cmd_flags & REQ_SECURE && + !(card->quirks & MMC_QUIRK_SEC_ERASE_TRIM_BROKEN)) ret = mmc_blk_issue_secdiscard_rq(mq, req); else ret = mmc_blk_issue_discard_rq(mq, req); @@ -2148,6 +2149,7 @@ force_ro_fail: #define CID_MANFID_SANDISK 0x2 #define CID_MANFID_TOSHIBA 0x11 #define CID_MANFID_MICRON 0x13 +#define CID_MANFID_SAMSUNG 0x15 static const struct mmc_fixup blk_fixups[] = { @@ -2184,6 +2186,28 @@ static const struct mmc_fixup blk_fixups[] = MMC_FIXUP(CID_NAME_ANY, CID_MANFID_MICRON, 0x200, add_quirk_mmc, MMC_QUIRK_LONG_READ_TIME), + /* + * On these Samsung MoviNAND parts, performing secure erase or + * secure trim can result in unrecoverable corruption due to a + * firmware bug. 
+ */ + MMC_FIXUP("M8G2FA", CID_MANFID_SAMSUNG, CID_OEMID_ANY, add_quirk_mmc, + MMC_QUIRK_SEC_ERASE_TRIM_BROKEN), + MMC_FIXUP("MAG4FA", CID_MANFID_SAMSUNG, CID_OEMID_ANY, add_quirk_mmc, + MMC_QUIRK_SEC_ERASE_TRIM_BROKEN), + MMC_FIXUP("MBG8FA", CID_MANFID_SAMSUNG, CID_OEMID_ANY, add_quirk_mmc, + MMC_QUIRK_SEC_ERASE_TRIM_BROKEN), + MMC_FIXUP("MCGAFA", CID_MANFID_SAMSUNG, CID_OEMID_ANY, add_quirk_mmc, + MMC_QUIRK_SEC_ERASE_TRIM_BROKEN), + MMC_FIXUP("VAL00M", CID_MANFID_SAMSUNG, CID_OEMID_ANY, add_quirk_mmc, + MMC_QUIRK_SEC_ERASE_TRIM_BROKEN), + MMC_FIXUP("VYL00M", CID_MANFID_SAMSUNG, CID_OEMID_ANY, add_quirk_mmc, + MMC_QUIRK_SEC_ERASE_TRIM_BROKEN), + MMC_FIXUP("KYL00M", CID_MANFID_SAMSUNG, CID_OEMID_ANY, add_quirk_mmc, + MMC_QUIRK_SEC_ERASE_TRIM_BROKEN), + MMC_FIXUP("VZL00M", CID_MANFID_SAMSUNG, CID_OEMID_ANY, add_quirk_mmc, + MMC_QUIRK_SEC_ERASE_TRIM_BROKEN), + END_FIXUP }; diff --git a/drivers/mmc/host/mxs-mmc.c b/drivers/mmc/host/mxs-mmc.c index e3f5af96ab87..b6def202bf4d 100644 --- a/drivers/mmc/host/mxs-mmc.c +++ b/drivers/mmc/host/mxs-mmc.c @@ -280,11 +280,11 @@ static irqreturn_t mxs_mmc_irq_handler(int irq, void *dev_id) writel(stat & MXS_MMC_IRQ_BITS, host->base + HW_SSP_CTRL1 + MXS_CLR_ADDR); + spin_unlock(&host->lock); + if ((stat & BM_SSP_CTRL1_SDIO_IRQ) && (stat & BM_SSP_CTRL1_SDIO_IRQ_EN)) mmc_signal_sdio_irq(host->mmc); - spin_unlock(&host->lock); - if (stat & BM_SSP_CTRL1_RESP_TIMEOUT_IRQ) cmd->error = -ETIMEDOUT; else if (stat & BM_SSP_CTRL1_RESP_ERR_IRQ) diff --git a/drivers/mmc/host/omap_hsmmc.c b/drivers/mmc/host/omap_hsmmc.c index 71a0c4ea1bc0..102537733482 100644 --- a/drivers/mmc/host/omap_hsmmc.c +++ b/drivers/mmc/host/omap_hsmmc.c @@ -2097,8 +2097,7 @@ static int omap_hsmmc_suspend(struct device *dev) if (ret) { host->suspended = 0; if (host->pdata->resume) { - ret = host->pdata->resume(dev, host->slot_id); - if (ret) + if (host->pdata->resume(dev, host->slot_id)) dev_dbg(dev, "Unmask interrupt failed\n"); } goto err; diff --git a/drivers/mmc/host/sdhci-esdhc-imx.c b/drivers/mmc/host/sdhci-esdhc-imx.c index 8abdaf6697a8..be46052228b7 100644 --- a/drivers/mmc/host/sdhci-esdhc-imx.c +++ b/drivers/mmc/host/sdhci-esdhc-imx.c @@ -232,15 +232,18 @@ static void esdhc_writel_le(struct sdhci_host *host, u32 val, int reg) static u16 esdhc_readw_le(struct sdhci_host *host, int reg) { + struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); + struct pltfm_imx_data *imx_data = pltfm_host->priv; + if (unlikely(reg == SDHCI_HOST_VERSION)) { - u16 val = readw(host->ioaddr + (reg ^ 2)); - /* - * uSDHC supports SDHCI v3.0, but it's encoded as value - * 0x3 in host controller version register, which violates - * SDHCI_SPEC_300 definition. Work it around here. - */ - if ((val & SDHCI_SPEC_VER_MASK) == 3) - return --val; + reg ^= 2; + if (is_imx6q_usdhc(imx_data)) { + /* + * The usdhc register returns a wrong host version. + * Correct it here. 
+ */ + return SDHCI_SPEC_300; + } } return readw(host->ioaddr + reg); diff --git a/drivers/mmc/host/sdhci-esdhc.h b/drivers/mmc/host/sdhci-esdhc.h index b97b2f5dafdb..d25f9ab9a54d 100644 --- a/drivers/mmc/host/sdhci-esdhc.h +++ b/drivers/mmc/host/sdhci-esdhc.h @@ -48,14 +48,14 @@ static inline void esdhc_set_clock(struct sdhci_host *host, unsigned int clock) int div = 1; u32 temp; + if (clock == 0) + goto out; + temp = sdhci_readl(host, ESDHC_SYSTEM_CONTROL); temp &= ~(ESDHC_CLOCK_IPGEN | ESDHC_CLOCK_HCKEN | ESDHC_CLOCK_PEREN | ESDHC_CLOCK_MASK); sdhci_writel(host, temp, ESDHC_SYSTEM_CONTROL); - if (clock == 0) - goto out; - while (host->max_clk / pre_div / 16 > clock && pre_div < 256) pre_div *= 2; diff --git a/drivers/mmc/host/sdhci-s3c.c b/drivers/mmc/host/sdhci-s3c.c index 55a164fcaa15..9e2c5585efaa 100644 --- a/drivers/mmc/host/sdhci-s3c.c +++ b/drivers/mmc/host/sdhci-s3c.c @@ -656,7 +656,7 @@ static int __devexit sdhci_s3c_remove(struct platform_device *pdev) pm_runtime_disable(&pdev->dev); - for (ptr = 0; ptr < 3; ptr++) { + for (ptr = 0; ptr < MAX_BUS_CLK; ptr++) { if (sc->clk_bus[ptr]) { clk_disable(sc->clk_bus[ptr]); clk_put(sc->clk_bus[ptr]); diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c index 22734914374f..585699983248 100644 --- a/drivers/mmc/host/sdhci.c +++ b/drivers/mmc/host/sdhci.c @@ -1297,16 +1297,19 @@ static void sdhci_request(struct mmc_host *mmc, struct mmc_request *mrq) */ if ((host->flags & SDHCI_NEEDS_RETUNING) && !(present_state & (SDHCI_DOING_WRITE | SDHCI_DOING_READ))) { - /* eMMC uses cmd21 while sd and sdio use cmd19 */ - tuning_opcode = mmc->card->type == MMC_TYPE_MMC ? - MMC_SEND_TUNING_BLOCK_HS200 : - MMC_SEND_TUNING_BLOCK; - spin_unlock_irqrestore(&host->lock, flags); - sdhci_execute_tuning(mmc, tuning_opcode); - spin_lock_irqsave(&host->lock, flags); - - /* Restore original mmc_request structure */ - host->mrq = mrq; + if (mmc->card) { + /* eMMC uses cmd21 but sd and sdio use cmd19 */ + tuning_opcode = + mmc->card->type == MMC_TYPE_MMC ? 
+ MMC_SEND_TUNING_BLOCK_HS200 : + MMC_SEND_TUNING_BLOCK; + spin_unlock_irqrestore(&host->lock, flags); + sdhci_execute_tuning(mmc, tuning_opcode); + spin_lock_irqsave(&host->lock, flags); + + /* Restore original mmc_request structure */ + host->mrq = mrq; + } } if (mrq->sbc && !(host->flags & SDHCI_AUTO_CMD23)) diff --git a/drivers/mmc/host/sh_mmcif.c b/drivers/mmc/host/sh_mmcif.c index 724b35e85a26..a5786a837a29 100644 --- a/drivers/mmc/host/sh_mmcif.c +++ b/drivers/mmc/host/sh_mmcif.c @@ -1066,7 +1066,6 @@ static irqreturn_t sh_mmcif_irqt(int irq, void *dev_id) { struct sh_mmcif_host *host = dev_id; struct mmc_request *mrq = host->mrq; - struct mmc_data *data = mrq->data; cancel_delayed_work_sync(&host->timeout_work); @@ -1114,13 +1113,14 @@ static irqreturn_t sh_mmcif_irqt(int irq, void *dev_id) case MMCIF_WAIT_FOR_READ_END: case MMCIF_WAIT_FOR_WRITE_END: if (host->sd_error) - data->error = sh_mmcif_error_manage(host); + mrq->data->error = sh_mmcif_error_manage(host); break; default: BUG(); } if (host->wait_for != MMCIF_WAIT_FOR_STOP) { + struct mmc_data *data = mrq->data; if (!mrq->cmd->error && data && !data->error) data->bytes_xfered = data->blocks * data->blksz; diff --git a/drivers/mtd/devices/slram.c b/drivers/mtd/devices/slram.c index 8f52fc858e48..5a5cd2ace4a6 100644 --- a/drivers/mtd/devices/slram.c +++ b/drivers/mtd/devices/slram.c @@ -240,7 +240,7 @@ static int parse_cmdline(char *devname, char *szstart, char *szlength) if (*(szlength) != '+') { devlength = simple_strtoul(szlength, &buffer, 0); - devlength = handle_unit(devlength, buffer) - devstart; + devlength = handle_unit(devlength, buffer); if (devlength < devstart) goto err_out; diff --git a/drivers/mtd/maps/autcpu12-nvram.c b/drivers/mtd/maps/autcpu12-nvram.c index e5bfd0e093bb..0598d52eaf9f 100644 --- a/drivers/mtd/maps/autcpu12-nvram.c +++ b/drivers/mtd/maps/autcpu12-nvram.c @@ -43,7 +43,8 @@ struct map_info autcpu12_sram_map = { static int __init init_autcpu12_sram (void) { - int err, save0, save1; + map_word tmp, save0, save1; + int err; autcpu12_sram_map.virt = ioremap(0x12000000, SZ_128K); if (!autcpu12_sram_map.virt) { @@ -51,7 +52,7 @@ static int __init init_autcpu12_sram (void) err = -EIO; goto out; } - simple_map_init(&autcpu_sram_map); + simple_map_init(&autcpu12_sram_map); /* * Check for 32K/128K @@ -61,20 +62,22 @@ static int __init init_autcpu12_sram (void) * Read and check result on ofs 0x0 * Restore contents */ - save0 = map_read32(&autcpu12_sram_map,0); - save1 = map_read32(&autcpu12_sram_map,0x10000); - map_write32(&autcpu12_sram_map,~save0,0x10000); + save0 = map_read(&autcpu12_sram_map, 0); + save1 = map_read(&autcpu12_sram_map, 0x10000); + tmp.x[0] = ~save0.x[0]; + map_write(&autcpu12_sram_map, tmp, 0x10000); /* if we find this pattern on 0x0, we have 32K size * restore contents and exit */ - if ( map_read32(&autcpu12_sram_map,0) != save0) { - map_write32(&autcpu12_sram_map,save0,0x0); + tmp = map_read(&autcpu12_sram_map, 0); + if (!map_word_equal(&autcpu12_sram_map, tmp, save0)) { + map_write(&autcpu12_sram_map, save0, 0x0); goto map; } /* We have a 128K found, restore 0x10000 and set size * to 128K */ - map_write32(&autcpu12_sram_map,save1,0x10000); + map_write(&autcpu12_sram_map, save1, 0x10000); autcpu12_sram_map.size = SZ_128K; map: diff --git a/drivers/mtd/mtdpart.c b/drivers/mtd/mtdpart.c index 9651c06de0a9..bf24aa77175d 100644 --- a/drivers/mtd/mtdpart.c +++ b/drivers/mtd/mtdpart.c @@ -709,6 +709,8 @@ static const char *default_mtd_part_types[] = { * partition parsers, specified in @types. 
However, if @types is %NULL, then * the default list of parsers is used. The default list contains only the * "cmdlinepart" and "ofpart" parsers ATM. + * Note: If there are more then one parser in @types, the kernel only takes the + * partitions parsed out by the first parser. * * This function may return: * o a negative error code in case of failure @@ -733,11 +735,12 @@ int parse_mtd_partitions(struct mtd_info *master, const char **types, if (!parser) continue; ret = (*parser->parse_fn)(master, pparts, data); + put_partition_parser(parser); if (ret > 0) { printk(KERN_NOTICE "%d %s partitions found on MTD device %s\n", ret, parser->name, master->name); + break; } - put_partition_parser(parser); } return ret; } diff --git a/drivers/mtd/nand/cs553x_nand.c b/drivers/mtd/nand/cs553x_nand.c index 821c34c62500..0b9b472b52be 100644 --- a/drivers/mtd/nand/cs553x_nand.c +++ b/drivers/mtd/nand/cs553x_nand.c @@ -237,6 +237,7 @@ static int __init cs553x_init_one(int cs, int mmio, unsigned long adr) this->ecc.hwctl = cs_enable_hwecc; this->ecc.calculate = cs_calculate_ecc; this->ecc.correct = nand_correct_data; + this->ecc.strength = 1; /* Enable the following for a flash based bad block table */ this->bbt_options = NAND_BBT_USE_FLASH; @@ -248,8 +249,6 @@ static int __init cs553x_init_one(int cs, int mmio, unsigned long adr) goto out_ior; } - this->ecc.strength = 1; - new_mtd->name = kasprintf(GFP_KERNEL, "cs553x_nand_cs%d", cs); cs553x_mtd[cs] = new_mtd; diff --git a/drivers/mtd/nand/gpmi-nand/gpmi-lib.c b/drivers/mtd/nand/gpmi-nand/gpmi-lib.c index e8ea7107932e..022feb45c15e 100644 --- a/drivers/mtd/nand/gpmi-nand/gpmi-lib.c +++ b/drivers/mtd/nand/gpmi-nand/gpmi-lib.c @@ -135,6 +135,15 @@ int gpmi_init(struct gpmi_nand_data *this) if (ret) goto err_out; + /* + * Reset BCH here, too. We got failures otherwise :( + * See later BCH reset for explanation of MX23 handling + */ + ret = gpmi_reset_block(r->bch_regs, GPMI_IS_MX23(this)); + if (ret) + goto err_out; + + /* Choose NAND mode. */ writel(BM_GPMI_CTRL1_GPMI_MODE, r->gpmi_regs + HW_GPMI_CTRL1_CLR); diff --git a/drivers/mtd/nand/nand_base.c b/drivers/mtd/nand/nand_base.c index 47b19c0bb070..eb9f5fb02eef 100644 --- a/drivers/mtd/nand/nand_base.c +++ b/drivers/mtd/nand/nand_base.c @@ -2897,9 +2897,7 @@ static int nand_flash_detect_onfi(struct mtd_info *mtd, struct nand_chip *chip, if (le16_to_cpu(p->features) & 1) *busw = NAND_BUSWIDTH_16; - chip->options &= ~NAND_CHIPOPTIONS_MSK; - chip->options |= (NAND_NO_READRDY | - NAND_NO_AUTOINCR) & NAND_CHIPOPTIONS_MSK; + chip->options |= NAND_NO_READRDY | NAND_NO_AUTOINCR; pr_info("ONFI flash detected\n"); return 1; @@ -3064,9 +3062,8 @@ static struct nand_flash_dev *nand_get_flash_type(struct mtd_info *mtd, mtd->erasesize <<= ((id_data[3] & 0x03) << 1); } } - /* Get chip options, preserve non chip based options */ - chip->options &= ~NAND_CHIPOPTIONS_MSK; - chip->options |= type->options & NAND_CHIPOPTIONS_MSK; + /* Get chip options */ + chip->options |= type->options; /* * Check if chip is not a Samsung device. 
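
Aside: the parse_mtd_partitions() hunk above drops the parser reference as soon as it has run and stops at the first parser that reports partitions, which is exactly what the added kernel-doc note about multiple parsers in @types warns about. A simplified standalone sketch of that first-match loop (mock types, not the real MTD interfaces):

    #include <stddef.h>

    struct part_parser {
            const char *name;
            int (*parse)(void *master, void ***pparts);
    };

    /* Try parsers in order; the first one returning > 0 partitions wins
     * and any later parsers are never consulted. */
    static int parse_partitions(struct part_parser *const *parsers,
                                void *master, void ***pparts)
    {
            int ret = 0;
            size_t i;

            for (i = 0; parsers[i]; i++) {
                    ret = parsers[i]->parse(master, pparts);
                    if (ret > 0)
                            break;    /* first successful parser wins */
            }
            return ret;
    }
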
Do not clear the diff --git a/drivers/mtd/nand/nand_bbt.c b/drivers/mtd/nand/nand_bbt.c index 30d1319ff065..c126469b0645 100644 --- a/drivers/mtd/nand/nand_bbt.c +++ b/drivers/mtd/nand/nand_bbt.c @@ -390,7 +390,7 @@ static int read_abs_bbts(struct mtd_info *mtd, uint8_t *buf, /* Read the mirror version, if available */ if (md && (md->options & NAND_BBT_VERSION)) { scan_read_raw(mtd, buf, (loff_t)md->pages[0] << this->page_shift, - mtd->writesize, td); + mtd->writesize, md); md->version[0] = buf[bbt_get_ver_offs(mtd, md)]; pr_info("Bad block table at page %d, version 0x%02X\n", md->pages[0], md->version[0]); diff --git a/drivers/mtd/nand/nandsim.c b/drivers/mtd/nand/nandsim.c index c606b6afd7aa..b9cbd65f49b5 100644 --- a/drivers/mtd/nand/nandsim.c +++ b/drivers/mtd/nand/nandsim.c @@ -2355,6 +2355,7 @@ static int __init ns_init_module(void) uint64_t new_size = (uint64_t)nsmtd->erasesize << overridesize; if (new_size >> overridesize != nsmtd->erasesize) { NS_ERR("overridesize is too big\n"); + retval = -EINVAL; goto err_exit; } /* N.B. This relies on nand_scan not doing anything with the size before we change it */ diff --git a/drivers/mtd/nand/omap2.c b/drivers/mtd/nand/omap2.c index c2b0bba9d8b3..62d039af1023 100644 --- a/drivers/mtd/nand/omap2.c +++ b/drivers/mtd/nand/omap2.c @@ -1133,7 +1133,8 @@ static int omap_nand_remove(struct platform_device *pdev) /* Release NAND device, its internal structures and partitions */ nand_release(&info->mtd); iounmap(info->nand.IO_ADDR_R); - kfree(&info->mtd); + release_mem_region(info->phys_base, NAND_IO_SIZE); + kfree(info); return 0; } diff --git a/drivers/mtd/ofpart.c b/drivers/mtd/ofpart.c index 64be8f0848b0..d9127e2ed808 100644 --- a/drivers/mtd/ofpart.c +++ b/drivers/mtd/ofpart.c @@ -121,7 +121,7 @@ static int parse_ofoldpart_partitions(struct mtd_info *master, nr_parts = plen / sizeof(part[0]); *pparts = kzalloc(nr_parts * sizeof(*(*pparts)), GFP_KERNEL); - if (!pparts) + if (!*pparts) return -ENOMEM; names = of_get_property(dp, "partition-names", &plen); diff --git a/drivers/mtd/ubi/build.c b/drivers/mtd/ubi/build.c index 0fde9fc7d2e5..83bab2c79834 100644 --- a/drivers/mtd/ubi/build.c +++ b/drivers/mtd/ubi/build.c @@ -816,6 +816,11 @@ static int autoresize(struct ubi_device *ubi, int vol_id) struct ubi_volume *vol = ubi->volumes[vol_id]; int err, old_reserved_pebs = vol->reserved_pebs; + if (ubi->ro_mode) { + ubi_warn("skip auto-resize because of R/O mode"); + return 0; + } + /* * Clear the auto-resize flag in the volume in-memory copy of the * volume table, and 'ubi_resize_volume()' will propagate this change diff --git a/drivers/mtd/ubi/vtbl.c b/drivers/mtd/ubi/vtbl.c index 17cec0c01544..c015fc0a76d8 100644 --- a/drivers/mtd/ubi/vtbl.c +++ b/drivers/mtd/ubi/vtbl.c @@ -346,7 +346,7 @@ retry: */ err = ubi_scan_add_used(ubi, si, new_seb->pnum, new_seb->ec, vid_hdr, 0); - kfree(new_seb); + kmem_cache_free(si->scan_leb_slab, new_seb); ubi_free_vid_hdr(ubi, vid_hdr); return err; @@ -359,7 +359,7 @@ write_error: list_add(&new_seb->u.list, &si->erase); goto retry; } - kfree(new_seb); + kmem_cache_free(si->scan_leb_slab, new_seb); out_free: ubi_free_vid_hdr(ubi, vid_hdr); return err; diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c index 318a62a4db52..6df52c9ba499 100644 --- a/drivers/net/bonding/bond_main.c +++ b/drivers/net/bonding/bond_main.c @@ -1383,6 +1383,8 @@ static void bond_compute_features(struct bonding *bond) struct net_device *bond_dev = bond->dev; netdev_features_t vlan_features = BOND_VLAN_FEATURES; 
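
Aside: the ofpart.c one-liner above fixes a classic output-parameter slip: after allocating through *pparts, the code must test the allocation result (*pparts), not the parameter pparts, which is always non-NULL at that point. A tiny standalone illustration of the corrected pattern:

    #include <stdlib.h>

    /* Allocate n partition slots through an output parameter and report
     * failure by checking what was actually allocated. */
    static int alloc_parts(void ***pparts, size_t n)
    {
            *pparts = calloc(n, sizeof(**pparts));
            if (!*pparts)             /* the buggy form tested "!pparts" */
                    return -1;
            return 0;
    }
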
unsigned short max_hard_header_len = ETH_HLEN; + unsigned int gso_max_size = GSO_MAX_SIZE; + u16 gso_max_segs = GSO_MAX_SEGS; int i; read_lock(&bond->lock); @@ -1396,11 +1398,16 @@ static void bond_compute_features(struct bonding *bond) if (slave->dev->hard_header_len > max_hard_header_len) max_hard_header_len = slave->dev->hard_header_len; + + gso_max_size = min(gso_max_size, slave->dev->gso_max_size); + gso_max_segs = min(gso_max_segs, slave->dev->gso_max_segs); } done: bond_dev->vlan_features = vlan_features; bond_dev->hard_header_len = max_hard_header_len; + bond_dev->gso_max_segs = gso_max_segs; + netif_set_gso_max_size(bond_dev, gso_max_size); read_unlock(&bond->lock); diff --git a/drivers/net/bonding/bond_sysfs.c b/drivers/net/bonding/bond_sysfs.c index aef42f045320..6734737c9530 100644 --- a/drivers/net/bonding/bond_sysfs.c +++ b/drivers/net/bonding/bond_sysfs.c @@ -1578,6 +1578,7 @@ static ssize_t bonding_store_slaves_active(struct device *d, goto out; } + read_lock(&bond->lock); bond_for_each_slave(bond, slave, i) { if (!bond_is_active_slave(slave)) { if (new_value) @@ -1586,6 +1587,7 @@ static ssize_t bonding_store_slaves_active(struct device *d, slave->inactive = 1; } } + read_unlock(&bond->lock); out: return ret; } diff --git a/drivers/net/can/c_can/c_can.c b/drivers/net/can/c_can/c_can.c index 86cd532c78f9..21a3d77ea7e2 100644 --- a/drivers/net/can/c_can/c_can.c +++ b/drivers/net/can/c_can/c_can.c @@ -914,7 +914,7 @@ static int c_can_handle_bus_err(struct net_device *dev, break; case LEC_ACK_ERROR: netdev_dbg(dev, "ack error\n"); - cf->data[2] |= (CAN_ERR_PROT_LOC_ACK | + cf->data[3] |= (CAN_ERR_PROT_LOC_ACK | CAN_ERR_PROT_LOC_ACK_DEL); break; case LEC_BIT1_ERROR: @@ -927,7 +927,7 @@ static int c_can_handle_bus_err(struct net_device *dev, break; case LEC_CRC_ERROR: netdev_dbg(dev, "CRC error\n"); - cf->data[2] |= (CAN_ERR_PROT_LOC_CRC_SEQ | + cf->data[3] |= (CAN_ERR_PROT_LOC_CRC_SEQ | CAN_ERR_PROT_LOC_CRC_DEL); break; default: diff --git a/drivers/net/can/dev.c b/drivers/net/can/dev.c index c5fe3a3db8c9..e86f4c37f981 100644 --- a/drivers/net/can/dev.c +++ b/drivers/net/can/dev.c @@ -576,8 +576,7 @@ void close_candev(struct net_device *dev) { struct can_priv *priv = netdev_priv(dev); - if (del_timer_sync(&priv->restart_timer)) - dev_put(dev); + del_timer_sync(&priv->restart_timer); can_flush_echo_skb(dev); } EXPORT_SYMBOL_GPL(close_candev); diff --git a/drivers/net/can/janz-ican3.c b/drivers/net/can/janz-ican3.c index 08c893cb7896..e7823dd989f4 100644 --- a/drivers/net/can/janz-ican3.c +++ b/drivers/net/can/janz-ican3.c @@ -1250,7 +1250,6 @@ static irqreturn_t ican3_irq(int irq, void *dev_id) */ static int ican3_reset_module(struct ican3_dev *mod) { - u8 val = 1 << mod->num; unsigned long start; u8 runold, runnew; @@ -1264,8 +1263,7 @@ static int ican3_reset_module(struct ican3_dev *mod) runold = ioread8(mod->dpm + TARGET_RUNNING); /* reset the module */ - iowrite8(val, &mod->ctrl->reset_assert); - iowrite8(val, &mod->ctrl->reset_deassert); + iowrite8(0x00, &mod->dpmctrl->hwreset); /* wait until the module has finished resetting and is running */ start = jiffies; diff --git a/drivers/net/can/mcp251x.c b/drivers/net/can/mcp251x.c index 346785c56a25..9d6074273caa 100644 --- a/drivers/net/can/mcp251x.c +++ b/drivers/net/can/mcp251x.c @@ -83,6 +83,11 @@ #define INSTRUCTION_LOAD_TXB(n) (0x40 + 2 * (n)) #define INSTRUCTION_READ_RXB(n) (((n) == 0) ? 
0x90 : 0x94) #define INSTRUCTION_RESET 0xC0 +#define RTS_TXB0 0x01 +#define RTS_TXB1 0x02 +#define RTS_TXB2 0x04 +#define INSTRUCTION_RTS(n) (0x80 | ((n) & 0x07)) + /* MPC251x registers */ #define CANSTAT 0x0e @@ -397,6 +402,7 @@ static void mcp251x_hw_tx_frame(struct spi_device *spi, u8 *buf, static void mcp251x_hw_tx(struct spi_device *spi, struct can_frame *frame, int tx_buf_idx) { + struct mcp251x_priv *priv = dev_get_drvdata(&spi->dev); u32 sid, eid, exide, rtr; u8 buf[SPI_TRANSFER_BUF_LEN]; @@ -418,7 +424,10 @@ static void mcp251x_hw_tx(struct spi_device *spi, struct can_frame *frame, buf[TXBDLC_OFF] = (rtr << DLC_RTR_SHIFT) | frame->can_dlc; memcpy(buf + TXBDAT_OFF, frame->data, frame->can_dlc); mcp251x_hw_tx_frame(spi, buf, frame->can_dlc, tx_buf_idx); - mcp251x_write_reg(spi, TXBCTRL(tx_buf_idx), TXBCTRL_TXREQ); + + /* use INSTRUCTION_RTS, to avoid "repeated frame problem" */ + priv->spi_tx_buf[0] = INSTRUCTION_RTS(1 << tx_buf_idx); + mcp251x_spi_trans(priv->spi, 1); } static void mcp251x_hw_rx_frame(struct spi_device *spi, u8 *buf, diff --git a/drivers/net/can/mscan/mpc5xxx_can.c b/drivers/net/can/mscan/mpc5xxx_can.c index 5caa572d71e3..957f000263d0 100644 --- a/drivers/net/can/mscan/mpc5xxx_can.c +++ b/drivers/net/can/mscan/mpc5xxx_can.c @@ -181,7 +181,7 @@ static u32 __devinit mpc512x_can_get_clock(struct platform_device *ofdev, if (!clock_name || !strcmp(clock_name, "sys")) { sys_clk = clk_get(&ofdev->dev, "sys_clk"); - if (!sys_clk) { + if (IS_ERR(sys_clk)) { dev_err(&ofdev->dev, "couldn't get sys_clk\n"); goto exit_unmap; } @@ -204,7 +204,7 @@ static u32 __devinit mpc512x_can_get_clock(struct platform_device *ofdev, if (clocksrc < 0) { ref_clk = clk_get(&ofdev->dev, "ref_clk"); - if (!ref_clk) { + if (IS_ERR(ref_clk)) { dev_err(&ofdev->dev, "couldn't get ref_clk\n"); goto exit_unmap; } diff --git a/drivers/net/can/pch_can.c b/drivers/net/can/pch_can.c index 2bb215e00eb1..ab13a57ae3bb 100644 --- a/drivers/net/can/pch_can.c +++ b/drivers/net/can/pch_can.c @@ -560,7 +560,7 @@ static void pch_can_error(struct net_device *ndev, u32 status) stats->rx_errors++; break; case PCH_CRC_ERR: - cf->data[2] |= CAN_ERR_PROT_LOC_CRC_SEQ | + cf->data[3] |= CAN_ERR_PROT_LOC_CRC_SEQ | CAN_ERR_PROT_LOC_CRC_DEL; priv->can.can_stats.bus_error++; stats->rx_errors++; diff --git a/drivers/net/can/ti_hecc.c b/drivers/net/can/ti_hecc.c index 4accd7ec6954..19000aa86805 100644 --- a/drivers/net/can/ti_hecc.c +++ b/drivers/net/can/ti_hecc.c @@ -746,12 +746,12 @@ static int ti_hecc_error(struct net_device *ndev, int int_status, } if (err_status & HECC_CANES_CRCE) { hecc_set_bit(priv, HECC_CANES, HECC_CANES_CRCE); - cf->data[2] |= CAN_ERR_PROT_LOC_CRC_SEQ | + cf->data[3] |= CAN_ERR_PROT_LOC_CRC_SEQ | CAN_ERR_PROT_LOC_CRC_DEL; } if (err_status & HECC_CANES_ACKE) { hecc_set_bit(priv, HECC_CANES, HECC_CANES_ACKE); - cf->data[2] |= CAN_ERR_PROT_LOC_ACK | + cf->data[3] |= CAN_ERR_PROT_LOC_ACK | CAN_ERR_PROT_LOC_ACK_DEL; } } @@ -984,12 +984,12 @@ static int __devexit ti_hecc_remove(struct platform_device *pdev) struct net_device *ndev = platform_get_drvdata(pdev); struct ti_hecc_priv *priv = netdev_priv(ndev); + unregister_candev(ndev); clk_disable(priv->clk); clk_put(priv->clk); res = platform_get_resource(pdev, IORESOURCE_MEM, 0); iounmap(priv->base); release_mem_region(res->start, resource_size(res)); - unregister_candev(ndev); free_candev(ndev); platform_set_drvdata(pdev, NULL); diff --git a/drivers/net/can/usb/peak_usb/pcan_usb.c b/drivers/net/can/usb/peak_usb/pcan_usb.c index 86f26a1ede4c..25723d8ee201 
100644 --- a/drivers/net/can/usb/peak_usb/pcan_usb.c +++ b/drivers/net/can/usb/peak_usb/pcan_usb.c @@ -519,8 +519,10 @@ static int pcan_usb_decode_error(struct pcan_usb_msg_context *mc, u8 n, mc->pdev->dev.can.state = new_state; if (status_len & PCAN_USB_STATUSLEN_TIMESTAMP) { + struct skb_shared_hwtstamps *hwts = skb_hwtstamps(skb); + peak_usb_get_ts_tv(&mc->pdev->time_ref, mc->ts16, &tv); - skb->tstamp = timeval_to_ktime(tv); + hwts->hwtstamp = timeval_to_ktime(tv); } netif_rx(skb); @@ -605,6 +607,7 @@ static int pcan_usb_decode_data(struct pcan_usb_msg_context *mc, u8 status_len) struct sk_buff *skb; struct can_frame *cf; struct timeval tv; + struct skb_shared_hwtstamps *hwts; skb = alloc_can_skb(mc->netdev, &cf); if (!skb) @@ -652,7 +655,8 @@ static int pcan_usb_decode_data(struct pcan_usb_msg_context *mc, u8 status_len) /* convert timestamp into kernel time */ peak_usb_get_ts_tv(&mc->pdev->time_ref, mc->ts16, &tv); - skb->tstamp = timeval_to_ktime(tv); + hwts = skb_hwtstamps(skb); + hwts->hwtstamp = timeval_to_ktime(tv); /* push the skb */ netif_rx(skb); diff --git a/drivers/net/can/usb/peak_usb/pcan_usb_pro.c b/drivers/net/can/usb/peak_usb/pcan_usb_pro.c index 629c4ba5d49d..c95913a09737 100644 --- a/drivers/net/can/usb/peak_usb/pcan_usb_pro.c +++ b/drivers/net/can/usb/peak_usb/pcan_usb_pro.c @@ -532,6 +532,7 @@ static int pcan_usb_pro_handle_canmsg(struct pcan_usb_pro_interface *usb_if, struct can_frame *can_frame; struct sk_buff *skb; struct timeval tv; + struct skb_shared_hwtstamps *hwts; skb = alloc_can_skb(netdev, &can_frame); if (!skb) @@ -549,7 +550,8 @@ static int pcan_usb_pro_handle_canmsg(struct pcan_usb_pro_interface *usb_if, memcpy(can_frame->data, rx->data, can_frame->can_dlc); peak_usb_get_ts_tv(&usb_if->time_ref, le32_to_cpu(rx->ts32), &tv); - skb->tstamp = timeval_to_ktime(tv); + hwts = skb_hwtstamps(skb); + hwts->hwtstamp = timeval_to_ktime(tv); netif_rx(skb); netdev->stats.rx_packets++; @@ -570,6 +572,7 @@ static int pcan_usb_pro_handle_error(struct pcan_usb_pro_interface *usb_if, u8 err_mask = 0; struct sk_buff *skb; struct timeval tv; + struct skb_shared_hwtstamps *hwts; /* nothing should be sent while in BUS_OFF state */ if (dev->can.state == CAN_STATE_BUS_OFF) @@ -664,7 +667,8 @@ static int pcan_usb_pro_handle_error(struct pcan_usb_pro_interface *usb_if, dev->can.state = new_state; peak_usb_get_ts_tv(&usb_if->time_ref, le32_to_cpu(er->ts32), &tv); - skb->tstamp = timeval_to_ktime(tv); + hwts = skb_hwtstamps(skb); + hwts->hwtstamp = timeval_to_ktime(tv); netif_rx(skb); netdev->stats.rx_packets++; netdev->stats.rx_bytes += can_frame->can_dlc; diff --git a/drivers/net/ethernet/8390/ne.c b/drivers/net/ethernet/8390/ne.c index d04911d33b64..47618e505355 100644 --- a/drivers/net/ethernet/8390/ne.c +++ b/drivers/net/ethernet/8390/ne.c @@ -813,6 +813,7 @@ static int __init ne_drv_probe(struct platform_device *pdev) dev->irq = irq[this_dev]; dev->mem_end = bad[this_dev]; } + SET_NETDEV_DEV(dev, &pdev->dev); err = do_ne_probe(dev); if (err) { free_netdev(dev); diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c index 41bb34fcf687..acd824660367 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c @@ -571,14 +571,16 @@ drop: static void bnx2x_csum_validate(struct sk_buff *skb, union eth_rx_cqe *cqe, struct bnx2x_fastpath *fp) { - /* Do nothing if no IP/L4 csum validation was done */ - + /* Do nothing if no L4 csum validation was done. 
+ * We do not check whether IP csum was validated. For IPv4 we assume + * that if the card got as far as validating the L4 csum, it also + * validated the IP csum. IPv6 has no IP csum. + */ if (cqe->fast_path_cqe.status_flags & - (ETH_FAST_PATH_RX_CQE_IP_XSUM_NO_VALIDATION_FLG | - ETH_FAST_PATH_RX_CQE_L4_XSUM_NO_VALIDATION_FLG)) + ETH_FAST_PATH_RX_CQE_L4_XSUM_NO_VALIDATION_FLG) return; - /* If both IP/L4 validation were done, check if an error was found. */ + /* If L4 validation was done, check if an error was found. */ if (cqe->fast_path_cqe.type_error_flags & (ETH_FAST_PATH_RX_CQE_IP_BAD_XSUM_FLG | diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c index 6af310195bae..b8e7f3e082e9 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c @@ -9131,10 +9131,13 @@ static int __devinit bnx2x_prev_unload_common(struct bnx2x *bp) */ static void __devinit bnx2x_prev_interrupted_dmae(struct bnx2x *bp) { - u32 val = REG_RD(bp, PGLUE_B_REG_PGLUE_B_INT_STS); - if (val & PGLUE_B_PGLUE_B_INT_STS_REG_WAS_ERROR_ATTN) { - BNX2X_ERR("was error bit was found to be set in pglueb upon startup. Clearing"); - REG_WR(bp, PGLUE_B_REG_WAS_ERROR_PF_7_0_CLR, 1 << BP_FUNC(bp)); + if (!CHIP_IS_E1x(bp)) { + u32 val = REG_RD(bp, PGLUE_B_REG_PGLUE_B_INT_STS); + if (val & PGLUE_B_PGLUE_B_INT_STS_REG_WAS_ERROR_ATTN) { + BNX2X_ERR("was error bit was found to be set in pglueb upon startup. Clearing"); + REG_WR(bp, PGLUE_B_REG_WAS_ERROR_PF_7_0_CLR, + 1 << BP_FUNC(bp)); + } } } diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c index 689d2a1935bd..e143d8c0be5b 100644 --- a/drivers/net/ethernet/broadcom/tg3.c +++ b/drivers/net/ethernet/broadcom/tg3.c @@ -1136,14 +1136,26 @@ static int tg3_phy_auxctl_write(struct tg3 *tp, int reg, u32 set) return tg3_writephy(tp, MII_TG3_AUX_CTRL, set | reg); } -#define TG3_PHY_AUXCTL_SMDSP_ENABLE(tp) \ - tg3_phy_auxctl_write((tp), MII_TG3_AUXCTL_SHDWSEL_AUXCTL, \ - MII_TG3_AUXCTL_ACTL_SMDSP_ENA | \ - MII_TG3_AUXCTL_ACTL_TX_6DB) +static int tg3_phy_toggle_auxctl_smdsp(struct tg3 *tp, bool enable) +{ + u32 val; + int err; -#define TG3_PHY_AUXCTL_SMDSP_DISABLE(tp) \ - tg3_phy_auxctl_write((tp), MII_TG3_AUXCTL_SHDWSEL_AUXCTL, \ - MII_TG3_AUXCTL_ACTL_TX_6DB); + err = tg3_phy_auxctl_read(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val); + + if (err) + return err; + if (enable) + + val |= MII_TG3_AUXCTL_ACTL_SMDSP_ENA; + else + val &= ~MII_TG3_AUXCTL_ACTL_SMDSP_ENA; + + err = tg3_phy_auxctl_write((tp), MII_TG3_AUXCTL_SHDWSEL_AUXCTL, + val | MII_TG3_AUXCTL_ACTL_TX_6DB); + + return err; +} static int tg3_bmcr_reset(struct tg3 *tp) { @@ -2076,7 +2088,7 @@ static void tg3_phy_apply_otp(struct tg3 *tp) otp = tp->phy_otp; - if (TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) + if (tg3_phy_toggle_auxctl_smdsp(tp, true)) return; phy = ((otp & TG3_OTP_AGCTGT_MASK) >> TG3_OTP_AGCTGT_SHIFT); @@ -2101,7 +2113,7 @@ static void tg3_phy_apply_otp(struct tg3 *tp) ((otp & TG3_OTP_RCOFF_MASK) >> TG3_OTP_RCOFF_SHIFT); tg3_phydsp_write(tp, MII_TG3_DSP_EXP97, phy); - TG3_PHY_AUXCTL_SMDSP_DISABLE(tp); + tg3_phy_toggle_auxctl_smdsp(tp, false); } static void tg3_phy_eee_adjust(struct tg3 *tp, u32 current_link_up) @@ -2137,9 +2149,9 @@ static void tg3_phy_eee_adjust(struct tg3 *tp, u32 current_link_up) if (!tp->setlpicnt) { if (current_link_up == 1 && - !TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) { + !tg3_phy_toggle_auxctl_smdsp(tp, true)) { tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, 0x0000); - 
TG3_PHY_AUXCTL_SMDSP_DISABLE(tp); + tg3_phy_toggle_auxctl_smdsp(tp, false); } val = tr32(TG3_CPMU_EEE_MODE); @@ -2155,11 +2167,11 @@ static void tg3_phy_eee_enable(struct tg3 *tp) (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 || GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 || tg3_flag(tp, 57765_CLASS)) && - !TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) { + !tg3_phy_toggle_auxctl_smdsp(tp, true)) { val = MII_TG3_DSP_TAP26_ALNOKO | MII_TG3_DSP_TAP26_RMRXSTO; tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, val); - TG3_PHY_AUXCTL_SMDSP_DISABLE(tp); + tg3_phy_toggle_auxctl_smdsp(tp, false); } val = tr32(TG3_CPMU_EEE_MODE); @@ -2303,7 +2315,7 @@ static int tg3_phy_reset_5703_4_5(struct tg3 *tp) tg3_writephy(tp, MII_CTRL1000, CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER); - err = TG3_PHY_AUXCTL_SMDSP_ENABLE(tp); + err = tg3_phy_toggle_auxctl_smdsp(tp, true); if (err) return err; @@ -2324,7 +2336,7 @@ static int tg3_phy_reset_5703_4_5(struct tg3 *tp) tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8200); tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0000); - TG3_PHY_AUXCTL_SMDSP_DISABLE(tp); + tg3_phy_toggle_auxctl_smdsp(tp, false); tg3_writephy(tp, MII_CTRL1000, phy9_orig); @@ -2413,10 +2425,10 @@ static int tg3_phy_reset(struct tg3 *tp) out: if ((tp->phy_flags & TG3_PHYFLG_ADC_BUG) && - !TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) { + !tg3_phy_toggle_auxctl_smdsp(tp, true)) { tg3_phydsp_write(tp, 0x201f, 0x2aaa); tg3_phydsp_write(tp, 0x000a, 0x0323); - TG3_PHY_AUXCTL_SMDSP_DISABLE(tp); + tg3_phy_toggle_auxctl_smdsp(tp, false); } if (tp->phy_flags & TG3_PHYFLG_5704_A0_BUG) { @@ -2425,14 +2437,14 @@ out: } if (tp->phy_flags & TG3_PHYFLG_BER_BUG) { - if (!TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) { + if (!tg3_phy_toggle_auxctl_smdsp(tp, true)) { tg3_phydsp_write(tp, 0x000a, 0x310b); tg3_phydsp_write(tp, 0x201f, 0x9506); tg3_phydsp_write(tp, 0x401f, 0x14e2); - TG3_PHY_AUXCTL_SMDSP_DISABLE(tp); + tg3_phy_toggle_auxctl_smdsp(tp, false); } } else if (tp->phy_flags & TG3_PHYFLG_JITTER_BUG) { - if (!TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) { + if (!tg3_phy_toggle_auxctl_smdsp(tp, true)) { tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a); if (tp->phy_flags & TG3_PHYFLG_ADJUST_TRIM) { tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x110b); @@ -2441,7 +2453,7 @@ out: } else tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x010b); - TG3_PHY_AUXCTL_SMDSP_DISABLE(tp); + tg3_phy_toggle_auxctl_smdsp(tp, false); } } @@ -3858,7 +3870,7 @@ static int tg3_phy_autoneg_cfg(struct tg3 *tp, u32 advertise, u32 flowctrl) tw32(TG3_CPMU_EEE_MODE, tr32(TG3_CPMU_EEE_MODE) & ~TG3_CPMU_EEEMD_LPI_ENABLE); - err = TG3_PHY_AUXCTL_SMDSP_ENABLE(tp); + err = tg3_phy_toggle_auxctl_smdsp(tp, true); if (!err) { u32 err2; @@ -3891,7 +3903,7 @@ static int tg3_phy_autoneg_cfg(struct tg3 *tp, u32 advertise, u32 flowctrl) MII_TG3_DSP_CH34TP2_HIBW01); } - err2 = TG3_PHY_AUXCTL_SMDSP_DISABLE(tp); + err2 = tg3_phy_toggle_auxctl_smdsp(tp, false); if (!err) err = err2; } @@ -6574,6 +6586,9 @@ static void tg3_poll_controller(struct net_device *dev) int i; struct tg3 *tp = netdev_priv(dev); + if (tg3_irq_sync(tp)) + return; + for (i = 0; i < tp->irq_cnt; i++) tg3_interrupt(tp->napi[i].irq_vec, &tp->napi[i]); } @@ -15529,6 +15544,7 @@ static int __devinit tg3_init_one(struct pci_dev *pdev, tp->pm_cap = pm_cap; tp->rx_mode = TG3_DEF_RX_MODE; tp->tx_mode = TG3_DEF_TX_MODE; + tp->irq_sync = 1; if (tg3_debug > 0) tp->msg_enable = tg3_debug; diff --git a/drivers/net/ethernet/calxeda/xgmac.c b/drivers/net/ethernet/calxeda/xgmac.c index 11f667f6131a..4ebbe6f609d0 100644 --- a/drivers/net/ethernet/calxeda/xgmac.c +++ 
b/drivers/net/ethernet/calxeda/xgmac.c @@ -547,6 +547,10 @@ static int desc_get_rx_status(struct xgmac_priv *priv, struct xgmac_dma_desc *p) return -1; } + /* All frames should fit into a single buffer */ + if (!(status & RXDESC_FIRST_SEG) || !(status & RXDESC_LAST_SEG)) + return -1; + /* Check if packet has checksum already */ if ((status & RXDESC_FRAME_TYPE) && (status & RXDESC_EXT_STATUS) && !(ext_status & RXDESC_IP_PAYLOAD_MASK)) diff --git a/drivers/net/ethernet/emulex/benet/be.h b/drivers/net/ethernet/emulex/benet/be.h index 9576ac002c23..dcb02c219ddf 100644 --- a/drivers/net/ethernet/emulex/benet/be.h +++ b/drivers/net/ethernet/emulex/benet/be.h @@ -536,6 +536,11 @@ static inline void be_check_sriov_fn_type(struct be_adapter *adapter) adapter->is_virtfn = (sli_intf & SLI_INTF_FT_MASK) ? 1 : 0; } +static inline bool is_ipv4_pkt(struct sk_buff *skb) +{ + return skb->protocol == ntohs(ETH_P_IP) && ip_hdr(skb)->version == 4; +} + static inline void be_vf_eth_addr_generate(struct be_adapter *adapter, u8 *mac) { u32 addr; diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c index 1bbf6b3eca9c..ef1f9400b967 100644 --- a/drivers/net/ethernet/emulex/benet/be_main.c +++ b/drivers/net/ethernet/emulex/benet/be_main.c @@ -571,6 +571,11 @@ static inline u16 be_get_tx_vlan_tag(struct be_adapter *adapter, return vlan_tag; } +static int be_vlan_tag_chk(struct be_adapter *adapter, struct sk_buff *skb) +{ + return vlan_tx_tag_present(skb) || adapter->pvid; +} + static void wrb_fill_hdr(struct be_adapter *adapter, struct be_eth_hdr_wrb *hdr, struct sk_buff *skb, u32 wrb_cnt, u32 len) { @@ -698,33 +703,56 @@ dma_err: return 0; } +static struct sk_buff *be_insert_vlan_in_pkt(struct be_adapter *adapter, + struct sk_buff *skb) +{ + u16 vlan_tag = 0; + + skb = skb_share_check(skb, GFP_ATOMIC); + if (unlikely(!skb)) + return skb; + + if (vlan_tx_tag_present(skb)) { + vlan_tag = be_get_tx_vlan_tag(adapter, skb); + __vlan_put_tag(skb, vlan_tag); + skb->vlan_tci = 0; + } + + return skb; +} + static netdev_tx_t be_xmit(struct sk_buff *skb, struct net_device *netdev) { struct be_adapter *adapter = netdev_priv(netdev); struct be_tx_obj *txo = &adapter->tx_obj[skb_get_queue_mapping(skb)]; struct be_queue_info *txq = &txo->q; + struct iphdr *ip = NULL; u32 wrb_cnt = 0, copied = 0; - u32 start = txq->head; + u32 start = txq->head, eth_hdr_len; bool dummy_wrb, stopped = false; - /* For vlan tagged pkts, BE - * 1) calculates checksum even when CSO is not requested - * 2) calculates checksum wrongly for padded pkt less than - * 60 bytes long. - * As a workaround disable TX vlan offloading in such cases. + eth_hdr_len = ntohs(skb->protocol) == ETH_P_8021Q ? + VLAN_ETH_HLEN : ETH_HLEN; + + /* HW has a bug which considers padding bytes as legal + * and modifies the IPv4 hdr's 'tot_len' field */ - if (unlikely(vlan_tx_tag_present(skb) && - (skb->ip_summed != CHECKSUM_PARTIAL || skb->len <= 60))) { - skb = skb_share_check(skb, GFP_ATOMIC); - if (unlikely(!skb)) - goto tx_drop; + if (skb->len <= 60 && be_vlan_tag_chk(adapter, skb) && + is_ipv4_pkt(skb)) { + ip = (struct iphdr *)ip_hdr(skb); + pskb_trim(skb, eth_hdr_len + ntohs(ip->tot_len)); + } - skb = __vlan_put_tag(skb, be_get_tx_vlan_tag(adapter, skb)); + /* HW has a bug wherein it will calculate CSUM for VLAN + * pkts even though it is disabled. + * Manually insert VLAN in pkt. 
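
Aside: the be_xmit() workaround above shrinks short, padded IPv4 frames back to the length the IP header advertises before handing them to hardware that mis-handles the padding bytes. A simplified standalone sketch of that length calculation, with mock fields standing in for the skb and IP header:

    #include <arpa/inet.h>    /* ntohs() */
    #include <stdint.h>

    struct mock_frame {
            unsigned int len;          /* bytes currently in the buffer        */
            unsigned int eth_hdr_len;  /* 14, or 18 when a VLAN tag is present */
            uint16_t ip_tot_len_be;    /* IPv4 tot_len field, big-endian       */
    };

    /* Trim trailing padding from a short IPv4 frame so its length matches
     * eth_hdr_len + tot_len, mirroring the pskb_trim() call above. */
    static void trim_padded_ipv4(struct mock_frame *f)
    {
            unsigned int wanted = f->eth_hdr_len + ntohs(f->ip_tot_len_be);

            if (f->len <= 60 && f->len > wanted)
                    f->len = wanted;
    }
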
+ */ + if (skb->ip_summed != CHECKSUM_PARTIAL && + be_vlan_tag_chk(adapter, skb)) { + skb = be_insert_vlan_in_pkt(adapter, skb); if (unlikely(!skb)) goto tx_drop; - - skb->vlan_tci = 0; } wrb_cnt = wrb_cnt_for_skb(adapter, skb, &dummy_wrb); diff --git a/drivers/net/ethernet/freescale/gianfar.c b/drivers/net/ethernet/freescale/gianfar.c index 24381e1298e2..0819a7403847 100644 --- a/drivers/net/ethernet/freescale/gianfar.c +++ b/drivers/net/ethernet/freescale/gianfar.c @@ -1037,7 +1037,7 @@ static int gfar_probe(struct platform_device *ofdev) if (priv->device_flags & FSL_GIANFAR_DEV_HAS_VLAN) { dev->hw_features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX; - dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX; + dev->features |= NETIF_F_HW_VLAN_RX; } if (priv->device_flags & FSL_GIANFAR_DEV_HAS_EXTENDED_HASH) { diff --git a/drivers/net/ethernet/ibm/ibmveth.c b/drivers/net/ethernet/ibm/ibmveth.c index 9010cea68bc3..b68d28a130e6 100644 --- a/drivers/net/ethernet/ibm/ibmveth.c +++ b/drivers/net/ethernet/ibm/ibmveth.c @@ -472,14 +472,9 @@ static void ibmveth_cleanup(struct ibmveth_adapter *adapter) } if (adapter->rx_queue.queue_addr != NULL) { - if (!dma_mapping_error(dev, adapter->rx_queue.queue_dma)) { - dma_unmap_single(dev, - adapter->rx_queue.queue_dma, - adapter->rx_queue.queue_len, - DMA_BIDIRECTIONAL); - adapter->rx_queue.queue_dma = DMA_ERROR_CODE; - } - kfree(adapter->rx_queue.queue_addr); + dma_free_coherent(dev, adapter->rx_queue.queue_len, + adapter->rx_queue.queue_addr, + adapter->rx_queue.queue_dma); adapter->rx_queue.queue_addr = NULL; } @@ -556,10 +551,13 @@ static int ibmveth_open(struct net_device *netdev) goto err_out; } + dev = &adapter->vdev->dev; + adapter->rx_queue.queue_len = sizeof(struct ibmveth_rx_q_entry) * rxq_entries; - adapter->rx_queue.queue_addr = kmalloc(adapter->rx_queue.queue_len, - GFP_KERNEL); + adapter->rx_queue.queue_addr = + dma_alloc_coherent(dev, adapter->rx_queue.queue_len, + &adapter->rx_queue.queue_dma, GFP_KERNEL); if (!adapter->rx_queue.queue_addr) { netdev_err(netdev, "unable to allocate rx queue pages\n"); @@ -567,19 +565,13 @@ static int ibmveth_open(struct net_device *netdev) goto err_out; } - dev = &adapter->vdev->dev; - adapter->buffer_list_dma = dma_map_single(dev, adapter->buffer_list_addr, 4096, DMA_BIDIRECTIONAL); adapter->filter_list_dma = dma_map_single(dev, adapter->filter_list_addr, 4096, DMA_BIDIRECTIONAL); - adapter->rx_queue.queue_dma = dma_map_single(dev, - adapter->rx_queue.queue_addr, - adapter->rx_queue.queue_len, DMA_BIDIRECTIONAL); if ((dma_mapping_error(dev, adapter->buffer_list_dma)) || - (dma_mapping_error(dev, adapter->filter_list_dma)) || - (dma_mapping_error(dev, adapter->rx_queue.queue_dma))) { + (dma_mapping_error(dev, adapter->filter_list_dma))) { netdev_err(netdev, "unable to map filter or buffer list " "pages\n"); rc = -ENOMEM; diff --git a/drivers/net/ethernet/intel/e1000e/e1000.h b/drivers/net/ethernet/intel/e1000e/e1000.h index b83897f76ee3..1ab8067b028b 100644 --- a/drivers/net/ethernet/intel/e1000e/e1000.h +++ b/drivers/net/ethernet/intel/e1000e/e1000.h @@ -175,13 +175,13 @@ struct e1000_info; /* * in the case of WTHRESH, it appears at least the 82571/2 hardware * writes back 4 descriptors when WTHRESH=5, and 3 descriptors when - * WTHRESH=4, and since we want 64 bytes at a time written back, set - * it to 5 + * WTHRESH=4, so a setting of 5 gives the most efficient bus + * utilization but to avoid possible Tx stalls, set it to 1 */ #define E1000_TXDCTL_DMA_BURST_ENABLE \ (E1000_TXDCTL_GRAN | /* set 
descriptor granularity */ \ E1000_TXDCTL_COUNT_DESC | \ - (5 << 16) | /* wthresh must be +1 more than desired */\ + (1 << 16) | /* wthresh must be +1 more than desired */\ (1 << 8) | /* hthresh */ \ 0x1f) /* pthresh */ diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c index 5621d5b28765..7e88aaff6ecc 100644 --- a/drivers/net/ethernet/intel/e1000e/netdev.c +++ b/drivers/net/ethernet/intel/e1000e/netdev.c @@ -2806,7 +2806,7 @@ static void e1000_configure_tx(struct e1000_adapter *adapter) * set up some performance related parameters to encourage the * hardware to use the bus more efficiently in bursts, depends * on the tx_int_delay to be enabled, - * wthresh = 5 ==> burst write a cacheline (64 bytes) at a time + * wthresh = 1 ==> burst write is disabled to avoid Tx stalls * hthresh = 1 ==> prefetch when one or more available * pthresh = 0x1f ==> prefetch if internal cache 31 or less * BEWARE: this seems to work but should be considered first if diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c index 8683ca4748c8..1fb180dbc7b1 100644 --- a/drivers/net/ethernet/intel/igb/igb_main.c +++ b/drivers/net/ethernet/intel/igb/igb_main.c @@ -951,17 +951,18 @@ static int igb_request_msix(struct igb_adapter *adapter) { struct net_device *netdev = adapter->netdev; struct e1000_hw *hw = &adapter->hw; - int i, err = 0, vector = 0; + int i, err = 0, vector = 0, free_vector = 0; err = request_irq(adapter->msix_entries[vector].vector, igb_msix_other, 0, netdev->name, adapter); if (err) - goto out; - vector++; + goto err_out; for (i = 0; i < adapter->num_q_vectors; i++) { struct igb_q_vector *q_vector = adapter->q_vector[i]; + vector++; + q_vector->itr_register = hw->hw_addr + E1000_EITR(vector); if (q_vector->rx.ring && q_vector->tx.ring) @@ -980,13 +981,22 @@ static int igb_request_msix(struct igb_adapter *adapter) igb_msix_ring, 0, q_vector->name, q_vector); if (err) - goto out; - vector++; + goto err_free; } igb_configure_msix(adapter); return 0; -out: + +err_free: + /* free already assigned IRQs */ + free_irq(adapter->msix_entries[free_vector++].vector, adapter); + + vector--; + for (i = 0; i < vector; i++) { + free_irq(adapter->msix_entries[free_vector++].vector, + adapter->q_vector[i]); + } +err_out: return err; } @@ -4639,11 +4649,13 @@ void igb_update_stats(struct igb_adapter *adapter, bytes = 0; packets = 0; for (i = 0; i < adapter->num_rx_queues; i++) { - u32 rqdpc_tmp = rd32(E1000_RQDPC(i)) & 0x0FFF; + u32 rqdpc = rd32(E1000_RQDPC(i)); struct igb_ring *ring = adapter->rx_ring[i]; - ring->rx_stats.drops += rqdpc_tmp; - net_stats->rx_fifo_errors += rqdpc_tmp; + if (rqdpc) { + ring->rx_stats.drops += rqdpc; + net_stats->rx_fifo_errors += rqdpc; + } do { start = u64_stats_fetch_begin_bh(&ring->rx_syncp); diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c index 49aa41fe7b84..ab4d4d219e37 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c @@ -3216,6 +3216,7 @@ static s32 ixgbe_device_supports_autoneg_fc(struct ixgbe_hw *hw) switch (hw->device_id) { case IXGBE_DEV_ID_X540T: + case IXGBE_DEV_ID_X540T1: return 0; case IXGBE_DEV_ID_82599_T3_LOM: return 0; diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c index cfe7d269590c..6757d6c30fe7 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c +++ 
b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c @@ -2006,6 +2006,7 @@ static int ixgbe_wol_exclusion(struct ixgbe_adapter *adapter, retval = 0; break; case IXGBE_DEV_ID_X540T: + case IXGBE_DEV_ID_X540T1: /* check eeprom to see if enabled wol */ if ((wol_cap == IXGBE_DEVICE_CAPS_WOL_PORT0_1) || ((wol_cap == IXGBE_DEVICE_CAPS_WOL_PORT0) && diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c index a66c2159b1f6..6d1f6c512626 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c @@ -114,6 +114,7 @@ static DEFINE_PCI_DEVICE_TABLE(ixgbe_pci_tbl) = { {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_LS), board_82599 }, {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599EN_SFP), board_82599 }, {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_SFP_SF_QP), board_82599 }, + {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X540T1), board_X540 }, /* required last entry */ {0, } }; @@ -7060,6 +7061,7 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev, adapter->wol = IXGBE_WUFC_MAG; break; case IXGBE_DEV_ID_X540T: + case IXGBE_DEV_ID_X540T1: /* Check eeprom to see if it is enabled */ hw->eeprom.ops.read(hw, 0x2c, &adapter->eeprom_cap); wol_cap = adapter->eeprom_cap & IXGBE_DEVICE_CAPS_WOL_MASK; diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h index 8636e8344fc9..37eb39c013a0 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h @@ -67,6 +67,7 @@ #define IXGBE_DEV_ID_82599_LS 0x154F #define IXGBE_DEV_ID_X540T 0x1528 #define IXGBE_DEV_ID_82599_SFP_SF_QP 0x154A +#define IXGBE_DEV_ID_X540T1 0x1560 /* VF Device IDs */ #define IXGBE_DEV_ID_82599_VF 0x10ED diff --git a/drivers/net/ethernet/marvell/skge.c b/drivers/net/ethernet/marvell/skge.c index 5a30bf823099..f4be8f7c2866 100644 --- a/drivers/net/ethernet/marvell/skge.c +++ b/drivers/net/ethernet/marvell/skge.c @@ -4153,6 +4153,13 @@ static struct dmi_system_id skge_32bit_dma_boards[] = { DMI_MATCH(DMI_BOARD_NAME, "nForce"), }, }, + { + .ident = "ASUS P5NSLI", + .matches = { + DMI_MATCH(DMI_BOARD_VENDOR, "ASUSTeK Computer INC."), + DMI_MATCH(DMI_BOARD_NAME, "P5NSLI") + }, + }, {} }; diff --git a/drivers/net/ethernet/marvell/sky2.c b/drivers/net/ethernet/marvell/sky2.c index 589753fb673b..2b78ddd5d631 100644 --- a/drivers/net/ethernet/marvell/sky2.c +++ b/drivers/net/ethernet/marvell/sky2.c @@ -3079,8 +3079,10 @@ static irqreturn_t sky2_intr(int irq, void *dev_id) /* Reading this mask interrupts as side effect */ status = sky2_read32(hw, B0_Y2_SP_ISRC2); - if (status == 0 || status == ~0) + if (status == 0 || status == ~0) { + sky2_write32(hw, B0_Y2_SP_ICR, 2); return IRQ_NONE; + } prefetch(&hw->st_le[hw->st_idx]); diff --git a/drivers/net/ethernet/mellanox/mlx4/en_tx.c b/drivers/net/ethernet/mellanox/mlx4/en_tx.c index 17968244c399..efa3a13f8b1d 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_tx.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_tx.c @@ -683,10 +683,15 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev) ring->tx_csum++; } - /* Copy dst mac address to wqe */ - ethh = (struct ethhdr *)skb->data; - tx_desc->ctrl.srcrb_flags16[0] = get_unaligned((__be16 *)ethh->h_dest); - tx_desc->ctrl.imm = get_unaligned((__be32 *)(ethh->h_dest + 2)); + if (mlx4_is_mfunc(mdev->dev) || priv->validate_loopback) { + /* Copy dst mac address to wqe. 
This allows loopback in eSwitch, + * so that VFs and PF can communicate with each other + */ + ethh = (struct ethhdr *)skb->data; + tx_desc->ctrl.srcrb_flags16[0] = get_unaligned((__be16 *)ethh->h_dest); + tx_desc->ctrl.imm = get_unaligned((__be32 *)(ethh->h_dest + 2)); + } + /* Handle LSO (TSO) packets */ if (lso_header_size) { /* Mark opcode as LSO */ diff --git a/drivers/net/ethernet/mellanox/mlx4/main.c b/drivers/net/ethernet/mellanox/mlx4/main.c index 8bb05b46db86..1995cb05acf2 100644 --- a/drivers/net/ethernet/mellanox/mlx4/main.c +++ b/drivers/net/ethernet/mellanox/mlx4/main.c @@ -1526,15 +1526,8 @@ static void mlx4_enable_msi_x(struct mlx4_dev *dev) int i; if (msi_x) { - /* In multifunction mode each function gets 2 msi-X vectors - * one for data path completions anf the other for asynch events - * or command completions */ - if (mlx4_is_mfunc(dev)) { - nreq = 2; - } else { - nreq = min_t(int, dev->caps.num_eqs - - dev->caps.reserved_eqs, nreq); - } + nreq = min_t(int, dev->caps.num_eqs - dev->caps.reserved_eqs, + nreq); entries = kcalloc(nreq, sizeof *entries, GFP_KERNEL); if (!entries) diff --git a/drivers/net/ethernet/nxp/lpc_eth.c b/drivers/net/ethernet/nxp/lpc_eth.c index 0c5edc17a1e0..077bb00cbac2 100644 --- a/drivers/net/ethernet/nxp/lpc_eth.c +++ b/drivers/net/ethernet/nxp/lpc_eth.c @@ -1523,6 +1523,7 @@ static int lpc_eth_drv_remove(struct platform_device *pdev) pldat->dma_buff_base_p); free_irq(ndev->irq, ndev); iounmap(pldat->net_base); + mdiobus_unregister(pldat->mii_bus); mdiobus_free(pldat->mii_bus); clk_disable(pldat->clk); clk_put(pldat->clk); diff --git a/drivers/net/ethernet/qlogic/netxen/netxen_nic_init.c b/drivers/net/ethernet/qlogic/netxen/netxen_nic_init.c index 718b27440351..83538ccddebb 100644 --- a/drivers/net/ethernet/qlogic/netxen/netxen_nic_init.c +++ b/drivers/net/ethernet/qlogic/netxen/netxen_nic_init.c @@ -144,7 +144,7 @@ void netxen_release_tx_buffers(struct netxen_adapter *adapter) buffrag->length, PCI_DMA_TODEVICE); buffrag->dma = 0ULL; } - for (j = 0; j < cmd_buf->frag_count; j++) { + for (j = 1; j < cmd_buf->frag_count; j++) { buffrag++; if (buffrag->dma) { pci_unmap_page(adapter->pdev, buffrag->dma, diff --git a/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c b/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c index 65a718f9ccd3..7ee9c740085e 100644 --- a/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c +++ b/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c @@ -1370,6 +1370,10 @@ static void netxen_mask_aer_correctable(struct netxen_adapter *adapter) struct pci_dev *root = pdev->bus->self; u32 aer_pos; + /* root bus? */ + if (!root) + return; + if (adapter->ahw.board_type != NETXEN_BRDTYPE_P3_4_GB_MM && adapter->ahw.board_type != NETXEN_BRDTYPE_P3_10G_TP) return; @@ -1952,10 +1956,12 @@ unwind: while (--i >= 0) { nf = &pbuf->frag_array[i+1]; pci_unmap_page(pdev, nf->dma, nf->length, PCI_DMA_TODEVICE); + nf->dma = 0ULL; } nf = &pbuf->frag_array[0]; pci_unmap_single(pdev, nf->dma, skb_headlen(skb), PCI_DMA_TODEVICE); + nf->dma = 0ULL; out_err: return -ENOMEM; diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c index 6fbf75564daa..58d039cb5267 100644 --- a/drivers/net/ethernet/realtek/r8169.c +++ b/drivers/net/ethernet/realtek/r8169.c @@ -73,7 +73,7 @@ static const int multicast_filter_limit = 32; #define MAX_READ_REQUEST_SHIFT 12 -#define TX_DMA_BURST 6 /* Maximum PCI burst, '6' is 1024 */ +#define TX_DMA_BURST 7 /* Maximum PCI burst, '7' is unlimited */ #define SafeMtu 0x1c20 /* ... 
actually life sucks beyond ~7k */ #define InterFrameGap 0x03 /* 3 means InterFrameGap = the shortest one */ @@ -319,6 +319,8 @@ enum rtl_registers { Config0 = 0x51, Config1 = 0x52, Config2 = 0x53, +#define PME_SIGNAL (1 << 5) /* 8168c and later */ + Config3 = 0x54, Config4 = 0x55, Config5 = 0x56, @@ -1401,7 +1403,6 @@ static void __rtl8169_set_wol(struct rtl8169_private *tp, u32 wolopts) u16 reg; u8 mask; } cfg[] = { - { WAKE_ANY, Config1, PMEnable }, { WAKE_PHY, Config3, LinkUp }, { WAKE_MAGIC, Config3, MagicPacket }, { WAKE_UCAST, Config5, UWF }, @@ -1409,16 +1410,32 @@ static void __rtl8169_set_wol(struct rtl8169_private *tp, u32 wolopts) { WAKE_MCAST, Config5, MWF }, { WAKE_ANY, Config5, LanWake } }; + u8 options; RTL_W8(Cfg9346, Cfg9346_Unlock); for (i = 0; i < ARRAY_SIZE(cfg); i++) { - u8 options = RTL_R8(cfg[i].reg) & ~cfg[i].mask; + options = RTL_R8(cfg[i].reg) & ~cfg[i].mask; if (wolopts & cfg[i].opt) options |= cfg[i].mask; RTL_W8(cfg[i].reg, options); } + switch (tp->mac_version) { + case RTL_GIGA_MAC_VER_01 ... RTL_GIGA_MAC_VER_17: + options = RTL_R8(Config1) & ~PMEnable; + if (wolopts) + options |= PMEnable; + RTL_W8(Config1, options); + break; + default: + options = RTL_R8(Config2) & ~PME_SIGNAL; + if (wolopts) + options |= PME_SIGNAL; + RTL_W8(Config2, options); + break; + } + RTL_W8(Cfg9346, Cfg9346_Lock); } @@ -3472,6 +3489,8 @@ static void rtl_wol_suspend_quirk(struct rtl8169_private *tp) void __iomem *ioaddr = tp->mmio_addr; switch (tp->mac_version) { + case RTL_GIGA_MAC_VER_25: + case RTL_GIGA_MAC_VER_26: case RTL_GIGA_MAC_VER_29: case RTL_GIGA_MAC_VER_30: case RTL_GIGA_MAC_VER_32: @@ -3738,6 +3757,7 @@ static void rtl_init_rxcfg(struct rtl8169_private *tp) case RTL_GIGA_MAC_VER_22: case RTL_GIGA_MAC_VER_23: case RTL_GIGA_MAC_VER_24: + case RTL_GIGA_MAC_VER_34: RTL_W32(RxConfig, RX128_INT_EN | RX_MULTI_EN | RX_DMA_BURST); break; default: @@ -4112,6 +4132,9 @@ static void rtl_set_rx_mode(struct net_device *dev) mc_filter[1] = swab32(data); } + if (tp->mac_version == RTL_GIGA_MAC_VER_35) + mc_filter[1] = mc_filter[0] = 0xffffffff; + RTL_W32(MAR0 + 4, mc_filter[1]); RTL_W32(MAR0 + 0, mc_filter[0]); @@ -5428,13 +5451,6 @@ process_pkt: tp->rx_stats.bytes += pkt_size; u64_stats_update_end(&tp->rx_stats.syncp); } - - /* Work around for AMD plateform. 
*/ - if ((desc->opts2 & cpu_to_le32(0xfffe000)) && - (tp->mac_version == RTL_GIGA_MAC_VER_05)) { - desc->opts2 = 0; - cur_rx++; - } } count = cur_rx - tp->cur_rx; @@ -5494,11 +5510,7 @@ static void rtl_slow_event_work(struct rtl8169_private *tp) if (status & LinkChg) __rtl8169_check_link_status(dev, tp, tp->mmio_addr, true); - napi_disable(&tp->napi); - rtl_irq_disable(tp); - - napi_enable(&tp->napi); - napi_schedule(&tp->napi); + rtl_irq_enable_all(tp); } static void rtl_task(struct work_struct *work) diff --git a/drivers/net/ethernet/sfc/efx.c b/drivers/net/ethernet/sfc/efx.c index 4a0005342e65..954b8854b25a 100644 --- a/drivers/net/ethernet/sfc/efx.c +++ b/drivers/net/ethernet/sfc/efx.c @@ -1498,6 +1498,11 @@ static int efx_probe_all(struct efx_nic *efx) goto fail2; } + BUILD_BUG_ON(EFX_DEFAULT_DMAQ_SIZE < EFX_RXQ_MIN_ENT); + if (WARN_ON(EFX_DEFAULT_DMAQ_SIZE < EFX_TXQ_MIN_ENT(efx))) { + rc = -EINVAL; + goto fail3; + } efx->rxq_entries = efx->txq_entries = EFX_DEFAULT_DMAQ_SIZE; rc = efx_probe_filters(efx); @@ -2065,6 +2070,7 @@ static int efx_register_netdev(struct efx_nic *efx) net_dev->irq = efx->pci_dev->irq; net_dev->netdev_ops = &efx_netdev_ops; SET_ETHTOOL_OPS(net_dev, &efx_ethtool_ops); + net_dev->gso_max_segs = EFX_TSO_MAX_SEGS; rtnl_lock(); diff --git a/drivers/net/ethernet/sfc/efx.h b/drivers/net/ethernet/sfc/efx.h index be8f9158a714..70755c97251a 100644 --- a/drivers/net/ethernet/sfc/efx.h +++ b/drivers/net/ethernet/sfc/efx.h @@ -30,6 +30,7 @@ extern netdev_tx_t efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb); extern void efx_xmit_done(struct efx_tx_queue *tx_queue, unsigned int index); extern int efx_setup_tc(struct net_device *net_dev, u8 num_tc); +extern unsigned int efx_tx_max_skb_descs(struct efx_nic *efx); /* RX */ extern int efx_probe_rx_queue(struct efx_rx_queue *rx_queue); @@ -52,10 +53,15 @@ extern void efx_schedule_slow_fill(struct efx_rx_queue *rx_queue); #define EFX_MAX_EVQ_SIZE 16384UL #define EFX_MIN_EVQ_SIZE 512UL -/* The smallest [rt]xq_entries that the driver supports. Callers of - * efx_wake_queue() assume that they can subsequently send at least one - * skb. Falcon/A1 may require up to three descriptors per skb_frag. */ -#define EFX_MIN_RING_SIZE (roundup_pow_of_two(2 * 3 * MAX_SKB_FRAGS)) +/* Maximum number of TCP segments we support for soft-TSO */ +#define EFX_TSO_MAX_SEGS 100 + +/* The smallest [rt]xq_entries that the driver supports. RX minimum + * is a bit arbitrary. For TX, we must have space for at least 2 + * TSO skbs. 
+ */ +#define EFX_RXQ_MIN_ENT 128U +#define EFX_TXQ_MIN_ENT(efx) (2 * efx_tx_max_skb_descs(efx)) /* Filters */ extern int efx_probe_filters(struct efx_nic *efx); diff --git a/drivers/net/ethernet/sfc/ethtool.c b/drivers/net/ethernet/sfc/ethtool.c index f22f45f515a8..ff64def0ffd7 100644 --- a/drivers/net/ethernet/sfc/ethtool.c +++ b/drivers/net/ethernet/sfc/ethtool.c @@ -680,21 +680,27 @@ static int efx_ethtool_set_ringparam(struct net_device *net_dev, struct ethtool_ringparam *ring) { struct efx_nic *efx = netdev_priv(net_dev); + u32 txq_entries; if (ring->rx_mini_pending || ring->rx_jumbo_pending || ring->rx_pending > EFX_MAX_DMAQ_SIZE || ring->tx_pending > EFX_MAX_DMAQ_SIZE) return -EINVAL; - if (ring->rx_pending < EFX_MIN_RING_SIZE || - ring->tx_pending < EFX_MIN_RING_SIZE) { + if (ring->rx_pending < EFX_RXQ_MIN_ENT) { netif_err(efx, drv, efx->net_dev, - "TX and RX queues cannot be smaller than %ld\n", - EFX_MIN_RING_SIZE); + "RX queues cannot be smaller than %u\n", + EFX_RXQ_MIN_ENT); return -EINVAL; } - return efx_realloc_channels(efx, ring->rx_pending, ring->tx_pending); + txq_entries = max(ring->tx_pending, EFX_TXQ_MIN_ENT(efx)); + if (txq_entries != ring->tx_pending) + netif_warn(efx, drv, efx->net_dev, + "increasing TX queue size to minimum of %u\n", + txq_entries); + + return efx_realloc_channels(efx, ring->rx_pending, txq_entries); } static int efx_ethtool_set_pauseparam(struct net_device *net_dev, @@ -857,8 +863,8 @@ static int efx_ethtool_get_class_rule(struct efx_nic *efx, &ip_entry->ip4dst, &ip_entry->pdst); if (rc != 0) { rc = efx_filter_get_ipv4_full( - &spec, &proto, &ip_entry->ip4src, &ip_entry->psrc, - &ip_entry->ip4dst, &ip_entry->pdst); + &spec, &proto, &ip_entry->ip4dst, &ip_entry->pdst, + &ip_entry->ip4src, &ip_entry->psrc); EFX_WARN_ON_PARANOID(rc); ip_mask->ip4src = ~0; ip_mask->psrc = ~0; diff --git a/drivers/net/ethernet/sfc/tx.c b/drivers/net/ethernet/sfc/tx.c index 94d0365b31cd..305430da48fb 100644 --- a/drivers/net/ethernet/sfc/tx.c +++ b/drivers/net/ethernet/sfc/tx.c @@ -119,6 +119,25 @@ efx_max_tx_len(struct efx_nic *efx, dma_addr_t dma_addr) return len; } +unsigned int efx_tx_max_skb_descs(struct efx_nic *efx) +{ + /* Header and payload descriptor for each output segment, plus + * one for every input fragment boundary within a segment + */ + unsigned int max_descs = EFX_TSO_MAX_SEGS * 2 + MAX_SKB_FRAGS; + + /* Possibly one more per segment for the alignment workaround */ + if (EFX_WORKAROUND_5391(efx)) + max_descs += EFX_TSO_MAX_SEGS; + + /* Possibly more for PCIe page boundaries within input fragments */ + if (PAGE_SIZE > EFX_PAGE_SIZE) + max_descs += max_t(unsigned int, MAX_SKB_FRAGS, + DIV_ROUND_UP(GSO_MAX_SIZE, EFX_PAGE_SIZE)); + + return max_descs; +} + /* * Add a socket buffer to a TX queue * diff --git a/drivers/net/ethernet/ti/davinci_cpdma.c b/drivers/net/ethernet/ti/davinci_cpdma.c index 34558766cbf0..06f2b495428c 100644 --- a/drivers/net/ethernet/ti/davinci_cpdma.c +++ b/drivers/net/ethernet/ti/davinci_cpdma.c @@ -851,6 +851,7 @@ int cpdma_chan_stop(struct cpdma_chan *chan) next_dma = desc_read(desc, hw_next); chan->head = desc_from_phys(pool, next_dma); + chan->count--; chan->stats.teardown_dequeue++; /* issue callback without locks held */ diff --git a/drivers/net/ethernet/via/via-rhine.c b/drivers/net/ethernet/via/via-rhine.c index fcfa01f7ceb6..4c76db4f0589 100644 --- a/drivers/net/ethernet/via/via-rhine.c +++ b/drivers/net/ethernet/via/via-rhine.c @@ -1802,7 +1802,7 @@ static void rhine_tx(struct net_device *dev) rp->tx_skbuff[entry]->len, 
PCI_DMA_TODEVICE); } - dev_kfree_skb_irq(rp->tx_skbuff[entry]); + dev_kfree_skb(rp->tx_skbuff[entry]); rp->tx_skbuff[entry] = NULL; entry = (++rp->dirty_tx) % TX_RING_SIZE; } @@ -2011,11 +2011,7 @@ static void rhine_slow_event_task(struct work_struct *work) if (intr_status & IntrPCIErr) netif_warn(rp, hw, dev, "PCI error\n"); - napi_disable(&rp->napi); - rhine_irq_disable(rp); - /* Slow and safe. Consider __napi_schedule as a replacement ? */ - napi_enable(&rp->napi); - napi_schedule(&rp->napi); + iowrite16(RHINE_EVENT & 0xffff, rp->base + IntrEnable); out_unlock: mutex_unlock(&rp->task_lock); diff --git a/drivers/net/irda/sir_dev.c b/drivers/net/irda/sir_dev.c index 5039f08f5a5b..43e9ab4f4d7e 100644 --- a/drivers/net/irda/sir_dev.c +++ b/drivers/net/irda/sir_dev.c @@ -222,7 +222,7 @@ static void sirdev_config_fsm(struct work_struct *work) break; case SIRDEV_STATE_DONGLE_SPEED: - if (dev->dongle_drv->reset) { + if (dev->dongle_drv->set_speed) { ret = dev->dongle_drv->set_speed(dev, fsm->param); if (ret < 0) { fsm->result = ret; diff --git a/drivers/net/loopback.c b/drivers/net/loopback.c index 32eb94ece6c1..a3d4707505a4 100644 --- a/drivers/net/loopback.c +++ b/drivers/net/loopback.c @@ -77,6 +77,11 @@ static netdev_tx_t loopback_xmit(struct sk_buff *skb, skb_orphan(skb); + /* Before queueing this packet to netif_rx(), + * make sure dst is refcounted. + */ + skb_dst_force(skb); + skb->protocol = eth_type_trans(skb, dev); /* it's OK to use per_cpu_ptr() because BHs are off */ diff --git a/drivers/net/macvtap.c b/drivers/net/macvtap.c index c1d602d5f15e..b99c418d6b2b 100644 --- a/drivers/net/macvtap.c +++ b/drivers/net/macvtap.c @@ -506,10 +506,11 @@ static int zerocopy_sg_from_iovec(struct sk_buff *skb, const struct iovec *from, if (copy > size) { ++from; --count; - } + offset = 0; + } else + offset += size; copy -= size; offset1 += size; - offset = 0; } if (len == offset1) @@ -519,25 +520,28 @@ static int zerocopy_sg_from_iovec(struct sk_buff *skb, const struct iovec *from, struct page *page[MAX_SKB_FRAGS]; int num_pages; unsigned long base; + unsigned long truesize; - len = from->iov_len - offset1; + len = from->iov_len - offset; if (!len) { - offset1 = 0; + offset = 0; ++from; continue; } - base = (unsigned long)from->iov_base + offset1; + base = (unsigned long)from->iov_base + offset; size = ((base & ~PAGE_MASK) + len + ~PAGE_MASK) >> PAGE_SHIFT; if (i + size > MAX_SKB_FRAGS) return -EMSGSIZE; num_pages = get_user_pages_fast(base, size, 0, &page[i]); - if (num_pages != size) - /* put_page is in skb free */ - return -EFAULT; + if (num_pages != size) { + for (i = 0; i < num_pages; i++) + put_page(page[i]); + } + truesize = size * PAGE_SIZE; skb->data_len += len; skb->len += len; - skb->truesize += len; - atomic_add(len, &skb->sk->sk_wmem_alloc); + skb->truesize += truesize; + atomic_add(truesize, &skb->sk->sk_wmem_alloc); while (len) { int off = base & ~PAGE_MASK; int size = min_t(int, len, PAGE_SIZE - off); @@ -548,7 +552,7 @@ static int zerocopy_sg_from_iovec(struct sk_buff *skb, const struct iovec *from, len -= size; i++; } - offset1 = 0; + offset = 0; ++from; } return 0; @@ -712,10 +716,9 @@ static ssize_t macvtap_get_user(struct macvtap_queue *q, struct msghdr *m, if (!skb) goto err; - if (zerocopy) { + if (zerocopy) err = zerocopy_sg_from_iovec(skb, iv, vnet_hdr_len, count); - skb_shinfo(skb)->tx_flags |= SKBTX_DEV_ZEROCOPY; - } else + else err = skb_copy_datagram_from_iovec(skb, 0, iv, vnet_hdr_len, len); if (err) @@ -734,8 +737,10 @@ static ssize_t macvtap_get_user(struct 
macvtap_queue *q, struct msghdr *m, rcu_read_lock_bh(); vlan = rcu_dereference_bh(q->vlan); /* copy skb_ubuf_info for callback when skb has no error */ - if (zerocopy) + if (zerocopy) { skb_shinfo(skb)->destructor_arg = m->msg_control; + skb_shinfo(skb)->tx_flags |= SKBTX_DEV_ZEROCOPY; + } if (vlan) macvlan_start_xmit(skb, vlan->dev); else diff --git a/drivers/net/netconsole.c b/drivers/net/netconsole.c index f9347ea3d381..63ffbdfe36cc 100644 --- a/drivers/net/netconsole.c +++ b/drivers/net/netconsole.c @@ -648,7 +648,6 @@ static int netconsole_netdev_event(struct notifier_block *this, flags); dev_put(nt->np.dev); nt->np.dev = NULL; - netconsole_target_put(nt); } nt->enabled = 0; stopped = true; diff --git a/drivers/net/ppp/pppoe.c b/drivers/net/ppp/pppoe.c index 2fa1a9b6f498..2e0d8762bb52 100644 --- a/drivers/net/ppp/pppoe.c +++ b/drivers/net/ppp/pppoe.c @@ -576,7 +576,7 @@ static int pppoe_release(struct socket *sock) po = pppox_sk(sk); - if (sk->sk_state & (PPPOX_CONNECTED | PPPOX_BOUND)) { + if (sk->sk_state & (PPPOX_CONNECTED | PPPOX_BOUND | PPPOX_ZOMBIE)) { dev_put(po->pppoe_dev); po->pppoe_dev = NULL; } diff --git a/drivers/net/ppp/pptp.c b/drivers/net/ppp/pptp.c index 885dbdd9c39e..f617566bbbae 100644 --- a/drivers/net/ppp/pptp.c +++ b/drivers/net/ppp/pptp.c @@ -189,7 +189,7 @@ static int pptp_xmit(struct ppp_channel *chan, struct sk_buff *skb) if (sk_pppox(po)->sk_state & PPPOX_DEAD) goto tx_error; - rt = ip_route_output_ports(&init_net, &fl4, NULL, + rt = ip_route_output_ports(sock_net(sk), &fl4, NULL, opt->dst_addr.sin_addr.s_addr, opt->src_addr.sin_addr.s_addr, 0, 0, IPPROTO_GRE, @@ -468,7 +468,7 @@ static int pptp_connect(struct socket *sock, struct sockaddr *uservaddr, po->chan.private = sk; po->chan.ops = &pptp_chan_ops; - rt = ip_route_output_ports(&init_net, &fl4, sk, + rt = ip_route_output_ports(sock_net(sk), &fl4, sk, opt->dst_addr.sin_addr.s_addr, opt->src_addr.sin_addr.s_addr, 0, 0, diff --git a/drivers/net/rionet.c b/drivers/net/rionet.c index 91d25888a1b9..1470d3e86e3c 100644 --- a/drivers/net/rionet.c +++ b/drivers/net/rionet.c @@ -79,6 +79,7 @@ static int rionet_capable = 1; * on system trade-offs. */ static struct rio_dev **rionet_active; +static int nact; /* total number of active rionet peers */ #define is_rionet_capable(src_ops, dst_ops) \ ((src_ops & RIO_SRC_OPS_DATA_MSG) && \ @@ -175,6 +176,7 @@ static int rionet_start_xmit(struct sk_buff *skb, struct net_device *ndev) struct ethhdr *eth = (struct ethhdr *)skb->data; u16 destid; unsigned long flags; + int add_num = 1; local_irq_save(flags); if (!spin_trylock(&rnet->tx_lock)) { @@ -182,7 +184,10 @@ static int rionet_start_xmit(struct sk_buff *skb, struct net_device *ndev) return NETDEV_TX_LOCKED; } - if ((rnet->tx_cnt + 1) > RIONET_TX_RING_SIZE) { + if (is_multicast_ether_addr(eth->h_dest)) + add_num = nact; + + if ((rnet->tx_cnt + add_num) > RIONET_TX_RING_SIZE) { netif_stop_queue(ndev); spin_unlock_irqrestore(&rnet->tx_lock, flags); printk(KERN_ERR "%s: BUG! 
Tx Ring full when queue awake!\n", @@ -191,11 +196,16 @@ static int rionet_start_xmit(struct sk_buff *skb, struct net_device *ndev) } if (is_multicast_ether_addr(eth->h_dest)) { + int count = 0; for (i = 0; i < RIO_MAX_ROUTE_ENTRIES(rnet->mport->sys_size); i++) - if (rionet_active[i]) + if (rionet_active[i]) { rionet_queue_tx_msg(skb, ndev, rionet_active[i]); + if (count) + atomic_inc(&skb->users); + count++; + } } else if (RIONET_MAC_MATCH(eth->h_dest)) { destid = RIONET_GET_DESTID(eth->h_dest); if (rionet_active[destid]) @@ -220,14 +230,17 @@ static void rionet_dbell_event(struct rio_mport *mport, void *dev_id, u16 sid, u if (info == RIONET_DOORBELL_JOIN) { if (!rionet_active[sid]) { list_for_each_entry(peer, &rionet_peers, node) { - if (peer->rdev->destid == sid) + if (peer->rdev->destid == sid) { rionet_active[sid] = peer->rdev; + nact++; + } } rio_mport_send_doorbell(mport, sid, RIONET_DOORBELL_JOIN); } } else if (info == RIONET_DOORBELL_LEAVE) { rionet_active[sid] = NULL; + nact--; } else { if (netif_msg_intr(rnet)) printk(KERN_WARNING "%s: unhandled doorbell\n", @@ -523,6 +536,7 @@ static int rionet_probe(struct rio_dev *rdev, const struct rio_device_id *id) rc = rionet_setup_netdev(rdev->net->hport, ndev); rionet_check = 1; + nact = 0; } /* diff --git a/drivers/net/usb/asix.c b/drivers/net/usb/asix.c index 609fcc331595..ef84a586c176 100644 --- a/drivers/net/usb/asix.c +++ b/drivers/net/usb/asix.c @@ -1604,6 +1604,10 @@ static const struct usb_device_id products [] = { USB_DEVICE (0x2001, 0x3c05), .driver_info = (unsigned long) &ax88772_info, }, { + // DLink DUB-E100 H/W Ver C1 + USB_DEVICE (0x2001, 0x1a02), + .driver_info = (unsigned long) &ax88772_info, +}, { // Linksys USB1000 USB_DEVICE (0x1737, 0x0039), .driver_info = (unsigned long) &ax88178_info, diff --git a/drivers/net/usb/ipheth.c b/drivers/net/usb/ipheth.c index 5cba41517f73..32e47918e59d 100644 --- a/drivers/net/usb/ipheth.c +++ b/drivers/net/usb/ipheth.c @@ -62,6 +62,7 @@ #define USB_PRODUCT_IPAD 0x129a #define USB_PRODUCT_IPHONE_4_VZW 0x129c #define USB_PRODUCT_IPHONE_4S 0x12a0 +#define USB_PRODUCT_IPHONE_5 0x12a8 #define IPHETH_USBINTF_CLASS 255 #define IPHETH_USBINTF_SUBCLASS 253 @@ -113,6 +114,10 @@ static struct usb_device_id ipheth_table[] = { USB_VENDOR_APPLE, USB_PRODUCT_IPHONE_4S, IPHETH_USBINTF_CLASS, IPHETH_USBINTF_SUBCLASS, IPHETH_USBINTF_PROTO) }, + { USB_DEVICE_AND_INTERFACE_INFO( + USB_VENDOR_APPLE, USB_PRODUCT_IPHONE_5, + IPHETH_USBINTF_CLASS, IPHETH_USBINTF_SUBCLASS, + IPHETH_USBINTF_PROTO) }, { } }; MODULE_DEVICE_TABLE(usb, ipheth_table); diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c index c2ae4266617d..8669c77fa7c3 100644 --- a/drivers/net/usb/qmi_wwan.c +++ b/drivers/net/usb/qmi_wwan.c @@ -346,6 +346,15 @@ static const struct driver_info qmi_wwan_force_int1 = { .data = BIT(1), /* interface whitelist bitmap */ }; +static const struct driver_info qmi_wwan_force_int2 = { + .description = "Qualcomm WWAN/QMI device", + .flags = FLAG_WWAN, + .bind = qmi_wwan_bind_shared, + .unbind = qmi_wwan_unbind_shared, + .manage_power = qmi_wwan_manage_power, + .data = BIT(2), /* interface whitelist bitmap */ +}; + static const struct driver_info qmi_wwan_force_int3 = { .description = "Qualcomm WWAN/QMI device", .flags = FLAG_WWAN, @@ -407,6 +416,14 @@ static const struct usb_device_id products[] = { .bInterfaceProtocol = 8, /* NOTE: This is the *slave* interface of the CDC Union! 
*/ .driver_info = (unsigned long)&qmi_wwan_info, }, + { /* Vodafone/Huawei K5005 (12d1:14c8) and similar modems */ + .match_flags = USB_DEVICE_ID_MATCH_VENDOR | USB_DEVICE_ID_MATCH_INT_INFO, + .idVendor = HUAWEI_VENDOR_ID, + .bInterfaceClass = USB_CLASS_VENDOR_SPEC, + .bInterfaceSubClass = 1, + .bInterfaceProtocol = 56, /* NOTE: This is the *slave* interface of the CDC Union! */ + .driver_info = (unsigned long)&qmi_wwan_info, + }, { /* Huawei E392, E398 and possibly others in "Windows mode" * using a combined control and data interface without any CDC * functional descriptors @@ -427,6 +444,15 @@ static const struct usb_device_id products[] = { .bInterfaceProtocol = 0xff, .driver_info = (unsigned long)&qmi_wwan_shared, }, + { /* Pantech UML290 - newer firmware */ + .match_flags = USB_DEVICE_ID_MATCH_DEVICE | USB_DEVICE_ID_MATCH_INT_INFO, + .idVendor = 0x106c, + .idProduct = 0x3718, + .bInterfaceClass = 0xff, + .bInterfaceSubClass = 0xf1, + .bInterfaceProtocol = 0xff, + .driver_info = (unsigned long)&qmi_wwan_shared, + }, { /* ZTE MF820D */ .match_flags = USB_DEVICE_ID_MATCH_DEVICE | USB_DEVICE_ID_MATCH_INT_INFO, .idVendor = 0x19d2, @@ -436,6 +462,15 @@ static const struct usb_device_id products[] = { .bInterfaceProtocol = 0xff, .driver_info = (unsigned long)&qmi_wwan_force_int4, }, + { /* ZTE MF821D */ + .match_flags = USB_DEVICE_ID_MATCH_DEVICE | USB_DEVICE_ID_MATCH_INT_INFO, + .idVendor = 0x19d2, + .idProduct = 0x0326, + .bInterfaceClass = 0xff, + .bInterfaceSubClass = 0xff, + .bInterfaceProtocol = 0xff, + .driver_info = (unsigned long)&qmi_wwan_force_int4, + }, { /* ZTE (Vodafone) K3520-Z */ .match_flags = USB_DEVICE_ID_MATCH_DEVICE | USB_DEVICE_ID_MATCH_INT_INFO, .idVendor = 0x19d2, @@ -472,6 +507,15 @@ static const struct usb_device_id products[] = { .bInterfaceProtocol = 0xff, .driver_info = (unsigned long)&qmi_wwan_force_int4, }, + { /* ZTE (Vodafone) K3765-Z */ + .match_flags = USB_DEVICE_ID_MATCH_DEVICE | USB_DEVICE_ID_MATCH_INT_INFO, + .idVendor = 0x19d2, + .idProduct = 0x2002, + .bInterfaceClass = 0xff, + .bInterfaceSubClass = 0xff, + .bInterfaceProtocol = 0xff, + .driver_info = (unsigned long)&qmi_wwan_force_int4, + }, { /* ZTE (Vodafone) K4505-Z */ .match_flags = USB_DEVICE_ID_MATCH_DEVICE | USB_DEVICE_ID_MATCH_INT_INFO, .idVendor = 0x19d2, @@ -481,6 +525,24 @@ static const struct usb_device_id products[] = { .bInterfaceProtocol = 0xff, .driver_info = (unsigned long)&qmi_wwan_force_int4, }, + { /* ZTE (Vodafone) K5006-Z */ + .match_flags = USB_DEVICE_ID_MATCH_DEVICE | USB_DEVICE_ID_MATCH_INT_INFO, + .idVendor = 0x19d2, + .idProduct = 0x1018, + .bInterfaceClass = 0xff, + .bInterfaceSubClass = 0xff, + .bInterfaceProtocol = 0xff, + .driver_info = (unsigned long)&qmi_wwan_force_int3, + }, + { /* ZTE MF60 */ + .match_flags = USB_DEVICE_ID_MATCH_DEVICE | USB_DEVICE_ID_MATCH_INT_INFO, + .idVendor = 0x19d2, + .idProduct = 0x1402, + .bInterfaceClass = 0xff, + .bInterfaceSubClass = 0xff, + .bInterfaceProtocol = 0xff, + .driver_info = (unsigned long)&qmi_wwan_force_int2, + }, { /* Sierra Wireless MC77xx in QMI mode */ .match_flags = USB_DEVICE_ID_MATCH_DEVICE | USB_DEVICE_ID_MATCH_INT_INFO, .idVendor = 0x1199, @@ -490,6 +552,33 @@ static const struct usb_device_id products[] = { .bInterfaceProtocol = 0xff, .driver_info = (unsigned long)&qmi_wwan_sierra, }, + { /* Sierra Wireless MC7700 */ + .match_flags = USB_DEVICE_ID_MATCH_DEVICE | USB_DEVICE_ID_MATCH_INT_INFO, + .idVendor = 0x0f3d, + .idProduct = 0x68a2, + .bInterfaceClass = 0xff, + .bInterfaceSubClass = 0xff, + .bInterfaceProtocol = 
0xff, + .driver_info = (unsigned long)&qmi_wwan_sierra, + }, + { /* Sierra Wireless MC7750 */ + .match_flags = USB_DEVICE_ID_MATCH_DEVICE | USB_DEVICE_ID_MATCH_INT_INFO, + .idVendor = 0x114f, + .idProduct = 0x68a2, + .bInterfaceClass = 0xff, + .bInterfaceSubClass = 0xff, + .bInterfaceProtocol = 0xff, + .driver_info = (unsigned long)&qmi_wwan_sierra, + }, + { /* Sierra Wireless EM7700 */ + .match_flags = USB_DEVICE_ID_MATCH_DEVICE | USB_DEVICE_ID_MATCH_INT_INFO, + .idVendor = 0x1199, + .idProduct = 0x901c, + .bInterfaceClass = 0xff, + .bInterfaceSubClass = 0xff, + .bInterfaceProtocol = 0xff, + .driver_info = (unsigned long)&qmi_wwan_sierra, + }, /* Gobi 1000 devices */ {QMI_GOBI1K_DEVICE(0x05c6, 0x9212)}, /* Acer Gobi Modem Device */ @@ -517,6 +606,8 @@ static const struct usb_device_id products[] = { {QMI_GOBI_DEVICE(0x05c6, 0x9265)}, /* Asus Gobi 2000 Modem device (VR305) */ {QMI_GOBI_DEVICE(0x05c6, 0x9235)}, /* Top Global Gobi 2000 Modem device (VR306) */ {QMI_GOBI_DEVICE(0x05c6, 0x9275)}, /* iRex Technologies Gobi 2000 Modem device (VR307) */ + {QMI_GOBI_DEVICE(0x1199, 0x68a5)}, /* Sierra Wireless Modem */ + {QMI_GOBI_DEVICE(0x1199, 0x68a9)}, /* Sierra Wireless Modem */ {QMI_GOBI_DEVICE(0x1199, 0x9001)}, /* Sierra Wireless Gobi 2000 Modem device (VT773) */ {QMI_GOBI_DEVICE(0x1199, 0x9002)}, /* Sierra Wireless Gobi 2000 Modem device (VT773) */ {QMI_GOBI_DEVICE(0x1199, 0x9003)}, /* Sierra Wireless Gobi 2000 Modem device (VT773) */ @@ -531,6 +622,10 @@ static const struct usb_device_id products[] = { {QMI_GOBI_DEVICE(0x16d8, 0x8002)}, /* CMDTech Gobi 2000 Modem device (VU922) */ {QMI_GOBI_DEVICE(0x05c6, 0x9205)}, /* Gobi 2000 Modem device */ {QMI_GOBI_DEVICE(0x1199, 0x9013)}, /* Sierra Wireless Gobi 3000 Modem device (MC8355) */ + {QMI_GOBI_DEVICE(0x1199, 0x9015)}, /* Sierra Wireless Gobi 3000 Modem device */ + {QMI_GOBI_DEVICE(0x1199, 0x9019)}, /* Sierra Wireless Gobi 3000 Modem device */ + {QMI_GOBI_DEVICE(0x1199, 0x901b)}, /* Sierra Wireless MC7770 */ + { } /* END */ }; MODULE_DEVICE_TABLE(usb, products); diff --git a/drivers/net/usb/sierra_net.c b/drivers/net/usb/sierra_net.c index cc9776c2a39a..8789bc58b176 100644 --- a/drivers/net/usb/sierra_net.c +++ b/drivers/net/usb/sierra_net.c @@ -678,7 +678,7 @@ static int sierra_net_get_fw_attr(struct usbnet *dev, u16 *datap) return -EIO; } - *datap = *attrdata; + *datap = le16_to_cpu(*attrdata); kfree(attrdata); return result; diff --git a/drivers/net/usb/usbnet.c b/drivers/net/usb/usbnet.c index c980431e44b7..0d988054cb66 100644 --- a/drivers/net/usb/usbnet.c +++ b/drivers/net/usb/usbnet.c @@ -1158,6 +1158,7 @@ netdev_tx_t usbnet_start_xmit (struct sk_buff *skb, usb_anchor_urb(urb, &dev->deferred); /* no use to process more packets */ netif_stop_queue(net); + usb_put_urb(urb); spin_unlock_irqrestore(&dev->txq.lock, flags); netdev_dbg(dev->net, "Delaying transmission for resumption\n"); goto deferred; @@ -1299,6 +1300,8 @@ void usbnet_disconnect (struct usb_interface *intf) cancel_work_sync(&dev->kevent); + usb_scuttle_anchored_urbs(&dev->deferred); + if (dev->driver_info->unbind) dev->driver_info->unbind (dev, intf); diff --git a/drivers/net/wan/ixp4xx_hss.c b/drivers/net/wan/ixp4xx_hss.c index aaaca9aa2293..3f575afd8cfc 100644 --- a/drivers/net/wan/ixp4xx_hss.c +++ b/drivers/net/wan/ixp4xx_hss.c @@ -10,6 +10,7 @@ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt +#include <linux/module.h> #include <linux/bitops.h> #include <linux/cdev.h> #include <linux/dma-mapping.h> diff --git a/drivers/net/wimax/i2400m/i2400m-usb.h 
b/drivers/net/wimax/i2400m/i2400m-usb.h index 6650fde99e1d..9f1e947f3557 100644 --- a/drivers/net/wimax/i2400m/i2400m-usb.h +++ b/drivers/net/wimax/i2400m/i2400m-usb.h @@ -152,6 +152,9 @@ enum { /* Device IDs */ USB_DEVICE_ID_I6050 = 0x0186, USB_DEVICE_ID_I6050_2 = 0x0188, + USB_DEVICE_ID_I6150 = 0x07d6, + USB_DEVICE_ID_I6150_2 = 0x07d7, + USB_DEVICE_ID_I6150_3 = 0x07d9, USB_DEVICE_ID_I6250 = 0x0187, }; diff --git a/drivers/net/wimax/i2400m/usb.c b/drivers/net/wimax/i2400m/usb.c index 29b1e033a10b..f0f16e0c6127 100644 --- a/drivers/net/wimax/i2400m/usb.c +++ b/drivers/net/wimax/i2400m/usb.c @@ -510,6 +510,9 @@ int i2400mu_probe(struct usb_interface *iface, switch (id->idProduct) { case USB_DEVICE_ID_I6050: case USB_DEVICE_ID_I6050_2: + case USB_DEVICE_ID_I6150: + case USB_DEVICE_ID_I6150_2: + case USB_DEVICE_ID_I6150_3: case USB_DEVICE_ID_I6250: i2400mu->i6050 = 1; break; @@ -759,6 +762,9 @@ static struct usb_device_id i2400mu_id_table[] = { { USB_DEVICE(0x8086, USB_DEVICE_ID_I6050) }, { USB_DEVICE(0x8086, USB_DEVICE_ID_I6050_2) }, + { USB_DEVICE(0x8087, USB_DEVICE_ID_I6150) }, + { USB_DEVICE(0x8087, USB_DEVICE_ID_I6150_2) }, + { USB_DEVICE(0x8087, USB_DEVICE_ID_I6150_3) }, { USB_DEVICE(0x8086, USB_DEVICE_ID_I6250) }, { USB_DEVICE(0x8086, 0x0181) }, { USB_DEVICE(0x8086, 0x1403) }, diff --git a/drivers/net/wireless/ath/ath5k/base.c b/drivers/net/wireless/ath/ath5k/base.c index 0e643b016b32..c7d1437c7709 100644 --- a/drivers/net/wireless/ath/ath5k/base.c +++ b/drivers/net/wireless/ath/ath5k/base.c @@ -843,7 +843,7 @@ ath5k_txbuf_free_skb(struct ath5k_hw *ah, struct ath5k_buf *bf) return; dma_unmap_single(ah->dev, bf->skbaddr, bf->skb->len, DMA_TO_DEVICE); - dev_kfree_skb_any(bf->skb); + ieee80211_free_txskb(ah->hw, bf->skb); bf->skb = NULL; bf->skbaddr = 0; bf->desc->ds_data = 0; @@ -1570,7 +1570,7 @@ ath5k_tx_queue(struct ieee80211_hw *hw, struct sk_buff *skb, return; drop_packet: - dev_kfree_skb_any(skb); + ieee80211_free_txskb(hw, skb); } static void diff --git a/drivers/net/wireless/ath/ath5k/mac80211-ops.c b/drivers/net/wireless/ath/ath5k/mac80211-ops.c index 5c5329955414..5d1be4daa56a 100644 --- a/drivers/net/wireless/ath/ath5k/mac80211-ops.c +++ b/drivers/net/wireless/ath/ath5k/mac80211-ops.c @@ -59,7 +59,7 @@ ath5k_tx(struct ieee80211_hw *hw, struct sk_buff *skb) u16 qnum = skb_get_queue_mapping(skb); if (WARN_ON(qnum >= ah->ah_capabilities.cap_queues.q_tx_num)) { - dev_kfree_skb_any(skb); + ieee80211_free_txskb(hw, skb); return; } diff --git a/drivers/net/wireless/ath/ath9k/ar9003_2p2_initvals.h b/drivers/net/wireless/ath/ath9k/ar9003_2p2_initvals.h index 46c79a3d4737..4e2a52c342db 100644 --- a/drivers/net/wireless/ath/ath9k/ar9003_2p2_initvals.h +++ b/drivers/net/wireless/ath/ath9k/ar9003_2p2_initvals.h @@ -533,107 +533,107 @@ static const u32 ar9300_2p2_baseband_core[][2] = { static const u32 ar9300Modes_high_power_tx_gain_table_2p2[][5] = { /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */ - {0x0000a2dc, 0x000cfff0, 0x000cfff0, 0x03aaa352, 0x03aaa352}, - {0x0000a2e0, 0x000f0000, 0x000f0000, 0x03ccc584, 0x03ccc584}, - {0x0000a2e4, 0x03f00000, 0x03f00000, 0x03f0f800, 0x03f0f800}, + {0x0000a2dc, 0x0380c7fc, 0x0380c7fc, 0x03aaa352, 0x03aaa352}, + {0x0000a2e0, 0x0000f800, 0x0000f800, 0x03ccc584, 0x03ccc584}, + {0x0000a2e4, 0x03ff0000, 0x03ff0000, 0x03f0f800, 0x03f0f800}, {0x0000a2e8, 0x00000000, 0x00000000, 0x03ff0000, 0x03ff0000}, - {0x0000a410, 0x000050d9, 0x000050d9, 0x000050d9, 0x000050d9}, - {0x0000a500, 0x00000000, 0x00000000, 0x00000000, 0x00000000}, - {0x0000a504, 0x06000003, 
0x06000003, 0x04000002, 0x04000002}, - {0x0000a508, 0x0a000020, 0x0a000020, 0x08000004, 0x08000004}, - {0x0000a50c, 0x10000023, 0x10000023, 0x0b000200, 0x0b000200}, - {0x0000a510, 0x15000028, 0x15000028, 0x0f000202, 0x0f000202}, - {0x0000a514, 0x1b00002b, 0x1b00002b, 0x12000400, 0x12000400}, - {0x0000a518, 0x1f020028, 0x1f020028, 0x16000402, 0x16000402}, - {0x0000a51c, 0x2502002b, 0x2502002b, 0x19000404, 0x19000404}, - {0x0000a520, 0x2a04002a, 0x2a04002a, 0x1c000603, 0x1c000603}, - {0x0000a524, 0x2e06002a, 0x2e06002a, 0x21000a02, 0x21000a02}, - {0x0000a528, 0x3302202d, 0x3302202d, 0x25000a04, 0x25000a04}, - {0x0000a52c, 0x3804202c, 0x3804202c, 0x28000a20, 0x28000a20}, - {0x0000a530, 0x3c06202c, 0x3c06202c, 0x2c000e20, 0x2c000e20}, - {0x0000a534, 0x4108202d, 0x4108202d, 0x30000e22, 0x30000e22}, - {0x0000a538, 0x4506402d, 0x4506402d, 0x34000e24, 0x34000e24}, - {0x0000a53c, 0x4906222d, 0x4906222d, 0x38001640, 0x38001640}, - {0x0000a540, 0x4d062231, 0x4d062231, 0x3c001660, 0x3c001660}, - {0x0000a544, 0x50082231, 0x50082231, 0x3f001861, 0x3f001861}, - {0x0000a548, 0x5608422e, 0x5608422e, 0x43001a81, 0x43001a81}, - {0x0000a54c, 0x5a08442e, 0x5a08442e, 0x47001a83, 0x47001a83}, - {0x0000a550, 0x5e0a4431, 0x5e0a4431, 0x4a001c84, 0x4a001c84}, - {0x0000a554, 0x640a4432, 0x640a4432, 0x4e001ce3, 0x4e001ce3}, - {0x0000a558, 0x680a4434, 0x680a4434, 0x52001ce5, 0x52001ce5}, - {0x0000a55c, 0x6c0a6434, 0x6c0a6434, 0x56001ce9, 0x56001ce9}, - {0x0000a560, 0x6f0a6633, 0x6f0a6633, 0x5a001ceb, 0x5a001ceb}, - {0x0000a564, 0x730c6634, 0x730c6634, 0x5d001eec, 0x5d001eec}, - {0x0000a568, 0x730c6634, 0x730c6634, 0x5d001eec, 0x5d001eec}, - {0x0000a56c, 0x730c6634, 0x730c6634, 0x5d001eec, 0x5d001eec}, - {0x0000a570, 0x730c6634, 0x730c6634, 0x5d001eec, 0x5d001eec}, - {0x0000a574, 0x730c6634, 0x730c6634, 0x5d001eec, 0x5d001eec}, - {0x0000a578, 0x730c6634, 0x730c6634, 0x5d001eec, 0x5d001eec}, - {0x0000a57c, 0x730c6634, 0x730c6634, 0x5d001eec, 0x5d001eec}, - {0x0000a580, 0x00800000, 0x00800000, 0x00800000, 0x00800000}, - {0x0000a584, 0x06800003, 0x06800003, 0x04800002, 0x04800002}, - {0x0000a588, 0x0a800020, 0x0a800020, 0x08800004, 0x08800004}, - {0x0000a58c, 0x10800023, 0x10800023, 0x0b800200, 0x0b800200}, - {0x0000a590, 0x15800028, 0x15800028, 0x0f800202, 0x0f800202}, - {0x0000a594, 0x1b80002b, 0x1b80002b, 0x12800400, 0x12800400}, - {0x0000a598, 0x1f820028, 0x1f820028, 0x16800402, 0x16800402}, - {0x0000a59c, 0x2582002b, 0x2582002b, 0x19800404, 0x19800404}, - {0x0000a5a0, 0x2a84002a, 0x2a84002a, 0x1c800603, 0x1c800603}, - {0x0000a5a4, 0x2e86002a, 0x2e86002a, 0x21800a02, 0x21800a02}, - {0x0000a5a8, 0x3382202d, 0x3382202d, 0x25800a04, 0x25800a04}, - {0x0000a5ac, 0x3884202c, 0x3884202c, 0x28800a20, 0x28800a20}, - {0x0000a5b0, 0x3c86202c, 0x3c86202c, 0x2c800e20, 0x2c800e20}, - {0x0000a5b4, 0x4188202d, 0x4188202d, 0x30800e22, 0x30800e22}, - {0x0000a5b8, 0x4586402d, 0x4586402d, 0x34800e24, 0x34800e24}, - {0x0000a5bc, 0x4986222d, 0x4986222d, 0x38801640, 0x38801640}, - {0x0000a5c0, 0x4d862231, 0x4d862231, 0x3c801660, 0x3c801660}, - {0x0000a5c4, 0x50882231, 0x50882231, 0x3f801861, 0x3f801861}, - {0x0000a5c8, 0x5688422e, 0x5688422e, 0x43801a81, 0x43801a81}, - {0x0000a5cc, 0x5a88442e, 0x5a88442e, 0x47801a83, 0x47801a83}, - {0x0000a5d0, 0x5e8a4431, 0x5e8a4431, 0x4a801c84, 0x4a801c84}, - {0x0000a5d4, 0x648a4432, 0x648a4432, 0x4e801ce3, 0x4e801ce3}, - {0x0000a5d8, 0x688a4434, 0x688a4434, 0x52801ce5, 0x52801ce5}, - {0x0000a5dc, 0x6c8a6434, 0x6c8a6434, 0x56801ce9, 0x56801ce9}, - {0x0000a5e0, 0x6f8a6633, 0x6f8a6633, 0x5a801ceb, 
0x5a801ceb}, - {0x0000a5e4, 0x738c6634, 0x738c6634, 0x5d801eec, 0x5d801eec}, - {0x0000a5e8, 0x738c6634, 0x738c6634, 0x5d801eec, 0x5d801eec}, - {0x0000a5ec, 0x738c6634, 0x738c6634, 0x5d801eec, 0x5d801eec}, - {0x0000a5f0, 0x738c6634, 0x738c6634, 0x5d801eec, 0x5d801eec}, - {0x0000a5f4, 0x738c6634, 0x738c6634, 0x5d801eec, 0x5d801eec}, - {0x0000a5f8, 0x738c6634, 0x738c6634, 0x5d801eec, 0x5d801eec}, - {0x0000a5fc, 0x738c6634, 0x738c6634, 0x5d801eec, 0x5d801eec}, + {0x0000a410, 0x000050d8, 0x000050d8, 0x000050d9, 0x000050d9}, + {0x0000a500, 0x00002220, 0x00002220, 0x00000000, 0x00000000}, + {0x0000a504, 0x04002222, 0x04002222, 0x04000002, 0x04000002}, + {0x0000a508, 0x09002421, 0x09002421, 0x08000004, 0x08000004}, + {0x0000a50c, 0x0d002621, 0x0d002621, 0x0b000200, 0x0b000200}, + {0x0000a510, 0x13004620, 0x13004620, 0x0f000202, 0x0f000202}, + {0x0000a514, 0x19004a20, 0x19004a20, 0x11000400, 0x11000400}, + {0x0000a518, 0x1d004e20, 0x1d004e20, 0x15000402, 0x15000402}, + {0x0000a51c, 0x21005420, 0x21005420, 0x19000404, 0x19000404}, + {0x0000a520, 0x26005e20, 0x26005e20, 0x1b000603, 0x1b000603}, + {0x0000a524, 0x2b005e40, 0x2b005e40, 0x1f000a02, 0x1f000a02}, + {0x0000a528, 0x2f005e42, 0x2f005e42, 0x23000a04, 0x23000a04}, + {0x0000a52c, 0x33005e44, 0x33005e44, 0x26000a20, 0x26000a20}, + {0x0000a530, 0x38005e65, 0x38005e65, 0x2a000e20, 0x2a000e20}, + {0x0000a534, 0x3c005e69, 0x3c005e69, 0x2e000e22, 0x2e000e22}, + {0x0000a538, 0x40005e6b, 0x40005e6b, 0x31000e24, 0x31000e24}, + {0x0000a53c, 0x44005e6d, 0x44005e6d, 0x34001640, 0x34001640}, + {0x0000a540, 0x49005e72, 0x49005e72, 0x38001660, 0x38001660}, + {0x0000a544, 0x4e005eb2, 0x4e005eb2, 0x3b001861, 0x3b001861}, + {0x0000a548, 0x53005f12, 0x53005f12, 0x3e001a81, 0x3e001a81}, + {0x0000a54c, 0x59025eb2, 0x59025eb2, 0x42001a83, 0x42001a83}, + {0x0000a550, 0x5e025f12, 0x5e025f12, 0x44001c84, 0x44001c84}, + {0x0000a554, 0x61027f12, 0x61027f12, 0x48001ce3, 0x48001ce3}, + {0x0000a558, 0x6702bf12, 0x6702bf12, 0x4c001ce5, 0x4c001ce5}, + {0x0000a55c, 0x6b02bf14, 0x6b02bf14, 0x50001ce9, 0x50001ce9}, + {0x0000a560, 0x6f02bf16, 0x6f02bf16, 0x54001ceb, 0x54001ceb}, + {0x0000a564, 0x6f02bf16, 0x6f02bf16, 0x56001eec, 0x56001eec}, + {0x0000a568, 0x6f02bf16, 0x6f02bf16, 0x56001eec, 0x56001eec}, + {0x0000a56c, 0x6f02bf16, 0x6f02bf16, 0x56001eec, 0x56001eec}, + {0x0000a570, 0x6f02bf16, 0x6f02bf16, 0x56001eec, 0x56001eec}, + {0x0000a574, 0x6f02bf16, 0x6f02bf16, 0x56001eec, 0x56001eec}, + {0x0000a578, 0x6f02bf16, 0x6f02bf16, 0x56001eec, 0x56001eec}, + {0x0000a57c, 0x6f02bf16, 0x6f02bf16, 0x56001eec, 0x56001eec}, + {0x0000a580, 0x00802220, 0x00802220, 0x00800000, 0x00800000}, + {0x0000a584, 0x04802222, 0x04802222, 0x04800002, 0x04800002}, + {0x0000a588, 0x09802421, 0x09802421, 0x08800004, 0x08800004}, + {0x0000a58c, 0x0d802621, 0x0d802621, 0x0b800200, 0x0b800200}, + {0x0000a590, 0x13804620, 0x13804620, 0x0f800202, 0x0f800202}, + {0x0000a594, 0x19804a20, 0x19804a20, 0x11800400, 0x11800400}, + {0x0000a598, 0x1d804e20, 0x1d804e20, 0x15800402, 0x15800402}, + {0x0000a59c, 0x21805420, 0x21805420, 0x19800404, 0x19800404}, + {0x0000a5a0, 0x26805e20, 0x26805e20, 0x1b800603, 0x1b800603}, + {0x0000a5a4, 0x2b805e40, 0x2b805e40, 0x1f800a02, 0x1f800a02}, + {0x0000a5a8, 0x2f805e42, 0x2f805e42, 0x23800a04, 0x23800a04}, + {0x0000a5ac, 0x33805e44, 0x33805e44, 0x26800a20, 0x26800a20}, + {0x0000a5b0, 0x38805e65, 0x38805e65, 0x2a800e20, 0x2a800e20}, + {0x0000a5b4, 0x3c805e69, 0x3c805e69, 0x2e800e22, 0x2e800e22}, + {0x0000a5b8, 0x40805e6b, 0x40805e6b, 0x31800e24, 0x31800e24}, + {0x0000a5bc, 
0x44805e6d, 0x44805e6d, 0x34801640, 0x34801640}, + {0x0000a5c0, 0x49805e72, 0x49805e72, 0x38801660, 0x38801660}, + {0x0000a5c4, 0x4e805eb2, 0x4e805eb2, 0x3b801861, 0x3b801861}, + {0x0000a5c8, 0x53805f12, 0x53805f12, 0x3e801a81, 0x3e801a81}, + {0x0000a5cc, 0x59825eb2, 0x59825eb2, 0x42801a83, 0x42801a83}, + {0x0000a5d0, 0x5e825f12, 0x5e825f12, 0x44801c84, 0x44801c84}, + {0x0000a5d4, 0x61827f12, 0x61827f12, 0x48801ce3, 0x48801ce3}, + {0x0000a5d8, 0x6782bf12, 0x6782bf12, 0x4c801ce5, 0x4c801ce5}, + {0x0000a5dc, 0x6b82bf14, 0x6b82bf14, 0x50801ce9, 0x50801ce9}, + {0x0000a5e0, 0x6f82bf16, 0x6f82bf16, 0x54801ceb, 0x54801ceb}, + {0x0000a5e4, 0x6f82bf16, 0x6f82bf16, 0x56801eec, 0x56801eec}, + {0x0000a5e8, 0x6f82bf16, 0x6f82bf16, 0x56801eec, 0x56801eec}, + {0x0000a5ec, 0x6f82bf16, 0x6f82bf16, 0x56801eec, 0x56801eec}, + {0x0000a5f0, 0x6f82bf16, 0x6f82bf16, 0x56801eec, 0x56801eec}, + {0x0000a5f4, 0x6f82bf16, 0x6f82bf16, 0x56801eec, 0x56801eec}, + {0x0000a5f8, 0x6f82bf16, 0x6f82bf16, 0x56801eec, 0x56801eec}, + {0x0000a5fc, 0x6f82bf16, 0x6f82bf16, 0x56801eec, 0x56801eec}, {0x0000a600, 0x00000000, 0x00000000, 0x00000000, 0x00000000}, {0x0000a604, 0x00000000, 0x00000000, 0x00000000, 0x00000000}, - {0x0000a608, 0x01804601, 0x01804601, 0x00000000, 0x00000000}, - {0x0000a60c, 0x01804601, 0x01804601, 0x00000000, 0x00000000}, - {0x0000a610, 0x01804601, 0x01804601, 0x00000000, 0x00000000}, - {0x0000a614, 0x01804601, 0x01804601, 0x01404000, 0x01404000}, - {0x0000a618, 0x01804601, 0x01804601, 0x01404501, 0x01404501}, - {0x0000a61c, 0x01804601, 0x01804601, 0x02008501, 0x02008501}, - {0x0000a620, 0x03408d02, 0x03408d02, 0x0280ca03, 0x0280ca03}, - {0x0000a624, 0x0300cc03, 0x0300cc03, 0x03010c04, 0x03010c04}, - {0x0000a628, 0x03410d04, 0x03410d04, 0x04014c04, 0x04014c04}, - {0x0000a62c, 0x03410d04, 0x03410d04, 0x04015005, 0x04015005}, - {0x0000a630, 0x03410d04, 0x03410d04, 0x04015005, 0x04015005}, - {0x0000a634, 0x03410d04, 0x03410d04, 0x04015005, 0x04015005}, - {0x0000a638, 0x03410d04, 0x03410d04, 0x04015005, 0x04015005}, - {0x0000a63c, 0x03410d04, 0x03410d04, 0x04015005, 0x04015005}, - {0x0000b2dc, 0x000cfff0, 0x000cfff0, 0x03aaa352, 0x03aaa352}, - {0x0000b2e0, 0x000f0000, 0x000f0000, 0x03ccc584, 0x03ccc584}, - {0x0000b2e4, 0x03f00000, 0x03f00000, 0x03f0f800, 0x03f0f800}, + {0x0000a608, 0x00000000, 0x00000000, 0x00000000, 0x00000000}, + {0x0000a60c, 0x00000000, 0x00000000, 0x00000000, 0x00000000}, + {0x0000a610, 0x00804000, 0x00804000, 0x00000000, 0x00000000}, + {0x0000a614, 0x00804201, 0x00804201, 0x01404000, 0x01404000}, + {0x0000a618, 0x0280c802, 0x0280c802, 0x01404501, 0x01404501}, + {0x0000a61c, 0x0280ca03, 0x0280ca03, 0x02008501, 0x02008501}, + {0x0000a620, 0x04c15104, 0x04c15104, 0x0280ca03, 0x0280ca03}, + {0x0000a624, 0x04c15305, 0x04c15305, 0x03010c04, 0x03010c04}, + {0x0000a628, 0x04c15305, 0x04c15305, 0x04014c04, 0x04014c04}, + {0x0000a62c, 0x04c15305, 0x04c15305, 0x04015005, 0x04015005}, + {0x0000a630, 0x04c15305, 0x04c15305, 0x04015005, 0x04015005}, + {0x0000a634, 0x04c15305, 0x04c15305, 0x04015005, 0x04015005}, + {0x0000a638, 0x04c15305, 0x04c15305, 0x04015005, 0x04015005}, + {0x0000a63c, 0x04c15305, 0x04c15305, 0x04015005, 0x04015005}, + {0x0000b2dc, 0x0380c7fc, 0x0380c7fc, 0x03aaa352, 0x03aaa352}, + {0x0000b2e0, 0x0000f800, 0x0000f800, 0x03ccc584, 0x03ccc584}, + {0x0000b2e4, 0x03ff0000, 0x03ff0000, 0x03f0f800, 0x03f0f800}, {0x0000b2e8, 0x00000000, 0x00000000, 0x03ff0000, 0x03ff0000}, - {0x0000c2dc, 0x000cfff0, 0x000cfff0, 0x03aaa352, 0x03aaa352}, - {0x0000c2e0, 0x000f0000, 0x000f0000, 0x03ccc584, 
0x03ccc584}, - {0x0000c2e4, 0x03f00000, 0x03f00000, 0x03f0f800, 0x03f0f800}, + {0x0000c2dc, 0x0380c7fc, 0x0380c7fc, 0x03aaa352, 0x03aaa352}, + {0x0000c2e0, 0x0000f800, 0x0000f800, 0x03ccc584, 0x03ccc584}, + {0x0000c2e4, 0x03ff0000, 0x03ff0000, 0x03f0f800, 0x03f0f800}, {0x0000c2e8, 0x00000000, 0x00000000, 0x03ff0000, 0x03ff0000}, {0x00016044, 0x012492d4, 0x012492d4, 0x012492d4, 0x012492d4}, - {0x00016048, 0x61200001, 0x61200001, 0x66480001, 0x66480001}, + {0x00016048, 0x66480001, 0x66480001, 0x66480001, 0x66480001}, {0x00016068, 0x6db6db6c, 0x6db6db6c, 0x6db6db6c, 0x6db6db6c}, {0x00016444, 0x012492d4, 0x012492d4, 0x012492d4, 0x012492d4}, - {0x00016448, 0x61200001, 0x61200001, 0x66480001, 0x66480001}, + {0x00016448, 0x66480001, 0x66480001, 0x66480001, 0x66480001}, {0x00016468, 0x6db6db6c, 0x6db6db6c, 0x6db6db6c, 0x6db6db6c}, {0x00016844, 0x012492d4, 0x012492d4, 0x012492d4, 0x012492d4}, - {0x00016848, 0x61200001, 0x61200001, 0x66480001, 0x66480001}, + {0x00016848, 0x66480001, 0x66480001, 0x66480001, 0x66480001}, {0x00016868, 0x6db6db6c, 0x6db6db6c, 0x6db6db6c, 0x6db6db6c}, }; diff --git a/drivers/net/wireless/ath/ath9k/ar9003_calib.c b/drivers/net/wireless/ath/ath9k/ar9003_calib.c index 63089cc1fafd..9284bca6e16e 100644 --- a/drivers/net/wireless/ath/ath9k/ar9003_calib.c +++ b/drivers/net/wireless/ath/ath9k/ar9003_calib.c @@ -938,6 +938,8 @@ static bool ar9003_hw_init_cal(struct ath_hw *ah, AR_PHY_CL_TAB_1, AR_PHY_CL_TAB_2 }; + ar9003_hw_set_chain_masks(ah, ah->caps.rx_chainmask, ah->caps.tx_chainmask); + if (rtt) { if (!ar9003_hw_rtt_restore(ah, chan)) run_rtt_cal = true; diff --git a/drivers/net/wireless/ath/ath9k/ar9003_eeprom.h b/drivers/net/wireless/ath/ath9k/ar9003_eeprom.h index bb223fe82816..582ebc4bc5e0 100644 --- a/drivers/net/wireless/ath/ath9k/ar9003_eeprom.h +++ b/drivers/net/wireless/ath/ath9k/ar9003_eeprom.h @@ -69,13 +69,13 @@ #define AR9300_BASE_ADDR 0x3ff #define AR9300_BASE_ADDR_512 0x1ff -#define AR9300_OTP_BASE 0x14000 -#define AR9300_OTP_STATUS 0x15f18 +#define AR9300_OTP_BASE (AR_SREV_9340(ah) ? 0x30000 : 0x14000) +#define AR9300_OTP_STATUS (AR_SREV_9340(ah) ? 0x30018 : 0x15f18) #define AR9300_OTP_STATUS_TYPE 0x7 #define AR9300_OTP_STATUS_VALID 0x4 #define AR9300_OTP_STATUS_ACCESS_BUSY 0x2 #define AR9300_OTP_STATUS_SM_BUSY 0x1 -#define AR9300_OTP_READ_DATA 0x15f1c +#define AR9300_OTP_READ_DATA (AR_SREV_9340(ah) ? 
0x3001c : 0x15f1c) enum targetPowerHTRates { HT_TARGET_RATE_0_8_16, diff --git a/drivers/net/wireless/ath/ath9k/ar9003_phy.c b/drivers/net/wireless/ath/ath9k/ar9003_phy.c index deb6cfb2959a..928a2ec91e14 100644 --- a/drivers/net/wireless/ath/ath9k/ar9003_phy.c +++ b/drivers/net/wireless/ath/ath9k/ar9003_phy.c @@ -543,7 +543,7 @@ static void ar9003_hw_init_bb(struct ath_hw *ah, udelay(synthDelay + BASE_ACTIVATE_DELAY); } -static void ar9003_hw_set_chain_masks(struct ath_hw *ah, u8 rx, u8 tx) +void ar9003_hw_set_chain_masks(struct ath_hw *ah, u8 rx, u8 tx) { switch (rx) { case 0x5: diff --git a/drivers/net/wireless/ath/ath9k/beacon.c b/drivers/net/wireless/ath/ath9k/beacon.c index 626418222c85..2e4243fcadc9 100644 --- a/drivers/net/wireless/ath/ath9k/beacon.c +++ b/drivers/net/wireless/ath/ath9k/beacon.c @@ -121,7 +121,7 @@ static void ath_tx_cabq(struct ieee80211_hw *hw, struct sk_buff *skb) if (ath_tx_start(hw, skb, &txctl) != 0) { ath_dbg(common, XMIT, "CABQ TX failed\n"); - dev_kfree_skb_any(skb); + ieee80211_free_txskb(hw, skb); } } @@ -154,6 +154,7 @@ static struct ath_buf *ath_beacon_generate(struct ieee80211_hw *hw, skb->len, DMA_TO_DEVICE); dev_kfree_skb_any(skb); bf->bf_buf_addr = 0; + bf->bf_mpdu = NULL; } /* Get a new beacon from mac80211 */ diff --git a/drivers/net/wireless/ath/ath9k/calib.c b/drivers/net/wireless/ath/ath9k/calib.c index e5cceb077574..bbd249df23e3 100644 --- a/drivers/net/wireless/ath/ath9k/calib.c +++ b/drivers/net/wireless/ath/ath9k/calib.c @@ -69,6 +69,7 @@ s16 ath9k_hw_getchan_noise(struct ath_hw *ah, struct ath9k_channel *chan) if (chan && chan->noisefloor) { s8 delta = chan->noisefloor - + ATH9K_NF_CAL_NOISE_THRESH - ath9k_hw_get_default_nf(ah, chan); if (delta > 0) noise += delta; diff --git a/drivers/net/wireless/ath/ath9k/calib.h b/drivers/net/wireless/ath/ath9k/calib.h index 3b33996d97df..1f2e01a96d22 100644 --- a/drivers/net/wireless/ath/ath9k/calib.h +++ b/drivers/net/wireless/ath/ath9k/calib.h @@ -21,6 +21,9 @@ #define AR_PHY_CCA_FILTERWINDOW_LENGTH 5 +/* Internal noise floor can vary by about 6db depending on the frequency */ +#define ATH9K_NF_CAL_NOISE_THRESH 6 + #define NUM_NF_READINGS 6 #define ATH9K_NF_CAL_HIST_MAX 5 diff --git a/drivers/net/wireless/ath/ath9k/htc_hst.c b/drivers/net/wireless/ath/ath9k/htc_hst.c index c25226a32ddc..ec86d981abc8 100644 --- a/drivers/net/wireless/ath/ath9k/htc_hst.c +++ b/drivers/net/wireless/ath/ath9k/htc_hst.c @@ -342,6 +342,8 @@ void ath9k_htc_txcompletion_cb(struct htc_target *htc_handle, endpoint->ep_callbacks.tx(endpoint->ep_callbacks.priv, skb, htc_hdr->endpoint_id, txok); + } else { + kfree_skb(skb); } } diff --git a/drivers/net/wireless/ath/ath9k/hw.h b/drivers/net/wireless/ath/ath9k/hw.h index f8e1fbbbfc5e..d5c5dca3d367 100644 --- a/drivers/net/wireless/ath/ath9k/hw.h +++ b/drivers/net/wireless/ath/ath9k/hw.h @@ -1014,6 +1014,7 @@ int ar9003_paprd_setup_gain_table(struct ath_hw *ah, int chain); int ar9003_paprd_init_table(struct ath_hw *ah); bool ar9003_paprd_is_done(struct ath_hw *ah); void ar9003_hw_set_paprd_txdesc(struct ath_hw *ah, void *ds, u8 chains); +void ar9003_hw_set_chain_masks(struct ath_hw *ah, u8 rx, u8 tx); /* Hardware family op attach helpers */ void ar5008_hw_attach_phy_ops(struct ath_hw *ah); diff --git a/drivers/net/wireless/ath/ath9k/main.c b/drivers/net/wireless/ath/ath9k/main.c index d5dabcb61a0a..91e2c4f53d14 100644 --- a/drivers/net/wireless/ath/ath9k/main.c +++ b/drivers/net/wireless/ath/ath9k/main.c @@ -1147,7 +1147,7 @@ static void ath9k_tx(struct ieee80211_hw *hw, struct 
sk_buff *skb) return; exit: - dev_kfree_skb_any(skb); + ieee80211_free_txskb(hw, skb); } static void ath9k_stop(struct ieee80211_hw *hw) diff --git a/drivers/net/wireless/ath/ath9k/pci.c b/drivers/net/wireless/ath/ath9k/pci.c index e44097a7529a..0e7d6c188ccf 100644 --- a/drivers/net/wireless/ath/ath9k/pci.c +++ b/drivers/net/wireless/ath/ath9k/pci.c @@ -122,8 +122,9 @@ static void ath_pci_aspm_init(struct ath_common *common) if (!parent) return; - if (ath9k_hw_get_btcoex_scheme(ah) != ATH_BTCOEX_CFG_NONE) { - /* Bluetooth coexistance requires disabling ASPM. */ + if ((ath9k_hw_get_btcoex_scheme(ah) != ATH_BTCOEX_CFG_NONE) && + (AR_SREV_9285(ah))) { + /* Bluetooth coexistance requires disabling ASPM for AR9285. */ pci_read_config_byte(pdev, pos + PCI_EXP_LNKCTL, &aspm); aspm &= ~(PCIE_LINK_STATE_L0S | PCIE_LINK_STATE_L1); pci_write_config_byte(pdev, pos + PCI_EXP_LNKCTL, aspm); diff --git a/drivers/net/wireless/ath/ath9k/recv.c b/drivers/net/wireless/ath/ath9k/recv.c index a2f7ae81a414..039bac7e0c66 100644 --- a/drivers/net/wireless/ath/ath9k/recv.c +++ b/drivers/net/wireless/ath/ath9k/recv.c @@ -778,6 +778,7 @@ static struct ath_buf *ath_get_next_rx_buf(struct ath_softc *sc, return NULL; } + list_del(&bf->list); if (!bf->bf_mpdu) return bf; @@ -1774,7 +1775,6 @@ int ath_rx_tasklet(struct ath_softc *sc, int flush, bool hp) struct ieee80211_hw *hw = sc->hw; struct ieee80211_hdr *hdr; int retval; - bool decrypt_error = false; struct ath_rx_status rs; enum ath9k_rx_qtype qtype; bool edma = !!(ah->caps.hw_caps & ATH9K_HW_CAP_EDMA); @@ -1796,6 +1796,7 @@ int ath_rx_tasklet(struct ath_softc *sc, int flush, bool hp) tsf_lower = tsf & 0xffffffff; do { + bool decrypt_error = false; /* If handling rx interrupt and flush is in progress => exit */ if ((sc->sc_flags & SC_OP_RXFLUSH) && (flush == 0)) break; @@ -1966,14 +1967,15 @@ requeue_drop_frag: sc->rx.frag = NULL; } requeue: + list_add_tail(&bf->list, &sc->rx.rxbuf); + if (flush) + continue; + if (edma) { - list_add_tail(&bf->list, &sc->rx.rxbuf); ath_rx_edma_buf_link(sc, qtype); } else { - list_move_tail(&bf->list, &sc->rx.rxbuf); ath_rx_buf_link(sc, bf); - if (!flush) - ath9k_hw_rxena(ah); + ath9k_hw_rxena(ah); } } while (1); diff --git a/drivers/net/wireless/ath/ath9k/xmit.c b/drivers/net/wireless/ath/ath9k/xmit.c index 4d571394c7a8..12a42f2c1e84 100644 --- a/drivers/net/wireless/ath/ath9k/xmit.c +++ b/drivers/net/wireless/ath/ath9k/xmit.c @@ -64,8 +64,7 @@ static void ath_tx_update_baw(struct ath_softc *sc, struct ath_atx_tid *tid, static struct ath_buf *ath_tx_setup_buffer(struct ath_softc *sc, struct ath_txq *txq, struct ath_atx_tid *tid, - struct sk_buff *skb, - bool dequeue); + struct sk_buff *skb); enum { MCS_HT20, @@ -201,7 +200,15 @@ static void ath_tx_flush_tid(struct ath_softc *sc, struct ath_atx_tid *tid) fi = get_frame_info(skb); bf = fi->bf; - if (bf && fi->retries) { + if (!bf) { + bf = ath_tx_setup_buffer(sc, txq, tid, skb); + if (!bf) { + ieee80211_free_txskb(sc->hw, skb); + continue; + } + } + + if (fi->retries) { list_add_tail(&bf->list, &bf_head); ath_tx_update_baw(sc, tid, bf->bf_state.seqno); ath_tx_complete_buf(sc, bf, txq, &bf_head, &ts, 0); @@ -330,6 +337,7 @@ static struct ath_buf *ath_tx_get_buffer(struct ath_softc *sc) } bf = list_first_entry(&sc->tx.txbuf, struct ath_buf, list); + bf->bf_next = NULL; list_del(&bf->list); spin_unlock_bh(&sc->tx.txbuflock); @@ -411,7 +419,7 @@ static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq, u16 seq_st = 0, acked_cnt = 0, txfail_cnt = 0, seq_first; u32 
ba[WME_BA_BMP_SIZE >> 5]; int isaggr, txfail, txpending, sendbar = 0, needreset = 0, nbad = 0; - bool rc_update = true; + bool rc_update = true, isba; struct ieee80211_tx_rate rates[4]; struct ath_frame_info *fi; int nframes; @@ -455,13 +463,17 @@ static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq, tidno = ieee80211_get_qos_ctl(hdr)[0] & IEEE80211_QOS_CTL_TID_MASK; tid = ATH_AN_2_TID(an, tidno); seq_first = tid->seq_start; + isba = ts->ts_flags & ATH9K_TX_BA; /* * The hardware occasionally sends a tx status for the wrong TID. * In this case, the BA status cannot be considered valid and all * subframes need to be retransmitted + * + * Only BlockAcks have a TID and therefore normal Acks cannot be + * checked */ - if (tidno != ts->tid) + if (isba && tidno != ts->tid) txok = false; isaggr = bf_isaggr(bf); @@ -812,10 +824,13 @@ static enum ATH_AGGR_STATUS ath_tx_form_aggr(struct ath_softc *sc, fi = get_frame_info(skb); bf = fi->bf; if (!fi->bf) - bf = ath_tx_setup_buffer(sc, txq, tid, skb, true); + bf = ath_tx_setup_buffer(sc, txq, tid, skb); - if (!bf) + if (!bf) { + __skb_unlink(skb, &tid->buf_q); + ieee80211_free_txskb(sc->hw, skb); continue; + } bf->bf_state.bf_type = BUF_AMPDU | BUF_AGGR; seqno = bf->bf_state.seqno; @@ -1717,9 +1732,11 @@ static void ath_tx_send_ampdu(struct ath_softc *sc, struct ath_atx_tid *tid, return; } - bf = ath_tx_setup_buffer(sc, txctl->txq, tid, skb, false); - if (!bf) + bf = ath_tx_setup_buffer(sc, txctl->txq, tid, skb); + if (!bf) { + ieee80211_free_txskb(sc->hw, skb); return; + } bf->bf_state.bf_type = BUF_AMPDU; INIT_LIST_HEAD(&bf_head); @@ -1743,16 +1760,12 @@ static void ath_tx_send_normal(struct ath_softc *sc, struct ath_txq *txq, struct ath_buf *bf; bf = fi->bf; - if (!bf) - bf = ath_tx_setup_buffer(sc, txq, tid, skb, false); - - if (!bf) - return; INIT_LIST_HEAD(&bf_head); list_add_tail(&bf->list, &bf_head); bf->bf_state.bf_type = 0; + bf->bf_next = NULL; bf->bf_lastbf = bf; ath_tx_fill_desc(sc, bf, txq, fi->framelen); ath_tx_txqaddbuf(sc, txq, &bf_head, false); @@ -1820,8 +1833,7 @@ u8 ath_txchainmask_reduction(struct ath_softc *sc, u8 chainmask, u32 rate) static struct ath_buf *ath_tx_setup_buffer(struct ath_softc *sc, struct ath_txq *txq, struct ath_atx_tid *tid, - struct sk_buff *skb, - bool dequeue) + struct sk_buff *skb) { struct ath_common *common = ath9k_hw_common(sc->sc_ah); struct ath_frame_info *fi = get_frame_info(skb); @@ -1833,7 +1845,7 @@ static struct ath_buf *ath_tx_setup_buffer(struct ath_softc *sc, bf = ath_tx_get_buffer(sc); if (!bf) { ath_dbg(common, XMIT, "TX buffers are full\n"); - goto error; + return NULL; } ATH_TXBUF_RESET(bf); @@ -1862,18 +1874,12 @@ static struct ath_buf *ath_tx_setup_buffer(struct ath_softc *sc, ath_err(ath9k_hw_common(sc->sc_ah), "dma_mapping_error() on TX\n"); ath_tx_return_buffer(sc, bf); - goto error; + return NULL; } fi->bf = bf; return bf; - -error: - if (dequeue) - __skb_unlink(skb, &tid->buf_q); - dev_kfree_skb_any(skb); - return NULL; } /* FIXME: tx power */ @@ -1902,9 +1908,14 @@ static void ath_tx_start_dma(struct ath_softc *sc, struct sk_buff *skb, */ ath_tx_send_ampdu(sc, tid, skb, txctl); } else { - bf = ath_tx_setup_buffer(sc, txctl->txq, tid, skb, false); - if (!bf) + bf = ath_tx_setup_buffer(sc, txctl->txq, tid, skb); + if (!bf) { + if (txctl->paprd) + dev_kfree_skb_any(skb); + else + ieee80211_free_txskb(sc->hw, skb); return; + } bf->bf_state.bfs_paprd = txctl->paprd; diff --git a/drivers/net/wireless/b43/b43.h b/drivers/net/wireless/b43/b43.h index 
67c13af6f206..05cb5f6b8d48 100644 --- a/drivers/net/wireless/b43/b43.h +++ b/drivers/net/wireless/b43/b43.h @@ -7,6 +7,7 @@ #include <linux/hw_random.h> #include <linux/bcma/bcma.h> #include <linux/ssb/ssb.h> +#include <linux/completion.h> #include <net/mac80211.h> #include "debugfs.h" @@ -718,6 +719,10 @@ enum b43_firmware_file_type { struct b43_request_fw_context { /* The device we are requesting the fw for. */ struct b43_wldev *dev; + /* a completion event structure needed if this call is asynchronous */ + struct completion fw_load_complete; + /* a pointer to the firmware object */ + const struct firmware *blob; /* The type of firmware to request. */ enum b43_firmware_file_type req_type; /* Error messages for each firmware type. */ diff --git a/drivers/net/wireless/b43/dma.c b/drivers/net/wireless/b43/dma.c index b5f1b91002bb..65f831faf2af 100644 --- a/drivers/net/wireless/b43/dma.c +++ b/drivers/net/wireless/b43/dma.c @@ -409,7 +409,10 @@ static inline struct b43_dmadesc_meta *meta) { if (meta->skb) { - dev_kfree_skb_any(meta->skb); + if (ring->tx) + ieee80211_free_txskb(ring->dev->wl->hw, meta->skb); + else + dev_kfree_skb_any(meta->skb); meta->skb = NULL; } } @@ -1454,7 +1457,7 @@ int b43_dma_tx(struct b43_wldev *dev, struct sk_buff *skb) if (unlikely(err == -ENOKEY)) { /* Drop this packet, as we don't have the encryption key * anymore and must not transmit it unencrypted. */ - dev_kfree_skb_any(skb); + ieee80211_free_txskb(dev->wl->hw, skb); err = 0; goto out; } diff --git a/drivers/net/wireless/b43/dma.h b/drivers/net/wireless/b43/dma.h index 315b96ed1d90..9fdd1983079c 100644 --- a/drivers/net/wireless/b43/dma.h +++ b/drivers/net/wireless/b43/dma.h @@ -169,7 +169,7 @@ struct b43_dmadesc_generic { /* DMA engine tuning knobs */ #define B43_TXRING_SLOTS 256 -#define B43_RXRING_SLOTS 64 +#define B43_RXRING_SLOTS 256 #define B43_DMA0_RX_FW598_BUFSIZE (B43_DMA0_RX_FW598_FO + IEEE80211_MAX_FRAME_LEN) #define B43_DMA0_RX_FW351_BUFSIZE (B43_DMA0_RX_FW351_FO + IEEE80211_MAX_FRAME_LEN) diff --git a/drivers/net/wireless/b43/main.c b/drivers/net/wireless/b43/main.c index 0cd9f472a5f5..e59375ce0f45 100644 --- a/drivers/net/wireless/b43/main.c +++ b/drivers/net/wireless/b43/main.c @@ -2088,11 +2088,18 @@ static void b43_print_fw_helptext(struct b43_wl *wl, bool error) b43warn(wl, text); } +static void b43_fw_cb(const struct firmware *firmware, void *context) +{ + struct b43_request_fw_context *ctx = context; + + ctx->blob = firmware; + complete(&ctx->fw_load_complete); +} + int b43_do_request_fw(struct b43_request_fw_context *ctx, const char *name, - struct b43_firmware_file *fw) + struct b43_firmware_file *fw, bool async) { - const struct firmware *blob; struct b43_fw_header *hdr; u32 size; int err; @@ -2131,11 +2138,31 @@ int b43_do_request_fw(struct b43_request_fw_context *ctx, B43_WARN_ON(1); return -ENOSYS; } - err = request_firmware(&blob, ctx->fwname, ctx->dev->dev->dev); + if (async) { + /* do this part asynchronously */ + init_completion(&ctx->fw_load_complete); + err = request_firmware_nowait(THIS_MODULE, 1, ctx->fwname, + ctx->dev->dev->dev, GFP_KERNEL, + ctx, b43_fw_cb); + if (err < 0) { + pr_err("Unable to load firmware\n"); + return err; + } + /* stall here until fw ready */ + wait_for_completion(&ctx->fw_load_complete); + if (ctx->blob) + goto fw_ready; + /* On some ARM systems, the async request will fail, but the next sync + * request works. 
For this reason, we dall through here + */ + } + err = request_firmware(&ctx->blob, ctx->fwname, + ctx->dev->dev->dev); if (err == -ENOENT) { snprintf(ctx->errors[ctx->req_type], sizeof(ctx->errors[ctx->req_type]), - "Firmware file \"%s\" not found\n", ctx->fwname); + "Firmware file \"%s\" not found\n", + ctx->fwname); return err; } else if (err) { snprintf(ctx->errors[ctx->req_type], @@ -2144,14 +2171,15 @@ int b43_do_request_fw(struct b43_request_fw_context *ctx, ctx->fwname, err); return err; } - if (blob->size < sizeof(struct b43_fw_header)) +fw_ready: + if (ctx->blob->size < sizeof(struct b43_fw_header)) goto err_format; - hdr = (struct b43_fw_header *)(blob->data); + hdr = (struct b43_fw_header *)(ctx->blob->data); switch (hdr->type) { case B43_FW_TYPE_UCODE: case B43_FW_TYPE_PCM: size = be32_to_cpu(hdr->size); - if (size != blob->size - sizeof(struct b43_fw_header)) + if (size != ctx->blob->size - sizeof(struct b43_fw_header)) goto err_format; /* fallthrough */ case B43_FW_TYPE_IV: @@ -2162,7 +2190,7 @@ int b43_do_request_fw(struct b43_request_fw_context *ctx, goto err_format; } - fw->data = blob; + fw->data = ctx->blob; fw->filename = name; fw->type = ctx->req_type; @@ -2172,7 +2200,7 @@ err_format: snprintf(ctx->errors[ctx->req_type], sizeof(ctx->errors[ctx->req_type]), "Firmware file \"%s\" format error.\n", ctx->fwname); - release_firmware(blob); + release_firmware(ctx->blob); return -EPROTO; } @@ -2223,7 +2251,7 @@ static int b43_try_request_fw(struct b43_request_fw_context *ctx) goto err_no_ucode; } } - err = b43_do_request_fw(ctx, filename, &fw->ucode); + err = b43_do_request_fw(ctx, filename, &fw->ucode, true); if (err) goto err_load; @@ -2235,7 +2263,7 @@ static int b43_try_request_fw(struct b43_request_fw_context *ctx) else goto err_no_pcm; fw->pcm_request_failed = false; - err = b43_do_request_fw(ctx, filename, &fw->pcm); + err = b43_do_request_fw(ctx, filename, &fw->pcm, false); if (err == -ENOENT) { /* We did not find a PCM file? Not fatal, but * core rev <= 10 must do without hwcrypto then. */ @@ -2296,7 +2324,7 @@ static int b43_try_request_fw(struct b43_request_fw_context *ctx) default: goto err_no_initvals; } - err = b43_do_request_fw(ctx, filename, &fw->initvals); + err = b43_do_request_fw(ctx, filename, &fw->initvals, false); if (err) goto err_load; @@ -2355,7 +2383,7 @@ static int b43_try_request_fw(struct b43_request_fw_context *ctx) default: goto err_no_initvals; } - err = b43_do_request_fw(ctx, filename, &fw->initvals_band); + err = b43_do_request_fw(ctx, filename, &fw->initvals_band, false); if (err) goto err_load; @@ -3400,7 +3428,7 @@ static void b43_tx_work(struct work_struct *work) break; } if (unlikely(err)) - dev_kfree_skb(skb); /* Drop it */ + ieee80211_free_txskb(wl->hw, skb); err = 0; } @@ -3421,7 +3449,7 @@ static void b43_op_tx(struct ieee80211_hw *hw, if (unlikely(skb->len < 2 + 2 + 6)) { /* Too short, this can't be a valid frame. */ - dev_kfree_skb_any(skb); + ieee80211_free_txskb(hw, skb); return; } B43_WARN_ON(skb_shinfo(skb)->nr_frags); @@ -4217,8 +4245,12 @@ redo: /* Drain all TX queues. 
*/ for (queue_num = 0; queue_num < B43_QOS_QUEUE_NUM; queue_num++) { - while (skb_queue_len(&wl->tx_queue[queue_num])) - dev_kfree_skb(skb_dequeue(&wl->tx_queue[queue_num])); + while (skb_queue_len(&wl->tx_queue[queue_num])) { + struct sk_buff *skb; + + skb = skb_dequeue(&wl->tx_queue[queue_num]); + ieee80211_free_txskb(wl->hw, skb); + } } b43_mac_suspend(dev); @@ -5437,6 +5469,8 @@ static void b43_ssb_remove(struct ssb_device *sdev) cancel_work_sync(&wldev->restart_work); B43_WARN_ON(!wl); + if (!wldev->fw.ucode.data) + return; /* NULL if firmware never loaded */ if (wl->current_dev == wldev) { /* Restore the queues count before unregistering, because firmware detect * might have modified it. Restoring is important, so the networking diff --git a/drivers/net/wireless/b43/main.h b/drivers/net/wireless/b43/main.h index 8c684cd33529..abac25ee958d 100644 --- a/drivers/net/wireless/b43/main.h +++ b/drivers/net/wireless/b43/main.h @@ -137,9 +137,8 @@ void b43_mac_phy_clock_set(struct b43_wldev *dev, bool on); struct b43_request_fw_context; -int b43_do_request_fw(struct b43_request_fw_context *ctx, - const char *name, - struct b43_firmware_file *fw); +int b43_do_request_fw(struct b43_request_fw_context *ctx, const char *name, + struct b43_firmware_file *fw, bool async); void b43_do_release_fw(struct b43_firmware_file *fw); #endif /* B43_MAIN_H_ */ diff --git a/drivers/net/wireless/b43/pio.c b/drivers/net/wireless/b43/pio.c index 3533ab86bd36..a73ff8c9deb5 100644 --- a/drivers/net/wireless/b43/pio.c +++ b/drivers/net/wireless/b43/pio.c @@ -196,7 +196,7 @@ static void b43_pio_cancel_tx_packets(struct b43_pio_txqueue *q) for (i = 0; i < ARRAY_SIZE(q->packets); i++) { pack = &(q->packets[i]); if (pack->skb) { - dev_kfree_skb_any(pack->skb); + ieee80211_free_txskb(q->dev->wl->hw, pack->skb); pack->skb = NULL; } } @@ -552,7 +552,7 @@ int b43_pio_tx(struct b43_wldev *dev, struct sk_buff *skb) if (unlikely(err == -ENOKEY)) { /* Drop this packet, as we don't have the encryption key * anymore and must not transmit it unencrypted. 
*/ - dev_kfree_skb_any(skb); + ieee80211_free_txskb(dev->wl->hw, skb); err = 0; goto out; } diff --git a/drivers/net/wireless/b43legacy/b43legacy.h b/drivers/net/wireless/b43legacy/b43legacy.h index a29da674e69d..482476fdb1f3 100644 --- a/drivers/net/wireless/b43legacy/b43legacy.h +++ b/drivers/net/wireless/b43legacy/b43legacy.h @@ -13,6 +13,7 @@ #include <linux/ssb/ssb.h> #include <linux/ssb/ssb_driver_chipcommon.h> +#include <linux/completion.h> #include <net/mac80211.h> @@ -733,6 +734,10 @@ struct b43legacy_wldev { /* Firmware data */ struct b43legacy_firmware fw; + const struct firmware *fwp; /* needed to pass fw pointer */ + + /* completion struct for firmware loading */ + struct completion fw_load_complete; /* Devicelist in struct b43legacy_wl (all 802.11 cores) */ struct list_head list; diff --git a/drivers/net/wireless/b43legacy/main.c b/drivers/net/wireless/b43legacy/main.c index a98db30b7acb..53696efbe155 100644 --- a/drivers/net/wireless/b43legacy/main.c +++ b/drivers/net/wireless/b43legacy/main.c @@ -1511,9 +1511,17 @@ static void b43legacy_print_fw_helptext(struct b43legacy_wl *wl) "and download the correct firmware (version 3).\n"); } +static void b43legacy_fw_cb(const struct firmware *firmware, void *context) +{ + struct b43legacy_wldev *dev = context; + + dev->fwp = firmware; + complete(&dev->fw_load_complete); +} + static int do_request_fw(struct b43legacy_wldev *dev, const char *name, - const struct firmware **fw) + const struct firmware **fw, bool async) { char path[sizeof(modparam_fwpostfix) + 32]; struct b43legacy_fw_header *hdr; @@ -1526,7 +1534,24 @@ static int do_request_fw(struct b43legacy_wldev *dev, snprintf(path, ARRAY_SIZE(path), "b43legacy%s/%s.fw", modparam_fwpostfix, name); - err = request_firmware(fw, path, dev->dev->dev); + b43legacyinfo(dev->wl, "Loading firmware %s\n", path); + if (async) { + init_completion(&dev->fw_load_complete); + err = request_firmware_nowait(THIS_MODULE, 1, path, + dev->dev->dev, GFP_KERNEL, + dev, b43legacy_fw_cb); + if (err) { + b43legacyerr(dev->wl, "Unable to load firmware\n"); + return err; + } + /* stall here until fw ready */ + wait_for_completion(&dev->fw_load_complete); + if (!dev->fwp) + err = -EINVAL; + *fw = dev->fwp; + } else { + err = request_firmware(fw, path, dev->dev->dev); + } if (err) { b43legacyerr(dev->wl, "Firmware file \"%s\" not found " "or load failed.\n", path); @@ -1578,7 +1603,7 @@ static void b43legacy_request_firmware(struct work_struct *work) filename = "ucode4"; else filename = "ucode5"; - err = do_request_fw(dev, filename, &fw->ucode); + err = do_request_fw(dev, filename, &fw->ucode, true); if (err) goto err_load; } @@ -1587,7 +1612,7 @@ static void b43legacy_request_firmware(struct work_struct *work) filename = "pcm4"; else filename = "pcm5"; - err = do_request_fw(dev, filename, &fw->pcm); + err = do_request_fw(dev, filename, &fw->pcm, false); if (err) goto err_load; } @@ -1605,7 +1630,7 @@ static void b43legacy_request_firmware(struct work_struct *work) default: goto err_no_initvals; } - err = do_request_fw(dev, filename, &fw->initvals); + err = do_request_fw(dev, filename, &fw->initvals, false); if (err) goto err_load; } @@ -1625,7 +1650,7 @@ static void b43legacy_request_firmware(struct work_struct *work) default: goto err_no_initvals; } - err = do_request_fw(dev, filename, &fw->initvals_band); + err = do_request_fw(dev, filename, &fw->initvals_band, false); if (err) goto err_load; } @@ -3892,6 +3917,8 @@ static void b43legacy_remove(struct ssb_device *dev) cancel_work_sync(&wl->firmware_load); 
B43legacy_WARN_ON(!wl); + if (!wldev->fw.ucode) + return; /* NULL if fw never loaded */ if (wl->current_dev == wldev) ieee80211_unregister_hw(wl->hw); diff --git a/drivers/net/wireless/brcm80211/brcmfmac/dhd_common.c b/drivers/net/wireless/brcm80211/brcmfmac/dhd_common.c index 4187435220f3..4db878dfc365 100644 --- a/drivers/net/wireless/brcm80211/brcmfmac/dhd_common.c +++ b/drivers/net/wireless/brcm80211/brcmfmac/dhd_common.c @@ -764,8 +764,11 @@ static void brcmf_c_arp_offload_set(struct brcmf_pub *drvr, int arp_mode) { char iovbuf[32]; int retcode; + __le32 arp_mode_le; - brcmf_c_mkiovar("arp_ol", (char *)&arp_mode, 4, iovbuf, sizeof(iovbuf)); + arp_mode_le = cpu_to_le32(arp_mode); + brcmf_c_mkiovar("arp_ol", (char *)&arp_mode_le, 4, iovbuf, + sizeof(iovbuf)); retcode = brcmf_proto_cdc_set_dcmd(drvr, 0, BRCMF_C_SET_VAR, iovbuf, sizeof(iovbuf)); retcode = retcode >= 0 ? 0 : retcode; @@ -781,8 +784,11 @@ static void brcmf_c_arp_offload_enable(struct brcmf_pub *drvr, int arp_enable) { char iovbuf[32]; int retcode; + __le32 arp_enable_le; - brcmf_c_mkiovar("arpoe", (char *)&arp_enable, 4, + arp_enable_le = cpu_to_le32(arp_enable); + + brcmf_c_mkiovar("arpoe", (char *)&arp_enable_le, 4, iovbuf, sizeof(iovbuf)); retcode = brcmf_proto_cdc_set_dcmd(drvr, 0, BRCMF_C_SET_VAR, iovbuf, sizeof(iovbuf)); @@ -803,10 +809,10 @@ int brcmf_c_preinit_dcmds(struct brcmf_pub *drvr) char buf[128], *ptr; u32 dongle_align = drvr->bus_if->align; u32 glom = 0; - u32 roaming = 1; - uint bcn_timeout = 3; - int scan_assoc_time = 40; - int scan_unassoc_time = 40; + __le32 roaming_le = cpu_to_le32(1); + __le32 bcn_timeout_le = cpu_to_le32(3); + __le32 scan_assoc_time_le = cpu_to_le32(40); + __le32 scan_unassoc_time_le = cpu_to_le32(40); int i; mutex_lock(&drvr->proto_block); @@ -841,14 +847,14 @@ int brcmf_c_preinit_dcmds(struct brcmf_pub *drvr) /* Setup timeout if Beacons are lost and roam is off to report link down */ - brcmf_c_mkiovar("bcn_timeout", (char *)&bcn_timeout, 4, iovbuf, + brcmf_c_mkiovar("bcn_timeout", (char *)&bcn_timeout_le, 4, iovbuf, sizeof(iovbuf)); brcmf_proto_cdc_set_dcmd(drvr, 0, BRCMF_C_SET_VAR, iovbuf, sizeof(iovbuf)); /* Enable/Disable build-in roaming to allowed ext supplicant to take of romaing */ - brcmf_c_mkiovar("roam_off", (char *)&roaming, 4, + brcmf_c_mkiovar("roam_off", (char *)&roaming_le, 4, iovbuf, sizeof(iovbuf)); brcmf_proto_cdc_set_dcmd(drvr, 0, BRCMF_C_SET_VAR, iovbuf, sizeof(iovbuf)); @@ -863,9 +869,9 @@ int brcmf_c_preinit_dcmds(struct brcmf_pub *drvr) sizeof(iovbuf)); brcmf_proto_cdc_set_dcmd(drvr, 0, BRCMF_C_SET_SCAN_CHANNEL_TIME, - (char *)&scan_assoc_time, sizeof(scan_assoc_time)); + (char *)&scan_assoc_time_le, sizeof(scan_assoc_time_le)); brcmf_proto_cdc_set_dcmd(drvr, 0, BRCMF_C_SET_SCAN_UNASSOC_TIME, - (char *)&scan_unassoc_time, sizeof(scan_unassoc_time)); + (char *)&scan_unassoc_time_le, sizeof(scan_unassoc_time_le)); /* Set and enable ARP offload feature */ brcmf_c_arp_offload_set(drvr, BRCMF_ARPOL_MODE); diff --git a/drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.c b/drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.c index d13ae9c299f2..e360939f7904 100644 --- a/drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.c +++ b/drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.c @@ -500,8 +500,10 @@ static void wl_iscan_prep(struct brcmf_scan_params_le *params_le, params_le->active_time = cpu_to_le32(-1); params_le->passive_time = cpu_to_le32(-1); params_le->home_time = cpu_to_le32(-1); - if (ssid && ssid->SSID_len) - memcpy(¶ms_le->ssid_le, ssid, 
sizeof(struct brcmf_ssid)); + if (ssid && ssid->SSID_len) { + params_le->ssid_le.SSID_len = cpu_to_le32(ssid->SSID_len); + memcpy(¶ms_le->ssid_le.SSID, ssid->SSID, ssid->SSID_len); + } } static s32 diff --git a/drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.c b/drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.c index 569ab8abd2a1..27f37c7379d6 100644 --- a/drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.c +++ b/drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.c @@ -1400,9 +1400,10 @@ void brcms_add_timer(struct brcms_timer *t, uint ms, int periodic) #endif t->ms = ms; t->periodic = (bool) periodic; - t->set = true; - - atomic_inc(&t->wl->callbacks); + if (!t->set) { + t->set = true; + atomic_inc(&t->wl->callbacks); + } ieee80211_queue_delayed_work(hw, &t->dly_wrk, msecs_to_jiffies(ms)); } diff --git a/drivers/net/wireless/ipw2x00/ipw2200.c b/drivers/net/wireless/ipw2x00/ipw2200.c index 3a6b991f0234..3dd80dfdcfdd 100644 --- a/drivers/net/wireless/ipw2x00/ipw2200.c +++ b/drivers/net/wireless/ipw2x00/ipw2200.c @@ -10471,7 +10471,7 @@ static void ipw_handle_promiscuous_tx(struct ipw_priv *priv, } else len = src->len; - dst = alloc_skb(len + sizeof(*rt_hdr), GFP_ATOMIC); + dst = alloc_skb(len + sizeof(*rt_hdr) + sizeof(u16)*2, GFP_ATOMIC); if (!dst) continue; diff --git a/drivers/net/wireless/iwlegacy/common.c b/drivers/net/wireless/iwlegacy/common.c index 4bc27112cbe2..2ab6b968c5f1 100644 --- a/drivers/net/wireless/iwlegacy/common.c +++ b/drivers/net/wireless/iwlegacy/common.c @@ -3957,17 +3957,21 @@ il_connection_init_rx_config(struct il_priv *il) memset(&il->staging, 0, sizeof(il->staging)); - if (!il->vif) { + switch (il->iw_mode) { + case NL80211_IFTYPE_UNSPECIFIED: il->staging.dev_type = RXON_DEV_TYPE_ESS; - } else if (il->vif->type == NL80211_IFTYPE_STATION) { + break; + case NL80211_IFTYPE_STATION: il->staging.dev_type = RXON_DEV_TYPE_ESS; il->staging.filter_flags = RXON_FILTER_ACCEPT_GRP_MSK; - } else if (il->vif->type == NL80211_IFTYPE_ADHOC) { + break; + case NL80211_IFTYPE_ADHOC: il->staging.dev_type = RXON_DEV_TYPE_IBSS; il->staging.flags = RXON_FLG_SHORT_PREAMBLE_MSK; il->staging.filter_flags = RXON_FILTER_BCON_AWARE_MSK | RXON_FILTER_ACCEPT_GRP_MSK; - } else { + break; + default: IL_ERR("Unsupported interface type %d\n", il->vif->type); return; } @@ -4550,8 +4554,7 @@ out: EXPORT_SYMBOL(il_mac_add_interface); static void -il_teardown_interface(struct il_priv *il, struct ieee80211_vif *vif, - bool mode_change) +il_teardown_interface(struct il_priv *il, struct ieee80211_vif *vif) { lockdep_assert_held(&il->mutex); @@ -4560,9 +4563,7 @@ il_teardown_interface(struct il_priv *il, struct ieee80211_vif *vif, il_force_scan_end(il); } - if (!mode_change) - il_set_mode(il); - + il_set_mode(il); } void @@ -4575,8 +4576,8 @@ il_mac_remove_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif) WARN_ON(il->vif != vif); il->vif = NULL; - - il_teardown_interface(il, vif, false); + il->iw_mode = NL80211_IFTYPE_UNSPECIFIED; + il_teardown_interface(il, vif); memset(il->bssid, 0, ETH_ALEN); D_MAC80211("leave\n"); @@ -4685,18 +4686,10 @@ il_mac_change_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif, } /* success */ - il_teardown_interface(il, vif, true); vif->type = newtype; vif->p2p = false; - err = il_set_mode(il); - WARN_ON(err); - /* - * We've switched internally, but submitting to the - * device may have failed for some reason. 
Mask this - * error, because otherwise mac80211 will not switch - * (and set the interface type back) and we'll be - * out of sync with it. - */ + il->iw_mode = newtype; + il_teardown_interface(il, vif); err = 0; out: diff --git a/drivers/net/wireless/iwlwifi/iwl-6000.c b/drivers/net/wireless/iwlwifi/iwl-6000.c index c0cfa4e652c9..2067bdff83ab 100644 --- a/drivers/net/wireless/iwlwifi/iwl-6000.c +++ b/drivers/net/wireless/iwlwifi/iwl-6000.c @@ -170,7 +170,7 @@ static int iwl6000_hw_channel_switch(struct iwl_priv *priv, * See iwlagn_mac_channel_switch. */ struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS]; - struct iwl6000_channel_switch_cmd cmd; + struct iwl6000_channel_switch_cmd *cmd; const struct iwl_channel_info *ch_info; u32 switch_time_in_usec, ucode_switch_time; u16 ch; @@ -180,18 +180,25 @@ static int iwl6000_hw_channel_switch(struct iwl_priv *priv, struct ieee80211_vif *vif = ctx->vif; struct iwl_host_cmd hcmd = { .id = REPLY_CHANNEL_SWITCH, - .len = { sizeof(cmd), }, + .len = { sizeof(*cmd), }, .flags = CMD_SYNC, - .data = { &cmd, }, + .dataflags[0] = IWL_HCMD_DFL_NOCOPY, }; + int err; - cmd.band = priv->band == IEEE80211_BAND_2GHZ; + cmd = kzalloc(sizeof(*cmd), GFP_KERNEL); + if (!cmd) + return -ENOMEM; + + hcmd.data[0] = cmd; + + cmd->band = priv->band == IEEE80211_BAND_2GHZ; ch = ch_switch->channel->hw_value; IWL_DEBUG_11H(priv, "channel switch from %u to %u\n", ctx->active.channel, ch); - cmd.channel = cpu_to_le16(ch); - cmd.rxon_flags = ctx->staging.flags; - cmd.rxon_filter_flags = ctx->staging.filter_flags; + cmd->channel = cpu_to_le16(ch); + cmd->rxon_flags = ctx->staging.flags; + cmd->rxon_filter_flags = ctx->staging.filter_flags; switch_count = ch_switch->count; tsf_low = ch_switch->timestamp & 0x0ffffffff; /* @@ -207,30 +214,32 @@ static int iwl6000_hw_channel_switch(struct iwl_priv *priv, switch_count = 0; } if (switch_count <= 1) - cmd.switch_time = cpu_to_le32(priv->ucode_beacon_time); + cmd->switch_time = cpu_to_le32(priv->ucode_beacon_time); else { switch_time_in_usec = vif->bss_conf.beacon_int * switch_count * TIME_UNIT; ucode_switch_time = iwl_usecs_to_beacons(priv, switch_time_in_usec, beacon_interval); - cmd.switch_time = iwl_add_beacon_time(priv, + cmd->switch_time = iwl_add_beacon_time(priv, priv->ucode_beacon_time, ucode_switch_time, beacon_interval); } IWL_DEBUG_11H(priv, "uCode time for the switch is 0x%x\n", - cmd.switch_time); + cmd->switch_time); ch_info = iwl_get_channel_info(priv, priv->band, ch); if (ch_info) - cmd.expect_beacon = is_channel_radar(ch_info); + cmd->expect_beacon = is_channel_radar(ch_info); else { IWL_ERR(priv, "invalid channel switch from %u to %u\n", ctx->active.channel, ch); return -EFAULT; } - return iwl_dvm_send_cmd(priv, &hcmd); + err = iwl_dvm_send_cmd(priv, &hcmd); + kfree(cmd); + return err; } static struct iwl_lib_ops iwl6000_lib = { diff --git a/drivers/net/wireless/mwifiex/cmdevt.c b/drivers/net/wireless/mwifiex/cmdevt.c index 07f6e0092552..2977a12410c0 100644 --- a/drivers/net/wireless/mwifiex/cmdevt.c +++ b/drivers/net/wireless/mwifiex/cmdevt.c @@ -816,9 +816,6 @@ mwifiex_cmd_timeout_func(unsigned long function_context) return; } cmd_node = adapter->curr_cmd; - if (cmd_node->wait_q_enabled) - adapter->cmd_wait_q.status = -ETIMEDOUT; - if (cmd_node) { adapter->dbg.timeout_cmd_id = adapter->dbg.last_cmd_id[adapter->dbg.last_cmd_index]; @@ -864,6 +861,14 @@ mwifiex_cmd_timeout_func(unsigned long function_context) dev_err(adapter->dev, "ps_mode=%d ps_state=%d\n", adapter->ps_mode, adapter->ps_state); + + if 
(cmd_node->wait_q_enabled) { + adapter->cmd_wait_q.status = -ETIMEDOUT; + wake_up_interruptible(&adapter->cmd_wait_q.wait); + mwifiex_cancel_pending_ioctl(adapter); + /* reset cmd_sent flag to unblock new commands */ + adapter->cmd_sent = false; + } } if (adapter->hw_status == MWIFIEX_HW_STATUS_INITIALIZING) mwifiex_init_fw_complete(adapter); diff --git a/drivers/net/wireless/mwifiex/pcie.c b/drivers/net/wireless/mwifiex/pcie.c index 5867facd415d..6ca3d8a3e0e3 100644 --- a/drivers/net/wireless/mwifiex/pcie.c +++ b/drivers/net/wireless/mwifiex/pcie.c @@ -158,7 +158,7 @@ static int mwifiex_pcie_suspend(struct pci_dev *pdev, pm_message_t state) if (pdev) { card = (struct pcie_service_card *) pci_get_drvdata(pdev); - if (!card || card->adapter) { + if (!card || !card->adapter) { pr_err("Card or adapter structure is not valid\n"); return 0; } diff --git a/drivers/net/wireless/mwifiex/sdio.c b/drivers/net/wireless/mwifiex/sdio.c index f8012e2b7f7c..7d00a87e9a14 100644 --- a/drivers/net/wireless/mwifiex/sdio.c +++ b/drivers/net/wireless/mwifiex/sdio.c @@ -158,7 +158,6 @@ static int mwifiex_sdio_suspend(struct device *dev) struct sdio_mmc_card *card; struct mwifiex_adapter *adapter; mmc_pm_flag_t pm_flag = 0; - int hs_actived = 0; int i; int ret = 0; @@ -185,12 +184,14 @@ static int mwifiex_sdio_suspend(struct device *dev) adapter = card->adapter; /* Enable the Host Sleep */ - hs_actived = mwifiex_enable_hs(adapter); - if (hs_actived) { - pr_debug("cmd: suspend with MMC_PM_KEEP_POWER\n"); - ret = sdio_set_host_pm_flags(func, MMC_PM_KEEP_POWER); + if (!mwifiex_enable_hs(adapter)) { + dev_err(adapter->dev, "cmd: failed to suspend\n"); + return -EFAULT; } + dev_dbg(adapter->dev, "cmd: suspend with MMC_PM_KEEP_POWER\n"); + ret = sdio_set_host_pm_flags(func, MMC_PM_KEEP_POWER); + /* Indicate device suspended */ adapter->is_suspended = true; diff --git a/drivers/net/wireless/mwifiex/sta_ioctl.c b/drivers/net/wireless/mwifiex/sta_ioctl.c index d7b11defafe0..4fb146a8fac2 100644 --- a/drivers/net/wireless/mwifiex/sta_ioctl.c +++ b/drivers/net/wireless/mwifiex/sta_ioctl.c @@ -53,7 +53,6 @@ int mwifiex_copy_mcast_addr(struct mwifiex_multicast_list *mlist, */ int mwifiex_wait_queue_complete(struct mwifiex_adapter *adapter) { - bool cancel_flag = false; int status; struct cmd_ctrl_node *cmd_queued; @@ -70,14 +69,11 @@ int mwifiex_wait_queue_complete(struct mwifiex_adapter *adapter) queue_work(adapter->workqueue, &adapter->main_work); /* Wait for completion */ - wait_event_interruptible(adapter->cmd_wait_q.wait, - *(cmd_queued->condition)); - if (!*(cmd_queued->condition)) - cancel_flag = true; - - if (cancel_flag) { - mwifiex_cancel_pending_ioctl(adapter); - dev_dbg(adapter->dev, "cmd cancel\n"); + status = wait_event_interruptible(adapter->cmd_wait_q.wait, + *(cmd_queued->condition)); + if (status) { + dev_err(adapter->dev, "cmd_wait_q terminated: %d\n", status); + return status; } status = adapter->cmd_wait_q.status; @@ -436,8 +432,11 @@ int mwifiex_enable_hs(struct mwifiex_adapter *adapter) return false; } - wait_event_interruptible(adapter->hs_activate_wait_q, - adapter->hs_activate_wait_q_woken); + if (wait_event_interruptible(adapter->hs_activate_wait_q, + adapter->hs_activate_wait_q_woken)) { + dev_err(adapter->dev, "hs_activate_wait_q terminated\n"); + return false; + } return true; } diff --git a/drivers/net/wireless/p54/p54usb.c b/drivers/net/wireless/p54/p54usb.c index f4d28c39aac7..af30777ce31f 100644 --- a/drivers/net/wireless/p54/p54usb.c +++ b/drivers/net/wireless/p54/p54usb.c @@ -42,11 +42,12 
@@ MODULE_FIRMWARE("isl3887usb"); * whenever you add a new device. */ -static struct usb_device_id p54u_table[] __devinitdata = { +static struct usb_device_id p54u_table[] = { /* Version 1 devices (pci chip + net2280) */ {USB_DEVICE(0x0411, 0x0050)}, /* Buffalo WLI2-USB2-G54 */ {USB_DEVICE(0x045e, 0x00c2)}, /* Microsoft MN-710 */ {USB_DEVICE(0x0506, 0x0a11)}, /* 3COM 3CRWE254G72 */ + {USB_DEVICE(0x0675, 0x0530)}, /* DrayTek Vigor 530 */ {USB_DEVICE(0x06b9, 0x0120)}, /* Thomson SpeedTouch 120g */ {USB_DEVICE(0x0707, 0xee06)}, /* SMC 2862W-G */ {USB_DEVICE(0x07aa, 0x001c)}, /* Corega CG-WLUSB2GT */ @@ -82,7 +83,9 @@ static struct usb_device_id p54u_table[] __devinitdata = { {USB_DEVICE(0x06a9, 0x000e)}, /* Westell 802.11g USB (A90-211WG-01) */ {USB_DEVICE(0x06b9, 0x0121)}, /* Thomson SpeedTouch 121g */ {USB_DEVICE(0x0707, 0xee13)}, /* SMC 2862W-G version 2 */ + {USB_DEVICE(0x0803, 0x4310)}, /* Zoom 4410a */ {USB_DEVICE(0x083a, 0x4521)}, /* Siemens Gigaset USB Adapter 54 version 2 */ + {USB_DEVICE(0x083a, 0x4531)}, /* T-Com Sinus 154 data II */ {USB_DEVICE(0x083a, 0xc501)}, /* Zoom Wireless-G 4410 */ {USB_DEVICE(0x083a, 0xf503)}, /* Accton FD7050E ver 1010ec */ {USB_DEVICE(0x0846, 0x4240)}, /* Netgear WG111 (v2) */ @@ -101,6 +104,7 @@ static struct usb_device_id p54u_table[] __devinitdata = { {USB_DEVICE(0x13B1, 0x000C)}, /* Linksys WUSB54AG */ {USB_DEVICE(0x1413, 0x5400)}, /* Telsey 802.11g USB2.0 Adapter */ {USB_DEVICE(0x1435, 0x0427)}, /* Inventel UR054G */ + /* {USB_DEVICE(0x15a9, 0x0002)}, * Also SparkLAN WL-682 with 3887 */ {USB_DEVICE(0x1668, 0x1050)}, /* Actiontec 802UIG-1 */ {USB_DEVICE(0x1740, 0x1000)}, /* Senao NUB-350 */ {USB_DEVICE(0x2001, 0x3704)}, /* DLink DWL-G122 rev A2 */ diff --git a/drivers/net/wireless/rt2x00/rt2400pci.c b/drivers/net/wireless/rt2x00/rt2400pci.c index 3a6b40239bc1..0ea85f46659d 100644 --- a/drivers/net/wireless/rt2x00/rt2400pci.c +++ b/drivers/net/wireless/rt2x00/rt2400pci.c @@ -1611,6 +1611,7 @@ static int rt2400pci_probe_hw_mode(struct rt2x00_dev *rt2x00dev) static int rt2400pci_probe_hw(struct rt2x00_dev *rt2x00dev) { int retval; + u32 reg; /* * Allocate eeprom data. @@ -1624,6 +1625,14 @@ static int rt2400pci_probe_hw(struct rt2x00_dev *rt2x00dev) return retval; /* + * Enable rfkill polling by setting GPIO direction of the + * rfkill switch GPIO pin correctly. + */ + rt2x00pci_register_read(rt2x00dev, GPIOCSR, ®); + rt2x00_set_field32(®, GPIOCSR_BIT8, 1); + rt2x00pci_register_write(rt2x00dev, GPIOCSR, reg); + + /* * Initialize hw specifications. */ retval = rt2400pci_probe_hw_mode(rt2x00dev); diff --git a/drivers/net/wireless/rt2x00/rt2400pci.h b/drivers/net/wireless/rt2x00/rt2400pci.h index d3a4a68cc439..7564ae992b73 100644 --- a/drivers/net/wireless/rt2x00/rt2400pci.h +++ b/drivers/net/wireless/rt2x00/rt2400pci.h @@ -670,6 +670,7 @@ #define GPIOCSR_BIT5 FIELD32(0x00000020) #define GPIOCSR_BIT6 FIELD32(0x00000040) #define GPIOCSR_BIT7 FIELD32(0x00000080) +#define GPIOCSR_BIT8 FIELD32(0x00000100) /* * BBPPCSR: BBP Pin control register. diff --git a/drivers/net/wireless/rt2x00/rt2500pci.c b/drivers/net/wireless/rt2x00/rt2500pci.c index dcc0e1fcca77..aa10c48c0dfa 100644 --- a/drivers/net/wireless/rt2x00/rt2500pci.c +++ b/drivers/net/wireless/rt2x00/rt2500pci.c @@ -1929,6 +1929,7 @@ static int rt2500pci_probe_hw_mode(struct rt2x00_dev *rt2x00dev) static int rt2500pci_probe_hw(struct rt2x00_dev *rt2x00dev) { int retval; + u32 reg; /* * Allocate eeprom data. 
@@ -1942,6 +1943,14 @@ static int rt2500pci_probe_hw(struct rt2x00_dev *rt2x00dev) return retval; /* + * Enable rfkill polling by setting GPIO direction of the + * rfkill switch GPIO pin correctly. + */ + rt2x00pci_register_read(rt2x00dev, GPIOCSR, ®); + rt2x00_set_field32(®, GPIOCSR_DIR0, 1); + rt2x00pci_register_write(rt2x00dev, GPIOCSR, reg); + + /* * Initialize hw specifications. */ retval = rt2500pci_probe_hw_mode(rt2x00dev); diff --git a/drivers/net/wireless/rt2x00/rt2500usb.c b/drivers/net/wireless/rt2x00/rt2500usb.c index 1de9c752c88b..e0a7efccb73b 100644 --- a/drivers/net/wireless/rt2x00/rt2500usb.c +++ b/drivers/net/wireless/rt2x00/rt2500usb.c @@ -283,7 +283,7 @@ static int rt2500usb_rfkill_poll(struct rt2x00_dev *rt2x00dev) u16 reg; rt2500usb_register_read(rt2x00dev, MAC_CSR19, ®); - return rt2x00_get_field32(reg, MAC_CSR19_BIT7); + return rt2x00_get_field16(reg, MAC_CSR19_BIT7); } #ifdef CONFIG_RT2X00_LIB_LEDS @@ -1768,6 +1768,7 @@ static int rt2500usb_probe_hw_mode(struct rt2x00_dev *rt2x00dev) static int rt2500usb_probe_hw(struct rt2x00_dev *rt2x00dev) { int retval; + u16 reg; /* * Allocate eeprom data. @@ -1781,6 +1782,14 @@ static int rt2500usb_probe_hw(struct rt2x00_dev *rt2x00dev) return retval; /* + * Enable rfkill polling by setting GPIO direction of the + * rfkill switch GPIO pin correctly. + */ + rt2500usb_register_read(rt2x00dev, MAC_CSR19, ®); + rt2x00_set_field16(®, MAC_CSR19_BIT8, 0); + rt2500usb_register_write(rt2x00dev, MAC_CSR19, reg); + + /* * Initialize hw specifications. */ retval = rt2500usb_probe_hw_mode(rt2x00dev); diff --git a/drivers/net/wireless/rt2x00/rt2500usb.h b/drivers/net/wireless/rt2x00/rt2500usb.h index b493306a7eed..196bd5103e4f 100644 --- a/drivers/net/wireless/rt2x00/rt2500usb.h +++ b/drivers/net/wireless/rt2x00/rt2500usb.h @@ -189,14 +189,15 @@ * MAC_CSR19: GPIO control register. */ #define MAC_CSR19 0x0426 -#define MAC_CSR19_BIT0 FIELD32(0x0001) -#define MAC_CSR19_BIT1 FIELD32(0x0002) -#define MAC_CSR19_BIT2 FIELD32(0x0004) -#define MAC_CSR19_BIT3 FIELD32(0x0008) -#define MAC_CSR19_BIT4 FIELD32(0x0010) -#define MAC_CSR19_BIT5 FIELD32(0x0020) -#define MAC_CSR19_BIT6 FIELD32(0x0040) -#define MAC_CSR19_BIT7 FIELD32(0x0080) +#define MAC_CSR19_BIT0 FIELD16(0x0001) +#define MAC_CSR19_BIT1 FIELD16(0x0002) +#define MAC_CSR19_BIT2 FIELD16(0x0004) +#define MAC_CSR19_BIT3 FIELD16(0x0008) +#define MAC_CSR19_BIT4 FIELD16(0x0010) +#define MAC_CSR19_BIT5 FIELD16(0x0020) +#define MAC_CSR19_BIT6 FIELD16(0x0040) +#define MAC_CSR19_BIT7 FIELD16(0x0080) +#define MAC_CSR19_BIT8 FIELD16(0x0100) /* * MAC_CSR20: LED control register. diff --git a/drivers/net/wireless/rt2x00/rt2800lib.c b/drivers/net/wireless/rt2x00/rt2800lib.c index 6c0a12ea6a15..2ce6bf5b6715 100644 --- a/drivers/net/wireless/rt2x00/rt2800lib.c +++ b/drivers/net/wireless/rt2x00/rt2800lib.c @@ -2235,7 +2235,7 @@ static int rt2800_get_gain_calibration_delta(struct rt2x00_dev *rt2x00dev) /* * Check if temperature compensation is supported. */ - if (tssi_bounds[4] == 0xff) + if (tssi_bounds[4] == 0xff || step == 0xff) return 0; /* diff --git a/drivers/net/wireless/rt2x00/rt2800pci.c b/drivers/net/wireless/rt2x00/rt2800pci.c index 0397bbf0ce01..ff81e761d20f 100644 --- a/drivers/net/wireless/rt2x00/rt2800pci.c +++ b/drivers/net/wireless/rt2x00/rt2800pci.c @@ -973,6 +973,7 @@ static int rt2800pci_validate_eeprom(struct rt2x00_dev *rt2x00dev) static int rt2800pci_probe_hw(struct rt2x00_dev *rt2x00dev) { int retval; + u32 reg; /* * Allocate eeprom data. 
@@ -986,6 +987,14 @@ static int rt2800pci_probe_hw(struct rt2x00_dev *rt2x00dev) return retval; /* + * Enable rfkill polling by setting GPIO direction of the + * rfkill switch GPIO pin correctly. + */ + rt2x00pci_register_read(rt2x00dev, GPIO_CTRL_CFG, ®); + rt2x00_set_field32(®, GPIO_CTRL_CFG_GPIOD_BIT2, 1); + rt2x00pci_register_write(rt2x00dev, GPIO_CTRL_CFG, reg); + + /* * Initialize hw specifications. */ retval = rt2800_probe_hw_mode(rt2x00dev); diff --git a/drivers/net/wireless/rt2x00/rt2800usb.c b/drivers/net/wireless/rt2x00/rt2800usb.c index 7e1a492680c1..65cb4250259f 100644 --- a/drivers/net/wireless/rt2x00/rt2800usb.c +++ b/drivers/net/wireless/rt2x00/rt2800usb.c @@ -667,8 +667,16 @@ static void rt2800usb_fill_rxdone(struct queue_entry *entry, skb_pull(entry->skb, RXINFO_DESC_SIZE); /* - * FIXME: we need to check for rx_pkt_len validity + * Check for rx_pkt_len validity. Return if invalid, leaving + * rxdesc->size zeroed out by the upper level. */ + if (unlikely(rx_pkt_len == 0 || + rx_pkt_len > entry->queue->data_size)) { + ERROR(entry->queue->rt2x00dev, + "Bad frame size %d, forcing to 0\n", rx_pkt_len); + return; + } + rxd = (__le32 *)(entry->skb->data + rx_pkt_len); /* @@ -736,6 +744,7 @@ static int rt2800usb_validate_eeprom(struct rt2x00_dev *rt2x00dev) static int rt2800usb_probe_hw(struct rt2x00_dev *rt2x00dev) { int retval; + u32 reg; /* * Allocate eeprom data. @@ -749,6 +758,14 @@ static int rt2800usb_probe_hw(struct rt2x00_dev *rt2x00dev) return retval; /* + * Enable rfkill polling by setting GPIO direction of the + * rfkill switch GPIO pin correctly. + */ + rt2x00usb_register_read(rt2x00dev, GPIO_CTRL_CFG, ®); + rt2x00_set_field32(®, GPIO_CTRL_CFG_GPIOD_BIT2, 1); + rt2x00usb_register_write(rt2x00dev, GPIO_CTRL_CFG, reg); + + /* * Initialize hw specifications. */ retval = rt2800_probe_hw_mode(rt2x00dev); @@ -1157,6 +1174,8 @@ static struct usb_device_id rt2800usb_device_table[] = { { USB_DEVICE(0x1690, 0x0744) }, { USB_DEVICE(0x1690, 0x0761) }, { USB_DEVICE(0x1690, 0x0764) }, + /* ASUS */ + { USB_DEVICE(0x0b05, 0x179d) }, /* Cisco */ { USB_DEVICE(0x167b, 0x4001) }, /* EnGenius */ @@ -1222,7 +1241,6 @@ static struct usb_device_id rt2800usb_device_table[] = { { USB_DEVICE(0x0b05, 0x1760) }, { USB_DEVICE(0x0b05, 0x1761) }, { USB_DEVICE(0x0b05, 0x1790) }, - { USB_DEVICE(0x0b05, 0x179d) }, /* AzureWave */ { USB_DEVICE(0x13d3, 0x3262) }, { USB_DEVICE(0x13d3, 0x3284) }, diff --git a/drivers/net/wireless/rt2x00/rt2x00dev.c b/drivers/net/wireless/rt2x00/rt2x00dev.c index 90cc5e772650..12b1ff5a6a30 100644 --- a/drivers/net/wireless/rt2x00/rt2x00dev.c +++ b/drivers/net/wireless/rt2x00/rt2x00dev.c @@ -628,7 +628,7 @@ void rt2x00lib_rxdone(struct queue_entry *entry) */ if (unlikely(rxdesc.size == 0 || rxdesc.size > entry->queue->data_size)) { - WARNING(rt2x00dev, "Wrong frame size %d max %d.\n", + ERROR(rt2x00dev, "Wrong frame size %d max %d.\n", rxdesc.size, entry->queue->data_size); dev_kfree_skb(entry->skb); goto renew_skb; diff --git a/drivers/net/wireless/rt2x00/rt61pci.c b/drivers/net/wireless/rt2x00/rt61pci.c index 0f4bf8c23bff..bdaba3fddd9a 100644 --- a/drivers/net/wireless/rt2x00/rt61pci.c +++ b/drivers/net/wireless/rt2x00/rt61pci.c @@ -2832,6 +2832,7 @@ static int rt61pci_probe_hw_mode(struct rt2x00_dev *rt2x00dev) static int rt61pci_probe_hw(struct rt2x00_dev *rt2x00dev) { int retval; + u32 reg; /* * Disable power saving. 
@@ -2850,6 +2851,14 @@ static int rt61pci_probe_hw(struct rt2x00_dev *rt2x00dev) return retval; /* + * Enable rfkill polling by setting GPIO direction of the + * rfkill switch GPIO pin correctly. + */ + rt2x00pci_register_read(rt2x00dev, MAC_CSR13, ®); + rt2x00_set_field32(®, MAC_CSR13_BIT13, 1); + rt2x00pci_register_write(rt2x00dev, MAC_CSR13, reg); + + /* * Initialize hw specifications. */ retval = rt61pci_probe_hw_mode(rt2x00dev); diff --git a/drivers/net/wireless/rt2x00/rt61pci.h b/drivers/net/wireless/rt2x00/rt61pci.h index e3cd6db76b0e..8f3da5a56766 100644 --- a/drivers/net/wireless/rt2x00/rt61pci.h +++ b/drivers/net/wireless/rt2x00/rt61pci.h @@ -372,6 +372,7 @@ struct hw_pairwise_ta_entry { #define MAC_CSR13_BIT10 FIELD32(0x00000400) #define MAC_CSR13_BIT11 FIELD32(0x00000800) #define MAC_CSR13_BIT12 FIELD32(0x00001000) +#define MAC_CSR13_BIT13 FIELD32(0x00002000) /* * MAC_CSR14: LED control register. diff --git a/drivers/net/wireless/rt2x00/rt73usb.c b/drivers/net/wireless/rt2x00/rt73usb.c index e477a964081d..fda8671474dc 100644 --- a/drivers/net/wireless/rt2x00/rt73usb.c +++ b/drivers/net/wireless/rt2x00/rt73usb.c @@ -2177,6 +2177,7 @@ static int rt73usb_probe_hw_mode(struct rt2x00_dev *rt2x00dev) static int rt73usb_probe_hw(struct rt2x00_dev *rt2x00dev) { int retval; + u32 reg; /* * Allocate eeprom data. @@ -2190,6 +2191,14 @@ static int rt73usb_probe_hw(struct rt2x00_dev *rt2x00dev) return retval; /* + * Enable rfkill polling by setting GPIO direction of the + * rfkill switch GPIO pin correctly. + */ + rt2x00usb_register_read(rt2x00dev, MAC_CSR13, ®); + rt2x00_set_field32(®, MAC_CSR13_BIT15, 0); + rt2x00usb_register_write(rt2x00dev, MAC_CSR13, reg); + + /* * Initialize hw specifications. */ retval = rt73usb_probe_hw_mode(rt2x00dev); diff --git a/drivers/net/wireless/rt2x00/rt73usb.h b/drivers/net/wireless/rt2x00/rt73usb.h index 9f6b470414d3..df1cc116b83b 100644 --- a/drivers/net/wireless/rt2x00/rt73usb.h +++ b/drivers/net/wireless/rt2x00/rt73usb.h @@ -282,6 +282,9 @@ struct hw_pairwise_ta_entry { #define MAC_CSR13_BIT10 FIELD32(0x00000400) #define MAC_CSR13_BIT11 FIELD32(0x00000800) #define MAC_CSR13_BIT12 FIELD32(0x00001000) +#define MAC_CSR13_BIT13 FIELD32(0x00002000) +#define MAC_CSR13_BIT14 FIELD32(0x00004000) +#define MAC_CSR13_BIT15 FIELD32(0x00008000) /* * MAC_CSR14: LED control register. diff --git a/drivers/net/wireless/rtl818x/rtl8187/dev.c b/drivers/net/wireless/rtl818x/rtl8187/dev.c index cf53ac9d6f23..86467a20c5b3 100644 --- a/drivers/net/wireless/rtl818x/rtl8187/dev.c +++ b/drivers/net/wireless/rtl818x/rtl8187/dev.c @@ -44,7 +44,7 @@ MODULE_AUTHOR("Larry Finger <Larry.Finger@lwfinger.net>"); MODULE_DESCRIPTION("RTL8187/RTL8187B USB wireless driver"); MODULE_LICENSE("GPL"); -static struct usb_device_id rtl8187_table[] __devinitdata = { +static struct usb_device_id rtl8187_table[] = { /* Asus */ {USB_DEVICE(0x0b05, 0x171d), .driver_info = DEVICE_RTL8187}, /* Belkin */ diff --git a/drivers/net/wireless/rtlwifi/base.c b/drivers/net/wireless/rtlwifi/base.c index e54488db0e10..18d9eb3ad7af 100644 --- a/drivers/net/wireless/rtlwifi/base.c +++ b/drivers/net/wireless/rtlwifi/base.c @@ -980,7 +980,8 @@ u8 rtl_is_special_data(struct ieee80211_hw *hw, struct sk_buff *skb, u8 is_tx) is_tx ? 
"Tx" : "Rx"); if (is_tx) { - rtl_lps_leave(hw); + schedule_work(&rtlpriv-> + works.lps_leave_work); ppsc->last_delaylps_stamp_jiffies = jiffies; } @@ -990,7 +991,7 @@ u8 rtl_is_special_data(struct ieee80211_hw *hw, struct sk_buff *skb, u8 is_tx) } } else if (ETH_P_ARP == ether_type) { if (is_tx) { - rtl_lps_leave(hw); + schedule_work(&rtlpriv->works.lps_leave_work); ppsc->last_delaylps_stamp_jiffies = jiffies; } @@ -1000,7 +1001,7 @@ u8 rtl_is_special_data(struct ieee80211_hw *hw, struct sk_buff *skb, u8 is_tx) "802.1X %s EAPOL pkt!!\n", is_tx ? "Tx" : "Rx"); if (is_tx) { - rtl_lps_leave(hw); + schedule_work(&rtlpriv->works.lps_leave_work); ppsc->last_delaylps_stamp_jiffies = jiffies; } diff --git a/drivers/net/wireless/rtlwifi/rtl8192ce/def.h b/drivers/net/wireless/rtlwifi/rtl8192ce/def.h index 04c3aef8a4f6..2925094b2d91 100644 --- a/drivers/net/wireless/rtlwifi/rtl8192ce/def.h +++ b/drivers/net/wireless/rtlwifi/rtl8192ce/def.h @@ -117,6 +117,7 @@ #define CHIP_VER_B BIT(4) #define CHIP_92C_BITMASK BIT(0) +#define CHIP_UNKNOWN BIT(7) #define CHIP_92C_1T2R 0x03 #define CHIP_92C 0x01 #define CHIP_88C 0x00 diff --git a/drivers/net/wireless/rtlwifi/rtl8192ce/hw.c b/drivers/net/wireless/rtlwifi/rtl8192ce/hw.c index 5c4d9bc040f1..509f66195f2f 100644 --- a/drivers/net/wireless/rtlwifi/rtl8192ce/hw.c +++ b/drivers/net/wireless/rtlwifi/rtl8192ce/hw.c @@ -995,8 +995,16 @@ static enum version_8192c _rtl92ce_read_chip_version(struct ieee80211_hw *hw) version = (value32 & TYPE_ID) ? VERSION_A_CHIP_92C : VERSION_A_CHIP_88C; } else { - version = (value32 & TYPE_ID) ? VERSION_B_CHIP_92C : - VERSION_B_CHIP_88C; + version = (enum version_8192c) (CHIP_VER_B | + ((value32 & TYPE_ID) ? CHIP_92C_BITMASK : 0) | + ((value32 & VENDOR_ID) ? CHIP_VENDOR_UMC : 0)); + if ((!IS_CHIP_VENDOR_UMC(version)) && (value32 & + CHIP_VER_RTL_MASK)) { + version = (enum version_8192c)(version | + ((((value32 & CHIP_VER_RTL_MASK) == BIT(12)) + ? 
CHIP_VENDOR_UMC_B_CUT : CHIP_UNKNOWN) | + CHIP_VENDOR_UMC)); + } } switch (version) { diff --git a/drivers/net/wireless/rtlwifi/rtl8192ce/sw.c b/drivers/net/wireless/rtlwifi/rtl8192ce/sw.c index 2c3b73366cd2..4e2a45839ed7 100644 --- a/drivers/net/wireless/rtlwifi/rtl8192ce/sw.c +++ b/drivers/net/wireless/rtlwifi/rtl8192ce/sw.c @@ -162,10 +162,12 @@ int rtl92c_init_sw_vars(struct ieee80211_hw *hw) /* request fw */ if (IS_VENDOR_UMC_A_CUT(rtlhal->version) && - !IS_92C_SERIAL(rtlhal->version)) + !IS_92C_SERIAL(rtlhal->version)) { rtlpriv->cfg->fw_name = "rtlwifi/rtl8192cfwU.bin"; - else if (IS_81xxC_VENDOR_UMC_B_CUT(rtlhal->version)) + } else if (IS_81xxC_VENDOR_UMC_B_CUT(rtlhal->version)) { rtlpriv->cfg->fw_name = "rtlwifi/rtl8192cfwU_B.bin"; + pr_info("****** This B_CUT device may not work with kernels 3.6 and earlier\n"); + } rtlpriv->max_fw_size = 0x4000; pr_info("Using firmware %s\n", rtlpriv->cfg->fw_name); diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/sw.c b/drivers/net/wireless/rtlwifi/rtl8192cu/sw.c index 5bd408566350..8cf41bb98837 100644 --- a/drivers/net/wireless/rtlwifi/rtl8192cu/sw.c +++ b/drivers/net/wireless/rtlwifi/rtl8192cu/sw.c @@ -285,6 +285,7 @@ static struct usb_device_id rtl8192c_usb_ids[] = { {RTL_USB_DEVICE(USB_VENDER_ID_REALTEK, 0x817f, rtl92cu_hal_cfg)}, /* RTL8188CUS-VL */ {RTL_USB_DEVICE(USB_VENDER_ID_REALTEK, 0x818a, rtl92cu_hal_cfg)}, + {RTL_USB_DEVICE(USB_VENDER_ID_REALTEK, 0x819a, rtl92cu_hal_cfg)}, /* 8188 Combo for BC4 */ {RTL_USB_DEVICE(USB_VENDER_ID_REALTEK, 0x8754, rtl92cu_hal_cfg)}, @@ -297,6 +298,7 @@ static struct usb_device_id rtl8192c_usb_ids[] = { /*=== Customer ID ===*/ /****** 8188CU ********/ {RTL_USB_DEVICE(0x050d, 0x1102, rtl92cu_hal_cfg)}, /*Belkin - Edimax*/ + {RTL_USB_DEVICE(0x050d, 0x11f2, rtl92cu_hal_cfg)}, /*Belkin - ISY*/ {RTL_USB_DEVICE(0x06f8, 0xe033, rtl92cu_hal_cfg)}, /*Hercules - Edimax*/ {RTL_USB_DEVICE(0x07b8, 0x8188, rtl92cu_hal_cfg)}, /*Abocom - Abocom*/ {RTL_USB_DEVICE(0x07b8, 0x8189, rtl92cu_hal_cfg)}, /*Funai - Abocom*/ diff --git a/drivers/net/wireless/rtlwifi/rtl8192se/phy.c b/drivers/net/wireless/rtlwifi/rtl8192se/phy.c index 4a499928e4c6..a2a5c92a1807 100644 --- a/drivers/net/wireless/rtlwifi/rtl8192se/phy.c +++ b/drivers/net/wireless/rtlwifi/rtl8192se/phy.c @@ -1247,6 +1247,9 @@ static void _rtl92s_phy_get_txpower_index(struct ieee80211_hw *hw, u8 channel, /* Read HT 40 OFDM TX power */ ofdmpowerLevel[0] = rtlefuse->txpwrlevel_ht40_2s[0][index]; ofdmpowerLevel[1] = rtlefuse->txpwrlevel_ht40_2s[1][index]; + } else { + ofdmpowerLevel[0] = 0; + ofdmpowerLevel[1] = 0; } } diff --git a/drivers/net/wireless/rtlwifi/usb.c b/drivers/net/wireless/rtlwifi/usb.c index aa970fc18a21..6ce848457d16 100644 --- a/drivers/net/wireless/rtlwifi/usb.c +++ b/drivers/net/wireless/rtlwifi/usb.c @@ -42,8 +42,12 @@ static void usbctrl_async_callback(struct urb *urb) { - if (urb) - kfree(urb->context); + if (urb) { + /* free dr */ + kfree(urb->setup_packet); + /* free databuf */ + kfree(urb->transfer_buffer); + } } static int _usbctrl_vendorreq_async_write(struct usb_device *udev, u8 request, @@ -55,39 +59,47 @@ static int _usbctrl_vendorreq_async_write(struct usb_device *udev, u8 request, u8 reqtype; struct usb_ctrlrequest *dr; struct urb *urb; - struct rtl819x_async_write_data { - u8 data[REALTEK_USB_VENQT_MAX_BUF_SIZE]; - struct usb_ctrlrequest dr; - } *buf; + const u16 databuf_maxlen = REALTEK_USB_VENQT_MAX_BUF_SIZE; + u8 *databuf; + + if (WARN_ON_ONCE(len > databuf_maxlen)) + len = databuf_maxlen; pipe = usb_sndctrlpipe(udev, 0); /* 
write_out */ reqtype = REALTEK_USB_VENQT_WRITE; - buf = kmalloc(sizeof(*buf), GFP_ATOMIC); - if (!buf) + dr = kmalloc(sizeof(*dr), GFP_ATOMIC); + if (!dr) + return -ENOMEM; + + databuf = kmalloc(databuf_maxlen, GFP_ATOMIC); + if (!databuf) { + kfree(dr); return -ENOMEM; + } urb = usb_alloc_urb(0, GFP_ATOMIC); if (!urb) { - kfree(buf); + kfree(databuf); + kfree(dr); return -ENOMEM; } - dr = &buf->dr; - dr->bRequestType = reqtype; dr->bRequest = request; dr->wValue = cpu_to_le16(value); dr->wIndex = cpu_to_le16(index); dr->wLength = cpu_to_le16(len); /* data are already in little-endian order */ - memcpy(buf, pdata, len); + memcpy(databuf, pdata, len); usb_fill_control_urb(urb, udev, pipe, - (unsigned char *)dr, buf, len, - usbctrl_async_callback, buf); + (unsigned char *)dr, databuf, len, + usbctrl_async_callback, NULL); rc = usb_submit_urb(urb, GFP_ATOMIC); - if (rc < 0) - kfree(buf); + if (rc < 0) { + kfree(databuf); + kfree(dr); + } usb_free_urb(urb); return rc; } @@ -210,17 +222,16 @@ static void _usb_writeN_sync(struct rtl_priv *rtlpriv, u32 addr, void *data, u16 index = REALTEK_USB_VENQT_CMD_IDX; int pipe = usb_sndctrlpipe(udev, 0); /* write_out */ u8 *buffer; - dma_addr_t dma_addr; - wvalue = (u16)(addr&0x0000ffff); - buffer = usb_alloc_coherent(udev, (size_t)len, GFP_ATOMIC, &dma_addr); + wvalue = (u16)(addr & 0x0000ffff); + buffer = kmalloc(len, GFP_ATOMIC); if (!buffer) return; memcpy(buffer, data, len); usb_control_msg(udev, pipe, request, reqtype, wvalue, index, buffer, len, 50); - usb_free_coherent(udev, (size_t)len, buffer, dma_addr); + kfree(buffer); } static void _rtl_usb_io_handler_init(struct device *dev, @@ -543,8 +554,8 @@ static void _rtl_rx_pre_process(struct ieee80211_hw *hw, struct sk_buff *skb) WARN_ON(skb_queue_empty(&rx_queue)); while (!skb_queue_empty(&rx_queue)) { _skb = skb_dequeue(&rx_queue); - _rtl_usb_rx_process_agg(hw, skb); - ieee80211_rx_irqsafe(hw, skb); + _rtl_usb_rx_process_agg(hw, _skb); + ieee80211_rx_irqsafe(hw, _skb); } } diff --git a/drivers/net/xen-netback/common.h b/drivers/net/xen-netback/common.h index 94b79c3338c4..9d7f1723dd8f 100644 --- a/drivers/net/xen-netback/common.h +++ b/drivers/net/xen-netback/common.h @@ -151,6 +151,9 @@ void xen_netbk_queue_tx_skb(struct xenvif *vif, struct sk_buff *skb); /* Notify xenvif that ring now has space to send an skb to the frontend */ void xenvif_notify_tx_completion(struct xenvif *vif); +/* Prevent the device from generating any further traffic. 
*/ +void xenvif_carrier_off(struct xenvif *vif); + /* Returns number of ring slots required to send an skb to the frontend */ unsigned int xen_netbk_count_skb_slots(struct xenvif *vif, struct sk_buff *skb); diff --git a/drivers/net/xen-netback/interface.c b/drivers/net/xen-netback/interface.c index b7d41f8c338a..221f4265f7d6 100644 --- a/drivers/net/xen-netback/interface.c +++ b/drivers/net/xen-netback/interface.c @@ -132,6 +132,7 @@ static void xenvif_up(struct xenvif *vif) static void xenvif_down(struct xenvif *vif) { disable_irq(vif->irq); + del_timer_sync(&vif->credit_timeout); xen_netbk_deschedule_xenvif(vif); xen_netbk_remove_xenvif(vif); } @@ -343,23 +344,26 @@ err: return err; } -void xenvif_disconnect(struct xenvif *vif) +void xenvif_carrier_off(struct xenvif *vif) { struct net_device *dev = vif->dev; - if (netif_carrier_ok(dev)) { - rtnl_lock(); - netif_carrier_off(dev); /* discard queued packets */ - if (netif_running(dev)) - xenvif_down(vif); - rtnl_unlock(); - xenvif_put(vif); - } + + rtnl_lock(); + netif_carrier_off(dev); /* discard queued packets */ + if (netif_running(dev)) + xenvif_down(vif); + rtnl_unlock(); + xenvif_put(vif); +} + +void xenvif_disconnect(struct xenvif *vif) +{ + if (netif_carrier_ok(vif->dev)) + xenvif_carrier_off(vif); atomic_dec(&vif->refcnt); wait_event(vif->waiting_to_free, atomic_read(&vif->refcnt) == 0); - del_timer_sync(&vif->credit_timeout); - if (vif->irq) unbind_from_irqhandler(vif->irq, vif); diff --git a/drivers/net/xen-netback/netback.c b/drivers/net/xen-netback/netback.c index 2596401308a8..2bdf798b3d9c 100644 --- a/drivers/net/xen-netback/netback.c +++ b/drivers/net/xen-netback/netback.c @@ -146,7 +146,8 @@ void xen_netbk_remove_xenvif(struct xenvif *vif) atomic_dec(&netbk->netfront_count); } -static void xen_netbk_idx_release(struct xen_netbk *netbk, u16 pending_idx); +static void xen_netbk_idx_release(struct xen_netbk *netbk, u16 pending_idx, + u8 status); static void make_tx_response(struct xenvif *vif, struct xen_netif_tx_request *txp, s8 st); @@ -851,7 +852,7 @@ static void netbk_tx_err(struct xenvif *vif, do { make_tx_response(vif, txp, XEN_NETIF_RSP_ERROR); - if (cons >= end) + if (cons == end) break; txp = RING_GET_REQUEST(&vif->tx, cons++); } while (1); @@ -860,6 +861,13 @@ static void netbk_tx_err(struct xenvif *vif, xenvif_put(vif); } +static void netbk_fatal_tx_err(struct xenvif *vif) +{ + netdev_err(vif->dev, "fatal error; disabling device\n"); + xenvif_carrier_off(vif); + xenvif_put(vif); +} + static int netbk_count_requests(struct xenvif *vif, struct xen_netif_tx_request *first, struct xen_netif_tx_request *txp, @@ -873,29 +881,33 @@ static int netbk_count_requests(struct xenvif *vif, do { if (frags >= work_to_do) { - netdev_dbg(vif->dev, "Need more frags\n"); - return -frags; + netdev_err(vif->dev, "Need more frags\n"); + netbk_fatal_tx_err(vif); + return -ENODATA; } if (unlikely(frags >= MAX_SKB_FRAGS)) { - netdev_dbg(vif->dev, "Too many frags\n"); - return -frags; + netdev_err(vif->dev, "Too many frags\n"); + netbk_fatal_tx_err(vif); + return -E2BIG; } memcpy(txp, RING_GET_REQUEST(&vif->tx, cons + frags), sizeof(*txp)); if (txp->size > first->size) { - netdev_dbg(vif->dev, "Frags galore\n"); - return -frags; + netdev_err(vif->dev, "Frag is bigger than frame.\n"); + netbk_fatal_tx_err(vif); + return -EIO; } first->size -= txp->size; frags++; if (unlikely((txp->offset + txp->size) > PAGE_SIZE)) { - netdev_dbg(vif->dev, "txp->offset: %x, size: %u\n", + netdev_err(vif->dev, "txp->offset: %x, size: %u\n", txp->offset, 
txp->size); - return -frags; + netbk_fatal_tx_err(vif); + return -EINVAL; } } while ((txp++)->flags & XEN_NETTXF_more_data); return frags; @@ -938,7 +950,7 @@ static struct gnttab_copy *xen_netbk_get_requests(struct xen_netbk *netbk, pending_idx = netbk->pending_ring[index]; page = xen_netbk_alloc_page(netbk, skb, pending_idx); if (!page) - return NULL; + goto err; gop->source.u.ref = txp->gref; gop->source.domid = vif->domid; @@ -960,6 +972,17 @@ static struct gnttab_copy *xen_netbk_get_requests(struct xen_netbk *netbk, } return gop; +err: + /* Unwind, freeing all pages and sending error responses. */ + while (i-- > start) { + xen_netbk_idx_release(netbk, frag_get_pending_idx(&frags[i]), + XEN_NETIF_RSP_ERROR); + } + /* The head too, if necessary. */ + if (start) + xen_netbk_idx_release(netbk, pending_idx, XEN_NETIF_RSP_ERROR); + + return NULL; } static int xen_netbk_tx_check_gop(struct xen_netbk *netbk, @@ -968,30 +991,20 @@ static int xen_netbk_tx_check_gop(struct xen_netbk *netbk, { struct gnttab_copy *gop = *gopp; u16 pending_idx = *((u16 *)skb->data); - struct pending_tx_info *pending_tx_info = netbk->pending_tx_info; - struct xenvif *vif = pending_tx_info[pending_idx].vif; - struct xen_netif_tx_request *txp; struct skb_shared_info *shinfo = skb_shinfo(skb); int nr_frags = shinfo->nr_frags; int i, err, start; /* Check status of header. */ err = gop->status; - if (unlikely(err)) { - pending_ring_idx_t index; - index = pending_index(netbk->pending_prod++); - txp = &pending_tx_info[pending_idx].req; - make_tx_response(vif, txp, XEN_NETIF_RSP_ERROR); - netbk->pending_ring[index] = pending_idx; - xenvif_put(vif); - } + if (unlikely(err)) + xen_netbk_idx_release(netbk, pending_idx, XEN_NETIF_RSP_ERROR); /* Skip first skb fragment if it is on same page as header fragment. */ start = (frag_get_pending_idx(&shinfo->frags[0]) == pending_idx); for (i = start; i < nr_frags; i++) { int j, newerr; - pending_ring_idx_t index; pending_idx = frag_get_pending_idx(&shinfo->frags[i]); @@ -1000,16 +1013,12 @@ static int xen_netbk_tx_check_gop(struct xen_netbk *netbk, if (likely(!newerr)) { /* Had a previous error? Invalidate this fragment. */ if (unlikely(err)) - xen_netbk_idx_release(netbk, pending_idx); + xen_netbk_idx_release(netbk, pending_idx, XEN_NETIF_RSP_OKAY); continue; } /* Error on this fragment: respond to client with an error. */ - txp = &netbk->pending_tx_info[pending_idx].req; - make_tx_response(vif, txp, XEN_NETIF_RSP_ERROR); - index = pending_index(netbk->pending_prod++); - netbk->pending_ring[index] = pending_idx; - xenvif_put(vif); + xen_netbk_idx_release(netbk, pending_idx, XEN_NETIF_RSP_ERROR); /* Not the first error? Preceding frags already invalidated. */ if (err) @@ -1017,10 +1026,10 @@ static int xen_netbk_tx_check_gop(struct xen_netbk *netbk, /* First error: invalidate header and preceding fragments. */ pending_idx = *((u16 *)skb->data); - xen_netbk_idx_release(netbk, pending_idx); + xen_netbk_idx_release(netbk, pending_idx, XEN_NETIF_RSP_OKAY); for (j = start; j < i; j++) { pending_idx = frag_get_pending_idx(&shinfo->frags[j]); - xen_netbk_idx_release(netbk, pending_idx); + xen_netbk_idx_release(netbk, pending_idx, XEN_NETIF_RSP_OKAY); } /* Remember the error: invalidate all subsequent fragments. 
*/ @@ -1054,7 +1063,7 @@ static void xen_netbk_fill_frags(struct xen_netbk *netbk, struct sk_buff *skb) /* Take an extra reference to offset xen_netbk_idx_release */ get_page(netbk->mmap_pages[pending_idx]); - xen_netbk_idx_release(netbk, pending_idx); + xen_netbk_idx_release(netbk, pending_idx, XEN_NETIF_RSP_OKAY); } } @@ -1067,7 +1076,8 @@ static int xen_netbk_get_extras(struct xenvif *vif, do { if (unlikely(work_to_do-- <= 0)) { - netdev_dbg(vif->dev, "Missing extra info\n"); + netdev_err(vif->dev, "Missing extra info\n"); + netbk_fatal_tx_err(vif); return -EBADR; } @@ -1076,8 +1086,9 @@ static int xen_netbk_get_extras(struct xenvif *vif, if (unlikely(!extra.type || extra.type >= XEN_NETIF_EXTRA_TYPE_MAX)) { vif->tx.req_cons = ++cons; - netdev_dbg(vif->dev, + netdev_err(vif->dev, "Invalid extra type: %d\n", extra.type); + netbk_fatal_tx_err(vif); return -EINVAL; } @@ -1093,13 +1104,15 @@ static int netbk_set_skb_gso(struct xenvif *vif, struct xen_netif_extra_info *gso) { if (!gso->u.gso.size) { - netdev_dbg(vif->dev, "GSO size must not be zero.\n"); + netdev_err(vif->dev, "GSO size must not be zero.\n"); + netbk_fatal_tx_err(vif); return -EINVAL; } /* Currently only TCPv4 S.O. is supported. */ if (gso->u.gso.type != XEN_NETIF_GSO_TYPE_TCPV4) { - netdev_dbg(vif->dev, "Bad GSO type %d.\n", gso->u.gso.type); + netdev_err(vif->dev, "Bad GSO type %d.\n", gso->u.gso.type); + netbk_fatal_tx_err(vif); return -EINVAL; } @@ -1236,9 +1249,25 @@ static unsigned xen_netbk_tx_build_gops(struct xen_netbk *netbk) /* Get a netif from the list with work to do. */ vif = poll_net_schedule_list(netbk); + /* This can sometimes happen because the test of + * list_empty(net_schedule_list) at the top of the + * loop is unlocked. Just go back and have another + * look. + */ if (!vif) continue; + if (vif->tx.sring->req_prod - vif->tx.req_cons > + XEN_NETIF_TX_RING_SIZE) { + netdev_err(vif->dev, + "Impossible number of requests. " + "req_prod %d, req_cons %d, size %ld\n", + vif->tx.sring->req_prod, vif->tx.req_cons, + XEN_NETIF_TX_RING_SIZE); + netbk_fatal_tx_err(vif); + continue; + } + RING_FINAL_CHECK_FOR_REQUESTS(&vif->tx, work_to_do); if (!work_to_do) { xenvif_put(vif); @@ -1266,17 +1295,14 @@ static unsigned xen_netbk_tx_build_gops(struct xen_netbk *netbk) work_to_do = xen_netbk_get_extras(vif, extras, work_to_do); idx = vif->tx.req_cons; - if (unlikely(work_to_do < 0)) { - netbk_tx_err(vif, &txreq, idx); + if (unlikely(work_to_do < 0)) continue; - } } ret = netbk_count_requests(vif, &txreq, txfrags, work_to_do); - if (unlikely(ret < 0)) { - netbk_tx_err(vif, &txreq, idx - ret); + if (unlikely(ret < 0)) continue; - } + idx += ret; if (unlikely(txreq.size < ETH_HLEN)) { @@ -1288,11 +1314,11 @@ static unsigned xen_netbk_tx_build_gops(struct xen_netbk *netbk) /* No crossing a page as the payload mustn't fragment. */ if (unlikely((txreq.offset + txreq.size) > PAGE_SIZE)) { - netdev_dbg(vif->dev, + netdev_err(vif->dev, "txreq.offset: %x, size: %u, end: %lu\n", txreq.offset, txreq.size, (txreq.offset&~PAGE_MASK) + txreq.size); - netbk_tx_err(vif, &txreq, idx); + netbk_fatal_tx_err(vif); continue; } @@ -1320,8 +1346,8 @@ static unsigned xen_netbk_tx_build_gops(struct xen_netbk *netbk) gso = &extras[XEN_NETIF_EXTRA_TYPE_GSO - 1]; if (netbk_set_skb_gso(vif, skb, gso)) { + /* Failure in netbk_set_skb_gso is fatal. 
*/ kfree_skb(skb); - netbk_tx_err(vif, &txreq, idx); continue; } } @@ -1420,7 +1446,7 @@ static void xen_netbk_tx_submit(struct xen_netbk *netbk) txp->size -= data_len; } else { /* Schedule a response immediately. */ - xen_netbk_idx_release(netbk, pending_idx); + xen_netbk_idx_release(netbk, pending_idx, XEN_NETIF_RSP_OKAY); } if (txp->flags & XEN_NETTXF_csum_blank) @@ -1475,7 +1501,8 @@ static void xen_netbk_tx_action(struct xen_netbk *netbk) } -static void xen_netbk_idx_release(struct xen_netbk *netbk, u16 pending_idx) +static void xen_netbk_idx_release(struct xen_netbk *netbk, u16 pending_idx, + u8 status) { struct xenvif *vif; struct pending_tx_info *pending_tx_info; @@ -1489,7 +1516,7 @@ static void xen_netbk_idx_release(struct xen_netbk *netbk, u16 pending_idx) vif = pending_tx_info->vif; - make_tx_response(vif, &pending_tx_info->req, XEN_NETIF_RSP_OKAY); + make_tx_response(vif, &pending_tx_info->req, status); index = pending_index(netbk->pending_prod++); netbk->pending_ring[index] = pending_idx; diff --git a/drivers/nfc/pn533.c b/drivers/nfc/pn533.c index cb6204f78300..692bd1e8d6b1 100644 --- a/drivers/nfc/pn533.c +++ b/drivers/nfc/pn533.c @@ -1278,11 +1278,14 @@ static void pn533_deactivate_target(struct nfc_dev *nfc_dev, u32 target_idx) static int pn533_in_dep_link_up_complete(struct pn533 *dev, void *arg, u8 *params, int params_len) { - struct pn533_cmd_jump_dep *cmd; struct pn533_cmd_jump_dep_response *resp; struct nfc_target nfc_target; u8 target_gt_len; int rc; + struct pn533_cmd_jump_dep *cmd = (struct pn533_cmd_jump_dep *)arg; + u8 active = cmd->active; + + kfree(arg); if (params_len == -ENOENT) { nfc_dev_dbg(&dev->interface->dev, ""); @@ -1304,7 +1307,6 @@ static int pn533_in_dep_link_up_complete(struct pn533 *dev, void *arg, } resp = (struct pn533_cmd_jump_dep_response *) params; - cmd = (struct pn533_cmd_jump_dep *) arg; rc = resp->status & PN533_CMD_RET_MASK; if (rc != PN533_CMD_RET_SUCCESS) { nfc_dev_err(&dev->interface->dev, @@ -1334,7 +1336,7 @@ static int pn533_in_dep_link_up_complete(struct pn533 *dev, void *arg, if (rc == 0) rc = nfc_dep_link_is_up(dev->nfc_dev, dev->nfc_dev->targets[0].idx, - !cmd->active, NFC_RF_INITIATOR); + !active, NFC_RF_INITIATOR); return 0; } @@ -1385,12 +1387,8 @@ static int pn533_dep_link_up(struct nfc_dev *nfc_dev, int target_idx, rc = pn533_send_cmd_frame_async(dev, dev->out_frame, dev->in_frame, dev->in_maxlen, pn533_in_dep_link_up_complete, cmd, GFP_KERNEL); - if (rc) - goto out; - - -out: - kfree(cmd); + if (rc < 0) + kfree(cmd); return rc; } diff --git a/drivers/pci/hotplug/acpiphp_glue.c b/drivers/pci/hotplug/acpiphp_glue.c index 806c44fa645a..09bf37721842 100644 --- a/drivers/pci/hotplug/acpiphp_glue.c +++ b/drivers/pci/hotplug/acpiphp_glue.c @@ -132,6 +132,15 @@ register_slot(acpi_handle handle, u32 lvl, void *context, void **rv) if (!acpi_pci_check_ejectable(pbus, handle) && !is_dock_device(handle)) return AE_OK; + status = acpi_evaluate_integer(handle, "_ADR", NULL, &adr); + if (ACPI_FAILURE(status)) { + warn("can't evaluate _ADR (%#x)\n", status); + return AE_OK; + } + + device = (adr >> 16) & 0xffff; + function = adr & 0xffff; + pdev = pbus->self; if (pdev && pci_is_pcie(pdev)) { tmp = acpi_find_root_bridge_handle(pdev); @@ -144,10 +153,6 @@ register_slot(acpi_handle handle, u32 lvl, void *context, void **rv) } } - acpi_evaluate_integer(handle, "_ADR", NULL, &adr); - device = (adr >> 16) & 0xffff; - function = adr & 0xffff; - newfunc = kzalloc(sizeof(struct acpiphp_func), GFP_KERNEL); if (!newfunc) return AE_NO_MEMORY; diff 
--git a/drivers/pci/hotplug/pciehp.h b/drivers/pci/hotplug/pciehp.h index 4b7cce1de6ec..a321b77a5e82 100644 --- a/drivers/pci/hotplug/pciehp.h +++ b/drivers/pci/hotplug/pciehp.h @@ -44,7 +44,6 @@ extern bool pciehp_poll_mode; extern int pciehp_poll_time; extern bool pciehp_debug; extern bool pciehp_force; -extern struct workqueue_struct *pciehp_wq; #define dbg(format, arg...) \ do { \ @@ -78,6 +77,7 @@ struct slot { struct hotplug_slot *hotplug_slot; struct delayed_work work; /* work for button event */ struct mutex lock; + struct workqueue_struct *wq; }; struct event_info { diff --git a/drivers/pci/hotplug/pciehp_core.c b/drivers/pci/hotplug/pciehp_core.c index 365c6b96c642..9e39df969560 100644 --- a/drivers/pci/hotplug/pciehp_core.c +++ b/drivers/pci/hotplug/pciehp_core.c @@ -42,7 +42,6 @@ bool pciehp_debug; bool pciehp_poll_mode; int pciehp_poll_time; bool pciehp_force; -struct workqueue_struct *pciehp_wq; #define DRIVER_VERSION "0.4" #define DRIVER_AUTHOR "Dan Zink <dan.zink@compaq.com>, Greg Kroah-Hartman <greg@kroah.com>, Dely Sy <dely.l.sy@intel.com>" @@ -340,18 +339,13 @@ static int __init pcied_init(void) { int retval = 0; - pciehp_wq = alloc_workqueue("pciehp", 0, 0); - if (!pciehp_wq) - return -ENOMEM; - pciehp_firmware_init(); retval = pcie_port_service_register(&hpdriver_portdrv); dbg("pcie_port_service_register = %d\n", retval); info(DRIVER_DESC " version: " DRIVER_VERSION "\n"); - if (retval) { - destroy_workqueue(pciehp_wq); + if (retval) dbg("Failure to register service\n"); - } + return retval; } @@ -359,7 +353,6 @@ static void __exit pcied_cleanup(void) { dbg("unload_pciehpd()\n"); pcie_port_service_unregister(&hpdriver_portdrv); - destroy_workqueue(pciehp_wq); info(DRIVER_DESC " version: " DRIVER_VERSION " unloaded\n"); } diff --git a/drivers/pci/hotplug/pciehp_ctrl.c b/drivers/pci/hotplug/pciehp_ctrl.c index 27f44295a657..38f018679175 100644 --- a/drivers/pci/hotplug/pciehp_ctrl.c +++ b/drivers/pci/hotplug/pciehp_ctrl.c @@ -49,7 +49,7 @@ static int queue_interrupt_event(struct slot *p_slot, u32 event_type) info->p_slot = p_slot; INIT_WORK(&info->work, interrupt_event_handler); - queue_work(pciehp_wq, &info->work); + queue_work(p_slot->wq, &info->work); return 0; } @@ -344,7 +344,7 @@ void pciehp_queue_pushbutton_work(struct work_struct *work) kfree(info); goto out; } - queue_work(pciehp_wq, &info->work); + queue_work(p_slot->wq, &info->work); out: mutex_unlock(&p_slot->lock); } @@ -377,7 +377,7 @@ static void handle_button_press_event(struct slot *p_slot) if (ATTN_LED(ctrl)) pciehp_set_attention_status(p_slot, 0); - queue_delayed_work(pciehp_wq, &p_slot->work, 5*HZ); + queue_delayed_work(p_slot->wq, &p_slot->work, 5*HZ); break; case BLINKINGOFF_STATE: case BLINKINGON_STATE: @@ -439,7 +439,7 @@ static void handle_surprise_event(struct slot *p_slot) else p_slot->state = POWERON_STATE; - queue_work(pciehp_wq, &info->work); + queue_work(p_slot->wq, &info->work); } static void interrupt_event_handler(struct work_struct *work) diff --git a/drivers/pci/hotplug/pciehp_hpc.c b/drivers/pci/hotplug/pciehp_hpc.c index a960faec1021..9dd2c01a2e18 100644 --- a/drivers/pci/hotplug/pciehp_hpc.c +++ b/drivers/pci/hotplug/pciehp_hpc.c @@ -874,23 +874,32 @@ static void pcie_shutdown_notification(struct controller *ctrl) static int pcie_init_slot(struct controller *ctrl) { struct slot *slot; + char name[32]; slot = kzalloc(sizeof(*slot), GFP_KERNEL); if (!slot) return -ENOMEM; + snprintf(name, sizeof(name), "pciehp-%u", PSN(ctrl)); + slot->wq = alloc_workqueue(name, 0, 0); + if (!slot->wq) 
+ goto abort; + slot->ctrl = ctrl; mutex_init(&slot->lock); INIT_DELAYED_WORK(&slot->work, pciehp_queue_pushbutton_work); ctrl->slot = slot; return 0; +abort: + kfree(slot); + return -ENOMEM; } static void pcie_cleanup_slot(struct controller *ctrl) { struct slot *slot = ctrl->slot; cancel_delayed_work(&slot->work); - flush_workqueue(pciehp_wq); + destroy_workqueue(slot->wq); kfree(slot); } diff --git a/drivers/pci/hotplug/shpchp.h b/drivers/pci/hotplug/shpchp.h index ca64932e658b..1b69d955a31f 100644 --- a/drivers/pci/hotplug/shpchp.h +++ b/drivers/pci/hotplug/shpchp.h @@ -47,7 +47,6 @@ extern bool shpchp_poll_mode; extern int shpchp_poll_time; extern bool shpchp_debug; extern struct workqueue_struct *shpchp_wq; -extern struct workqueue_struct *shpchp_ordered_wq; #define dbg(format, arg...) \ do { \ diff --git a/drivers/pci/hotplug/shpchp_core.c b/drivers/pci/hotplug/shpchp_core.c index 7414fd9ad1d2..5f1f0d93dc13 100644 --- a/drivers/pci/hotplug/shpchp_core.c +++ b/drivers/pci/hotplug/shpchp_core.c @@ -40,7 +40,6 @@ bool shpchp_debug; bool shpchp_poll_mode; int shpchp_poll_time; struct workqueue_struct *shpchp_wq; -struct workqueue_struct *shpchp_ordered_wq; #define DRIVER_VERSION "0.4" #define DRIVER_AUTHOR "Dan Zink <dan.zink@compaq.com>, Greg Kroah-Hartman <greg@kroah.com>, Dely Sy <dely.l.sy@intel.com>" @@ -175,7 +174,6 @@ void cleanup_slots(struct controller *ctrl) list_del(&slot->slot_list); cancel_delayed_work(&slot->work); flush_workqueue(shpchp_wq); - flush_workqueue(shpchp_ordered_wq); pci_hp_deregister(slot->hotplug_slot); } } @@ -364,17 +362,10 @@ static int __init shpcd_init(void) if (!shpchp_wq) return -ENOMEM; - shpchp_ordered_wq = alloc_ordered_workqueue("shpchp_ordered", 0); - if (!shpchp_ordered_wq) { - destroy_workqueue(shpchp_wq); - return -ENOMEM; - } - retval = pci_register_driver(&shpc_driver); dbg("%s: pci_register_driver = %d\n", __func__, retval); info(DRIVER_DESC " version: " DRIVER_VERSION "\n"); if (retval) { - destroy_workqueue(shpchp_ordered_wq); destroy_workqueue(shpchp_wq); } return retval; @@ -384,7 +375,6 @@ static void __exit shpcd_cleanup(void) { dbg("unload_shpchpd()\n"); pci_unregister_driver(&shpc_driver); - destroy_workqueue(shpchp_ordered_wq); destroy_workqueue(shpchp_wq); info(DRIVER_DESC " version: " DRIVER_VERSION " unloaded\n"); } diff --git a/drivers/pci/hotplug/shpchp_ctrl.c b/drivers/pci/hotplug/shpchp_ctrl.c index b00b09bdd38a..bba5b3e0bf8a 100644 --- a/drivers/pci/hotplug/shpchp_ctrl.c +++ b/drivers/pci/hotplug/shpchp_ctrl.c @@ -456,7 +456,7 @@ void shpchp_queue_pushbutton_work(struct work_struct *work) kfree(info); goto out; } - queue_work(shpchp_ordered_wq, &info->work); + queue_work(shpchp_wq, &info->work); out: mutex_unlock(&p_slot->lock); } diff --git a/drivers/pci/pci-driver.c b/drivers/pci/pci-driver.c index 3cd3f4528133..54fc9880e392 100644 --- a/drivers/pci/pci-driver.c +++ b/drivers/pci/pci-driver.c @@ -952,6 +952,13 @@ static int pci_pm_poweroff_noirq(struct device *dev) if (!pci_dev->state_saved && !pci_is_bridge(pci_dev)) pci_prepare_to_sleep(pci_dev); + /* + * The reason for doing this here is the same as for the analogous code + * in pci_pm_suspend_noirq(). 
+ */ + if (pci_dev->class == PCI_CLASS_SERIAL_USB_EHCI) + pci_write_config_word(pci_dev, PCI_COMMAND, 0); + return 0; } diff --git a/drivers/pci/pcie/aer/aerdrv_core.c b/drivers/pci/pcie/aer/aerdrv_core.c index 0ca053538146..a56105b66f12 100644 --- a/drivers/pci/pcie/aer/aerdrv_core.c +++ b/drivers/pci/pcie/aer/aerdrv_core.c @@ -637,6 +637,7 @@ static void aer_recover_work_func(struct work_struct *work) continue; } do_recovery(pdev, entry.severity); + pci_dev_put(pdev); } } #endif diff --git a/drivers/pci/pcie/aspm.c b/drivers/pci/pcie/aspm.c index b500840a143b..474f22f304e4 100644 --- a/drivers/pci/pcie/aspm.c +++ b/drivers/pci/pcie/aspm.c @@ -798,6 +798,9 @@ void pcie_clear_aspm(struct pci_bus *bus) { struct pci_dev *child; + if (aspm_force) + return; + /* * Clear any ASPM setup that the firmware has carried out on this bus */ diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c index 5e1ca3c58a7d..63e0199f7c78 100644 --- a/drivers/pci/probe.c +++ b/drivers/pci/probe.c @@ -749,8 +749,10 @@ int __devinit pci_scan_bridge(struct pci_bus *bus, struct pci_dev *dev, int max, /* Check if setup is sensible at all */ if (!pass && - (primary != bus->number || secondary <= bus->number)) { - dev_dbg(&dev->dev, "bus configuration invalid, reconfiguring\n"); + (primary != bus->number || secondary <= bus->number || + secondary > subordinate)) { + dev_info(&dev->dev, "bridge configuration invalid ([bus %02x-%02x]), reconfiguring\n", + secondary, subordinate); broken = 1; } diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c index 4bf71028556b..680dbfa1b614 100644 --- a/drivers/pci/quirks.c +++ b/drivers/pci/quirks.c @@ -2708,7 +2708,7 @@ static void ricoh_mmc_fixup_r5c832(struct pci_dev *dev) if (PCI_FUNC(dev->devfn)) return; /* - * RICOH 0xe823 SD/MMC card reader fails to recognize + * RICOH 0xe822 and 0xe823 SD/MMC card readers fail to recognize * certain types of SD/MMC cards. Lowering the SD base * clock frequency from 200Mhz to 50Mhz fixes this issue. 
* @@ -2719,7 +2719,8 @@ static void ricoh_mmc_fixup_r5c832(struct pci_dev *dev) * 0xf9 - Key register for 0x150 * 0xfc - key register for 0xe1 */ - if (dev->device == PCI_DEVICE_ID_RICOH_R5CE823) { + if (dev->device == PCI_DEVICE_ID_RICOH_R5CE822 || + dev->device == PCI_DEVICE_ID_RICOH_R5CE823) { pci_write_config_byte(dev, 0xf9, 0xfc); pci_write_config_byte(dev, 0x150, 0x10); pci_write_config_byte(dev, 0xf9, 0x00); @@ -2746,6 +2747,8 @@ static void ricoh_mmc_fixup_r5c832(struct pci_dev *dev) } DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_RICOH, PCI_DEVICE_ID_RICOH_R5C832, ricoh_mmc_fixup_r5c832); DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_RICOH, PCI_DEVICE_ID_RICOH_R5C832, ricoh_mmc_fixup_r5c832); +DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_RICOH, PCI_DEVICE_ID_RICOH_R5CE822, ricoh_mmc_fixup_r5c832); +DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_RICOH, PCI_DEVICE_ID_RICOH_R5CE822, ricoh_mmc_fixup_r5c832); DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_RICOH, PCI_DEVICE_ID_RICOH_R5CE823, ricoh_mmc_fixup_r5c832); DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_RICOH, PCI_DEVICE_ID_RICOH_R5CE823, ricoh_mmc_fixup_r5c832); #endif /*CONFIG_MMC_RICOH_MMC*/ diff --git a/drivers/pci/remove.c b/drivers/pci/remove.c index fd77e2bde2e8..eae55c7b6470 100644 --- a/drivers/pci/remove.c +++ b/drivers/pci/remove.c @@ -19,6 +19,8 @@ static void pci_free_resources(struct pci_dev *dev) static void pci_stop_dev(struct pci_dev *dev) { + pci_pme_active(dev, false); + if (dev->is_added) { pci_proc_detach_device(dev); pci_remove_sysfs_dev_files(dev); diff --git a/drivers/pcmcia/pxa2xx_sharpsl.c b/drivers/pcmcia/pxa2xx_sharpsl.c index b066273b6b4f..7dd879ce514d 100644 --- a/drivers/pcmcia/pxa2xx_sharpsl.c +++ b/drivers/pcmcia/pxa2xx_sharpsl.c @@ -194,7 +194,7 @@ static void sharpsl_pcmcia_socket_suspend(struct soc_pcmcia_socket *skt) sharpsl_pcmcia_init_reset(skt); } -static struct pcmcia_low_level sharpsl_pcmcia_ops __initdata = { +static struct pcmcia_low_level sharpsl_pcmcia_ops = { .owner = THIS_MODULE, .hw_init = sharpsl_pcmcia_hw_init, .socket_state = sharpsl_pcmcia_socket_state, diff --git a/drivers/pcmcia/vrc4171_card.c b/drivers/pcmcia/vrc4171_card.c index 86e4a1a3c642..6bb02ab5dd9a 100644 --- a/drivers/pcmcia/vrc4171_card.c +++ b/drivers/pcmcia/vrc4171_card.c @@ -246,6 +246,7 @@ static int pccard_init(struct pcmcia_socket *sock) socket = &vrc4171_sockets[slot]; socket->csc_irq = search_nonuse_irq(); socket->io_irq = search_nonuse_irq(); + spin_lock_init(&socket->lock); return 0; } diff --git a/drivers/pinctrl/pinctrl-tegra30.c b/drivers/pinctrl/pinctrl-tegra30.c index 67b8b9418277..c4d20565980a 100644 --- a/drivers/pinctrl/pinctrl-tegra30.c +++ b/drivers/pinctrl/pinctrl-tegra30.c @@ -3403,7 +3403,6 @@ static const struct tegra_function tegra30_functions[] = { .odrain_reg = -1, \ .lock_reg = -1, \ .ioreset_reg = -1, \ - .rcv_sel_reg = -1, \ .drv_reg = ((r) - DRV_PINGROUP_REG_A), \ .drv_bank = 0, \ .hsm_bit = hsm_b, \ diff --git a/drivers/platform/x86/asus-laptop.c b/drivers/platform/x86/asus-laptop.c index e38f91be0b10..110c7778cbf9 100644 --- a/drivers/platform/x86/asus-laptop.c +++ b/drivers/platform/x86/asus-laptop.c @@ -863,9 +863,9 @@ static ssize_t show_infos(struct device *dev, * The significance of others is yet to be found. * If we don't find the method, we assume the device are present. 
*/ - rv = acpi_evaluate_integer(asus->handle, "HRWS", NULL, &temp); + rv = acpi_evaluate_integer(asus->handle, "HWRS", NULL, &temp); if (!ACPI_FAILURE(rv)) - len += sprintf(page + len, "HRWS value : %#x\n", + len += sprintf(page + len, "HWRS value : %#x\n", (uint) temp); /* * Another value for userspace: the ASYM method returns 0x02 for @@ -1751,9 +1751,9 @@ static int asus_laptop_get_info(struct asus_laptop *asus) * The significance of others is yet to be found. */ status = - acpi_evaluate_integer(asus->handle, "HRWS", NULL, &hwrs_result); + acpi_evaluate_integer(asus->handle, "HWRS", NULL, &hwrs_result); if (!ACPI_FAILURE(status)) - pr_notice(" HRWS returned %x", (int)hwrs_result); + pr_notice(" HWRS returned %x", (int)hwrs_result); if (!acpi_check_handle(asus->handle, METHOD_WL_STATUS, NULL)) asus->have_rsts = true; diff --git a/drivers/platform/x86/asus-nb-wmi.c b/drivers/platform/x86/asus-nb-wmi.c index 99a30b513137..6de14fd090a0 100644 --- a/drivers/platform/x86/asus-nb-wmi.c +++ b/drivers/platform/x86/asus-nb-wmi.c @@ -94,6 +94,10 @@ static const struct key_entry asus_nb_wmi_keymap[] = { { KE_KEY, 0x8A, { KEY_PROG1 } }, { KE_KEY, 0x95, { KEY_MEDIA } }, { KE_KEY, 0x99, { KEY_PHONE } }, + { KE_KEY, 0xA0, { KEY_SWITCHVIDEOMODE } }, /* SDSP HDMI only */ + { KE_KEY, 0xA1, { KEY_SWITCHVIDEOMODE } }, /* SDSP LCD + HDMI */ + { KE_KEY, 0xA2, { KEY_SWITCHVIDEOMODE } }, /* SDSP CRT + HDMI */ + { KE_KEY, 0xA3, { KEY_SWITCHVIDEOMODE } }, /* SDSP TV + HDMI */ { KE_KEY, 0xb5, { KEY_CALC } }, { KE_KEY, 0xc4, { KEY_KBDILLUMUP } }, { KE_KEY, 0xc5, { KEY_KBDILLUMDOWN } }, diff --git a/drivers/platform/x86/ibm_rtl.c b/drivers/platform/x86/ibm_rtl.c index 7481146a5b47..97c2be195efc 100644 --- a/drivers/platform/x86/ibm_rtl.c +++ b/drivers/platform/x86/ibm_rtl.c @@ -244,7 +244,7 @@ static int __init ibm_rtl_init(void) { if (force) pr_warn("module loaded by force\n"); /* first ensure that we are running on IBM HW */ - else if (efi_enabled || !dmi_check_system(ibm_rtl_dmi_table)) + else if (efi_enabled(EFI_BOOT) || !dmi_check_system(ibm_rtl_dmi_table)) return -ENODEV; /* Get the address for the Extended BIOS Data Area */ diff --git a/drivers/platform/x86/samsung-laptop.c b/drivers/platform/x86/samsung-laptop.c index e2a34b42ddc1..de9f432cf22d 100644 --- a/drivers/platform/x86/samsung-laptop.c +++ b/drivers/platform/x86/samsung-laptop.c @@ -26,6 +26,7 @@ #include <linux/seq_file.h> #include <linux/debugfs.h> #include <linux/ctype.h> +#include <linux/efi.h> #if (defined CONFIG_ACPI_VIDEO || defined CONFIG_ACPI_VIDEO_MODULE) #include <acpi/video.h> #endif @@ -1506,6 +1507,16 @@ static struct dmi_system_id __initdata samsung_dmi_table[] = { }, .driver_data = &samsung_broken_acpi_video, }, + { + .callback = samsung_dmi_matched, + .ident = "N250P", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "SAMSUNG ELECTRONICS CO., LTD."), + DMI_MATCH(DMI_PRODUCT_NAME, "N250P"), + DMI_MATCH(DMI_BOARD_NAME, "N250P"), + }, + .driver_data = &samsung_broken_acpi_video, + }, { }, }; MODULE_DEVICE_TABLE(dmi, samsung_dmi_table); @@ -1517,6 +1528,9 @@ static int __init samsung_init(void) struct samsung_laptop *samsung; int ret; + if (efi_enabled(EFI_BOOT)) + return -ENODEV; + quirks = &samsung_unknown; if (!force && !dmi_check_system(samsung_dmi_table)) return -ENODEV; diff --git a/drivers/pnp/pnpacpi/core.c b/drivers/pnp/pnpacpi/core.c index d21e8f59c84e..291906e8e9a9 100644 --- a/drivers/pnp/pnpacpi/core.c +++ b/drivers/pnp/pnpacpi/core.c @@ -58,7 +58,7 @@ static inline int __init is_exclusive_device(struct acpi_device *dev) if 
(!(('0' <= (c) && (c) <= '9') || ('A' <= (c) && (c) <= 'F'))) \ return 0 #define TEST_ALPHA(c) \ - if (!('@' <= (c) || (c) <= 'Z')) \ + if (!('A' <= (c) && (c) <= 'Z')) \ return 0 static int __init ispnpidacpi(const char *id) { @@ -95,6 +95,9 @@ static int pnpacpi_set_resources(struct pnp_dev *dev) return -ENODEV; } + if (WARN_ON_ONCE(acpi_dev != dev->data)) + dev->data = acpi_dev; + ret = pnpacpi_build_resource_template(dev, &buffer); if (ret) return ret; diff --git a/drivers/power/ab8500_btemp.c b/drivers/power/ab8500_btemp.c index d8bb99394ac0..2a6bf76baac4 100644 --- a/drivers/power/ab8500_btemp.c +++ b/drivers/power/ab8500_btemp.c @@ -1115,7 +1115,7 @@ static void __exit ab8500_btemp_exit(void) platform_driver_unregister(&ab8500_btemp_driver); } -subsys_initcall_sync(ab8500_btemp_init); +device_initcall(ab8500_btemp_init); module_exit(ab8500_btemp_exit); MODULE_LICENSE("GPL v2"); diff --git a/drivers/power/abx500_chargalg.c b/drivers/power/abx500_chargalg.c index 804b88c760d6..d8cd1517c842 100644 --- a/drivers/power/abx500_chargalg.c +++ b/drivers/power/abx500_chargalg.c @@ -1698,7 +1698,7 @@ static ssize_t abx500_chargalg_sysfs_charger(struct kobject *kobj, static struct attribute abx500_chargalg_en_charger = \ { .name = "chargalg", - .mode = S_IWUGO, + .mode = S_IWUSR, }; static struct attribute *abx500_chargalg_chg[] = { diff --git a/drivers/rapidio/devices/tsi721.c b/drivers/rapidio/devices/tsi721.c index 30d2072f480b..33471e11a738 100644 --- a/drivers/rapidio/devices/tsi721.c +++ b/drivers/rapidio/devices/tsi721.c @@ -439,6 +439,9 @@ static void tsi721_db_dpc(struct work_struct *work) " info %4.4x\n", DBELL_SID(idb.bytes), DBELL_TID(idb.bytes), DBELL_INF(idb.bytes)); } + + wr_ptr = ioread32(priv->regs + + TSI721_IDQ_WP(IDB_QUEUE)) % IDB_QSIZE; } iowrite32(rd_ptr & (IDB_QSIZE - 1), @@ -449,6 +452,10 @@ static void tsi721_db_dpc(struct work_struct *work) regval |= TSI721_SR_CHINT_IDBQRCV; iowrite32(regval, priv->regs + TSI721_SR_CHINTE(IDB_QUEUE)); + + wr_ptr = ioread32(priv->regs + TSI721_IDQ_WP(IDB_QUEUE)) % IDB_QSIZE; + if (wr_ptr != rd_ptr) + schedule_work(&priv->idb_work); } /** @@ -2155,7 +2162,7 @@ static int __devinit tsi721_probe(struct pci_dev *pdev, const struct pci_device_id *id) { struct tsi721_device *priv; - int i, cap; + int cap; int err; u32 regval; @@ -2175,12 +2182,15 @@ static int __devinit tsi721_probe(struct pci_dev *pdev, priv->pdev = pdev; #ifdef DEBUG + { + int i; for (i = 0; i <= PCI_STD_RESOURCE_END; i++) { dev_dbg(&pdev->dev, "res[%d] @ 0x%llx (0x%lx, 0x%lx)\n", i, (unsigned long long)pci_resource_start(pdev, i), (unsigned long)pci_resource_len(pdev, i), pci_resource_flags(pdev, i)); } + } #endif /* * Verify BAR configuration diff --git a/drivers/regulator/twl-regulator.c b/drivers/regulator/twl-regulator.c index 9cdfc389ca26..1eb7a1510937 100644 --- a/drivers/regulator/twl-regulator.c +++ b/drivers/regulator/twl-regulator.c @@ -1085,7 +1085,7 @@ TWL6025_ADJUSTABLE_LDO(LDO7, 0x74, 1000, 3300); TWL6025_ADJUSTABLE_LDO(LDO6, 0x60, 1000, 3300); TWL6025_ADJUSTABLE_LDO(LDOLN, 0x64, 1000, 3300); TWL6025_ADJUSTABLE_LDO(LDOUSB, 0x70, 1000, 3300); -TWL4030_FIXED_LDO(VINTANA2, 0x3f, 1500, 11, 100, 0x08); +TWL4030_FIXED_LDO(VINTANA1, 0x3f, 1500, 11, 100, 0x08); TWL4030_FIXED_LDO(VINTDIG, 0x47, 1500, 13, 100, 0x08); TWL4030_FIXED_LDO(VUSB1V5, 0x71, 1500, 17, 100, 0x08); TWL4030_FIXED_LDO(VUSB1V8, 0x74, 1800, 18, 100, 0x08); @@ -1166,7 +1166,7 @@ static const struct of_device_id twl_of_match[] __devinitconst = { TWL6025_OF_MATCH("ti,twl6025-ldo6", LDO6), 
TWL6025_OF_MATCH("ti,twl6025-ldoln", LDOLN), TWL6025_OF_MATCH("ti,twl6025-ldousb", LDOUSB), - TWLFIXED_OF_MATCH("ti,twl4030-vintana2", VINTANA2), + TWLFIXED_OF_MATCH("ti,twl4030-vintana1", VINTANA1), TWLFIXED_OF_MATCH("ti,twl4030-vintdig", VINTDIG), TWLFIXED_OF_MATCH("ti,twl4030-vusb1v5", VUSB1V5), TWLFIXED_OF_MATCH("ti,twl4030-vusb1v8", VUSB1V8), diff --git a/drivers/regulator/wm831x-dcdc.c b/drivers/regulator/wm831x-dcdc.c index ff810e787eac..97bb31d0bbef 100644 --- a/drivers/regulator/wm831x-dcdc.c +++ b/drivers/regulator/wm831x-dcdc.c @@ -330,7 +330,7 @@ static int wm831x_buckv_set_voltage(struct regulator_dev *rdev, if (vsel > dcdc->dvs_vsel) { ret = wm831x_set_bits(wm831x, dvs_reg, WM831X_DC1_DVS_VSEL_MASK, - dcdc->dvs_vsel); + vsel); if (ret == 0) dcdc->dvs_vsel = vsel; else diff --git a/drivers/remoteproc/Kconfig b/drivers/remoteproc/Kconfig index f8d818abf98c..811ff72df4db 100644 --- a/drivers/remoteproc/Kconfig +++ b/drivers/remoteproc/Kconfig @@ -5,6 +5,7 @@ config REMOTEPROC tristate depends on EXPERIMENTAL select FW_CONFIG + select VIRTIO config OMAP_REMOTEPROC tristate "OMAP remoteproc support" diff --git a/drivers/remoteproc/remoteproc_core.c b/drivers/remoteproc/remoteproc_core.c index 7591b9774d05..837cc40180aa 100644 --- a/drivers/remoteproc/remoteproc_core.c +++ b/drivers/remoteproc/remoteproc_core.c @@ -643,17 +643,10 @@ static int rproc_handle_carveout(struct rproc *rproc, dev_dbg(dev, "carveout rsc: da %x, pa %x, len %x, flags %x\n", rsc->da, rsc->pa, rsc->len, rsc->flags); - mapping = kzalloc(sizeof(*mapping), GFP_KERNEL); - if (!mapping) { - dev_err(dev, "kzalloc mapping failed\n"); - return -ENOMEM; - } - carveout = kzalloc(sizeof(*carveout), GFP_KERNEL); if (!carveout) { dev_err(dev, "kzalloc carveout failed\n"); - ret = -ENOMEM; - goto free_mapping; + return -ENOMEM; } va = dma_alloc_coherent(dev, rsc->len, &dma, GFP_KERNEL); @@ -683,11 +676,18 @@ static int rproc_handle_carveout(struct rproc *rproc, * physical address in this case. 
*/ if (rproc->domain) { + mapping = kzalloc(sizeof(*mapping), GFP_KERNEL); + if (!mapping) { + dev_err(dev, "kzalloc mapping failed\n"); + ret = -ENOMEM; + goto dma_free; + } + ret = iommu_map(rproc->domain, rsc->da, dma, rsc->len, rsc->flags); if (ret) { dev_err(dev, "iommu_map failed: %d\n", ret); - goto dma_free; + goto free_mapping; } /* @@ -728,12 +728,12 @@ static int rproc_handle_carveout(struct rproc *rproc, return 0; +free_mapping: + kfree(mapping); dma_free: dma_free_coherent(dev, rsc->len, va, dma); free_carv: kfree(carveout); -free_mapping: - kfree(mapping); return ret; } diff --git a/drivers/rtc/rtc-imxdi.c b/drivers/rtc/rtc-imxdi.c index d93a9608b1f0..bc744b490b02 100644 --- a/drivers/rtc/rtc-imxdi.c +++ b/drivers/rtc/rtc-imxdi.c @@ -392,6 +392,8 @@ static int dryice_rtc_probe(struct platform_device *pdev) if (imxdi->ioaddr == NULL) return -ENOMEM; + spin_lock_init(&imxdi->irq_lock); + imxdi->irq = platform_get_irq(pdev, 0); if (imxdi->irq < 0) return imxdi->irq; diff --git a/drivers/rtc/rtc-isl1208.c b/drivers/rtc/rtc-isl1208.c index dd2aeee6c66a..8f8c8ae3523a 100644 --- a/drivers/rtc/rtc-isl1208.c +++ b/drivers/rtc/rtc-isl1208.c @@ -494,6 +494,7 @@ isl1208_rtc_interrupt(int irq, void *data) { unsigned long timeout = jiffies + msecs_to_jiffies(1000); struct i2c_client *client = data; + struct rtc_device *rtc = i2c_get_clientdata(client); int handled = 0, sr, err; /* @@ -516,6 +517,8 @@ isl1208_rtc_interrupt(int irq, void *data) if (sr & ISL1208_REG_SR_ALM) { dev_dbg(&client->dev, "alarm!\n"); + rtc_update_irq(rtc, 1, RTC_IRQF | RTC_AF); + /* Clear the alarm */ sr &= ~ISL1208_REG_SR_ALM; sr = i2c_smbus_write_byte_data(client, ISL1208_REG_SR, sr); diff --git a/drivers/rtc/rtc-rs5c348.c b/drivers/rtc/rtc-rs5c348.c index 77074ccd2850..fd5c7af04ae5 100644 --- a/drivers/rtc/rtc-rs5c348.c +++ b/drivers/rtc/rtc-rs5c348.c @@ -122,9 +122,12 @@ rs5c348_rtc_read_time(struct device *dev, struct rtc_time *tm) tm->tm_min = bcd2bin(rxbuf[RS5C348_REG_MINS] & RS5C348_MINS_MASK); tm->tm_hour = bcd2bin(rxbuf[RS5C348_REG_HOURS] & RS5C348_HOURS_MASK); if (!pdata->rtc_24h) { - tm->tm_hour %= 12; - if (rxbuf[RS5C348_REG_HOURS] & RS5C348_BIT_PM) + if (rxbuf[RS5C348_REG_HOURS] & RS5C348_BIT_PM) { + tm->tm_hour -= 20; + tm->tm_hour %= 12; tm->tm_hour += 12; + } else + tm->tm_hour %= 12; } tm->tm_wday = bcd2bin(rxbuf[RS5C348_REG_WDAY] & RS5C348_WDAY_MASK); tm->tm_mday = bcd2bin(rxbuf[RS5C348_REG_DAY] & RS5C348_DAY_MASK); diff --git a/drivers/rtc/rtc-twl.c b/drivers/rtc/rtc-twl.c index 258abeabf624..63ccc0f9427f 100644 --- a/drivers/rtc/rtc-twl.c +++ b/drivers/rtc/rtc-twl.c @@ -495,6 +495,11 @@ static int __devinit twl_rtc_probe(struct platform_device *pdev) if (ret < 0) goto out1; + /* ensure interrupts are disabled, bootloaders can be strange */ + ret = twl_rtc_write_u8(0, REG_RTC_INTERRUPTS_REG); + if (ret < 0) + dev_warn(&pdev->dev, "unable to disable interrupt\n"); + /* init cached IRQ enable bits */ ret = twl_rtc_read_u8(&rtc_irq_bits, REG_RTC_INTERRUPTS_REG); if (ret < 0) diff --git a/drivers/rtc/rtc-vt8500.c b/drivers/rtc/rtc-vt8500.c index 9e94fb147c26..44878da1f064 100644 --- a/drivers/rtc/rtc-vt8500.c +++ b/drivers/rtc/rtc-vt8500.c @@ -69,7 +69,7 @@ | ALARM_SEC_BIT) #define VT8500_RTC_CR_ENABLE (1 << 0) /* Enable RTC */ -#define VT8500_RTC_CR_24H (1 << 1) /* 24h time format */ +#define VT8500_RTC_CR_12H (1 << 1) /* 12h time format */ #define VT8500_RTC_CR_SM_ENABLE (1 << 2) /* Enable periodic irqs */ #define VT8500_RTC_CR_SM_SEC (1 << 3) /* 0: 1Hz/60, 1: 1Hz */ #define VT8500_RTC_CR_CALIB (1 
<< 4) /* Enable calibration */ @@ -118,7 +118,7 @@ static int vt8500_rtc_read_time(struct device *dev, struct rtc_time *tm) tm->tm_min = bcd2bin((time & TIME_MIN_MASK) >> TIME_MIN_S); tm->tm_hour = bcd2bin((time & TIME_HOUR_MASK) >> TIME_HOUR_S); tm->tm_mday = bcd2bin(date & DATE_DAY_MASK); - tm->tm_mon = bcd2bin((date & DATE_MONTH_MASK) >> DATE_MONTH_S); + tm->tm_mon = bcd2bin((date & DATE_MONTH_MASK) >> DATE_MONTH_S) - 1; tm->tm_year = bcd2bin((date & DATE_YEAR_MASK) >> DATE_YEAR_S) + ((date >> DATE_CENTURY_S) & 1 ? 200 : 100); tm->tm_wday = (time & TIME_DOW_MASK) >> TIME_DOW_S; @@ -137,8 +137,9 @@ static int vt8500_rtc_set_time(struct device *dev, struct rtc_time *tm) } writel((bin2bcd(tm->tm_year - 100) << DATE_YEAR_S) - | (bin2bcd(tm->tm_mon) << DATE_MONTH_S) - | (bin2bcd(tm->tm_mday)), + | (bin2bcd(tm->tm_mon + 1) << DATE_MONTH_S) + | (bin2bcd(tm->tm_mday)) + | ((tm->tm_year >= 200) << DATE_CENTURY_S), vt8500_rtc->regbase + VT8500_RTC_DS); writel((bin2bcd(tm->tm_wday) << TIME_DOW_S) | (bin2bcd(tm->tm_hour) << TIME_HOUR_S) @@ -248,7 +249,7 @@ static int __devinit vt8500_rtc_probe(struct platform_device *pdev) } /* Enable RTC and set it to 24-hour mode */ - writel(VT8500_RTC_CR_ENABLE | VT8500_RTC_CR_24H, + writel(VT8500_RTC_CR_ENABLE, vt8500_rtc->regbase + VT8500_RTC_CR); vt8500_rtc->rtc = rtc_device_register("vt8500-rtc", &pdev->dev, diff --git a/drivers/s390/cio/device_pgid.c b/drivers/s390/cio/device_pgid.c index 07a4fd29f096..daa6b903aa91 100644 --- a/drivers/s390/cio/device_pgid.c +++ b/drivers/s390/cio/device_pgid.c @@ -234,7 +234,7 @@ static int pgid_cmp(struct pgid *p1, struct pgid *p2) * Determine pathgroup state from PGID data. */ static void pgid_analyze(struct ccw_device *cdev, struct pgid **p, - int *mismatch, int *reserved, u8 *reset) + int *mismatch, u8 *reserved, u8 *reset) { struct pgid *pgid = &cdev->private->pgid[0]; struct pgid *first = NULL; @@ -248,7 +248,7 @@ static void pgid_analyze(struct ccw_device *cdev, struct pgid **p, if ((cdev->private->pgid_valid_mask & lpm) == 0) continue; if (pgid->inf.ps.state2 == SNID_STATE2_RESVD_ELSE) - *reserved = 1; + *reserved |= lpm; if (pgid_is_reset(pgid)) { *reset |= lpm; continue; @@ -316,14 +316,14 @@ static void snid_done(struct ccw_device *cdev, int rc) struct subchannel *sch = to_subchannel(cdev->dev.parent); struct pgid *pgid; int mismatch = 0; - int reserved = 0; + u8 reserved = 0; u8 reset = 0; u8 donepm; if (rc) goto out; pgid_analyze(cdev, &pgid, &mismatch, &reserved, &reset); - if (reserved) + if (reserved == cdev->private->pgid_valid_mask) rc = -EUSERS; else if (mismatch) rc = -EOPNOTSUPP; @@ -336,7 +336,7 @@ static void snid_done(struct ccw_device *cdev, int rc) } out: CIO_MSG_EVENT(2, "snid: device 0.%x.%04x: rc=%d pvm=%02x vpm=%02x " - "todo=%02x mism=%d rsvd=%d reset=%02x\n", id->ssid, + "todo=%02x mism=%d rsvd=%02x reset=%02x\n", id->ssid, id->devno, rc, cdev->private->pgid_valid_mask, sch->vpm, cdev->private->pgid_todo_mask, mismatch, reserved, reset); switch (rc) { diff --git a/drivers/s390/kvm/kvm_virtio.c b/drivers/s390/kvm/kvm_virtio.c index d74e9ae6dfb3..f97b2aa65f42 100644 --- a/drivers/s390/kvm/kvm_virtio.c +++ b/drivers/s390/kvm/kvm_virtio.c @@ -418,6 +418,26 @@ static void kvm_extint_handler(struct ext_code ext_code, } /* + * For s390-virtio, we expect a page above main storage containing + * the virtio configuration. Try to actually load from this area + * in order to figure out if the host provides this page. 
+ */ +static int __init test_devices_support(unsigned long addr) +{ + int ret = -EIO; + + asm volatile( + "0: lura 0,%1\n" + "1: xgr %0,%0\n" + "2:\n" + EX_TABLE(0b,2b) + EX_TABLE(1b,2b) + : "+d" (ret) + : "a" (addr) + : "0", "cc"); + return ret; +} +/* * Init function for virtio * devices are in a single page above top of "normal" mem */ @@ -428,21 +448,23 @@ static int __init kvm_devices_init(void) if (!MACHINE_IS_KVM) return -ENODEV; + if (test_devices_support(real_memory_size) < 0) + return -ENODEV; + + rc = vmem_add_mapping(real_memory_size, PAGE_SIZE); + if (rc) + return rc; + + kvm_devices = (void *) real_memory_size; + kvm_root = root_device_register("kvm_s390"); if (IS_ERR(kvm_root)) { rc = PTR_ERR(kvm_root); printk(KERN_ERR "Could not register kvm_s390 root device"); + vmem_remove_mapping(real_memory_size, PAGE_SIZE); return rc; } - rc = vmem_add_mapping(real_memory_size, PAGE_SIZE); - if (rc) { - root_device_unregister(kvm_root); - return rc; - } - - kvm_devices = (void *) real_memory_size; - INIT_WORK(&hotplug_work, hotplug_devices); service_subclass_irq_register(); diff --git a/drivers/s390/scsi/zfcp_aux.c b/drivers/s390/scsi/zfcp_aux.c index 086018109662..4f1b10b7dea6 100644 --- a/drivers/s390/scsi/zfcp_aux.c +++ b/drivers/s390/scsi/zfcp_aux.c @@ -519,6 +519,7 @@ struct zfcp_port *zfcp_port_enqueue(struct zfcp_adapter *adapter, u64 wwpn, rwlock_init(&port->unit_list_lock); INIT_LIST_HEAD(&port->unit_list); + atomic_set(&port->units, 0); INIT_WORK(&port->gid_pn_work, zfcp_fc_port_did_lookup); INIT_WORK(&port->test_link_work, zfcp_fc_link_test_work); diff --git a/drivers/s390/scsi/zfcp_ccw.c b/drivers/s390/scsi/zfcp_ccw.c index 96f13ad88123..79a6afe0212a 100644 --- a/drivers/s390/scsi/zfcp_ccw.c +++ b/drivers/s390/scsi/zfcp_ccw.c @@ -39,17 +39,23 @@ void zfcp_ccw_adapter_put(struct zfcp_adapter *adapter) spin_unlock_irqrestore(&zfcp_ccw_adapter_ref_lock, flags); } -static int zfcp_ccw_activate(struct ccw_device *cdev) - +/** + * zfcp_ccw_activate - activate adapter and wait for it to finish + * @cdev: pointer to belonging ccw device + * @clear: Status flags to clear. + * @tag: s390dbf trace record tag + */ +static int zfcp_ccw_activate(struct ccw_device *cdev, int clear, char *tag) { struct zfcp_adapter *adapter = zfcp_ccw_adapter_by_cdev(cdev); if (!adapter) return 0; + zfcp_erp_clear_adapter_status(adapter, clear); zfcp_erp_set_adapter_status(adapter, ZFCP_STATUS_COMMON_RUNNING); zfcp_erp_adapter_reopen(adapter, ZFCP_STATUS_COMMON_ERP_FAILED, - "ccresu2"); + tag); zfcp_erp_wait(adapter); flush_work(&adapter->scan_work); @@ -164,26 +170,29 @@ static int zfcp_ccw_set_online(struct ccw_device *cdev) BUG_ON(!zfcp_reqlist_isempty(adapter->req_list)); adapter->req_no = 0; - zfcp_ccw_activate(cdev); + zfcp_ccw_activate(cdev, 0, "ccsonl1"); zfcp_ccw_adapter_put(adapter); return 0; } /** - * zfcp_ccw_set_offline - set_offline function of zfcp driver + * zfcp_ccw_offline_sync - shut down adapter and wait for it to finish * @cdev: pointer to belonging ccw device + * @set: Status flags to set. + * @tag: s390dbf trace record tag * * This function gets called by the common i/o layer and sets an adapter * into state offline. 
*/ -static int zfcp_ccw_set_offline(struct ccw_device *cdev) +static int zfcp_ccw_offline_sync(struct ccw_device *cdev, int set, char *tag) { struct zfcp_adapter *adapter = zfcp_ccw_adapter_by_cdev(cdev); if (!adapter) return 0; - zfcp_erp_adapter_shutdown(adapter, 0, "ccsoff1"); + zfcp_erp_set_adapter_status(adapter, set); + zfcp_erp_adapter_shutdown(adapter, 0, tag); zfcp_erp_wait(adapter); zfcp_ccw_adapter_put(adapter); @@ -191,6 +200,18 @@ static int zfcp_ccw_set_offline(struct ccw_device *cdev) } /** + * zfcp_ccw_set_offline - set_offline function of zfcp driver + * @cdev: pointer to belonging ccw device + * + * This function gets called by the common i/o layer and sets an adapter + * into state offline. + */ +static int zfcp_ccw_set_offline(struct ccw_device *cdev) +{ + return zfcp_ccw_offline_sync(cdev, 0, "ccsoff1"); +} + +/** * zfcp_ccw_notify - ccw notify function * @cdev: pointer to belonging ccw device * @event: indicates if adapter was detached or attached @@ -207,6 +228,11 @@ static int zfcp_ccw_notify(struct ccw_device *cdev, int event) switch (event) { case CIO_GONE: + if (atomic_read(&adapter->status) & + ZFCP_STATUS_ADAPTER_SUSPENDED) { /* notification ignore */ + zfcp_dbf_hba_basic("ccnigo1", adapter); + break; + } dev_warn(&cdev->dev, "The FCP device has been detached\n"); zfcp_erp_adapter_shutdown(adapter, 0, "ccnoti1"); break; @@ -216,6 +242,11 @@ static int zfcp_ccw_notify(struct ccw_device *cdev, int event) zfcp_erp_adapter_shutdown(adapter, 0, "ccnoti2"); break; case CIO_OPER: + if (atomic_read(&adapter->status) & + ZFCP_STATUS_ADAPTER_SUSPENDED) { /* notification ignore */ + zfcp_dbf_hba_basic("ccniop1", adapter); + break; + } dev_info(&cdev->dev, "The FCP device is operational again\n"); zfcp_erp_set_adapter_status(adapter, ZFCP_STATUS_COMMON_RUNNING); @@ -251,6 +282,28 @@ static void zfcp_ccw_shutdown(struct ccw_device *cdev) zfcp_ccw_adapter_put(adapter); } +static int zfcp_ccw_suspend(struct ccw_device *cdev) +{ + zfcp_ccw_offline_sync(cdev, ZFCP_STATUS_ADAPTER_SUSPENDED, "ccsusp1"); + return 0; +} + +static int zfcp_ccw_thaw(struct ccw_device *cdev) +{ + /* trace records for thaw and final shutdown during suspend + can only be found in system dump until the end of suspend + but not after resume because it's based on the memory image + right after the very first suspend (freeze) callback */ + zfcp_ccw_activate(cdev, 0, "ccthaw1"); + return 0; +} + +static int zfcp_ccw_resume(struct ccw_device *cdev) +{ + zfcp_ccw_activate(cdev, ZFCP_STATUS_ADAPTER_SUSPENDED, "ccresu1"); + return 0; +} + struct ccw_driver zfcp_ccw_driver = { .driver = { .owner = THIS_MODULE, @@ -263,7 +316,7 @@ struct ccw_driver zfcp_ccw_driver = { .set_offline = zfcp_ccw_set_offline, .notify = zfcp_ccw_notify, .shutdown = zfcp_ccw_shutdown, - .freeze = zfcp_ccw_set_offline, - .thaw = zfcp_ccw_activate, - .restore = zfcp_ccw_activate, + .freeze = zfcp_ccw_suspend, + .thaw = zfcp_ccw_thaw, + .restore = zfcp_ccw_resume, }; diff --git a/drivers/s390/scsi/zfcp_cfdc.c b/drivers/s390/scsi/zfcp_cfdc.c index fab2c2592a97..8ed63aa9abea 100644 --- a/drivers/s390/scsi/zfcp_cfdc.c +++ b/drivers/s390/scsi/zfcp_cfdc.c @@ -293,7 +293,7 @@ void zfcp_cfdc_adapter_access_changed(struct zfcp_adapter *adapter) } read_unlock_irqrestore(&adapter->port_list_lock, flags); - shost_for_each_device(sdev, port->adapter->scsi_host) { + shost_for_each_device(sdev, adapter->scsi_host) { zfcp_sdev = sdev_to_zfcp(sdev); status = atomic_read(&zfcp_sdev->status); if ((status & ZFCP_STATUS_COMMON_ACCESS_DENIED) || diff --git 
a/drivers/s390/scsi/zfcp_dbf.c b/drivers/s390/scsi/zfcp_dbf.c index a9a816e4aa55..79b98482547b 100644 --- a/drivers/s390/scsi/zfcp_dbf.c +++ b/drivers/s390/scsi/zfcp_dbf.c @@ -191,7 +191,7 @@ void zfcp_dbf_hba_def_err(struct zfcp_adapter *adapter, u64 req_id, u16 scount, length = min((u16)sizeof(struct qdio_buffer), (u16)ZFCP_DBF_PAY_MAX_REC); - while ((char *)pl[payload->counter] && payload->counter < scount) { + while (payload->counter < scount && (char *)pl[payload->counter]) { memcpy(payload->data, (char *)pl[payload->counter], length); debug_event(dbf->pay, 1, payload, zfcp_dbf_plen(length)); payload->counter++; @@ -200,6 +200,26 @@ void zfcp_dbf_hba_def_err(struct zfcp_adapter *adapter, u64 req_id, u16 scount, spin_unlock_irqrestore(&dbf->pay_lock, flags); } +/** + * zfcp_dbf_hba_basic - trace event for basic adapter events + * @adapter: pointer to struct zfcp_adapter + */ +void zfcp_dbf_hba_basic(char *tag, struct zfcp_adapter *adapter) +{ + struct zfcp_dbf *dbf = adapter->dbf; + struct zfcp_dbf_hba *rec = &dbf->hba_buf; + unsigned long flags; + + spin_lock_irqsave(&dbf->hba_lock, flags); + memset(rec, 0, sizeof(*rec)); + + memcpy(rec->tag, tag, ZFCP_DBF_TAG_LEN); + rec->id = ZFCP_DBF_HBA_BASIC; + + debug_event(dbf->hba, 1, rec, sizeof(*rec)); + spin_unlock_irqrestore(&dbf->hba_lock, flags); +} + static void zfcp_dbf_set_common(struct zfcp_dbf_rec *rec, struct zfcp_adapter *adapter, struct zfcp_port *port, diff --git a/drivers/s390/scsi/zfcp_dbf.h b/drivers/s390/scsi/zfcp_dbf.h index 714f087eb7a9..3ac7a4b30dd9 100644 --- a/drivers/s390/scsi/zfcp_dbf.h +++ b/drivers/s390/scsi/zfcp_dbf.h @@ -154,6 +154,7 @@ enum zfcp_dbf_hba_id { ZFCP_DBF_HBA_RES = 1, ZFCP_DBF_HBA_USS = 2, ZFCP_DBF_HBA_BIT = 3, + ZFCP_DBF_HBA_BASIC = 4, }; /** diff --git a/drivers/s390/scsi/zfcp_def.h b/drivers/s390/scsi/zfcp_def.h index ed5d921e82cd..f172b844217d 100644 --- a/drivers/s390/scsi/zfcp_def.h +++ b/drivers/s390/scsi/zfcp_def.h @@ -77,6 +77,7 @@ struct zfcp_reqlist; #define ZFCP_STATUS_ADAPTER_SIOSL_ISSUED 0x00000004 #define ZFCP_STATUS_ADAPTER_XCONFIG_OK 0x00000008 #define ZFCP_STATUS_ADAPTER_HOST_CON_INIT 0x00000010 +#define ZFCP_STATUS_ADAPTER_SUSPENDED 0x00000040 #define ZFCP_STATUS_ADAPTER_ERP_PENDING 0x00000100 #define ZFCP_STATUS_ADAPTER_LINK_UNPLUGGED 0x00000200 #define ZFCP_STATUS_ADAPTER_DATA_DIV_ENABLED 0x00000400 @@ -204,6 +205,7 @@ struct zfcp_port { struct zfcp_adapter *adapter; /* adapter used to access port */ struct list_head unit_list; /* head of logical unit list */ rwlock_t unit_list_lock; /* unit list lock */ + atomic_t units; /* zfcp_unit count */ atomic_t status; /* status of this remote port */ u64 wwnn; /* WWNN if known */ u64 wwpn; /* WWPN */ diff --git a/drivers/s390/scsi/zfcp_ext.h b/drivers/s390/scsi/zfcp_ext.h index 2302e1cfb76c..ef9e50225ee7 100644 --- a/drivers/s390/scsi/zfcp_ext.h +++ b/drivers/s390/scsi/zfcp_ext.h @@ -54,6 +54,7 @@ extern void zfcp_dbf_hba_fsf_res(char *, struct zfcp_fsf_req *); extern void zfcp_dbf_hba_bit_err(char *, struct zfcp_fsf_req *); extern void zfcp_dbf_hba_berr(struct zfcp_dbf *, struct zfcp_fsf_req *); extern void zfcp_dbf_hba_def_err(struct zfcp_adapter *, u64, u16, void **); +extern void zfcp_dbf_hba_basic(char *, struct zfcp_adapter *); extern void zfcp_dbf_san_req(char *, struct zfcp_fsf_req *, u32); extern void zfcp_dbf_san_res(char *, struct zfcp_fsf_req *); extern void zfcp_dbf_san_in_els(char *, struct zfcp_fsf_req *); @@ -158,6 +159,7 @@ extern void zfcp_scsi_dif_sense_error(struct scsi_cmnd *, int); extern struct attribute_group 
zfcp_sysfs_unit_attrs; extern struct attribute_group zfcp_sysfs_adapter_attrs; extern struct attribute_group zfcp_sysfs_port_attrs; +extern struct mutex zfcp_sysfs_port_units_mutex; extern struct device_attribute *zfcp_sysfs_sdev_attrs[]; extern struct device_attribute *zfcp_sysfs_shost_attrs[]; diff --git a/drivers/s390/scsi/zfcp_fsf.c b/drivers/s390/scsi/zfcp_fsf.c index e9a787e2e6a5..2136fc2178eb 100644 --- a/drivers/s390/scsi/zfcp_fsf.c +++ b/drivers/s390/scsi/zfcp_fsf.c @@ -219,7 +219,7 @@ static void zfcp_fsf_status_read_handler(struct zfcp_fsf_req *req) return; } - zfcp_dbf_hba_fsf_uss("fssrh_2", req); + zfcp_dbf_hba_fsf_uss("fssrh_4", req); switch (sr_buf->status_type) { case FSF_STATUS_READ_PORT_CLOSED: @@ -437,6 +437,34 @@ void zfcp_fsf_req_dismiss_all(struct zfcp_adapter *adapter) } } +#define ZFCP_FSF_PORTSPEED_1GBIT (1 << 0) +#define ZFCP_FSF_PORTSPEED_2GBIT (1 << 1) +#define ZFCP_FSF_PORTSPEED_4GBIT (1 << 2) +#define ZFCP_FSF_PORTSPEED_10GBIT (1 << 3) +#define ZFCP_FSF_PORTSPEED_8GBIT (1 << 4) +#define ZFCP_FSF_PORTSPEED_16GBIT (1 << 5) +#define ZFCP_FSF_PORTSPEED_NOT_NEGOTIATED (1 << 15) + +static u32 zfcp_fsf_convert_portspeed(u32 fsf_speed) +{ + u32 fdmi_speed = 0; + if (fsf_speed & ZFCP_FSF_PORTSPEED_1GBIT) + fdmi_speed |= FC_PORTSPEED_1GBIT; + if (fsf_speed & ZFCP_FSF_PORTSPEED_2GBIT) + fdmi_speed |= FC_PORTSPEED_2GBIT; + if (fsf_speed & ZFCP_FSF_PORTSPEED_4GBIT) + fdmi_speed |= FC_PORTSPEED_4GBIT; + if (fsf_speed & ZFCP_FSF_PORTSPEED_10GBIT) + fdmi_speed |= FC_PORTSPEED_10GBIT; + if (fsf_speed & ZFCP_FSF_PORTSPEED_8GBIT) + fdmi_speed |= FC_PORTSPEED_8GBIT; + if (fsf_speed & ZFCP_FSF_PORTSPEED_16GBIT) + fdmi_speed |= FC_PORTSPEED_16GBIT; + if (fsf_speed & ZFCP_FSF_PORTSPEED_NOT_NEGOTIATED) + fdmi_speed |= FC_PORTSPEED_NOT_NEGOTIATED; + return fdmi_speed; +} + static int zfcp_fsf_exchange_config_evaluate(struct zfcp_fsf_req *req) { struct fsf_qtcb_bottom_config *bottom = &req->qtcb->bottom.config; @@ -456,7 +484,8 @@ static int zfcp_fsf_exchange_config_evaluate(struct zfcp_fsf_req *req) fc_host_port_name(shost) = nsp->fl_wwpn; fc_host_node_name(shost) = nsp->fl_wwnn; fc_host_port_id(shost) = ntoh24(bottom->s_id); - fc_host_speed(shost) = bottom->fc_link_speed; + fc_host_speed(shost) = + zfcp_fsf_convert_portspeed(bottom->fc_link_speed); fc_host_supported_classes(shost) = FC_COS_CLASS2 | FC_COS_CLASS3; adapter->hydra_version = bottom->adapter_type; @@ -580,7 +609,8 @@ static void zfcp_fsf_exchange_port_evaluate(struct zfcp_fsf_req *req) } else fc_host_permanent_port_name(shost) = fc_host_port_name(shost); fc_host_maxframe_size(shost) = bottom->maximum_frame_size; - fc_host_supported_speeds(shost) = bottom->supported_speed; + fc_host_supported_speeds(shost) = + zfcp_fsf_convert_portspeed(bottom->supported_speed); memcpy(fc_host_supported_fc4s(shost), bottom->supported_fc4_types, FC_FC4_LIST_SIZE); memcpy(fc_host_active_fc4s(shost), bottom->active_fc4_types, @@ -771,12 +801,14 @@ out: static void zfcp_fsf_abort_fcp_command_handler(struct zfcp_fsf_req *req) { struct scsi_device *sdev = req->data; - struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(sdev); + struct zfcp_scsi_dev *zfcp_sdev; union fsf_status_qual *fsq = &req->qtcb->header.fsf_status_qual; if (req->status & ZFCP_STATUS_FSFREQ_ERROR) return; + zfcp_sdev = sdev_to_zfcp(sdev); + switch (req->qtcb->header.fsf_status) { case FSF_PORT_HANDLE_NOT_VALID: if (fsq->word[0] == fsq->word[1]) { @@ -885,7 +917,7 @@ static void zfcp_fsf_send_ct_handler(struct zfcp_fsf_req *req) switch (header->fsf_status) { case FSF_GOOD: - 
zfcp_dbf_san_res("fsscth1", req); + zfcp_dbf_san_res("fsscth2", req); ct->status = 0; break; case FSF_SERVICE_CLASS_NOT_SUPPORTED: @@ -1739,13 +1771,15 @@ static void zfcp_fsf_open_lun_handler(struct zfcp_fsf_req *req) { struct zfcp_adapter *adapter = req->adapter; struct scsi_device *sdev = req->data; - struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(sdev); + struct zfcp_scsi_dev *zfcp_sdev; struct fsf_qtcb_header *header = &req->qtcb->header; struct fsf_qtcb_bottom_support *bottom = &req->qtcb->bottom.support; if (req->status & ZFCP_STATUS_FSFREQ_ERROR) return; + zfcp_sdev = sdev_to_zfcp(sdev); + atomic_clear_mask(ZFCP_STATUS_COMMON_ACCESS_DENIED | ZFCP_STATUS_COMMON_ACCESS_BOXED | ZFCP_STATUS_LUN_SHARED | @@ -1856,11 +1890,13 @@ out: static void zfcp_fsf_close_lun_handler(struct zfcp_fsf_req *req) { struct scsi_device *sdev = req->data; - struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(sdev); + struct zfcp_scsi_dev *zfcp_sdev; if (req->status & ZFCP_STATUS_FSFREQ_ERROR) return; + zfcp_sdev = sdev_to_zfcp(sdev); + switch (req->qtcb->header.fsf_status) { case FSF_PORT_HANDLE_NOT_VALID: zfcp_erp_adapter_reopen(zfcp_sdev->port->adapter, 0, "fscuh_1"); @@ -1950,7 +1986,7 @@ static void zfcp_fsf_req_trace(struct zfcp_fsf_req *req, struct scsi_cmnd *scsi) { struct fsf_qual_latency_info *lat_in; struct latency_cont *lat = NULL; - struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(scsi->device); + struct zfcp_scsi_dev *zfcp_sdev; struct zfcp_blk_drv_data blktrc; int ticks = req->adapter->timer_ticks; @@ -1965,6 +2001,7 @@ static void zfcp_fsf_req_trace(struct zfcp_fsf_req *req, struct scsi_cmnd *scsi) if (req->adapter->adapter_features & FSF_FEATURE_MEASUREMENT_DATA && !(req->status & ZFCP_STATUS_FSFREQ_ERROR)) { + zfcp_sdev = sdev_to_zfcp(scsi->device); blktrc.flags |= ZFCP_BLK_LAT_VALID; blktrc.channel_lat = lat_in->channel_lat * ticks; blktrc.fabric_lat = lat_in->fabric_lat * ticks; @@ -2002,12 +2039,14 @@ static void zfcp_fsf_fcp_handler_common(struct zfcp_fsf_req *req) { struct scsi_cmnd *scmnd = req->data; struct scsi_device *sdev = scmnd->device; - struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(sdev); + struct zfcp_scsi_dev *zfcp_sdev; struct fsf_qtcb_header *header = &req->qtcb->header; if (unlikely(req->status & ZFCP_STATUS_FSFREQ_ERROR)) return; + zfcp_sdev = sdev_to_zfcp(sdev); + switch (header->fsf_status) { case FSF_HANDLE_MISMATCH: case FSF_PORT_HANDLE_NOT_VALID: diff --git a/drivers/s390/scsi/zfcp_qdio.c b/drivers/s390/scsi/zfcp_qdio.c index e14da5751d32..e76d003ebdb3 100644 --- a/drivers/s390/scsi/zfcp_qdio.c +++ b/drivers/s390/scsi/zfcp_qdio.c @@ -102,18 +102,22 @@ static void zfcp_qdio_int_resp(struct ccw_device *cdev, unsigned int qdio_err, { struct zfcp_qdio *qdio = (struct zfcp_qdio *) parm; struct zfcp_adapter *adapter = qdio->adapter; - struct qdio_buffer_element *sbale; int sbal_no, sbal_idx; - void *pl[ZFCP_QDIO_MAX_SBALS_PER_REQ + 1]; - u64 req_id; - u8 scount; if (unlikely(qdio_err)) { - memset(pl, 0, ZFCP_QDIO_MAX_SBALS_PER_REQ * sizeof(void *)); if (zfcp_adapter_multi_buffer_active(adapter)) { + void *pl[ZFCP_QDIO_MAX_SBALS_PER_REQ + 1]; + struct qdio_buffer_element *sbale; + u64 req_id; + u8 scount; + + memset(pl, 0, + ZFCP_QDIO_MAX_SBALS_PER_REQ * sizeof(void *)); sbale = qdio->res_q[idx]->element; req_id = (u64) sbale->addr; - scount = sbale->scount + 1; /* incl. signaling SBAL */ + scount = min(sbale->scount + 1, + ZFCP_QDIO_MAX_SBALS_PER_REQ + 1); + /* incl. 
signaling SBAL */ for (sbal_no = 0; sbal_no < scount; sbal_no++) { sbal_idx = (idx + sbal_no) % diff --git a/drivers/s390/scsi/zfcp_sysfs.c b/drivers/s390/scsi/zfcp_sysfs.c index cdc4ff78a7ba..9e62210b294f 100644 --- a/drivers/s390/scsi/zfcp_sysfs.c +++ b/drivers/s390/scsi/zfcp_sysfs.c @@ -227,6 +227,8 @@ static ssize_t zfcp_sysfs_port_rescan_store(struct device *dev, static ZFCP_DEV_ATTR(adapter, port_rescan, S_IWUSR, NULL, zfcp_sysfs_port_rescan_store); +DEFINE_MUTEX(zfcp_sysfs_port_units_mutex); + static ssize_t zfcp_sysfs_port_remove_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) @@ -249,6 +251,16 @@ static ssize_t zfcp_sysfs_port_remove_store(struct device *dev, else retval = 0; + mutex_lock(&zfcp_sysfs_port_units_mutex); + if (atomic_read(&port->units) > 0) { + retval = -EBUSY; + mutex_unlock(&zfcp_sysfs_port_units_mutex); + goto out; + } + /* port is about to be removed, so no more unit_add */ + atomic_set(&port->units, -1); + mutex_unlock(&zfcp_sysfs_port_units_mutex); + write_lock_irq(&adapter->port_list_lock); list_del(&port->list); write_unlock_irq(&adapter->port_list_lock); @@ -289,12 +301,14 @@ static ssize_t zfcp_sysfs_unit_add_store(struct device *dev, { struct zfcp_port *port = container_of(dev, struct zfcp_port, dev); u64 fcp_lun; + int retval; if (strict_strtoull(buf, 0, (unsigned long long *) &fcp_lun)) return -EINVAL; - if (zfcp_unit_add(port, fcp_lun)) - return -EINVAL; + retval = zfcp_unit_add(port, fcp_lun); + if (retval) + return retval; return count; } diff --git a/drivers/s390/scsi/zfcp_unit.c b/drivers/s390/scsi/zfcp_unit.c index 20796ebc33ce..4e6a5356bdbd 100644 --- a/drivers/s390/scsi/zfcp_unit.c +++ b/drivers/s390/scsi/zfcp_unit.c @@ -104,7 +104,7 @@ static void zfcp_unit_release(struct device *dev) { struct zfcp_unit *unit = container_of(dev, struct zfcp_unit, dev); - put_device(&unit->port->dev); + atomic_dec(&unit->port->units); kfree(unit); } @@ -119,16 +119,27 @@ static void zfcp_unit_release(struct device *dev) int zfcp_unit_add(struct zfcp_port *port, u64 fcp_lun) { struct zfcp_unit *unit; + int retval = 0; + + mutex_lock(&zfcp_sysfs_port_units_mutex); + if (atomic_read(&port->units) == -1) { + /* port is already gone */ + retval = -ENODEV; + goto out; + } unit = zfcp_unit_find(port, fcp_lun); if (unit) { put_device(&unit->dev); - return -EEXIST; + retval = -EEXIST; + goto out; } unit = kzalloc(sizeof(struct zfcp_unit), GFP_KERNEL); - if (!unit) - return -ENOMEM; + if (!unit) { + retval = -ENOMEM; + goto out; + } unit->port = port; unit->fcp_lun = fcp_lun; @@ -139,28 +150,33 @@ int zfcp_unit_add(struct zfcp_port *port, u64 fcp_lun) if (dev_set_name(&unit->dev, "0x%016llx", (unsigned long long) fcp_lun)) { kfree(unit); - return -ENOMEM; + retval = -ENOMEM; + goto out; } - get_device(&port->dev); - if (device_register(&unit->dev)) { put_device(&unit->dev); - return -ENOMEM; + retval = -ENOMEM; + goto out; } if (sysfs_create_group(&unit->dev.kobj, &zfcp_sysfs_unit_attrs)) { device_unregister(&unit->dev); - return -EINVAL; + retval = -EINVAL; + goto out; } + atomic_inc(&port->units); /* under zfcp_sysfs_port_units_mutex ! 
*/ + write_lock_irq(&port->unit_list_lock); list_add_tail(&unit->list, &port->unit_list); write_unlock_irq(&port->unit_list_lock); zfcp_unit_scsi_scan(unit); - return 0; +out: + mutex_unlock(&zfcp_sysfs_port_units_mutex); + return retval; } /** diff --git a/drivers/scsi/aha152x.c b/drivers/scsi/aha152x.c index 19a36945e6fd..dd4547bf6881 100644 --- a/drivers/scsi/aha152x.c +++ b/drivers/scsi/aha152x.c @@ -2984,8 +2984,8 @@ static int get_command(char *pos, Scsi_Cmnd * ptr) char *start = pos; int i; - SPRINTF("0x%08x: target=%d; lun=%d; cmnd=( ", - (unsigned int) ptr, ptr->device->id, ptr->device->lun); + SPRINTF("%p: target=%d; lun=%d; cmnd=( ", + ptr, ptr->device->id, ptr->device->lun); for (i = 0; i < COMMAND_SIZE(ptr->cmnd[0]); i++) SPRINTF("0x%02x ", ptr->cmnd[i]); diff --git a/drivers/scsi/atp870u.c b/drivers/scsi/atp870u.c index 68ce08552f69..a540162ac59c 100644 --- a/drivers/scsi/atp870u.c +++ b/drivers/scsi/atp870u.c @@ -1173,7 +1173,16 @@ wait_io1: outw(val, tmport); outb(2, 0x80); TCM_SYNC: - udelay(0x800); + /* + * The funny division into multiple delays is to accommodate + * arches like ARM where udelay() multiplies its argument by + * a large number to initialize a loop counter. To avoid + * overflow, the maximum supported udelay is 2000 microseconds. + * + * XXX it would be more polite to find a way to use msleep() + */ + mdelay(2); + udelay(48); if ((inb(tmport) & 0x80) == 0x00) { /* bsy ? */ outw(0, tmport--); outb(0, tmport); diff --git a/drivers/scsi/bnx2i/bnx2i_hwi.c b/drivers/scsi/bnx2i/bnx2i_hwi.c index f9d6f4129093..72de3ba51657 100644 --- a/drivers/scsi/bnx2i/bnx2i_hwi.c +++ b/drivers/scsi/bnx2i/bnx2i_hwi.c @@ -1264,6 +1264,9 @@ int bnx2i_send_fw_iscsi_init_msg(struct bnx2i_hba *hba) int rc = 0; u64 mask64; + memset(&iscsi_init, 0x00, sizeof(struct iscsi_kwqe_init1)); + memset(&iscsi_init2, 0x00, sizeof(struct iscsi_kwqe_init2)); + bnx2i_adjust_qp_size(hba); iscsi_init.flags = diff --git a/drivers/scsi/device_handler/scsi_dh_alua.c b/drivers/scsi/device_handler/scsi_dh_alua.c index 04c5cea47a22..3a8ba3e433d2 100644 --- a/drivers/scsi/device_handler/scsi_dh_alua.c +++ b/drivers/scsi/device_handler/scsi_dh_alua.c @@ -583,8 +583,7 @@ static int alua_rtpg(struct scsi_device *sdev, struct alua_dh_data *h) h->state = TPGS_STATE_STANDBY; break; case TPGS_STATE_OFFLINE: - case TPGS_STATE_UNAVAILABLE: - /* Path unusable for unavailable/offline */ + /* Path unusable */ err = SCSI_DH_DEV_OFFLINED; break; default: diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c index 500e20dd56ec..b0fefc435c03 100644 --- a/drivers/scsi/hpsa.c +++ b/drivers/scsi/hpsa.c @@ -548,12 +548,42 @@ static void set_performant_mode(struct ctlr_info *h, struct CommandList *c) c->busaddr |= 1 | (h->blockFetchTable[c->Header.SGList] << 1); } +static int is_firmware_flash_cmd(u8 *cdb) +{ + return cdb[0] == BMIC_WRITE && cdb[6] == BMIC_FLASH_FIRMWARE; +} + +/* + * During firmware flash, the heartbeat register may not update as frequently + * as it should. So we dial down lockup detection during firmware flash, and + * dial it back up when firmware flash completes.
+ */ +#define HEARTBEAT_SAMPLE_INTERVAL_DURING_FLASH (240 * HZ) +#define HEARTBEAT_SAMPLE_INTERVAL (30 * HZ) +static void dial_down_lockup_detection_during_fw_flash(struct ctlr_info *h, + struct CommandList *c) +{ + if (!is_firmware_flash_cmd(c->Request.CDB)) + return; + atomic_inc(&h->firmware_flash_in_progress); + h->heartbeat_sample_interval = HEARTBEAT_SAMPLE_INTERVAL_DURING_FLASH; +} + +static void dial_up_lockup_detection_on_fw_flash_complete(struct ctlr_info *h, + struct CommandList *c) +{ + if (is_firmware_flash_cmd(c->Request.CDB) && + atomic_dec_and_test(&h->firmware_flash_in_progress)) + h->heartbeat_sample_interval = HEARTBEAT_SAMPLE_INTERVAL; +} + static void enqueue_cmd_and_start_io(struct ctlr_info *h, struct CommandList *c) { unsigned long flags; set_performant_mode(h, c); + dial_down_lockup_detection_during_fw_flash(h, c); spin_lock_irqsave(&h->lock, flags); addQ(&h->reqQ, c); h->Qdepth++; @@ -1264,8 +1294,9 @@ static void complete_scsi_command(struct CommandList *cp) } break; case CMD_PROTOCOL_ERR: + cmd->result = DID_ERROR << 16; dev_warn(&h->pdev->dev, "cp %p has " - "protocol error \n", cp); + "protocol error\n", cp); break; case CMD_HARDWARE_ERR: cmd->result = DID_ERROR << 16; @@ -2942,7 +2973,7 @@ static void fill_cmd(struct CommandList *c, u8 cmd, struct ctlr_info *h, c->Request.Timeout = 0; /* Don't time out */ memset(&c->Request.CDB[0], 0, sizeof(c->Request.CDB)); c->Request.CDB[0] = cmd; - c->Request.CDB[1] = 0x03; /* Reset target above */ + c->Request.CDB[1] = HPSA_RESET_TYPE_LUN; /* If bytes 4-7 are zero, it means reset the */ /* LunID device */ c->Request.CDB[4] = 0x00; @@ -3048,6 +3079,7 @@ static inline int bad_tag(struct ctlr_info *h, u32 tag_index, static inline void finish_cmd(struct CommandList *c, u32 raw_tag) { removeQ(c); + dial_up_lockup_detection_on_fw_flash_complete(c->h, c); if (likely(c->cmd_type == CMD_SCSI)) complete_scsi_command(c); else if (c->cmd_type == CMD_IOCTL_PEND) @@ -4188,9 +4220,6 @@ static void controller_lockup_detected(struct ctlr_info *h) spin_unlock_irqrestore(&h->lock, flags); } -#define HEARTBEAT_SAMPLE_INTERVAL (10 * HZ) -#define HEARTBEAT_CHECK_MINIMUM_INTERVAL (HEARTBEAT_SAMPLE_INTERVAL / 2) - static void detect_controller_lockup(struct ctlr_info *h) { u64 now; @@ -4201,7 +4230,7 @@ static void detect_controller_lockup(struct ctlr_info *h) now = get_jiffies_64(); /* If we've received an interrupt recently, we're ok. */ if (time_after64(h->last_intr_timestamp + - (HEARTBEAT_CHECK_MINIMUM_INTERVAL), now)) + (h->heartbeat_sample_interval), now)) return; /* @@ -4210,7 +4239,7 @@ static void detect_controller_lockup(struct ctlr_info *h) * otherwise don't care about signals in this thread. */ if (time_after64(h->last_heartbeat_timestamp + - (HEARTBEAT_CHECK_MINIMUM_INTERVAL), now)) + (h->heartbeat_sample_interval), now)) return; /* If heartbeat has not changed since we last looked, we're not ok. 
*/ @@ -4252,6 +4281,7 @@ static void add_ctlr_to_lockup_detector_list(struct ctlr_info *h) { unsigned long flags; + h->heartbeat_sample_interval = HEARTBEAT_SAMPLE_INTERVAL; spin_lock_irqsave(&lockup_detector_lock, flags); list_add_tail(&h->lockup_list, &hpsa_ctlr_list); spin_unlock_irqrestore(&lockup_detector_lock, flags); diff --git a/drivers/scsi/hpsa.h b/drivers/scsi/hpsa.h index 7b28d54fa878..6f30a6f477ce 100644 --- a/drivers/scsi/hpsa.h +++ b/drivers/scsi/hpsa.h @@ -123,6 +123,8 @@ struct ctlr_info { u64 last_intr_timestamp; u32 last_heartbeat; u64 last_heartbeat_timestamp; + u32 heartbeat_sample_interval; + atomic_t firmware_flash_in_progress; u32 lockup_detected; struct list_head lockup_list; }; diff --git a/drivers/scsi/hpsa_cmd.h b/drivers/scsi/hpsa_cmd.h index 8049815d8c1e..cdd742e4406a 100644 --- a/drivers/scsi/hpsa_cmd.h +++ b/drivers/scsi/hpsa_cmd.h @@ -162,6 +162,7 @@ struct SenseSubsystem_info { #define BMIC_WRITE 0x27 #define BMIC_CACHE_FLUSH 0xc2 #define HPSA_CACHE_FLUSH 0x01 /* C2 was already being used by HPSA */ +#define BMIC_FLASH_FIRMWARE 0xF7 /* Command List Structure */ union SCSI3Addr { diff --git a/drivers/scsi/ibmvscsi/ibmvscsi.c b/drivers/scsi/ibmvscsi/ibmvscsi.c index 3a6c4742951e..337e8b33d9aa 100644 --- a/drivers/scsi/ibmvscsi/ibmvscsi.c +++ b/drivers/scsi/ibmvscsi/ibmvscsi.c @@ -1541,6 +1541,9 @@ static int ibmvscsi_do_host_config(struct ibmvscsi_host_data *hostdata, host_config = &evt_struct->iu.mad.host_config; + /* The transport length field is only 16-bit */ + length = min(0xffff, length); + /* Set up a lun reset SRP command */ memset(host_config, 0x00, sizeof(*host_config)); host_config->common.type = VIOSRP_HOST_CONFIG_TYPE; diff --git a/drivers/scsi/isci/init.c b/drivers/scsi/isci/init.c index bc6cf8886312..5a1bd0ca2bb9 100644 --- a/drivers/scsi/isci/init.c +++ b/drivers/scsi/isci/init.c @@ -470,7 +470,7 @@ static int __devinit isci_pci_probe(struct pci_dev *pdev, const struct pci_devic return -ENOMEM; pci_set_drvdata(pdev, pci_info); - if (efi_enabled) + if (efi_enabled(EFI_RUNTIME_SERVICES)) orom = isci_get_efi_var(pdev); if (!orom) @@ -481,7 +481,6 @@ static int __devinit isci_pci_probe(struct pci_dev *pdev, const struct pci_devic orom->hdr.version)) { dev_warn(&pdev->dev, "[%d]: invalid oem parameters detected, falling back to firmware\n", i); - devm_kfree(&pdev->dev, orom); orom = NULL; break; } diff --git a/drivers/scsi/isci/probe_roms.c b/drivers/scsi/isci/probe_roms.c index 9b8117b9d756..4c66f4682fd4 100644 --- a/drivers/scsi/isci/probe_roms.c +++ b/drivers/scsi/isci/probe_roms.c @@ -104,7 +104,6 @@ struct isci_orom *isci_request_oprom(struct pci_dev *pdev) if (i >= len) { dev_err(&pdev->dev, "oprom parse error\n"); - devm_kfree(&pdev->dev, rom); rom = NULL; } pci_unmap_biosrom(oprom); diff --git a/drivers/scsi/isci/request.c b/drivers/scsi/isci/request.c index 2def1e3960f6..05c3fc0ce27f 100644 --- a/drivers/scsi/isci/request.c +++ b/drivers/scsi/isci/request.c @@ -1970,7 +1970,7 @@ sci_io_request_frame_handler(struct isci_request *ireq, frame_index, (void **)&frame_buffer); - sci_controller_copy_sata_response(&ireq->stp.req, + sci_controller_copy_sata_response(&ireq->stp.rsp, frame_header, frame_buffer); diff --git a/drivers/scsi/lpfc/Makefile b/drivers/scsi/lpfc/Makefile index fe5d396aca73..e2516ba8ebfa 100644 --- a/drivers/scsi/lpfc/Makefile +++ b/drivers/scsi/lpfc/Makefile @@ -22,7 +22,9 @@ ccflags-$(GCOV) := -fprofile-arcs -ftest-coverage ccflags-$(GCOV) += -O0 +ifdef WARNINGS_BECOME_ERRORS ccflags-y += -Werror +endif 
obj-$(CONFIG_SCSI_LPFC) := lpfc.o diff --git a/drivers/scsi/megaraid/megaraid_sas_base.c b/drivers/scsi/megaraid/megaraid_sas_base.c index 8b300be44284..6308a8ddf6ff 100644 --- a/drivers/scsi/megaraid/megaraid_sas_base.c +++ b/drivers/scsi/megaraid/megaraid_sas_base.c @@ -4066,7 +4066,6 @@ megasas_probe_one(struct pci_dev *pdev, const struct pci_device_id *id) spin_lock_init(&instance->cmd_pool_lock); spin_lock_init(&instance->hba_lock); spin_lock_init(&instance->completion_lock); - spin_lock_init(&poll_aen_lock); mutex_init(&instance->aen_mutex); mutex_init(&instance->reset_mutex); @@ -5392,6 +5391,8 @@ static int __init megasas_init(void) printk(KERN_INFO "megasas: %s %s\n", MEGASAS_VERSION, MEGASAS_EXT_VERSION); + spin_lock_init(&poll_aen_lock); + support_poll_for_event = 2; support_device_change = 1; diff --git a/drivers/scsi/mpt2sas/mpt2sas_base.c b/drivers/scsi/mpt2sas/mpt2sas_base.c index 3f03342b3030..db793621c403 100644 --- a/drivers/scsi/mpt2sas/mpt2sas_base.c +++ b/drivers/scsi/mpt2sas/mpt2sas_base.c @@ -1202,6 +1202,13 @@ _base_check_enable_msix(struct MPT2SAS_ADAPTER *ioc) u16 message_control; + /* Check whether controller SAS2008 B0 controller, + if it is SAS2008 B0 controller use IO-APIC instead of MSIX */ + if (ioc->pdev->device == MPI2_MFGPAGE_DEVID_SAS2008 && + ioc->pdev->revision == 0x01) { + return -EINVAL; + } + base = pci_find_capability(ioc->pdev, PCI_CAP_ID_MSIX); if (!base) { dfailprintk(ioc, printk(MPT2SAS_INFO_FMT "msix not " @@ -2417,10 +2424,13 @@ _base_allocate_memory_pools(struct MPT2SAS_ADAPTER *ioc, int sleep_flag) } /* command line tunables for max controller queue depth */ - if (max_queue_depth != -1) - max_request_credit = (max_queue_depth < facts->RequestCredit) - ? max_queue_depth : facts->RequestCredit; - else + if (max_queue_depth != -1 && max_queue_depth != 0) { + max_request_credit = min_t(u16, max_queue_depth + + ioc->hi_priority_depth + ioc->internal_depth, + facts->RequestCredit); + if (max_request_credit > MAX_HBA_QUEUE_DEPTH) + max_request_credit = MAX_HBA_QUEUE_DEPTH; + } else max_request_credit = min_t(u16, facts->RequestCredit, MAX_HBA_QUEUE_DEPTH); @@ -2495,7 +2505,7 @@ _base_allocate_memory_pools(struct MPT2SAS_ADAPTER *ioc, int sleep_flag) /* set the scsi host can_queue depth * with some internal commands that could be outstanding */ - ioc->shost->can_queue = ioc->scsiio_depth - (2); + ioc->shost->can_queue = ioc->scsiio_depth; dinitprintk(ioc, printk(MPT2SAS_INFO_FMT "scsi host: " "can_queue depth (%d)\n", ioc->name, ioc->shost->can_queue)); diff --git a/drivers/scsi/mvsas/mv_94xx.h b/drivers/scsi/mvsas/mv_94xx.h index 8f7eb4f21140..487aa6f97412 100644 --- a/drivers/scsi/mvsas/mv_94xx.h +++ b/drivers/scsi/mvsas/mv_94xx.h @@ -258,21 +258,11 @@ enum sas_sata_phy_regs { #define SPI_ADDR_VLD_94XX (1U << 1) #define SPI_CTRL_SpiStart_94XX (1U << 0) -#define mv_ffc(x) ffz(x) - static inline int mv_ffc64(u64 v) { - int i; - i = mv_ffc((u32)v); - if (i >= 0) - return i; - i = mv_ffc((u32)(v>>32)); - - if (i != 0) - return 32 + i; - - return -1; + u64 x = ~v; + return x ? 
__ffs64(x) : -1; } #define r_reg_set_enable(i) \ diff --git a/drivers/scsi/mvsas/mv_sas.c b/drivers/scsi/mvsas/mv_sas.c index fd3b2839843b..dbb8edfc8baa 100644 --- a/drivers/scsi/mvsas/mv_sas.c +++ b/drivers/scsi/mvsas/mv_sas.c @@ -885,7 +885,6 @@ static int mvs_task_exec(struct sas_task *task, const int num, gfp_t gfp_flags, struct completion *completion, int is_tmf, struct mvs_tmf_task *tmf) { - struct domain_device *dev = task->dev; struct mvs_info *mvi = NULL; u32 rc = 0; u32 pass = 0; @@ -1630,7 +1629,7 @@ int mvs_abort_task(struct sas_task *task) mv_dprintk("mvs_abort_task() mvi=%p task=%p " "slot=%p slot_idx=x%x\n", mvi, task, slot, slot_idx); - mvs_tmf_timedout((unsigned long)task); + task->task_state_flags |= SAS_TASK_STATE_ABORTED; mvs_slot_task_free(mvi, task, slot, slot_idx); rc = TMF_RESP_FUNC_COMPLETE; goto out; diff --git a/drivers/scsi/mvsas/mv_sas.h b/drivers/scsi/mvsas/mv_sas.h index c04a4f5b5972..da249553858c 100644 --- a/drivers/scsi/mvsas/mv_sas.h +++ b/drivers/scsi/mvsas/mv_sas.h @@ -69,7 +69,7 @@ extern struct kmem_cache *mvs_task_list_cache; #define DEV_IS_EXPANDER(type) \ ((type == EDGE_DEV) || (type == FANOUT_DEV)) -#define bit(n) ((u32)1 << n) +#define bit(n) ((u64)1 << n) #define for_each_phy(__lseq_mask, __mc, __lseq) \ for ((__mc) = (__lseq_mask), (__lseq) = 0; \ diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c index 7db803377c64..da1315ede009 100644 --- a/drivers/scsi/qla2xxx/qla_os.c +++ b/drivers/scsi/qla2xxx/qla_os.c @@ -3735,9 +3735,9 @@ qla2x00_do_dpc(void *data) "ISP abort end.\n"); } - if (test_bit(FCPORT_UPDATE_NEEDED, &base_vha->dpc_flags)) { + if (test_and_clear_bit(FCPORT_UPDATE_NEEDED, + &base_vha->dpc_flags)) { qla2x00_update_fcports(base_vha); - clear_bit(FCPORT_UPDATE_NEEDED, &base_vha->dpc_flags); } if (test_bit(ISP_QUIESCE_NEEDED, &base_vha->dpc_flags)) { diff --git a/drivers/scsi/scsi_debug.c b/drivers/scsi/scsi_debug.c index 182d5a57ab74..f4cc4139ada8 100644 --- a/drivers/scsi/scsi_debug.c +++ b/drivers/scsi/scsi_debug.c @@ -2054,7 +2054,7 @@ static void unmap_region(sector_t lba, unsigned int len) block = lba + alignment; rem = do_div(block, granularity); - if (rem == 0 && lba + granularity <= end && block < map_size) { + if (rem == 0 && lba + granularity < end && block < map_size) { clear_bit(block, map_storep); if (scsi_debug_lbprz) memset(fake_storep + diff --git a/drivers/scsi/scsi_error.c b/drivers/scsi/scsi_error.c index cc8dc8cc6d6f..b3f0b0f6d44b 100644 --- a/drivers/scsi/scsi_error.c +++ b/drivers/scsi/scsi_error.c @@ -42,6 +42,8 @@ #include <trace/events/scsi.h> +static void scsi_eh_done(struct scsi_cmnd *scmd); + #define SENSE_TIMEOUT (10*HZ) /* @@ -241,6 +243,14 @@ static int scsi_check_sense(struct scsi_cmnd *scmd) if (! scsi_command_normalize_sense(scmd, &sshdr)) return FAILED; /* no valid sense data */ + if (scmd->cmnd[0] == TEST_UNIT_READY && scmd->scsi_done != scsi_eh_done) + /* + * nasty: for mid-layer issued TURs, we need to return the + * actual sense data without any recovery attempt. 
For eh + * issued ones, we need to try to recover and interpret + */ + return SUCCESS; + if (scsi_sense_is_deferred(&sshdr)) return NEEDS_RETRY; diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c index 19291463c554..2bc036292d9d 100644 --- a/drivers/scsi/scsi_lib.c +++ b/drivers/scsi/scsi_lib.c @@ -760,7 +760,6 @@ void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes) } if (req->cmd_type == REQ_TYPE_BLOCK_PC) { /* SG_IO ioctl from block level */ - req->errors = result; if (result) { if (sense_valid && req->sense) { /* @@ -776,6 +775,10 @@ void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes) if (!sense_deferred) error = __scsi_error_from_host_byte(cmd, result); } + /* + * __scsi_error_from_host_byte may have reset the host_byte + */ + req->errors = cmd->result; req->resid_len = scsi_get_resid(cmd); diff --git a/drivers/scsi/scsi_scan.c b/drivers/scsi/scsi_scan.c index 8906557d2a24..348840e80921 100644 --- a/drivers/scsi/scsi_scan.c +++ b/drivers/scsi/scsi_scan.c @@ -776,6 +776,16 @@ static int scsi_add_lun(struct scsi_device *sdev, unsigned char *inq_result, sdev->model = (char *) (sdev->inquiry + 16); sdev->rev = (char *) (sdev->inquiry + 32); + if (strncmp(sdev->vendor, "ATA ", 8) == 0) { + /* + * sata emulation layer device. This is a hack to work around + * the SATL power management specifications which state that + * when the SATL detects the device has gone into standby + * mode, it shall respond with NOT READY. + */ + sdev->allow_restart = 1; + } + if (*bflags & BLIST_ISROM) { sdev->type = TYPE_ROM; sdev->removable = 1; diff --git a/drivers/scsi/scsi_sysfs.c b/drivers/scsi/scsi_sysfs.c index bb7c4828acc0..72ca515a4304 100644 --- a/drivers/scsi/scsi_sysfs.c +++ b/drivers/scsi/scsi_sysfs.c @@ -246,11 +246,11 @@ show_shost_active_mode(struct device *dev, static DEVICE_ATTR(active_mode, S_IRUGO | S_IWUSR, show_shost_active_mode, NULL); -static int check_reset_type(char *str) +static int check_reset_type(const char *str) { - if (strncmp(str, "adapter", 10) == 0) + if (sysfs_streq(str, "adapter")) return SCSI_ADAPTER_RESET; - else if (strncmp(str, "firmware", 10) == 0) + else if (sysfs_streq(str, "firmware")) return SCSI_FIRMWARE_RESET; else return 0; @@ -263,12 +263,9 @@ store_host_reset(struct device *dev, struct device_attribute *attr, struct Scsi_Host *shost = class_to_shost(dev); struct scsi_host_template *sht = shost->hostt; int ret = -EINVAL; - char str[10]; int type; - sscanf(buf, "%s", str); - type = check_reset_type(str); - + type = check_reset_type(buf); if (!type) goto exit_store_host_reset; @@ -1023,33 +1020,31 @@ static void __scsi_remove_target(struct scsi_target *starget) void scsi_remove_target(struct device *dev) { struct Scsi_Host *shost = dev_to_shost(dev->parent); - struct scsi_target *starget, *found; + struct scsi_target *starget, *last = NULL; unsigned long flags; - restart: - found = NULL; + /* remove targets being careful to lookup next entry before + * deleting the last + */ spin_lock_irqsave(shost->host_lock, flags); list_for_each_entry(starget, &shost->__targets, siblings) { if (starget->state == STARGET_DEL) continue; if (starget->dev.parent == dev || &starget->dev == dev) { - found = starget; - found->reap_ref++; - break; + /* assuming new targets arrive at the end */ + starget->reap_ref++; + spin_unlock_irqrestore(shost->host_lock, flags); + if (last) + scsi_target_reap(last); + last = starget; + __scsi_remove_target(starget); + spin_lock_irqsave(shost->host_lock, flags); } } 
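The scsi_sysfs change above drops the sscanf() into a fixed 10-byte buffer in favour of sysfs_streq(), which compares the raw store() buffer against a literal while tolerating the trailing newline that echo appends. A hedged sketch of a store handler written that way (mode_store and the mode values are made up for illustration):

#include <linux/device.h>
#include <linux/string.h>

static ssize_t mode_store(struct device *dev, struct device_attribute *attr,
			  const char *buf, size_t count)
{
	int mode;

	if (sysfs_streq(buf, "adapter"))
		mode = 1;
	else if (sysfs_streq(buf, "firmware"))
		mode = 2;
	else
		return -EINVAL;

	dev_info(dev, "reset mode %d requested\n", mode);
	return count;	/* consume the whole write */
}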
spin_unlock_irqrestore(shost->host_lock, flags); - if (found) { - __scsi_remove_target(found); - scsi_target_reap(found); - /* in the case where @dev has multiple starget children, - * continue removing. - * - * FIXME: does such a case exist? - */ - goto restart; - } + if (last) + scsi_target_reap(last); } EXPORT_SYMBOL(scsi_remove_target); diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c index a239382b2bdd..5b3cadbffcfc 100644 --- a/drivers/scsi/sd.c +++ b/drivers/scsi/sd.c @@ -2919,10 +2919,6 @@ static int __init init_sd(void) if (err) goto err_out; - err = scsi_register_driver(&sd_template.gendrv); - if (err) - goto err_out_class; - sd_cdb_cache = kmem_cache_create("sd_ext_cdb", SD_EXT_CDB_SIZE, 0, 0, NULL); if (!sd_cdb_cache) { @@ -2936,8 +2932,15 @@ static int __init init_sd(void) goto err_out_cache; } + err = scsi_register_driver(&sd_template.gendrv); + if (err) + goto err_out_driver; + return 0; +err_out_driver: + mempool_destroy(sd_cdb_pool); + err_out_cache: kmem_cache_destroy(sd_cdb_cache); @@ -2960,10 +2963,10 @@ static void __exit exit_sd(void) SCSI_LOG_HLQUEUE(3, printk("exit_sd: exiting sd driver\n")); + scsi_unregister_driver(&sd_template.gendrv); mempool_destroy(sd_cdb_pool); kmem_cache_destroy(sd_cdb_cache); - scsi_unregister_driver(&sd_template.gendrv); class_unregister(&sd_disk_class); for (i = 0; i < SD_MAJORS; i++) diff --git a/drivers/scsi/storvsc_drv.c b/drivers/scsi/storvsc_drv.c index 83a1972a1999..40a45700c8db 100644 --- a/drivers/scsi/storvsc_drv.c +++ b/drivers/scsi/storvsc_drv.c @@ -1211,7 +1211,12 @@ static int storvsc_host_reset_handler(struct scsi_cmnd *scmnd) /* * At this point, all outstanding requests in the adapter * should have been flushed out and return to us + * There is a potential race here where the host may be in + * the process of responding when we return from here. + * Just wait for all in-transit packets to be accounted for + * before we return from here. 
*/ + storvsc_wait_to_drain(stor_device); return SUCCESS; } diff --git a/drivers/scsi/virtio_scsi.c b/drivers/scsi/virtio_scsi.c index 1b3843117268..6661610c752e 100644 --- a/drivers/scsi/virtio_scsi.c +++ b/drivers/scsi/virtio_scsi.c @@ -198,7 +198,7 @@ static void virtscsi_map_sgl(struct scatterlist *sg, unsigned int *p_idx, int i; for_each_sg(table->sgl, sg_elem, table->nents, i) - sg_set_buf(&sg[idx++], sg_virt(sg_elem), sg_elem->length); + sg[idx++] = *sg_elem; *p_idx = idx; } diff --git a/drivers/staging/android/binder.c b/drivers/staging/android/binder.c index 85ed6cfd1803..c831dcc58e78 100644 --- a/drivers/staging/android/binder.c +++ b/drivers/staging/android/binder.c @@ -671,7 +671,7 @@ static int binder_update_page_range(struct binder_proc *proc, int allocate, page = &proc->pages[(page_addr - proc->buffer) / PAGE_SIZE]; BUG_ON(*page); - *page = alloc_page(GFP_KERNEL | __GFP_ZERO); + *page = alloc_page(GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO); if (*page == NULL) { printk(KERN_ERR "binder: %d: binder_alloc_buf failed " "for page at %p\n", proc->pid, page_addr); @@ -2548,14 +2548,38 @@ static void binder_release_work(struct list_head *list) struct binder_transaction *t; t = container_of(w, struct binder_transaction, work); - if (t->buffer->target_node && !(t->flags & TF_ONE_WAY)) + if (t->buffer->target_node && + !(t->flags & TF_ONE_WAY)) { binder_send_failed_reply(t, BR_DEAD_REPLY); + } else { + binder_debug(BINDER_DEBUG_DEAD_TRANSACTION, + "binder: undelivered transaction %d\n", + t->debug_id); + t->buffer->transaction = NULL; + kfree(t); + binder_stats_deleted(BINDER_STAT_TRANSACTION); + } } break; case BINDER_WORK_TRANSACTION_COMPLETE: { + binder_debug(BINDER_DEBUG_DEAD_TRANSACTION, + "binder: undelivered TRANSACTION_COMPLETE\n"); kfree(w); binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE); } break; + case BINDER_WORK_DEAD_BINDER_AND_CLEAR: + case BINDER_WORK_CLEAR_DEATH_NOTIFICATION: { + struct binder_ref_death *death; + + death = container_of(w, struct binder_ref_death, work); + binder_debug(BINDER_DEBUG_DEAD_TRANSACTION, + "binder: undelivered death notification, %p\n", + death->cookie); + kfree(death); + binder_stats_deleted(BINDER_STAT_DEATH); + } break; default: + pr_err("binder: unexpected work type, %d, not freed\n", + w->type); break; } } @@ -3036,6 +3060,7 @@ static void binder_deferred_release(struct binder_proc *proc) nodes++; rb_erase(&node->rb_node, &proc->nodes); list_del_init(&node->work.entry); + binder_release_work(&node->async_todo); if (hlist_empty(&node->refs)) { kfree(node); binder_stats_deleted(BINDER_STAT_NODE); @@ -3074,6 +3099,7 @@ static void binder_deferred_release(struct binder_proc *proc) binder_delete_ref(ref); } binder_release_work(&proc->todo); + binder_release_work(&proc->delivered_death); buffers = 0; while ((n = rb_first(&proc->allocated_buffers))) { diff --git a/drivers/staging/comedi/Kconfig b/drivers/staging/comedi/Kconfig index 12c691d90900..f11cfc870129 100644 --- a/drivers/staging/comedi/Kconfig +++ b/drivers/staging/comedi/Kconfig @@ -424,6 +424,7 @@ config COMEDI_ADQ12B config COMEDI_NI_AT_A2150 tristate "NI AT-A2150 ISA card support" + select COMEDI_FC depends on COMEDI_NI_COMMON depends on VIRT_TO_BUS default N diff --git a/drivers/staging/comedi/comedi_fops.c b/drivers/staging/comedi/comedi_fops.c index a796964bfff4..3799cf1c2b64 100644 --- a/drivers/staging/comedi/comedi_fops.c +++ b/drivers/staging/comedi/comedi_fops.c @@ -136,8 +136,16 @@ static long comedi_unlocked_ioctl(struct file *file, unsigned int cmd, /* Device config 
is special, because it must work on * an unconfigured device. */ if (cmd == COMEDI_DEVCONFIG) { + if (minor >= COMEDI_NUM_BOARD_MINORS) { + /* Device config not appropriate on non-board minors. */ + rc = -ENOTTY; + goto done; + } rc = do_devconfig_ioctl(dev, (struct comedi_devconfig __user *)arg); + if (rc == 0) + /* Evade comedi_auto_unconfig(). */ + dev_file_info->hardware_device = NULL; goto done; } @@ -843,7 +851,7 @@ static int parse_insn(struct comedi_device *dev, struct comedi_insn *insn, ret = -EAGAIN; break; } - ret = s->async->inttrig(dev, s, insn->data[0]); + ret = s->async->inttrig(dev, s, data[0]); if (ret >= 0) ret = 1; break; @@ -1088,7 +1096,6 @@ static int do_cmd_ioctl(struct comedi_device *dev, goto cleanup; } - kfree(async->cmd.chanlist); async->cmd = user_cmd; async->cmd.data = NULL; /* load channel/gain list */ @@ -1570,7 +1577,7 @@ static unsigned int comedi_poll(struct file *file, poll_table * wait) mask = 0; read_subdev = comedi_get_read_subdevice(dev_file_info); - if (read_subdev) { + if (read_subdev && read_subdev->async) { poll_wait(file, &read_subdev->async->wait_head, wait); if (!read_subdev->busy || comedi_buf_read_n_available(read_subdev->async) > 0 @@ -1580,7 +1587,7 @@ static unsigned int comedi_poll(struct file *file, poll_table * wait) } } write_subdev = comedi_get_write_subdevice(dev_file_info); - if (write_subdev) { + if (write_subdev && write_subdev->async) { poll_wait(file, &write_subdev->async->wait_head, wait); comedi_buf_write_alloc(write_subdev->async, write_subdev->async->prealloc_bufsz); @@ -1622,7 +1629,7 @@ static ssize_t comedi_write(struct file *file, const char __user *buf, } s = comedi_get_write_subdevice(dev_file_info); - if (s == NULL) { + if (s == NULL || s->async == NULL) { retval = -EIO; goto done; } @@ -1733,7 +1740,7 @@ static ssize_t comedi_read(struct file *file, char __user *buf, size_t nbytes, } s = comedi_get_read_subdevice(dev_file_info); - if (s == NULL) { + if (s == NULL || s->async == NULL) { retval = -EIO; goto done; } @@ -1833,6 +1840,8 @@ void do_become_nonbusy(struct comedi_device *dev, struct comedi_subdevice *s) if (async) { comedi_reset_async_buf(async); async->inttrig = NULL; + kfree(async->cmd.chanlist); + async->cmd.chanlist = NULL; } else { printk(KERN_ERR "BUG: (?) 
do_become_nonbusy called with async=0\n"); @@ -2206,6 +2215,7 @@ int comedi_alloc_board_minor(struct device *hardware_device) kfree(info); return -ENOMEM; } + info->hardware_device = hardware_device; comedi_device_init(info->device); spin_lock_irqsave(&comedi_file_info_table_lock, flags); for (i = 0; i < COMEDI_NUM_BOARD_MINORS; ++i) { @@ -2294,6 +2304,23 @@ void comedi_free_board_minor(unsigned minor) } } +int comedi_find_board_minor(struct device *hardware_device) +{ + int minor; + struct comedi_device_file_info *info; + + for (minor = 0; minor < COMEDI_NUM_BOARD_MINORS; minor++) { + spin_lock(&comedi_file_info_table_lock); + info = comedi_file_info_table[minor]; + if (info && info->hardware_device == hardware_device) { + spin_unlock(&comedi_file_info_table_lock); + return minor; + } + spin_unlock(&comedi_file_info_table_lock); + } + return -ENODEV; +} + int comedi_alloc_subdevice_minor(struct comedi_device *dev, struct comedi_subdevice *s) { diff --git a/drivers/staging/comedi/comedidev.h b/drivers/staging/comedi/comedidev.h index 7a0d4bcbc355..00d3c65589bd 100644 --- a/drivers/staging/comedi/comedidev.h +++ b/drivers/staging/comedi/comedidev.h @@ -234,6 +234,7 @@ struct comedi_device_file_info { struct comedi_device *device; struct comedi_subdevice *read_subdevice; struct comedi_subdevice *write_subdevice; + struct device *hardware_device; }; #ifdef CONFIG_COMEDI_DEBUG diff --git a/drivers/staging/comedi/drivers.c b/drivers/staging/comedi/drivers.c index bf185e2807d1..0fc1b9939d97 100644 --- a/drivers/staging/comedi/drivers.c +++ b/drivers/staging/comedi/drivers.c @@ -822,25 +822,14 @@ static int comedi_auto_config(struct device *hardware_device, int minor; struct comedi_device_file_info *dev_file_info; int retval; - unsigned *private_data = NULL; - if (!comedi_autoconfig) { - dev_set_drvdata(hardware_device, NULL); + if (!comedi_autoconfig) return 0; - } minor = comedi_alloc_board_minor(hardware_device); if (minor < 0) return minor; - private_data = kmalloc(sizeof(unsigned), GFP_KERNEL); - if (private_data == NULL) { - retval = -ENOMEM; - goto cleanup; - } - *private_data = minor; - dev_set_drvdata(hardware_device, private_data); - dev_file_info = comedi_get_device_file_info(minor); memset(&it, 0, sizeof(it)); @@ -853,25 +842,22 @@ static int comedi_auto_config(struct device *hardware_device, retval = comedi_device_attach(dev_file_info->device, &it); mutex_unlock(&dev_file_info->device->mutex); -cleanup: - if (retval < 0) { - kfree(private_data); + if (retval < 0) comedi_free_board_minor(minor); - } return retval; } static void comedi_auto_unconfig(struct device *hardware_device) { - unsigned *minor = (unsigned *)dev_get_drvdata(hardware_device); - if (minor == NULL) - return; - - BUG_ON(*minor >= COMEDI_NUM_BOARD_MINORS); + int minor; - comedi_free_board_minor(*minor); - dev_set_drvdata(hardware_device, NULL); - kfree(minor); + if (hardware_device == NULL) + return; + minor = comedi_find_board_minor(hardware_device); + if (minor < 0) + return; + BUG_ON(minor >= COMEDI_NUM_BOARD_MINORS); + comedi_free_board_minor(minor); } int comedi_pci_auto_config(struct pci_dev *pcidev, const char *board_name) diff --git a/drivers/staging/comedi/drivers/amplc_pc236.c b/drivers/staging/comedi/drivers/amplc_pc236.c index 7972cadd403e..9a23d20938a5 100644 --- a/drivers/staging/comedi/drivers/amplc_pc236.c +++ b/drivers/staging/comedi/drivers/amplc_pc236.c @@ -468,7 +468,7 @@ static int pc236_detach(struct comedi_device *dev) { printk(KERN_DEBUG "comedi%d: %s: detach\n", dev->minor, PC236_DRIVER_NAME); 
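The comedi changes in this range stop stashing the allocated minor in dev_set_drvdata() and instead look it up from a minor-indexed table keyed by the hardware device, taking the table lock around each slot as comedi_find_board_minor() does. A rough sketch of that lookup under assumed names (minor_table, table_lock and NUM_MINORS are placeholders):

#include <linux/device.h>
#include <linux/errno.h>
#include <linux/spinlock.h>

#define NUM_MINORS	48

static DEFINE_SPINLOCK(table_lock);
static struct device *minor_table[NUM_MINORS];	/* minor -> bound device */

static int find_minor(struct device *hw_dev)
{
	int minor;

	for (minor = 0; minor < NUM_MINORS; minor++) {
		spin_lock(&table_lock);
		if (minor_table[minor] == hw_dev) {
			spin_unlock(&table_lock);
			return minor;
		}
		spin_unlock(&table_lock);
	}
	return -ENODEV;
}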
- if (devpriv) + if (dev->iobase) pc236_intr_disable(dev); if (dev->irq) diff --git a/drivers/staging/comedi/drivers/comedi_test.c b/drivers/staging/comedi/drivers/comedi_test.c index a804742b8022..2567f9ab4167 100644 --- a/drivers/staging/comedi/drivers/comedi_test.c +++ b/drivers/staging/comedi/drivers/comedi_test.c @@ -461,7 +461,7 @@ static int waveform_ai_cancel(struct comedi_device *dev, struct comedi_subdevice *s) { devpriv->timer_running = 0; - del_timer(&devpriv->timer); + del_timer_sync(&devpriv->timer); return 0; } diff --git a/drivers/staging/comedi/drivers/das08.c b/drivers/staging/comedi/drivers/das08.c index c2dd0ed36a73..d2dd75e2af7f 100644 --- a/drivers/staging/comedi/drivers/das08.c +++ b/drivers/staging/comedi/drivers/das08.c @@ -653,7 +653,7 @@ static int das08jr_ao_winsn(struct comedi_device *dev, int chan; lsb = data[0] & 0xff; - msb = (data[0] >> 8) & 0xf; + msb = (data[0] >> 8) & 0xff; chan = CR_CHAN(insn->chanspec); diff --git a/drivers/staging/comedi/drivers/jr3_pci.c b/drivers/staging/comedi/drivers/jr3_pci.c index 6a79ba10630d..d7e63419e617 100644 --- a/drivers/staging/comedi/drivers/jr3_pci.c +++ b/drivers/staging/comedi/drivers/jr3_pci.c @@ -905,7 +905,7 @@ static int jr3_pci_attach(struct comedi_device *dev, } /* Reset DSP card */ - devpriv->iobase->channel[0].reset = 0; + writel(0, &devpriv->iobase->channel[0].reset); result = comedi_load_firmware(dev, "jr3pci.idm", jr3_download_firmware); dev_dbg(dev->hw_dev, "Firmare load %d\n", result); diff --git a/drivers/staging/comedi/drivers/ni_labpc.c b/drivers/staging/comedi/drivers/ni_labpc.c index 721b2be22500..0517a2315efc 100644 --- a/drivers/staging/comedi/drivers/ni_labpc.c +++ b/drivers/staging/comedi/drivers/ni_labpc.c @@ -1264,7 +1264,9 @@ static int labpc_ai_cmd(struct comedi_device *dev, struct comedi_subdevice *s) else channel = CR_CHAN(cmd->chanlist[0]); /* munge channel bits for differential / scan disabled mode */ - if (labpc_ai_scan_mode(cmd) != MODE_SINGLE_CHAN && aref == AREF_DIFF) + if ((labpc_ai_scan_mode(cmd) == MODE_SINGLE_CHAN || + labpc_ai_scan_mode(cmd) == MODE_SINGLE_CHAN_INTERVAL) && + aref == AREF_DIFF) channel *= 2; devpriv->command1_bits |= ADC_CHAN_BITS(channel); devpriv->command1_bits |= thisboard->ai_range_code[range]; @@ -1280,21 +1282,6 @@ static int labpc_ai_cmd(struct comedi_device *dev, struct comedi_subdevice *s) devpriv->write_byte(devpriv->command1_bits, dev->iobase + COMMAND1_REG); } - /* setup any external triggering/pacing (command4 register) */ - devpriv->command4_bits = 0; - if (cmd->convert_src != TRIG_EXT) - devpriv->command4_bits |= EXT_CONVERT_DISABLE_BIT; - /* XXX should discard first scan when using interval scanning - * since manual says it is not synced with scan clock */ - if (labpc_use_continuous_mode(cmd) == 0) { - devpriv->command4_bits |= INTERVAL_SCAN_EN_BIT; - if (cmd->scan_begin_src == TRIG_EXT) - devpriv->command4_bits |= EXT_SCAN_EN_BIT; - } - /* single-ended/differential */ - if (aref == AREF_DIFF) - devpriv->command4_bits |= ADC_DIFF_BIT; - devpriv->write_byte(devpriv->command4_bits, dev->iobase + COMMAND4_REG); devpriv->write_byte(cmd->chanlist_len, dev->iobase + INTERVAL_COUNT_REG); @@ -1374,6 +1361,22 @@ static int labpc_ai_cmd(struct comedi_device *dev, struct comedi_subdevice *s) devpriv->command3_bits &= ~ADC_FNE_INTR_EN_BIT; devpriv->write_byte(devpriv->command3_bits, dev->iobase + COMMAND3_REG); + /* setup any external triggering/pacing (command4 register) */ + devpriv->command4_bits = 0; + if (cmd->convert_src != TRIG_EXT) + 
devpriv->command4_bits |= EXT_CONVERT_DISABLE_BIT; + /* XXX should discard first scan when using interval scanning + * since manual says it is not synced with scan clock */ + if (labpc_use_continuous_mode(cmd) == 0) { + devpriv->command4_bits |= INTERVAL_SCAN_EN_BIT; + if (cmd->scan_begin_src == TRIG_EXT) + devpriv->command4_bits |= EXT_SCAN_EN_BIT; + } + /* single-ended/differential */ + if (aref == AREF_DIFF) + devpriv->command4_bits |= ADC_DIFF_BIT; + devpriv->write_byte(devpriv->command4_bits, dev->iobase + COMMAND4_REG); + /* startup acquisition */ /* command2 reg */ diff --git a/drivers/staging/comedi/drivers/ni_pcimio.c b/drivers/staging/comedi/drivers/ni_pcimio.c index 27baefa32b17..59c1f88894a8 100644 --- a/drivers/staging/comedi/drivers/ni_pcimio.c +++ b/drivers/staging/comedi/drivers/ni_pcimio.c @@ -1023,7 +1023,7 @@ static const struct ni_board_struct ni_boards[] = { .ao_range_table = &range_ni_M_625x_ao, .reg_type = ni_reg_625x, .ao_unipolar = 0, - .ao_speed = 357, + .ao_speed = 350, .num_p0_dio_channels = 8, .caldac = {caldac_none}, .has_8255 = 0, @@ -1042,7 +1042,7 @@ static const struct ni_board_struct ni_boards[] = { .ao_range_table = &range_ni_M_625x_ao, .reg_type = ni_reg_625x, .ao_unipolar = 0, - .ao_speed = 357, + .ao_speed = 350, .num_p0_dio_channels = 8, .caldac = {caldac_none}, .has_8255 = 0, @@ -1061,7 +1061,7 @@ static const struct ni_board_struct ni_boards[] = { .ao_range_table = &range_ni_M_625x_ao, .reg_type = ni_reg_625x, .ao_unipolar = 0, - .ao_speed = 357, + .ao_speed = 350, .num_p0_dio_channels = 8, .caldac = {caldac_none}, .has_8255 = 0, @@ -1097,7 +1097,7 @@ static const struct ni_board_struct ni_boards[] = { .ao_range_table = &range_ni_M_625x_ao, .reg_type = ni_reg_625x, .ao_unipolar = 0, - .ao_speed = 357, + .ao_speed = 350, .num_p0_dio_channels = 32, .caldac = {caldac_none}, .has_8255 = 0, @@ -1116,7 +1116,7 @@ static const struct ni_board_struct ni_boards[] = { .ao_range_table = &range_ni_M_625x_ao, .reg_type = ni_reg_625x, .ao_unipolar = 0, - .ao_speed = 357, + .ao_speed = 350, .num_p0_dio_channels = 32, .caldac = {caldac_none}, .has_8255 = 0, @@ -1152,7 +1152,7 @@ static const struct ni_board_struct ni_boards[] = { .ao_range_table = &range_ni_M_628x_ao, .reg_type = ni_reg_628x, .ao_unipolar = 1, - .ao_speed = 357, + .ao_speed = 350, .num_p0_dio_channels = 8, .caldac = {caldac_none}, .has_8255 = 0, @@ -1171,7 +1171,7 @@ static const struct ni_board_struct ni_boards[] = { .ao_range_table = &range_ni_M_628x_ao, .reg_type = ni_reg_628x, .ao_unipolar = 1, - .ao_speed = 357, + .ao_speed = 350, .num_p0_dio_channels = 8, .caldac = {caldac_none}, .has_8255 = 0, @@ -1207,7 +1207,7 @@ static const struct ni_board_struct ni_boards[] = { .ao_range_table = &range_ni_M_628x_ao, .reg_type = ni_reg_628x, .ao_unipolar = 1, - .ao_speed = 357, + .ao_speed = 350, .num_p0_dio_channels = 32, .caldac = {caldac_none}, .has_8255 = 0, diff --git a/drivers/staging/comedi/drivers/s626.c b/drivers/staging/comedi/drivers/s626.c index 23fc64b9988e..c72128f30f17 100644 --- a/drivers/staging/comedi/drivers/s626.c +++ b/drivers/staging/comedi/drivers/s626.c @@ -2370,7 +2370,7 @@ static int s626_enc_insn_config(struct comedi_device *dev, /* (data==NULL) ? 
(Preloadvalue=0) : (Preloadvalue=data[0]); */ k->SetMode(dev, k, Setup, TRUE); - Preload(dev, k, *(insn->data)); + Preload(dev, k, data[0]); k->PulseIndex(dev, k); SetLatchSource(dev, k, valueSrclatch); k->SetEnable(dev, k, (uint16_t) (enab != 0)); diff --git a/drivers/staging/comedi/internal.h b/drivers/staging/comedi/internal.h index 434ce3433368..4208fb4cf0ff 100644 --- a/drivers/staging/comedi/internal.h +++ b/drivers/staging/comedi/internal.h @@ -7,6 +7,7 @@ int insn_inval(struct comedi_device *dev, struct comedi_subdevice *s, struct comedi_insn *insn, unsigned int *data); int comedi_alloc_board_minor(struct device *hardware_device); void comedi_free_board_minor(unsigned minor); +int comedi_find_board_minor(struct device *hardware_device); void comedi_reset_async_buf(struct comedi_async *async); int comedi_buf_alloc(struct comedi_device *dev, struct comedi_subdevice *s, unsigned long new_size); diff --git a/drivers/staging/media/lirc/lirc_sir.c b/drivers/staging/media/lirc/lirc_sir.c index 945d9623550b..4afc3b419738 100644 --- a/drivers/staging/media/lirc/lirc_sir.c +++ b/drivers/staging/media/lirc/lirc_sir.c @@ -52,6 +52,7 @@ #include <linux/io.h> #include <asm/irq.h> #include <linux/fcntl.h> +#include <linux/platform_device.h> #ifdef LIRC_ON_SA1100 #include <asm/hardware.h> #ifdef CONFIG_SA1100_COLLIE @@ -487,9 +488,11 @@ static struct lirc_driver driver = { .owner = THIS_MODULE, }; +static struct platform_device *lirc_sir_dev; static int init_chrdev(void) { + driver.dev = &lirc_sir_dev->dev; driver.minor = lirc_register_driver(&driver); if (driver.minor < 0) { printk(KERN_ERR LIRC_DRIVER_NAME ": init_chrdev() failed.\n"); @@ -1215,20 +1218,71 @@ static int init_lirc_sir(void) return 0; } +static int __devinit lirc_sir_probe(struct platform_device *dev) +{ + return 0; +} + +static int __devexit lirc_sir_remove(struct platform_device *dev) +{ + return 0; +} + +static struct platform_driver lirc_sir_driver = { + .probe = lirc_sir_probe, + .remove = __devexit_p(lirc_sir_remove), + .driver = { + .name = "lirc_sir", + .owner = THIS_MODULE, + }, +}; static int __init lirc_sir_init(void) { int retval; + retval = platform_driver_register(&lirc_sir_driver); + if (retval) { + printk(KERN_ERR LIRC_DRIVER_NAME ": Platform driver register " + "failed!\n"); + return -ENODEV; + } + + lirc_sir_dev = platform_device_alloc("lirc_dev", 0); + if (!lirc_sir_dev) { + printk(KERN_ERR LIRC_DRIVER_NAME ": Platform device alloc " + "failed!\n"); + retval = -ENOMEM; + goto pdev_alloc_fail; + } + + retval = platform_device_add(lirc_sir_dev); + if (retval) { + printk(KERN_ERR LIRC_DRIVER_NAME ": Platform device add " + "failed!\n"); + retval = -ENODEV; + goto pdev_add_fail; + } + retval = init_chrdev(); if (retval < 0) - return retval; + goto fail; + retval = init_lirc_sir(); if (retval) { drop_chrdev(); - return retval; + goto fail; } + return 0; + +fail: + platform_device_del(lirc_sir_dev); +pdev_add_fail: + platform_device_put(lirc_sir_dev); +pdev_alloc_fail: + platform_driver_unregister(&lirc_sir_driver); + return retval; } static void __exit lirc_sir_exit(void) @@ -1236,6 +1290,8 @@ static void __exit lirc_sir_exit(void) drop_hardware(); drop_chrdev(); drop_port(); + platform_device_unregister(lirc_sir_dev); + platform_driver_unregister(&lirc_sir_driver); printk(KERN_INFO LIRC_DRIVER_NAME ": Uninstalled.\n"); } diff --git a/drivers/staging/rtl8712/recv_linux.c b/drivers/staging/rtl8712/recv_linux.c index 0e26d5f6cf2d..495ee1205e02 100644 --- a/drivers/staging/rtl8712/recv_linux.c +++ 
b/drivers/staging/rtl8712/recv_linux.c @@ -117,13 +117,8 @@ void r8712_recv_indicatepkt(struct _adapter *padapter, if (skb == NULL) goto _recv_indicatepkt_drop; skb->data = precv_frame->u.hdr.rx_data; -#ifdef NET_SKBUFF_DATA_USES_OFFSET - skb->tail = (sk_buff_data_t)(precv_frame->u.hdr.rx_tail - - precv_frame->u.hdr.rx_head); -#else - skb->tail = (sk_buff_data_t)precv_frame->u.hdr.rx_tail; -#endif skb->len = precv_frame->u.hdr.len; + skb_set_tail_pointer(skb, skb->len); if ((pattrib->tcpchk_valid == 1) && (pattrib->tcp_chkrpt == 1)) skb->ip_summed = CHECKSUM_UNNECESSARY; else diff --git a/drivers/staging/rtl8712/rtl8712_recv.c b/drivers/staging/rtl8712/rtl8712_recv.c index fa6dc9c09b3f..887a80709ab8 100644 --- a/drivers/staging/rtl8712/rtl8712_recv.c +++ b/drivers/staging/rtl8712/rtl8712_recv.c @@ -1126,6 +1126,9 @@ static void recv_tasklet(void *priv) recvbuf2recvframe(padapter, pskb); skb_reset_tail_pointer(pskb); pskb->len = 0; - skb_queue_tail(&precvpriv->free_recv_skb_queue, pskb); + if (!skb_cloned(pskb)) + skb_queue_tail(&precvpriv->free_recv_skb_queue, pskb); + else + consume_skb(pskb); } } diff --git a/drivers/staging/rtl8712/usb_intf.c b/drivers/staging/rtl8712/usb_intf.c index 2c80745a446c..7b3ae00ac54a 100644 --- a/drivers/staging/rtl8712/usb_intf.c +++ b/drivers/staging/rtl8712/usb_intf.c @@ -66,6 +66,8 @@ static struct usb_device_id rtl871x_usb_id_tbl[] = { {USB_DEVICE(0x0B05, 0x1791)}, /* 11n mode disable */ /* Belkin */ {USB_DEVICE(0x050D, 0x945A)}, + /* ISY IWL - Belkin clone */ + {USB_DEVICE(0x050D, 0x11F1)}, /* Corega */ {USB_DEVICE(0x07AA, 0x0047)}, /* D-Link */ diff --git a/drivers/staging/speakup/main.c b/drivers/staging/speakup/main.c index 92b34e29ad06..40e2488b9679 100644 --- a/drivers/staging/speakup/main.c +++ b/drivers/staging/speakup/main.c @@ -1854,7 +1854,7 @@ static void speakup_bits(struct vc_data *vc) static int handle_goto(struct vc_data *vc, u_char type, u_char ch, u_short key) { - static u_char *goto_buf = "\0\0\0\0\0\0"; + static u_char goto_buf[8]; static int num; int maxlen, go_pos; char *cp; diff --git a/drivers/staging/speakup/speakup_soft.c b/drivers/staging/speakup/speakup_soft.c index 42cdafeea35e..b5130c8bcb64 100644 --- a/drivers/staging/speakup/speakup_soft.c +++ b/drivers/staging/speakup/speakup_soft.c @@ -40,7 +40,7 @@ static int softsynth_is_alive(struct spk_synth *synth); static unsigned char get_index(void); static struct miscdevice synth_device; -static int initialized; +static int init_pos; static int misc_registered; static struct var_t vars[] = { @@ -194,7 +194,7 @@ static int softsynth_close(struct inode *inode, struct file *fp) unsigned long flags; spk_lock(flags); synth_soft.alive = 0; - initialized = 0; + init_pos = 0; spk_unlock(flags); /* Make sure we let applications go before leaving */ speakup_start_ttys(); @@ -239,13 +239,8 @@ static ssize_t softsynth_read(struct file *fp, char *buf, size_t count, ch = '\x18'; } else if (synth_buffer_empty()) { break; - } else if (!initialized) { - if (*init) { - ch = *init; - init++; - } else { - initialized = 1; - } + } else if (init[init_pos]) { + ch = init[init_pos++]; } else { ch = synth_buffer_getc(); } diff --git a/drivers/staging/speakup/synth.c b/drivers/staging/speakup/synth.c index 331eae788700..5710fc582beb 100644 --- a/drivers/staging/speakup/synth.c +++ b/drivers/staging/speakup/synth.c @@ -342,7 +342,7 @@ int synth_init(char *synth_name) mutex_lock(&spk_mutex); /* First, check if we already have it loaded. 
*/ - for (i = 0; synths[i] != NULL && i < MAXSYNTHS; i++) + for (i = 0; i < MAXSYNTHS && synths[i] != NULL; i++) if (strcmp(synths[i]->name, synth_name) == 0) synth = synths[i]; @@ -423,7 +423,7 @@ int synth_add(struct spk_synth *in_synth) int i; int status = 0; mutex_lock(&spk_mutex); - for (i = 0; synths[i] != NULL && i < MAXSYNTHS; i++) + for (i = 0; i < MAXSYNTHS && synths[i] != NULL; i++) /* synth_remove() is responsible for rotating the array down */ if (in_synth == synths[i]) { mutex_unlock(&spk_mutex); diff --git a/drivers/staging/telephony/ixj.c b/drivers/staging/telephony/ixj.c index f96027921f60..638d2b8c687c 100644 --- a/drivers/staging/telephony/ixj.c +++ b/drivers/staging/telephony/ixj.c @@ -3190,12 +3190,12 @@ static void ixj_write_cid(IXJ *j) ixj_fsk_alloc(j); - strcpy(sdmf1, j->cid_send.month); - strcat(sdmf1, j->cid_send.day); - strcat(sdmf1, j->cid_send.hour); - strcat(sdmf1, j->cid_send.min); - strcpy(sdmf2, j->cid_send.number); - strcpy(sdmf3, j->cid_send.name); + strlcpy(sdmf1, j->cid_send.month, sizeof(sdmf1)); + strlcat(sdmf1, j->cid_send.day, sizeof(sdmf1)); + strlcat(sdmf1, j->cid_send.hour, sizeof(sdmf1)); + strlcat(sdmf1, j->cid_send.min, sizeof(sdmf1)); + strlcpy(sdmf2, j->cid_send.number, sizeof(sdmf2)); + strlcpy(sdmf3, j->cid_send.name, sizeof(sdmf3)); len1 = strlen(sdmf1); len2 = strlen(sdmf2); @@ -3340,12 +3340,12 @@ static void ixj_write_cidcw(IXJ *j) ixj_pre_cid(j); } j->flags.cidcw_ack = 0; - strcpy(sdmf1, j->cid_send.month); - strcat(sdmf1, j->cid_send.day); - strcat(sdmf1, j->cid_send.hour); - strcat(sdmf1, j->cid_send.min); - strcpy(sdmf2, j->cid_send.number); - strcpy(sdmf3, j->cid_send.name); + strlcpy(sdmf1, j->cid_send.month, sizeof(sdmf1)); + strlcat(sdmf1, j->cid_send.day, sizeof(sdmf1)); + strlcat(sdmf1, j->cid_send.hour, sizeof(sdmf1)); + strlcat(sdmf1, j->cid_send.min, sizeof(sdmf1)); + strlcpy(sdmf2, j->cid_send.number, sizeof(sdmf2)); + strlcpy(sdmf3, j->cid_send.name, sizeof(sdmf3)); len1 = strlen(sdmf1); len2 = strlen(sdmf2); diff --git a/drivers/staging/vt6656/bssdb.h b/drivers/staging/vt6656/bssdb.h index a8f97ebb659b..991ce3eecd8d 100644 --- a/drivers/staging/vt6656/bssdb.h +++ b/drivers/staging/vt6656/bssdb.h @@ -92,7 +92,6 @@ typedef struct tagSRSNCapObject { } SRSNCapObject, *PSRSNCapObject; // BSS info(AP) -#pragma pack(1) typedef struct tagKnownBSS { // BSS info BOOL bActive; diff --git a/drivers/staging/vt6656/dpc.c b/drivers/staging/vt6656/dpc.c index c0edf97535dc..4664e9d81d04 100644 --- a/drivers/staging/vt6656/dpc.c +++ b/drivers/staging/vt6656/dpc.c @@ -200,7 +200,7 @@ s_vProcessRxMACHeader ( } else if (!compare_ether_addr(pbyRxBuffer, &pDevice->abySNAP_RFC1042[0])) { cbHeaderSize += 6; pwType = (PWORD) (pbyRxBufferAddr + cbHeaderSize); - if ((*pwType == cpu_to_le16(ETH_P_IPX)) || + if ((*pwType == cpu_to_be16(ETH_P_IPX)) || (*pwType == cpu_to_le16(0xF380))) { cbHeaderSize -= 8; pwType = (PWORD) (pbyRxBufferAddr + cbHeaderSize); @@ -1238,7 +1238,7 @@ static BOOL s_bHandleRxEncryption ( PayloadLen -= (WLAN_HDR_ADDR3_LEN + 8 + 4); // 24 is 802.11 header, 8 is IV&ExtIV, 4 is crc *pdwRxTSC47_16 = cpu_to_le32(*(PDWORD)(pbyIV + 4)); - DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"ExtIV: %lx\n",*pdwRxTSC47_16); + DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"ExtIV: %x\n", *pdwRxTSC47_16); if (byDecMode == KEY_CTL_TKIP) { *pwRxTSC15_0 = cpu_to_le16(MAKEWORD(*(pbyIV+2), *pbyIV)); } else { @@ -1349,7 +1349,7 @@ static BOOL s_bHostWepRxEncryption ( PayloadLen -= (WLAN_HDR_ADDR3_LEN + 8 + 4); // 24 is 802.11 header, 8 is IV&ExtIV, 4 is crc *pdwRxTSC47_16 = 
cpu_to_le32(*(PDWORD)(pbyIV + 4)); - DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"ExtIV: %lx\n",*pdwRxTSC47_16); + DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"ExtIV: %x\n", *pdwRxTSC47_16); if (byDecMode == KEY_CTL_TKIP) { *pwRxTSC15_0 = cpu_to_le16(MAKEWORD(*(pbyIV+2), *pbyIV)); diff --git a/drivers/staging/vt6656/int.h b/drivers/staging/vt6656/int.h index a5d96b968176..27bc7f548abb 100644 --- a/drivers/staging/vt6656/int.h +++ b/drivers/staging/vt6656/int.h @@ -34,7 +34,6 @@ #include "device.h" /*--------------------- Export Definitions -------------------------*/ -#pragma pack(1) typedef struct tagSINTData { BYTE byTSR0; BYTE byPkt0; diff --git a/drivers/staging/vt6656/iocmd.h b/drivers/staging/vt6656/iocmd.h index 22710cef751d..ae6e2d237b20 100644 --- a/drivers/staging/vt6656/iocmd.h +++ b/drivers/staging/vt6656/iocmd.h @@ -95,13 +95,12 @@ typedef enum tagWZONETYPE { // Ioctl interface structure // Command structure // -#pragma pack(1) typedef struct tagSCmdRequest { u8 name[16]; void *data; u16 wResult; u16 wCmdCode; -} SCmdRequest, *PSCmdRequest; +} __packed SCmdRequest, *PSCmdRequest; // // Scan @@ -111,7 +110,7 @@ typedef struct tagSCmdScan { u8 ssid[SSID_MAXLEN + 2]; -} SCmdScan, *PSCmdScan; +} __packed SCmdScan, *PSCmdScan; // // BSS Join @@ -126,7 +125,7 @@ typedef struct tagSCmdBSSJoin { BOOL bPSEnable; BOOL bShareKeyAuth; -} SCmdBSSJoin, *PSCmdBSSJoin; +} __packed SCmdBSSJoin, *PSCmdBSSJoin; // // Zonetype Setting @@ -137,7 +136,7 @@ typedef struct tagSCmdZoneTypeSet { BOOL bWrite; WZONETYPE ZoneType; -} SCmdZoneTypeSet, *PSCmdZoneTypeSet; +} __packed SCmdZoneTypeSet, *PSCmdZoneTypeSet; typedef struct tagSWPAResult { char ifname[100]; @@ -145,7 +144,7 @@ typedef struct tagSWPAResult { u8 key_mgmt; u8 eap_type; BOOL authenticated; -} SWPAResult, *PSWPAResult; +} __packed SWPAResult, *PSWPAResult; typedef struct tagSCmdStartAP { @@ -157,7 +156,7 @@ typedef struct tagSCmdStartAP { BOOL bShareKeyAuth; u8 byBasicRate; -} SCmdStartAP, *PSCmdStartAP; +} __packed SCmdStartAP, *PSCmdStartAP; typedef struct tagSCmdSetWEP { @@ -167,7 +166,7 @@ typedef struct tagSCmdSetWEP { BOOL bWepKeyAvailable[WEP_NKEYS]; u32 auWepKeyLength[WEP_NKEYS]; -} SCmdSetWEP, *PSCmdSetWEP; +} __packed SCmdSetWEP, *PSCmdSetWEP; typedef struct tagSBSSIDItem { @@ -180,14 +179,14 @@ typedef struct tagSBSSIDItem { BOOL bWEPOn; u32 uRSSI; -} SBSSIDItem; +} __packed SBSSIDItem; typedef struct tagSBSSIDList { u32 uItem; SBSSIDItem sBSSIDList[0]; -} SBSSIDList, *PSBSSIDList; +} __packed SBSSIDList, *PSBSSIDList; typedef struct tagSNodeItem { @@ -208,7 +207,7 @@ typedef struct tagSNodeItem { u32 uTxAttempts; u16 wFailureRatio; -} SNodeItem; +} __packed SNodeItem; typedef struct tagSNodeList { @@ -216,7 +215,7 @@ typedef struct tagSNodeList { u32 uItem; SNodeItem sNodeList[0]; -} SNodeList, *PSNodeList; +} __packed SNodeList, *PSNodeList; typedef struct tagSCmdLinkStatus { @@ -229,7 +228,7 @@ typedef struct tagSCmdLinkStatus { u32 uChannel; u32 uLinkRate; -} SCmdLinkStatus, *PSCmdLinkStatus; +} __packed SCmdLinkStatus, *PSCmdLinkStatus; // // 802.11 counter @@ -247,7 +246,7 @@ typedef struct tagSDot11MIBCount { u32 ReceivedFragmentCount; u32 MulticastReceivedFrameCount; u32 FCSErrorCount; -} SDot11MIBCount, *PSDot11MIBCount; +} __packed SDot11MIBCount, *PSDot11MIBCount; @@ -355,13 +354,13 @@ typedef struct tagSStatMIBCount { u32 ullTxBroadcastBytes[2]; u32 ullTxMulticastBytes[2]; u32 ullTxDirectedBytes[2]; -} SStatMIBCount, *PSStatMIBCount; +} __packed SStatMIBCount, *PSStatMIBCount; typedef struct tagSCmdValue { u32 dwValue; -} SCmdValue, 
*PSCmdValue; +} __packed SCmdValue, *PSCmdValue; // // hostapd & viawget ioctl related @@ -431,7 +430,7 @@ struct viawget_hostapd_param { u8 ssid[32]; } scan_req; } u; -}; +} __packed; /*--------------------- Export Classes ----------------------------*/ diff --git a/drivers/staging/vt6656/iowpa.h b/drivers/staging/vt6656/iowpa.h index 959c8868f6e2..2522ddec718d 100644 --- a/drivers/staging/vt6656/iowpa.h +++ b/drivers/staging/vt6656/iowpa.h @@ -67,12 +67,11 @@ enum { -#pragma pack(1) typedef struct viawget_wpa_header { u8 type; u16 req_ie_len; u16 resp_ie_len; -} viawget_wpa_header; +} __packed viawget_wpa_header; struct viawget_wpa_param { u32 cmd; @@ -113,9 +112,8 @@ struct viawget_wpa_param { u8 *buf; } scan_results; } u; -}; +} __packed; -#pragma pack(1) struct viawget_scan_result { u8 bssid[6]; u8 ssid[32]; @@ -130,7 +128,7 @@ struct viawget_scan_result { int noise; int level; int maxrate; -}; +} __packed; /*--------------------- Export Classes ----------------------------*/ diff --git a/drivers/staging/vt6656/key.c b/drivers/staging/vt6656/key.c index ee62a06a75f4..ba3a561a79a2 100644 --- a/drivers/staging/vt6656/key.c +++ b/drivers/staging/vt6656/key.c @@ -223,7 +223,7 @@ BOOL KeybSetKey( PSKeyManagement pTable, PBYTE pbyBSSID, DWORD dwKeyIndex, - unsigned long uKeyLength, + u32 uKeyLength, PQWORD pKeyRSC, PBYTE pbyKey, BYTE byKeyDecMode @@ -235,7 +235,8 @@ BOOL KeybSetKey( PSKeyItem pKey; unsigned int uKeyIdx; - DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"Enter KeybSetKey: %lX\n", dwKeyIndex); + DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO + "Enter KeybSetKey: %X\n", dwKeyIndex); j = (MAX_KEY_TABLE-1); for (i=0;i<(MAX_KEY_TABLE-1);i++) { @@ -261,7 +262,9 @@ BOOL KeybSetKey( if ((dwKeyIndex & TRANSMIT_KEY) != 0) { // Group transmit key pTable->KeyTable[i].dwGTKeyIndex = dwKeyIndex; - DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"Group transmit key(R)[%lX]: %d\n", pTable->KeyTable[i].dwGTKeyIndex, i); + DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO + "Group transmit key(R)[%X]: %d\n", + pTable->KeyTable[i].dwGTKeyIndex, i); } pTable->KeyTable[i].wKeyCtl &= 0xFF0F; // clear group key control filed pTable->KeyTable[i].wKeyCtl |= (byKeyDecMode << 4); @@ -302,9 +305,12 @@ BOOL KeybSetKey( } DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"\n"); - DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"pKey->dwTSC47_16: %lx\n ", pKey->dwTSC47_16); - DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"pKey->wTSC15_0: %x\n ", pKey->wTSC15_0); - DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"pKey->dwKeyIndex: %lx\n ", pKey->dwKeyIndex); + DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"pKey->dwTSC47_16: %x\n ", + pKey->dwTSC47_16); + DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"pKey->wTSC15_0: %x\n ", + pKey->wTSC15_0); + DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"pKey->dwKeyIndex: %x\n ", + pKey->dwKeyIndex); return (TRUE); } @@ -326,7 +332,9 @@ BOOL KeybSetKey( if ((dwKeyIndex & TRANSMIT_KEY) != 0) { // Group transmit key pTable->KeyTable[j].dwGTKeyIndex = dwKeyIndex; - DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"Group transmit key(N)[%lX]: %d\n", pTable->KeyTable[j].dwGTKeyIndex, j); + DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO + "Group transmit key(N)[%X]: %d\n", + pTable->KeyTable[j].dwGTKeyIndex, j); } pTable->KeyTable[j].wKeyCtl &= 0xFF0F; // clear group key control filed pTable->KeyTable[j].wKeyCtl |= (byKeyDecMode << 4); @@ -367,9 +375,11 @@ BOOL KeybSetKey( } DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"\n"); - DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"pKey->dwTSC47_16: %lx\n ", pKey->dwTSC47_16); + DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"pKey->dwTSC47_16: %x\n ", + pKey->dwTSC47_16); DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"pKey->wTSC15_0: %x\n ", 
pKey->wTSC15_0); - DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"pKey->dwKeyIndex: %lx\n ", pKey->dwKeyIndex); + DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"pKey->dwKeyIndex: %x\n ", + pKey->dwKeyIndex); return (TRUE); } @@ -597,7 +607,8 @@ BOOL KeybGetTransmitKey(PSKeyManagement pTable, PBYTE pbyBSSID, DWORD dwKeyType, DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"%x ", pTable->KeyTable[i].abyBSSID[ii]); } DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"\n"); - DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"dwGTKeyIndex: %lX\n", pTable->KeyTable[i].dwGTKeyIndex); + DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"dwGTKeyIndex: %X\n", + pTable->KeyTable[i].dwGTKeyIndex); return (TRUE); } @@ -664,7 +675,7 @@ BOOL KeybSetDefaultKey( void *pDeviceHandler, PSKeyManagement pTable, DWORD dwKeyIndex, - unsigned long uKeyLength, + u32 uKeyLength, PQWORD pKeyRSC, PBYTE pbyKey, BYTE byKeyDecMode @@ -696,7 +707,10 @@ BOOL KeybSetDefaultKey( if ((dwKeyIndex & TRANSMIT_KEY) != 0) { // Group transmit key pTable->KeyTable[MAX_KEY_TABLE-1].dwGTKeyIndex = dwKeyIndex; - DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"Group transmit key(R)[%lX]: %d\n", pTable->KeyTable[MAX_KEY_TABLE-1].dwGTKeyIndex, MAX_KEY_TABLE-1); + DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO + "Group transmit key(R)[%X]: %d\n", + pTable->KeyTable[MAX_KEY_TABLE-1].dwGTKeyIndex, + MAX_KEY_TABLE-1); } pTable->KeyTable[MAX_KEY_TABLE-1].wKeyCtl &= 0x7F00; // clear all key control filed @@ -747,9 +761,11 @@ BOOL KeybSetDefaultKey( } DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"\n"); - DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"pKey->dwTSC47_16: %lx\n", pKey->dwTSC47_16); + DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"pKey->dwTSC47_16: %x\n", + pKey->dwTSC47_16); DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"pKey->wTSC15_0: %x\n", pKey->wTSC15_0); - DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"pKey->dwKeyIndex: %lx\n", pKey->dwKeyIndex); + DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"pKey->dwKeyIndex: %x\n", + pKey->dwKeyIndex); return (TRUE); } @@ -775,7 +791,7 @@ BOOL KeybSetAllGroupKey( void *pDeviceHandler, PSKeyManagement pTable, DWORD dwKeyIndex, - unsigned long uKeyLength, + u32 uKeyLength, PQWORD pKeyRSC, PBYTE pbyKey, BYTE byKeyDecMode @@ -787,7 +803,8 @@ BOOL KeybSetAllGroupKey( PSKeyItem pKey; unsigned int uKeyIdx; - DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"Enter KeybSetAllGroupKey: %lX\n", dwKeyIndex); + DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"Enter KeybSetAllGroupKey: %X\n", + dwKeyIndex); if ((dwKeyIndex & PAIRWISE_KEY) != 0) { // Pairwise key @@ -804,7 +821,9 @@ BOOL KeybSetAllGroupKey( if ((dwKeyIndex & TRANSMIT_KEY) != 0) { // Group transmit key pTable->KeyTable[i].dwGTKeyIndex = dwKeyIndex; - DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"Group transmit key(R)[%lX]: %d\n", pTable->KeyTable[i].dwGTKeyIndex, i); + DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO + "Group transmit key(R)[%X]: %d\n", + pTable->KeyTable[i].dwGTKeyIndex, i); } pTable->KeyTable[i].wKeyCtl &= 0xFF0F; // clear group key control filed diff --git a/drivers/staging/vt6656/key.h b/drivers/staging/vt6656/key.h index f749c7a027d3..bd35d39621ae 100644 --- a/drivers/staging/vt6656/key.h +++ b/drivers/staging/vt6656/key.h @@ -58,7 +58,7 @@ typedef struct tagSKeyItem { BOOL bKeyValid; - unsigned long uKeyLength; + u32 uKeyLength; BYTE abyKey[MAX_KEY_LEN]; QWORD KeyRSC; DWORD dwTSC47_16; @@ -107,7 +107,7 @@ BOOL KeybSetKey( PSKeyManagement pTable, PBYTE pbyBSSID, DWORD dwKeyIndex, - unsigned long uKeyLength, + u32 uKeyLength, PQWORD pKeyRSC, PBYTE pbyKey, BYTE byKeyDecMode @@ -146,7 +146,7 @@ BOOL KeybSetDefaultKey( void *pDeviceHandler, PSKeyManagement pTable, DWORD dwKeyIndex, - unsigned long uKeyLength, + u32 uKeyLength, PQWORD pKeyRSC, PBYTE 
pbyKey, BYTE byKeyDecMode @@ -156,7 +156,7 @@ BOOL KeybSetAllGroupKey( void *pDeviceHandler, PSKeyManagement pTable, DWORD dwKeyIndex, - unsigned long uKeyLength, + u32 uKeyLength, PQWORD pKeyRSC, PBYTE pbyKey, BYTE byKeyDecMode diff --git a/drivers/staging/vt6656/mac.c b/drivers/staging/vt6656/mac.c index af4a29d14775..8fddc7b3930b 100644 --- a/drivers/staging/vt6656/mac.c +++ b/drivers/staging/vt6656/mac.c @@ -260,7 +260,8 @@ BYTE pbyData[24]; dwData1 <<= 16; dwData1 |= MAKEWORD(*(pbyAddr+4), *(pbyAddr+5)); - DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"1. wOffset: %d, Data: %lX, KeyCtl:%X\n", wOffset, dwData1, wKeyCtl); + DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"1. wOffset: %d, Data: %X,"\ + " KeyCtl:%X\n", wOffset, dwData1, wKeyCtl); //VNSvOutPortW(dwIoBase + MAC_REG_MISCFFNDEX, wOffset); //VNSvOutPortD(dwIoBase + MAC_REG_MISCFFDATA, dwData); @@ -277,7 +278,8 @@ BYTE pbyData[24]; dwData2 <<= 8; dwData2 |= *(pbyAddr+0); - DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"2. wOffset: %d, Data: %lX\n", wOffset, dwData2); + DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"2. wOffset: %d, Data: %X\n", + wOffset, dwData2); //VNSvOutPortW(dwIoBase + MAC_REG_MISCFFNDEX, wOffset); //VNSvOutPortD(dwIoBase + MAC_REG_MISCFFDATA, dwData); diff --git a/drivers/staging/vt6656/main_usb.c b/drivers/staging/vt6656/main_usb.c index 763e028a5cc5..87394541d715 100644 --- a/drivers/staging/vt6656/main_usb.c +++ b/drivers/staging/vt6656/main_usb.c @@ -222,7 +222,7 @@ DEVICE_PARAM(b80211hEnable, "802.11h mode"); // Static vars definitions // -static struct usb_device_id vt6656_table[] __devinitdata = { +static struct usb_device_id vt6656_table[] = { {USB_DEVICE(VNT_USB_VENDOR_ID, VNT_USB_PRODUCT_ID)}, {} }; diff --git a/drivers/staging/vt6656/rf.c b/drivers/staging/vt6656/rf.c index 3fd0478a9a54..8cf0881888b2 100644 --- a/drivers/staging/vt6656/rf.c +++ b/drivers/staging/vt6656/rf.c @@ -769,6 +769,9 @@ BYTE byPwr = pDevice->byCCKPwr; return TRUE; } + if (uCH == 0) + return -EINVAL; + switch (uRATE) { case RATE_1M: case RATE_2M: diff --git a/drivers/staging/vt6656/rxtx.c b/drivers/staging/vt6656/rxtx.c index 9b64b102f55c..3beb126e90f8 100644 --- a/drivers/staging/vt6656/rxtx.c +++ b/drivers/staging/vt6656/rxtx.c @@ -377,7 +377,8 @@ s_vFillTxKey ( *(pbyIVHead+3) = (BYTE)(((pDevice->byKeyIndex << 6) & 0xc0) | 0x20); // 0x20 is ExtIV // Append IV&ExtIV after Mac Header *pdwExtIV = cpu_to_le32(pTransmitKey->dwTSC47_16); - DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"vFillTxKey()---- pdwExtIV: %lx\n", *pdwExtIV); + DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"vFillTxKey()---- pdwExtIV: %x\n", + *pdwExtIV); } else if (pTransmitKey->byCipherSuite == KEY_CTL_CCMP) { pTransmitKey->wTSC15_0++; @@ -1701,7 +1702,7 @@ s_bPacketToWirelessUsb( // 802.1H if (ntohs(psEthHeader->wType) > ETH_DATA_LEN) { if (pDevice->dwDiagRefCount == 0) { - if ((psEthHeader->wType == cpu_to_le16(ETH_P_IPX)) || + if ((psEthHeader->wType == cpu_to_be16(ETH_P_IPX)) || (psEthHeader->wType == cpu_to_le16(0xF380))) { memcpy((PBYTE) (pbyPayloadHead), abySNAP_Bridgetunnel, 6); @@ -1753,7 +1754,8 @@ s_bPacketToWirelessUsb( MIC_vAppend((PBYTE)&(psEthHeader->abyDstAddr[0]), 12); dwMIC_Priority = 0; MIC_vAppend((PBYTE)&dwMIC_Priority, 4); - DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"MIC KEY: %lX, %lX\n", dwMICKey0, dwMICKey1); + DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"MIC KEY: %X, %X\n", + dwMICKey0, dwMICKey1); /////////////////////////////////////////////////////////////////// @@ -2635,7 +2637,8 @@ vDMA0_tx_80211(PSDevice pDevice, struct sk_buff *skb) { MIC_vAppend((PBYTE)&(sEthHeader.abyDstAddr[0]), 12); dwMIC_Priority = 0; 
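Several vt6656 hunks in this range narrow fields from unsigned long to u32 and adjust the printk format specifiers to match; a stale %lX on a 32-bit value trips -Wformat and can read past the argument on 64-bit builds. A small illustrative helper (show_key_index is hypothetical):

#include <linux/printk.h>
#include <linux/types.h>

static void show_key_index(u32 key_index)
{
	pr_debug("key index: %X\n", key_index);	/* specifier matches u32 */
	/* pr_debug("key index: %lX\n", key_index); would warn under -Wformat */
}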
MIC_vAppend((PBYTE)&dwMIC_Priority, 4); - DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"DMA0_tx_8021:MIC KEY: %lX, %lX\n", dwMICKey0, dwMICKey1); + DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"DMA0_tx_8021:MIC KEY:"\ + " %X, %X\n", dwMICKey0, dwMICKey1); uLength = cbHeaderSize + cbMacHdLen + uPadding + cbIVlen; @@ -2655,7 +2658,8 @@ vDMA0_tx_80211(PSDevice pDevice, struct sk_buff *skb) { DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"uLength: %d, %d\n", uLength, cbFrameBodySize); DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"cbReqCount:%d, %d, %d, %d\n", cbReqCount, cbHeaderSize, uPadding, cbIVlen); - DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"MIC:%lx, %lx\n", *pdwMIC_L, *pdwMIC_R); + DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"MIC:%x, %x\n", + *pdwMIC_L, *pdwMIC_R); } @@ -2840,10 +2844,10 @@ int nsDMA_tx_packet(PSDevice pDevice, unsigned int uDMAIdx, struct sk_buff *skb) Packet_Type = skb->data[ETH_HLEN+1]; Descriptor_type = skb->data[ETH_HLEN+1+1+2]; Key_info = (skb->data[ETH_HLEN+1+1+2+1] << 8)|(skb->data[ETH_HLEN+1+1+2+2]); - if (pDevice->sTxEthHeader.wType == cpu_to_le16(ETH_P_PAE)) { - /* 802.1x OR eapol-key challenge frame transfer */ - if (((Protocol_Version == 1) || (Protocol_Version == 2)) && - (Packet_Type == 3)) { + if (pDevice->sTxEthHeader.wType == cpu_to_be16(ETH_P_PAE)) { + /* 802.1x OR eapol-key challenge frame transfer */ + if (((Protocol_Version == 1) || (Protocol_Version == 2)) && + (Packet_Type == 3)) { bTxeapol_key = TRUE; if(!(Key_info & BIT3) && //WPA or RSN group-key challenge (Key_info & BIT8) && (Key_info & BIT9)) { //send 2/2 key @@ -2989,19 +2993,19 @@ int nsDMA_tx_packet(PSDevice pDevice, unsigned int uDMAIdx, struct sk_buff *skb) } } - if (pDevice->sTxEthHeader.wType == cpu_to_le16(ETH_P_PAE)) { - if (pDevice->byBBType != BB_TYPE_11A) { - pDevice->wCurrentRate = RATE_1M; - pDevice->byACKRate = RATE_1M; - pDevice->byTopCCKBasicRate = RATE_1M; - pDevice->byTopOFDMBasicRate = RATE_6M; - } else { - pDevice->wCurrentRate = RATE_6M; - pDevice->byACKRate = RATE_6M; - pDevice->byTopCCKBasicRate = RATE_1M; - pDevice->byTopOFDMBasicRate = RATE_6M; - } - } + if (pDevice->sTxEthHeader.wType == cpu_to_be16(ETH_P_PAE)) { + if (pDevice->byBBType != BB_TYPE_11A) { + pDevice->wCurrentRate = RATE_1M; + pDevice->byACKRate = RATE_1M; + pDevice->byTopCCKBasicRate = RATE_1M; + pDevice->byTopOFDMBasicRate = RATE_6M; + } else { + pDevice->wCurrentRate = RATE_6M; + pDevice->byACKRate = RATE_6M; + pDevice->byTopCCKBasicRate = RATE_1M; + pDevice->byTopOFDMBasicRate = RATE_6M; + } + } DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "dma_tx: pDevice->wCurrentRate = %d\n", @@ -3017,7 +3021,7 @@ int nsDMA_tx_packet(PSDevice pDevice, unsigned int uDMAIdx, struct sk_buff *skb) if (bNeedEncryption == TRUE) { DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"ntohs Pkt Type=%04x\n", ntohs(pDevice->sTxEthHeader.wType)); - if ((pDevice->sTxEthHeader.wType) == cpu_to_le16(ETH_P_PAE)) { + if ((pDevice->sTxEthHeader.wType) == cpu_to_be16(ETH_P_PAE)) { bNeedEncryption = FALSE; DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"Pkt Type=%04x\n", (pDevice->sTxEthHeader.wType)); if ((pMgmt->eCurrMode == WMAC_MODE_ESS_STA) && (pMgmt->eCurrState == WMAC_STATE_ASSOC)) { @@ -3029,7 +3033,8 @@ int nsDMA_tx_packet(PSDevice pDevice, unsigned int uDMAIdx, struct sk_buff *skb) DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"error: KEY is GTK!!~~\n"); } else { - DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"Find PTK [%lX]\n", pTransmitKey->dwKeyIndex); + DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"Find PTK [%X]\n", + pTransmitKey->dwKeyIndex); bNeedEncryption = TRUE; } } @@ -3043,7 +3048,8 @@ int nsDMA_tx_packet(PSDevice pDevice, unsigned int 
uDMAIdx, struct sk_buff *skb) if (pDevice->bEnableHostWEP) { if ((uNodeIndex != 0) && (pMgmt->sNodeDBTable[uNodeIndex].dwKeyIndex & PAIRWISE_KEY)) { - DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"Find PTK [%lX]\n", pTransmitKey->dwKeyIndex); + DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"Find PTK [%X]\n", + pTransmitKey->dwKeyIndex); bNeedEncryption = TRUE; } } diff --git a/drivers/staging/vt6656/ttype.h b/drivers/staging/vt6656/ttype.h index 8e9450ef3997..dfbf74713a80 100644 --- a/drivers/staging/vt6656/ttype.h +++ b/drivers/staging/vt6656/ttype.h @@ -29,6 +29,8 @@ #ifndef __TTYPE_H__ #define __TTYPE_H__ +#include <linux/types.h> + /******* Common definitions and typedefs ***********************************/ typedef int BOOL; @@ -42,17 +44,17 @@ typedef int BOOL; /****** Simple typedefs ***************************************************/ -typedef unsigned char BYTE; // 8-bit -typedef unsigned short WORD; // 16-bit -typedef unsigned long DWORD; // 32-bit +typedef u8 BYTE; +typedef u16 WORD; +typedef u32 DWORD; // QWORD is for those situation that we want // an 8-byte-aligned 8 byte long structure // which is NOT really a floating point number. typedef union tagUQuadWord { struct { - DWORD dwLowDword; - DWORD dwHighDword; + u32 dwLowDword; + u32 dwHighDword; } u; double DoNotUseThisField; } UQuadWord; @@ -60,8 +62,8 @@ typedef UQuadWord QWORD; // 64-bit /****** Common pointer types ***********************************************/ -typedef unsigned long ULONG_PTR; // 32-bit -typedef unsigned long DWORD_PTR; // 32-bit +typedef u32 ULONG_PTR; +typedef u32 DWORD_PTR; // boolean pointer diff --git a/drivers/staging/vt6656/usbpipe.c b/drivers/staging/vt6656/usbpipe.c index c612ab58f389..f759352a268a 100644 --- a/drivers/staging/vt6656/usbpipe.c +++ b/drivers/staging/vt6656/usbpipe.c @@ -168,6 +168,11 @@ int PIPEnsControlOut( if (pDevice->Flags & fMP_CONTROL_WRITES) return STATUS_FAILURE; + if (pDevice->Flags & fMP_CONTROL_READS) + return STATUS_FAILURE; + + MP_SET_FLAG(pDevice, fMP_CONTROL_WRITES); + pDevice->sUsbCtlRequest.bRequestType = 0x40; pDevice->sUsbCtlRequest.bRequest = byRequest; pDevice->sUsbCtlRequest.wValue = cpu_to_le16p(&wValue); @@ -182,12 +187,13 @@ int PIPEnsControlOut( ntStatus = usb_submit_urb(pDevice->pControlURB, GFP_ATOMIC); if (ntStatus != 0) { - DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"control send request submission failed: %d\n", ntStatus); + DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO + "control send request submission failed: %d\n", + ntStatus); + MP_CLEAR_FLAG(pDevice, fMP_CONTROL_WRITES); return STATUS_FAILURE; } - else { - MP_SET_FLAG(pDevice, fMP_CONTROL_WRITES); - } + spin_unlock_irq(&pDevice->lock); for (ii = 0; ii <= USB_CTL_WAIT; ii ++) { @@ -227,6 +233,11 @@ int PIPEnsControlIn( if (pDevice->Flags & fMP_CONTROL_READS) return STATUS_FAILURE; + if (pDevice->Flags & fMP_CONTROL_WRITES) + return STATUS_FAILURE; + + MP_SET_FLAG(pDevice, fMP_CONTROL_READS); + pDevice->sUsbCtlRequest.bRequestType = 0xC0; pDevice->sUsbCtlRequest.bRequest = byRequest; pDevice->sUsbCtlRequest.wValue = cpu_to_le16p(&wValue); @@ -240,10 +251,11 @@ int PIPEnsControlIn( ntStatus = usb_submit_urb(pDevice->pControlURB, GFP_ATOMIC); if (ntStatus != 0) { - DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"control request submission failed: %d\n", ntStatus); - }else { - MP_SET_FLAG(pDevice, fMP_CONTROL_READS); - } + DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO + "control request submission failed: %d\n", ntStatus); + MP_CLEAR_FLAG(pDevice, fMP_CONTROL_READS); + return STATUS_FAILURE; + } spin_unlock_irq(&pDevice->lock); for (ii = 0; ii <= USB_CTL_WAIT; 
ii ++) { diff --git a/drivers/staging/vt6656/wcmd.c b/drivers/staging/vt6656/wcmd.c index 78ea121b7e2e..31fb96a54cff 100644 --- a/drivers/staging/vt6656/wcmd.c +++ b/drivers/staging/vt6656/wcmd.c @@ -316,17 +316,19 @@ s_MgrMakeProbeRequest( return pTxPacket; } -void vCommandTimerWait(void *hDeviceContext, unsigned int MSecond) +void vCommandTimerWait(void *hDeviceContext, unsigned long MSecond) { - PSDevice pDevice = (PSDevice)hDeviceContext; + PSDevice pDevice = (PSDevice)hDeviceContext; - init_timer(&pDevice->sTimerCommand); - pDevice->sTimerCommand.data = (unsigned long)pDevice; - pDevice->sTimerCommand.function = (TimerFunction)vRunCommand; - // RUN_AT :1 msec ~= (HZ/1024) - pDevice->sTimerCommand.expires = (unsigned int)RUN_AT((MSecond * HZ) >> 10); - add_timer(&pDevice->sTimerCommand); - return; + init_timer(&pDevice->sTimerCommand); + + pDevice->sTimerCommand.data = (unsigned long)pDevice; + pDevice->sTimerCommand.function = (TimerFunction)vRunCommand; + pDevice->sTimerCommand.expires = RUN_AT((MSecond * HZ) / 1000); + + add_timer(&pDevice->sTimerCommand); + + return; } void vRunCommand(void *hDeviceContext) diff --git a/drivers/staging/vt6656/wpa2.h b/drivers/staging/vt6656/wpa2.h index 46c295905b48..c359252a6b0e 100644 --- a/drivers/staging/vt6656/wpa2.h +++ b/drivers/staging/vt6656/wpa2.h @@ -45,8 +45,8 @@ typedef struct tagsPMKIDInfo { } PMKIDInfo, *PPMKIDInfo; typedef struct tagSPMKIDCache { - unsigned long BSSIDInfoCount; - PMKIDInfo BSSIDInfo[MAX_PMKID_CACHE]; + u32 BSSIDInfoCount; + PMKIDInfo BSSIDInfo[MAX_PMKID_CACHE]; } SPMKIDCache, *PSPMKIDCache; diff --git a/drivers/staging/winbond/wbusb.c b/drivers/staging/winbond/wbusb.c index c3751a718384..9160049ff546 100644 --- a/drivers/staging/winbond/wbusb.c +++ b/drivers/staging/winbond/wbusb.c @@ -25,7 +25,7 @@ MODULE_DESCRIPTION("IS89C35 802.11bg WLAN USB Driver"); MODULE_LICENSE("GPL"); MODULE_VERSION("0.1"); -static const struct usb_device_id wb35_table[] __devinitconst = { +static const struct usb_device_id wb35_table[] = { { USB_DEVICE(0x0416, 0x0035) }, { USB_DEVICE(0x18E8, 0x6201) }, { USB_DEVICE(0x18E8, 0x6206) }, diff --git a/drivers/staging/wlan-ng/prism2mgmt.c b/drivers/staging/wlan-ng/prism2mgmt.c index c3bb05dd744f..4110a2f9425e 100644 --- a/drivers/staging/wlan-ng/prism2mgmt.c +++ b/drivers/staging/wlan-ng/prism2mgmt.c @@ -406,7 +406,7 @@ int prism2mgmt_scan_results(wlandevice_t *wlandev, void *msgp) /* SSID */ req->ssid.status = P80211ENUM_msgitem_status_data_ok; req->ssid.data.len = le16_to_cpu(item->ssid.len); - req->ssid.data.len = min_t(u16, req->ssid.data.len, WLAN_BSSID_LEN); + req->ssid.data.len = min_t(u16, req->ssid.data.len, WLAN_SSID_MAXLEN); memcpy(req->ssid.data.data, item->ssid.data, req->ssid.data.len); /* supported rates */ diff --git a/drivers/staging/zcache/zcache-main.c b/drivers/staging/zcache/zcache-main.c index 2734dacacbaf..1812bedab324 100644 --- a/drivers/staging/zcache/zcache-main.c +++ b/drivers/staging/zcache/zcache-main.c @@ -1259,13 +1259,12 @@ static int zcache_pampd_get_data_and_free(char *data, size_t *bufsize, bool raw, void *pampd, struct tmem_pool *pool, struct tmem_oid *oid, uint32_t index) { - int ret = 0; - BUG_ON(!is_ephemeral(pool)); - zbud_decompress((struct page *)(data), pampd); + if (zbud_decompress((struct page *)(data), pampd) < 0) + return -EINVAL; zbud_free_and_delist((struct zbud_hdr *)pampd); atomic_dec(&zcache_curr_eph_pampd_count); - return ret; + return 0; } /* diff --git a/drivers/target/iscsi/iscsi_target.c b/drivers/target/iscsi/iscsi_target.c index 
e326d17e05a8..486c5dd45016 100644 --- a/drivers/target/iscsi/iscsi_target.c +++ b/drivers/target/iscsi/iscsi_target.c @@ -2359,7 +2359,7 @@ static void iscsit_build_conn_drop_async_message(struct iscsi_conn *conn) if (!conn_p) return; - cmd = iscsit_allocate_cmd(conn_p, GFP_KERNEL); + cmd = iscsit_allocate_cmd(conn_p, GFP_ATOMIC); if (!cmd) { iscsit_dec_conn_usage_count(conn_p); return; @@ -3196,7 +3196,6 @@ static int iscsit_build_sendtargets_response(struct iscsi_cmd *cmd) len += 1; if ((len + payload_len) > buffer_len) { - spin_unlock(&tiqn->tiqn_tpg_lock); end_of_buf = 1; goto eob; } @@ -3349,6 +3348,7 @@ static int iscsit_send_reject( hdr->opcode = ISCSI_OP_REJECT; hdr->flags |= ISCSI_FLAG_CMD_FINAL; hton24(hdr->dlength, ISCSI_HDR_LEN); + hdr->ffffffff = 0xffffffff; cmd->stat_sn = conn->stat_sn++; hdr->statsn = cpu_to_be32(cmd->stat_sn); hdr->exp_cmdsn = cpu_to_be32(conn->sess->exp_cmd_sn); @@ -3514,7 +3514,9 @@ restart: */ iscsit_thread_check_cpumask(conn, current, 1); - schedule_timeout_interruptible(MAX_SCHEDULE_TIMEOUT); + wait_event_interruptible(conn->queues_wq, + !iscsit_conn_all_queues_empty(conn) || + ts->status == ISCSI_THREAD_SET_RESET); if ((ts->status == ISCSI_THREAD_SET_RESET) || signal_pending(current)) diff --git a/drivers/target/iscsi/iscsi_target_core.h b/drivers/target/iscsi/iscsi_target_core.h index d1c4bc22cca4..2e46ea404cab 100644 --- a/drivers/target/iscsi/iscsi_target_core.h +++ b/drivers/target/iscsi/iscsi_target_core.h @@ -25,10 +25,10 @@ #define NA_DATAOUT_TIMEOUT_RETRIES 5 #define NA_DATAOUT_TIMEOUT_RETRIES_MAX 15 #define NA_DATAOUT_TIMEOUT_RETRIES_MIN 1 -#define NA_NOPIN_TIMEOUT 5 +#define NA_NOPIN_TIMEOUT 15 #define NA_NOPIN_TIMEOUT_MAX 60 #define NA_NOPIN_TIMEOUT_MIN 3 -#define NA_NOPIN_RESPONSE_TIMEOUT 5 +#define NA_NOPIN_RESPONSE_TIMEOUT 30 #define NA_NOPIN_RESPONSE_TIMEOUT_MAX 60 #define NA_NOPIN_RESPONSE_TIMEOUT_MIN 3 #define NA_RANDOM_DATAIN_PDU_OFFSETS 0 @@ -491,6 +491,7 @@ struct iscsi_tmr_req { }; struct iscsi_conn { + wait_queue_head_t queues_wq; /* Authentication Successful for this connection */ u8 auth_complete; /* State connection is currently in */ diff --git a/drivers/target/iscsi/iscsi_target_login.c b/drivers/target/iscsi/iscsi_target_login.c index ae304248c8c2..3cb7a4f5b988 100644 --- a/drivers/target/iscsi/iscsi_target_login.c +++ b/drivers/target/iscsi/iscsi_target_login.c @@ -45,6 +45,7 @@ extern spinlock_t sess_idr_lock; static int iscsi_login_init_conn(struct iscsi_conn *conn) { + init_waitqueue_head(&conn->queues_wq); INIT_LIST_HEAD(&conn->conn_list); INIT_LIST_HEAD(&conn->conn_cmd_list); INIT_LIST_HEAD(&conn->immed_queue_list); diff --git a/drivers/target/iscsi/iscsi_target_tpg.c b/drivers/target/iscsi/iscsi_target_tpg.c index 879d8d0fa3fe..c3d7bf54f3de 100644 --- a/drivers/target/iscsi/iscsi_target_tpg.c +++ b/drivers/target/iscsi/iscsi_target_tpg.c @@ -672,6 +672,12 @@ int iscsit_ta_generate_node_acls( pr_debug("iSCSI_TPG[%hu] - Generate Initiator Portal Group ACLs: %s\n", tpg->tpgt, (a->generate_node_acls) ? 
"Enabled" : "Disabled"); + if (flag == 1 && a->cache_dynamic_acls == 0) { + pr_debug("Explicitly setting cache_dynamic_acls=1 when " + "generate_node_acls=1\n"); + a->cache_dynamic_acls = 1; + } + return 0; } @@ -711,6 +717,12 @@ int iscsit_ta_cache_dynamic_acls( return -EINVAL; } + if (a->generate_node_acls == 1 && flag == 0) { + pr_debug("Skipping cache_dynamic_acls=0 when" + " generate_node_acls=1\n"); + return 0; + } + a->cache_dynamic_acls = flag; pr_debug("iSCSI_TPG[%hu] - Cache Dynamic Initiator Portal Group" " ACLs %s\n", tpg->tpgt, (a->cache_dynamic_acls) ? diff --git a/drivers/target/iscsi/iscsi_target_util.c b/drivers/target/iscsi/iscsi_target_util.c index 4eba86d2bd82..4c05ed6beccc 100644 --- a/drivers/target/iscsi/iscsi_target_util.c +++ b/drivers/target/iscsi/iscsi_target_util.c @@ -656,7 +656,7 @@ void iscsit_add_cmd_to_immediate_queue( atomic_set(&conn->check_immediate_queue, 1); spin_unlock_bh(&conn->immed_queue_lock); - wake_up_process(conn->thread_set->tx_thread); + wake_up(&conn->queues_wq); } struct iscsi_queue_req *iscsit_get_cmd_from_immediate_queue(struct iscsi_conn *conn) @@ -730,7 +730,7 @@ void iscsit_add_cmd_to_response_queue( atomic_inc(&cmd->response_queue_count); spin_unlock_bh(&conn->response_queue_lock); - wake_up_process(conn->thread_set->tx_thread); + wake_up(&conn->queues_wq); } struct iscsi_queue_req *iscsit_get_cmd_from_response_queue(struct iscsi_conn *conn) @@ -784,6 +784,24 @@ static void iscsit_remove_cmd_from_response_queue( } } +bool iscsit_conn_all_queues_empty(struct iscsi_conn *conn) +{ + bool empty; + + spin_lock_bh(&conn->immed_queue_lock); + empty = list_empty(&conn->immed_queue_list); + spin_unlock_bh(&conn->immed_queue_lock); + + if (!empty) + return empty; + + spin_lock_bh(&conn->response_queue_lock); + empty = list_empty(&conn->response_queue_list); + spin_unlock_bh(&conn->response_queue_lock); + + return empty; +} + void iscsit_free_queue_reqs_for_conn(struct iscsi_conn *conn) { struct iscsi_queue_req *qr, *qr_tmp; diff --git a/drivers/target/iscsi/iscsi_target_util.h b/drivers/target/iscsi/iscsi_target_util.h index 835bf7de0281..cfac698fb3cb 100644 --- a/drivers/target/iscsi/iscsi_target_util.h +++ b/drivers/target/iscsi/iscsi_target_util.h @@ -28,6 +28,7 @@ extern struct iscsi_queue_req *iscsit_get_cmd_from_immediate_queue(struct iscsi_ extern void iscsit_add_cmd_to_response_queue(struct iscsi_cmd *, struct iscsi_conn *, u8); extern struct iscsi_queue_req *iscsit_get_cmd_from_response_queue(struct iscsi_conn *); extern void iscsit_remove_cmd_from_tx_queues(struct iscsi_cmd *, struct iscsi_conn *); +extern bool iscsit_conn_all_queues_empty(struct iscsi_conn *); extern void iscsit_free_queue_reqs_for_conn(struct iscsi_conn *); extern void iscsit_release_cmd(struct iscsi_cmd *); extern void iscsit_free_cmd(struct iscsi_cmd *); diff --git a/drivers/target/target_core_configfs.c b/drivers/target/target_core_configfs.c index cbb66537d230..dbcede31ff8b 100644 --- a/drivers/target/target_core_configfs.c +++ b/drivers/target/target_core_configfs.c @@ -3115,6 +3115,7 @@ static int __init target_core_init_configfs(void) GFP_KERNEL); if (!target_cg->default_groups) { pr_err("Unable to allocate target_cg->default_groups\n"); + ret = -ENOMEM; goto out_global; } @@ -3130,6 +3131,7 @@ static int __init target_core_init_configfs(void) GFP_KERNEL); if (!hba_cg->default_groups) { pr_err("Unable to allocate hba_cg->default_groups\n"); + ret = -ENOMEM; goto out_global; } config_group_init_type_name(&alua_group, @@ -3145,6 +3147,7 @@ static int __init 
target_core_init_configfs(void) GFP_KERNEL); if (!alua_cg->default_groups) { pr_err("Unable to allocate alua_cg->default_groups\n"); + ret = -ENOMEM; goto out_global; } @@ -3156,14 +3159,17 @@ static int __init target_core_init_configfs(void) * Add core/alua/lu_gps/default_lu_gp */ lu_gp = core_alua_allocate_lu_gp("default_lu_gp", 1); - if (IS_ERR(lu_gp)) + if (IS_ERR(lu_gp)) { + ret = -ENOMEM; goto out_global; + } lu_gp_cg = &alua_lu_gps_group; lu_gp_cg->default_groups = kzalloc(sizeof(struct config_group) * 2, GFP_KERNEL); if (!lu_gp_cg->default_groups) { pr_err("Unable to allocate lu_gp_cg->default_groups\n"); + ret = -ENOMEM; goto out_global; } @@ -3191,7 +3197,8 @@ static int __init target_core_init_configfs(void) if (ret < 0) goto out; - if (core_dev_setup_virtual_lun0() < 0) + ret = core_dev_setup_virtual_lun0(); + if (ret < 0) goto out; return 0; diff --git a/drivers/target/target_core_device.c b/drivers/target/target_core_device.c index aa6267746383..79d9865a1270 100644 --- a/drivers/target/target_core_device.c +++ b/drivers/target/target_core_device.c @@ -826,20 +826,20 @@ int se_dev_check_shutdown(struct se_device *dev) u32 se_dev_align_max_sectors(u32 max_sectors, u32 block_size) { - u32 tmp, aligned_max_sectors; + u32 aligned_max_sectors; + u32 alignment; /* * Limit max_sectors to a PAGE_SIZE aligned value for modern * transport_allocate_data_tasks() operation. */ - tmp = rounddown((max_sectors * block_size), PAGE_SIZE); - aligned_max_sectors = (tmp / block_size); - if (max_sectors != aligned_max_sectors) { - printk(KERN_INFO "Rounding down aligned max_sectors from %u" - " to %u\n", max_sectors, aligned_max_sectors); - return aligned_max_sectors; - } + alignment = max(1ul, PAGE_SIZE / block_size); + aligned_max_sectors = rounddown(max_sectors, alignment); + + if (max_sectors != aligned_max_sectors) + pr_info("Rounding down aligned max_sectors from %u to %u\n", + max_sectors, aligned_max_sectors); - return max_sectors; + return aligned_max_sectors; } void se_dev_set_default_attribs( @@ -1230,6 +1230,8 @@ int se_dev_set_max_sectors(struct se_device *dev, u32 max_sectors) int se_dev_set_fabric_max_sectors(struct se_device *dev, u32 fabric_max_sectors) { + int block_size = dev->se_sub_dev->se_dev_attrib.block_size; + if (atomic_read(&dev->dev_export_obj.obj_access_count)) { pr_err("dev[%p]: Unable to change SE Device" " fabric_max_sectors while dev_export_obj: %d count exists\n", @@ -1267,8 +1269,12 @@ int se_dev_set_fabric_max_sectors(struct se_device *dev, u32 fabric_max_sectors) /* * Align max_sectors down to PAGE_SIZE to follow transport_allocate_data_tasks() */ + if (!block_size) { + block_size = 512; + pr_warn("Defaulting to 512 for zero block_size\n"); + } fabric_max_sectors = se_dev_align_max_sectors(fabric_max_sectors, - dev->se_sub_dev->se_dev_attrib.block_size); + block_size); dev->se_sub_dev->se_dev_attrib.fabric_max_sectors = fabric_max_sectors; pr_debug("dev[%p]: SE Device max_sectors changed to %u\n", @@ -1477,24 +1483,18 @@ static struct se_lun *core_dev_get_lun(struct se_portal_group *tpg, u32 unpacked struct se_lun_acl *core_dev_init_initiator_node_lun_acl( struct se_portal_group *tpg, + struct se_node_acl *nacl, u32 mapped_lun, - char *initiatorname, int *ret) { struct se_lun_acl *lacl; - struct se_node_acl *nacl; - if (strlen(initiatorname) >= TRANSPORT_IQN_LEN) { + if (strlen(nacl->initiatorname) >= TRANSPORT_IQN_LEN) { pr_err("%s InitiatorName exceeds maximum size.\n", tpg->se_tpg_tfo->get_fabric_name()); *ret = -EOVERFLOW; return NULL; } - nacl = 
core_tpg_get_initiator_node_acl(tpg, initiatorname); - if (!nacl) { - *ret = -EINVAL; - return NULL; - } lacl = kzalloc(sizeof(struct se_lun_acl), GFP_KERNEL); if (!lacl) { pr_err("Unable to allocate memory for struct se_lun_acl.\n"); @@ -1505,7 +1505,8 @@ struct se_lun_acl *core_dev_init_initiator_node_lun_acl( INIT_LIST_HEAD(&lacl->lacl_list); lacl->mapped_lun = mapped_lun; lacl->se_lun_nacl = nacl; - snprintf(lacl->initiatorname, TRANSPORT_IQN_LEN, "%s", initiatorname); + snprintf(lacl->initiatorname, TRANSPORT_IQN_LEN, "%s", + nacl->initiatorname); return lacl; } @@ -1665,6 +1666,7 @@ int core_dev_setup_virtual_lun0(void) ret = PTR_ERR(dev); goto out; } + dev->dev_link_magic = SE_DEV_LINK_MAGIC; se_dev->se_dev_ptr = dev; g_lun0_dev = dev; diff --git a/drivers/target/target_core_fabric_configfs.c b/drivers/target/target_core_fabric_configfs.c index 405cc98eaed6..6b79ee711525 100644 --- a/drivers/target/target_core_fabric_configfs.c +++ b/drivers/target/target_core_fabric_configfs.c @@ -72,6 +72,12 @@ static int target_fabric_mappedlun_link( struct se_portal_group *se_tpg; struct config_item *nacl_ci, *tpg_ci, *tpg_ci_s, *wwn_ci, *wwn_ci_s; int ret = 0, lun_access; + + if (lun->lun_link_magic != SE_LUN_LINK_MAGIC) { + pr_err("Bad lun->lun_link_magic, not a valid lun_ci pointer:" + " %p to struct lun: %p\n", lun_ci, lun); + return -EFAULT; + } /* * Ensure that the source port exists */ @@ -350,9 +356,17 @@ static struct config_group *target_fabric_make_mappedlun( ret = -EINVAL; goto out; } + if (mapped_lun > (TRANSPORT_MAX_LUNS_PER_TPG-1)) { + pr_err("Mapped LUN: %lu exceeds TRANSPORT_MAX_LUNS_PER_TPG" + "-1: %u for Target Portal Group: %u\n", mapped_lun, + TRANSPORT_MAX_LUNS_PER_TPG-1, + se_tpg->se_tpg_tfo->tpg_get_tag(se_tpg)); + ret = -EINVAL; + goto out; + } - lacl = core_dev_init_initiator_node_lun_acl(se_tpg, mapped_lun, - config_item_name(acl_ci), &ret); + lacl = core_dev_init_initiator_node_lun_acl(se_tpg, se_nacl, + mapped_lun, &ret); if (!lacl) { ret = -EINVAL; goto out; @@ -763,6 +777,11 @@ static int target_fabric_port_link( ret = -ENODEV; goto out; } + if (dev->dev_link_magic != SE_DEV_LINK_MAGIC) { + pr_err("Bad dev->dev_link_magic, not a valid se_dev_ci pointer:" + " %p to struct se_device: %p\n", se_dev_ci, dev); + return -EFAULT; + } lun_p = core_dev_add_lun(se_tpg, dev->se_hba, dev, lun->unpacked_lun); diff --git a/drivers/target/target_core_internal.h b/drivers/target/target_core_internal.h index 21c05638f158..17179b1af540 100644 --- a/drivers/target/target_core_internal.h +++ b/drivers/target/target_core_internal.h @@ -61,7 +61,7 @@ struct se_lun *core_dev_add_lun(struct se_portal_group *, struct se_hba *, int core_dev_del_lun(struct se_portal_group *, u32); struct se_lun *core_get_lun_from_tpg(struct se_portal_group *, u32); struct se_lun_acl *core_dev_init_initiator_node_lun_acl(struct se_portal_group *, - u32, char *, int *); + struct se_node_acl *, u32, int *); int core_dev_add_initiator_node_lun_acl(struct se_portal_group *, struct se_lun_acl *, u32, u32); int core_dev_del_initiator_node_lun_acl(struct se_portal_group *, diff --git a/drivers/target/target_core_tmr.c b/drivers/target/target_core_tmr.c index f015839aef89..4a5c6d758c47 100644 --- a/drivers/target/target_core_tmr.c +++ b/drivers/target/target_core_tmr.c @@ -140,15 +140,15 @@ void core_tmr_abort_task( printk("ABORT_TASK: Found referenced %s task_tag: %u\n", se_cmd->se_tfo->get_fabric_name(), ref_tag); - spin_lock_irq(&se_cmd->t_state_lock); + spin_lock(&se_cmd->t_state_lock); if 
(se_cmd->transport_state & CMD_T_COMPLETE) { printk("ABORT_TASK: ref_tag: %u already complete, skipping\n", ref_tag); - spin_unlock_irq(&se_cmd->t_state_lock); + spin_unlock(&se_cmd->t_state_lock); spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags); goto out; } se_cmd->transport_state |= CMD_T_ABORTED; - spin_unlock_irq(&se_cmd->t_state_lock); + spin_unlock(&se_cmd->t_state_lock); list_del_init(&se_cmd->se_cmd_list); kref_get(&se_cmd->cmd_kref); diff --git a/drivers/target/target_core_tpg.c b/drivers/target/target_core_tpg.c index e320ec24aa1b..0e17fa3da01b 100644 --- a/drivers/target/target_core_tpg.c +++ b/drivers/target/target_core_tpg.c @@ -114,16 +114,10 @@ struct se_node_acl *core_tpg_get_initiator_node_acl( struct se_node_acl *acl; spin_lock_irq(&tpg->acl_node_lock); - list_for_each_entry(acl, &tpg->acl_node_list, acl_list) { - if (!strcmp(acl->initiatorname, initiatorname) && - !acl->dynamic_node_acl) { - spin_unlock_irq(&tpg->acl_node_lock); - return acl; - } - } + acl = __core_tpg_get_initiator_node_acl(tpg, initiatorname); spin_unlock_irq(&tpg->acl_node_lock); - return NULL; + return acl; } /* core_tpg_add_node_to_devs(): @@ -677,6 +671,7 @@ int core_tpg_register( for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) { lun = se_tpg->tpg_lun_list[i]; lun->unpacked_lun = i; + lun->lun_link_magic = SE_LUN_LINK_MAGIC; lun->lun_status = TRANSPORT_LUN_STATUS_FREE; atomic_set(&lun->lun_acl_count, 0); init_completion(&lun->lun_shutdown_comp); diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c index 0686d61adeae..f687892b00be 100644 --- a/drivers/target/target_core_transport.c +++ b/drivers/target/target_core_transport.c @@ -1341,6 +1341,7 @@ struct se_device *transport_add_device_to_core_hba( dev->se_hba = hba; dev->se_sub_dev = se_dev; dev->transport = transport; + dev->dev_link_magic = SE_DEV_LINK_MAGIC; INIT_LIST_HEAD(&dev->dev_list); INIT_LIST_HEAD(&dev->dev_sep_list); INIT_LIST_HEAD(&dev->dev_tmr_list); @@ -1748,7 +1749,8 @@ static void target_complete_tmr_failure(struct work_struct *work) se_cmd->se_tmr_req->response = TMR_LUN_DOES_NOT_EXIST; se_cmd->se_tfo->queue_tm_rsp(se_cmd); - transport_generic_free_cmd(se_cmd, 0); + + transport_cmd_check_stop_to_fabric(se_cmd); } /** @@ -3167,15 +3169,20 @@ static int transport_generic_cmd_sequencer( /* Returns CHECK_CONDITION + INVALID_CDB_FIELD */ goto out_invalid_cdb_field; } - + /* + * For the overflow case keep the existing fabric provided + * ->data_length. Otherwise for the underflow case, reset + * ->data_length to the smaller SCSI expected data transfer + * length. 
+ */ if (size > cmd->data_length) { cmd->se_cmd_flags |= SCF_OVERFLOW_BIT; cmd->residual_count = (size - cmd->data_length); } else { cmd->se_cmd_flags |= SCF_UNDERFLOW_BIT; cmd->residual_count = (cmd->data_length - size); + cmd->data_length = size; } - cmd->data_length = size; } if (cmd->se_cmd_flags & SCF_SCSI_DATA_SG_IO_CDB && @@ -3675,9 +3682,9 @@ transport_generic_get_mem(struct se_cmd *cmd) return 0; out: - while (i >= 0) { - __free_page(sg_page(&cmd->t_data_sg[i])); + while (i > 0) { i--; + __free_page(sg_page(&cmd->t_data_sg[i])); } kfree(cmd->t_data_sg); cmd->t_data_sg = NULL; diff --git a/drivers/target/tcm_fc/tfc_sess.c b/drivers/target/tcm_fc/tfc_sess.c index 87901fa74dd7..9249998a837a 100644 --- a/drivers/target/tcm_fc/tfc_sess.c +++ b/drivers/target/tcm_fc/tfc_sess.c @@ -356,11 +356,11 @@ static int ft_prli_locked(struct fc_rport_priv *rdata, u32 spp_len, tport = ft_tport_create(rdata->local_port); if (!tport) - return 0; /* not a target for this local port */ + goto not_target; /* not a target for this local port */ acl = ft_acl_get(tport->tpg, rdata); if (!acl) - return 0; + goto not_target; /* no target for this remote */ if (!rspp) goto fill; @@ -397,12 +397,18 @@ static int ft_prli_locked(struct fc_rport_priv *rdata, u32 spp_len, /* * OR in our service parameters with other provider (initiator), if any. - * TBD XXX - indicate RETRY capability? */ fill: fcp_parm = ntohl(spp->spp_params); + fcp_parm &= ~FCP_SPPF_RETRY; spp->spp_params = htonl(fcp_parm | FCP_SPPF_TARG_FCN); return FC_SPP_RESP_ACK; + +not_target: + fcp_parm = ntohl(spp->spp_params); + fcp_parm &= ~FCP_SPPF_TARG_FCN; + spp->spp_params = htonl(fcp_parm); + return 0; } /** @@ -431,7 +437,6 @@ static void ft_sess_rcu_free(struct rcu_head *rcu) { struct ft_sess *sess = container_of(rcu, struct ft_sess, rcu); - transport_deregister_session(sess->se_sess); kfree(sess); } @@ -439,6 +444,7 @@ static void ft_sess_free(struct kref *kref) { struct ft_sess *sess = container_of(kref, struct ft_sess, kref); + transport_deregister_session(sess->se_sess); call_rcu(&sess->rcu, ft_sess_rcu_free); } diff --git a/drivers/tty/n_gsm.c b/drivers/tty/n_gsm.c index c43b683b6eb8..4a418e44d562 100644 --- a/drivers/tty/n_gsm.c +++ b/drivers/tty/n_gsm.c @@ -875,7 +875,7 @@ static int gsm_dlci_data_output_framed(struct gsm_mux *gsm, /* dlci->skb is locked by tx_lock */ if (dlci->skb == NULL) { - dlci->skb = skb_dequeue(&dlci->skb_list); + dlci->skb = skb_dequeue_tail(&dlci->skb_list); if (dlci->skb == NULL) return 0; first = 1; @@ -899,8 +899,11 @@ static int gsm_dlci_data_output_framed(struct gsm_mux *gsm, /* FIXME: need a timer or something to kick this so it can't get stuck with no work outstanding and no buffer free */ - if (msg == NULL) + if (msg == NULL) { + skb_queue_tail(&dlci->skb_list, dlci->skb); + dlci->skb = NULL; return -ENOMEM; + } dp = msg->data; if (dlci->adaption == 4) { /* Interruptible framed (Packetised Data) */ @@ -971,16 +974,19 @@ static void gsm_dlci_data_sweep(struct gsm_mux *gsm) static void gsm_dlci_data_kick(struct gsm_dlci *dlci) { unsigned long flags; + int sweep; spin_lock_irqsave(&dlci->gsm->tx_lock, flags); /* If we have nothing running then we need to fire up */ + sweep = (dlci->gsm->tx_bytes < TX_THRESH_LO); if (dlci->gsm->tx_bytes == 0) { if (dlci->net) gsm_dlci_data_output_framed(dlci->gsm, dlci); else gsm_dlci_data_output(dlci->gsm, dlci); - } else if (dlci->gsm->tx_bytes < TX_THRESH_LO) - gsm_dlci_data_sweep(dlci->gsm); + } + if (sweep) + gsm_dlci_data_sweep(dlci->gsm); 
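A worked, standalone C model of the residual accounting changed by the target_core_transport.c hunk above (names and sizes here are illustrative, not the kernel's): when the length computed from the CDB differs from the fabric-provided expected transfer length, the underflow case shrinks data_length to the CDB size, while the overflow case now keeps the fabric-provided data_length instead of overwriting it.

#include <stdint.h>
#include <stdio.h>

struct xfer {
	uint32_t data_length;     /* fabric-provided expected transfer length */
	uint32_t residual_count;
	int overflow, underflow;
};

static void account_residual(struct xfer *x, uint32_t cdb_size)
{
	if (cdb_size > x->data_length) {
		x->overflow = 1;
		x->residual_count = cdb_size - x->data_length;
		/* data_length intentionally stays at the fabric value */
	} else if (cdb_size < x->data_length) {
		x->underflow = 1;
		x->residual_count = x->data_length - cdb_size;
		x->data_length = cdb_size;
	}
}

int main(void)
{
	struct xfer a = { .data_length = 4096 };
	struct xfer b = { .data_length = 512 };

	account_residual(&a, 512);   /* underflow: residual 3584, data_length becomes 512 */
	account_residual(&b, 4096);  /* overflow: residual 3584, data_length stays 512 */
	printf("%u %u / %u %u\n", (unsigned)a.data_length, (unsigned)a.residual_count,
	       (unsigned)b.data_length, (unsigned)b.residual_count);
	return 0;
}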
spin_unlock_irqrestore(&dlci->gsm->tx_lock, flags); } @@ -1190,6 +1196,8 @@ static void gsm_control_message(struct gsm_mux *gsm, unsigned int command, u8 *data, int clen) { u8 buf[1]; + unsigned long flags; + switch (command) { case CMD_CLD: { struct gsm_dlci *dlci = gsm->dlci[0]; @@ -1215,7 +1223,9 @@ static void gsm_control_message(struct gsm_mux *gsm, unsigned int command, gsm->constipated = 0; gsm_control_reply(gsm, CMD_FCOFF, NULL, 0); /* Kick the link in case it is idling */ + spin_lock_irqsave(&gsm->tx_lock, flags); gsm_data_kick(gsm); + spin_unlock_irqrestore(&gsm->tx_lock, flags); break; case CMD_MSC: /* Out of band modem line change indicator for a DLCI */ @@ -1682,6 +1692,8 @@ static inline void dlci_put(struct gsm_dlci *dlci) kref_put(&dlci->ref, gsm_dlci_free); } +static void gsm_destroy_network(struct gsm_dlci *dlci); + /** * gsm_dlci_release - release DLCI * @dlci: DLCI to destroy @@ -1695,9 +1707,19 @@ static void gsm_dlci_release(struct gsm_dlci *dlci) { struct tty_struct *tty = tty_port_tty_get(&dlci->port); if (tty) { + mutex_lock(&dlci->mutex); + gsm_destroy_network(dlci); + mutex_unlock(&dlci->mutex); + + /* tty_vhangup needs the tty_lock, so unlock and + relock after doing the hangup. */ + tty_unlock(); tty_vhangup(tty); + tty_lock(); + tty_port_tty_set(&dlci->port, NULL); tty_kref_put(tty); } + dlci->state = DLCI_CLOSED; dlci_put(dlci); } @@ -2377,12 +2399,12 @@ static void gsmld_write_wakeup(struct tty_struct *tty) /* Queue poll */ clear_bit(TTY_DO_WRITE_WAKEUP, &tty->flags); + spin_lock_irqsave(&gsm->tx_lock, flags); gsm_data_kick(gsm); if (gsm->tx_bytes < TX_THRESH_LO) { - spin_lock_irqsave(&gsm->tx_lock, flags); gsm_dlci_data_sweep(gsm); - spin_unlock_irqrestore(&gsm->tx_lock, flags); } + spin_unlock_irqrestore(&gsm->tx_lock, flags); } /** @@ -2889,6 +2911,10 @@ static int gsmtty_open(struct tty_struct *tty, struct file *filp) gsm = gsm_mux[mux]; if (gsm->dead) return -EL2HLT; + /* If DLCI 0 is not yet fully open return an error. 
This is ok from a locking + perspective as we don't have to worry about this if DLCI0 is lost */ + if (gsm->dlci[0] && gsm->dlci[0]->state != DLCI_OPEN) + return -EL2NSYNC; dlci = gsm->dlci[line]; if (dlci == NULL) dlci = gsm_dlci_alloc(gsm, line); @@ -2919,6 +2945,8 @@ static void gsmtty_close(struct tty_struct *tty, struct file *filp) if (dlci == NULL) return; + if (dlci->state == DLCI_CLOSED) + return; mutex_lock(&dlci->mutex); gsm_destroy_network(dlci); mutex_unlock(&dlci->mutex); @@ -2937,6 +2965,8 @@ out: static void gsmtty_hangup(struct tty_struct *tty) { struct gsm_dlci *dlci = tty->driver_data; + if (dlci->state == DLCI_CLOSED) + return; tty_port_hangup(&dlci->port); gsm_dlci_begin_close(dlci); } @@ -2944,9 +2974,12 @@ static void gsmtty_hangup(struct tty_struct *tty) static int gsmtty_write(struct tty_struct *tty, const unsigned char *buf, int len) { + int sent; struct gsm_dlci *dlci = tty->driver_data; + if (dlci->state == DLCI_CLOSED) + return -EINVAL; /* Stuff the bytes into the fifo queue */ - int sent = kfifo_in_locked(dlci->fifo, buf, len, &dlci->lock); + sent = kfifo_in_locked(dlci->fifo, buf, len, &dlci->lock); /* Need to kick the channel */ gsm_dlci_data_kick(dlci); return sent; @@ -2955,18 +2988,24 @@ static int gsmtty_write(struct tty_struct *tty, const unsigned char *buf, static int gsmtty_write_room(struct tty_struct *tty) { struct gsm_dlci *dlci = tty->driver_data; + if (dlci->state == DLCI_CLOSED) + return -EINVAL; return TX_SIZE - kfifo_len(dlci->fifo); } static int gsmtty_chars_in_buffer(struct tty_struct *tty) { struct gsm_dlci *dlci = tty->driver_data; + if (dlci->state == DLCI_CLOSED) + return -EINVAL; return kfifo_len(dlci->fifo); } static void gsmtty_flush_buffer(struct tty_struct *tty) { struct gsm_dlci *dlci = tty->driver_data; + if (dlci->state == DLCI_CLOSED) + return; /* Caution needed: If we implement reliable transport classes then the data being transmitted can't simply be junked once it has first hit the stack. Until then we can just blow it @@ -2985,6 +3024,8 @@ static void gsmtty_wait_until_sent(struct tty_struct *tty, int timeout) static int gsmtty_tiocmget(struct tty_struct *tty) { struct gsm_dlci *dlci = tty->driver_data; + if (dlci->state == DLCI_CLOSED) + return -EINVAL; return dlci->modem_rx; } @@ -2994,6 +3035,8 @@ static int gsmtty_tiocmset(struct tty_struct *tty, struct gsm_dlci *dlci = tty->driver_data; unsigned int modem_tx = dlci->modem_tx; + if (dlci->state == DLCI_CLOSED) + return -EINVAL; modem_tx &= ~clear; modem_tx |= set; @@ -3012,6 +3055,8 @@ static int gsmtty_ioctl(struct tty_struct *tty, struct gsm_netconfig nc; int index; + if (dlci->state == DLCI_CLOSED) + return -EINVAL; switch (cmd) { case GSMIOC_ENABLE_NET: if (copy_from_user(&nc, (void __user *)arg, sizeof(nc))) @@ -3038,6 +3083,9 @@ static int gsmtty_ioctl(struct tty_struct *tty, static void gsmtty_set_termios(struct tty_struct *tty, struct ktermios *old) { + struct gsm_dlci *dlci = tty->driver_data; + if (dlci->state == DLCI_CLOSED) + return; /* For the moment its fixed. In actual fact the speed information for the virtual channel can be propogated in both directions by the RPN control message. 
This however rapidly gets nasty as we @@ -3049,6 +3097,8 @@ static void gsmtty_set_termios(struct tty_struct *tty, struct ktermios *old) static void gsmtty_throttle(struct tty_struct *tty) { struct gsm_dlci *dlci = tty->driver_data; + if (dlci->state == DLCI_CLOSED) + return; if (tty->termios->c_cflag & CRTSCTS) dlci->modem_tx &= ~TIOCM_DTR; dlci->throttled = 1; @@ -3059,6 +3109,8 @@ static void gsmtty_throttle(struct tty_struct *tty) static void gsmtty_unthrottle(struct tty_struct *tty) { struct gsm_dlci *dlci = tty->driver_data; + if (dlci->state == DLCI_CLOSED) + return; if (tty->termios->c_cflag & CRTSCTS) dlci->modem_tx |= TIOCM_DTR; dlci->throttled = 0; @@ -3070,6 +3122,8 @@ static int gsmtty_break_ctl(struct tty_struct *tty, int state) { struct gsm_dlci *dlci = tty->driver_data; int encode = 0; /* Off */ + if (dlci->state == DLCI_CLOSED) + return -EINVAL; if (state == -1) /* "On indefinitely" - we can't encode this properly */ diff --git a/drivers/tty/n_tty.c b/drivers/tty/n_tty.c index 94b6eda87afd..2303a02e9dc5 100644 --- a/drivers/tty/n_tty.c +++ b/drivers/tty/n_tty.c @@ -1727,7 +1727,8 @@ static ssize_t n_tty_read(struct tty_struct *tty, struct file *file, do_it_again: - BUG_ON(!tty->read_buf); + if (WARN_ON(!tty->read_buf)) + return -EAGAIN; c = job_control(tty, file); if (c < 0) diff --git a/drivers/tty/serial/8250/8250_dw.c b/drivers/tty/serial/8250/8250_dw.c index f574eef3075f..b6dc908a24c6 100644 --- a/drivers/tty/serial/8250/8250_dw.c +++ b/drivers/tty/serial/8250/8250_dw.c @@ -79,7 +79,7 @@ static int dw8250_handle_irq(struct uart_port *p) } else if ((iir & UART_IIR_BUSY) == UART_IIR_BUSY) { /* Clear the USR and write the LCR again. */ (void)p->serial_in(p, UART_USR); - p->serial_out(p, d->last_lcr, UART_LCR); + p->serial_out(p, UART_LCR, d->last_lcr); return 1; } diff --git a/drivers/tty/serial/8250/8250_pci.c b/drivers/tty/serial/8250/8250_pci.c index 3614973c9990..40747feed34c 100644 --- a/drivers/tty/serial/8250/8250_pci.c +++ b/drivers/tty/serial/8250/8250_pci.c @@ -1125,6 +1125,8 @@ pci_xr17c154_setup(struct serial_private *priv, #define PCI_SUBDEVICE_ID_OCTPRO422 0x0208 #define PCI_SUBDEVICE_ID_POCTAL232 0x0308 #define PCI_SUBDEVICE_ID_POCTAL422 0x0408 +#define PCI_SUBDEVICE_ID_SIIG_DUAL_00 0x2500 +#define PCI_SUBDEVICE_ID_SIIG_DUAL_30 0x2530 #define PCI_VENDOR_ID_ADVANTECH 0x13fe #define PCI_DEVICE_ID_INTEL_CE4100_UART 0x2e66 #define PCI_DEVICE_ID_ADVANTECH_PCI3620 0x3620 @@ -3187,8 +3189,11 @@ static struct pci_device_id serial_pci_tbl[] = { * For now just used the hex ID 0x950a. */ { PCI_VENDOR_ID_OXSEMI, 0x950a, - PCI_SUBVENDOR_ID_SIIG, PCI_SUBDEVICE_ID_SIIG_DUAL_SERIAL, 0, 0, - pbn_b0_2_115200 }, + PCI_SUBVENDOR_ID_SIIG, PCI_SUBDEVICE_ID_SIIG_DUAL_00, + 0, 0, pbn_b0_2_115200 }, + { PCI_VENDOR_ID_OXSEMI, 0x950a, + PCI_SUBVENDOR_ID_SIIG, PCI_SUBDEVICE_ID_SIIG_DUAL_30, + 0, 0, pbn_b0_2_115200 }, { PCI_VENDOR_ID_OXSEMI, 0x950a, PCI_ANY_ID, PCI_ANY_ID, 0, 0, pbn_b0_2_1130000 }, diff --git a/drivers/tty/serial/amba-pl011.c b/drivers/tty/serial/amba-pl011.c index 3d569cd68f58..b69356c227c4 100644 --- a/drivers/tty/serial/amba-pl011.c +++ b/drivers/tty/serial/amba-pl011.c @@ -1654,13 +1654,26 @@ pl011_set_termios(struct uart_port *port, struct ktermios *termios, old_cr &= ~ST_UART011_CR_OVSFACT; } + /* + * Workaround for the ST Micro oversampling variants to + * increase the bitrate slightly, by lowering the divisor, + * to avoid delayed sampling of start bit at high speeds, + * else we see data corruption. 
+ */ + if (uap->vendor->oversampling) { + if ((baud >= 3000000) && (baud < 3250000) && (quot > 1)) + quot -= 1; + else if ((baud > 3250000) && (quot > 2)) + quot -= 2; + } /* Set baud rate */ writew(quot & 0x3f, port->membase + UART011_FBRD); writew(quot >> 6, port->membase + UART011_IBRD); /* * ----------v----------v----------v----------v----- - * NOTE: MUST BE WRITTEN AFTER UARTLCR_M & UARTLCR_L + * NOTE: lcrh_tx and lcrh_rx MUST BE WRITTEN AFTER + * UART011_FBRD & UART011_IBRD. * ----------^----------^----------^----------^----- */ writew(lcr_h, port->membase + uap->lcrh_rx); diff --git a/drivers/tty/serial/ifx6x60.c b/drivers/tty/serial/ifx6x60.c index 144cd3987d4c..17f587c5dbab 100644 --- a/drivers/tty/serial/ifx6x60.c +++ b/drivers/tty/serial/ifx6x60.c @@ -552,6 +552,7 @@ static void ifx_port_shutdown(struct tty_port *port) container_of(port, struct ifx_spi_device, tty_port); mrdy_set_low(ifx_dev); + del_timer(&ifx_dev->spi_timer); clear_bit(IFX_SPI_STATE_TIMER_PENDING, &ifx_dev->flags); tasklet_kill(&ifx_dev->io_work_tasklet); } diff --git a/drivers/tty/serial/imx.c b/drivers/tty/serial/imx.c index e7feceeebc2f..0de7ed788631 100644 --- a/drivers/tty/serial/imx.c +++ b/drivers/tty/serial/imx.c @@ -756,6 +756,7 @@ static int imx_startup(struct uart_port *port) } } + spin_lock_irqsave(&sport->port.lock, flags); /* * Finally, clear and enable interrupts */ @@ -809,7 +810,6 @@ static int imx_startup(struct uart_port *port) /* * Enable modem status interrupts */ - spin_lock_irqsave(&sport->port.lock,flags); imx_enable_ms(&sport->port); spin_unlock_irqrestore(&sport->port.lock,flags); @@ -839,10 +839,13 @@ static void imx_shutdown(struct uart_port *port) { struct imx_port *sport = (struct imx_port *)port; unsigned long temp; + unsigned long flags; + spin_lock_irqsave(&sport->port.lock, flags); temp = readl(sport->port.membase + UCR2); temp &= ~(UCR2_TXEN); writel(temp, sport->port.membase + UCR2); + spin_unlock_irqrestore(&sport->port.lock, flags); if (USE_IRDA(sport)) { struct imxuart_platform_data *pdata; @@ -871,12 +874,14 @@ static void imx_shutdown(struct uart_port *port) * Disable all interrupts, port and break condition. 
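The amba-pl011 hunk above nudges the baud divisor down at very high rates on the ST Micro oversampling variants; here is a standalone sketch of just that adjustment, with the thresholds taken from the hunk and everything else (the surrounding divisor math and register writes) omitted.

#include <stdio.h>

static unsigned int adjust_quot(unsigned int baud, unsigned int quot)
{
	if (baud >= 3000000 && baud < 3250000 && quot > 1)
		quot -= 1;                 /* sample the start bit slightly earlier */
	else if (baud > 3250000 && quot > 2)
		quot -= 2;
	return quot;
}

int main(void)
{
	printf("%u\n", adjust_quot(3000000, 64));  /* 63 */
	printf("%u\n", adjust_quot(4000000, 64));  /* 62 */
	printf("%u\n", adjust_quot(115200, 64));   /* 64, untouched at normal rates */
	return 0;
}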
*/ + spin_lock_irqsave(&sport->port.lock, flags); temp = readl(sport->port.membase + UCR1); temp &= ~(UCR1_TXMPTYEN | UCR1_RRDYEN | UCR1_RTSDEN | UCR1_UARTEN); if (USE_IRDA(sport)) temp &= ~(UCR1_IREN); writel(temp, sport->port.membase + UCR1); + spin_unlock_irqrestore(&sport->port.lock, flags); } static void @@ -1219,6 +1224,9 @@ imx_console_write(struct console *co, const char *s, unsigned int count) struct imx_port *sport = imx_ports[co->index]; struct imx_port_ucrs old_ucr; unsigned int ucr1; + unsigned long flags; + + spin_lock_irqsave(&sport->port.lock, flags); /* * First, save UCR1/2/3 and then disable interrupts @@ -1244,6 +1252,8 @@ imx_console_write(struct console *co, const char *s, unsigned int count) while (!(readl(sport->port.membase + USR2) & USR2_TXDC)); imx_port_ucrs_restore(&sport->port, &old_ucr); + + spin_unlock_irqrestore(&sport->port.lock, flags); } /* diff --git a/drivers/tty/serial/pch_uart.c b/drivers/tty/serial/pch_uart.c index c2816f494807..7d4751474da1 100644 --- a/drivers/tty/serial/pch_uart.c +++ b/drivers/tty/serial/pch_uart.c @@ -252,6 +252,9 @@ struct eg20t_port { dma_addr_t rx_buf_dma; struct dentry *debugfs; + + /* protect the eg20t_port private structure and io access to membase */ + spinlock_t lock; }; /** @@ -754,7 +757,8 @@ static void pch_dma_rx_complete(void *arg) tty_flip_buffer_push(tty); tty_kref_put(tty); async_tx_ack(priv->desc_rx); - pch_uart_hal_enable_interrupt(priv, PCH_UART_HAL_RX_INT); + pch_uart_hal_enable_interrupt(priv, PCH_UART_HAL_RX_INT | + PCH_UART_HAL_RX_ERR_INT); } static void pch_dma_tx_complete(void *arg) @@ -809,7 +813,8 @@ static int handle_rx_to(struct eg20t_port *priv) int rx_size; int ret; if (!priv->start_rx) { - pch_uart_hal_disable_interrupt(priv, PCH_UART_HAL_RX_INT); + pch_uart_hal_disable_interrupt(priv, PCH_UART_HAL_RX_INT | + PCH_UART_HAL_RX_ERR_INT); return 0; } buf = &priv->rxbuf; @@ -1056,7 +1061,7 @@ static irqreturn_t pch_uart_interrupt(int irq, void *dev_id) unsigned int iid; unsigned long flags; - spin_lock_irqsave(&priv->port.lock, flags); + spin_lock_irqsave(&priv->lock, flags); handled = 0; while ((iid = pch_uart_hal_get_iid(priv)) > 1) { switch (iid) { @@ -1071,11 +1076,13 @@ static irqreturn_t pch_uart_interrupt(int irq, void *dev_id) case PCH_UART_IID_RDR: /* Received Data Ready */ if (priv->use_dma) { pch_uart_hal_disable_interrupt(priv, - PCH_UART_HAL_RX_INT); + PCH_UART_HAL_RX_INT | + PCH_UART_HAL_RX_ERR_INT); ret = dma_handle_rx(priv); if (!ret) pch_uart_hal_enable_interrupt(priv, - PCH_UART_HAL_RX_INT); + PCH_UART_HAL_RX_INT | + PCH_UART_HAL_RX_ERR_INT); } else { ret = handle_rx(priv); } @@ -1107,7 +1114,7 @@ static irqreturn_t pch_uart_interrupt(int irq, void *dev_id) priv->int_dis_flag = 0; } - spin_unlock_irqrestore(&priv->port.lock, flags); + spin_unlock_irqrestore(&priv->lock, flags); return IRQ_RETVAL(handled); } @@ -1199,7 +1206,8 @@ static void pch_uart_stop_rx(struct uart_port *port) struct eg20t_port *priv; priv = container_of(port, struct eg20t_port, port); priv->start_rx = 0; - pch_uart_hal_disable_interrupt(priv, PCH_UART_HAL_RX_INT); + pch_uart_hal_disable_interrupt(priv, PCH_UART_HAL_RX_INT | + PCH_UART_HAL_RX_ERR_INT); priv->int_dis_flag = 1; } @@ -1218,9 +1226,9 @@ static void pch_uart_break_ctl(struct uart_port *port, int ctl) unsigned long flags; priv = container_of(port, struct eg20t_port, port); - spin_lock_irqsave(&port->lock, flags); + spin_lock_irqsave(&priv->lock, flags); pch_uart_hal_set_break(priv, ctl); - spin_unlock_irqrestore(&port->lock, flags); + 
spin_unlock_irqrestore(&priv->lock, flags); } /* Grab any interrupt resources and initialise any low level driver state. */ @@ -1255,6 +1263,7 @@ static int pch_uart_startup(struct uart_port *port) break; case 16: fifo_size = PCH_UART_HAL_FIFO16; + break; case 1: default: fifo_size = PCH_UART_HAL_FIFO_DIS; @@ -1292,7 +1301,8 @@ static int pch_uart_startup(struct uart_port *port) pch_request_dma(port); priv->start_rx = 1; - pch_uart_hal_enable_interrupt(priv, PCH_UART_HAL_RX_INT); + pch_uart_hal_enable_interrupt(priv, PCH_UART_HAL_RX_INT | + PCH_UART_HAL_RX_ERR_INT); uart_update_timeout(port, CS8, default_baud); return 0; @@ -1350,7 +1360,7 @@ static void pch_uart_set_termios(struct uart_port *port, stb = PCH_UART_HAL_STB1; if (termios->c_cflag & PARENB) { - if (!(termios->c_cflag & PARODD)) + if (termios->c_cflag & PARODD) parity = PCH_UART_HAL_PARITY_ODD; else parity = PCH_UART_HAL_PARITY_EVEN; @@ -1368,7 +1378,8 @@ static void pch_uart_set_termios(struct uart_port *port, baud = uart_get_baud_rate(port, termios, old, 0, port->uartclk / 16); - spin_lock_irqsave(&port->lock, flags); + spin_lock_irqsave(&priv->lock, flags); + spin_lock(&port->lock); uart_update_timeout(port, termios->c_cflag, baud); rtn = pch_uart_hal_set_line(priv, baud, parity, bits, stb); @@ -1381,7 +1392,8 @@ static void pch_uart_set_termios(struct uart_port *port, tty_termios_encode_baud_rate(termios, baud, baud); out: - spin_unlock_irqrestore(&port->lock, flags); + spin_unlock(&port->lock); + spin_unlock_irqrestore(&priv->lock, flags); } static const char *pch_uart_type(struct uart_port *port) @@ -1531,8 +1543,9 @@ pch_console_write(struct console *co, const char *s, unsigned int count) { struct eg20t_port *priv; unsigned long flags; + int priv_locked = 1; + int port_locked = 1; u8 ier; - int locked = 1; priv = pch_uart_ports[co->index]; @@ -1540,12 +1553,16 @@ pch_console_write(struct console *co, const char *s, unsigned int count) local_irq_save(flags); if (priv->port.sysrq) { - /* serial8250_handle_port() already took the lock */ - locked = 0; + spin_lock(&priv->lock); + /* serial8250_handle_port() already took the port lock */ + port_locked = 0; } else if (oops_in_progress) { - locked = spin_trylock(&priv->port.lock); - } else + priv_locked = spin_trylock(&priv->lock); + port_locked = spin_trylock(&priv->port.lock); + } else { + spin_lock(&priv->lock); spin_lock(&priv->port.lock); + } /* * First save the IER then disable the interrupts @@ -1563,8 +1580,10 @@ pch_console_write(struct console *co, const char *s, unsigned int count) wait_for_xmitr(priv, BOTH_EMPTY); iowrite8(ier, priv->membase + UART_IER); - if (locked) + if (port_locked) spin_unlock(&priv->port.lock); + if (priv_locked) + spin_unlock(&priv->lock); local_irq_restore(flags); } @@ -1662,6 +1681,8 @@ static struct eg20t_port *pch_uart_init_port(struct pci_dev *pdev, pci_enable_msi(pdev); pci_set_master(pdev); + spin_lock_init(&priv->lock); + iobase = pci_resource_start(pdev, 0); mapbase = pci_resource_start(pdev, 1); priv->mapbase = mapbase; diff --git a/drivers/tty/tty_ioctl.c b/drivers/tty/tty_ioctl.c index a1b9a2f68567..f8d03da536dd 100644 --- a/drivers/tty/tty_ioctl.c +++ b/drivers/tty/tty_ioctl.c @@ -617,7 +617,7 @@ static int set_termios(struct tty_struct *tty, void __user *arg, int opt) if (opt & TERMIOS_WAIT) { tty_wait_until_sent(tty, 0); if (signal_pending(current)) - return -EINTR; + return -ERESTARTSYS; } tty_set_termios(tty, &tmp_termios); @@ -684,7 +684,7 @@ static int set_termiox(struct tty_struct *tty, void __user *arg, int opt) if (opt & 
TERMIOS_WAIT) { tty_wait_until_sent(tty, 0); if (signal_pending(current)) - return -EINTR; + return -ERESTARTSYS; } mutex_lock(&tty->termios_mutex); diff --git a/drivers/tty/vt/keyboard.c b/drivers/tty/vt/keyboard.c index 3b0c4e32ed7b..a6d5d51fcbc5 100644 --- a/drivers/tty/vt/keyboard.c +++ b/drivers/tty/vt/keyboard.c @@ -1053,13 +1053,10 @@ static int kbd_update_leds_helper(struct input_handle *handle, void *data) */ int vt_get_leds(int console, int flag) { - unsigned long flags; struct kbd_struct * kbd = kbd_table + console; int ret; - spin_lock_irqsave(&kbd_event_lock, flags); ret = vc_kbd_led(kbd, flag); - spin_unlock_irqrestore(&kbd_event_lock, flags); return ret; } diff --git a/drivers/tty/vt/vt.c b/drivers/tty/vt/vt.c index 2156188db4a6..268294c71c11 100644 --- a/drivers/tty/vt/vt.c +++ b/drivers/tty/vt/vt.c @@ -656,7 +656,7 @@ static inline void save_screen(struct vc_data *vc) * Redrawing of screen */ -static void clear_buffer_attributes(struct vc_data *vc) +void clear_buffer_attributes(struct vc_data *vc) { unsigned short *p = (unsigned short *)vc->vc_origin; int count = vc->vc_screenbuf_size / 2; @@ -3017,7 +3017,7 @@ int __init vty_init(const struct file_operations *console_fops) static struct class *vtconsole_class; -static int bind_con_driver(const struct consw *csw, int first, int last, +static int do_bind_con_driver(const struct consw *csw, int first, int last, int deflt) { struct module *owner = csw->owner; @@ -3028,7 +3028,7 @@ static int bind_con_driver(const struct consw *csw, int first, int last, if (!try_module_get(owner)) return -ENODEV; - console_lock(); + WARN_CONSOLE_UNLOCKED(); /* check if driver is registered */ for (i = 0; i < MAX_NR_CON_DRIVER; i++) { @@ -3113,11 +3113,22 @@ static int bind_con_driver(const struct consw *csw, int first, int last, retval = 0; err: - console_unlock(); module_put(owner); return retval; }; + +static int bind_con_driver(const struct consw *csw, int first, int last, + int deflt) +{ + int ret; + + console_lock(); + ret = do_bind_con_driver(csw, first, last, deflt); + console_unlock(); + return ret; +} + #ifdef CONFIG_VT_HW_CONSOLE_BINDING static int con_is_graphics(const struct consw *csw, int first, int last) { @@ -3154,6 +3165,18 @@ static int con_is_graphics(const struct consw *csw, int first, int last) */ int unbind_con_driver(const struct consw *csw, int first, int last, int deflt) { + int retval; + + console_lock(); + retval = do_unbind_con_driver(csw, first, last, deflt); + console_unlock(); + return retval; +} +EXPORT_SYMBOL(unbind_con_driver); + +/* unlocked version of unbind_con_driver() */ +int do_unbind_con_driver(const struct consw *csw, int first, int last, int deflt) +{ struct module *owner = csw->owner; const struct consw *defcsw = NULL; struct con_driver *con_driver = NULL, *con_back = NULL; @@ -3162,7 +3185,7 @@ int unbind_con_driver(const struct consw *csw, int first, int last, int deflt) if (!try_module_get(owner)) return -ENODEV; - console_lock(); + WARN_CONSOLE_UNLOCKED(); /* check if driver is registered and if it is unbindable */ for (i = 0; i < MAX_NR_CON_DRIVER; i++) { @@ -3175,10 +3198,8 @@ int unbind_con_driver(const struct consw *csw, int first, int last, int deflt) } } - if (retval) { - console_unlock(); + if (retval) goto err; - } retval = -ENODEV; @@ -3194,15 +3215,11 @@ int unbind_con_driver(const struct consw *csw, int first, int last, int deflt) } } - if (retval) { - console_unlock(); + if (retval) goto err; - } - if (!con_is_bound(csw)) { - console_unlock(); + if (!con_is_bound(csw)) goto err; - } 
first = max(first, con_driver->first); last = min(last, con_driver->last); @@ -3229,15 +3246,14 @@ int unbind_con_driver(const struct consw *csw, int first, int last, int deflt) if (!con_is_bound(csw)) con_driver->flag &= ~CON_DRIVER_FLAG_INIT; - console_unlock(); /* ignore return value, binding should not fail */ - bind_con_driver(defcsw, first, last, deflt); + do_bind_con_driver(defcsw, first, last, deflt); err: module_put(owner); return retval; } -EXPORT_SYMBOL(unbind_con_driver); +EXPORT_SYMBOL_GPL(do_unbind_con_driver); static int vt_bind(struct con_driver *con) { @@ -3475,6 +3491,19 @@ int con_debug_enter(struct vc_data *vc) kdb_set(2, setargs); } } + if (vc->vc_cols < 999) { + int colcount; + char cols[4]; + const char *setargs[3] = { + "set", + "COLUMNS", + cols, + }; + if (kdbgetintenv(setargs[0], &colcount)) { + snprintf(cols, 4, "%i", vc->vc_cols); + kdb_set(2, setargs); + } + } #endif /* CONFIG_KGDB_KDB */ return ret; } @@ -3509,28 +3538,18 @@ int con_debug_leave(void) } EXPORT_SYMBOL_GPL(con_debug_leave); -/** - * register_con_driver - register console driver to console layer - * @csw: console driver - * @first: the first console to take over, minimum value is 0 - * @last: the last console to take over, maximum value is MAX_NR_CONSOLES -1 - * - * DESCRIPTION: This function registers a console driver which can later - * bind to a range of consoles specified by @first and @last. It will - * also initialize the console driver by calling con_startup(). - */ -int register_con_driver(const struct consw *csw, int first, int last) +static int do_register_con_driver(const struct consw *csw, int first, int last) { struct module *owner = csw->owner; struct con_driver *con_driver; const char *desc; int i, retval = 0; + WARN_CONSOLE_UNLOCKED(); + if (!try_module_get(owner)) return -ENODEV; - console_lock(); - for (i = 0; i < MAX_NR_CON_DRIVER; i++) { con_driver = &registered_con_driver[i]; @@ -3583,10 +3602,29 @@ int register_con_driver(const struct consw *csw, int first, int last) } err: - console_unlock(); module_put(owner); return retval; } + +/** + * register_con_driver - register console driver to console layer + * @csw: console driver + * @first: the first console to take over, minimum value is 0 + * @last: the last console to take over, maximum value is MAX_NR_CONSOLES -1 + * + * DESCRIPTION: This function registers a console driver which can later + * bind to a range of consoles specified by @first and @last. It will + * also initialize the console driver by calling con_startup().
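The vt.c changes in this region split each public console-driver entry point into a locking wrapper and a do_*() worker that only asserts the console lock is held, so composites such as do_take_over_console() can run register and bind under a single console_lock. A standalone analogue of that pattern, where a pthread mutex and a plain flag stand in for console_lock() and WARN_CONSOLE_UNLOCKED() and all names are illustrative:

#include <assert.h>
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t console_mutex = PTHREAD_MUTEX_INITIALIZER;
static int console_locked;

static int do_register_driver(const char *name)
{
	assert(console_locked);            /* stands in for WARN_CONSOLE_UNLOCKED() */
	printf("registered %s\n", name);
	return 0;
}

static int register_driver(const char *name)   /* public, locking wrapper */
{
	int ret;

	pthread_mutex_lock(&console_mutex);
	console_locked = 1;
	ret = do_register_driver(name);
	console_locked = 0;
	pthread_mutex_unlock(&console_mutex);
	return ret;
}

int main(void)
{
	return register_driver("dummycon");
}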
+ */ +int register_con_driver(const struct consw *csw, int first, int last) +{ + int retval; + + console_lock(); + retval = do_register_con_driver(csw, first, last); + console_unlock(); + return retval; +} EXPORT_SYMBOL(register_con_driver); /** @@ -3602,9 +3640,18 @@ EXPORT_SYMBOL(register_con_driver); */ int unregister_con_driver(const struct consw *csw) { - int i, retval = -ENODEV; + int retval; console_lock(); + retval = do_unregister_con_driver(csw); + console_unlock(); + return retval; +} +EXPORT_SYMBOL(unregister_con_driver); + +int do_unregister_con_driver(const struct consw *csw) +{ + int i, retval = -ENODEV; /* cannot unregister a bound driver */ if (con_is_bound(csw)) @@ -3630,27 +3677,53 @@ int unregister_con_driver(const struct consw *csw) } } err: - console_unlock(); return retval; } -EXPORT_SYMBOL(unregister_con_driver); +EXPORT_SYMBOL_GPL(do_unregister_con_driver); + +/* + * If we support more console drivers, this function is used + * when a driver wants to take over some existing consoles + * and become default driver for newly opened ones. + * + * take_over_console is basically a register followed by unbind + */ +int do_take_over_console(const struct consw *csw, int first, int last, int deflt) +{ + int err; + + err = do_register_con_driver(csw, first, last); + /* + * If we get an busy error we still want to bind the console driver + * and return success, as we may have unbound the console driver + * but not unregistered it. + */ + if (err == -EBUSY) + err = 0; + if (!err) + do_bind_con_driver(csw, first, last, deflt); + + return err; +} +EXPORT_SYMBOL_GPL(do_take_over_console); /* * If we support more console drivers, this function is used * when a driver wants to take over some existing consoles * and become default driver for newly opened ones. * - * take_over_console is basically a register followed by unbind + * take_over_console is basically a register followed by unbind */ int take_over_console(const struct consw *csw, int first, int last, int deflt) { int err; err = register_con_driver(csw, first, last); - /* if we get an busy error we still want to bind the console driver + /* + * If we get an busy error we still want to bind the console driver * and return success, as we may have unbound the console driver - Â * but not unregistered it. - */ + * but not unregistered it. + */ if (err == -EBUSY) err = 0; if (!err) diff --git a/drivers/usb/class/cdc-acm.c b/drivers/usb/class/cdc-acm.c index 4e33418faa52..a6a435e52728 100644 --- a/drivers/usb/class/cdc-acm.c +++ b/drivers/usb/class/cdc-acm.c @@ -813,6 +813,10 @@ static int get_serial_info(struct acm *acm, struct serial_struct __user *info) tmp.flags = ASYNC_LOW_LATENCY; tmp.xmit_fifo_size = acm->writesize; tmp.baud_base = le32_to_cpu(acm->line.dwDTERate); + tmp.close_delay = acm->port.close_delay / 10; + tmp.closing_wait = acm->port.closing_wait == ASYNC_CLOSING_WAIT_NONE ? + ASYNC_CLOSING_WAIT_NONE : + acm->port.closing_wait / 10; if (copy_to_user(info, &tmp, sizeof(tmp))) return -EFAULT; @@ -820,6 +824,37 @@ static int get_serial_info(struct acm *acm, struct serial_struct __user *info) return 0; } +static int set_serial_info(struct acm *acm, + struct serial_struct __user *newinfo) +{ + struct serial_struct new_serial; + unsigned int closing_wait, close_delay; + int retval = 0; + + if (copy_from_user(&new_serial, newinfo, sizeof(new_serial))) + return -EFAULT; + + close_delay = new_serial.close_delay * 10; + closing_wait = new_serial.closing_wait == ASYNC_CLOSING_WAIT_NONE ? 
+ ASYNC_CLOSING_WAIT_NONE : new_serial.closing_wait * 10; + + mutex_lock(&acm->port.mutex); + + if (!capable(CAP_SYS_ADMIN)) { + if ((close_delay != acm->port.close_delay) || + (closing_wait != acm->port.closing_wait)) + retval = -EPERM; + else + retval = -EOPNOTSUPP; + } else { + acm->port.close_delay = close_delay; + acm->port.closing_wait = closing_wait; + } + + mutex_unlock(&acm->port.mutex); + return retval; +} + static int acm_tty_ioctl(struct tty_struct *tty, unsigned int cmd, unsigned long arg) { @@ -830,6 +865,9 @@ static int acm_tty_ioctl(struct tty_struct *tty, case TIOCGSERIAL: /* gets serial port data */ rv = get_serial_info(acm, (struct serial_struct __user *) arg); break; + case TIOCSSERIAL: + rv = set_serial_info(acm, (struct serial_struct __user *) arg); + break; } return rv; @@ -843,10 +881,6 @@ static const __u32 acm_tty_speed[] = { 2500000, 3000000, 3500000, 4000000 }; -static const __u8 acm_tty_size[] = { - 5, 6, 7, 8 -}; - static void acm_tty_set_termios(struct tty_struct *tty, struct ktermios *termios_old) { @@ -860,7 +894,21 @@ static void acm_tty_set_termios(struct tty_struct *tty, newline.bParityType = termios->c_cflag & PARENB ? (termios->c_cflag & PARODD ? 1 : 2) + (termios->c_cflag & CMSPAR ? 2 : 0) : 0; - newline.bDataBits = acm_tty_size[(termios->c_cflag & CSIZE) >> 4]; + switch (termios->c_cflag & CSIZE) { + case CS5: + newline.bDataBits = 5; + break; + case CS6: + newline.bDataBits = 6; + break; + case CS7: + newline.bDataBits = 7; + break; + case CS8: + default: + newline.bDataBits = 8; + break; + } /* FIXME: Needs to clear unsupported bits in the termios */ acm->clocal = ((termios->c_cflag & CLOCAL) != 0); @@ -1133,7 +1181,8 @@ skip_normal_probe: } - if (data_interface->cur_altsetting->desc.bNumEndpoints < 2) + if (data_interface->cur_altsetting->desc.bNumEndpoints < 2 || + control_interface->cur_altsetting->desc.bNumEndpoints == 0) return -EINVAL; epctrl = &control_interface->cur_altsetting->endpoint[0].desc; @@ -1265,7 +1314,7 @@ made_compressed_probe: if (usb_endpoint_xfer_int(epwrite)) usb_fill_int_urb(snd->urb, usb_dev, - usb_sndbulkpipe(usb_dev, epwrite->bEndpointAddress), + usb_sndintpipe(usb_dev, epwrite->bEndpointAddress), NULL, acm->writesize, acm_write_bulk, snd, epwrite->bInterval); else usb_fill_bulk_urb(snd->urb, usb_dev, @@ -1630,6 +1679,12 @@ static const struct usb_device_id acm_ids[] = { Maybe we should define a new quirk for this. 
*/ }, + { USB_DEVICE(0x0572, 0x1340), /* Conexant CX93010-2x UCMxx */ + .driver_info = NO_UNION_NORMAL, + }, + { USB_DEVICE(0x05f9, 0x4002), /* PSC Scanning, Magellan 800i */ + .driver_info = NO_UNION_NORMAL, + }, { USB_DEVICE(0x1bbb, 0x0003), /* Alcatel OT-I650 */ .driver_info = NO_UNION_NORMAL, /* reports zero length descriptor */ }, diff --git a/drivers/usb/class/cdc-wdm.c b/drivers/usb/class/cdc-wdm.c index 01d247e88081..524fe240fcd1 100644 --- a/drivers/usb/class/cdc-wdm.c +++ b/drivers/usb/class/cdc-wdm.c @@ -134,12 +134,14 @@ static struct usb_driver wdm_driver; /* return intfdata if we own the interface, else look up intf in the list */ static struct wdm_device *wdm_find_device(struct usb_interface *intf) { - struct wdm_device *desc = NULL; + struct wdm_device *desc; spin_lock(&wdm_device_list_lock); list_for_each_entry(desc, &wdm_device_list, device_list) if (desc->intf == intf) - break; + goto found; + desc = NULL; +found: spin_unlock(&wdm_device_list_lock); return desc; @@ -147,12 +149,14 @@ static struct wdm_device *wdm_find_device(struct usb_interface *intf) static struct wdm_device *wdm_find_device_by_minor(int minor) { - struct wdm_device *desc = NULL; + struct wdm_device *desc; spin_lock(&wdm_device_list_lock); list_for_each_entry(desc, &wdm_device_list, device_list) if (desc->intf->minor == minor) - break; + goto found; + desc = NULL; +found: spin_unlock(&wdm_device_list_lock); return desc; diff --git a/drivers/usb/core/devices.c b/drivers/usb/core/devices.c index d95696584762..3440812b4a84 100644 --- a/drivers/usb/core/devices.c +++ b/drivers/usb/core/devices.c @@ -624,7 +624,7 @@ static ssize_t usb_device_read(struct file *file, char __user *buf, /* print devices for all busses */ list_for_each_entry(bus, &usb_bus_list, bus_list) { /* recurse through all children of the root hub */ - if (!bus->root_hub) + if (!bus_to_hcd(bus)->rh_registered) continue; usb_lock_device(bus->root_hub); ret = usb_device_dump(&buf, &nbytes, &skip_bytes, ppos, diff --git a/drivers/usb/core/hcd.c b/drivers/usb/core/hcd.c index 140d3e11f212..e2cc8df3d87b 100644 --- a/drivers/usb/core/hcd.c +++ b/drivers/usb/core/hcd.c @@ -1002,10 +1002,7 @@ static int register_root_hub(struct usb_hcd *hcd) if (retval) { dev_err (parent_dev, "can't register root hub for %s, %d\n", dev_name(&usb_dev->dev), retval); - } - mutex_unlock(&usb_bus_list_lock); - - if (retval == 0) { + } else { spin_lock_irq (&hcd_root_hub_lock); hcd->rh_registered = 1; spin_unlock_irq (&hcd_root_hub_lock); @@ -1014,6 +1011,7 @@ static int register_root_hub(struct usb_hcd *hcd) if (HCD_DEAD(hcd)) usb_hc_died (hcd); /* This time clean up */ } + mutex_unlock(&usb_bus_list_lock); return retval; } diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c index e727b876726c..67dda0dbf81b 100644 --- a/drivers/usb/core/hub.c +++ b/drivers/usb/core/hub.c @@ -507,13 +507,16 @@ static void hub_tt_work(struct work_struct *work) int limit = 100; spin_lock_irqsave (&hub->tt.lock, flags); - while (--limit && !list_empty (&hub->tt.clear_list)) { + while (!list_empty(&hub->tt.clear_list)) { struct list_head *next; struct usb_tt_clear *clear; struct usb_device *hdev = hub->hdev; const struct hc_driver *drv; int status; + if (!hub->quiescing && --limit < 0) + break; + next = hub->tt.clear_list.next; clear = list_entry (next, struct usb_tt_clear, clear_list); list_del (&clear->clear_list); @@ -641,6 +644,60 @@ static int hub_hub_status(struct usb_hub *hub, return ret; } +static int hub_set_port_link_state(struct usb_hub *hub, int port1, + unsigned int 
link_status) +{ + return set_port_feature(hub->hdev, + port1 | (link_status << 3), + USB_PORT_FEAT_LINK_STATE); +} + +/* + * If USB 3.0 ports are placed into the Disabled state, they will no longer + * detect any device connects or disconnects. This is generally not what the + * USB core wants, since it expects a disabled port to produce a port status + * change event when a new device connects. + * + * Instead, set the link state to Disabled, wait for the link to settle into + * that state, clear any change bits, and then put the port into the RxDetect + * state. + */ +static int hub_usb3_port_disable(struct usb_hub *hub, int port1) +{ + int ret; + int total_time; + u16 portchange, portstatus; + + if (!hub_is_superspeed(hub->hdev)) + return -EINVAL; + + ret = hub_set_port_link_state(hub, port1, USB_SS_PORT_LS_SS_DISABLED); + if (ret) { + dev_err(hub->intfdev, "cannot disable port %d (err = %d)\n", + port1, ret); + return ret; + } + + /* Wait for the link to enter the disabled state. */ + for (total_time = 0; ; total_time += HUB_DEBOUNCE_STEP) { + ret = hub_port_status(hub, port1, &portstatus, &portchange); + if (ret < 0) + return ret; + + if ((portstatus & USB_PORT_STAT_LINK_STATE) == + USB_SS_PORT_LS_SS_DISABLED) + break; + if (total_time >= HUB_DEBOUNCE_TIMEOUT) + break; + msleep(HUB_DEBOUNCE_STEP); + } + if (total_time >= HUB_DEBOUNCE_TIMEOUT) + dev_warn(hub->intfdev, "Could not disable port %d after %d ms\n", + port1, total_time); + + return hub_set_port_link_state(hub, port1, USB_SS_PORT_LS_RX_DETECT); +} + static int hub_port_disable(struct usb_hub *hub, int port1, int set_state) { struct usb_device *hdev = hub->hdev; @@ -649,8 +706,13 @@ static int hub_port_disable(struct usb_hub *hub, int port1, int set_state) if (hdev->children[port1-1] && set_state) usb_set_device_state(hdev->children[port1-1], USB_STATE_NOTATTACHED); - if (!hub->error && !hub_is_superspeed(hub->hdev)) - ret = clear_port_feature(hdev, port1, USB_PORT_FEAT_ENABLE); + if (!hub->error) { + if (hub_is_superspeed(hub->hdev)) + ret = hub_usb3_port_disable(hub, port1); + else + ret = clear_port_feature(hdev, port1, + USB_PORT_FEAT_ENABLE); + } if (ret) dev_err(hub->intfdev, "cannot disable port %d (err = %d)\n", port1, ret); @@ -978,7 +1040,7 @@ static void hub_quiesce(struct usb_hub *hub, enum hub_quiescing_type type) if (hub->has_indicators) cancel_delayed_work_sync(&hub->leds); if (hub->tt.hub) - cancel_work_sync(&hub->tt.clear_work); + flush_work_sync(&hub->tt.clear_work); } /* caller has locked the hub device */ @@ -2106,7 +2168,7 @@ static unsigned hub_is_wusb(struct usb_hub *hub) #define HUB_SHORT_RESET_TIME 10 #define HUB_BH_RESET_TIME 50 #define HUB_LONG_RESET_TIME 200 -#define HUB_RESET_TIMEOUT 500 +#define HUB_RESET_TIMEOUT 800 static int hub_port_reset(struct usb_hub *hub, int port1, struct usb_device *udev, unsigned int delay, bool warm); @@ -2141,6 +2203,10 @@ static int hub_port_wait_reset(struct usb_hub *hub, int port1, if (ret < 0) return ret; + /* The port state is unknown until the reset completes. */ + if ((portstatus & USB_PORT_STAT_RESET)) + goto delay; + /* * Some buggy devices require a warm reset to be issued even * when the port appears not to be connected. 
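The new hub_usb3_port_disable() above waits for the link to actually reach SS.Disabled before clearing the change bits and re-arming RxDetect. A standalone model of that settle loop follows; the step and timeout mirror HUB_DEBOUNCE_STEP and HUB_DEBOUNCE_TIMEOUT in role only, and the values plus the fake status read are illustrative.

#include <stdio.h>

#define STEP_MS     25
#define TIMEOUT_MS  1500

static int link_is_disabled(int elapsed_ms)
{
	return elapsed_ms >= 100;          /* pretend the link settles after 100 ms */
}

static int wait_for_disabled(void)
{
	int total;

	for (total = 0; ; total += STEP_MS) {
		if (link_is_disabled(total))
			return 0;              /* settled: caller can move on to RxDetect */
		if (total >= TIMEOUT_MS)
			return -1;             /* give up: caller logs a warning and continues */
		/* the real loop sleeps here with msleep(HUB_DEBOUNCE_STEP) */
	}
}

int main(void)
{
	printf("%d\n", wait_for_disabled());
	return 0;
}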
@@ -2186,11 +2252,7 @@ static int hub_port_wait_reset(struct usb_hub *hub, int port1, if ((portchange & USB_PORT_STAT_C_CONNECTION)) return -ENOTCONN; - /* if we`ve finished resetting, then break out of - * the loop - */ - if (!(portstatus & USB_PORT_STAT_RESET) && - (portstatus & USB_PORT_STAT_ENABLE)) { + if ((portstatus & USB_PORT_STAT_ENABLE)) { if (hub_is_wusb(hub)) udev->speed = USB_SPEED_WIRELESS; else if (hub_is_superspeed(hub->hdev)) @@ -2204,10 +2266,15 @@ static int hub_port_wait_reset(struct usb_hub *hub, int port1, return 0; } } else { - if (portchange & USB_PORT_STAT_C_BH_RESET) - return 0; + if (!(portstatus & USB_PORT_STAT_CONNECTION) || + hub_port_warm_reset_required(hub, + portstatus)) + return -ENOTCONN; + + return 0; } +delay: /* switch to the long delay after two short delay failures */ if (delay_time >= 2 * HUB_SHORT_RESET_TIME) delay = HUB_LONG_RESET_TIME; @@ -2231,14 +2298,11 @@ static void hub_port_finish_reset(struct usb_hub *hub, int port1, msleep(10 + 40); update_devnum(udev, 0); hcd = bus_to_hcd(udev->bus); - if (hcd->driver->reset_device) { - *status = hcd->driver->reset_device(hcd, udev); - if (*status < 0) { - dev_err(&udev->dev, "Cannot reset " - "HCD device state\n"); - break; - } - } + /* The xHC may think the device is already reset, + * so ignore the status. + */ + if (hcd->driver->reset_device) + hcd->driver->reset_device(hcd, udev); } /* FALL THROUGH */ case -ENOTCONN: @@ -2246,16 +2310,16 @@ static void hub_port_finish_reset(struct usb_hub *hub, int port1, clear_port_feature(hub->hdev, port1, USB_PORT_FEAT_C_RESET); /* FIXME need disconnect() for NOTATTACHED device */ - if (warm) { + if (hub_is_superspeed(hub->hdev)) { clear_port_feature(hub->hdev, port1, USB_PORT_FEAT_C_BH_PORT_RESET); clear_port_feature(hub->hdev, port1, USB_PORT_FEAT_C_PORT_LINK_STATE); - } else { + } + if (!warm) usb_set_device_state(udev, *status ? USB_STATE_NOTATTACHED : USB_STATE_DEFAULT); - } break; } } @@ -2399,6 +2463,23 @@ static int check_port_resume_type(struct usb_device *udev, } #ifdef CONFIG_USB_SUSPEND +/* + * usb_disable_function_remotewakeup - disable usb3.0 + * device's function remote wakeup + * @udev: target device + * + * Assume there's only one function on the USB 3.0 + * device and disable remote wake for the first + * interface. FIXME if the interface association + * descriptor shows there's more than one function. 
+ */ +static int usb_disable_function_remotewakeup(struct usb_device *udev) +{ + return usb_control_msg(udev, usb_sndctrlpipe(udev, 0), + USB_REQ_CLEAR_FEATURE, USB_RECIP_INTERFACE, + USB_INTRF_FUNC_SUSPEND, 0, NULL, 0, + USB_CTRL_SET_TIMEOUT); +} /* * usb_port_suspend - suspend a usb device's upstream port @@ -2505,12 +2586,19 @@ int usb_port_suspend(struct usb_device *udev, pm_message_t msg) dev_dbg(hub->intfdev, "can't suspend port %d, status %d\n", port1, status); /* paranoia: "should not happen" */ - if (udev->do_remote_wakeup) - (void) usb_control_msg(udev, usb_sndctrlpipe(udev, 0), - USB_REQ_CLEAR_FEATURE, USB_RECIP_DEVICE, - USB_DEVICE_REMOTE_WAKEUP, 0, - NULL, 0, - USB_CTRL_SET_TIMEOUT); + if (udev->do_remote_wakeup) { + if (!hub_is_superspeed(hub->hdev)) { + (void) usb_control_msg(udev, + usb_sndctrlpipe(udev, 0), + USB_REQ_CLEAR_FEATURE, + USB_RECIP_DEVICE, + USB_DEVICE_REMOTE_WAKEUP, 0, + NULL, 0, + USB_CTRL_SET_TIMEOUT); + } else + (void) usb_disable_function_remotewakeup(udev); + + } /* Try to enable USB2 hardware LPM again */ if (udev->usb2_hw_lpm_capable == 1) @@ -2545,7 +2633,7 @@ int usb_port_suspend(struct usb_device *udev, pm_message_t msg) static int finish_port_resume(struct usb_device *udev) { int status = 0; - u16 devstatus; + u16 devstatus = 0; /* caller owns the udev device lock */ dev_dbg(&udev->dev, "%s\n", @@ -2590,21 +2678,37 @@ static int finish_port_resume(struct usb_device *udev) if (status) { dev_dbg(&udev->dev, "gone after usb resume? status %d\n", status); - } else if (udev->actconfig) { - le16_to_cpus(&devstatus); - if (devstatus & (1 << USB_DEVICE_REMOTE_WAKEUP)) { - status = usb_control_msg(udev, - usb_sndctrlpipe(udev, 0), - USB_REQ_CLEAR_FEATURE, + /* + * There are a few quirky devices which violate the standard + * by claiming to have remote wakeup enabled after a reset, + * which crash if the feature is cleared, hence check for + * udev->reset_resume + */ + } else if (udev->actconfig && !udev->reset_resume) { + if (!hub_is_superspeed(udev->parent)) { + le16_to_cpus(&devstatus); + if (devstatus & (1 << USB_DEVICE_REMOTE_WAKEUP)) + status = usb_control_msg(udev, + usb_sndctrlpipe(udev, 0), + USB_REQ_CLEAR_FEATURE, USB_RECIP_DEVICE, - USB_DEVICE_REMOTE_WAKEUP, 0, - NULL, 0, - USB_CTRL_SET_TIMEOUT); - if (status) - dev_dbg(&udev->dev, - "disable remote wakeup, status %d\n", - status); + USB_DEVICE_REMOTE_WAKEUP, 0, + NULL, 0, + USB_CTRL_SET_TIMEOUT); + } else { + status = usb_get_status(udev, USB_RECIP_INTERFACE, 0, + &devstatus); + le16_to_cpus(&devstatus); + if (!status && devstatus & (USB_INTRF_STAT_FUNC_RW_CAP + | USB_INTRF_STAT_FUNC_RW)) + status = + usb_disable_function_remotewakeup(udev); } + + if (status) + dev_dbg(&udev->dev, + "disable remote wakeup, status %d\n", + status); status = 0; } return status; @@ -3771,9 +3875,14 @@ static void hub_events(void) * SS.Inactive state. 
*/ if (hub_port_warm_reset_required(hub, portstatus)) { + int status; + dev_dbg(hub_dev, "warm reset port %d\n", i); - hub_port_reset(hub, i, NULL, + status = hub_port_reset(hub, i, NULL, HUB_BH_RESET_TIME, true); + if (status < 0) + hub_port_disable(hub, i, 1); + connect_change = 0; } if (connect_change) diff --git a/drivers/usb/core/message.c b/drivers/usb/core/message.c index ef116a55afa4..ab11ca3c79a5 100644 --- a/drivers/usb/core/message.c +++ b/drivers/usb/core/message.c @@ -1770,28 +1770,8 @@ free_interfaces: goto free_interfaces; } - ret = usb_control_msg(dev, usb_sndctrlpipe(dev, 0), - USB_REQ_SET_CONFIGURATION, 0, configuration, 0, - NULL, 0, USB_CTRL_SET_TIMEOUT); - if (ret < 0) { - /* All the old state is gone, so what else can we do? - * The device is probably useless now anyway. - */ - cp = NULL; - } - - dev->actconfig = cp; - if (!cp) { - usb_set_device_state(dev, USB_STATE_ADDRESS); - usb_hcd_alloc_bandwidth(dev, NULL, NULL, NULL); - mutex_unlock(hcd->bandwidth_mutex); - usb_autosuspend_device(dev); - goto free_interfaces; - } - mutex_unlock(hcd->bandwidth_mutex); - usb_set_device_state(dev, USB_STATE_CONFIGURED); - - /* Initialize the new interface structures and the + /* + * Initialize the new interface structures and the * hc/hcd/usbcore interface/endpoint state. */ for (i = 0; i < nintf; ++i) { @@ -1835,6 +1815,35 @@ free_interfaces: } kfree(new_interfaces); + ret = usb_control_msg(dev, usb_sndctrlpipe(dev, 0), + USB_REQ_SET_CONFIGURATION, 0, configuration, 0, + NULL, 0, USB_CTRL_SET_TIMEOUT); + if (ret < 0 && cp) { + /* + * All the old state is gone, so what else can we do? + * The device is probably useless now anyway. + */ + usb_hcd_alloc_bandwidth(dev, NULL, NULL, NULL); + for (i = 0; i < nintf; ++i) { + usb_disable_interface(dev, cp->interface[i], true); + put_device(&cp->interface[i]->dev); + cp->interface[i] = NULL; + } + cp = NULL; + } + + dev->actconfig = cp; + mutex_unlock(hcd->bandwidth_mutex); + + if (!cp) { + usb_set_device_state(dev, USB_STATE_ADDRESS); + + /* Leave LPM disabled while the device is unconfigured. 
*/ + usb_autosuspend_device(dev); + return ret; + } + usb_set_device_state(dev, USB_STATE_CONFIGURED); + if (cp->string == NULL && !(dev->quirks & USB_QUIRK_CONFIG_INTF_STRINGS)) cp->string = usb_cache_string(dev, cp->desc.iConfiguration); diff --git a/drivers/usb/core/quirks.c b/drivers/usb/core/quirks.c index 32d3adc315f5..8b2a9d83090e 100644 --- a/drivers/usb/core/quirks.c +++ b/drivers/usb/core/quirks.c @@ -96,6 +96,10 @@ static const struct usb_device_id usb_quirk_list[] = { { USB_DEVICE(0x04b4, 0x0526), .driver_info = USB_QUIRK_CONFIG_INTF_STRINGS }, + /* Microchip Joss Optical infrared touchboard device */ + { USB_DEVICE(0x04d8, 0x000c), .driver_info = + USB_QUIRK_CONFIG_INTF_STRINGS }, + /* Samsung Android phone modem - ID conflict with SPH-I500 */ { USB_DEVICE(0x04e8, 0x6601), .driver_info = USB_QUIRK_CONFIG_INTF_STRINGS }, diff --git a/drivers/usb/dwc3/ep0.c b/drivers/usb/dwc3/ep0.c index 3584a169886f..e4d87d700554 100644 --- a/drivers/usb/dwc3/ep0.c +++ b/drivers/usb/dwc3/ep0.c @@ -569,7 +569,6 @@ static void dwc3_ep0_complete_data(struct dwc3 *dwc, transferred = min_t(u32, ur->length, transfer_size - length); memcpy(ur->buf, dwc->ep0_bounce, transferred); - dwc->ep0_bounced = false; } else { transferred = ur->length - length; } diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c index 5255fe975ea1..f62629b029fc 100644 --- a/drivers/usb/dwc3/gadget.c +++ b/drivers/usb/dwc3/gadget.c @@ -238,8 +238,11 @@ void dwc3_gadget_giveback(struct dwc3_ep *dep, struct dwc3_request *req, if (req->request.status == -EINPROGRESS) req->request.status = status; - usb_gadget_unmap_request(&dwc->gadget, &req->request, - req->direction); + if (dwc->ep0_bounced && dep->number == 0) + dwc->ep0_bounced = false; + else + usb_gadget_unmap_request(&dwc->gadget, &req->request, + req->direction); dev_dbg(dwc->dev, "request %p from %s completed %d/%d ===> %d\n", req, dep->name, req->request.actual, @@ -1450,6 +1453,7 @@ static int __devinit dwc3_gadget_init_endpoints(struct dwc3 *dwc) if (epnum == 0 || epnum == 1) { dep->endpoint.maxpacket = 512; + dep->endpoint.maxburst = 1; dep->endpoint.ops = &dwc3_gadget_ep0_ops; if (!epnum) dwc->gadget.ep0 = &dep->endpoint; @@ -1774,6 +1778,7 @@ static void dwc3_stop_active_transfer(struct dwc3 *dwc, u32 epnum) ret = dwc3_send_gadget_ep_cmd(dwc, dep->number, cmd, ¶ms); WARN_ON_ONCE(ret); dep->res_trans_idx = 0; + dep->flags &= ~DWC3_EP_BUSY; } } diff --git a/drivers/usb/gadget/at91_udc.c b/drivers/usb/gadget/at91_udc.c index 9d7bcd910074..be6952e2fc5a 100644 --- a/drivers/usb/gadget/at91_udc.c +++ b/drivers/usb/gadget/at91_udc.c @@ -1735,7 +1735,7 @@ static int __devinit at91udc_probe(struct platform_device *pdev) int retval; struct resource *res; - if (!dev->platform_data) { + if (!dev->platform_data && !pdev->dev.of_node) { /* small (so we copy it) but critical! 
*/ DBG("missing platform_data\n"); return -ENODEV; diff --git a/drivers/usb/gadget/dummy_hcd.c b/drivers/usb/gadget/dummy_hcd.c index 170cbe89d9f8..9a7b436bd925 100644 --- a/drivers/usb/gadget/dummy_hcd.c +++ b/drivers/usb/gadget/dummy_hcd.c @@ -126,10 +126,7 @@ static const char ep0name[] = "ep0"; static const char *const ep_name[] = { ep0name, /* everyone has ep0 */ - /* act like a net2280: high speed, six configurable endpoints */ - "ep-a", "ep-b", "ep-c", "ep-d", "ep-e", "ep-f", - - /* or like pxa250: fifteen fixed function endpoints */ + /* act like a pxa250: fifteen fixed function endpoints */ "ep1in-bulk", "ep2out-bulk", "ep3in-iso", "ep4out-iso", "ep5in-int", "ep6in-bulk", "ep7out-bulk", "ep8in-iso", "ep9out-iso", "ep10in-int", "ep11in-bulk", "ep12out-bulk", "ep13in-iso", "ep14out-iso", @@ -137,6 +134,10 @@ static const char *const ep_name[] = { /* or like sa1100: two fixed function endpoints */ "ep1out-bulk", "ep2in-bulk", + + /* and now some generic EPs so we have enough in multi config */ + "ep3out", "ep4in", "ep5out", "ep6out", "ep7in", "ep8out", "ep9in", + "ep10out", "ep11out", "ep12in", "ep13out", "ep14in", "ep15out", }; #define DUMMY_ENDPOINTS ARRAY_SIZE(ep_name) @@ -2505,10 +2506,8 @@ static int dummy_hcd_probe(struct platform_device *pdev) hs_hcd->has_tt = 1; retval = usb_add_hcd(hs_hcd, 0, 0); - if (retval != 0) { - usb_put_hcd(hs_hcd); - return retval; - } + if (retval) + goto put_usb2_hcd; if (mod_data.is_super_speed) { ss_hcd = usb_create_shared_hcd(&dummy_hcd, &pdev->dev, @@ -2527,6 +2526,8 @@ static int dummy_hcd_probe(struct platform_device *pdev) put_usb3_hcd: usb_put_hcd(ss_hcd); dealloc_usb2_hcd: + usb_remove_hcd(hs_hcd); +put_usb2_hcd: usb_put_hcd(hs_hcd); the_controller.hs_hcd = the_controller.ss_hcd = NULL; return retval; diff --git a/drivers/usb/gadget/f_ecm.c b/drivers/usb/gadget/f_ecm.c index 30b908f2a53d..672c66aad6b9 100644 --- a/drivers/usb/gadget/f_ecm.c +++ b/drivers/usb/gadget/f_ecm.c @@ -808,9 +808,9 @@ fail: /* we might as well release our claims on endpoints */ if (ecm->notify) ecm->notify->driver_data = NULL; - if (ecm->port.out_ep->desc) + if (ecm->port.out_ep) ecm->port.out_ep->driver_data = NULL; - if (ecm->port.in_ep->desc) + if (ecm->port.in_ep) ecm->port.in_ep->driver_data = NULL; ERROR(cdev, "%s: can't bind, err %d\n", f->name, status); diff --git a/drivers/usb/gadget/f_eem.c b/drivers/usb/gadget/f_eem.c index 1a7b2dd7d408..a9cf20522ffa 100644 --- a/drivers/usb/gadget/f_eem.c +++ b/drivers/usb/gadget/f_eem.c @@ -319,10 +319,9 @@ fail: if (f->hs_descriptors) usb_free_descriptors(f->hs_descriptors); - /* we might as well release our claims on endpoints */ - if (eem->port.out_ep->desc) + if (eem->port.out_ep) eem->port.out_ep->driver_data = NULL; - if (eem->port.in_ep->desc) + if (eem->port.in_ep) eem->port.in_ep->driver_data = NULL; ERROR(cdev, "%s: can't bind, err %d\n", f->name, status); diff --git a/drivers/usb/gadget/f_midi.c b/drivers/usb/gadget/f_midi.c index 2f7e8f2930cc..1bf95964056d 100644 --- a/drivers/usb/gadget/f_midi.c +++ b/drivers/usb/gadget/f_midi.c @@ -416,6 +416,7 @@ static void f_midi_unbind(struct usb_configuration *c, struct usb_function *f) midi->id = NULL; usb_free_descriptors(f->descriptors); + usb_free_descriptors(f->hs_descriptors); kfree(midi); } diff --git a/drivers/usb/gadget/f_ncm.c b/drivers/usb/gadget/f_ncm.c index aab8eded045b..d7811ae29729 100644 --- a/drivers/usb/gadget/f_ncm.c +++ b/drivers/usb/gadget/f_ncm.c @@ -1259,9 +1259,9 @@ fail: /* we might as well release our claims on endpoints */ if (ncm->notify) 
ncm->notify->driver_data = NULL; - if (ncm->port.out_ep->desc) + if (ncm->port.out_ep) ncm->port.out_ep->driver_data = NULL; - if (ncm->port.in_ep->desc) + if (ncm->port.in_ep) ncm->port.in_ep->driver_data = NULL; ERROR(cdev, "%s: can't bind, err %d\n", f->name, status); diff --git a/drivers/usb/gadget/f_phonet.c b/drivers/usb/gadget/f_phonet.c index 965a6293206a..16512f921bc0 100644 --- a/drivers/usb/gadget/f_phonet.c +++ b/drivers/usb/gadget/f_phonet.c @@ -531,7 +531,7 @@ int pn_bind(struct usb_configuration *c, struct usb_function *f) req = usb_ep_alloc_request(fp->out_ep, GFP_KERNEL); if (!req) - goto err; + goto err_req; req->complete = pn_rx_complete; fp->out_reqv[i] = req; @@ -540,14 +540,18 @@ int pn_bind(struct usb_configuration *c, struct usb_function *f) /* Outgoing USB requests */ fp->in_req = usb_ep_alloc_request(fp->in_ep, GFP_KERNEL); if (!fp->in_req) - goto err; + goto err_req; INFO(cdev, "USB CDC Phonet function\n"); INFO(cdev, "using %s, OUT %s, IN %s\n", cdev->gadget->name, fp->out_ep->name, fp->in_ep->name); return 0; +err_req: + for (i = 0; i < phonet_rxq_size && fp->out_reqv[i]; i++) + usb_ep_free_request(fp->out_ep, fp->out_reqv[i]); err: + if (fp->out_ep) fp->out_ep->driver_data = NULL; if (fp->in_ep) diff --git a/drivers/usb/gadget/f_rndis.c b/drivers/usb/gadget/f_rndis.c index 84ecc24c8c6c..4422c32ad1f9 100644 --- a/drivers/usb/gadget/f_rndis.c +++ b/drivers/usb/gadget/f_rndis.c @@ -800,9 +800,9 @@ fail: /* we might as well release our claims on endpoints */ if (rndis->notify) rndis->notify->driver_data = NULL; - if (rndis->port.out_ep->desc) + if (rndis->port.out_ep) rndis->port.out_ep->driver_data = NULL; - if (rndis->port.in_ep->desc) + if (rndis->port.in_ep) rndis->port.in_ep->driver_data = NULL; ERROR(cdev, "%s: can't bind, err %d\n", f->name, status); diff --git a/drivers/usb/gadget/f_subset.c b/drivers/usb/gadget/f_subset.c index 21ab474aca07..e5bb96628639 100644 --- a/drivers/usb/gadget/f_subset.c +++ b/drivers/usb/gadget/f_subset.c @@ -370,9 +370,9 @@ fail: usb_free_descriptors(f->hs_descriptors); /* we might as well release our claims on endpoints */ - if (geth->port.out_ep->desc) + if (geth->port.out_ep) geth->port.out_ep->driver_data = NULL; - if (geth->port.in_ep->desc) + if (geth->port.in_ep) geth->port.in_ep->driver_data = NULL; ERROR(cdev, "%s: can't bind, err %d\n", f->name, status); diff --git a/drivers/usb/gadget/f_uvc.c b/drivers/usb/gadget/f_uvc.c index 2022fe492148..a0abc65ecbea 100644 --- a/drivers/usb/gadget/f_uvc.c +++ b/drivers/usb/gadget/f_uvc.c @@ -335,7 +335,6 @@ uvc_register_video(struct uvc_device *uvc) return -ENOMEM; video->parent = &cdev->gadget->dev; - video->minor = -1; video->fops = &uvc_v4l2_fops; video->release = video_device_release; strncpy(video->name, cdev->gadget->name, sizeof(video->name)); @@ -462,23 +461,12 @@ uvc_function_unbind(struct usb_configuration *c, struct usb_function *f) INFO(cdev, "uvc_function_unbind\n"); - if (uvc->vdev) { - if (uvc->vdev->minor == -1) - video_device_release(uvc->vdev); - else - video_unregister_device(uvc->vdev); - uvc->vdev = NULL; - } - - if (uvc->control_ep) - uvc->control_ep->driver_data = NULL; - if (uvc->video.ep) - uvc->video.ep->driver_data = NULL; + video_unregister_device(uvc->vdev); + uvc->control_ep->driver_data = NULL; + uvc->video.ep->driver_data = NULL; - if (uvc->control_req) { - usb_ep_free_request(cdev->gadget->ep0, uvc->control_req); - kfree(uvc->control_buf); - } + usb_ep_free_request(cdev->gadget->ep0, uvc->control_req); + kfree(uvc->control_buf); 
kfree(f->descriptors); kfree(f->hs_descriptors); @@ -563,7 +551,22 @@ uvc_function_bind(struct usb_configuration *c, struct usb_function *f) return 0; error: - uvc_function_unbind(c, f); + if (uvc->vdev) + video_device_release(uvc->vdev); + + if (uvc->control_ep) + uvc->control_ep->driver_data = NULL; + if (uvc->video.ep) + uvc->video.ep->driver_data = NULL; + + if (uvc->control_req) { + usb_ep_free_request(cdev->gadget->ep0, uvc->control_req); + kfree(uvc->control_buf); + } + + kfree(f->descriptors); + kfree(f->hs_descriptors); + kfree(f->ss_descriptors); return ret; } diff --git a/drivers/usb/host/ehci-hub.c b/drivers/usb/host/ehci-hub.c index 1d16f41cc336..816334874f43 100644 --- a/drivers/usb/host/ehci-hub.c +++ b/drivers/usb/host/ehci-hub.c @@ -633,7 +633,7 @@ ehci_hub_status_data (struct usb_hcd *hcd, char *buf) status = STS_PCD; } } - /* FIXME autosuspend idle root hubs */ + spin_unlock_irqrestore (&ehci->lock, flags); return status ? retval : 0; } diff --git a/drivers/usb/host/ehci-omap.c b/drivers/usb/host/ehci-omap.c index e669c6a7e91e..12d3f284998e 100644 --- a/drivers/usb/host/ehci-omap.c +++ b/drivers/usb/host/ehci-omap.c @@ -371,7 +371,7 @@ static const struct hc_driver ehci_omap_hc_driver = { .clear_tt_buffer_complete = ehci_clear_tt_buffer_complete, }; -MODULE_ALIAS("platform:omap-ehci"); +MODULE_ALIAS("platform:ehci-omap"); MODULE_AUTHOR("Texas Instruments, Inc."); MODULE_AUTHOR("Felipe Balbi <felipe.balbi@nokia.com>"); diff --git a/drivers/usb/host/ehci-pci.c b/drivers/usb/host/ehci-pci.c index 123481793a47..0909783a034a 100644 --- a/drivers/usb/host/ehci-pci.c +++ b/drivers/usb/host/ehci-pci.c @@ -362,7 +362,8 @@ static bool usb_is_intel_switchable_ehci(struct pci_dev *pdev) pdev->vendor == PCI_VENDOR_ID_INTEL && (pdev->device == 0x1E26 || pdev->device == 0x8C2D || - pdev->device == 0x8C26); + pdev->device == 0x8C26 || + pdev->device == 0x9C26); } static void ehci_enable_xhci_companion(void) diff --git a/drivers/usb/host/ehci-q.c b/drivers/usb/host/ehci-q.c index 0f26ec2cedd4..a5feaa2dbb00 100644 --- a/drivers/usb/host/ehci-q.c +++ b/drivers/usb/host/ehci-q.c @@ -128,9 +128,17 @@ qh_refresh (struct ehci_hcd *ehci, struct ehci_qh *qh) else { qtd = list_entry (qh->qtd_list.next, struct ehci_qtd, qtd_list); - /* first qtd may already be partially processed */ - if (cpu_to_hc32(ehci, qtd->qtd_dma) == qh->hw->hw_current) + /* + * first qtd may already be partially processed. + * If we come here during unlink, the QH overlay region + * might have reference to the just unlinked qtd. The + * qtd is updated in qh_completions(). Update the QH + * overlay here. 
+ */ + if (cpu_to_hc32(ehci, qtd->qtd_dma) == qh->hw->hw_current) { + qh->hw->hw_qtd_next = qtd->hw_next; qtd = NULL; + } } if (qtd) diff --git a/drivers/usb/host/ehci-sched.c b/drivers/usb/host/ehci-sched.c index daea8817c2dd..3ae424534913 100644 --- a/drivers/usb/host/ehci-sched.c +++ b/drivers/usb/host/ehci-sched.c @@ -236,7 +236,7 @@ static inline unsigned char tt_start_uframe(struct ehci_hcd *ehci, __hc32 mask) } static const unsigned char -max_tt_usecs[] = { 125, 125, 125, 125, 125, 125, 30, 0 }; +max_tt_usecs[] = { 125, 125, 125, 125, 125, 125, 125, 25 }; /* carryover low/fullspeed bandwidth that crosses uframe boundries */ static inline void carryover_tt_bandwidth(unsigned short tt_usecs[8]) diff --git a/drivers/usb/host/ohci-at91.c b/drivers/usb/host/ohci-at91.c index 55d3d6414478..7841b0aa4072 100644 --- a/drivers/usb/host/ohci-at91.c +++ b/drivers/usb/host/ohci-at91.c @@ -466,7 +466,8 @@ static irqreturn_t ohci_hcd_at91_overcurrent_irq(int irq, void *data) /* From the GPIO notifying the over-current situation, find * out the corresponding port */ at91_for_each_port(port) { - if (gpio_to_irq(pdata->overcurrent_pin[port]) == irq) { + if (gpio_is_valid(pdata->overcurrent_pin[port]) && + gpio_to_irq(pdata->overcurrent_pin[port]) == irq) { gpio = pdata->overcurrent_pin[port]; break; } @@ -569,6 +570,16 @@ static int __devinit ohci_hcd_at91_drv_probe(struct platform_device *pdev) if (pdata) { at91_for_each_port(i) { + /* + * do not configure PIO if not in relation with + * real USB port on board + */ + if (i >= pdata->ports) { + pdata->vbus_pin[i] = -EINVAL; + pdata->overcurrent_pin[i] = -EINVAL; + break; + } + if (!gpio_is_valid(pdata->vbus_pin[i])) continue; gpio = pdata->vbus_pin[i]; diff --git a/drivers/usb/host/ohci-q.c b/drivers/usb/host/ohci-q.c index c5a1ea9145fa..9d00d4721bca 100644 --- a/drivers/usb/host/ohci-q.c +++ b/drivers/usb/host/ohci-q.c @@ -1128,6 +1128,25 @@ dl_done_list (struct ohci_hcd *ohci) while (td) { struct td *td_next = td->next_dl_td; + struct ed *ed = td->ed; + + /* + * Some OHCI controllers (NVIDIA for sure, maybe others) + * occasionally forget to add TDs to the done queue. Since + * TDs for a given endpoint are always processed in order, + * if we find a TD on the donelist then all of its + * predecessors must be finished as well. 
+ */ + for (;;) { + struct td *td2; + + td2 = list_first_entry(&ed->td_list, struct td, + td_list); + if (td2 == td) + break; + takeback_td(ohci, td2); + } + takeback_td(ohci, td); td = td_next; } diff --git a/drivers/usb/host/pci-quirks.c b/drivers/usb/host/pci-quirks.c index c5e9e4a76f14..78933512c18b 100644 --- a/drivers/usb/host/pci-quirks.c +++ b/drivers/usb/host/pci-quirks.c @@ -75,7 +75,9 @@ #define NB_PIF0_PWRDOWN_1 0x01100013 #define USB_INTEL_XUSB2PR 0xD0 +#define USB_INTEL_USB2PRM 0xD4 #define USB_INTEL_USB3_PSSEN 0xD8 +#define USB_INTEL_USB3PRM 0xDC static struct amd_chipset_info { struct pci_dev *nb_dev; @@ -543,7 +545,14 @@ static const struct dmi_system_id __devinitconst ehci_dmi_nohandoff_table[] = { /* Pegatron Lucid (Ordissimo AIRIS) */ .matches = { DMI_MATCH(DMI_BOARD_NAME, "M11JB"), - DMI_MATCH(DMI_BIOS_VERSION, "Lucid-GE-133"), + DMI_MATCH(DMI_BIOS_VERSION, "Lucid-"), + }, + }, + { + /* Pegatron Lucid (Ordissimo) */ + .matches = { + DMI_MATCH(DMI_BOARD_NAME, "Ordissimo"), + DMI_MATCH(DMI_BIOS_VERSION, "Lucid-"), }, }, { } @@ -714,6 +723,7 @@ static int handshake(void __iomem *ptr, u32 mask, u32 done, } #define PCI_DEVICE_ID_INTEL_LYNX_POINT_XHCI 0x8C31 +#define PCI_DEVICE_ID_INTEL_LYNX_POINT_LP_XHCI 0x9C31 bool usb_is_intel_ppt_switchable_xhci(struct pci_dev *pdev) { @@ -727,7 +737,8 @@ bool usb_is_intel_lpt_switchable_xhci(struct pci_dev *pdev) { return pdev->class == PCI_CLASS_SERIAL_USB_XHCI && pdev->vendor == PCI_VENDOR_ID_INTEL && - pdev->device == PCI_DEVICE_ID_INTEL_LYNX_POINT_XHCI; + (pdev->device == PCI_DEVICE_ID_INTEL_LYNX_POINT_XHCI || + pdev->device == PCI_DEVICE_ID_INTEL_LYNX_POINT_LP_XHCI); } bool usb_is_intel_switchable_xhci(struct pci_dev *pdev) @@ -769,13 +780,22 @@ void usb_enable_xhci_ports(struct pci_dev *xhci_pdev) "defaulting to EHCI.\n"); dev_warn(&xhci_pdev->dev, "USB 3.0 devices will work at USB 2.0 speeds.\n"); + usb_disable_xhci_ports(xhci_pdev); return; } - ports_available = 0xffffffff; + /* Read USB3PRM, the USB 3.0 Port Routing Mask Register + * Indicate the ports that can be changed from OS. + */ + pci_read_config_dword(xhci_pdev, USB_INTEL_USB3PRM, + &ports_available); + + dev_dbg(&xhci_pdev->dev, "Configurable ports to enable SuperSpeed: 0x%x\n", + ports_available); + /* Write USB3_PSSEN, the USB 3.0 Port SuperSpeed Enable - * Register, to turn on SuperSpeed terminations for all - * available ports. + * Register, to turn on SuperSpeed terminations for the + * switchable ports. */ pci_write_config_dword(xhci_pdev, USB_INTEL_USB3_PSSEN, cpu_to_le32(ports_available)); @@ -785,7 +805,16 @@ void usb_enable_xhci_ports(struct pci_dev *xhci_pdev) dev_dbg(&xhci_pdev->dev, "USB 3.0 ports that are now enabled " "under xHCI: 0x%x\n", ports_available); - ports_available = 0xffffffff; + /* Read XUSB2PRM, xHCI USB 2.0 Port Routing Mask Register + * Indicate the USB 2.0 ports to be controlled by the xHCI host. + */ + + pci_read_config_dword(xhci_pdev, USB_INTEL_USB2PRM, + &ports_available); + + dev_dbg(&xhci_pdev->dev, "Configurable USB 2.0 ports to hand over to xCHI: 0x%x\n", + ports_available); + /* Write XUSB2PR, the xHC USB 2.0 Port Routing Register, to * switch the USB 2.0 power and data lines over to the xHCI * host. 
@@ -822,12 +851,12 @@ static void __devinit quirk_usb_handoff_xhci(struct pci_dev *pdev) void __iomem *op_reg_base; u32 val; int timeout; + int len = pci_resource_len(pdev, 0); if (!mmio_resource_enabled(pdev, 0)) return; - base = ioremap_nocache(pci_resource_start(pdev, 0), - pci_resource_len(pdev, 0)); + base = ioremap_nocache(pci_resource_start(pdev, 0), len); if (base == NULL) return; @@ -837,9 +866,17 @@ static void __devinit quirk_usb_handoff_xhci(struct pci_dev *pdev) */ ext_cap_offset = xhci_find_next_cap_offset(base, XHCI_HCC_PARAMS_OFFSET); do { + if ((ext_cap_offset + sizeof(val)) > len) { + /* We're reading garbage from the controller */ + dev_warn(&pdev->dev, + "xHCI controller failing to respond"); + return; + } + if (!ext_cap_offset) /* We've reached the end of the extended capabilities */ goto hc_init; + val = readl(base + ext_cap_offset); if (XHCI_EXT_CAPS_ID(val) == XHCI_EXT_CAPS_LEGACY) break; @@ -870,9 +907,10 @@ static void __devinit quirk_usb_handoff_xhci(struct pci_dev *pdev) /* Disable any BIOS SMIs and clear all SMI events*/ writel(val, base + ext_cap_offset + XHCI_LEGACY_CONTROL_OFFSET); +hc_init: if (usb_is_intel_switchable_xhci(pdev)) usb_enable_xhci_ports(pdev); -hc_init: + op_reg_base = base + XHCI_HC_LENGTH(readl(base)); /* Wait for the host controller to be ready before writing any diff --git a/drivers/usb/host/pci-quirks.h b/drivers/usb/host/pci-quirks.h index ef004a5de20f..7f69a39163ce 100644 --- a/drivers/usb/host/pci-quirks.h +++ b/drivers/usb/host/pci-quirks.h @@ -15,6 +15,7 @@ void usb_disable_xhci_ports(struct pci_dev *xhci_pdev); static inline void usb_amd_quirk_pll_disable(void) {} static inline void usb_amd_quirk_pll_enable(void) {} static inline void usb_amd_dev_put(void) {} +static inline void usb_disable_xhci_ports(struct pci_dev *xhci_pdev) {} #endif /* CONFIG_PCI */ #endif /* __LINUX_USB_PCI_QUIRKS_H */ diff --git a/drivers/usb/host/uhci-hcd.c b/drivers/usb/host/uhci-hcd.c index e4db350602b8..3fe069f536b2 100644 --- a/drivers/usb/host/uhci-hcd.c +++ b/drivers/usb/host/uhci-hcd.c @@ -447,6 +447,10 @@ static irqreturn_t uhci_irq(struct usb_hcd *hcd) return IRQ_NONE; uhci_writew(uhci, status, USBSTS); /* Clear it */ + spin_lock(&uhci->lock); + if (unlikely(!uhci->is_initialized)) /* not yet configured */ + goto done; + if (status & ~(USBSTS_USBINT | USBSTS_ERROR | USBSTS_RD)) { if (status & USBSTS_HSE) dev_err(uhci_dev(uhci), "host system error, " @@ -455,7 +459,6 @@ static irqreturn_t uhci_irq(struct usb_hcd *hcd) dev_err(uhci_dev(uhci), "host controller process " "error, something bad happened!\n"); if (status & USBSTS_HCH) { - spin_lock(&uhci->lock); if (uhci->rh_state >= UHCI_RH_RUNNING) { dev_err(uhci_dev(uhci), "host controller halted, " @@ -473,15 +476,15 @@ static irqreturn_t uhci_irq(struct usb_hcd *hcd) * pending unlinks */ mod_timer(&hcd->rh_timer, jiffies); } - spin_unlock(&uhci->lock); } } - if (status & USBSTS_RD) + if (status & USBSTS_RD) { + spin_unlock(&uhci->lock); usb_hcd_poll_rh_status(hcd); - else { - spin_lock(&uhci->lock); + } else { uhci_scan_schedule(uhci); + done: spin_unlock(&uhci->lock); } @@ -662,9 +665,9 @@ static int uhci_start(struct usb_hcd *hcd) */ mb(); + spin_lock_irq(&uhci->lock); configure_hc(uhci); uhci->is_initialized = 1; - spin_lock_irq(&uhci->lock); start_rh(uhci); spin_unlock_irq(&uhci->lock); return 0; diff --git a/drivers/usb/host/xhci-hub.c b/drivers/usb/host/xhci-hub.c index 824fc0f4e474..2bb098dff111 100644 --- a/drivers/usb/host/xhci-hub.c +++ b/drivers/usb/host/xhci-hub.c @@ -493,11 +493,48 @@ 
static void xhci_hub_report_link_state(u32 *status, u32 status_reg) * when this bit is set. */ pls |= USB_PORT_STAT_CONNECTION; + } else { + /* + * If CAS bit isn't set but the Port is already at + * Compliance Mode, fake a connection so the USB core + * notices the Compliance state and resets the port. + * This resolves an issue generated by the SN65LVPE502CP + * in which sometimes the port enters compliance mode + * caused by a delay on the host-device negotiation. + */ + if (pls == USB_SS_PORT_LS_COMP_MOD) + pls |= USB_PORT_STAT_CONNECTION; } + /* update status field */ *status |= pls; } +/* + * Function for Compliance Mode Quirk. + * + * This Function verifies if all xhc USB3 ports have entered U0, if so, + * the compliance mode timer is deleted. A port won't enter + * compliance mode if it has previously entered U0. + */ +void xhci_del_comp_mod_timer(struct xhci_hcd *xhci, u32 status, u16 wIndex) +{ + u32 all_ports_seen_u0 = ((1 << xhci->num_usb3_ports)-1); + bool port_in_u0 = ((status & PORT_PLS_MASK) == XDEV_U0); + + if (!(xhci->quirks & XHCI_COMP_MODE_QUIRK)) + return; + + if ((xhci->port_status_u0 != all_ports_seen_u0) && port_in_u0) { + xhci->port_status_u0 |= 1 << wIndex; + if (xhci->port_status_u0 == all_ports_seen_u0) { + del_timer_sync(&xhci->comp_mode_recovery_timer); + xhci_dbg(xhci, "All USB3 ports have entered U0 already!\n"); + xhci_dbg(xhci, "Compliance Mode Recovery Timer Deleted.\n"); + } + } +} + int xhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue, u16 wIndex, char *buf, u16 wLength) { @@ -644,6 +681,11 @@ int xhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue, /* Update Port Link State for super speed ports*/ if (hcd->speed == HCD_USB3) { xhci_hub_report_link_state(&status, temp); + /* + * Verify if all USB3 Ports Have entered U0 already. + * Delete Compliance Mode Timer if so. + */ + xhci_del_comp_mod_timer(xhci, temp, wIndex); } if (bus_state->port_c_suspend & (1 << wIndex)) status |= 1 << USB_PORT_FEAT_C_SUSPEND; @@ -711,12 +753,39 @@ int xhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue, break; case USB_PORT_FEAT_LINK_STATE: temp = xhci_readl(xhci, port_array[wIndex]); + + /* Disable port */ + if (link_state == USB_SS_PORT_LS_SS_DISABLED) { + xhci_dbg(xhci, "Disable port %d\n", wIndex); + temp = xhci_port_state_to_neutral(temp); + /* + * Clear all change bits, so that we get a new + * connection event. + */ + temp |= PORT_CSC | PORT_PEC | PORT_WRC | + PORT_OCC | PORT_RC | PORT_PLC | + PORT_CEC; + xhci_writel(xhci, temp | PORT_PE, + port_array[wIndex]); + temp = xhci_readl(xhci, port_array[wIndex]); + break; + } + + /* Put link in RxDetect (enable port) */ + if (link_state == USB_SS_PORT_LS_RX_DETECT) { + xhci_dbg(xhci, "Enable port %d\n", wIndex); + xhci_set_link_state(xhci, port_array, wIndex, + link_state); + temp = xhci_readl(xhci, port_array[wIndex]); + break; + } + /* Software should not attempt to set - * port link state above '5' (Rx.Detect) and the port + * port link state above '3' (U3) and the port * must be enabled. 
*/ if ((temp & PORT_PE) == 0 || - (link_state > USB_SS_PORT_LS_RX_DETECT)) { + (link_state > USB_SS_PORT_LS_U3)) { xhci_warn(xhci, "Cannot set link state.\n"); goto error; } @@ -871,6 +940,7 @@ int xhci_hub_status_data(struct usb_hcd *hcd, char *buf) int max_ports; __le32 __iomem **port_array; struct xhci_bus_state *bus_state; + bool reset_change = false; max_ports = xhci_get_ports(hcd, &port_array); bus_state = &xhci->bus_state[hcd_index(hcd)]; @@ -902,6 +972,12 @@ int xhci_hub_status_data(struct usb_hcd *hcd, char *buf) buf[(i + 1) / 8] |= 1 << (i + 1) % 8; status = 1; } + if ((temp & PORT_RC)) + reset_change = true; + } + if (!status && !reset_change) { + xhci_dbg(xhci, "%s: stopping port polling.\n", __func__); + clear_bit(HCD_FLAG_POLL_RH, &hcd->flags); } spin_unlock_irqrestore(&xhci->lock, flags); return status ? retval : 0; diff --git a/drivers/usb/host/xhci-mem.c b/drivers/usb/host/xhci-mem.c index 3e16f1c160a2..b42a6fb45b22 100644 --- a/drivers/usb/host/xhci-mem.c +++ b/drivers/usb/host/xhci-mem.c @@ -1250,6 +1250,8 @@ static unsigned int xhci_microframes_to_exponent(struct usb_device *udev, static unsigned int xhci_parse_microframe_interval(struct usb_device *udev, struct usb_host_endpoint *ep) { + if (ep->desc.bInterval == 0) + return 0; return xhci_microframes_to_exponent(udev, ep, ep->desc.bInterval, 0, 15); } diff --git a/drivers/usb/host/xhci-pci.c b/drivers/usb/host/xhci-pci.c index f1527404617c..84e82dc7b794 100644 --- a/drivers/usb/host/xhci-pci.c +++ b/drivers/usb/host/xhci-pci.c @@ -29,6 +29,7 @@ /* Device for a quirk */ #define PCI_VENDOR_ID_FRESCO_LOGIC 0x1b73 #define PCI_DEVICE_ID_FRESCO_LOGIC_PDK 0x1000 +#define PCI_DEVICE_ID_FRESCO_LOGIC_FL1400 0x1400 #define PCI_VENDOR_ID_ETRON 0x1b6f #define PCI_DEVICE_ID_ASROCK_P67 0x7023 @@ -58,8 +59,10 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci) /* Look for vendor-specific quirks */ if (pdev->vendor == PCI_VENDOR_ID_FRESCO_LOGIC && - pdev->device == PCI_DEVICE_ID_FRESCO_LOGIC_PDK) { - if (pdev->revision == 0x0) { + (pdev->device == PCI_DEVICE_ID_FRESCO_LOGIC_PDK || + pdev->device == PCI_DEVICE_ID_FRESCO_LOGIC_FL1400)) { + if (pdev->device == PCI_DEVICE_ID_FRESCO_LOGIC_PDK && + pdev->revision == 0x0) { xhci->quirks |= XHCI_RESET_EP_QUIRK; xhci_dbg(xhci, "QUIRK: Fresco Logic xHC needs configure" " endpoint cmd after reset endpoint\n"); @@ -99,6 +102,7 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci) * PPT chipsets. 
*/ xhci->quirks |= XHCI_SPURIOUS_REBOOT; + xhci->quirks |= XHCI_AVOID_BEI; } if (pdev->vendor == PCI_VENDOR_ID_ETRON && pdev->device == PCI_DEVICE_ID_ASROCK_P67) { diff --git a/drivers/usb/host/xhci-plat.c b/drivers/usb/host/xhci-plat.c index 689bc18b051d..df90fe51b4aa 100644 --- a/drivers/usb/host/xhci-plat.c +++ b/drivers/usb/host/xhci-plat.c @@ -118,7 +118,7 @@ static int xhci_plat_probe(struct platform_device *pdev) goto put_hcd; } - hcd->regs = ioremap(hcd->rsrc_start, hcd->rsrc_len); + hcd->regs = ioremap_nocache(hcd->rsrc_start, hcd->rsrc_len); if (!hcd->regs) { dev_dbg(&pdev->dev, "error mapping memory\n"); ret = -EFAULT; diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c index ffd2386b95c4..f99e643770ca 100644 --- a/drivers/usb/host/xhci-ring.c +++ b/drivers/usb/host/xhci-ring.c @@ -297,7 +297,7 @@ static int xhci_abort_cmd_ring(struct xhci_hcd *xhci) xhci_dbg(xhci, "Abort command ring\n"); if (!(xhci->cmd_ring_state & CMD_RING_STATE_RUNNING)) { - xhci_dbg(xhci, "The command ring isn't running, "\ + xhci_dbg(xhci, "The command ring isn't running, " "Have the command ring been stopped?\n"); return 0; } @@ -321,7 +321,7 @@ static int xhci_abort_cmd_ring(struct xhci_hcd *xhci) ret = handshake(xhci, &xhci->op_regs->cmd_ring, CMD_RING_RUNNING, 0, 5 * 1000 * 1000); if (ret < 0) { - xhci_err(xhci, "Stopped the command ring failed, "\ + xhci_err(xhci, "Stopped the command ring failed, " "maybe the host is dead\n"); xhci->xhc_state |= XHCI_STATE_DYING; xhci_quiesce(xhci); @@ -367,7 +367,7 @@ int xhci_cancel_cmd(struct xhci_hcd *xhci, struct xhci_command *command, spin_lock_irqsave(&xhci->lock, flags); if (xhci->xhc_state & XHCI_STATE_DYING) { - xhci_warn(xhci, "Abort the command ring,"\ + xhci_warn(xhci, "Abort the command ring," " but the xHCI is dead.\n"); retval = -ESHUTDOWN; goto fail; @@ -1227,6 +1227,16 @@ static void xhci_cmd_to_noop(struct xhci_hcd *xhci, struct xhci_cd *cur_cd) /* find the current segment of command ring */ cur_seg = find_trb_seg(xhci->cmd_ring->first_seg, xhci->cmd_ring->dequeue, &cycle_state); + if (!cur_seg) { + xhci_warn(xhci, "Command ring mismatch, dequeue = %p %llx (dma)\n", + xhci->cmd_ring->dequeue, + (unsigned long long) + xhci_trb_virt_to_dma(xhci->cmd_ring->deq_seg, + xhci->cmd_ring->dequeue)); + xhci_debug_ring(xhci, xhci->cmd_ring); + xhci_dbg_ring_ptrs(xhci, xhci->cmd_ring); + return; + } /* find the command trb matched by cd from command ring */ for (cmd_trb = xhci->cmd_ring->dequeue; @@ -1687,7 +1697,7 @@ static void handle_port_status(struct xhci_hcd *xhci, faked_port_index + 1); if (slot_id && xhci->devs[slot_id]) xhci_ring_device(xhci, slot_id); - if (bus_state->port_remote_wakeup && (1 << faked_port_index)) { + if (bus_state->port_remote_wakeup & (1 << faked_port_index)) { bus_state->port_remote_wakeup &= ~(1 << faked_port_index); xhci_test_and_clear_bit(xhci, port_array, @@ -1714,6 +1724,15 @@ cleanup: if (bogus_port_status) return; + /* + * xHCI port-status-change events occur when the "or" of all the + * status-change bits in the portsc register changes from 0 to 1. + * New status changes won't cause an event if any other change + * bits are still set. When an event occurs, switch over to + * polling to avoid losing status changes. 
+ */ + xhci_dbg(xhci, "%s: starting port polling.\n", __func__); + set_bit(HCD_FLAG_POLL_RH, &hcd->flags); spin_unlock(&xhci->lock); /* Pass this up to the core */ usb_hcd_poll_rh_status(hcd); @@ -2582,6 +2601,8 @@ cleanup: (trb_comp_code != COMP_STALL && trb_comp_code != COMP_BABBLE)) xhci_urb_free_priv(xhci, urb_priv); + else + kfree(urb_priv); usb_hcd_unlink_urb_from_ep(bus_to_hcd(urb->dev->bus), urb); if ((urb->actual_length != urb->transfer_buffer_length && @@ -3073,11 +3094,11 @@ static u32 xhci_td_remainder(unsigned int remainder) } /* - * For xHCI 1.0 host controllers, TD size is the number of packets remaining in - * the TD (*not* including this TRB). + * For xHCI 1.0 host controllers, TD size is the number of max packet sized + * packets remaining in the TD (*not* including this TRB). * * Total TD packet count = total_packet_count = - * roundup(TD size in bytes / wMaxPacketSize) + * DIV_ROUND_UP(TD size in bytes / wMaxPacketSize) * * Packets transferred up to and including this TRB = packets_transferred = * rounddown(total bytes transferred including this TRB / wMaxPacketSize) @@ -3085,24 +3106,27 @@ static u32 xhci_td_remainder(unsigned int remainder) * TD size = total_packet_count - packets_transferred * * It must fit in bits 21:17, so it can't be bigger than 31. + * The last TRB in a TD must have the TD size set to zero. */ - static u32 xhci_v1_0_td_remainder(int running_total, int trb_buff_len, - unsigned int total_packet_count, struct urb *urb) + unsigned int total_packet_count, struct urb *urb, + unsigned int num_trbs_left) { int packets_transferred; /* One TRB with a zero-length data packet. */ - if (running_total == 0 && trb_buff_len == 0) + if (num_trbs_left == 0 || (running_total == 0 && trb_buff_len == 0)) return 0; /* All the TRB queueing functions don't count the current TRB in * running_total. */ packets_transferred = (running_total + trb_buff_len) / - usb_endpoint_maxp(&urb->ep->desc); + GET_MAX_PACKET(usb_endpoint_maxp(&urb->ep->desc)); - return xhci_td_remainder(total_packet_count - packets_transferred); + if ((total_packet_count - packets_transferred) > 31) + return 31 << 17; + return (total_packet_count - packets_transferred) << 17; } static int queue_bulk_sg_tx(struct xhci_hcd *xhci, gfp_t mem_flags, @@ -3129,7 +3153,7 @@ static int queue_bulk_sg_tx(struct xhci_hcd *xhci, gfp_t mem_flags, num_trbs = count_sg_trbs_needed(xhci, urb); num_sgs = urb->num_mapped_sgs; - total_packet_count = roundup(urb->transfer_buffer_length, + total_packet_count = DIV_ROUND_UP(urb->transfer_buffer_length, usb_endpoint_maxp(&urb->ep->desc)); trb_buff_len = prepare_transfer(xhci, xhci->devs[slot_id], @@ -3212,7 +3236,8 @@ static int queue_bulk_sg_tx(struct xhci_hcd *xhci, gfp_t mem_flags, running_total); } else { remainder = xhci_v1_0_td_remainder(running_total, - trb_buff_len, total_packet_count, urb); + trb_buff_len, total_packet_count, urb, + num_trbs - 1); } length_field = TRB_LEN(trb_buff_len) | remainder | @@ -3320,7 +3345,7 @@ int xhci_queue_bulk_tx(struct xhci_hcd *xhci, gfp_t mem_flags, start_cycle = ep_ring->cycle_state; running_total = 0; - total_packet_count = roundup(urb->transfer_buffer_length, + total_packet_count = DIV_ROUND_UP(urb->transfer_buffer_length, usb_endpoint_maxp(&urb->ep->desc)); /* How much data is in the first TRB? 
*/ addr = (u64) urb->transfer_dma; @@ -3366,7 +3391,8 @@ int xhci_queue_bulk_tx(struct xhci_hcd *xhci, gfp_t mem_flags, running_total); } else { remainder = xhci_v1_0_td_remainder(running_total, - trb_buff_len, total_packet_count, urb); + trb_buff_len, total_packet_count, urb, + num_trbs - 1); } length_field = TRB_LEN(trb_buff_len) | remainder | @@ -3629,8 +3655,9 @@ static int xhci_queue_isoc_tx(struct xhci_hcd *xhci, gfp_t mem_flags, addr = start_addr + urb->iso_frame_desc[i].offset; td_len = urb->iso_frame_desc[i].length; td_remain_len = td_len; - total_packet_count = roundup(td_len, - usb_endpoint_maxp(&urb->ep->desc)); + total_packet_count = DIV_ROUND_UP(td_len, + GET_MAX_PACKET( + usb_endpoint_maxp(&urb->ep->desc))); /* A zero-length transfer still involves at least one packet. */ if (total_packet_count == 0) total_packet_count++; @@ -3652,9 +3679,11 @@ static int xhci_queue_isoc_tx(struct xhci_hcd *xhci, gfp_t mem_flags, td = urb_priv->td[i]; for (j = 0; j < trbs_per_td; j++) { u32 remainder = 0; - field = TRB_TBC(burst_count) | TRB_TLBPC(residue); + field = 0; if (first_trb) { + field = TRB_TBC(burst_count) | + TRB_TLBPC(residue); /* Queue the isoc TRB */ field |= TRB_TYPE(TRB_ISOC); /* Assume URB_ISO_ASAP is set */ @@ -3685,7 +3714,9 @@ static int xhci_queue_isoc_tx(struct xhci_hcd *xhci, gfp_t mem_flags, } else { td->last_trb = ep_ring->enqueue; field |= TRB_IOC; - if (xhci->hci_version == 0x100) { + if (xhci->hci_version == 0x100 && + !(xhci->quirks & + XHCI_AVOID_BEI)) { /* Set BEI bit except for the last td */ if (i < num_tds - 1) field |= TRB_BEI; @@ -3706,7 +3737,8 @@ static int xhci_queue_isoc_tx(struct xhci_hcd *xhci, gfp_t mem_flags, } else { remainder = xhci_v1_0_td_remainder( running_total, trb_buff_len, - total_packet_count, urb); + total_packet_count, urb, + (trbs_per_td - j - 1)); } length_field = TRB_LEN(trb_buff_len) | remainder | diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c index 3206624fbc4a..97799978630f 100644 --- a/drivers/usb/host/xhci.c +++ b/drivers/usb/host/xhci.c @@ -26,6 +26,7 @@ #include <linux/module.h> #include <linux/moduleparam.h> #include <linux/slab.h> +#include <linux/dmi.h> #include "xhci.h" @@ -399,6 +400,98 @@ static void xhci_msix_sync_irqs(struct xhci_hcd *xhci) #endif +static void compliance_mode_recovery(unsigned long arg) +{ + struct xhci_hcd *xhci; + struct usb_hcd *hcd; + u32 temp; + int i; + + xhci = (struct xhci_hcd *)arg; + + for (i = 0; i < xhci->num_usb3_ports; i++) { + temp = xhci_readl(xhci, xhci->usb3_ports[i]); + if ((temp & PORT_PLS_MASK) == USB_SS_PORT_LS_COMP_MOD) { + /* + * Compliance Mode Detected. Letting USB Core + * handle the Warm Reset + */ + xhci_dbg(xhci, "Compliance Mode Detected->Port %d!\n", + i + 1); + xhci_dbg(xhci, "Attempting Recovery routine!\n"); + hcd = xhci->shared_hcd; + + if (hcd->state == HC_STATE_SUSPENDED) + usb_hcd_resume_root_hub(hcd); + + usb_hcd_poll_rh_status(hcd); + } + } + + if (xhci->port_status_u0 != ((1 << xhci->num_usb3_ports)-1)) + mod_timer(&xhci->comp_mode_recovery_timer, + jiffies + msecs_to_jiffies(COMP_MODE_RCVRY_MSECS)); +} + +/* + * Quirk to work around issue generated by the SN65LVPE502CP USB3.0 re-driver + * that causes ports behind that hardware to enter compliance mode sometimes. 
+ * The quirk creates a timer that polls every 2 seconds the link state of + * each host controller's port and recovers it by issuing a Warm reset + * if Compliance mode is detected, otherwise the port will become "dead" (no + * device connections or disconnections will be detected anymore). Becasue no + * status event is generated when entering compliance mode (per xhci spec), + * this quirk is needed on systems that have the failing hardware installed. + */ +static void compliance_mode_recovery_timer_init(struct xhci_hcd *xhci) +{ + xhci->port_status_u0 = 0; + init_timer(&xhci->comp_mode_recovery_timer); + + xhci->comp_mode_recovery_timer.data = (unsigned long) xhci; + xhci->comp_mode_recovery_timer.function = compliance_mode_recovery; + xhci->comp_mode_recovery_timer.expires = jiffies + + msecs_to_jiffies(COMP_MODE_RCVRY_MSECS); + + set_timer_slack(&xhci->comp_mode_recovery_timer, + msecs_to_jiffies(COMP_MODE_RCVRY_MSECS)); + add_timer(&xhci->comp_mode_recovery_timer); + xhci_dbg(xhci, "Compliance Mode Recovery Timer Initialized.\n"); +} + +/* + * This function identifies the systems that have installed the SN65LVPE502CP + * USB3.0 re-driver and that need the Compliance Mode Quirk. + * Systems: + * Vendor: Hewlett-Packard -> System Models: Z420, Z620 and Z820 + */ +static bool compliance_mode_recovery_timer_quirk_check(void) +{ + const char *dmi_product_name, *dmi_sys_vendor; + + dmi_product_name = dmi_get_system_info(DMI_PRODUCT_NAME); + dmi_sys_vendor = dmi_get_system_info(DMI_SYS_VENDOR); + if (!dmi_product_name || !dmi_sys_vendor) + return false; + + if (!(strstr(dmi_sys_vendor, "Hewlett-Packard"))) + return false; + + if (strstr(dmi_product_name, "Z420") || + strstr(dmi_product_name, "Z620") || + strstr(dmi_product_name, "Z820") || + strstr(dmi_product_name, "Z1 Workstation")) + return true; + + return false; +} + +static int xhci_all_ports_seen_u0(struct xhci_hcd *xhci) +{ + return (xhci->port_status_u0 == ((1 << xhci->num_usb3_ports)-1)); +} + + /* * Initialize memory for HCD and xHC (one-time init). * @@ -422,6 +515,12 @@ int xhci_init(struct usb_hcd *hcd) retval = xhci_mem_init(xhci, GFP_KERNEL); xhci_dbg(xhci, "Finished xhci_init\n"); + /* Initializing Compliance Mode Recovery Data If Needed */ + if (compliance_mode_recovery_timer_quirk_check()) { + xhci->quirks |= XHCI_COMP_MODE_QUIRK; + compliance_mode_recovery_timer_init(xhci); + } + return retval; } @@ -631,6 +730,11 @@ void xhci_stop(struct usb_hcd *hcd) del_timer_sync(&xhci->event_ring_timer); #endif + /* Deleting Compliance Mode Recovery Timer */ + if ((xhci->quirks & XHCI_COMP_MODE_QUIRK) && + (!(xhci_all_ports_seen_u0(xhci)))) + del_timer_sync(&xhci->comp_mode_recovery_timer); + if (xhci->quirks & XHCI_AMD_PLL_FIX) usb_amd_dev_put(); @@ -776,6 +880,11 @@ int xhci_suspend(struct xhci_hcd *xhci) struct usb_hcd *hcd = xhci_to_hcd(xhci); u32 command; + /* Don't poll the roothubs on bus suspend. 
*/ + xhci_dbg(xhci, "%s: stopping port polling.\n", __func__); + clear_bit(HCD_FLAG_POLL_RH, &hcd->flags); + del_timer_sync(&hcd->rh_timer); + spin_lock_irq(&xhci->lock); clear_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags); clear_bit(HCD_FLAG_HW_ACCESSIBLE, &xhci->shared_hcd->flags); @@ -787,7 +896,7 @@ int xhci_suspend(struct xhci_hcd *xhci) command &= ~CMD_RUN; xhci_writel(xhci, command, &xhci->op_regs->command); if (handshake(xhci, &xhci->op_regs->status, - STS_HALT, STS_HALT, 100*100)) { + STS_HALT, STS_HALT, XHCI_MAX_HALT_USEC)) { xhci_warn(xhci, "WARN: xHC CMD_RUN timeout\n"); spin_unlock_irq(&xhci->lock); return -ETIMEDOUT; @@ -808,6 +917,16 @@ int xhci_suspend(struct xhci_hcd *xhci) } spin_unlock_irq(&xhci->lock); + /* + * Deleting Compliance Mode Recovery Timer because the xHCI Host + * is about to be suspended. + */ + if ((xhci->quirks & XHCI_COMP_MODE_QUIRK) && + (!(xhci_all_ports_seen_u0(xhci)))) { + del_timer_sync(&xhci->comp_mode_recovery_timer); + xhci_dbg(xhci, "Compliance Mode Recovery Timer Deleted!\n"); + } + /* step 5: remove core well power */ /* synchronize irq when using MSI-X */ xhci_msix_sync_irqs(xhci); @@ -940,6 +1059,21 @@ int xhci_resume(struct xhci_hcd *xhci, bool hibernated) usb_hcd_resume_root_hub(hcd); usb_hcd_resume_root_hub(xhci->shared_hcd); } + + /* + * If system is subject to the Quirk, Compliance Mode Timer needs to + * be re-initialized Always after a system resume. Ports are subject + * to suffer the Compliance Mode issue again. It doesn't matter if + * ports have entered previously to U0 before system's suspension. + */ + if (xhci->quirks & XHCI_COMP_MODE_QUIRK) + compliance_mode_recovery_timer_init(xhci); + + /* Re-enable port polling. */ + xhci_dbg(xhci, "%s: starting port polling.\n", __func__); + set_bit(HCD_FLAG_POLL_RH, &hcd->flags); + usb_hcd_poll_rh_status(hcd); + return retval; } #endif /* CONFIG_PM */ @@ -2130,7 +2264,7 @@ static bool xhci_is_async_ep(unsigned int ep_type) static bool xhci_is_sync_in_ep(unsigned int ep_type) { - return (ep_type == ISOC_IN_EP || ep_type != INT_IN_EP); + return (ep_type == ISOC_IN_EP || ep_type == INT_IN_EP); } static unsigned int xhci_get_ss_bw_consumed(struct xhci_bw_info *ep_bw) diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h index 7eed733cfb06..0211e6dac8d4 100644 --- a/drivers/usb/host/xhci.h +++ b/drivers/usb/host/xhci.h @@ -1503,6 +1503,8 @@ struct xhci_hcd { #define XHCI_AMD_0x96_HOST (1 << 9) #define XHCI_TRUST_TX_LENGTH (1 << 10) #define XHCI_SPURIOUS_REBOOT (1 << 13) +#define XHCI_COMP_MODE_QUIRK (1 << 14) +#define XHCI_AVOID_BEI (1 << 15) unsigned int num_active_eps; unsigned int limit_active_eps; /* There are two roothubs to keep track of bus suspend info for */ @@ -1519,6 +1521,11 @@ struct xhci_hcd { unsigned sw_lpm_support:1; /* support xHCI 1.0 spec USB2 hardware LPM */ unsigned hw_lpm_support:1; + /* Compliance Mode Recovery Data */ + struct timer_list comp_mode_recovery_timer; + u32 port_status_u0; +/* Compliance Mode Timer Triggered every 2 seconds */ +#define COMP_MODE_RCVRY_MSECS 2000 }; /* convert between an HCD pointer and the corresponding EHCI_HCD */ diff --git a/drivers/usb/misc/emi62.c b/drivers/usb/misc/emi62.c index 4e0f167a6c4e..8be96de5f6e7 100644 --- a/drivers/usb/misc/emi62.c +++ b/drivers/usb/misc/emi62.c @@ -256,7 +256,7 @@ wraperr: return err; } -static const struct usb_device_id id_table[] __devinitconst = { +static const struct usb_device_id id_table[] = { { USB_DEVICE(EMI62_VENDOR_ID, EMI62_PRODUCT_ID) }, { } /* Terminating entry */ }; diff --git 
a/drivers/usb/musb/cppi_dma.c b/drivers/usb/musb/cppi_dma.c index 66bc376005d2..319cfcf372ad 100644 --- a/drivers/usb/musb/cppi_dma.c +++ b/drivers/usb/musb/cppi_dma.c @@ -1313,6 +1313,7 @@ irqreturn_t cppi_interrupt(int irq, void *dev_id) return IRQ_HANDLED; } +EXPORT_SYMBOL_GPL(cppi_interrupt); /* Instantiate a software object representing a DMA controller. */ struct dma_controller *__init diff --git a/drivers/usb/musb/musb_core.c b/drivers/usb/musb/musb_core.c index 66aaccf04490..a2b4008dc069 100644 --- a/drivers/usb/musb/musb_core.c +++ b/drivers/usb/musb/musb_core.c @@ -2400,10 +2400,7 @@ static int __init musb_init(void) if (usb_disabled()) return 0; - pr_info("%s: version " MUSB_VERSION ", " - "?dma?" - ", " - "otg (peripheral+host)", + pr_info("%s: version " MUSB_VERSION ", ?dma?, otg (peripheral+host)\n", musb_driver_name); return platform_driver_register(&musb_driver); } diff --git a/drivers/usb/serial/cp210x.c b/drivers/usb/serial/cp210x.c index 53e7e692d616..f38a27831466 100644 --- a/drivers/usb/serial/cp210x.c +++ b/drivers/usb/serial/cp210x.c @@ -120,6 +120,7 @@ static const struct usb_device_id id_table[] = { { USB_DEVICE(0x10C4, 0x8477) }, /* Balluff RFID */ { USB_DEVICE(0x10C4, 0x85EA) }, /* AC-Services IBUS-IF */ { USB_DEVICE(0x10C4, 0x85EB) }, /* AC-Services CIS-IBUS */ + { USB_DEVICE(0x10C4, 0x85F8) }, /* Virtenio Preon32 */ { USB_DEVICE(0x10C4, 0x8664) }, /* AC-Services CAN-IF */ { USB_DEVICE(0x10C4, 0x8665) }, /* AC-Services OBD-IF */ { USB_DEVICE(0x10C4, 0xEA60) }, /* Silicon Labs factory default */ diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c index f5819cb4dcf4..87ef1503552d 100644 --- a/drivers/usb/serial/ftdi_sio.c +++ b/drivers/usb/serial/ftdi_sio.c @@ -197,6 +197,7 @@ static struct usb_device_id id_table_combined [] = { { USB_DEVICE(FTDI_VID, FTDI_OPENDCC_THROTTLE_PID) }, { USB_DEVICE(FTDI_VID, FTDI_OPENDCC_GATEWAY_PID) }, { USB_DEVICE(FTDI_VID, FTDI_OPENDCC_GBM_PID) }, + { USB_DEVICE(NEWPORT_VID, NEWPORT_AGILIS_PID) }, { USB_DEVICE(INTERBIOMETRICS_VID, INTERBIOMETRICS_IOBOARD_PID) }, { USB_DEVICE(INTERBIOMETRICS_VID, INTERBIOMETRICS_MINI_IOBOARD_PID) }, { USB_DEVICE(FTDI_VID, FTDI_SPROG_II) }, @@ -584,9 +585,12 @@ static struct usb_device_id id_table_combined [] = { { USB_DEVICE(FTDI_VID, FTDI_IBS_PEDO_PID) }, { USB_DEVICE(FTDI_VID, FTDI_IBS_PROD_PID) }, { USB_DEVICE(FTDI_VID, FTDI_TAVIR_STK500_PID) }, + { USB_DEVICE(FTDI_VID, FTDI_TIAO_UMPA_PID), + .driver_info = (kernel_ulong_t)&ftdi_jtag_quirk }, /* * ELV devices: */ + { USB_DEVICE(FTDI_ELV_VID, FTDI_ELV_WS300_PID) }, { USB_DEVICE(FTDI_VID, FTDI_ELV_USR_PID) }, { USB_DEVICE(FTDI_VID, FTDI_ELV_MSM1_PID) }, { USB_DEVICE(FTDI_VID, FTDI_ELV_KL100_PID) }, @@ -673,6 +677,7 @@ static struct usb_device_id id_table_combined [] = { { USB_DEVICE(FTDI_VID, XSENS_CONVERTER_5_PID) }, { USB_DEVICE(FTDI_VID, XSENS_CONVERTER_6_PID) }, { USB_DEVICE(FTDI_VID, XSENS_CONVERTER_7_PID) }, + { USB_DEVICE(FTDI_VID, FTDI_OMNI1509) }, { USB_DEVICE(MOBILITY_VID, MOBILITY_USB_SERIAL_PID) }, { USB_DEVICE(FTDI_VID, FTDI_ACTIVE_ROBOTS_PID) }, { USB_DEVICE(FTDI_VID, FTDI_MHAM_KW_PID) }, @@ -704,6 +709,7 @@ static struct usb_device_id id_table_combined [] = { { USB_DEVICE(FTDI_VID, FTDI_PCDJ_DAC2_PID) }, { USB_DEVICE(FTDI_VID, FTDI_RRCIRKITS_LOCOBUFFER_PID) }, { USB_DEVICE(FTDI_VID, FTDI_ASK_RDR400_PID) }, + { USB_DEVICE(FTDI_VID, FTDI_NZR_SEM_USB_PID) }, { USB_DEVICE(ICOM_VID, ICOM_ID_1_PID) }, { USB_DEVICE(ICOM_VID, ICOM_OPC_U_UC_PID) }, { USB_DEVICE(ICOM_VID, ICOM_ID_RP2C1_PID) }, @@ -804,13 +810,32 @@ static 
struct usb_device_id id_table_combined [] = { .driver_info = (kernel_ulong_t)&ftdi_jtag_quirk }, { USB_DEVICE(ADI_VID, ADI_GNICEPLUS_PID), .driver_info = (kernel_ulong_t)&ftdi_jtag_quirk }, - { USB_DEVICE(MICROCHIP_VID, MICROCHIP_USB_BOARD_PID) }, + { USB_DEVICE_AND_INTERFACE_INFO(MICROCHIP_VID, MICROCHIP_USB_BOARD_PID, + USB_CLASS_VENDOR_SPEC, + USB_SUBCLASS_VENDOR_SPEC, 0x00) }, { USB_DEVICE(JETI_VID, JETI_SPC1201_PID) }, { USB_DEVICE(MARVELL_VID, MARVELL_SHEEVAPLUG_PID), .driver_info = (kernel_ulong_t)&ftdi_jtag_quirk }, { USB_DEVICE(LARSENBRUSGAARD_VID, LB_ALTITRACK_PID) }, { USB_DEVICE(GN_OTOMETRICS_VID, AURICAL_USB_PID) }, + { USB_DEVICE(FTDI_VID, PI_C865_PID) }, + { USB_DEVICE(FTDI_VID, PI_C857_PID) }, + { USB_DEVICE(PI_VID, PI_C866_PID) }, + { USB_DEVICE(PI_VID, PI_C663_PID) }, + { USB_DEVICE(PI_VID, PI_C725_PID) }, + { USB_DEVICE(PI_VID, PI_E517_PID) }, + { USB_DEVICE(PI_VID, PI_C863_PID) }, { USB_DEVICE(PI_VID, PI_E861_PID) }, + { USB_DEVICE(PI_VID, PI_C867_PID) }, + { USB_DEVICE(PI_VID, PI_E609_PID) }, + { USB_DEVICE(PI_VID, PI_E709_PID) }, + { USB_DEVICE(PI_VID, PI_100F_PID) }, + { USB_DEVICE(PI_VID, PI_1011_PID) }, + { USB_DEVICE(PI_VID, PI_1012_PID) }, + { USB_DEVICE(PI_VID, PI_1013_PID) }, + { USB_DEVICE(PI_VID, PI_1014_PID) }, + { USB_DEVICE(PI_VID, PI_1015_PID) }, + { USB_DEVICE(PI_VID, PI_1016_PID) }, { USB_DEVICE(KONDO_VID, KONDO_USB_SERIAL_PID) }, { USB_DEVICE(BAYER_VID, BAYER_CONTOUR_CABLE_PID) }, { USB_DEVICE(FTDI_VID, MARVELL_OPENRD_PID), @@ -858,6 +883,8 @@ static struct usb_device_id id_table_combined [] = { { USB_DEVICE(FTDI_VID, FTDI_DISTORTEC_JTAG_LOCK_PICK_PID), .driver_info = (kernel_ulong_t)&ftdi_jtag_quirk }, { USB_DEVICE(FTDI_VID, FTDI_LUMEL_PD12_PID) }, + /* Crucible Devices */ + { USB_DEVICE(FTDI_VID, FTDI_CT_COMET_PID) }, { }, /* Optional parameter entry */ { } /* Terminating entry */ }; @@ -1788,7 +1815,7 @@ static int ftdi_8u2232c_probe(struct usb_serial *serial) dbg("%s", __func__); if ((udev->manufacturer && !strcmp(udev->manufacturer, "CALAO Systems")) || - (udev->product && !strcmp(udev->product, "BeagleBone/XDS100"))) + (udev->product && !strcmp(udev->product, "BeagleBone/XDS100V2"))) return ftdi_jtag_probe(serial); return 0; @@ -1892,24 +1919,22 @@ static void ftdi_dtr_rts(struct usb_serial_port *port, int on) { struct ftdi_private *priv = usb_get_serial_port_data(port); - mutex_lock(&port->serial->disc_mutex); - if (!port->serial->disconnected) { - /* Disable flow control */ - if (!on && usb_control_msg(port->serial->dev, + /* Disable flow control */ + if (!on) { + if (usb_control_msg(port->serial->dev, usb_sndctrlpipe(port->serial->dev, 0), FTDI_SIO_SET_FLOW_CTRL_REQUEST, FTDI_SIO_SET_FLOW_CTRL_REQUEST_TYPE, 0, priv->interface, NULL, 0, WDR_TIMEOUT) < 0) { - dev_err(&port->dev, "error from flowcontrol urb\n"); + dev_err(&port->dev, "error from flowcontrol urb\n"); } - /* drop RTS and DTR */ - if (on) - set_mctrl(port, TIOCM_DTR | TIOCM_RTS); - else - clear_mctrl(port, TIOCM_DTR | TIOCM_RTS); } - mutex_unlock(&port->serial->disc_mutex); + /* drop RTS and DTR */ + if (on) + set_mctrl(port, TIOCM_DTR | TIOCM_RTS); + else + clear_mctrl(port, TIOCM_DTR | TIOCM_RTS); } /* diff --git a/drivers/usb/serial/ftdi_sio_ids.h b/drivers/usb/serial/ftdi_sio_ids.h index 5dd96ca6c380..9d359e189a64 100644 --- a/drivers/usb/serial/ftdi_sio_ids.h +++ b/drivers/usb/serial/ftdi_sio_ids.h @@ -75,6 +75,9 @@ #define FTDI_OPENDCC_GATEWAY_PID 0xBFDB #define FTDI_OPENDCC_GBM_PID 0xBFDC +/* NZR SEM 16+ USB (http://www.nzr.de) */ +#define FTDI_NZR_SEM_USB_PID 0xC1E0 /* NZR 
SEM-LOG16+ */ + /* * RR-CirKits LocoBuffer USB (http://www.rr-cirkits.com) */ @@ -144,6 +147,11 @@ #define XSENS_CONVERTER_6_PID 0xD38E #define XSENS_CONVERTER_7_PID 0xD38F +/** + * Zolix (www.zolix.com.cb) product ids + */ +#define FTDI_OMNI1509 0xD491 /* Omni1509 embedded USB-serial */ + /* * NDI (www.ndigital.com) product ids */ @@ -201,7 +209,7 @@ /* * ELV USB devices submitted by Christian Abt of ELV (www.elv.de). - * All of these devices use FTDI's vendor ID (0x0403). + * Almost all of these devices use FTDI's vendor ID (0x0403). * Further IDs taken from ELV Windows .inf file. * * The previously included PID for the UO 100 module was incorrect. @@ -209,6 +217,8 @@ * * Armin Laeuger originally sent the PID for the UM 100 module. */ +#define FTDI_ELV_VID 0x1B1F /* ELV AG */ +#define FTDI_ELV_WS300_PID 0xC006 /* eQ3 WS 300 PC II */ #define FTDI_ELV_USR_PID 0xE000 /* ELV Universal-Sound-Recorder */ #define FTDI_ELV_MSM1_PID 0xE001 /* ELV Mini-Sound-Modul */ #define FTDI_ELV_KL100_PID 0xE002 /* ELV Kfz-Leistungsmesser KL 100 */ @@ -514,6 +524,11 @@ */ #define FTDI_TAVIR_STK500_PID 0xFA33 /* STK500 AVR programmer */ +/* + * TIAO product ids (FTDI_VID) + * http://www.tiaowiki.com/w/Main_Page + */ +#define FTDI_TIAO_UMPA_PID 0x8a98 /* TIAO/DIYGADGET USB Multi-Protocol Adapter */ /********************************/ @@ -539,7 +554,10 @@ /* * Microchip Technology, Inc. * - * MICROCHIP_VID (0x04D8) and MICROCHIP_USB_BOARD_PID (0x000A) are also used by: + * MICROCHIP_VID (0x04D8) and MICROCHIP_USB_BOARD_PID (0x000A) are + * used by single function CDC ACM class based firmware demo + * applications. The VID/PID has also been used in firmware + * emulating FTDI serial chips by: * Hornby Elite - Digital Command Control Console * http://www.hornby.com/hornby-dcc/controllers/ */ @@ -741,6 +759,12 @@ #define TTI_VID 0x103E /* Vendor Id */ #define TTI_QL355P_PID 0x03E8 /* TTi QL355P power supply */ +/* + * Newport Cooperation (www.newport.com) + */ +#define NEWPORT_VID 0x104D +#define NEWPORT_AGILIS_PID 0x3000 + /* Interbiometrics USB I/O Board */ /* Developed for Interbiometrics by Rudolf Gugler */ #define INTERBIOMETRICS_VID 0x1209 @@ -791,8 +815,27 @@ * Physik Instrumente * http://www.physikinstrumente.com/en/products/ */ +/* These two devices use the VID of FTDI */ +#define PI_C865_PID 0xe0a0 /* PI C-865 Piezomotor Controller */ +#define PI_C857_PID 0xe0a1 /* PI Encoder Trigger Box */ + #define PI_VID 0x1a72 /* Vendor ID */ -#define PI_E861_PID 0x1008 /* E-861 piezo controller USB connection */ +#define PI_C866_PID 0x1000 /* PI C-866 Piezomotor Controller */ +#define PI_C663_PID 0x1001 /* PI C-663 Mercury-Step */ +#define PI_C725_PID 0x1002 /* PI C-725 Piezomotor Controller */ +#define PI_E517_PID 0x1005 /* PI E-517 Digital Piezo Controller Operation Module */ +#define PI_C863_PID 0x1007 /* PI C-863 */ +#define PI_E861_PID 0x1008 /* PI E-861 Piezomotor Controller */ +#define PI_C867_PID 0x1009 /* PI C-867 Piezomotor Controller */ +#define PI_E609_PID 0x100D /* PI E-609 Digital Piezo Controller */ +#define PI_E709_PID 0x100E /* PI E-709 Digital Piezo Controller */ +#define PI_100F_PID 0x100F /* PI Digital Piezo Controller */ +#define PI_1011_PID 0x1011 /* PI Digital Piezo Controller */ +#define PI_1012_PID 0x1012 /* PI Motion Controller */ +#define PI_1013_PID 0x1013 /* PI Motion Controller */ +#define PI_1014_PID 0x1014 /* PI Device */ +#define PI_1015_PID 0x1015 /* PI Device */ +#define PI_1016_PID 0x1016 /* PI Digital Servo Module */ /* * Kondo Kagaku Co.Ltd. 
@@ -1223,3 +1266,9 @@ * ATI command output: Cinterion MC55i */ #define FTDI_CINTERION_MC55I_PID 0xA951 + +/* + * Product: Comet Caller ID decoder + * Manufacturer: Crucible Technologies + */ +#define FTDI_CT_COMET_PID 0x8e08 diff --git a/drivers/usb/serial/mct_u232.c b/drivers/usb/serial/mct_u232.c index ef4d7adfbd9d..933dd07aa087 100644 --- a/drivers/usb/serial/mct_u232.c +++ b/drivers/usb/serial/mct_u232.c @@ -514,31 +514,29 @@ static void mct_u232_dtr_rts(struct usb_serial_port *port, int on) unsigned int control_state; struct mct_u232_private *priv = usb_get_serial_port_data(port); - mutex_lock(&port->serial->disc_mutex); - if (!port->serial->disconnected) { - /* drop DTR and RTS */ - spin_lock_irq(&priv->lock); - if (on) - priv->control_state |= TIOCM_DTR | TIOCM_RTS; - else - priv->control_state &= ~(TIOCM_DTR | TIOCM_RTS); - control_state = priv->control_state; - spin_unlock_irq(&priv->lock); - mct_u232_set_modem_ctrl(port->serial, control_state); - } - mutex_unlock(&port->serial->disc_mutex); + spin_lock_irq(&priv->lock); + if (on) + priv->control_state |= TIOCM_DTR | TIOCM_RTS; + else + priv->control_state &= ~(TIOCM_DTR | TIOCM_RTS); + control_state = priv->control_state; + spin_unlock_irq(&priv->lock); + + mct_u232_set_modem_ctrl(port->serial, control_state); } static void mct_u232_close(struct usb_serial_port *port) { dbg("%s port %d", __func__, port->number); - if (port->serial->dev) { - /* shutdown our urbs */ - usb_kill_urb(port->write_urb); - usb_kill_urb(port->read_urb); - usb_kill_urb(port->interrupt_in_urb); - } + /* + * Must kill the read urb as it is actually an interrupt urb, which + * generic close thus fails to kill. + */ + usb_kill_urb(port->read_urb); + usb_kill_urb(port->interrupt_in_urb); + + usb_serial_generic_close(port); } /* mct_u232_close */ diff --git a/drivers/usb/serial/mos7840.c b/drivers/usb/serial/mos7840.c index c1505c3c557a..c8542356898b 100644 --- a/drivers/usb/serial/mos7840.c +++ b/drivers/usb/serial/mos7840.c @@ -235,12 +235,10 @@ struct moschip_port { int port_num; /*Actual port number in the device(1,2,etc) */ struct urb *write_urb; /* write URB for this port */ struct urb *read_urb; /* read URB for this port */ - struct urb *int_urb; __u8 shadowLCR; /* last LCR value received */ __u8 shadowMCR; /* last MCR value received */ char open; char open_ports; - char zombie; wait_queue_head_t wait_chase; /* for handling sleeping while waiting for chase to finish */ wait_queue_head_t delta_msr_wait; /* for handling sleeping while waiting for msr change to happen */ int delta_msr_cond; @@ -505,7 +503,6 @@ static void mos7840_control_callback(struct urb *urb) unsigned char *data; struct moschip_port *mos7840_port; __u8 regval = 0x0; - int result = 0; int status = urb->status; mos7840_port = urb->context; @@ -524,7 +521,7 @@ static void mos7840_control_callback(struct urb *urb) default: dbg("%s - nonzero urb status received: %d", __func__, status); - goto exit; + return; } dbg("%s urb buffer size is %d", __func__, urb->actual_length); @@ -537,17 +534,6 @@ static void mos7840_control_callback(struct urb *urb) mos7840_handle_new_msr(mos7840_port, regval); else if (mos7840_port->MsrLsr == 1) mos7840_handle_new_lsr(mos7840_port, regval); - -exit: - spin_lock(&mos7840_port->pool_lock); - if (!mos7840_port->zombie) - result = usb_submit_urb(mos7840_port->int_urb, GFP_ATOMIC); - spin_unlock(&mos7840_port->pool_lock); - if (result) { - dev_err(&urb->dev->dev, - "%s - Error %d submitting interrupt urb\n", - __func__, result); - } } static int 
mos7840_get_reg(struct moschip_port *mcs, __u16 Wval, __u16 reg, @@ -655,14 +641,7 @@ static void mos7840_interrupt_callback(struct urb *urb) wreg = MODEM_STATUS_REGISTER; break; } - spin_lock(&mos7840_port->pool_lock); - if (!mos7840_port->zombie) { - rv = mos7840_get_reg(mos7840_port, wval, wreg, &Data); - } else { - spin_unlock(&mos7840_port->pool_lock); - return; - } - spin_unlock(&mos7840_port->pool_lock); + rv = mos7840_get_reg(mos7840_port, wval, wreg, &Data); } } } @@ -2590,7 +2569,6 @@ error: kfree(mos7840_port->ctrl_buf); usb_free_urb(mos7840_port->control_urb); kfree(mos7840_port); - serial->port[i] = NULL; } return status; } @@ -2603,7 +2581,6 @@ error: static void mos7840_disconnect(struct usb_serial *serial) { int i; - unsigned long flags; struct moschip_port *mos7840_port; dbg("%s", " disconnect :entering.........."); @@ -2621,9 +2598,6 @@ static void mos7840_disconnect(struct usb_serial *serial) mos7840_port = mos7840_get_port_private(serial->port[i]); dbg ("mos7840_port %d = %p", i, mos7840_port); if (mos7840_port) { - spin_lock_irqsave(&mos7840_port->pool_lock, flags); - mos7840_port->zombie = 1; - spin_unlock_irqrestore(&mos7840_port->pool_lock, flags); usb_kill_urb(mos7840_port->control_urb); } } @@ -2657,6 +2631,7 @@ static void mos7840_release(struct usb_serial *serial) mos7840_port = mos7840_get_port_private(serial->port[i]); dbg("mos7840_port %d = %p", i, mos7840_port); if (mos7840_port) { + usb_free_urb(mos7840_port->control_urb); kfree(mos7840_port->ctrl_buf); kfree(mos7840_port->dr); kfree(mos7840_port); diff --git a/drivers/usb/serial/opticon.c b/drivers/usb/serial/opticon.c index 82cc9d202b83..1f850065d159 100644 --- a/drivers/usb/serial/opticon.c +++ b/drivers/usb/serial/opticon.c @@ -160,7 +160,11 @@ static int send_control_msg(struct usb_serial_port *port, u8 requesttype, { struct usb_serial *serial = port->serial; int retval; - u8 buffer[2]; + u8 *buffer; + + buffer = kzalloc(1, GFP_KERNEL); + if (!buffer) + return -ENOMEM; buffer[0] = val; /* Send the message to the vendor control endpoint @@ -169,6 +173,7 @@ static int send_control_msg(struct usb_serial_port *port, u8 requesttype, requesttype, USB_DIR_OUT|USB_TYPE_VENDOR|USB_RECIP_INTERFACE, 0, 0, buffer, 1, 0); + kfree(buffer); return retval; } @@ -292,7 +297,7 @@ static int opticon_write(struct tty_struct *tty, struct usb_serial_port *port, if (!dr) { dev_err(&port->dev, "out of memory\n"); count = -ENOMEM; - goto error; + goto error_no_dr; } dr->bRequestType = USB_TYPE_VENDOR | USB_RECIP_INTERFACE | USB_DIR_OUT; @@ -322,6 +327,8 @@ static int opticon_write(struct tty_struct *tty, struct usb_serial_port *port, return count; error: + kfree(dr); +error_no_dr: usb_free_urb(urb); error_no_urb: kfree(buffer); diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c index ee693ccfdeb6..539247b934aa 100644 --- a/drivers/usb/serial/option.c +++ b/drivers/usb/serial/option.c @@ -80,6 +80,7 @@ static void option_instat_callback(struct urb *urb); #define OPTION_PRODUCT_GTM380_MODEM 0x7201 #define HUAWEI_VENDOR_ID 0x12D1 +#define HUAWEI_PRODUCT_E173 0x140C #define HUAWEI_PRODUCT_K4505 0x1464 #define HUAWEI_PRODUCT_K3765 0x1465 #define HUAWEI_PRODUCT_K4605 0x14C6 @@ -157,6 +158,7 @@ static void option_instat_callback(struct urb *urb); #define NOVATELWIRELESS_PRODUCT_EVDO_EMBEDDED_HIGHSPEED 0x8001 #define NOVATELWIRELESS_PRODUCT_HSPA_EMBEDDED_FULLSPEED 0x9000 #define NOVATELWIRELESS_PRODUCT_HSPA_EMBEDDED_HIGHSPEED 0x9001 +#define NOVATELWIRELESS_PRODUCT_E362 0x9010 #define NOVATELWIRELESS_PRODUCT_G1 
0xA001 #define NOVATELWIRELESS_PRODUCT_G1_M 0xA002 #define NOVATELWIRELESS_PRODUCT_G2 0xA010 @@ -192,6 +194,9 @@ static void option_instat_callback(struct urb *urb); #define DELL_PRODUCT_5730_MINICARD_TELUS 0x8181 #define DELL_PRODUCT_5730_MINICARD_VZW 0x8182 +#define DELL_PRODUCT_5800_MINICARD_VZW 0x8195 /* Novatel E362 */ +#define DELL_PRODUCT_5800_V2_MINICARD_VZW 0x8196 /* Novatel E362 */ + #define KYOCERA_VENDOR_ID 0x0c88 #define KYOCERA_PRODUCT_KPC650 0x17da #define KYOCERA_PRODUCT_KPC680 0x180a @@ -237,6 +242,7 @@ static void option_instat_callback(struct urb *urb); #define TELIT_PRODUCT_CC864_DUAL 0x1005 #define TELIT_PRODUCT_CC864_SINGLE 0x1006 #define TELIT_PRODUCT_DE910_DUAL 0x1010 +#define TELIT_PRODUCT_LE920 0x1200 /* ZTE PRODUCTS */ #define ZTE_VENDOR_ID 0x19d2 @@ -282,6 +288,8 @@ static void option_instat_callback(struct urb *urb); /* ALCATEL PRODUCTS */ #define ALCATEL_VENDOR_ID 0x1bbb #define ALCATEL_PRODUCT_X060S_X200 0x0000 +#define ALCATEL_PRODUCT_X220_X500D 0x0017 +#define ALCATEL_PRODUCT_L100V 0x011e #define PIRELLI_VENDOR_ID 0x1266 #define PIRELLI_PRODUCT_C100_1 0x1002 @@ -423,9 +431,12 @@ static void option_instat_callback(struct urb *urb); #define MEDIATEK_VENDOR_ID 0x0e8d #define MEDIATEK_PRODUCT_DC_1COM 0x00a0 #define MEDIATEK_PRODUCT_DC_4COM 0x00a5 +#define MEDIATEK_PRODUCT_DC_4COM2 0x00a7 #define MEDIATEK_PRODUCT_DC_5COM 0x00a4 #define MEDIATEK_PRODUCT_7208_1COM 0x7101 #define MEDIATEK_PRODUCT_7208_2COM 0x7102 +#define MEDIATEK_PRODUCT_7103_2COM 0x7103 +#define MEDIATEK_PRODUCT_7106_2COM 0x7106 #define MEDIATEK_PRODUCT_FP_1COM 0x0003 #define MEDIATEK_PRODUCT_FP_2COM 0x0023 #define MEDIATEK_PRODUCT_FPDC_1COM 0x0043 @@ -435,6 +446,18 @@ static void option_instat_callback(struct urb *urb); #define CELLIENT_VENDOR_ID 0x2692 #define CELLIENT_PRODUCT_MEN200 0x9005 +/* Hyundai Petatel Inc. 
products */ +#define PETATEL_VENDOR_ID 0x1ff4 +#define PETATEL_PRODUCT_NP10T 0x600e + +/* TP-LINK Incorporated products */ +#define TPLINK_VENDOR_ID 0x2357 +#define TPLINK_PRODUCT_MA180 0x0201 + +/* Changhong products */ +#define CHANGHONG_VENDOR_ID 0x2077 +#define CHANGHONG_PRODUCT_CH690 0x7001 + /* some devices interfaces need special handling due to a number of reasons */ enum option_blacklist_reason { OPTION_BLACKLIST_NONE = 0, @@ -456,6 +479,7 @@ static const struct option_blacklist_info four_g_w14_blacklist = { static const struct option_blacklist_info alcatel_x200_blacklist = { .sendsetup = BIT(0) | BIT(1), + .reserved = BIT(4), }; static const struct option_blacklist_info zte_0037_blacklist = { @@ -503,11 +527,24 @@ static const struct option_blacklist_info net_intf5_blacklist = { .reserved = BIT(5), }; +static const struct option_blacklist_info net_intf6_blacklist = { + .reserved = BIT(6), +}; + static const struct option_blacklist_info zte_mf626_blacklist = { .sendsetup = BIT(0) | BIT(1), .reserved = BIT(4), }; +static const struct option_blacklist_info zte_1255_blacklist = { + .reserved = BIT(3) | BIT(4), +}; + +static const struct option_blacklist_info telit_le920_blacklist = { + .sendsetup = BIT(0), + .reserved = BIT(1) | BIT(5), +}; + static const struct usb_device_id option_ids[] = { { USB_DEVICE(OPTION_VENDOR_ID, OPTION_PRODUCT_COLT) }, { USB_DEVICE(OPTION_VENDOR_ID, OPTION_PRODUCT_RICOLA) }, @@ -539,6 +576,14 @@ static const struct usb_device_id option_ids[] = { { USB_DEVICE(QUANTA_VENDOR_ID, QUANTA_PRODUCT_GLX) }, { USB_DEVICE(QUANTA_VENDOR_ID, QUANTA_PRODUCT_GKE) }, { USB_DEVICE(QUANTA_VENDOR_ID, QUANTA_PRODUCT_GLE) }, + { USB_DEVICE(QUANTA_VENDOR_ID, 0xea42), + .driver_info = (kernel_ulong_t)&net_intf4_blacklist }, + { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0x1c05, USB_CLASS_COMM, 0x02, 0xff) }, + { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0x1c23, USB_CLASS_COMM, 0x02, 0xff) }, + { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E173, 0xff, 0xff, 0xff), + .driver_info = (kernel_ulong_t) &net_intf1_blacklist }, + { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0x1441, USB_CLASS_COMM, 0x02, 0xff) }, + { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0x1442, USB_CLASS_COMM, 0x02, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K4505, 0xff, 0xff, 0xff), .driver_info = (kernel_ulong_t) &huawei_cdc12_blacklist }, { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K3765, 0xff, 0xff, 0xff), @@ -697,6 +742,7 @@ static const struct usb_device_id option_ids[] = { { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_G2) }, /* Novatel Ovation MC551 a.k.a. 
Verizon USB551L */ { USB_DEVICE_AND_INTERFACE_INFO(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_MC551, 0xff, 0xff, 0xff) }, + { USB_DEVICE_AND_INTERFACE_INFO(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_E362, 0xff, 0xff, 0xff) }, { USB_DEVICE(AMOI_VENDOR_ID, AMOI_PRODUCT_H01) }, { USB_DEVICE(AMOI_VENDOR_ID, AMOI_PRODUCT_H01A) }, @@ -719,6 +765,8 @@ static const struct usb_device_id option_ids[] = { { USB_DEVICE(DELL_VENDOR_ID, DELL_PRODUCT_5730_MINICARD_SPRINT) }, /* Dell Wireless 5730 Mobile Broadband EVDO/HSPA Mini-Card */ { USB_DEVICE(DELL_VENDOR_ID, DELL_PRODUCT_5730_MINICARD_TELUS) }, /* Dell Wireless 5730 Mobile Broadband EVDO/HSPA Mini-Card */ { USB_DEVICE(DELL_VENDOR_ID, DELL_PRODUCT_5730_MINICARD_VZW) }, /* Dell Wireless 5730 Mobile Broadband EVDO/HSPA Mini-Card */ + { USB_DEVICE_AND_INTERFACE_INFO(DELL_VENDOR_ID, DELL_PRODUCT_5800_MINICARD_VZW, 0xff, 0xff, 0xff) }, + { USB_DEVICE_AND_INTERFACE_INFO(DELL_VENDOR_ID, DELL_PRODUCT_5800_V2_MINICARD_VZW, 0xff, 0xff, 0xff) }, { USB_DEVICE(ANYDATA_VENDOR_ID, ANYDATA_PRODUCT_ADU_E100A) }, /* ADU-E100, ADU-310 */ { USB_DEVICE(ANYDATA_VENDOR_ID, ANYDATA_PRODUCT_ADU_500A) }, { USB_DEVICE(ANYDATA_VENDOR_ID, ANYDATA_PRODUCT_ADU_620UW) }, @@ -753,6 +801,8 @@ static const struct usb_device_id option_ids[] = { { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_CC864_DUAL) }, { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_CC864_SINGLE) }, { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_DE910_DUAL) }, + { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE920), + .driver_info = (kernel_ulong_t)&telit_le920_blacklist }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_MF622, 0xff, 0xff, 0xff) }, /* ZTE WCDMA products */ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0002, 0xff, 0xff, 0xff), .driver_info = (kernel_ulong_t)&net_intf1_blacklist }, @@ -853,14 +903,24 @@ static const struct usb_device_id option_ids[] = { { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0113, 0xff, 0xff, 0xff), .driver_info = (kernel_ulong_t)&net_intf5_blacklist }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0117, 0xff, 0xff, 0xff) }, - { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0118, 0xff, 0xff, 0xff) }, - { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0121, 0xff, 0xff, 0xff) }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0118, 0xff, 0xff, 0xff), + .driver_info = (kernel_ulong_t)&net_intf5_blacklist }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0121, 0xff, 0xff, 0xff), + .driver_info = (kernel_ulong_t)&net_intf5_blacklist }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0122, 0xff, 0xff, 0xff) }, - { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0123, 0xff, 0xff, 0xff) }, - { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0124, 0xff, 0xff, 0xff) }, - { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0125, 0xff, 0xff, 0xff) }, - { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0126, 0xff, 0xff, 0xff) }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0123, 0xff, 0xff, 0xff), + .driver_info = (kernel_ulong_t)&net_intf4_blacklist }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0124, 0xff, 0xff, 0xff), + .driver_info = (kernel_ulong_t)&net_intf5_blacklist }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0125, 0xff, 0xff, 0xff), + .driver_info = (kernel_ulong_t)&net_intf6_blacklist }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0126, 0xff, 0xff, 0xff), + .driver_info = (kernel_ulong_t)&net_intf5_blacklist }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0128, 0xff, 0xff, 0xff) }, + { 
USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0135, 0xff, 0xff, 0xff) }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0136, 0xff, 0xff, 0xff) }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0137, 0xff, 0xff, 0xff) }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0139, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0142, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0143, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0144, 0xff, 0xff, 0xff) }, @@ -870,8 +930,10 @@ static const struct usb_device_id option_ids[] = { { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0153, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0155, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0156, 0xff, 0xff, 0xff) }, - { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0157, 0xff, 0xff, 0xff) }, - { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0158, 0xff, 0xff, 0xff) }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0157, 0xff, 0xff, 0xff), + .driver_info = (kernel_ulong_t)&net_intf5_blacklist }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0158, 0xff, 0xff, 0xff), + .driver_info = (kernel_ulong_t)&net_intf3_blacklist }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0159, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0161, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0162, 0xff, 0xff, 0xff) }, @@ -879,15 +941,38 @@ static const struct usb_device_id option_ids[] = { { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0165, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0167, 0xff, 0xff, 0xff), .driver_info = (kernel_ulong_t)&net_intf4_blacklist }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0189, 0xff, 0xff, 0xff) }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0191, 0xff, 0xff, 0xff), /* ZTE EuFi890 */ + .driver_info = (kernel_ulong_t)&net_intf4_blacklist }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0196, 0xff, 0xff, 0xff) }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0197, 0xff, 0xff, 0xff) }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0199, 0xff, 0xff, 0xff), /* ZTE MF820S */ + .driver_info = (kernel_ulong_t)&net_intf1_blacklist }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0200, 0xff, 0xff, 0xff) }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0201, 0xff, 0xff, 0xff) }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0254, 0xff, 0xff, 0xff) }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0257, 0xff, 0xff, 0xff), /* ZTE MF821 */ + .driver_info = (kernel_ulong_t)&net_intf3_blacklist }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0265, 0xff, 0xff, 0xff), /* ONDA MT8205 */ + .driver_info = (kernel_ulong_t)&net_intf4_blacklist }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0284, 0xff, 0xff, 0xff), /* ZTE MF880 */ + .driver_info = (kernel_ulong_t)&net_intf4_blacklist }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0317, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0326, 0xff, 0xff, 0xff), .driver_info = (kernel_ulong_t)&net_intf4_blacklist }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0330, 0xff, 0xff, 0xff) }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0395, 0xff, 0xff, 0xff) }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0414, 0xff, 0xff, 0xff) }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0417, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 
0x1008, 0xff, 0xff, 0xff), .driver_info = (kernel_ulong_t)&net_intf4_blacklist }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1010, 0xff, 0xff, 0xff), .driver_info = (kernel_ulong_t)&net_intf4_blacklist }, - { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1012, 0xff, 0xff, 0xff) }, - { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1018, 0xff, 0xff, 0xff), - .driver_info = (kernel_ulong_t)&net_intf3_blacklist }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1012, 0xff, 0xff, 0xff), + .driver_info = (kernel_ulong_t)&net_intf4_blacklist }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1018, 0xff, 0xff, 0xff) }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1021, 0xff, 0xff, 0xff), + .driver_info = (kernel_ulong_t)&net_intf2_blacklist }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1057, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1058, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1059, 0xff, 0xff, 0xff) }, @@ -1003,18 +1088,24 @@ static const struct usb_device_id option_ids[] = { { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1169, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1170, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1244, 0xff, 0xff, 0xff) }, - { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1245, 0xff, 0xff, 0xff) }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1245, 0xff, 0xff, 0xff), + .driver_info = (kernel_ulong_t)&net_intf4_blacklist }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1246, 0xff, 0xff, 0xff) }, - { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1247, 0xff, 0xff, 0xff) }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1247, 0xff, 0xff, 0xff), + .driver_info = (kernel_ulong_t)&net_intf4_blacklist }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1248, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1249, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1250, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1251, 0xff, 0xff, 0xff) }, - { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1252, 0xff, 0xff, 0xff) }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1252, 0xff, 0xff, 0xff), + .driver_info = (kernel_ulong_t)&net_intf4_blacklist }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1253, 0xff, 0xff, 0xff) }, - { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1254, 0xff, 0xff, 0xff) }, - { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1255, 0xff, 0xff, 0xff) }, - { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1256, 0xff, 0xff, 0xff) }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1254, 0xff, 0xff, 0xff), + .driver_info = (kernel_ulong_t)&net_intf4_blacklist }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1255, 0xff, 0xff, 0xff), + .driver_info = (kernel_ulong_t)&zte_1255_blacklist }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1256, 0xff, 0xff, 0xff), + .driver_info = (kernel_ulong_t)&net_intf4_blacklist }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1257, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1258, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1259, 0xff, 0xff, 0xff) }, @@ -1059,8 +1150,20 @@ static const struct usb_device_id option_ids[] = { { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1298, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1299, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1300, 0xff, 0xff, 0xff) }, + { 
USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1301, 0xff, 0xff, 0xff) }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1302, 0xff, 0xff, 0xff) }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1303, 0xff, 0xff, 0xff) }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1333, 0xff, 0xff, 0xff) }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1401, 0xff, 0xff, 0xff), + .driver_info = (kernel_ulong_t)&net_intf2_blacklist }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1402, 0xff, 0xff, 0xff), .driver_info = (kernel_ulong_t)&net_intf2_blacklist }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1424, 0xff, 0xff, 0xff), + .driver_info = (kernel_ulong_t)&net_intf2_blacklist }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1425, 0xff, 0xff, 0xff), + .driver_info = (kernel_ulong_t)&net_intf2_blacklist }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1426, 0xff, 0xff, 0xff), /* ZTE MF91 */ + .driver_info = (kernel_ulong_t)&net_intf2_blacklist }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x2002, 0xff, 0xff, 0xff), .driver_info = (kernel_ulong_t)&zte_k3765_z_blacklist }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x2003, 0xff, 0xff, 0xff) }, @@ -1072,15 +1175,21 @@ static const struct usb_device_id option_ids[] = { { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0070, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0073, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0094, 0xff, 0xff, 0xff) }, - { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0130, 0xff, 0xff, 0xff) }, - { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0133, 0xff, 0xff, 0xff) }, - { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0141, 0xff, 0xff, 0xff) }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0130, 0xff, 0xff, 0xff), + .driver_info = (kernel_ulong_t)&net_intf1_blacklist }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0133, 0xff, 0xff, 0xff), + .driver_info = (kernel_ulong_t)&net_intf3_blacklist }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0141, 0xff, 0xff, 0xff), + .driver_info = (kernel_ulong_t)&net_intf5_blacklist }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0147, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0152, 0xff, 0xff, 0xff) }, - { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0168, 0xff, 0xff, 0xff) }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0168, 0xff, 0xff, 0xff), + .driver_info = (kernel_ulong_t)&net_intf4_blacklist }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0170, 0xff, 0xff, 0xff) }, - { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0176, 0xff, 0xff, 0xff) }, - { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0178, 0xff, 0xff, 0xff) }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0176, 0xff, 0xff, 0xff), + .driver_info = (kernel_ulong_t)&net_intf3_blacklist }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0178, 0xff, 0xff, 0xff), + .driver_info = (kernel_ulong_t)&net_intf3_blacklist }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_CDMA_TECH, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_AC8710, 0xff, 0xff, 0xff) }, @@ -1092,6 +1201,10 @@ static const struct usb_device_id option_ids[] = { .driver_info = (kernel_ulong_t)&zte_ad3812_z_blacklist }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_MC2716, 0xff, 0xff, 0xff), .driver_info = (kernel_ulong_t)&zte_mc2716_z_blacklist }, + { USB_VENDOR_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff, 0x02, 0x01) }, + { 
USB_VENDOR_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff, 0x02, 0x05) }, + { USB_VENDOR_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff, 0x86, 0x10) }, + { USB_DEVICE(BENQ_VENDOR_ID, BENQ_PRODUCT_H10) }, { USB_DEVICE(DLINK_VENDOR_ID, DLINK_PRODUCT_DWM_652) }, { USB_DEVICE(ALINK_VENDOR_ID, DLINK_PRODUCT_DWM_652_U5) }, /* Yes, ALINK_VENDOR_ID */ @@ -1109,6 +1222,16 @@ static const struct usb_device_id option_ids[] = { { USB_DEVICE(ALCATEL_VENDOR_ID, ALCATEL_PRODUCT_X060S_X200), .driver_info = (kernel_ulong_t)&alcatel_x200_blacklist }, + { USB_DEVICE(ALCATEL_VENDOR_ID, ALCATEL_PRODUCT_X220_X500D), + .driver_info = (kernel_ulong_t)&net_intf6_blacklist }, + { USB_DEVICE(ALCATEL_VENDOR_ID, 0x0052), + .driver_info = (kernel_ulong_t)&net_intf6_blacklist }, + { USB_DEVICE(ALCATEL_VENDOR_ID, 0x00b6), + .driver_info = (kernel_ulong_t)&net_intf3_blacklist }, + { USB_DEVICE(ALCATEL_VENDOR_ID, 0x00b7), + .driver_info = (kernel_ulong_t)&net_intf5_blacklist }, + { USB_DEVICE(ALCATEL_VENDOR_ID, ALCATEL_PRODUCT_L100V), + .driver_info = (kernel_ulong_t)&net_intf4_blacklist }, { USB_DEVICE(AIRPLUS_VENDOR_ID, AIRPLUS_PRODUCT_MCD650) }, { USB_DEVICE(TLAYTECH_VENDOR_ID, TLAYTECH_PRODUCT_TEU800) }, { USB_DEVICE(LONGCHEER_VENDOR_ID, FOUR_G_SYSTEMS_PRODUCT_W14), @@ -1213,7 +1336,15 @@ static const struct usb_device_id option_ids[] = { { USB_DEVICE_AND_INTERFACE_INFO(MEDIATEK_VENDOR_ID, MEDIATEK_PRODUCT_FP_2COM, 0x0a, 0x00, 0x00) }, { USB_DEVICE_AND_INTERFACE_INFO(MEDIATEK_VENDOR_ID, MEDIATEK_PRODUCT_FPDC_1COM, 0x0a, 0x00, 0x00) }, { USB_DEVICE_AND_INTERFACE_INFO(MEDIATEK_VENDOR_ID, MEDIATEK_PRODUCT_FPDC_2COM, 0x0a, 0x00, 0x00) }, + { USB_DEVICE_AND_INTERFACE_INFO(MEDIATEK_VENDOR_ID, MEDIATEK_PRODUCT_7103_2COM, 0xff, 0x00, 0x00) }, + { USB_DEVICE_AND_INTERFACE_INFO(MEDIATEK_VENDOR_ID, MEDIATEK_PRODUCT_7106_2COM, 0x02, 0x02, 0x01) }, + { USB_DEVICE_AND_INTERFACE_INFO(MEDIATEK_VENDOR_ID, MEDIATEK_PRODUCT_DC_4COM2, 0xff, 0x02, 0x01) }, + { USB_DEVICE_AND_INTERFACE_INFO(MEDIATEK_VENDOR_ID, MEDIATEK_PRODUCT_DC_4COM2, 0xff, 0x00, 0x00) }, { USB_DEVICE(CELLIENT_VENDOR_ID, CELLIENT_PRODUCT_MEN200) }, + { USB_DEVICE(PETATEL_VENDOR_ID, PETATEL_PRODUCT_NP10T) }, + { USB_DEVICE(TPLINK_VENDOR_ID, TPLINK_PRODUCT_MA180), + .driver_info = (kernel_ulong_t)&net_intf4_blacklist }, + { USB_DEVICE(CHANGHONG_VENDOR_ID, CHANGHONG_PRODUCT_CH690) }, { } /* Terminating entry */ }; MODULE_DEVICE_TABLE(usb, option_ids); diff --git a/drivers/usb/serial/qcaux.c b/drivers/usb/serial/qcaux.c index 966245680f55..b223381da32a 100644 --- a/drivers/usb/serial/qcaux.c +++ b/drivers/usb/serial/qcaux.c @@ -36,8 +36,6 @@ #define UTSTARCOM_PRODUCT_UM175_V1 0x3712 #define UTSTARCOM_PRODUCT_UM175_V2 0x3714 #define UTSTARCOM_PRODUCT_UM175_ALLTEL 0x3715 -#define PANTECH_PRODUCT_UML190_VZW 0x3716 -#define PANTECH_PRODUCT_UML290_VZW 0x3718 /* CMOTECH devices */ #define CMOTECH_VENDOR_ID 0x16d8 @@ -68,11 +66,9 @@ static struct usb_device_id id_table[] = { { USB_DEVICE_AND_INTERFACE_INFO(LG_VENDOR_ID, LG_PRODUCT_VX4400_6000, 0xff, 0xff, 0x00) }, { USB_DEVICE_AND_INTERFACE_INFO(SANYO_VENDOR_ID, SANYO_PRODUCT_KATANA_LX, 0xff, 0xff, 0x00) }, { USB_DEVICE_AND_INTERFACE_INFO(SAMSUNG_VENDOR_ID, SAMSUNG_PRODUCT_U520, 0xff, 0x00, 0x00) }, - { USB_DEVICE_AND_INTERFACE_INFO(UTSTARCOM_VENDOR_ID, PANTECH_PRODUCT_UML190_VZW, 0xff, 0xff, 0xff) }, - { USB_DEVICE_AND_INTERFACE_INFO(UTSTARCOM_VENDOR_ID, PANTECH_PRODUCT_UML190_VZW, 0xff, 0xfe, 0xff) }, - { USB_DEVICE_AND_INTERFACE_INFO(UTSTARCOM_VENDOR_ID, PANTECH_PRODUCT_UML290_VZW, 0xff, 0xfd, 0xff) }, /* NMEA */ - { 
USB_DEVICE_AND_INTERFACE_INFO(UTSTARCOM_VENDOR_ID, PANTECH_PRODUCT_UML290_VZW, 0xff, 0xfe, 0xff) }, /* WMC */ - { USB_DEVICE_AND_INTERFACE_INFO(UTSTARCOM_VENDOR_ID, PANTECH_PRODUCT_UML290_VZW, 0xff, 0xff, 0xff) }, /* DIAG */ + { USB_VENDOR_AND_INTERFACE_INFO(UTSTARCOM_VENDOR_ID, 0xff, 0xfd, 0xff) }, /* NMEA */ + { USB_VENDOR_AND_INTERFACE_INFO(UTSTARCOM_VENDOR_ID, 0xff, 0xfe, 0xff) }, /* WMC */ + { USB_VENDOR_AND_INTERFACE_INFO(UTSTARCOM_VENDOR_ID, 0xff, 0xff, 0xff) }, /* DIAG */ { }, }; MODULE_DEVICE_TABLE(usb, id_table); diff --git a/drivers/usb/serial/qcserial.c b/drivers/usb/serial/qcserial.c index 50b5371719f2..d46277d33073 100644 --- a/drivers/usb/serial/qcserial.c +++ b/drivers/usb/serial/qcserial.c @@ -55,6 +55,7 @@ static const struct usb_device_id id_table[] = { {DEVICE_G1K(0x05c6, 0x9221)}, /* Generic Gobi QDL device */ {DEVICE_G1K(0x05c6, 0x9231)}, /* Generic Gobi QDL device */ {DEVICE_G1K(0x1f45, 0x0001)}, /* Unknown Gobi QDL device */ + {DEVICE_G1K(0x1bc7, 0x900e)}, /* Telit Gobi QDL device */ /* Gobi 2000 devices */ {USB_DEVICE(0x1410, 0xa010)}, /* Novatel Gobi 2000 QDL device */ diff --git a/drivers/usb/serial/sierra.c b/drivers/usb/serial/sierra.c index 449bf6d31e2a..8ec15c2540b8 100644 --- a/drivers/usb/serial/sierra.c +++ b/drivers/usb/serial/sierra.c @@ -171,7 +171,6 @@ static int sierra_probe(struct usb_serial *serial, { int result = 0; struct usb_device *udev; - struct sierra_intf_private *data; u8 ifnum; udev = serial->dev; @@ -199,11 +198,6 @@ static int sierra_probe(struct usb_serial *serial, return -ENODEV; } - data = serial->private = kzalloc(sizeof(struct sierra_intf_private), GFP_KERNEL); - if (!data) - return -ENOMEM; - spin_lock_init(&data->susp_lock); - return result; } @@ -896,24 +890,19 @@ static int sierra_open(struct tty_struct *tty, struct usb_serial_port *port) static void sierra_dtr_rts(struct usb_serial_port *port, int on) { - struct usb_serial *serial = port->serial; struct sierra_port_private *portdata; portdata = usb_get_serial_port_data(port); portdata->rts_state = on; portdata->dtr_state = on; - if (serial->dev) { - mutex_lock(&serial->disc_mutex); - if (!serial->disconnected) - sierra_send_setup(port); - mutex_unlock(&serial->disc_mutex); - } + sierra_send_setup(port); } static int sierra_startup(struct usb_serial *serial) { struct usb_serial_port *port; + struct sierra_intf_private *intfdata; struct sierra_port_private *portdata; struct sierra_iface_info *himemoryp = NULL; int i; @@ -921,6 +910,14 @@ static int sierra_startup(struct usb_serial *serial) dev_dbg(&serial->dev->dev, "%s\n", __func__); + intfdata = kzalloc(sizeof(*intfdata), GFP_KERNEL); + if (!intfdata) + return -ENOMEM; + + spin_lock_init(&intfdata->susp_lock); + + usb_set_serial_data(serial, intfdata); + /* Set Device mode to D0 */ sierra_set_power_state(serial->dev, 0x0000); @@ -936,7 +933,7 @@ static int sierra_startup(struct usb_serial *serial) dev_dbg(&port->dev, "%s: kmalloc for " "sierra_port_private (%d) failed!\n", __func__, i); - return -ENOMEM; + goto err; } spin_lock_init(&portdata->lock); init_usb_anchor(&portdata->active); @@ -973,6 +970,14 @@ static int sierra_startup(struct usb_serial *serial) } return 0; +err: + for (--i; i >= 0; --i) { + portdata = usb_get_serial_port_data(serial->port[i]); + kfree(portdata); + } + kfree(intfdata); + + return -ENOMEM; } static void sierra_release(struct usb_serial *serial) @@ -992,6 +997,7 @@ static void sierra_release(struct usb_serial *serial) continue; kfree(portdata); } + kfree(serial->private); } #ifdef CONFIG_PM diff --git 
a/drivers/usb/serial/ssu100.c b/drivers/usb/serial/ssu100.c index 3cdc8a52de44..b8db69d96df6 100644 --- a/drivers/usb/serial/ssu100.c +++ b/drivers/usb/serial/ssu100.c @@ -532,19 +532,16 @@ static void ssu100_dtr_rts(struct usb_serial_port *port, int on) dbg("%s\n", __func__); - mutex_lock(&port->serial->disc_mutex); - if (!port->serial->disconnected) { - /* Disable flow control */ - if (!on && - ssu100_setregister(dev, 0, UART_MCR, 0) < 0) + /* Disable flow control */ + if (!on) { + if (ssu100_setregister(dev, 0, UART_MCR, 0) < 0) dev_err(&port->dev, "error from flowcontrol urb\n"); - /* drop RTS and DTR */ - if (on) - set_mctrl(dev, TIOCM_DTR | TIOCM_RTS); - else - clear_mctrl(dev, TIOCM_DTR | TIOCM_RTS); } - mutex_unlock(&port->serial->disc_mutex); + /* drop RTS and DTR */ + if (on) + set_mctrl(dev, TIOCM_DTR | TIOCM_RTS); + else + clear_mctrl(dev, TIOCM_DTR | TIOCM_RTS); } static void ssu100_update_msr(struct usb_serial_port *port, u8 msr) diff --git a/drivers/usb/serial/usb-serial.c b/drivers/usb/serial/usb-serial.c index bcf261778d07..e4b199cbc97b 100644 --- a/drivers/usb/serial/usb-serial.c +++ b/drivers/usb/serial/usb-serial.c @@ -699,10 +699,20 @@ static int serial_carrier_raised(struct tty_port *port) static void serial_dtr_rts(struct tty_port *port, int on) { struct usb_serial_port *p = container_of(port, struct usb_serial_port, port); - struct usb_serial_driver *drv = p->serial->type; + struct usb_serial *serial = p->serial; + struct usb_serial_driver *drv = serial->type; - if (drv->dtr_rts) + if (!drv->dtr_rts) + return; + /* + * Work-around bug in the tty-layer which can result in dtr_rts + * being called after a disconnect (and tty_unregister_device + * has returned). Remove once bug has been squashed. + */ + mutex_lock(&serial->disc_mutex); + if (!serial->disconnected) drv->dtr_rts(p, on); + mutex_unlock(&serial->disc_mutex); } static const struct tty_port_operations serial_port_ops = { @@ -768,7 +778,7 @@ int usb_serial_probe(struct usb_interface *interface, if (retval) { dbg("sub driver rejected device"); - kfree(serial); + usb_serial_put(serial); module_put(type->driver.owner); return retval; } @@ -840,7 +850,7 @@ int usb_serial_probe(struct usb_interface *interface, */ if (num_bulk_in == 0 || num_bulk_out == 0) { dev_info(&interface->dev, "PL-2303 hack: descriptors matched but endpoints did not\n"); - kfree(serial); + usb_serial_put(serial); module_put(type->driver.owner); return -ENODEV; } @@ -854,7 +864,7 @@ int usb_serial_probe(struct usb_interface *interface, if (num_ports == 0) { dev_err(&interface->dev, "Generic device with no bulk out, not allowed.\n"); - kfree(serial); + usb_serial_put(serial); module_put(type->driver.owner); return -EIO; } diff --git a/drivers/usb/serial/usb_wwan.c b/drivers/usb/serial/usb_wwan.c index c88657dd31c8..820436ec60e9 100644 --- a/drivers/usb/serial/usb_wwan.c +++ b/drivers/usb/serial/usb_wwan.c @@ -41,7 +41,6 @@ static bool debug; void usb_wwan_dtr_rts(struct usb_serial_port *port, int on) { - struct usb_serial *serial = port->serial; struct usb_wwan_port_private *portdata; struct usb_wwan_intf_private *intfdata; @@ -54,12 +53,11 @@ void usb_wwan_dtr_rts(struct usb_serial_port *port, int on) return; portdata = usb_get_serial_port_data(port); - mutex_lock(&serial->disc_mutex); + /* FIXME: locking */ portdata->rts_state = on; portdata->dtr_state = on; - if (serial->dev) - intfdata->send_setup(port); - mutex_unlock(&serial->disc_mutex); + + intfdata->send_setup(port); } EXPORT_SYMBOL(usb_wwan_dtr_rts); diff --git 
a/drivers/usb/serial/whiteheat.c b/drivers/usb/serial/whiteheat.c index 407e23c87946..171226ab56f2 100644 --- a/drivers/usb/serial/whiteheat.c +++ b/drivers/usb/serial/whiteheat.c @@ -525,6 +525,7 @@ no_firmware: "%s: please contact support@connecttech.com\n", serial->type->description); kfree(result); + kfree(command); return -ENODEV; no_command_private: diff --git a/drivers/usb/storage/Kconfig b/drivers/usb/storage/Kconfig index 7691c866637b..685edc872654 100644 --- a/drivers/usb/storage/Kconfig +++ b/drivers/usb/storage/Kconfig @@ -203,7 +203,7 @@ config USB_STORAGE_ENE_UB6250 config USB_UAS tristate "USB Attached SCSI" - depends on USB && SCSI + depends on USB && SCSI && BROKEN help The USB Attached SCSI protocol is supported by some USB storage devices. It permits higher performance by supporting diff --git a/drivers/usb/storage/initializers.c b/drivers/usb/storage/initializers.c index 105d900150c1..7ab9046ae0ec 100644 --- a/drivers/usb/storage/initializers.c +++ b/drivers/usb/storage/initializers.c @@ -92,8 +92,8 @@ int usb_stor_ucr61s2b_init(struct us_data *us) return 0; } -/* This places the HUAWEI E220 devices in multi-port mode */ -int usb_stor_huawei_e220_init(struct us_data *us) +/* This places the HUAWEI usb dongles in multi-port mode */ +static int usb_stor_huawei_feature_init(struct us_data *us) { int result; @@ -104,3 +104,75 @@ US_DEBUGP("Huawei mode set result is %d\n", result); return 0; } + +/* + * It sends a SCSI switch command called 'rewind' to the Huawei dongle. + * When the dongle receives this command for the first time, + * it reboots immediately. After rebooting, it ignores this command, + * so it is unnecessary to read its response. + */ +static int usb_stor_huawei_scsi_init(struct us_data *us) +{ + int result = 0; + int act_len = 0; + struct bulk_cb_wrap *bcbw = (struct bulk_cb_wrap *) us->iobuf; + char rewind_cmd[] = {0x11, 0x06, 0x20, 0x00, 0x00, 0x01, 0x01, 0x00, + 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}; + + bcbw->Signature = cpu_to_le32(US_BULK_CB_SIGN); + bcbw->Tag = 0; + bcbw->DataTransferLength = 0; + bcbw->Flags = bcbw->Lun = 0; + bcbw->Length = sizeof(rewind_cmd); + memset(bcbw->CDB, 0, sizeof(bcbw->CDB)); + memcpy(bcbw->CDB, rewind_cmd, sizeof(rewind_cmd)); + + result = usb_stor_bulk_transfer_buf(us, us->send_bulk_pipe, bcbw, + US_BULK_CB_WRAP_LEN, &act_len); + US_DEBUGP("transfer actual length=%d, result=%d\n", act_len, result); + return result; +} + +/* + * It checks whether this is a supported Huawei USB dongle. + * Huawei assigns the following product IDs + * to all of its mobile broadband dongles, + * including new dongles in the future. + * So if the product ID is not included in this list, + * the device is not a Huawei mobile broadband dongle. + */ +static int usb_stor_huawei_dongles_pid(struct us_data *us) +{ + struct usb_interface_descriptor *idesc; + int idProduct; + + idesc = &us->pusb_intf->cur_altsetting->desc; + idProduct = le16_to_cpu(us->pusb_dev->descriptor.idProduct); + /* If the first interface is the CD-ROM port, + * the dongle is in single-port mode + * and a switch command needs to be sent.
*/ + if (idesc && idesc->bInterfaceNumber == 0) { + if ((idProduct == 0x1001) + || (idProduct == 0x1003) + || (idProduct == 0x1004) + || (idProduct >= 0x1401 && idProduct <= 0x1500) + || (idProduct >= 0x1505 && idProduct <= 0x1600) + || (idProduct >= 0x1c02 && idProduct <= 0x2202)) { + return 1; + } + } + return 0; +} + +int usb_stor_huawei_init(struct us_data *us) +{ + int result = 0; + + if (usb_stor_huawei_dongles_pid(us)) { + if (le16_to_cpu(us->pusb_dev->descriptor.idProduct) >= 0x1446) + result = usb_stor_huawei_scsi_init(us); + else + result = usb_stor_huawei_feature_init(us); + } + return result; +} diff --git a/drivers/usb/storage/initializers.h b/drivers/usb/storage/initializers.h index 529327fbb06b..5376d4fc76f0 100644 --- a/drivers/usb/storage/initializers.h +++ b/drivers/usb/storage/initializers.h @@ -46,5 +46,5 @@ int usb_stor_euscsi_init(struct us_data *us); * flash reader */ int usb_stor_ucr61s2b_init(struct us_data *us); -/* This places the HUAWEI E220 devices in multi-port mode */ -int usb_stor_huawei_e220_init(struct us_data *us); +/* This places the HUAWEI usb dongles in multi-port mode */ +int usb_stor_huawei_init(struct us_data *us); diff --git a/drivers/usb/storage/unusual_cypress.h b/drivers/usb/storage/unusual_cypress.h index 2c8553026222..65a6a75066a8 100644 --- a/drivers/usb/storage/unusual_cypress.h +++ b/drivers/usb/storage/unusual_cypress.h @@ -31,7 +31,7 @@ UNUSUAL_DEV( 0x04b4, 0x6831, 0x0000, 0x9999, "Cypress ISD-300LP", USB_SC_CYP_ATACB, USB_PR_DEVICE, NULL, 0), -UNUSUAL_DEV( 0x14cd, 0x6116, 0x0000, 0x9999, +UNUSUAL_DEV( 0x14cd, 0x6116, 0x0000, 0x0219, "Super Top", "USB 2.0 SATA BRIDGE", USB_SC_CYP_ATACB, USB_PR_DEVICE, NULL, 0), diff --git a/drivers/usb/storage/unusual_devs.h b/drivers/usb/storage/unusual_devs.h index a47ba0f7307c..3db470a74b09 100644 --- a/drivers/usb/storage/unusual_devs.h +++ b/drivers/usb/storage/unusual_devs.h @@ -1004,6 +1004,12 @@ UNUSUAL_DEV( 0x07cf, 0x1001, 0x1000, 0x9999, USB_SC_8070, USB_PR_CB, NULL, US_FL_NEED_OVERRIDE | US_FL_FIX_INQUIRY ), +/* Submitted by Oleksandr Chumachenko <ledest@gmail.com> */ +UNUSUAL_DEV( 0x07cf, 0x1167, 0x0100, 0x0100, + "Casio", + "EX-N1 DigitalCamera", + USB_SC_8070, USB_PR_DEVICE, NULL, 0), + /* Submitted by Hartmut Wahl <hwahl@hwahl.de>*/ UNUSUAL_DEV( 0x0839, 0x000a, 0x0001, 0x0001, "Samsung", @@ -1509,341 +1515,16 @@ UNUSUAL_DEV( 0x1210, 0x0003, 0x0100, 0x0100, /* Reported by fangxiaozhi <huananhu@huawei.com> * This brings the HUAWEI data card devices into multi-port mode */ -UNUSUAL_DEV( 0x12d1, 0x1001, 0x0000, 0x0000, - "HUAWEI MOBILE", - "Mass Storage", - USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, - 0), -UNUSUAL_DEV( 0x12d1, 0x1003, 0x0000, 0x0000, - "HUAWEI MOBILE", - "Mass Storage", - USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, - 0), -UNUSUAL_DEV( 0x12d1, 0x1004, 0x0000, 0x0000, - "HUAWEI MOBILE", - "Mass Storage", - USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, - 0), -UNUSUAL_DEV( 0x12d1, 0x1401, 0x0000, 0x0000, - "HUAWEI MOBILE", - "Mass Storage", - USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, - 0), -UNUSUAL_DEV( 0x12d1, 0x1402, 0x0000, 0x0000, - "HUAWEI MOBILE", - "Mass Storage", - USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, - 0), -UNUSUAL_DEV( 0x12d1, 0x1403, 0x0000, 0x0000, - "HUAWEI MOBILE", - "Mass Storage", - USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, - 0), -UNUSUAL_DEV( 0x12d1, 0x1404, 0x0000, 0x0000, - "HUAWEI MOBILE", - "Mass Storage", - USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, - 0), 
-UNUSUAL_DEV( 0x12d1, 0x1405, 0x0000, 0x0000, - "HUAWEI MOBILE", - "Mass Storage", - USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, - 0), -UNUSUAL_DEV( 0x12d1, 0x1406, 0x0000, 0x0000, - "HUAWEI MOBILE", - "Mass Storage", - USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, - 0), -UNUSUAL_DEV( 0x12d1, 0x1407, 0x0000, 0x0000, - "HUAWEI MOBILE", - "Mass Storage", - USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, - 0), -UNUSUAL_DEV( 0x12d1, 0x1408, 0x0000, 0x0000, - "HUAWEI MOBILE", - "Mass Storage", - USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, - 0), -UNUSUAL_DEV( 0x12d1, 0x1409, 0x0000, 0x0000, - "HUAWEI MOBILE", - "Mass Storage", - USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, - 0), -UNUSUAL_DEV( 0x12d1, 0x140A, 0x0000, 0x0000, - "HUAWEI MOBILE", - "Mass Storage", - USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, - 0), -UNUSUAL_DEV( 0x12d1, 0x140B, 0x0000, 0x0000, - "HUAWEI MOBILE", - "Mass Storage", - USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, - 0), -UNUSUAL_DEV( 0x12d1, 0x140C, 0x0000, 0x0000, - "HUAWEI MOBILE", - "Mass Storage", - USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, - 0), -UNUSUAL_DEV( 0x12d1, 0x140D, 0x0000, 0x0000, - "HUAWEI MOBILE", - "Mass Storage", - USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, - 0), -UNUSUAL_DEV( 0x12d1, 0x140E, 0x0000, 0x0000, - "HUAWEI MOBILE", - "Mass Storage", - USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, - 0), -UNUSUAL_DEV( 0x12d1, 0x140F, 0x0000, 0x0000, - "HUAWEI MOBILE", - "Mass Storage", - USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, - 0), -UNUSUAL_DEV( 0x12d1, 0x1410, 0x0000, 0x0000, - "HUAWEI MOBILE", - "Mass Storage", - USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, - 0), -UNUSUAL_DEV( 0x12d1, 0x1411, 0x0000, 0x0000, - "HUAWEI MOBILE", - "Mass Storage", - USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, - 0), -UNUSUAL_DEV( 0x12d1, 0x1412, 0x0000, 0x0000, - "HUAWEI MOBILE", - "Mass Storage", - USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, - 0), -UNUSUAL_DEV( 0x12d1, 0x1413, 0x0000, 0x0000, - "HUAWEI MOBILE", - "Mass Storage", - USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, - 0), -UNUSUAL_DEV( 0x12d1, 0x1414, 0x0000, 0x0000, - "HUAWEI MOBILE", - "Mass Storage", - USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, - 0), -UNUSUAL_DEV( 0x12d1, 0x1415, 0x0000, 0x0000, - "HUAWEI MOBILE", - "Mass Storage", - USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, - 0), -UNUSUAL_DEV( 0x12d1, 0x1416, 0x0000, 0x0000, - "HUAWEI MOBILE", - "Mass Storage", - USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, - 0), -UNUSUAL_DEV( 0x12d1, 0x1417, 0x0000, 0x0000, - "HUAWEI MOBILE", - "Mass Storage", - USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, - 0), -UNUSUAL_DEV( 0x12d1, 0x1418, 0x0000, 0x0000, - "HUAWEI MOBILE", - "Mass Storage", - USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, - 0), -UNUSUAL_DEV( 0x12d1, 0x1419, 0x0000, 0x0000, - "HUAWEI MOBILE", - "Mass Storage", - USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, - 0), -UNUSUAL_DEV( 0x12d1, 0x141A, 0x0000, 0x0000, - "HUAWEI MOBILE", - "Mass Storage", - USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, - 0), -UNUSUAL_DEV( 0x12d1, 0x141B, 0x0000, 0x0000, - "HUAWEI MOBILE", - "Mass Storage", - USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, - 0), -UNUSUAL_DEV( 0x12d1, 0x141C, 0x0000, 0x0000, - "HUAWEI MOBILE", - "Mass Storage", - USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, - 0), 
-UNUSUAL_DEV( 0x12d1, 0x141D, 0x0000, 0x0000, - "HUAWEI MOBILE", - "Mass Storage", - USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, - 0), -UNUSUAL_DEV( 0x12d1, 0x141E, 0x0000, 0x0000, - "HUAWEI MOBILE", - "Mass Storage", - USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, - 0), -UNUSUAL_DEV( 0x12d1, 0x141F, 0x0000, 0x0000, - "HUAWEI MOBILE", - "Mass Storage", - USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, - 0), -UNUSUAL_DEV( 0x12d1, 0x1420, 0x0000, 0x0000, - "HUAWEI MOBILE", - "Mass Storage", - USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, - 0), -UNUSUAL_DEV( 0x12d1, 0x1421, 0x0000, 0x0000, - "HUAWEI MOBILE", - "Mass Storage", - USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, - 0), -UNUSUAL_DEV( 0x12d1, 0x1422, 0x0000, 0x0000, - "HUAWEI MOBILE", - "Mass Storage", - USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, - 0), -UNUSUAL_DEV( 0x12d1, 0x1423, 0x0000, 0x0000, - "HUAWEI MOBILE", - "Mass Storage", - USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, - 0), -UNUSUAL_DEV( 0x12d1, 0x1424, 0x0000, 0x0000, - "HUAWEI MOBILE", - "Mass Storage", - USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, - 0), -UNUSUAL_DEV( 0x12d1, 0x1425, 0x0000, 0x0000, - "HUAWEI MOBILE", - "Mass Storage", - USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, - 0), -UNUSUAL_DEV( 0x12d1, 0x1426, 0x0000, 0x0000, - "HUAWEI MOBILE", - "Mass Storage", - USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, - 0), -UNUSUAL_DEV( 0x12d1, 0x1427, 0x0000, 0x0000, - "HUAWEI MOBILE", - "Mass Storage", - USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, - 0), -UNUSUAL_DEV( 0x12d1, 0x1428, 0x0000, 0x0000, - "HUAWEI MOBILE", - "Mass Storage", - USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, - 0), -UNUSUAL_DEV( 0x12d1, 0x1429, 0x0000, 0x0000, - "HUAWEI MOBILE", - "Mass Storage", - USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, - 0), -UNUSUAL_DEV( 0x12d1, 0x142A, 0x0000, 0x0000, - "HUAWEI MOBILE", - "Mass Storage", - USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, - 0), -UNUSUAL_DEV( 0x12d1, 0x142B, 0x0000, 0x0000, - "HUAWEI MOBILE", - "Mass Storage", - USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, - 0), -UNUSUAL_DEV( 0x12d1, 0x142C, 0x0000, 0x0000, - "HUAWEI MOBILE", - "Mass Storage", - USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, - 0), -UNUSUAL_DEV( 0x12d1, 0x142D, 0x0000, 0x0000, - "HUAWEI MOBILE", - "Mass Storage", - USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, - 0), -UNUSUAL_DEV( 0x12d1, 0x142E, 0x0000, 0x0000, - "HUAWEI MOBILE", - "Mass Storage", - USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, - 0), -UNUSUAL_DEV( 0x12d1, 0x142F, 0x0000, 0x0000, - "HUAWEI MOBILE", - "Mass Storage", - USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, - 0), -UNUSUAL_DEV( 0x12d1, 0x1430, 0x0000, 0x0000, - "HUAWEI MOBILE", - "Mass Storage", - USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, - 0), -UNUSUAL_DEV( 0x12d1, 0x1431, 0x0000, 0x0000, - "HUAWEI MOBILE", - "Mass Storage", - USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, - 0), -UNUSUAL_DEV( 0x12d1, 0x1432, 0x0000, 0x0000, - "HUAWEI MOBILE", - "Mass Storage", - USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, - 0), -UNUSUAL_DEV( 0x12d1, 0x1433, 0x0000, 0x0000, - "HUAWEI MOBILE", - "Mass Storage", - USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, - 0), -UNUSUAL_DEV( 0x12d1, 0x1434, 0x0000, 0x0000, - "HUAWEI MOBILE", - "Mass Storage", - USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, - 0), 
-UNUSUAL_DEV( 0x12d1, 0x1435, 0x0000, 0x0000, - "HUAWEI MOBILE", - "Mass Storage", - USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, - 0), -UNUSUAL_DEV( 0x12d1, 0x1436, 0x0000, 0x0000, - "HUAWEI MOBILE", - "Mass Storage", - USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, - 0), -UNUSUAL_DEV( 0x12d1, 0x1437, 0x0000, 0x0000, - "HUAWEI MOBILE", - "Mass Storage", - USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, - 0), -UNUSUAL_DEV( 0x12d1, 0x1438, 0x0000, 0x0000, - "HUAWEI MOBILE", - "Mass Storage", - USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, - 0), -UNUSUAL_DEV( 0x12d1, 0x1439, 0x0000, 0x0000, - "HUAWEI MOBILE", - "Mass Storage", - USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, - 0), -UNUSUAL_DEV( 0x12d1, 0x143A, 0x0000, 0x0000, - "HUAWEI MOBILE", - "Mass Storage", - USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, - 0), -UNUSUAL_DEV( 0x12d1, 0x143B, 0x0000, 0x0000, - "HUAWEI MOBILE", - "Mass Storage", - USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, - 0), -UNUSUAL_DEV( 0x12d1, 0x143C, 0x0000, 0x0000, - "HUAWEI MOBILE", - "Mass Storage", - USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, - 0), -UNUSUAL_DEV( 0x12d1, 0x143D, 0x0000, 0x0000, - "HUAWEI MOBILE", - "Mass Storage", - USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, - 0), -UNUSUAL_DEV( 0x12d1, 0x143E, 0x0000, 0x0000, - "HUAWEI MOBILE", - "Mass Storage", - USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, - 0), -UNUSUAL_DEV( 0x12d1, 0x143F, 0x0000, 0x0000, +UNUSUAL_VENDOR_INTF(0x12d1, 0x08, 0x06, 0x50, "HUAWEI MOBILE", "Mass Storage", - USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, + USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_init, 0), UNUSUAL_DEV( 0x12d1, 0x1446, 0x0000, 0x0000, "HUAWEI MOBILE", "Mass Storage", - USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, + USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_init, 0), /* Reported by Vilius Bilinkevicius <vilisas AT xxx DOT lt) */ diff --git a/drivers/usb/storage/usb.c b/drivers/usb/storage/usb.c index 2653e73db623..e73cf5630f8f 100644 --- a/drivers/usb/storage/usb.c +++ b/drivers/usb/storage/usb.c @@ -120,6 +120,17 @@ MODULE_PARM_DESC(quirks, "supplemental list of device IDs and their quirks"); .useTransport = use_transport, \ } +#define UNUSUAL_VENDOR_INTF(idVendor, cl, sc, pr, \ + vendor_name, product_name, use_protocol, use_transport, \ + init_function, Flags) \ +{ \ + .vendorName = vendor_name, \ + .productName = product_name, \ + .useProtocol = use_protocol, \ + .useTransport = use_transport, \ + .initFunction = init_function, \ +} + static struct us_unusual_dev us_unusual_dev_list[] = { # include "unusual_devs.h" { } /* Terminating entry */ @@ -131,6 +142,7 @@ static struct us_unusual_dev for_dynamic_ids = #undef UNUSUAL_DEV #undef COMPLIANT_DEV #undef USUAL_DEV +#undef UNUSUAL_VENDOR_INTF #ifdef CONFIG_LOCKDEP diff --git a/drivers/usb/storage/usual-tables.c b/drivers/usb/storage/usual-tables.c index b96927914f89..a9b5f2e29189 100644 --- a/drivers/usb/storage/usual-tables.c +++ b/drivers/usb/storage/usual-tables.c @@ -46,6 +46,20 @@ { USB_INTERFACE_INFO(USB_CLASS_MASS_STORAGE, useProto, useTrans), \ .driver_info = ((useType)<<24) } +/* Define the device is matched with Vendor ID and interface descriptors */ +#define UNUSUAL_VENDOR_INTF(id_vendor, cl, sc, pr, \ + vendorName, productName, useProtocol, useTransport, \ + initFunction, flags) \ +{ \ + .match_flags = USB_DEVICE_ID_MATCH_INT_INFO \ + | USB_DEVICE_ID_MATCH_VENDOR, \ + .idVendor = (id_vendor), \ + 
.bInterfaceClass = (cl), \ + .bInterfaceSubClass = (sc), \ + .bInterfaceProtocol = (pr), \ + .driver_info = (flags) \ +} + struct usb_device_id usb_storage_usb_ids[] = { # include "unusual_devs.h" { } /* Terminating entry */ @@ -57,6 +71,7 @@ MODULE_DEVICE_TABLE(usb, usb_storage_usb_ids); #undef UNUSUAL_DEV #undef COMPLIANT_DEV #undef USUAL_DEV +#undef UNUSUAL_VENDOR_INTF /* diff --git a/drivers/vhost/net.c b/drivers/vhost/net.c index 5c170100de9c..4134e53d7311 100644 --- a/drivers/vhost/net.c +++ b/drivers/vhost/net.c @@ -376,7 +376,8 @@ static void handle_rx(struct vhost_net *net) .hdr.gso_type = VIRTIO_NET_HDR_GSO_NONE }; size_t total_len = 0; - int err, headcount, mergeable; + int err, mergeable; + s16 headcount; size_t vhost_hlen, sock_hlen; size_t vhost_len, sock_len; /* TODO: check that we are running from vhost_worker? */ diff --git a/drivers/vhost/vhost.c b/drivers/vhost/vhost.c index 51e4c1eeec4f..1a9e2a9b8560 100644 --- a/drivers/vhost/vhost.c +++ b/drivers/vhost/vhost.c @@ -1074,7 +1074,7 @@ static int translate_desc(struct vhost_dev *dev, u64 addr, u32 len, } _iov = iov + ret; size = reg->memory_size - addr + reg->guest_phys_addr; - _iov->iov_len = min((u64)len, size); + _iov->iov_len = min((u64)len - s, size); _iov->iov_base = (void __user *)(unsigned long) (reg->userspace_addr + addr - reg->guest_phys_addr); s += size; diff --git a/drivers/video/backlight/adp8860_bl.c b/drivers/video/backlight/adp8860_bl.c index 550dbf0bb896..feda482d4af1 100644 --- a/drivers/video/backlight/adp8860_bl.c +++ b/drivers/video/backlight/adp8860_bl.c @@ -791,7 +791,7 @@ static int adp8860_i2c_suspend(struct i2c_client *client, pm_message_t message) static int adp8860_i2c_resume(struct i2c_client *client) { - adp8860_set_bits(client, ADP8860_MDCR, NSTBY); + adp8860_set_bits(client, ADP8860_MDCR, NSTBY | BLEN); return 0; } diff --git a/drivers/video/backlight/adp8870_bl.c b/drivers/video/backlight/adp8870_bl.c index 9be58c6f18f1..c7a2c35abdda 100644 --- a/drivers/video/backlight/adp8870_bl.c +++ b/drivers/video/backlight/adp8870_bl.c @@ -965,7 +965,7 @@ static int adp8870_i2c_suspend(struct i2c_client *client, pm_message_t message) static int adp8870_i2c_resume(struct i2c_client *client) { - adp8870_set_bits(client, ADP8870_MDCR, NSTBY); + adp8870_set_bits(client, ADP8870_MDCR, NSTBY | BLEN); return 0; } diff --git a/drivers/video/console/fbcon.c b/drivers/video/console/fbcon.c index 2e471c22abf5..5bf163e03840 100644 --- a/drivers/video/console/fbcon.c +++ b/drivers/video/console/fbcon.c @@ -372,8 +372,15 @@ static void fb_flashcursor(struct work_struct *work) struct vc_data *vc = NULL; int c; int mode; + int ret; + + /* FIXME: we should sort out the unbind locking instead */ + /* instead we just fail to flash the cursor if we can't get + * the lock instead of blocking fbcon deinit */ + ret = console_trylock(); + if (ret == 0) + return; - console_lock(); if (ops && ops->currcon != -1) vc = vc_cons[ops->currcon].d; @@ -522,6 +529,33 @@ static int search_for_mapped_con(void) return retval; } +static int do_fbcon_takeover(int show_logo) +{ + int err, i; + + if (!num_registered_fb) + return -ENODEV; + + if (!show_logo) + logo_shown = FBCON_LOGO_DONTSHOW; + + for (i = first_fb_vc; i <= last_fb_vc; i++) + con2fb_map[i] = info_idx; + + err = do_take_over_console(&fb_con, first_fb_vc, last_fb_vc, + fbcon_is_default); + + if (err) { + for (i = first_fb_vc; i <= last_fb_vc; i++) + con2fb_map[i] = -1; + info_idx = -1; + } else { + fbcon_has_console_bind = 1; + } + + return err; +} + static int 
fbcon_takeover(int show_logo) { int err, i; @@ -983,7 +1017,7 @@ static const char *fbcon_startup(void) } /* Setup default font */ - if (!p->fontdata) { + if (!p->fontdata && !vc->vc_font.data) { if (!fontname[0] || !(font = find_font(fontname))) font = get_default_font(info->var.xres, info->var.yres, @@ -993,6 +1027,8 @@ static const char *fbcon_startup(void) vc->vc_font.height = font->height; vc->vc_font.data = (void *)(p->fontdata = font->data); vc->vc_font.charcount = 256; /* FIXME Need to support more fonts */ + } else { + p->fontdata = vc->vc_font.data; } cols = FBCON_SWAP(ops->rotate, info->var.xres, info->var.yres); @@ -1152,9 +1188,9 @@ static void fbcon_init(struct vc_data *vc, int init) ops->p = &fb_display[fg_console]; } -static void fbcon_free_font(struct display *p) +static void fbcon_free_font(struct display *p, bool freefont) { - if (p->userfont && p->fontdata && (--REFCOUNT(p->fontdata) == 0)) + if (freefont && p->userfont && p->fontdata && (--REFCOUNT(p->fontdata) == 0)) kfree(p->fontdata - FONT_EXTRA_WORDS * sizeof(int)); p->fontdata = NULL; p->userfont = 0; @@ -1166,8 +1202,8 @@ static void fbcon_deinit(struct vc_data *vc) struct fb_info *info; struct fbcon_ops *ops; int idx; + bool free_font = true; - fbcon_free_font(p); idx = con2fb_map[vc->vc_num]; if (idx == -1) @@ -1178,6 +1214,8 @@ static void fbcon_deinit(struct vc_data *vc) if (!info) goto finished; + if (info->flags & FBINFO_MISC_FIRMWARE) + free_font = false; ops = info->fbcon_par; if (!ops) @@ -1189,6 +1227,8 @@ static void fbcon_deinit(struct vc_data *vc) ops->flags &= ~FBCON_FLAGS_INIT; finished: + fbcon_free_font(p, free_font); + if (!con_is_bound(&fb_con)) fbcon_exit(); @@ -2970,7 +3010,7 @@ static int fbcon_unbind(void) { int ret; - ret = unbind_con_driver(&fb_con, first_fb_vc, last_fb_vc, + ret = do_unbind_con_driver(&fb_con, first_fb_vc, last_fb_vc, fbcon_is_default); if (!ret) @@ -3043,7 +3083,7 @@ static int fbcon_fb_unregistered(struct fb_info *info) primary_device = -1; if (!num_registered_fb) - unregister_con_driver(&fb_con); + do_unregister_con_driver(&fb_con); return 0; } @@ -3108,7 +3148,7 @@ static int fbcon_fb_registered(struct fb_info *info) } if (info_idx != -1) - ret = fbcon_takeover(1); + ret = do_fbcon_takeover(1); } else { for (i = first_fb_vc; i <= last_fb_vc; i++) { if (con2fb_map_boot[i] == idx) diff --git a/drivers/video/console/vgacon.c b/drivers/video/console/vgacon.c index d449a74d4a31..5855d17d19ac 100644 --- a/drivers/video/console/vgacon.c +++ b/drivers/video/console/vgacon.c @@ -1064,7 +1064,7 @@ static int vgacon_do_font_op(struct vgastate *state,char *arg,int set,int ch512) unsigned short video_port_status = vga_video_port_reg + 6; int font_select = 0x00, beg, i; char *charmap; - + bool clear_attribs = false; if (vga_video_type != VIDEO_TYPE_EGAM) { charmap = (char *) VGA_MAP_MEM(colourmap, 0); beg = 0x0e; @@ -1169,12 +1169,6 @@ static int vgacon_do_font_op(struct vgastate *state,char *arg,int set,int ch512) /* if 512 char mode is already enabled don't re-enable it. */ if ((set) && (ch512 != vga_512_chars)) { - /* attribute controller */ - for (i = 0; i < MAX_NR_CONSOLES; i++) { - struct vc_data *c = vc_cons[i].d; - if (c && c->vc_sw == &vga_con) - c->vc_hi_font_mask = ch512 ? 
0x0800 : 0; - } vga_512_chars = ch512; /* 256-char: enable intensity bit 512-char: disable intensity bit */ @@ -1185,8 +1179,22 @@ static int vgacon_do_font_op(struct vgastate *state,char *arg,int set,int ch512) it means, but it works, and it appears necessary */ inb_p(video_port_status); vga_wattr(state->vgabase, VGA_AR_ENABLE_DISPLAY, 0); + clear_attribs = true; } raw_spin_unlock_irq(&vga_lock); + + if (clear_attribs) { + for (i = 0; i < MAX_NR_CONSOLES; i++) { + struct vc_data *c = vc_cons[i].d; + if (c && c->vc_sw == &vga_con) { + /* force hi font mask to 0, so we always clear + the bit on either transition */ + c->vc_hi_font_mask = 0x00; + clear_buffer_attributes(c); + c->vc_hi_font_mask = ch512 ? 0x0800 : 0; + } + } + } return 0; } diff --git a/drivers/video/fbmem.c b/drivers/video/fbmem.c index c6ce416ab587..90f13152d213 100644 --- a/drivers/video/fbmem.c +++ b/drivers/video/fbmem.c @@ -1642,7 +1642,9 @@ static int do_register_framebuffer(struct fb_info *fb_info) event.info = fb_info; if (!lock_fb_info(fb_info)) return -ENODEV; + console_lock(); fb_notifier_call_chain(FB_EVENT_FB_REGISTERED, &event); + console_unlock(); unlock_fb_info(fb_info); return 0; } @@ -1658,8 +1660,10 @@ static int do_unregister_framebuffer(struct fb_info *fb_info) if (!lock_fb_info(fb_info)) return -ENODEV; + console_lock(); event.info = fb_info; ret = fb_notifier_call_chain(FB_EVENT_FB_UNBIND, &event); + console_unlock(); unlock_fb_info(fb_info); if (ret) @@ -1674,7 +1678,9 @@ static int do_unregister_framebuffer(struct fb_info *fb_info) num_registered_fb--; fb_cleanup_device(fb_info); event.info = fb_info; + console_lock(); fb_notifier_call_chain(FB_EVENT_FB_UNREGISTERED, &event); + console_unlock(); /* this may free fb info */ put_fb_info(fb_info); @@ -1845,11 +1851,8 @@ int fb_new_modelist(struct fb_info *info) err = 1; if (!list_empty(&info->modelist)) { - if (!lock_fb_info(info)) - return -ENODEV; event.info = info; err = fb_notifier_call_chain(FB_EVENT_NEW_MODELIST, &event); - unlock_fb_info(info); } return err; diff --git a/drivers/video/fbsysfs.c b/drivers/video/fbsysfs.c index 67afa9c2289d..303fb9f35b2a 100644 --- a/drivers/video/fbsysfs.c +++ b/drivers/video/fbsysfs.c @@ -175,6 +175,8 @@ static ssize_t store_modes(struct device *device, if (i * sizeof(struct fb_videomode) != count) return -EINVAL; + if (!lock_fb_info(fb_info)) + return -ENODEV; console_lock(); list_splice(&fb_info->modelist, &old_list); fb_videomode_to_modelist((const struct fb_videomode *)buf, i, @@ -186,6 +188,7 @@ static ssize_t store_modes(struct device *device, fb_destroy_modelist(&old_list); console_unlock(); + unlock_fb_info(fb_info); return 0; } diff --git a/drivers/video/fsl-diu-fb.c b/drivers/video/fsl-diu-fb.c index 6af3f16754f0..d02a538518c2 100644 --- a/drivers/video/fsl-diu-fb.c +++ b/drivers/video/fsl-diu-fb.c @@ -923,7 +923,7 @@ static u32 fsl_diu_get_pixel_format(unsigned int bits_per_pixel) #define PF_COMP_0_MASK 0x0000000F #define PF_COMP_0_SHIFT 0 -#define MAKE_PF(alpha, red, blue, green, size, c0, c1, c2, c3) \ +#define MAKE_PF(alpha, red, green, blue, size, c0, c1, c2, c3) \ cpu_to_le32(PF_BYTE_F | (alpha << PF_ALPHA_C_SHIFT) | \ (blue << PF_BLUE_C_SHIFT) | (green << PF_GREEN_C_SHIFT) | \ (red << PF_RED_C_SHIFT) | (c3 << PF_COMP_3_SHIFT) | \ @@ -933,10 +933,10 @@ static u32 fsl_diu_get_pixel_format(unsigned int bits_per_pixel) switch (bits_per_pixel) { case 32: /* 0x88883316 */ - return MAKE_PF(3, 2, 0, 1, 3, 8, 8, 8, 8); + return MAKE_PF(3, 2, 1, 0, 3, 8, 8, 8, 8); case 24: /* 0x88082219 */ - return 
MAKE_PF(4, 0, 1, 2, 2, 0, 8, 8, 8); + return MAKE_PF(4, 0, 1, 2, 2, 8, 8, 8, 0); case 16: /* 0x65053118 */ return MAKE_PF(4, 2, 1, 0, 1, 5, 6, 5, 0); diff --git a/drivers/video/mxsfb.c b/drivers/video/mxsfb.c index 4a89f889852d..9f8999223f60 100644 --- a/drivers/video/mxsfb.c +++ b/drivers/video/mxsfb.c @@ -366,7 +366,8 @@ static void mxsfb_disable_controller(struct fb_info *fb_info) loop--; } - writel(VDCTRL4_SYNC_SIGNALS_ON, host->base + LCDC_VDCTRL4 + REG_CLR); + reg = readl(host->base + LCDC_VDCTRL4); + writel(reg & ~VDCTRL4_SYNC_SIGNALS_ON, host->base + LCDC_VDCTRL4); clk_disable_unprepare(host->clk); diff --git a/drivers/video/udlfb.c b/drivers/video/udlfb.c index a159b63e18b9..85d81103c1f9 100644 --- a/drivers/video/udlfb.c +++ b/drivers/video/udlfb.c @@ -647,7 +647,7 @@ static ssize_t dlfb_ops_write(struct fb_info *info, const char __user *buf, result = fb_sys_write(info, buf, count, ppos); if (result > 0) { - int start = max((int)(offset / info->fix.line_length) - 1, 0); + int start = max((int)(offset / info->fix.line_length), 0); int lines = min((u32)((result / info->fix.line_length) + 1), (u32)info->var.yres); diff --git a/drivers/video/via/via_clock.c b/drivers/video/via/via_clock.c index af8f26b643c1..db1e39277e32 100644 --- a/drivers/video/via/via_clock.c +++ b/drivers/video/via/via_clock.c @@ -25,6 +25,7 @@ #include <linux/kernel.h> #include <linux/via-core.h> +#include <asm/olpc.h> #include "via_clock.h" #include "global.h" #include "debug.h" @@ -289,6 +290,10 @@ static void dummy_set_pll(struct via_pll_config config) printk(KERN_INFO "Using undocumented set PLL.\n%s", via_slap); } +static void noop_set_clock_state(u8 state) +{ +} + void via_clock_init(struct via_clock *clock, int gfx_chip) { switch (gfx_chip) { @@ -346,4 +351,18 @@ void via_clock_init(struct via_clock *clock, int gfx_chip) break; } + + if (machine_is_olpc()) { + /* The OLPC XO-1.5 cannot suspend/resume reliably if the + * IGA1/IGA2 clocks are set as on or off (memory rot + * occasionally happens during suspend under such + * configurations). + * + * The only known stable scenario is to leave this bits as-is, + * which in their default states are documented to enable the + * clock only when it is needed. + */ + clock->set_primary_clock_state = noop_set_clock_state; + clock->set_secondary_clock_state = noop_set_clock_state; + } } diff --git a/drivers/virtio/virtio_ring.c b/drivers/virtio/virtio_ring.c index 5aa43c3392a2..52bfd0786fc8 100644 --- a/drivers/virtio/virtio_ring.c +++ b/drivers/virtio/virtio_ring.c @@ -132,6 +132,13 @@ static int vring_add_indirect(struct vring_virtqueue *vq, unsigned head; int i; + /* + * We require lowmem mappings for the descriptors because + * otherwise virt_to_phys will give us bogus addresses in the + * virtqueue. 
+ */ + gfp &= ~(__GFP_HIGHMEM | __GFP_HIGH); + desc = kmalloc((out + in) * sizeof(struct vring_desc), gfp); if (!desc) return -ENOMEM; diff --git a/drivers/watchdog/hpwdt.c b/drivers/watchdog/hpwdt.c index 9f13b897fd64..6019929a54a5 100644 --- a/drivers/watchdog/hpwdt.c +++ b/drivers/watchdog/hpwdt.c @@ -806,6 +806,9 @@ static int __devinit hpwdt_init_one(struct pci_dev *dev, hpwdt_timer_reg = pci_mem_addr + 0x70; hpwdt_timer_con = pci_mem_addr + 0x72; + /* Make sure that timer is disabled until /dev/watchdog is opened */ + hpwdt_stop(); + /* Make sure that we have a valid soft_margin */ if (hpwdt_change_timer(soft_margin)) hpwdt_change_timer(DEFAULT_MARGIN); diff --git a/drivers/xen/events.c b/drivers/xen/events.c index 6908e4ce2a0d..26c47a4c4269 100644 --- a/drivers/xen/events.c +++ b/drivers/xen/events.c @@ -1365,8 +1365,8 @@ void xen_evtchn_do_upcall(struct pt_regs *regs) { struct pt_regs *old_regs = set_irq_regs(regs); - exit_idle(); irq_enter(); + exit_idle(); __xen_evtchn_do_upcall(); diff --git a/drivers/xen/evtchn.c b/drivers/xen/evtchn.c index b1f60a0c0bea..b2db77e5a593 100644 --- a/drivers/xen/evtchn.c +++ b/drivers/xen/evtchn.c @@ -269,6 +269,14 @@ static int evtchn_bind_to_user(struct per_user_data *u, int port) u->name, (void *)(unsigned long)port); if (rc >= 0) rc = evtchn_make_refcounted(port); + else { + /* bind failed, should close the port now */ + struct evtchn_close close; + close.port = port; + if (HYPERVISOR_event_channel_op(EVTCHNOP_close, &close) != 0) + BUG(); + set_port_user(port, NULL); + } return rc; } @@ -277,6 +285,8 @@ static void evtchn_unbind_from_user(struct per_user_data *u, int port) { int irq = irq_from_evtchn(port); + BUG_ON(irq < 0); + unbind_from_irqhandler(irq, (void *)(unsigned long)port); set_port_user(port, NULL); diff --git a/drivers/xen/gntdev.c b/drivers/xen/gntdev.c index 1ffd03bf8e10..9a113b714b47 100644 --- a/drivers/xen/gntdev.c +++ b/drivers/xen/gntdev.c @@ -105,6 +105,21 @@ static void gntdev_print_maps(struct gntdev_priv *priv, #endif } +static void gntdev_free_map(struct grant_map *map) +{ + if (map == NULL) + return; + + if (map->pages) + free_xenballooned_pages(map->count, map->pages); + kfree(map->pages); + kfree(map->grants); + kfree(map->map_ops); + kfree(map->unmap_ops); + kfree(map->kmap_ops); + kfree(map); +} + static struct grant_map *gntdev_alloc_map(struct gntdev_priv *priv, int count) { struct grant_map *add; @@ -142,12 +157,7 @@ static struct grant_map *gntdev_alloc_map(struct gntdev_priv *priv, int count) return add; err: - kfree(add->pages); - kfree(add->grants); - kfree(add->map_ops); - kfree(add->unmap_ops); - kfree(add->kmap_ops); - kfree(add); + gntdev_free_map(add); return NULL; } @@ -198,17 +208,9 @@ static void gntdev_put_map(struct grant_map *map) evtchn_put(map->notify.event); } - if (map->pages) { - if (!use_ptemod) - unmap_grant_pages(map, 0, map->count); - - free_xenballooned_pages(map->count, map->pages); - } - kfree(map->pages); - kfree(map->grants); - kfree(map->map_ops); - kfree(map->unmap_ops); - kfree(map); + if (map->pages && !use_ptemod) + unmap_grant_pages(map, 0, map->count); + gntdev_free_map(map); } /* ------------------------------------------------------------------ */ @@ -314,8 +316,9 @@ static int __unmap_grant_pages(struct grant_map *map, int offset, int pages) } } - err = gnttab_unmap_refs(map->unmap_ops + offset, map->pages + offset, - pages, true); + err = gnttab_unmap_refs(map->unmap_ops + offset, + use_ptemod ? 
map->kmap_ops + offset : NULL, map->pages + offset, + pages); if (err) return err; diff --git a/drivers/xen/grant-table.c b/drivers/xen/grant-table.c index f100ce20b16b..7e34beed9ea9 100644 --- a/drivers/xen/grant-table.c +++ b/drivers/xen/grant-table.c @@ -53,10 +53,6 @@ /* External tools reserve first few grant table entries. */ #define NR_RESERVED_ENTRIES 8 #define GNTTAB_LIST_END 0xffffffff -#define GREFS_PER_GRANT_FRAME \ -(grant_table_version == 1 ? \ -(PAGE_SIZE / sizeof(struct grant_entry_v1)) : \ -(PAGE_SIZE / sizeof(union grant_entry_v2))) static grant_ref_t **gnttab_list; static unsigned int nr_grant_frames; @@ -151,6 +147,7 @@ static struct gnttab_ops *gnttab_interface; static grant_status_t *grstatus; static int grant_table_version; +static int grefs_per_grant_frame; static struct gnttab_free_callback *gnttab_free_callback_list; @@ -679,12 +676,14 @@ static int grow_gnttab_list(unsigned int more_frames) unsigned int new_nr_grant_frames, extra_entries, i; unsigned int nr_glist_frames, new_nr_glist_frames; + BUG_ON(grefs_per_grant_frame == 0); + new_nr_grant_frames = nr_grant_frames + more_frames; - extra_entries = more_frames * GREFS_PER_GRANT_FRAME; + extra_entries = more_frames * grefs_per_grant_frame; - nr_glist_frames = (nr_grant_frames * GREFS_PER_GRANT_FRAME + RPP - 1) / RPP; + nr_glist_frames = (nr_grant_frames * grefs_per_grant_frame + RPP - 1) / RPP; new_nr_glist_frames = - (new_nr_grant_frames * GREFS_PER_GRANT_FRAME + RPP - 1) / RPP; + (new_nr_grant_frames * grefs_per_grant_frame + RPP - 1) / RPP; for (i = nr_glist_frames; i < new_nr_glist_frames; i++) { gnttab_list[i] = (grant_ref_t *)__get_free_page(GFP_ATOMIC); if (!gnttab_list[i]) @@ -692,12 +691,12 @@ static int grow_gnttab_list(unsigned int more_frames) } - for (i = GREFS_PER_GRANT_FRAME * nr_grant_frames; - i < GREFS_PER_GRANT_FRAME * new_nr_grant_frames - 1; i++) + for (i = grefs_per_grant_frame * nr_grant_frames; + i < grefs_per_grant_frame * new_nr_grant_frames - 1; i++) gnttab_entry(i) = i + 1; gnttab_entry(i) = gnttab_free_head; - gnttab_free_head = GREFS_PER_GRANT_FRAME * nr_grant_frames; + gnttab_free_head = grefs_per_grant_frame * nr_grant_frames; gnttab_free_count += extra_entries; nr_grant_frames = new_nr_grant_frames; @@ -774,7 +773,8 @@ int gnttab_map_refs(struct gnttab_map_grant_ref *map_ops, EXPORT_SYMBOL_GPL(gnttab_map_refs); int gnttab_unmap_refs(struct gnttab_unmap_grant_ref *unmap_ops, - struct page **pages, unsigned int count, bool clear_pte) + struct gnttab_map_grant_ref *kmap_ops, + struct page **pages, unsigned int count) { int i, ret; @@ -786,7 +786,8 @@ int gnttab_unmap_refs(struct gnttab_unmap_grant_ref *unmap_ops, return ret; for (i = 0; i < count; i++) { - ret = m2p_remove_override(pages[i], clear_pte); + ret = m2p_remove_override(pages[i], kmap_ops ? 
+ &kmap_ops[i] : NULL); if (ret) return ret; } @@ -797,7 +798,8 @@ EXPORT_SYMBOL_GPL(gnttab_unmap_refs); static unsigned nr_status_frames(unsigned nr_grant_frames) { - return (nr_grant_frames * GREFS_PER_GRANT_FRAME + SPP - 1) / SPP; + BUG_ON(grefs_per_grant_frame == 0); + return (nr_grant_frames * grefs_per_grant_frame + SPP - 1) / SPP; } static int gnttab_map_frames_v1(unsigned long *frames, unsigned int nr_gframes) @@ -955,6 +957,7 @@ static void gnttab_request_version(void) rc = HYPERVISOR_grant_table_op(GNTTABOP_set_version, &gsv, 1); if (rc == 0 && gsv.version == 2) { grant_table_version = 2; + grefs_per_grant_frame = PAGE_SIZE / sizeof(union grant_entry_v2); gnttab_interface = &gnttab_v2_ops; } else if (grant_table_version == 2) { /* @@ -967,17 +970,17 @@ static void gnttab_request_version(void) panic("we need grant tables version 2, but only version 1 is available"); } else { grant_table_version = 1; + grefs_per_grant_frame = PAGE_SIZE / sizeof(struct grant_entry_v1); gnttab_interface = &gnttab_v1_ops; } printk(KERN_INFO "Grant tables using version %d layout.\n", grant_table_version); } -int gnttab_resume(void) +static int gnttab_setup(void) { unsigned int max_nr_gframes; - gnttab_request_version(); max_nr_gframes = gnttab_max_grant_frames(); if (max_nr_gframes < nr_grant_frames) return -ENOSYS; @@ -1000,6 +1003,12 @@ int gnttab_resume(void) return 0; } +int gnttab_resume(void) +{ + gnttab_request_version(); + return gnttab_setup(); +} + int gnttab_suspend(void) { gnttab_interface->unmap_frames(); @@ -1011,9 +1020,10 @@ static int gnttab_expand(unsigned int req_entries) int rc; unsigned int cur, extra; + BUG_ON(grefs_per_grant_frame == 0); cur = nr_grant_frames; - extra = ((req_entries + (GREFS_PER_GRANT_FRAME-1)) / - GREFS_PER_GRANT_FRAME); + extra = ((req_entries + (grefs_per_grant_frame-1)) / + grefs_per_grant_frame); if (cur + extra > gnttab_max_grant_frames()) return -ENOSPC; @@ -1031,21 +1041,23 @@ int gnttab_init(void) unsigned int nr_init_grefs; int ret; + gnttab_request_version(); nr_grant_frames = 1; boot_max_nr_grant_frames = __max_nr_grant_frames(); /* Determine the maximum number of frames required for the * grant reference free list on the current hypervisor. 
*/ + BUG_ON(grefs_per_grant_frame == 0); max_nr_glist_frames = (boot_max_nr_grant_frames * - GREFS_PER_GRANT_FRAME / RPP); + grefs_per_grant_frame / RPP); gnttab_list = kmalloc(max_nr_glist_frames * sizeof(grant_ref_t *), GFP_KERNEL); if (gnttab_list == NULL) return -ENOMEM; - nr_glist_frames = (nr_grant_frames * GREFS_PER_GRANT_FRAME + RPP - 1) / RPP; + nr_glist_frames = (nr_grant_frames * grefs_per_grant_frame + RPP - 1) / RPP; for (i = 0; i < nr_glist_frames; i++) { gnttab_list[i] = (grant_ref_t *)__get_free_page(GFP_KERNEL); if (gnttab_list[i] == NULL) { @@ -1054,12 +1066,12 @@ int gnttab_init(void) } } - if (gnttab_resume() < 0) { + if (gnttab_setup() < 0) { ret = -ENODEV; goto ini_nomem; } - nr_init_grefs = nr_grant_frames * GREFS_PER_GRANT_FRAME; + nr_init_grefs = nr_grant_frames * grefs_per_grant_frame; for (i = NR_RESERVED_ENTRIES; i < nr_init_grefs - 1; i++) gnttab_entry(i) = i + 1; diff --git a/drivers/xen/swiotlb-xen.c b/drivers/xen/swiotlb-xen.c index 1afb4fba11b4..4d519488d304 100644 --- a/drivers/xen/swiotlb-xen.c +++ b/drivers/xen/swiotlb-xen.c @@ -232,7 +232,7 @@ xen_swiotlb_alloc_coherent(struct device *hwdev, size_t size, return ret; if (hwdev && hwdev->coherent_dma_mask) - dma_mask = hwdev->coherent_dma_mask; + dma_mask = dma_alloc_coherent_mask(hwdev, flags); phys = virt_to_phys(ret); dev_addr = xen_phys_to_bus(phys); diff --git a/drivers/xen/xen-pciback/pci_stub.c b/drivers/xen/xen-pciback/pci_stub.c index 097e536e8672..03342728bf23 100644 --- a/drivers/xen/xen-pciback/pci_stub.c +++ b/drivers/xen/xen-pciback/pci_stub.c @@ -353,16 +353,16 @@ static int __devinit pcistub_init_device(struct pci_dev *dev) if (err) goto config_release; - dev_dbg(&dev->dev, "reseting (FLR, D3, etc) the device\n"); - __pci_reset_function_locked(dev); - /* We need the device active to save the state. 
*/ dev_dbg(&dev->dev, "save state of device\n"); pci_save_state(dev); dev_data->pci_saved_state = pci_store_saved_state(dev); if (!dev_data->pci_saved_state) dev_err(&dev->dev, "Could not store PCI conf saved state!\n"); - + else { + dev_dbg(&dev->dev, "reseting (FLR, D3, etc) the device\n"); + __pci_reset_function_locked(dev); + } /* Now disable the device (this also ensures some private device * data is setup before we export) */ diff --git a/fs/autofs4/root.c b/fs/autofs4/root.c index 75e5f1c8e028..8c4292f89eef 100644 --- a/fs/autofs4/root.c +++ b/fs/autofs4/root.c @@ -392,10 +392,12 @@ static struct vfsmount *autofs4_d_automount(struct path *path) ino->flags |= AUTOFS_INF_PENDING; spin_unlock(&sbi->fs_lock); status = autofs4_mount_wait(dentry); - if (status) - return ERR_PTR(status); spin_lock(&sbi->fs_lock); ino->flags &= ~AUTOFS_INF_PENDING; + if (status) { + spin_unlock(&sbi->fs_lock); + return ERR_PTR(status); + } } done: if (!(ino->flags & AUTOFS_INF_EXPIRING)) { diff --git a/fs/binfmt_elf.c b/fs/binfmt_elf.c index 16f735417072..a009b9e322ac 100644 --- a/fs/binfmt_elf.c +++ b/fs/binfmt_elf.c @@ -1698,30 +1698,19 @@ static int elf_note_info_init(struct elf_note_info *info) return 0; info->psinfo = kmalloc(sizeof(*info->psinfo), GFP_KERNEL); if (!info->psinfo) - goto notes_free; + return 0; info->prstatus = kmalloc(sizeof(*info->prstatus), GFP_KERNEL); if (!info->prstatus) - goto psinfo_free; + return 0; info->fpu = kmalloc(sizeof(*info->fpu), GFP_KERNEL); if (!info->fpu) - goto prstatus_free; + return 0; #ifdef ELF_CORE_COPY_XFPREGS info->xfpu = kmalloc(sizeof(*info->xfpu), GFP_KERNEL); if (!info->xfpu) - goto fpu_free; + return 0; #endif return 1; -#ifdef ELF_CORE_COPY_XFPREGS - fpu_free: - kfree(info->fpu); -#endif - prstatus_free: - kfree(info->prstatus); - psinfo_free: - kfree(info->psinfo); - notes_free: - kfree(info->notes); - return 0; } static int fill_note_info(struct elfhdr *elf, int phdrs, diff --git a/fs/binfmt_misc.c b/fs/binfmt_misc.c index 613aa0618235..e1724392d050 100644 --- a/fs/binfmt_misc.c +++ b/fs/binfmt_misc.c @@ -176,7 +176,10 @@ static int load_misc_binary(struct linux_binprm *bprm, struct pt_regs *regs) goto _error; bprm->argc ++; - bprm->interp = iname; /* for binfmt_script */ + /* Update interp in case binfmt_script needs it. */ + retval = bprm_change_interp(iname, bprm); + if (retval < 0) + goto _error; interp_file = open_exec (iname); retval = PTR_ERR (interp_file); diff --git a/fs/binfmt_script.c b/fs/binfmt_script.c index d3b8c1f63155..df49d486b0c0 100644 --- a/fs/binfmt_script.c +++ b/fs/binfmt_script.c @@ -82,7 +82,9 @@ static int load_script(struct linux_binprm *bprm,struct pt_regs *regs) retval = copy_strings_kernel(1, &i_name, bprm); if (retval) return retval; bprm->argc++; - bprm->interp = interp; + retval = bprm_change_interp(interp, bprm); + if (retval < 0) + return retval; /* * OK, now restart the process with the interpreter's dentry. diff --git a/fs/block_dev.c b/fs/block_dev.c index ba11c30f302d..b3be92c980ee 100644 --- a/fs/block_dev.c +++ b/fs/block_dev.c @@ -1047,6 +1047,7 @@ int revalidate_disk(struct gendisk *disk) mutex_lock(&bdev->bd_mutex); check_disk_size_change(disk, bdev); + bdev->bd_invalidated = 0; mutex_unlock(&bdev->bd_mutex); bdput(bdev); return ret; diff --git a/fs/buffer.c b/fs/buffer.c index 0bc1bed6c4e0..18669e92b676 100644 --- a/fs/buffer.c +++ b/fs/buffer.c @@ -914,7 +914,7 @@ link_dev_buffers(struct page *page, struct buffer_head *head) /* * Initialise the state of a blockdev page's buffers. 
*/ -static void +static sector_t init_page_buffers(struct page *page, struct block_device *bdev, sector_t block, int size) { @@ -936,33 +936,41 @@ init_page_buffers(struct page *page, struct block_device *bdev, block++; bh = bh->b_this_page; } while (bh != head); + + /* + * Caller needs to validate requested block against end of device. + */ + return end_block; } /* * Create the page-cache page that contains the requested block. * - * This is user purely for blockdev mappings. + * This is used purely for blockdev mappings. */ -static struct page * +static int grow_dev_page(struct block_device *bdev, sector_t block, - pgoff_t index, int size) + pgoff_t index, int size, int sizebits) { struct inode *inode = bdev->bd_inode; struct page *page; struct buffer_head *bh; + sector_t end_block; + int ret = 0; /* Will call free_more_memory() */ page = find_or_create_page(inode->i_mapping, index, (mapping_gfp_mask(inode->i_mapping) & ~__GFP_FS)|__GFP_MOVABLE); if (!page) - return NULL; + return ret; BUG_ON(!PageLocked(page)); if (page_has_buffers(page)) { bh = page_buffers(page); if (bh->b_size == size) { - init_page_buffers(page, bdev, block, size); - return page; + end_block = init_page_buffers(page, bdev, + index << sizebits, size); + goto done; } if (!try_to_free_buffers(page)) goto failed; @@ -982,14 +990,14 @@ grow_dev_page(struct block_device *bdev, sector_t block, */ spin_lock(&inode->i_mapping->private_lock); link_dev_buffers(page, bh); - init_page_buffers(page, bdev, block, size); + end_block = init_page_buffers(page, bdev, index << sizebits, size); spin_unlock(&inode->i_mapping->private_lock); - return page; - +done: + ret = (block < end_block) ? 1 : -ENXIO; failed: unlock_page(page); page_cache_release(page); - return NULL; + return ret; } /* @@ -999,7 +1007,6 @@ failed: static int grow_buffers(struct block_device *bdev, sector_t block, int size) { - struct page *page; pgoff_t index; int sizebits; @@ -1023,22 +1030,14 @@ grow_buffers(struct block_device *bdev, sector_t block, int size) bdevname(bdev, b)); return -EIO; } - block = index << sizebits; + /* Create a page with the proper size buffers.. */ - page = grow_dev_page(bdev, block, index, size); - if (!page) - return 0; - unlock_page(page); - page_cache_release(page); - return 1; + return grow_dev_page(bdev, block, index, size, sizebits); } static struct buffer_head * __getblk_slow(struct block_device *bdev, sector_t block, int size) { - int ret; - struct buffer_head *bh; - /* Size must be multiple of hard sectorsize */ if (unlikely(size & (bdev_logical_block_size(bdev)-1) || (size < 512 || size > PAGE_SIZE))) { @@ -1051,21 +1050,20 @@ __getblk_slow(struct block_device *bdev, sector_t block, int size) return NULL; } -retry: - bh = __find_get_block(bdev, block, size); - if (bh) - return bh; + for (;;) { + struct buffer_head *bh; + int ret; - ret = grow_buffers(bdev, block, size); - if (ret == 0) { - free_more_memory(); - goto retry; - } else if (ret > 0) { bh = __find_get_block(bdev, block, size); if (bh) return bh; + + ret = grow_buffers(bdev, block, size); + if (ret < 0) + return NULL; + if (ret == 0) + free_more_memory(); } - return NULL; } /* @@ -1321,10 +1319,6 @@ EXPORT_SYMBOL(__find_get_block); * which corresponds to the passed block_device, block and size. The * returned buffer has its reference count incremented. * - * __getblk() cannot fail - it just keeps trying. If you pass it an - * illegal block number, __getblk() will happily return a buffer_head - * which represents the non-existent block. Very weird. 
- * * __getblk() will lock up the machine if grow_dev_page's try_to_free_buffers() * attempt is failing. FIXME, perhaps? */ diff --git a/fs/ceph/addr.c b/fs/ceph/addr.c index 173b1d22e59b..bb01881cb1f2 100644 --- a/fs/ceph/addr.c +++ b/fs/ceph/addr.c @@ -54,7 +54,12 @@ (CONGESTION_ON_THRESH(congestion_kb) - \ (CONGESTION_ON_THRESH(congestion_kb) >> 2)) - +static inline struct ceph_snap_context *page_snap_context(struct page *page) +{ + if (PagePrivate(page)) + return (void *)page->private; + return NULL; +} /* * Dirty a page. Optimistically adjust accounting, on the assumption @@ -142,10 +147,9 @@ static void ceph_invalidatepage(struct page *page, unsigned long offset) { struct inode *inode; struct ceph_inode_info *ci; - struct ceph_snap_context *snapc = (void *)page->private; + struct ceph_snap_context *snapc = page_snap_context(page); BUG_ON(!PageLocked(page)); - BUG_ON(!page->private); BUG_ON(!PagePrivate(page)); BUG_ON(!page->mapping); @@ -182,7 +186,6 @@ static int ceph_releasepage(struct page *page, gfp_t g) struct inode *inode = page->mapping ? page->mapping->host : NULL; dout("%p releasepage %p idx %lu\n", inode, page, page->index); WARN_ON(PageDirty(page)); - WARN_ON(page->private); WARN_ON(PagePrivate(page)); return 0; } @@ -202,7 +205,7 @@ static int readpage_nounlock(struct file *filp, struct page *page) dout("readpage inode %p file %p page %p index %lu\n", inode, filp, page, page->index); err = ceph_osdc_readpages(osdc, ceph_vino(inode), &ci->i_layout, - page->index << PAGE_CACHE_SHIFT, &len, + (u64) page_offset(page), &len, ci->i_truncate_seq, ci->i_truncate_size, &page, 1, 0); if (err == -ENOENT) @@ -264,6 +267,14 @@ static void finish_read(struct ceph_osd_request *req, struct ceph_msg *msg) kfree(req->r_pages); } +static void ceph_unlock_page_vector(struct page **pages, int num_pages) +{ + int i; + + for (i = 0; i < num_pages; i++) + unlock_page(pages[i]); +} + /* * start an async read(ahead) operation. return nr_pages we submitted * a read for on success, or negative error code. 
@@ -283,7 +294,7 @@ static int start_read(struct inode *inode, struct list_head *page_list, int max) int nr_pages = 0; int ret; - off = page->index << PAGE_CACHE_SHIFT; + off = (u64) page_offset(page); /* count pages */ next_index = page->index; @@ -305,8 +316,8 @@ static int start_read(struct inode *inode, struct list_head *page_list, int max) NULL, 0, ci->i_truncate_seq, ci->i_truncate_size, NULL, false, 1, 0); - if (!req) - return -ENOMEM; + if (IS_ERR(req)) + return PTR_ERR(req); /* build page vector */ nr_pages = len >> PAGE_CACHE_SHIFT; @@ -344,6 +355,7 @@ static int start_read(struct inode *inode, struct list_head *page_list, int max) return nr_pages; out_pages: + ceph_unlock_page_vector(pages, nr_pages); ceph_release_page_vector(pages, nr_pages); out: ceph_osdc_put_request(req); @@ -423,7 +435,7 @@ static int writepage_nounlock(struct page *page, struct writeback_control *wbc) struct ceph_inode_info *ci; struct ceph_fs_client *fsc; struct ceph_osd_client *osdc; - loff_t page_off = page->index << PAGE_CACHE_SHIFT; + loff_t page_off = page_offset(page); int len = PAGE_CACHE_SIZE; loff_t i_size; int err = 0; @@ -443,7 +455,7 @@ static int writepage_nounlock(struct page *page, struct writeback_control *wbc) osdc = &fsc->client->osdc; /* verify this is a writeable snap context */ - snapc = (void *)page->private; + snapc = page_snap_context(page); if (snapc == NULL) { dout("writepage %p page %p not dirty?\n", inode, page); goto out; @@ -451,7 +463,7 @@ static int writepage_nounlock(struct page *page, struct writeback_control *wbc) oldest = get_oldest_context(inode, &snap_size); if (snapc->seq > oldest->seq) { dout("writepage %p page %p snapc %p not writeable - noop\n", - inode, page, (void *)page->private); + inode, page, snapc); /* we should only noop if called by kswapd */ WARN_ON((current->flags & PF_MEMALLOC) == 0); ceph_put_snap_context(oldest); @@ -591,7 +603,7 @@ static void writepages_finish(struct ceph_osd_request *req, clear_bdi_congested(&fsc->backing_dev_info, BLK_RW_ASYNC); - ceph_put_snap_context((void *)page->private); + ceph_put_snap_context(page_snap_context(page)); page->private = 0; ClearPagePrivate(page); dout("unlocking %d %p\n", i, page); @@ -795,7 +807,7 @@ get_more_pages: } /* only if matching snap context */ - pgsnapc = (void *)page->private; + pgsnapc = page_snap_context(page); if (pgsnapc->seq > snapc->seq) { dout("page snapc %p %lld > oldest %p %lld\n", pgsnapc, pgsnapc->seq, snapc, snapc->seq); @@ -814,8 +826,7 @@ get_more_pages: /* ok */ if (locked_pages == 0) { /* prepare async write request */ - offset = (unsigned long long)page->index - << PAGE_CACHE_SHIFT; + offset = (u64) page_offset(page); len = wsize; req = ceph_osdc_new_request(&fsc->client->osdc, &ci->i_layout, @@ -829,8 +840,8 @@ get_more_pages: ci->i_truncate_size, &inode->i_mtime, true, 1, 0); - if (!req) { - rc = -ENOMEM; + if (IS_ERR(req)) { + rc = PTR_ERR(req); unlock_page(page); break; } @@ -984,7 +995,7 @@ retry_locked: BUG_ON(!ci->i_snap_realm); down_read(&mdsc->snap_rwsem); BUG_ON(!ci->i_snap_realm->cached_context); - snapc = (void *)page->private; + snapc = page_snap_context(page); if (snapc && snapc != ci->i_head_snapc) { /* * this page is already dirty in another (older) snap @@ -1177,7 +1188,7 @@ static int ceph_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf) struct inode *inode = vma->vm_file->f_dentry->d_inode; struct page *page = vmf->page; struct ceph_mds_client *mdsc = ceph_inode_to_client(inode)->mdsc; - loff_t off = page->index << PAGE_CACHE_SHIFT; + loff_t off = 
page_offset(page); loff_t size, len; int ret; diff --git a/fs/ceph/caps.c b/fs/ceph/caps.c index 620daad201db..e7d40776f58f 100644 --- a/fs/ceph/caps.c +++ b/fs/ceph/caps.c @@ -1349,11 +1349,15 @@ int __ceph_mark_dirty_caps(struct ceph_inode_info *ci, int mask) if (!ci->i_head_snapc) ci->i_head_snapc = ceph_get_snap_context( ci->i_snap_realm->cached_context); - dout(" inode %p now dirty snapc %p\n", &ci->vfs_inode, - ci->i_head_snapc); + dout(" inode %p now dirty snapc %p auth cap %p\n", + &ci->vfs_inode, ci->i_head_snapc, ci->i_auth_cap); BUG_ON(!list_empty(&ci->i_dirty_item)); spin_lock(&mdsc->cap_dirty_lock); - list_add(&ci->i_dirty_item, &mdsc->cap_dirty); + if (ci->i_auth_cap) + list_add(&ci->i_dirty_item, &mdsc->cap_dirty); + else + list_add(&ci->i_dirty_item, + &mdsc->cap_dirty_migrating); spin_unlock(&mdsc->cap_dirty_lock); if (ci->i_flushing_caps == 0) { ihold(inode); @@ -2388,7 +2392,7 @@ static void handle_cap_grant(struct inode *inode, struct ceph_mds_caps *grant, &atime); /* max size increase? */ - if (max_size != ci->i_max_size) { + if (ci->i_auth_cap == cap && max_size != ci->i_max_size) { dout("max_size %lld -> %llu\n", ci->i_max_size, max_size); ci->i_max_size = max_size; if (max_size >= ci->i_wanted_max_size) { @@ -2745,6 +2749,7 @@ static void handle_cap_import(struct ceph_mds_client *mdsc, /* make sure we re-request max_size, if necessary */ spin_lock(&ci->i_ceph_lock); + ci->i_wanted_max_size = 0; /* reset */ ci->i_requested_max_size = 0; spin_unlock(&ci->i_ceph_lock); } @@ -2840,8 +2845,6 @@ void ceph_handle_caps(struct ceph_mds_session *session, case CEPH_CAP_OP_IMPORT: handle_cap_import(mdsc, inode, h, session, snaptrace, snaptrace_len); - ceph_check_caps(ceph_inode(inode), 0, session); - goto done_unlocked; } /* the rest require a cap */ @@ -2858,6 +2861,7 @@ void ceph_handle_caps(struct ceph_mds_session *session, switch (op) { case CEPH_CAP_OP_REVOKE: case CEPH_CAP_OP_GRANT: + case CEPH_CAP_OP_IMPORT: handle_cap_grant(inode, h, session, cap, msg->middle); goto done_unlocked; diff --git a/fs/ceph/debugfs.c b/fs/ceph/debugfs.c index fb962efdacee..6d59006bfa27 100644 --- a/fs/ceph/debugfs.c +++ b/fs/ceph/debugfs.c @@ -201,6 +201,7 @@ int ceph_fs_debugfs_init(struct ceph_fs_client *fsc) int err = -ENOMEM; dout("ceph_fs_debugfs_init\n"); + BUG_ON(!fsc->client->debugfs_dir); fsc->debugfs_congestion_kb = debugfs_create_file("writeback_congestion_kb", 0600, diff --git a/fs/ceph/export.c b/fs/ceph/export.c index fbb2a643ef10..4098ccf8cb98 100644 --- a/fs/ceph/export.c +++ b/fs/ceph/export.c @@ -89,7 +89,7 @@ static int ceph_encode_fh(struct dentry *dentry, u32 *rawfh, int *max_len, * FIXME: we should try harder by querying the mds for the ino. 
*/ static struct dentry *__fh_to_dentry(struct super_block *sb, - struct ceph_nfs_fh *fh) + struct ceph_nfs_fh *fh, int fh_len) { struct ceph_mds_client *mdsc = ceph_sb_to_client(sb)->mdsc; struct inode *inode; @@ -97,6 +97,9 @@ static struct dentry *__fh_to_dentry(struct super_block *sb, struct ceph_vino vino; int err; + if (fh_len < sizeof(*fh) / 4) + return ERR_PTR(-ESTALE); + dout("__fh_to_dentry %llx\n", fh->ino); vino.ino = fh->ino; vino.snap = CEPH_NOSNAP; @@ -140,7 +143,7 @@ static struct dentry *__fh_to_dentry(struct super_block *sb, * convert connectable fh to dentry */ static struct dentry *__cfh_to_dentry(struct super_block *sb, - struct ceph_nfs_confh *cfh) + struct ceph_nfs_confh *cfh, int fh_len) { struct ceph_mds_client *mdsc = ceph_sb_to_client(sb)->mdsc; struct inode *inode; @@ -148,6 +151,9 @@ static struct dentry *__cfh_to_dentry(struct super_block *sb, struct ceph_vino vino; int err; + if (fh_len < sizeof(*cfh) / 4) + return ERR_PTR(-ESTALE); + dout("__cfh_to_dentry %llx (%llx/%x)\n", cfh->ino, cfh->parent_ino, cfh->parent_name_hash); @@ -197,9 +203,11 @@ static struct dentry *ceph_fh_to_dentry(struct super_block *sb, struct fid *fid, int fh_len, int fh_type) { if (fh_type == 1) - return __fh_to_dentry(sb, (struct ceph_nfs_fh *)fid->raw); + return __fh_to_dentry(sb, (struct ceph_nfs_fh *)fid->raw, + fh_len); else - return __cfh_to_dentry(sb, (struct ceph_nfs_confh *)fid->raw); + return __cfh_to_dentry(sb, (struct ceph_nfs_confh *)fid->raw, + fh_len); } /* @@ -220,6 +228,8 @@ static struct dentry *ceph_fh_to_parent(struct super_block *sb, if (fh_type == 1) return ERR_PTR(-ESTALE); + if (fh_len < sizeof(*cfh) / 4) + return ERR_PTR(-ESTALE); pr_debug("fh_to_parent %llx/%d\n", cfh->parent_ino, cfh->parent_name_hash); diff --git a/fs/ceph/file.c b/fs/ceph/file.c index ed72428d9c75..9ce3a4bd4d1c 100644 --- a/fs/ceph/file.c +++ b/fs/ceph/file.c @@ -529,8 +529,8 @@ more: do_sync, ci->i_truncate_seq, ci->i_truncate_size, &mtime, false, 2, page_align); - if (!req) - return -ENOMEM; + if (IS_ERR(req)) + return PTR_ERR(req); if (file->f_flags & O_DIRECT) { pages = ceph_get_direct_page_vector(data, num_pages, false); diff --git a/fs/ceph/inode.c b/fs/ceph/inode.c index 9fff9f3b17e4..81613bced19f 100644 --- a/fs/ceph/inode.c +++ b/fs/ceph/inode.c @@ -992,11 +992,15 @@ int ceph_fill_trace(struct super_block *sb, struct ceph_mds_request *req, if (rinfo->head->is_dentry) { struct inode *dir = req->r_locked_dir; - err = fill_inode(dir, &rinfo->diri, rinfo->dirfrag, - session, req->r_request_started, -1, - &req->r_caps_reservation); - if (err < 0) - return err; + if (dir) { + err = fill_inode(dir, &rinfo->diri, rinfo->dirfrag, + session, req->r_request_started, -1, + &req->r_caps_reservation); + if (err < 0) + return err; + } else { + WARN_ON_ONCE(1); + } } /* @@ -1004,6 +1008,7 @@ int ceph_fill_trace(struct super_block *sb, struct ceph_mds_request *req, * will have trouble splicing in the virtual snapdir later */ if (rinfo->head->is_dentry && !req->r_aborted && + req->r_locked_dir && (rinfo->head->is_target || strncmp(req->r_dentry->d_name.name, fsc->mount_options->snapdir_name, req->r_dentry->d_name.len))) { @@ -1461,7 +1466,7 @@ void __ceph_do_pending_vmtruncate(struct inode *inode) { struct ceph_inode_info *ci = ceph_inode(inode); u64 to; - int wrbuffer_refs, wake = 0; + int wrbuffer_refs, finish = 0; retry: spin_lock(&ci->i_ceph_lock); @@ -1493,15 +1498,18 @@ retry: truncate_inode_pages(inode->i_mapping, to); spin_lock(&ci->i_ceph_lock); - ci->i_truncate_pending--; - if 
(ci->i_truncate_pending == 0) - wake = 1; + if (to == ci->i_truncate_size) { + ci->i_truncate_pending = 0; + finish = 1; + } spin_unlock(&ci->i_ceph_lock); + if (!finish) + goto retry; if (wrbuffer_refs == 0) ceph_check_caps(ci, CHECK_CAPS_AUTHONLY, NULL); - if (wake) - wake_up_all(&ci->i_cap_wq); + + wake_up_all(&ci->i_cap_wq); } diff --git a/fs/ceph/mds_client.c b/fs/ceph/mds_client.c index 89971e137aab..3fd08ad5c478 100644 --- a/fs/ceph/mds_client.c +++ b/fs/ceph/mds_client.c @@ -334,10 +334,10 @@ void ceph_put_mds_session(struct ceph_mds_session *s) dout("mdsc put_session %p %d -> %d\n", s, atomic_read(&s->s_ref), atomic_read(&s->s_ref)-1); if (atomic_dec_and_test(&s->s_ref)) { - if (s->s_authorizer) + if (s->s_auth.authorizer) s->s_mdsc->fsc->client->monc.auth->ops->destroy_authorizer( s->s_mdsc->fsc->client->monc.auth, - s->s_authorizer); + s->s_auth.authorizer); kfree(s); } } @@ -394,11 +394,7 @@ static struct ceph_mds_session *register_session(struct ceph_mds_client *mdsc, s->s_seq = 0; mutex_init(&s->s_mutex); - ceph_con_init(mdsc->fsc->client->msgr, &s->s_con); - s->s_con.private = s; - s->s_con.ops = &mds_con_ops; - s->s_con.peer_name.type = CEPH_ENTITY_TYPE_MDS; - s->s_con.peer_name.num = cpu_to_le64(mds); + ceph_con_init(&s->s_con, s, &mds_con_ops, &mdsc->fsc->client->msgr); spin_lock_init(&s->s_gen_ttl_lock); s->s_cap_gen = 0; @@ -440,7 +436,8 @@ static struct ceph_mds_session *register_session(struct ceph_mds_client *mdsc, mdsc->sessions[mds] = s; atomic_inc(&s->s_ref); /* one ref to sessions[], one to caller */ - ceph_con_open(&s->s_con, ceph_mdsmap_get_addr(mdsc->mdsmap, mds)); + ceph_con_open(&s->s_con, CEPH_ENTITY_TYPE_MDS, mds, + ceph_mdsmap_get_addr(mdsc->mdsmap, mds)); return s; @@ -1889,9 +1886,14 @@ finish: static void __wake_requests(struct ceph_mds_client *mdsc, struct list_head *head) { - struct ceph_mds_request *req, *nreq; + struct ceph_mds_request *req; + LIST_HEAD(tmp_list); + + list_splice_init(head, &tmp_list); - list_for_each_entry_safe(req, nreq, head, r_wait) { + while (!list_empty(&tmp_list)) { + req = list_entry(tmp_list.next, + struct ceph_mds_request, r_wait); list_del_init(&req->r_wait); __do_request(mdsc, req); } @@ -2531,7 +2533,9 @@ static void send_mds_reconnect(struct ceph_mds_client *mdsc, session->s_state = CEPH_MDS_SESSION_RECONNECTING; session->s_seq = 0; + ceph_con_close(&session->s_con); ceph_con_open(&session->s_con, + CEPH_ENTITY_TYPE_MDS, mds, ceph_mdsmap_get_addr(mdsc->mdsmap, mds)); /* replay unsafe requests */ @@ -2636,7 +2640,8 @@ static void check_new_map(struct ceph_mds_client *mdsc, ceph_mdsmap_is_laggy(newmap, i) ? " (laggy)" : "", session_state_name(s->s_state)); - if (memcmp(ceph_mdsmap_get_addr(oldmap, i), + if (i >= newmap->m_max_mds || + memcmp(ceph_mdsmap_get_addr(oldmap, i), ceph_mdsmap_get_addr(newmap, i), sizeof(struct ceph_entity_addr))) { if (s->s_state == CEPH_MDS_SESSION_OPENING) { @@ -3395,39 +3400,33 @@ out: /* * authentication */ -static int get_authorizer(struct ceph_connection *con, - void **buf, int *len, int *proto, - void **reply_buf, int *reply_len, int force_new) + +/* + * Note: returned pointer is the address of a structure that's + * managed separately. Caller must *not* attempt to free it. 
+ */ +static struct ceph_auth_handshake *get_authorizer(struct ceph_connection *con, + int *proto, int force_new) { struct ceph_mds_session *s = con->private; struct ceph_mds_client *mdsc = s->s_mdsc; struct ceph_auth_client *ac = mdsc->fsc->client->monc.auth; - int ret = 0; - - if (force_new && s->s_authorizer) { - ac->ops->destroy_authorizer(ac, s->s_authorizer); - s->s_authorizer = NULL; - } - if (s->s_authorizer == NULL) { - if (ac->ops->create_authorizer) { - ret = ac->ops->create_authorizer( - ac, CEPH_ENTITY_TYPE_MDS, - &s->s_authorizer, - &s->s_authorizer_buf, - &s->s_authorizer_buf_len, - &s->s_authorizer_reply_buf, - &s->s_authorizer_reply_buf_len); - if (ret) - return ret; - } - } + struct ceph_auth_handshake *auth = &s->s_auth; + if (force_new && auth->authorizer) { + if (ac->ops && ac->ops->destroy_authorizer) + ac->ops->destroy_authorizer(ac, auth->authorizer); + auth->authorizer = NULL; + } + if (!auth->authorizer && ac->ops && ac->ops->create_authorizer) { + int ret = ac->ops->create_authorizer(ac, CEPH_ENTITY_TYPE_MDS, + auth); + if (ret) + return ERR_PTR(ret); + } *proto = ac->protocol; - *buf = s->s_authorizer_buf; - *len = s->s_authorizer_buf_len; - *reply_buf = s->s_authorizer_reply_buf; - *reply_len = s->s_authorizer_reply_buf_len; - return 0; + + return auth; } @@ -3437,7 +3436,7 @@ static int verify_authorizer_reply(struct ceph_connection *con, int len) struct ceph_mds_client *mdsc = s->s_mdsc; struct ceph_auth_client *ac = mdsc->fsc->client->monc.auth; - return ac->ops->verify_authorizer_reply(ac, s->s_authorizer, len); + return ac->ops->verify_authorizer_reply(ac, s->s_auth.authorizer, len); } static int invalidate_authorizer(struct ceph_connection *con) diff --git a/fs/ceph/mds_client.h b/fs/ceph/mds_client.h index 8c7c04ebb595..dd26846dd71d 100644 --- a/fs/ceph/mds_client.h +++ b/fs/ceph/mds_client.h @@ -11,6 +11,7 @@ #include <linux/ceph/types.h> #include <linux/ceph/messenger.h> #include <linux/ceph/mdsmap.h> +#include <linux/ceph/auth.h> /* * Some lock dependencies: @@ -113,9 +114,7 @@ struct ceph_mds_session { struct ceph_connection s_con; - struct ceph_authorizer *s_authorizer; - void *s_authorizer_buf, *s_authorizer_reply_buf; - size_t s_authorizer_buf_len, s_authorizer_reply_buf_len; + struct ceph_auth_handshake s_auth; /* protected by s_gen_ttl_lock */ spinlock_t s_gen_ttl_lock; diff --git a/fs/ceph/super.c b/fs/ceph/super.c index 1e67dd7305a4..f36391815461 100644 --- a/fs/ceph/super.c +++ b/fs/ceph/super.c @@ -387,8 +387,6 @@ static int ceph_show_options(struct seq_file *m, struct dentry *root) seq_printf(m, ",mount_timeout=%d", opt->mount_timeout); if (opt->osd_idle_ttl != CEPH_OSD_IDLE_TTL_DEFAULT) seq_printf(m, ",osd_idle_ttl=%d", opt->osd_idle_ttl); - if (opt->osd_timeout != CEPH_OSD_TIMEOUT_DEFAULT) - seq_printf(m, ",osdtimeout=%d", opt->osd_timeout); if (opt->osd_keepalive_timeout != CEPH_OSD_KEEPALIVE_DEFAULT) seq_printf(m, ",osdkeepalivetimeout=%d", opt->osd_keepalive_timeout); diff --git a/fs/cifs/cifs_dfs_ref.c b/fs/cifs/cifs_dfs_ref.c index 6873bb634a97..22631445a4ac 100644 --- a/fs/cifs/cifs_dfs_ref.c +++ b/fs/cifs/cifs_dfs_ref.c @@ -226,6 +226,8 @@ compose_mount_options_out: compose_mount_options_err: kfree(mountdata); mountdata = ERR_PTR(rc); + kfree(*devname); + *devname = NULL; goto compose_mount_options_out; } diff --git a/fs/cifs/cifs_unicode.c b/fs/cifs/cifs_unicode.c index fbb9da951843..6a8568c6466f 100644 --- a/fs/cifs/cifs_unicode.c +++ b/fs/cifs/cifs_unicode.c @@ -203,6 +203,27 @@ cifs_strtoUTF16(__le16 *to, const char *from, int 
len, int i; wchar_t wchar_to; /* needed to quiet sparse */ + /* special case for utf8 to handle no plane0 chars */ + if (!strcmp(codepage->charset, "utf8")) { + /* + * convert utf8 -> utf16, we assume we have enough space + * as caller should have assumed conversion does not overflow + * in destination len is length in wchar_t units (16bits) + */ + i = utf8s_to_utf16s(from, len, UTF16_LITTLE_ENDIAN, + (wchar_t *) to, len); + + /* if success terminate and exit */ + if (i >= 0) + goto success; + /* + * if fails fall back to UCS encoding as this + * function should not return negative values + * currently can fail only if source contains + * invalid encoded characters + */ + } + for (i = 0; len && *from; i++, from += charlen, len -= charlen) { charlen = codepage->char2uni(from, len, &wchar_to); if (charlen < 1) { @@ -215,6 +236,7 @@ cifs_strtoUTF16(__le16 *to, const char *from, int len, put_unaligned_le16(wchar_to, &to[i]); } +success: put_unaligned_le16(0, &to[i]); return i; } @@ -328,6 +350,6 @@ cifsConvertToUTF16(__le16 *target, const char *source, int srclen, } ctoUTF16_out: - return i; + return j; } diff --git a/fs/cifs/cifsacl.c b/fs/cifs/cifsacl.c index 3cc1b251ca08..6ccf176427f0 100644 --- a/fs/cifs/cifsacl.c +++ b/fs/cifs/cifsacl.c @@ -225,6 +225,13 @@ sid_to_str(struct cifs_sid *sidptr, char *sidstr) } static void +cifs_copy_sid(struct cifs_sid *dst, const struct cifs_sid *src) +{ + memcpy(dst, src, sizeof(*dst)); + dst->num_subauth = min_t(u8, src->num_subauth, NUM_SUBAUTHS); +} + +static void id_rb_insert(struct rb_root *root, struct cifs_sid *sidptr, struct cifs_sid_id **psidid, char *typestr) { @@ -248,7 +255,7 @@ id_rb_insert(struct rb_root *root, struct cifs_sid *sidptr, } } - memcpy(&(*psidid)->sid, sidptr, sizeof(struct cifs_sid)); + cifs_copy_sid(&(*psidid)->sid, sidptr); (*psidid)->time = jiffies - (SID_MAP_RETRY + 1); (*psidid)->refcount = 0; @@ -354,7 +361,7 @@ id_to_sid(unsigned long cid, uint sidtype, struct cifs_sid *ssid) * any fields of the node after a reference is put . */ if (test_bit(SID_ID_MAPPED, &psidid->state)) { - memcpy(ssid, &psidid->sid, sizeof(struct cifs_sid)); + cifs_copy_sid(ssid, &psidid->sid); psidid->time = jiffies; /* update ts for accessing */ goto id_sid_out; } @@ -370,14 +377,14 @@ id_to_sid(unsigned long cid, uint sidtype, struct cifs_sid *ssid) if (IS_ERR(sidkey)) { rc = -EINVAL; cFYI(1, "%s: Can't map and id to a SID", __func__); + } else if (sidkey->datalen < sizeof(struct cifs_sid)) { + rc = -EIO; + cFYI(1, "%s: Downcall contained malformed key " + "(datalen=%hu)", __func__, sidkey->datalen); } else { lsid = (struct cifs_sid *)sidkey->payload.data; - memcpy(&psidid->sid, lsid, - sidkey->datalen < sizeof(struct cifs_sid) ? - sidkey->datalen : sizeof(struct cifs_sid)); - memcpy(ssid, &psidid->sid, - sidkey->datalen < sizeof(struct cifs_sid) ? 
- sidkey->datalen : sizeof(struct cifs_sid)); + cifs_copy_sid(&psidid->sid, lsid); + cifs_copy_sid(ssid, &psidid->sid); set_bit(SID_ID_MAPPED, &psidid->state); key_put(sidkey); kfree(psidid->sidstr); @@ -396,7 +403,7 @@ id_to_sid(unsigned long cid, uint sidtype, struct cifs_sid *ssid) return rc; } if (test_bit(SID_ID_MAPPED, &psidid->state)) - memcpy(ssid, &psidid->sid, sizeof(struct cifs_sid)); + cifs_copy_sid(ssid, &psidid->sid); else rc = -EINVAL; } @@ -675,8 +682,6 @@ int compare_sids(const struct cifs_sid *ctsid, const struct cifs_sid *cwsid) static void copy_sec_desc(const struct cifs_ntsd *pntsd, struct cifs_ntsd *pnntsd, __u32 sidsoffset) { - int i; - struct cifs_sid *owner_sid_ptr, *group_sid_ptr; struct cifs_sid *nowner_sid_ptr, *ngroup_sid_ptr; @@ -692,26 +697,14 @@ static void copy_sec_desc(const struct cifs_ntsd *pntsd, owner_sid_ptr = (struct cifs_sid *)((char *)pntsd + le32_to_cpu(pntsd->osidoffset)); nowner_sid_ptr = (struct cifs_sid *)((char *)pnntsd + sidsoffset); - - nowner_sid_ptr->revision = owner_sid_ptr->revision; - nowner_sid_ptr->num_subauth = owner_sid_ptr->num_subauth; - for (i = 0; i < 6; i++) - nowner_sid_ptr->authority[i] = owner_sid_ptr->authority[i]; - for (i = 0; i < 5; i++) - nowner_sid_ptr->sub_auth[i] = owner_sid_ptr->sub_auth[i]; + cifs_copy_sid(nowner_sid_ptr, owner_sid_ptr); /* copy group sid */ group_sid_ptr = (struct cifs_sid *)((char *)pntsd + le32_to_cpu(pntsd->gsidoffset)); ngroup_sid_ptr = (struct cifs_sid *)((char *)pnntsd + sidsoffset + sizeof(struct cifs_sid)); - - ngroup_sid_ptr->revision = group_sid_ptr->revision; - ngroup_sid_ptr->num_subauth = group_sid_ptr->num_subauth; - for (i = 0; i < 6; i++) - ngroup_sid_ptr->authority[i] = group_sid_ptr->authority[i]; - for (i = 0; i < 5; i++) - ngroup_sid_ptr->sub_auth[i] = group_sid_ptr->sub_auth[i]; + cifs_copy_sid(ngroup_sid_ptr, group_sid_ptr); return; } @@ -1120,8 +1113,7 @@ static int build_sec_desc(struct cifs_ntsd *pntsd, struct cifs_ntsd *pnntsd, kfree(nowner_sid_ptr); return rc; } - memcpy(owner_sid_ptr, nowner_sid_ptr, - sizeof(struct cifs_sid)); + cifs_copy_sid(owner_sid_ptr, nowner_sid_ptr); kfree(nowner_sid_ptr); *aclflag = CIFS_ACL_OWNER; } @@ -1139,8 +1131,7 @@ static int build_sec_desc(struct cifs_ntsd *pntsd, struct cifs_ntsd *pnntsd, kfree(ngroup_sid_ptr); return rc; } - memcpy(group_sid_ptr, ngroup_sid_ptr, - sizeof(struct cifs_sid)); + cifs_copy_sid(group_sid_ptr, ngroup_sid_ptr); kfree(ngroup_sid_ptr); *aclflag = CIFS_ACL_GROUP; } diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c index 65a78e9a5d40..f771e9f98f9f 100644 --- a/fs/cifs/connect.c +++ b/fs/cifs/connect.c @@ -70,6 +70,7 @@ enum { /* Mount options that take no arguments */ Opt_user_xattr, Opt_nouser_xattr, Opt_forceuid, Opt_noforceuid, + Opt_forcegid, Opt_noforcegid, Opt_noblocksend, Opt_noautotune, Opt_hard, Opt_soft, Opt_perm, Opt_noperm, Opt_mapchars, Opt_nomapchars, Opt_sfu, @@ -121,6 +122,8 @@ static const match_table_t cifs_mount_option_tokens = { { Opt_nouser_xattr, "nouser_xattr" }, { Opt_forceuid, "forceuid" }, { Opt_noforceuid, "noforceuid" }, + { Opt_forcegid, "forcegid" }, + { Opt_noforcegid, "noforcegid" }, { Opt_noblocksend, "noblocksend" }, { Opt_noautotune, "noautotune" }, { Opt_hard, "hard" }, @@ -1287,6 +1290,12 @@ cifs_parse_mount_options(const char *mountdata, const char *devname, case Opt_noforceuid: override_uid = 0; break; + case Opt_forcegid: + override_gid = 1; + break; + case Opt_noforcegid: + override_gid = 0; + break; case Opt_noblocksend: vol->noblocksnd = 1; break; diff --git 
a/fs/compat.c b/fs/compat.c index f2944ace7a7b..2b371b38911d 100644 --- a/fs/compat.c +++ b/fs/compat.c @@ -1160,11 +1160,14 @@ compat_sys_readv(unsigned long fd, const struct compat_iovec __user *vec, struct file *file; int fput_needed; ssize_t ret; + loff_t pos; file = fget_light(fd, &fput_needed); if (!file) return -EBADF; - ret = compat_readv(file, vec, vlen, &file->f_pos); + pos = file->f_pos; + ret = compat_readv(file, vec, vlen, &pos); + file->f_pos = pos; fput_light(file, fput_needed); return ret; } @@ -1226,11 +1229,14 @@ compat_sys_writev(unsigned long fd, const struct compat_iovec __user *vec, struct file *file; int fput_needed; ssize_t ret; + loff_t pos; file = fget_light(fd, &fput_needed); if (!file) return -EBADF; - ret = compat_writev(file, vec, vlen, &file->f_pos); + pos = file->f_pos; + ret = compat_writev(file, vec, vlen, &pos); + file->f_pos = pos; fput_light(file, fput_needed); return ret; } diff --git a/fs/compat_ioctl.c b/fs/compat_ioctl.c index debdfe0fc809..5d2069ff635c 100644 --- a/fs/compat_ioctl.c +++ b/fs/compat_ioctl.c @@ -210,6 +210,8 @@ static int do_video_set_spu_palette(unsigned int fd, unsigned int cmd, err = get_user(palp, &up->palette); err |= get_user(length, &up->length); + if (err) + return -EFAULT; up_native = compat_alloc_user_space(sizeof(struct video_spu_palette)); err = put_user(compat_ptr(palp), &up_native->palette); diff --git a/fs/dcache.c b/fs/dcache.c index b80531c91779..f104945dcc7d 100644 --- a/fs/dcache.c +++ b/fs/dcache.c @@ -373,7 +373,7 @@ static struct dentry *d_kill(struct dentry *dentry, struct dentry *parent) * Inform try_to_ascend() that we are no longer attached to the * dentry tree */ - dentry->d_flags |= DCACHE_DISCONNECTED; + dentry->d_flags |= DCACHE_DENTRY_KILLED; if (parent) spin_unlock(&parent->d_lock); dentry_iput(dentry); @@ -1030,7 +1030,7 @@ static struct dentry *try_to_ascend(struct dentry *old, int locked, unsigned seq * or deletion */ if (new != old->d_parent || - (old->d_flags & DCACHE_DISCONNECTED) || + (old->d_flags & DCACHE_DENTRY_KILLED) || (!locked && read_seqretry(&rename_lock, seq))) { spin_unlock(&new->d_lock); new = NULL; @@ -1116,6 +1116,8 @@ positive: return 1; rename_retry: + if (locked) + goto again; locked = 1; write_seqlock(&rename_lock); goto again; @@ -1218,6 +1220,8 @@ out: rename_retry: if (found) return found; + if (locked) + goto again; locked = 1; write_seqlock(&rename_lock); goto again; @@ -2963,6 +2967,8 @@ resume: return; rename_retry: + if (locked) + goto again; locked = 1; write_seqlock(&rename_lock); goto again; diff --git a/fs/direct-io.c b/fs/direct-io.c index f4aadd15b613..29c4fdab29db 100644 --- a/fs/direct-io.c +++ b/fs/direct-io.c @@ -305,9 +305,9 @@ static ssize_t dio_complete(struct dio *dio, loff_t offset, ssize_t ret, bool is dio->end_io(dio->iocb, offset, transferred, dio->private, ret, is_async); } else { + inode_dio_done(dio->inode); if (is_async) aio_complete(dio->iocb, ret, 0); - inode_dio_done(dio->inode); } return ret; diff --git a/fs/ecryptfs/ecryptfs_kernel.h b/fs/ecryptfs/ecryptfs_kernel.h index 867b64c5d84f..56e3aa57e188 100644 --- a/fs/ecryptfs/ecryptfs_kernel.h +++ b/fs/ecryptfs/ecryptfs_kernel.h @@ -568,6 +568,8 @@ struct ecryptfs_open_req { struct inode *ecryptfs_get_inode(struct inode *lower_inode, struct super_block *sb); void ecryptfs_i_size_init(const char *page_virt, struct inode *inode); +int ecryptfs_initialize_file(struct dentry *ecryptfs_dentry, + struct inode *ecryptfs_inode); int ecryptfs_decode_and_decrypt_filename(char **decrypted_name, size_t 
*decrypted_name_size, struct dentry *ecryptfs_dentry, diff --git a/fs/ecryptfs/file.c b/fs/ecryptfs/file.c index 2b17f2f9b121..d45ba4568128 100644 --- a/fs/ecryptfs/file.c +++ b/fs/ecryptfs/file.c @@ -138,29 +138,50 @@ out: return rc; } -static void ecryptfs_vma_close(struct vm_area_struct *vma) -{ - filemap_write_and_wait(vma->vm_file->f_mapping); -} - -static const struct vm_operations_struct ecryptfs_file_vm_ops = { - .close = ecryptfs_vma_close, - .fault = filemap_fault, -}; +struct kmem_cache *ecryptfs_file_info_cache; -static int ecryptfs_file_mmap(struct file *file, struct vm_area_struct *vma) +static int read_or_initialize_metadata(struct dentry *dentry) { + struct inode *inode = dentry->d_inode; + struct ecryptfs_mount_crypt_stat *mount_crypt_stat; + struct ecryptfs_crypt_stat *crypt_stat; int rc; - rc = generic_file_mmap(file, vma); + crypt_stat = &ecryptfs_inode_to_private(inode)->crypt_stat; + mount_crypt_stat = &ecryptfs_superblock_to_private( + inode->i_sb)->mount_crypt_stat; + mutex_lock(&crypt_stat->cs_mutex); + + if (crypt_stat->flags & ECRYPTFS_POLICY_APPLIED && + crypt_stat->flags & ECRYPTFS_KEY_VALID) { + rc = 0; + goto out; + } + + rc = ecryptfs_read_metadata(dentry); if (!rc) - vma->vm_ops = &ecryptfs_file_vm_ops; + goto out; + + if (mount_crypt_stat->flags & ECRYPTFS_PLAINTEXT_PASSTHROUGH_ENABLED) { + crypt_stat->flags &= ~(ECRYPTFS_I_SIZE_INITIALIZED + | ECRYPTFS_ENCRYPTED); + rc = 0; + goto out; + } + + if (!(mount_crypt_stat->flags & ECRYPTFS_XATTR_METADATA_ENABLED) && + !i_size_read(ecryptfs_inode_to_lower(inode))) { + rc = ecryptfs_initialize_file(dentry, inode); + if (!rc) + goto out; + } + rc = -EIO; +out: + mutex_unlock(&crypt_stat->cs_mutex); return rc; } -struct kmem_cache *ecryptfs_file_info_cache; - /** * ecryptfs_open * @inode: inode speciying file to open @@ -236,32 +257,9 @@ static int ecryptfs_open(struct inode *inode, struct file *file) rc = 0; goto out; } - mutex_lock(&crypt_stat->cs_mutex); - if (!(crypt_stat->flags & ECRYPTFS_POLICY_APPLIED) - || !(crypt_stat->flags & ECRYPTFS_KEY_VALID)) { - rc = ecryptfs_read_metadata(ecryptfs_dentry); - if (rc) { - ecryptfs_printk(KERN_DEBUG, - "Valid headers not found\n"); - if (!(mount_crypt_stat->flags - & ECRYPTFS_PLAINTEXT_PASSTHROUGH_ENABLED)) { - rc = -EIO; - printk(KERN_WARNING "Either the lower file " - "is not in a valid eCryptfs format, " - "or the key could not be retrieved. " - "Plaintext passthrough mode is not " - "enabled; returning -EIO\n"); - mutex_unlock(&crypt_stat->cs_mutex); - goto out_put; - } - rc = 0; - crypt_stat->flags &= ~(ECRYPTFS_I_SIZE_INITIALIZED - | ECRYPTFS_ENCRYPTED); - mutex_unlock(&crypt_stat->cs_mutex); - goto out; - } - } - mutex_unlock(&crypt_stat->cs_mutex); + rc = read_or_initialize_metadata(ecryptfs_dentry); + if (rc) + goto out_put; ecryptfs_printk(KERN_DEBUG, "inode w/ addr = [0x%p], i_ino = " "[0x%.16lx] size: [0x%.16llx]\n", inode, inode->i_ino, (unsigned long long)i_size_read(inode)); @@ -277,8 +275,14 @@ out: static int ecryptfs_flush(struct file *file, fl_owner_t td) { - return file->f_mode & FMODE_WRITE - ? 
filemap_write_and_wait(file->f_mapping) : 0; + struct file *lower_file = ecryptfs_file_to_lower(file); + + if (lower_file->f_op && lower_file->f_op->flush) { + filemap_write_and_wait(file->f_mapping); + return lower_file->f_op->flush(lower_file, td); + } + + return 0; } static int ecryptfs_release(struct inode *inode, struct file *file) @@ -292,15 +296,7 @@ static int ecryptfs_release(struct inode *inode, struct file *file) static int ecryptfs_fsync(struct file *file, loff_t start, loff_t end, int datasync) { - int rc = 0; - - rc = generic_file_fsync(file, start, end, datasync); - if (rc) - goto out; - rc = vfs_fsync_range(ecryptfs_file_to_lower(file), start, end, - datasync); -out: - return rc; + return vfs_fsync(ecryptfs_file_to_lower(file), datasync); } static int ecryptfs_fasync(int fd, struct file *file, int flag) @@ -369,7 +365,7 @@ const struct file_operations ecryptfs_main_fops = { #ifdef CONFIG_COMPAT .compat_ioctl = ecryptfs_compat_ioctl, #endif - .mmap = ecryptfs_file_mmap, + .mmap = generic_file_mmap, .open = ecryptfs_open, .flush = ecryptfs_flush, .release = ecryptfs_release, diff --git a/fs/ecryptfs/inode.c b/fs/ecryptfs/inode.c index ab35b113003b..11030b2fd3b4 100644 --- a/fs/ecryptfs/inode.c +++ b/fs/ecryptfs/inode.c @@ -143,6 +143,31 @@ static int ecryptfs_interpose(struct dentry *lower_dentry, return 0; } +static int ecryptfs_do_unlink(struct inode *dir, struct dentry *dentry, + struct inode *inode) +{ + struct dentry *lower_dentry = ecryptfs_dentry_to_lower(dentry); + struct inode *lower_dir_inode = ecryptfs_inode_to_lower(dir); + struct dentry *lower_dir_dentry; + int rc; + + dget(lower_dentry); + lower_dir_dentry = lock_parent(lower_dentry); + rc = vfs_unlink(lower_dir_inode, lower_dentry); + if (rc) { + printk(KERN_ERR "Error in vfs_unlink; rc = [%d]\n", rc); + goto out_unlock; + } + fsstack_copy_attr_times(dir, lower_dir_inode); + set_nlink(inode, ecryptfs_inode_to_lower(inode)->i_nlink); + inode->i_ctime = dir->i_ctime; + d_drop(dentry); +out_unlock: + unlock_dir(lower_dir_dentry); + dput(lower_dentry); + return rc; +} + /** * ecryptfs_do_create * @directory_inode: inode of the new file's dentry's parent in ecryptfs @@ -182,8 +207,10 @@ ecryptfs_do_create(struct inode *directory_inode, } inode = __ecryptfs_get_inode(lower_dentry->d_inode, directory_inode->i_sb); - if (IS_ERR(inode)) + if (IS_ERR(inode)) { + vfs_unlink(lower_dir_dentry->d_inode, lower_dentry); goto out_lock; + } fsstack_copy_attr_times(directory_inode, lower_dir_dentry->d_inode); fsstack_copy_inode_size(directory_inode, lower_dir_dentry->d_inode); out_lock: @@ -200,8 +227,8 @@ out: * * Returns zero on success */ -static int ecryptfs_initialize_file(struct dentry *ecryptfs_dentry, - struct inode *ecryptfs_inode) +int ecryptfs_initialize_file(struct dentry *ecryptfs_dentry, + struct inode *ecryptfs_inode) { struct ecryptfs_crypt_stat *crypt_stat = &ecryptfs_inode_to_private(ecryptfs_inode)->crypt_stat; @@ -265,7 +292,9 @@ ecryptfs_create(struct inode *directory_inode, struct dentry *ecryptfs_dentry, * that this on disk file is prepared to be an ecryptfs file */ rc = ecryptfs_initialize_file(ecryptfs_dentry, ecryptfs_inode); if (rc) { - drop_nlink(ecryptfs_inode); + ecryptfs_do_unlink(directory_inode, ecryptfs_dentry, + ecryptfs_inode); + make_bad_inode(ecryptfs_inode); unlock_new_inode(ecryptfs_inode); iput(ecryptfs_inode); goto out; @@ -477,27 +506,7 @@ out_lock: static int ecryptfs_unlink(struct inode *dir, struct dentry *dentry) { - int rc = 0; - struct dentry *lower_dentry = 
ecryptfs_dentry_to_lower(dentry); - struct inode *lower_dir_inode = ecryptfs_inode_to_lower(dir); - struct dentry *lower_dir_dentry; - - dget(lower_dentry); - lower_dir_dentry = lock_parent(lower_dentry); - rc = vfs_unlink(lower_dir_inode, lower_dentry); - if (rc) { - printk(KERN_ERR "Error in vfs_unlink; rc = [%d]\n", rc); - goto out_unlock; - } - fsstack_copy_attr_times(dir, lower_dir_inode); - set_nlink(dentry->d_inode, - ecryptfs_inode_to_lower(dentry->d_inode)->i_nlink); - dentry->d_inode->i_ctime = dir->i_ctime; - d_drop(dentry); -out_unlock: - unlock_dir(lower_dir_dentry); - dput(lower_dentry); - return rc; + return ecryptfs_do_unlink(dir, dentry, dentry->d_inode); } static int ecryptfs_symlink(struct inode *dir, struct dentry *dentry, @@ -621,6 +630,7 @@ ecryptfs_rename(struct inode *old_dir, struct dentry *old_dentry, struct dentry *lower_old_dir_dentry; struct dentry *lower_new_dir_dentry; struct dentry *trap = NULL; + struct inode *target_inode; lower_old_dentry = ecryptfs_dentry_to_lower(old_dentry); lower_new_dentry = ecryptfs_dentry_to_lower(new_dentry); @@ -628,6 +638,7 @@ ecryptfs_rename(struct inode *old_dir, struct dentry *old_dentry, dget(lower_new_dentry); lower_old_dir_dentry = dget_parent(lower_old_dentry); lower_new_dir_dentry = dget_parent(lower_new_dentry); + target_inode = new_dentry->d_inode; trap = lock_rename(lower_old_dir_dentry, lower_new_dir_dentry); /* source should not be ancestor of target */ if (trap == lower_old_dentry) { @@ -643,6 +654,9 @@ ecryptfs_rename(struct inode *old_dir, struct dentry *old_dentry, lower_new_dir_dentry->d_inode, lower_new_dentry); if (rc) goto out_lock; + if (target_inode) + fsstack_copy_attr_all(target_inode, + ecryptfs_inode_to_lower(target_inode)); fsstack_copy_attr_all(new_dir, lower_new_dir_dentry->d_inode); if (new_dir != old_dir) fsstack_copy_attr_all(old_dir, lower_old_dir_dentry->d_inode); @@ -1002,12 +1016,6 @@ static int ecryptfs_setattr(struct dentry *dentry, struct iattr *ia) goto out; } - if (S_ISREG(inode->i_mode)) { - rc = filemap_write_and_wait(inode->i_mapping); - if (rc) - goto out; - fsstack_copy_attr_all(inode, lower_inode); - } memcpy(&lower_ia, ia, sizeof(lower_ia)); if (ia->ia_valid & ATTR_FILE) lower_ia.ia_file = ecryptfs_file_to_lower(ia->ia_file); diff --git a/fs/ecryptfs/main.c b/fs/ecryptfs/main.c index 68954937a071..240832e8b289 100644 --- a/fs/ecryptfs/main.c +++ b/fs/ecryptfs/main.c @@ -162,6 +162,7 @@ void ecryptfs_put_lower_file(struct inode *inode) inode_info = ecryptfs_inode_to_private(inode); if (atomic_dec_and_mutex_lock(&inode_info->lower_file_count, &inode_info->lower_file_mutex)) { + filemap_write_and_wait(inode->i_mapping); fput(inode_info->lower_file); inode_info->lower_file = NULL; mutex_unlock(&inode_info->lower_file_mutex); @@ -279,6 +280,7 @@ static int ecryptfs_parse_options(struct ecryptfs_sb_info *sbi, char *options, char *fnek_src; char *cipher_key_bytes_src; char *fn_cipher_key_bytes_src; + u8 cipher_code; *check_ruid = 0; @@ -420,6 +422,18 @@ static int ecryptfs_parse_options(struct ecryptfs_sb_info *sbi, char *options, && !fn_cipher_key_bytes_set) mount_crypt_stat->global_default_fn_cipher_key_bytes = mount_crypt_stat->global_default_cipher_key_size; + + cipher_code = ecryptfs_code_for_cipher_string( + mount_crypt_stat->global_default_cipher_name, + mount_crypt_stat->global_default_cipher_key_size); + if (!cipher_code) { + ecryptfs_printk(KERN_ERR, + "eCryptfs doesn't support cipher: %s", + mount_crypt_stat->global_default_cipher_name); + rc = -EINVAL; + goto out; + } + 
mutex_lock(&key_tfm_list_mutex); if (!ecryptfs_tfm_exists(mount_crypt_stat->global_default_cipher_name, NULL)) { @@ -505,7 +519,6 @@ static struct dentry *ecryptfs_mount(struct file_system_type *fs_type, int flags goto out; } - s->s_flags = flags; rc = bdi_setup_and_register(&sbi->bdi, "ecryptfs", BDI_CAP_MAP_COPY); if (rc) goto out1; @@ -541,6 +554,15 @@ static struct dentry *ecryptfs_mount(struct file_system_type *fs_type, int flags } ecryptfs_set_superblock_lower(s, path.dentry->d_sb); + + /** + * Set the POSIX ACL flag based on whether they're enabled in the lower + * mount. Force a read-only eCryptfs mount if the lower mount is ro. + * Allow a ro eCryptfs mount even when the lower mount is rw. + */ + s->s_flags = flags & ~MS_POSIXACL; + s->s_flags |= path.dentry->d_sb->s_flags & (MS_RDONLY | MS_POSIXACL); + s->s_maxbytes = path.dentry->d_sb->s_maxbytes; s->s_blocksize = path.dentry->d_sb->s_blocksize; s->s_magic = ECRYPTFS_SUPER_MAGIC; diff --git a/fs/ecryptfs/mmap.c b/fs/ecryptfs/mmap.c index a46b3a8fee1e..bd1d57f98f74 100644 --- a/fs/ecryptfs/mmap.c +++ b/fs/ecryptfs/mmap.c @@ -66,18 +66,6 @@ static int ecryptfs_writepage(struct page *page, struct writeback_control *wbc) { int rc; - /* - * Refuse to write the page out if we are called from reclaim context - * since our writepage() path may potentially allocate memory when - * calling into the lower fs vfs_write() which may in turn invoke - * us again. - */ - if (current->flags & PF_MEMALLOC) { - redirty_page_for_writepage(wbc, page); - rc = 0; - goto out; - } - rc = ecryptfs_encrypt_page(page); if (rc) { ecryptfs_printk(KERN_WARNING, "Error encrypting " @@ -498,7 +486,6 @@ static int ecryptfs_write_end(struct file *file, struct ecryptfs_crypt_stat *crypt_stat = &ecryptfs_inode_to_private(ecryptfs_inode)->crypt_stat; int rc; - int need_unlock_page = 1; ecryptfs_printk(KERN_DEBUG, "Calling fill_zeros_to_end_of_page" "(page w/ index = [0x%.16lx], to = [%d])\n", index, to); @@ -519,26 +506,26 @@ static int ecryptfs_write_end(struct file *file, "zeros in page with index = [0x%.16lx]\n", index); goto out; } - set_page_dirty(page); - unlock_page(page); - need_unlock_page = 0; + rc = ecryptfs_encrypt_page(page); + if (rc) { + ecryptfs_printk(KERN_WARNING, "Error encrypting page (upper " + "index [0x%.16lx])\n", index); + goto out; + } if (pos + copied > i_size_read(ecryptfs_inode)) { i_size_write(ecryptfs_inode, pos + copied); ecryptfs_printk(KERN_DEBUG, "Expanded file size to " "[0x%.16llx]\n", (unsigned long long)i_size_read(ecryptfs_inode)); - balance_dirty_pages_ratelimited(mapping); - rc = ecryptfs_write_inode_size_to_metadata(ecryptfs_inode); - if (rc) { - printk(KERN_ERR "Error writing inode size to metadata; " - "rc = [%d]\n", rc); - goto out; - } } - rc = copied; + rc = ecryptfs_write_inode_size_to_metadata(ecryptfs_inode); + if (rc) + printk(KERN_ERR "Error writing inode size to metadata; " + "rc = [%d]\n", rc); + else + rc = copied; out: - if (need_unlock_page) - unlock_page(page); + unlock_page(page); page_cache_release(page); return rc; } diff --git a/fs/eventpoll.c b/fs/eventpoll.c index 079d1be65ba9..0863bab42c4d 100644 --- a/fs/eventpoll.c +++ b/fs/eventpoll.c @@ -1285,7 +1285,7 @@ static int ep_modify(struct eventpoll *ep, struct epitem *epi, struct epoll_even * otherwise we might miss an event that happens between the * f_op->poll() call and the new event set registering. 
*/ - epi->event.events = event->events; + epi->event.events = event->events; /* need barrier below */ pt._key = event->events; epi->event.data = event->data; /* protected by mtx */ if (epi->event.events & EPOLLWAKEUP) { @@ -1296,6 +1296,26 @@ static int ep_modify(struct eventpoll *ep, struct epitem *epi, struct epoll_even } /* + * The following barrier has two effects: + * + * 1) Flush epi changes above to other CPUs. This ensures + * we do not miss events from ep_poll_callback if an + * event occurs immediately after we call f_op->poll(). + * We need this because we did not take ep->lock while + * changing epi above (but ep_poll_callback does take + * ep->lock). + * + * 2) We also need to ensure we do not miss _past_ events + * when calling f_op->poll(). This barrier also + * pairs with the barrier in wq_has_sleeper (see + * comments for wq_has_sleeper). + * + * This barrier will now guarantee ep_poll_callback or f_op->poll + * (or both) will notice the readiness of an item. + */ + smp_mb(); + + /* * Get current event bits. We can safely use the file* here because * its usage count has been increased by the caller of this function. */ diff --git a/fs/exec.c b/fs/exec.c index 9ba382d7e726..c136a5e750fc 100644 --- a/fs/exec.c +++ b/fs/exec.c @@ -1114,7 +1114,8 @@ int flush_old_exec(struct linux_binprm * bprm) bprm->mm = NULL; /* We're using it now */ set_fs(USER_DS); - current->flags &= ~(PF_RANDOMIZE | PF_FORKNOEXEC | PF_KTHREAD); + current->flags &= + ~(PF_RANDOMIZE | PF_FORKNOEXEC | PF_KTHREAD | PF_NOFREEZE); flush_thread(); current->personality &= ~bprm->per_clear; @@ -1205,9 +1206,24 @@ void free_bprm(struct linux_binprm *bprm) mutex_unlock(¤t->signal->cred_guard_mutex); abort_creds(bprm->cred); } + /* If a binfmt changed the interp, free it. */ + if (bprm->interp != bprm->filename) + kfree(bprm->interp); kfree(bprm); } +int bprm_change_interp(char *interp, struct linux_binprm *bprm) +{ + /* If a binfmt changed the interp, free it first. 
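The smp_mb() added to ep_modify() above is one half of a classic store/check pairing: the modifier publishes the new event mask and then polls for readiness, while the wakeup side queues a waiter and then re-checks the mask, so a full barrier on each side guarantees at least one of them sees the other. A user-space analogue using C11 atomics (illustrative only, none of this is epoll code):

    #include <stdatomic.h>
    #include <stdio.h>
    #include <threads.h>

    static atomic_int event_ready;   /* "an event occurred" */
    static atomic_int waiter_queued; /* "someone is sleeping on the queue" */
    static atomic_int wakeups;

    /* Modifier side: publish the new state, then look for sleepers
     * (mirrors "update epi->event.events; smp_mb(); f_op->poll()"). */
    static int modifier(void *arg)
    {
        (void)arg;
        atomic_store_explicit(&event_ready, 1, memory_order_relaxed);
        atomic_thread_fence(memory_order_seq_cst);
        if (atomic_load_explicit(&waiter_queued, memory_order_relaxed))
            atomic_fetch_add(&wakeups, 1);    /* wake the waiter */
        return 0;
    }

    /* Waiter side: queue ourselves, then re-check the condition
     * (mirrors the barrier in wq_has_sleeper()). */
    static int waiter(void *arg)
    {
        (void)arg;
        atomic_store_explicit(&waiter_queued, 1, memory_order_relaxed);
        atomic_thread_fence(memory_order_seq_cst);
        if (atomic_load_explicit(&event_ready, memory_order_relaxed))
            atomic_fetch_add(&wakeups, 1);    /* event was already there */
        return 0;
    }

    int main(void)
    {
        thrd_t a, b;

        thrd_create(&a, modifier, NULL);
        thrd_create(&b, waiter, NULL);
        thrd_join(a, NULL);
        thrd_join(b, NULL);

        /* With both fences in place, the outcome where each thread misses
         * the other's store is forbidden, so wakeups is never 0. */
        printf("wakeups = %d\n", atomic_load(&wakeups));
        return 0;
    }
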
*/ + if (bprm->interp != bprm->filename) + kfree(bprm->interp); + bprm->interp = kstrdup(interp, GFP_KERNEL); + if (!bprm->interp) + return -ENOMEM; + return 0; +} +EXPORT_SYMBOL(bprm_change_interp); + /* * install the new credentials for this executable */ diff --git a/fs/ext3/inode.c b/fs/ext3/inode.c index 10d7812f6021..075281734fcb 100644 --- a/fs/ext3/inode.c +++ b/fs/ext3/inode.c @@ -3068,6 +3068,8 @@ static int ext3_do_update_inode(handle_t *handle, struct ext3_inode_info *ei = EXT3_I(inode); struct buffer_head *bh = iloc->bh; int err = 0, rc, block; + int need_datasync = 0; + __le32 disksize; again: /* we can't allow multiple procs in here at once, its a bit racey */ @@ -3105,7 +3107,11 @@ again: raw_inode->i_gid_high = 0; } raw_inode->i_links_count = cpu_to_le16(inode->i_nlink); - raw_inode->i_size = cpu_to_le32(ei->i_disksize); + disksize = cpu_to_le32(ei->i_disksize); + if (disksize != raw_inode->i_size) { + need_datasync = 1; + raw_inode->i_size = disksize; + } raw_inode->i_atime = cpu_to_le32(inode->i_atime.tv_sec); raw_inode->i_ctime = cpu_to_le32(inode->i_ctime.tv_sec); raw_inode->i_mtime = cpu_to_le32(inode->i_mtime.tv_sec); @@ -3121,8 +3127,11 @@ again: if (!S_ISREG(inode->i_mode)) { raw_inode->i_dir_acl = cpu_to_le32(ei->i_dir_acl); } else { - raw_inode->i_size_high = - cpu_to_le32(ei->i_disksize >> 32); + disksize = cpu_to_le32(ei->i_disksize >> 32); + if (disksize != raw_inode->i_size_high) { + raw_inode->i_size_high = disksize; + need_datasync = 1; + } if (ei->i_disksize > 0x7fffffffULL) { struct super_block *sb = inode->i_sb; if (!EXT3_HAS_RO_COMPAT_FEATURE(sb, @@ -3175,6 +3184,8 @@ again: ext3_clear_inode_state(inode, EXT3_STATE_NEW); atomic_set(&ei->i_sync_tid, handle->h_transaction->t_tid); + if (need_datasync) + atomic_set(&ei->i_datasync_tid, handle->h_transaction->t_tid); out_brelse: brelse (bh); ext3_std_error(inode->i_sb, err); diff --git a/fs/ext4/acl.c b/fs/ext4/acl.c index a5c29bb3b835..8535c45dfceb 100644 --- a/fs/ext4/acl.c +++ b/fs/ext4/acl.c @@ -410,8 +410,10 @@ ext4_xattr_set_acl(struct dentry *dentry, const char *name, const void *value, retry: handle = ext4_journal_start(inode, EXT4_DATA_TRANS_BLOCKS(inode->i_sb)); - if (IS_ERR(handle)) - return PTR_ERR(handle); + if (IS_ERR(handle)) { + error = PTR_ERR(handle); + goto release_and_out; + } error = ext4_set_acl(handle, inode, type, acl); ext4_journal_stop(handle); if (error == -ENOSPC && ext4_should_retry_alloc(inode->i_sb, &retries)) diff --git a/fs/ext4/balloc.c b/fs/ext4/balloc.c index df76291d692b..3e1018ace702 100644 --- a/fs/ext4/balloc.c +++ b/fs/ext4/balloc.c @@ -326,7 +326,7 @@ err_out: return 0; } /** - * ext4_read_block_bitmap() + * ext4_read_block_bitmap_nowait() * @sb: super block * @block_group: given block group * @@ -422,6 +422,8 @@ ext4_read_block_bitmap(struct super_block *sb, ext4_group_t block_group) struct buffer_head *bh; bh = ext4_read_block_bitmap_nowait(sb, block_group); + if (!bh) + return NULL; if (ext4_wait_block_bitmap(sb, block_group, bh)) { put_bh(bh); return NULL; @@ -447,11 +449,16 @@ static int ext4_has_free_clusters(struct ext4_sb_info *sbi, free_clusters = percpu_counter_read_positive(fcc); dirty_clusters = percpu_counter_read_positive(dcc); - root_clusters = EXT4_B2C(sbi, ext4_r_blocks_count(sbi->s_es)); + + /* + * r_blocks_count should always be multiple of the cluster ratio so + * we are safe to do a plane bit shift only. 
+ */ + root_clusters = ext4_r_blocks_count(sbi->s_es) >> sbi->s_cluster_bits; if (free_clusters - (nclusters + root_clusters + dirty_clusters) < EXT4_FREECLUSTERS_WATERMARK) { - free_clusters = EXT4_C2B(sbi, percpu_counter_sum_positive(fcc)); + free_clusters = percpu_counter_sum_positive(fcc); dirty_clusters = percpu_counter_sum_positive(dcc); } /* Check whether we have space after accounting for current diff --git a/fs/ext4/extents.c b/fs/ext4/extents.c index 8b384cc3b75a..852d4c257ace 100644 --- a/fs/ext4/extents.c +++ b/fs/ext4/extents.c @@ -52,6 +52,9 @@ #define EXT4_EXT_MARK_UNINIT1 0x2 /* mark first half uninitialized */ #define EXT4_EXT_MARK_UNINIT2 0x4 /* mark second half uninitialized */ +#define EXT4_EXT_DATA_VALID1 0x8 /* first half contains valid data */ +#define EXT4_EXT_DATA_VALID2 0x10 /* second half contains valid data */ + static int ext4_split_extent(handle_t *handle, struct inode *inode, struct ext4_ext_path *path, @@ -2107,13 +2110,14 @@ ext4_ext_in_cache(struct inode *inode, ext4_lblk_t block, * removes index from the index block. */ static int ext4_ext_rm_idx(handle_t *handle, struct inode *inode, - struct ext4_ext_path *path) + struct ext4_ext_path *path, int depth) { int err; ext4_fsblk_t leaf; /* free index block */ - path--; + depth--; + path = path + depth; leaf = ext4_idx_pblock(path->p_idx); if (unlikely(path->p_hdr->eh_entries == 0)) { EXT4_ERROR_INODE(inode, "path->p_hdr->eh_entries == 0"); @@ -2138,6 +2142,19 @@ static int ext4_ext_rm_idx(handle_t *handle, struct inode *inode, ext4_free_blocks(handle, inode, NULL, leaf, 1, EXT4_FREE_BLOCKS_METADATA | EXT4_FREE_BLOCKS_FORGET); + + while (--depth >= 0) { + if (path->p_idx != EXT_FIRST_INDEX(path->p_hdr)) + break; + path--; + err = ext4_ext_get_access(handle, inode, path); + if (err) + break; + path->p_idx->ei_block = (path+1)->p_idx->ei_block; + err = ext4_ext_dirty(handle, inode, path); + if (err) + break; + } return err; } @@ -2471,7 +2488,7 @@ ext4_ext_rm_leaf(handle_t *handle, struct inode *inode, /* if this leaf is free, then we should * remove it from index block above */ if (err == 0 && eh->eh_entries == 0 && path[depth].p_bh != NULL) - err = ext4_ext_rm_idx(handle, inode, path + depth); + err = ext4_ext_rm_idx(handle, inode, path, depth); out: return err; @@ -2672,7 +2689,7 @@ cont: /* index is empty, remove it; * handle must be already prepared by the * truncatei_leaf() */ - err = ext4_ext_rm_idx(handle, inode, path + i); + err = ext4_ext_rm_idx(handle, inode, path, i); } /* root level has p_bh == NULL, brelse() eats this */ brelse(path[i].p_bh); @@ -2829,6 +2846,9 @@ static int ext4_split_extent_at(handle_t *handle, unsigned int ee_len, depth; int err = 0; + BUG_ON((split_flag & (EXT4_EXT_DATA_VALID1 | EXT4_EXT_DATA_VALID2)) == + (EXT4_EXT_DATA_VALID1 | EXT4_EXT_DATA_VALID2)); + ext_debug("ext4_split_extents_at: inode %lu, logical" "block %llu\n", inode->i_ino, (unsigned long long)split); @@ -2887,7 +2907,14 @@ static int ext4_split_extent_at(handle_t *handle, err = ext4_ext_insert_extent(handle, inode, path, &newex, flags); if (err == -ENOSPC && (EXT4_EXT_MAY_ZEROOUT & split_flag)) { - err = ext4_ext_zeroout(inode, &orig_ex); + if (split_flag & (EXT4_EXT_DATA_VALID1|EXT4_EXT_DATA_VALID2)) { + if (split_flag & EXT4_EXT_DATA_VALID1) + err = ext4_ext_zeroout(inode, ex2); + else + err = ext4_ext_zeroout(inode, ex); + } else + err = ext4_ext_zeroout(inode, &orig_ex); + if (err) goto fix_extent_len; /* update the extent length and mark as initialized */ @@ -2940,12 +2967,13 @@ static int 
ext4_split_extent(handle_t *handle, uninitialized = ext4_ext_is_uninitialized(ex); if (map->m_lblk + map->m_len < ee_block + ee_len) { - split_flag1 = split_flag & EXT4_EXT_MAY_ZEROOUT ? - EXT4_EXT_MAY_ZEROOUT : 0; + split_flag1 = split_flag & EXT4_EXT_MAY_ZEROOUT; flags1 = flags | EXT4_GET_BLOCKS_PRE_IO; if (uninitialized) split_flag1 |= EXT4_EXT_MARK_UNINIT1 | EXT4_EXT_MARK_UNINIT2; + if (split_flag & EXT4_EXT_DATA_VALID2) + split_flag1 |= EXT4_EXT_DATA_VALID1; err = ext4_split_extent_at(handle, inode, path, map->m_lblk + map->m_len, split_flag1, flags1); if (err) @@ -2958,8 +2986,8 @@ static int ext4_split_extent(handle_t *handle, return PTR_ERR(path); if (map->m_lblk >= ee_block) { - split_flag1 = split_flag & EXT4_EXT_MAY_ZEROOUT ? - EXT4_EXT_MAY_ZEROOUT : 0; + split_flag1 = split_flag & (EXT4_EXT_MAY_ZEROOUT | + EXT4_EXT_DATA_VALID2); if (uninitialized) split_flag1 |= EXT4_EXT_MARK_UNINIT1; if (split_flag & EXT4_EXT_MARK_UNINIT2) @@ -3237,26 +3265,47 @@ static int ext4_split_unwritten_extents(handle_t *handle, split_flag |= ee_block + ee_len <= eof_block ? EXT4_EXT_MAY_ZEROOUT : 0; split_flag |= EXT4_EXT_MARK_UNINIT2; - + if (flags & EXT4_GET_BLOCKS_CONVERT) + split_flag |= EXT4_EXT_DATA_VALID2; flags |= EXT4_GET_BLOCKS_PRE_IO; return ext4_split_extent(handle, inode, path, map, split_flag, flags); } static int ext4_convert_unwritten_extents_endio(handle_t *handle, - struct inode *inode, - struct ext4_ext_path *path) + struct inode *inode, + struct ext4_map_blocks *map, + struct ext4_ext_path *path) { struct ext4_extent *ex; + ext4_lblk_t ee_block; + unsigned int ee_len; int depth; int err = 0; depth = ext_depth(inode); ex = path[depth].p_ext; + ee_block = le32_to_cpu(ex->ee_block); + ee_len = ext4_ext_get_actual_len(ex); ext_debug("ext4_convert_unwritten_extents_endio: inode %lu, logical" "block %llu, max_blocks %u\n", inode->i_ino, - (unsigned long long)le32_to_cpu(ex->ee_block), - ext4_ext_get_actual_len(ex)); + (unsigned long long)ee_block, ee_len); + + /* If extent is larger than requested then split is required */ + if (ee_block != map->m_lblk || ee_len > map->m_len) { + err = ext4_split_unwritten_extents(handle, inode, map, path, + EXT4_GET_BLOCKS_CONVERT); + if (err < 0) + goto out; + ext4_ext_drop_refs(path); + path = ext4_ext_find_extent(inode, map->m_lblk, path); + if (IS_ERR(path)) { + err = PTR_ERR(path); + goto out; + } + depth = ext_depth(inode); + ex = path[depth].p_ext; + } err = ext4_ext_get_access(handle, inode, path + depth); if (err) @@ -3564,7 +3613,7 @@ ext4_ext_handle_uninitialized_extents(handle_t *handle, struct inode *inode, } /* IO end_io complete, convert the filled extent to written */ if ((flags & EXT4_GET_BLOCKS_CONVERT)) { - ret = ext4_convert_unwritten_extents_endio(handle, inode, + ret = ext4_convert_unwritten_extents_endio(handle, inode, map, path); if (ret >= 0) { ext4_update_inode_fsync_trans(handle, inode, 1); diff --git a/fs/ext4/ialloc.c b/fs/ext4/ialloc.c index 0ee374d27fdb..902544e7ee5c 100644 --- a/fs/ext4/ialloc.c +++ b/fs/ext4/ialloc.c @@ -697,6 +697,10 @@ repeat_in_this_group: "inode=%lu", ino + 1); continue; } + BUFFER_TRACE(inode_bitmap_bh, "get_write_access"); + err = ext4_journal_get_write_access(handle, inode_bitmap_bh); + if (err) + goto fail; ext4_lock_group(sb, group); ret2 = ext4_test_and_set_bit(ino, inode_bitmap_bh->b_data); ext4_unlock_group(sb, group); @@ -710,6 +714,11 @@ repeat_in_this_group: goto out; got: + BUFFER_TRACE(inode_bitmap_bh, "call ext4_handle_dirty_metadata"); + err = ext4_handle_dirty_metadata(handle, NULL, 
inode_bitmap_bh); + if (err) + goto fail; + /* We may have to initialize the block bitmap if it isn't already */ if (EXT4_HAS_RO_COMPAT_FEATURE(sb, EXT4_FEATURE_RO_COMPAT_GDT_CSUM) && gdp->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT)) { @@ -725,7 +734,6 @@ got: BUFFER_TRACE(block_bitmap_bh, "dirty block bitmap"); err = ext4_handle_dirty_metadata(handle, NULL, block_bitmap_bh); - brelse(block_bitmap_bh); /* recheck and clear flag under lock if we still need to */ ext4_lock_group(sb, group); @@ -737,16 +745,12 @@ got: gdp); } ext4_unlock_group(sb, group); + brelse(block_bitmap_bh); if (err) goto fail; } - BUFFER_TRACE(inode_bitmap_bh, "get_write_access"); - err = ext4_journal_get_write_access(handle, inode_bitmap_bh); - if (err) - goto fail; - BUFFER_TRACE(group_desc_bh, "get_write_access"); err = ext4_journal_get_write_access(handle, group_desc_bh); if (err) @@ -789,11 +793,6 @@ got: ext4_unlock_group(sb, group); } - BUFFER_TRACE(inode_bitmap_bh, "call ext4_handle_dirty_metadata"); - err = ext4_handle_dirty_metadata(handle, NULL, inode_bitmap_bh); - if (err) - goto fail; - BUFFER_TRACE(group_desc_bh, "call ext4_handle_dirty_metadata"); err = ext4_handle_dirty_metadata(handle, NULL, group_desc_bh); if (err) diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c index 55a654d86182..a0d7e2617fda 100644 --- a/fs/ext4/inode.c +++ b/fs/ext4/inode.c @@ -1424,6 +1424,8 @@ static void ext4_da_block_invalidatepages(struct mpage_da_data *mpd) index = mpd->first_page; end = mpd->next_page - 1; + + pagevec_init(&pvec, 0); while (index <= end) { nr_pages = pagevec_lookup(&pvec, mapping, index, PAGEVEC_SIZE); if (nr_pages == 0) @@ -2386,6 +2388,16 @@ static int ext4_nonda_switch(struct super_block *sb) free_blocks = EXT4_C2B(sbi, percpu_counter_read_positive(&sbi->s_freeclusters_counter)); dirty_blocks = percpu_counter_read_positive(&sbi->s_dirtyclusters_counter); + /* + * Start pushing delalloc when 1/2 of free blocks are dirty. + */ + if (dirty_blocks && (free_blocks < 2 * dirty_blocks) && + !writeback_in_progress(sb->s_bdi) && + down_read_trylock(&sb->s_umount)) { + writeback_inodes_sb(sb, WB_REASON_FS_FREE_SPACE); + up_read(&sb->s_umount); + } + if (2 * free_blocks < 3 * dirty_blocks || free_blocks < (dirty_blocks + EXT4_FREECLUSTERS_WATERMARK)) { /* @@ -2394,13 +2406,6 @@ static int ext4_nonda_switch(struct super_block *sb) */ return 1; } - /* - * Even if we don't switch but are nearing capacity, - * start pushing delalloc when 1/2 of free blocks are dirty. - */ - if (free_blocks < 2 * dirty_blocks) - writeback_inodes_sb_if_idle(sb, WB_REASON_FS_FREE_SPACE); - return 0; } @@ -3889,6 +3894,7 @@ static int ext4_do_update_inode(handle_t *handle, struct ext4_inode_info *ei = EXT4_I(inode); struct buffer_head *bh = iloc->bh; int err = 0, rc, block; + int need_datasync = 0; /* For fields not not tracking in the in-memory inode, * initialise them to zero for new inodes. 
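The reworked ext4_nonda_switch() above only kicks background writeback when dirty blocks exceed half of the free blocks, no flush is already running, and s_umount can be taken with a trylock, so the allocation path never blocks on the kick. A minimal sketch of that opportunistic pattern, with hypothetical names rather than the ext4 ones:

    #include <pthread.h>
    #include <stdbool.h>
    #include <stdio.h>

    static pthread_mutex_t guard = PTHREAD_MUTEX_INITIALIZER;
    static bool flush_running;   /* simplified stand-in for writeback_in_progress() */

    static void start_background_flush(void)
    {
        /* stand-in for writeback_inodes_sb(); assumed cheap to request */
        printf("background flush requested\n");
    }

    /* Called from a hot path: never block, just opportunistically kick the
     * flusher when things look dirty enough. */
    static void maybe_kick_flush(unsigned long free_blocks, unsigned long dirty_blocks)
    {
        if (dirty_blocks == 0 || free_blocks >= 2 * dirty_blocks)
            return;                        /* not dirty enough yet */

        if (flush_running)
            return;                        /* a flush is already under way */

        if (pthread_mutex_trylock(&guard) != 0)
            return;                        /* guard busy: skip, do not block */

        start_background_flush();
        pthread_mutex_unlock(&guard);
    }

    int main(void)
    {
        maybe_kick_flush(100, 80);   /* dirty > free/2: tries to kick a flush */
        maybe_kick_flush(100, 10);   /* plenty of free space: does nothing */
        return 0;
    }
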
*/ @@ -3937,7 +3943,10 @@ static int ext4_do_update_inode(handle_t *handle, raw_inode->i_file_acl_high = cpu_to_le16(ei->i_file_acl >> 32); raw_inode->i_file_acl_lo = cpu_to_le32(ei->i_file_acl); - ext4_isize_set(raw_inode, ei->i_disksize); + if (ei->i_disksize != ext4_isize(raw_inode)) { + ext4_isize_set(raw_inode, ei->i_disksize); + need_datasync = 1; + } if (ei->i_disksize > 0x7fffffffULL) { struct super_block *sb = inode->i_sb; if (!EXT4_HAS_RO_COMPAT_FEATURE(sb, @@ -3988,7 +3997,7 @@ static int ext4_do_update_inode(handle_t *handle, err = rc; ext4_clear_inode_state(inode, EXT4_STATE_NEW); - ext4_update_inode_fsync_trans(handle, inode, 0); + ext4_update_inode_fsync_trans(handle, inode, need_datasync); out_brelse: brelse(bh); ext4_std_error(inode->i_sb, err); diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c index 6b0a57eafb53..3122ece15147 100644 --- a/fs/ext4/mballoc.c +++ b/fs/ext4/mballoc.c @@ -4126,7 +4126,7 @@ static void ext4_mb_add_n_trim(struct ext4_allocation_context *ac) /* The max size of hash table is PREALLOC_TB_SIZE */ order = PREALLOC_TB_SIZE - 1; /* Add the prealloc space to lg */ - rcu_read_lock(); + spin_lock(&lg->lg_prealloc_lock); list_for_each_entry_rcu(tmp_pa, &lg->lg_prealloc_list[order], pa_inode_list) { spin_lock(&tmp_pa->pa_lock); @@ -4150,12 +4150,12 @@ static void ext4_mb_add_n_trim(struct ext4_allocation_context *ac) if (!added) list_add_tail_rcu(&pa->pa_inode_list, &lg->lg_prealloc_list[order]); - rcu_read_unlock(); + spin_unlock(&lg->lg_prealloc_lock); /* Now trim the list to be not more than 8 elements */ if (lg_prealloc_count > 8) { ext4_mb_discard_lg_preallocations(sb, lg, - order, lg_prealloc_count); + order, lg_prealloc_count); return; } return ; @@ -4984,8 +4984,9 @@ int ext4_trim_fs(struct super_block *sb, struct fstrim_range *range) end = start + (range->len >> sb->s_blocksize_bits) - 1; minlen = range->minlen >> sb->s_blocksize_bits; - if (unlikely(minlen > EXT4_CLUSTERS_PER_GROUP(sb)) || - unlikely(start >= max_blks)) + if (minlen > EXT4_CLUSTERS_PER_GROUP(sb) || + start >= max_blks || + range->len < sb->s_blocksize) return -EINVAL; if (end >= max_blks) end = max_blks - 1; diff --git a/fs/ext4/move_extent.c b/fs/ext4/move_extent.c index c5826c623e7a..e2016f34b58f 100644 --- a/fs/ext4/move_extent.c +++ b/fs/ext4/move_extent.c @@ -141,55 +141,21 @@ mext_next_extent(struct inode *inode, struct ext4_ext_path *path, } /** - * mext_check_null_inode - NULL check for two inodes - * - * If inode1 or inode2 is NULL, return -EIO. Otherwise, return 0. - */ -static int -mext_check_null_inode(struct inode *inode1, struct inode *inode2, - const char *function, unsigned int line) -{ - int ret = 0; - - if (inode1 == NULL) { - __ext4_error(inode2->i_sb, function, line, - "Both inodes should not be NULL: " - "inode1 NULL inode2 %lu", inode2->i_ino); - ret = -EIO; - } else if (inode2 == NULL) { - __ext4_error(inode1->i_sb, function, line, - "Both inodes should not be NULL: " - "inode1 %lu inode2 NULL", inode1->i_ino); - ret = -EIO; - } - return ret; -} - -/** * double_down_write_data_sem - Acquire two inodes' write lock of i_data_sem * - * @orig_inode: original inode structure - * @donor_inode: donor inode structure - * Acquire write lock of i_data_sem of the two inodes (orig and donor) by - * i_ino order. 
+ * Acquire write lock of i_data_sem of the two inodes */ static void -double_down_write_data_sem(struct inode *orig_inode, struct inode *donor_inode) +double_down_write_data_sem(struct inode *first, struct inode *second) { - struct inode *first = orig_inode, *second = donor_inode; + if (first < second) { + down_write(&EXT4_I(first)->i_data_sem); + down_write_nested(&EXT4_I(second)->i_data_sem, SINGLE_DEPTH_NESTING); + } else { + down_write(&EXT4_I(second)->i_data_sem); + down_write_nested(&EXT4_I(first)->i_data_sem, SINGLE_DEPTH_NESTING); - /* - * Use the inode number to provide the stable locking order instead - * of its address, because the C language doesn't guarantee you can - * compare pointers that don't come from the same array. - */ - if (donor_inode->i_ino < orig_inode->i_ino) { - first = donor_inode; - second = orig_inode; } - - down_write(&EXT4_I(first)->i_data_sem); - down_write_nested(&EXT4_I(second)->i_data_sem, SINGLE_DEPTH_NESTING); } /** @@ -969,14 +935,6 @@ mext_check_arguments(struct inode *orig_inode, return -EINVAL; } - /* Files should be in the same ext4 FS */ - if (orig_inode->i_sb != donor_inode->i_sb) { - ext4_debug("ext4 move extent: The argument files " - "should be in same FS [ino:orig %lu, donor %lu]\n", - orig_inode->i_ino, donor_inode->i_ino); - return -EINVAL; - } - /* Ext4 move extent supports only extent based file */ if (!(ext4_test_inode_flag(orig_inode, EXT4_INODE_EXTENTS))) { ext4_debug("ext4 move extent: orig file is not extents " @@ -1072,35 +1030,19 @@ mext_check_arguments(struct inode *orig_inode, * @inode1: the inode structure * @inode2: the inode structure * - * Lock two inodes' i_mutex by i_ino order. - * If inode1 or inode2 is NULL, return -EIO. Otherwise, return 0. + * Lock two inodes' i_mutex */ -static int +static void mext_inode_double_lock(struct inode *inode1, struct inode *inode2) { - int ret = 0; - - BUG_ON(inode1 == NULL && inode2 == NULL); - - ret = mext_check_null_inode(inode1, inode2, __func__, __LINE__); - if (ret < 0) - goto out; - - if (inode1 == inode2) { - mutex_lock(&inode1->i_mutex); - goto out; - } - - if (inode1->i_ino < inode2->i_ino) { + BUG_ON(inode1 == inode2); + if (inode1 < inode2) { mutex_lock_nested(&inode1->i_mutex, I_MUTEX_PARENT); mutex_lock_nested(&inode2->i_mutex, I_MUTEX_CHILD); } else { mutex_lock_nested(&inode2->i_mutex, I_MUTEX_PARENT); mutex_lock_nested(&inode1->i_mutex, I_MUTEX_CHILD); } - -out: - return ret; } /** @@ -1109,28 +1051,13 @@ out: * @inode1: the inode that is released first * @inode2: the inode that is released second * - * If inode1 or inode2 is NULL, return -EIO. Otherwise, return 0. 
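The rewritten double_down_write_data_sem() and mext_inode_double_lock() above avoid ABBA deadlocks by always acquiring the pair of locks in a fixed order, now derived from the object addresses rather than the inode numbers. A self-contained pthread sketch of the same idea; the types and names are illustrative only:

    #include <pthread.h>
    #include <stdint.h>
    #include <stdio.h>

    struct demo_inode {
        pthread_mutex_t lock;
        int ino;
    };

    /* Lock two distinct inodes in a globally consistent order (lowest address
     * first), so two tasks locking the same pair can never deadlock by taking
     * the locks in opposite orders. */
    static void lock_two_inodes(struct demo_inode *a, struct demo_inode *b)
    {
        /* compare as integers to stay portable even though the two objects
         * are not members of one array */
        if ((uintptr_t)a > (uintptr_t)b) {
            struct demo_inode *tmp = a;
            a = b;
            b = tmp;
        }
        pthread_mutex_lock(&a->lock);
        pthread_mutex_lock(&b->lock);
    }

    static void unlock_two_inodes(struct demo_inode *a, struct demo_inode *b)
    {
        pthread_mutex_unlock(&a->lock);
        pthread_mutex_unlock(&b->lock);
    }

    int main(void)
    {
        struct demo_inode x = { PTHREAD_MUTEX_INITIALIZER, 1 };
        struct demo_inode y = { PTHREAD_MUTEX_INITIALIZER, 2 };

        lock_two_inodes(&x, &y);
        printf("holding %d and %d\n", x.ino, y.ino);
        unlock_two_inodes(&x, &y);
        return 0;
    }
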
*/ -static int +static void mext_inode_double_unlock(struct inode *inode1, struct inode *inode2) { - int ret = 0; - - BUG_ON(inode1 == NULL && inode2 == NULL); - - ret = mext_check_null_inode(inode1, inode2, __func__, __LINE__); - if (ret < 0) - goto out; - - if (inode1) - mutex_unlock(&inode1->i_mutex); - - if (inode2 && inode2 != inode1) - mutex_unlock(&inode2->i_mutex); - -out: - return ret; + mutex_unlock(&inode1->i_mutex); + mutex_unlock(&inode2->i_mutex); } /** @@ -1187,16 +1114,23 @@ ext4_move_extents(struct file *o_filp, struct file *d_filp, ext4_lblk_t block_end, seq_start, add_blocks, file_end, seq_blocks = 0; ext4_lblk_t rest_blocks; pgoff_t orig_page_offset = 0, seq_end_page; - int ret1, ret2, depth, last_extent = 0; + int ret, depth, last_extent = 0; int blocks_per_page = PAGE_CACHE_SIZE >> orig_inode->i_blkbits; int data_offset_in_page; int block_len_in_page; int uninit; - /* orig and donor should be different file */ - if (orig_inode->i_ino == donor_inode->i_ino) { + if (orig_inode->i_sb != donor_inode->i_sb) { + ext4_debug("ext4 move extent: The argument files " + "should be in same FS [ino:orig %lu, donor %lu]\n", + orig_inode->i_ino, donor_inode->i_ino); + return -EINVAL; + } + + /* orig and donor should be different inodes */ + if (orig_inode == donor_inode) { ext4_debug("ext4 move extent: The argument files should not " - "be same file [ino:orig %lu, donor %lu]\n", + "be same inode [ino:orig %lu, donor %lu]\n", orig_inode->i_ino, donor_inode->i_ino); return -EINVAL; } @@ -1208,18 +1142,21 @@ ext4_move_extents(struct file *o_filp, struct file *d_filp, orig_inode->i_ino, donor_inode->i_ino); return -EINVAL; } - + /* TODO: This is non obvious task to swap blocks for inodes with full + jornaling enabled */ + if (ext4_should_journal_data(orig_inode) || + ext4_should_journal_data(donor_inode)) { + return -EINVAL; + } /* Protect orig and donor inodes against a truncate */ - ret1 = mext_inode_double_lock(orig_inode, donor_inode); - if (ret1 < 0) - return ret1; + mext_inode_double_lock(orig_inode, donor_inode); /* Protect extent tree against block allocations via delalloc */ double_down_write_data_sem(orig_inode, donor_inode); /* Check the filesystem environment whether move_extent can be done */ - ret1 = mext_check_arguments(orig_inode, donor_inode, orig_start, + ret = mext_check_arguments(orig_inode, donor_inode, orig_start, donor_start, &len); - if (ret1) + if (ret) goto out; file_end = (i_size_read(orig_inode) - 1) >> orig_inode->i_blkbits; @@ -1227,13 +1164,13 @@ ext4_move_extents(struct file *o_filp, struct file *d_filp, if (file_end < block_end) len -= block_end - file_end; - ret1 = get_ext_path(orig_inode, block_start, &orig_path); - if (ret1) + ret = get_ext_path(orig_inode, block_start, &orig_path); + if (ret) goto out; /* Get path structure to check the hole */ - ret1 = get_ext_path(orig_inode, block_start, &holecheck_path); - if (ret1) + ret = get_ext_path(orig_inode, block_start, &holecheck_path); + if (ret) goto out; depth = ext_depth(orig_inode); @@ -1252,13 +1189,13 @@ ext4_move_extents(struct file *o_filp, struct file *d_filp, last_extent = mext_next_extent(orig_inode, holecheck_path, &ext_cur); if (last_extent < 0) { - ret1 = last_extent; + ret = last_extent; goto out; } last_extent = mext_next_extent(orig_inode, orig_path, &ext_dummy); if (last_extent < 0) { - ret1 = last_extent; + ret = last_extent; goto out; } seq_start = le32_to_cpu(ext_cur->ee_block); @@ -1272,7 +1209,7 @@ ext4_move_extents(struct file *o_filp, struct file *d_filp, if 
(le32_to_cpu(ext_cur->ee_block) > block_end) { ext4_debug("ext4 move extent: The specified range of file " "may be the hole\n"); - ret1 = -EINVAL; + ret = -EINVAL; goto out; } @@ -1292,7 +1229,7 @@ ext4_move_extents(struct file *o_filp, struct file *d_filp, last_extent = mext_next_extent(orig_inode, holecheck_path, &ext_cur); if (last_extent < 0) { - ret1 = last_extent; + ret = last_extent; break; } add_blocks = ext4_ext_get_actual_len(ext_cur); @@ -1349,18 +1286,18 @@ ext4_move_extents(struct file *o_filp, struct file *d_filp, orig_page_offset, data_offset_in_page, block_len_in_page, uninit, - &ret1); + &ret); /* Count how many blocks we have exchanged */ *moved_len += block_len_in_page; - if (ret1 < 0) + if (ret < 0) break; if (*moved_len > len) { EXT4_ERROR_INODE(orig_inode, "We replaced blocks too much! " "sum of replaced: %llu requested: %llu", *moved_len, len); - ret1 = -EIO; + ret = -EIO; break; } @@ -1374,22 +1311,22 @@ ext4_move_extents(struct file *o_filp, struct file *d_filp, } double_down_write_data_sem(orig_inode, donor_inode); - if (ret1 < 0) + if (ret < 0) break; /* Decrease buffer counter */ if (holecheck_path) ext4_ext_drop_refs(holecheck_path); - ret1 = get_ext_path(orig_inode, seq_start, &holecheck_path); - if (ret1) + ret = get_ext_path(orig_inode, seq_start, &holecheck_path); + if (ret) break; depth = holecheck_path->p_depth; /* Decrease buffer counter */ if (orig_path) ext4_ext_drop_refs(orig_path); - ret1 = get_ext_path(orig_inode, seq_start, &orig_path); - if (ret1) + ret = get_ext_path(orig_inode, seq_start, &orig_path); + if (ret) break; ext_cur = holecheck_path[depth].p_ext; @@ -1412,12 +1349,7 @@ out: kfree(holecheck_path); } double_up_write_data_sem(orig_inode, donor_inode); - ret2 = mext_inode_double_unlock(orig_inode, donor_inode); - - if (ret1) - return ret1; - else if (ret2) - return ret2; + mext_inode_double_unlock(orig_inode, donor_inode); - return 0; + return ret; } diff --git a/fs/ext4/namei.c b/fs/ext4/namei.c index 0a94cbbe8828..ac769399b141 100644 --- a/fs/ext4/namei.c +++ b/fs/ext4/namei.c @@ -1801,9 +1801,7 @@ retry: err = PTR_ERR(inode); if (!IS_ERR(inode)) { init_special_inode(inode, inode->i_mode, rdev); -#ifdef CONFIG_EXT4_FS_XATTR inode->i_op = &ext4_special_inode_operations; -#endif err = ext4_add_nondir(handle, dentry, inode); } ext4_journal_stop(handle); diff --git a/fs/ext4/resize.c b/fs/ext4/resize.c index 3407a62590d6..231cacb8f640 100644 --- a/fs/ext4/resize.c +++ b/fs/ext4/resize.c @@ -200,8 +200,11 @@ static void free_flex_gd(struct ext4_new_flex_group_data *flex_gd) * be a partial of a flex group. * * @sb: super block of fs to which the groups belongs + * + * Returns 0 on a successful allocation of the metadata blocks in the + * block group. */ -static void ext4_alloc_group_tables(struct super_block *sb, +static int ext4_alloc_group_tables(struct super_block *sb, struct ext4_new_flex_group_data *flex_gd, int flexbg_size) { @@ -226,6 +229,8 @@ static void ext4_alloc_group_tables(struct super_block *sb, (last_group & ~(flexbg_size - 1)))); next_group: group = group_data[0].group; + if (src_group >= group_data[0].group + flex_gd->count) + return -ENOSPC; start_blk = ext4_group_first_block_no(sb, src_group); last_blk = start_blk + group_data[src_group - group].blocks_count; @@ -235,7 +240,6 @@ next_group: start_blk += overhead; - BUG_ON(src_group >= group_data[0].group + flex_gd->count); /* We collect contiguous blocks as much as possible. 
*/ src_group++; for (; src_group <= last_group; src_group++) @@ -300,6 +304,7 @@ next_group: group_data[i].free_blocks_count); } } + return 0; } static struct buffer_head *bclean(handle_t *handle, struct super_block *sb, @@ -451,6 +456,9 @@ static int setup_new_flex_group_blocks(struct super_block *sb, gdblocks = ext4_bg_num_gdb(sb, group); start = ext4_group_first_block_no(sb, group); + if (!ext4_bg_has_super(sb, group)) + goto handle_itb; + /* Copy all of the GDT blocks into the backup in this group */ for (j = 0, block = start + 1; j < gdblocks; j++, block++) { struct buffer_head *gdb; @@ -493,6 +501,7 @@ static int setup_new_flex_group_blocks(struct super_block *sb, goto out; } +handle_itb: /* Initialize group tables of the grop @group */ if (!(bg_flags[i] & EXT4_BG_INODE_ZEROED)) goto handle_bb; @@ -1293,13 +1302,15 @@ exit_journal: err = err2; if (!err) { - int i; + int gdb_num = group / EXT4_DESC_PER_BLOCK(sb); + int gdb_num_end = ((group + flex_gd->count - 1) / + EXT4_DESC_PER_BLOCK(sb)); + update_backups(sb, sbi->s_sbh->b_blocknr, (char *)es, sizeof(struct ext4_super_block)); - for (i = 0; i < flex_gd->count; i++, group++) { + for (; gdb_num <= gdb_num_end; gdb_num++) { struct buffer_head *gdb_bh; - int gdb_num; - gdb_num = group / EXT4_BLOCKS_PER_GROUP(sb); + gdb_bh = sbi->s_group_desc[gdb_num]; update_backups(sb, gdb_bh->b_blocknr, gdb_bh->b_data, gdb_bh->b_size); @@ -1676,7 +1687,8 @@ int ext4_resize_fs(struct super_block *sb, ext4_fsblk_t n_blocks_count) */ while (ext4_setup_next_flex_gd(sb, flex_gd, n_blocks_count, flexbg_size)) { - ext4_alloc_group_tables(sb, flex_gd, flexbg_size); + if (ext4_alloc_group_tables(sb, flex_gd, flexbg_size) != 0) + break; err = ext4_flex_group_add(sb, resize_inode, flex_gd); if (unlikely(err)) break; diff --git a/fs/ext4/super.c b/fs/ext4/super.c index 8f3459e2ffee..94dc725a1bb8 100644 --- a/fs/ext4/super.c +++ b/fs/ext4/super.c @@ -1599,9 +1599,7 @@ static int parse_options(char *options, struct super_block *sb, unsigned int *journal_ioprio, int is_remount) { -#ifdef CONFIG_QUOTA struct ext4_sb_info *sbi = EXT4_SB(sb); -#endif char *p; substring_t args[MAX_OPT_ARGS]; int token; @@ -1650,6 +1648,16 @@ static int parse_options(char *options, struct super_block *sb, } } #endif + if (test_opt(sb, DIOREAD_NOLOCK)) { + int blocksize = + BLOCK_SIZE << le32_to_cpu(sbi->s_es->s_log_block_size); + + if (blocksize < PAGE_CACHE_SIZE) { + ext4_msg(sb, KERN_ERR, "can't mount with " + "dioread_nolock if block size != PAGE_SIZE"); + return 0; + } + } return 1; } @@ -1692,7 +1700,7 @@ static inline void ext4_show_quota_options(struct seq_file *seq, static const char *token2str(int token) { - static const struct match_token *t; + const struct match_token *t; for (t = tokens; t->token != Opt_err; t++) if (t->token == token && !strchr(t->pattern, '=')) @@ -2112,7 +2120,9 @@ static void ext4_orphan_cleanup(struct super_block *sb, __func__, inode->i_ino, inode->i_size); jbd_debug(2, "truncating inode %lu to %lld bytes\n", inode->i_ino, inode->i_size); + mutex_lock(&inode->i_mutex); ext4_truncate(inode); + mutex_unlock(&inode->i_mutex); nr_truncates++; } else { ext4_msg(sb, KERN_DEBUG, @@ -3226,15 +3236,6 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent) clear_opt(sb, DELALLOC); } - blocksize = BLOCK_SIZE << le32_to_cpu(es->s_log_block_size); - if (test_opt(sb, DIOREAD_NOLOCK)) { - if (blocksize < PAGE_SIZE) { - ext4_msg(sb, KERN_ERR, "can't mount with " - "dioread_nolock if block size != PAGE_SIZE"); - goto failed_mount; - } - } - 
sb->s_flags = (sb->s_flags & ~MS_POSIXACL) | (test_opt(sb, POSIX_ACL) ? MS_POSIXACL : 0); @@ -3276,6 +3277,7 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent) if (!ext4_feature_set_ok(sb, (sb->s_flags & MS_RDONLY))) goto failed_mount; + blocksize = BLOCK_SIZE << le32_to_cpu(es->s_log_block_size); if (blocksize < EXT4_MIN_BLOCK_SIZE || blocksize > EXT4_MAX_BLOCK_SIZE) { ext4_msg(sb, KERN_ERR, @@ -4506,7 +4508,7 @@ static int ext4_remount(struct super_block *sb, int *flags, char *data) } ext4_setup_system_zone(sb); - if (sbi->s_journal == NULL) + if (sbi->s_journal == NULL && !(old_sb_flags & MS_RDONLY)) ext4_commit_super(sb, 1); #ifdef CONFIG_QUOTA diff --git a/fs/ext4/xattr.c b/fs/ext4/xattr.c index e88748e55c0f..e712a8cb1653 100644 --- a/fs/ext4/xattr.c +++ b/fs/ext4/xattr.c @@ -495,7 +495,7 @@ ext4_xattr_release_block(handle_t *handle, struct inode *inode, error = ext4_handle_dirty_metadata(handle, inode, bh); if (IS_SYNC(inode)) ext4_handle_sync(handle); - dquot_free_block(inode, 1); + dquot_free_block(inode, EXT4_C2B(EXT4_SB(inode->i_sb), 1)); ea_bdebug(bh, "refcount now=%d; releasing", le32_to_cpu(BHDR(bh)->h_refcount)); } @@ -784,7 +784,8 @@ inserted: else { /* The old block is released after updating the inode. */ - error = dquot_alloc_block(inode, 1); + error = dquot_alloc_block(inode, + EXT4_C2B(EXT4_SB(sb), 1)); if (error) goto cleanup; error = ext4_journal_get_write_access(handle, @@ -880,7 +881,7 @@ cleanup: return error; cleanup_dquot: - dquot_free_block(inode, 1); + dquot_free_block(inode, EXT4_C2B(EXT4_SB(sb), 1)); goto cleanup; bad_block: diff --git a/fs/fs-writeback.c b/fs/fs-writeback.c index 42a853eec632..1b21e0a061d5 100644 --- a/fs/fs-writeback.c +++ b/fs/fs-writeback.c @@ -68,6 +68,7 @@ int writeback_in_progress(struct backing_dev_info *bdi) { return test_bit(BDI_writeback_running, &bdi->state); } +EXPORT_SYMBOL(writeback_in_progress); static inline struct backing_dev_info *inode_to_bdi(struct inode *inode) { diff --git a/fs/fuse/dev.c b/fs/fuse/dev.c index ceb3b29c8cdf..1f81f70dda62 100644 --- a/fs/fuse/dev.c +++ b/fs/fuse/dev.c @@ -1580,6 +1580,7 @@ static int fuse_retrieve(struct fuse_conn *fc, struct inode *inode, req->pages[req->num_pages] = page; req->num_pages++; + offset = 0; num -= this_num; total_len += this_num; index++; diff --git a/fs/fuse/dir.c b/fs/fuse/dir.c index bc438320cac5..d48478a864b0 100644 --- a/fs/fuse/dir.c +++ b/fs/fuse/dir.c @@ -645,7 +645,14 @@ static int fuse_unlink(struct inode *dir, struct dentry *entry) spin_lock(&fc->lock); fi->attr_version = ++fc->attr_version; - drop_nlink(inode); + /* + * If i_nlink == 0 then unlink doesn't make sense, yet this can + * happen if userspace filesystem is careless. 
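The one-line fuse_retrieve() fix above (offset = 0;) restores the usual shape of a multi-page copy loop: only the first page is consumed from a non-zero offset, every later page starts at offset zero. A stand-alone illustration of that loop, not fuse code:

    #include <stdio.h>
    #include <string.h>

    #define PAGE_SIZE 4096u

    /* Copy "num" bytes that start "offset" bytes into the first page of src[];
     * subsequent pages are consumed from their beginning. */
    static size_t copy_from_pages(char *dst, char src[][PAGE_SIZE],
                                  size_t offset, size_t num)
    {
        size_t copied = 0;
        size_t page = 0;

        while (num > 0) {
            size_t this_num = PAGE_SIZE - offset;

            if (this_num > num)
                this_num = num;
            memcpy(dst + copied, &src[page][offset], this_num);
            copied += this_num;
            num -= this_num;
            page++;
            offset = 0;     /* the detail the fix restores: later pages start at 0 */
        }
        return copied;
    }

    int main(void)
    {
        static char pages[2][PAGE_SIZE];
        char out[6000];

        memset(pages, 'x', sizeof(pages));
        printf("copied %zu bytes\n", copy_from_pages(out, pages, 100, 5000));
        return 0;
    }
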
It would be + * difficult to enforce correct nlink usage so just ignore this + * condition here + */ + if (inode->i_nlink > 0) + drop_nlink(inode); spin_unlock(&fc->lock); fuse_invalidate_attr(inode); fuse_invalidate_attr(dir); diff --git a/fs/gfs2/export.c b/fs/gfs2/export.c index 70ba891654f8..fdef7f0e28ab 100644 --- a/fs/gfs2/export.c +++ b/fs/gfs2/export.c @@ -168,6 +168,8 @@ static struct dentry *gfs2_fh_to_dentry(struct super_block *sb, struct fid *fid, case GFS2_SMALL_FH_SIZE: case GFS2_LARGE_FH_SIZE: case GFS2_OLD_FH_SIZE: + if (fh_len < GFS2_SMALL_FH_SIZE) + return NULL; this.no_formal_ino = ((u64)be32_to_cpu(fh[0])) << 32; this.no_formal_ino |= be32_to_cpu(fh[1]); this.no_addr = ((u64)be32_to_cpu(fh[2])) << 32; @@ -187,6 +189,8 @@ static struct dentry *gfs2_fh_to_parent(struct super_block *sb, struct fid *fid, switch (fh_type) { case GFS2_LARGE_FH_SIZE: case GFS2_OLD_FH_SIZE: + if (fh_len < GFS2_LARGE_FH_SIZE) + return NULL; parent.no_formal_ino = ((u64)be32_to_cpu(fh[4])) << 32; parent.no_formal_ino |= be32_to_cpu(fh[5]); parent.no_addr = ((u64)be32_to_cpu(fh[6])) << 32; diff --git a/fs/gfs2/lops.c b/fs/gfs2/lops.c index 6b1efb594d90..a392e32af480 100644 --- a/fs/gfs2/lops.c +++ b/fs/gfs2/lops.c @@ -258,16 +258,14 @@ static void buf_lo_add(struct gfs2_sbd *sdp, struct gfs2_log_element *le) struct gfs2_meta_header *mh; struct gfs2_trans *tr; - lock_buffer(bd->bd_bh); - gfs2_log_lock(sdp); if (!list_empty(&bd->bd_list_tr)) - goto out; + return; tr = current->journal_info; tr->tr_touched = 1; tr->tr_num_buf++; list_add(&bd->bd_list_tr, &tr->tr_list_buf); if (!list_empty(&le->le_list)) - goto out; + return; set_bit(GLF_LFLUSH, &bd->bd_gl->gl_flags); set_bit(GLF_DIRTY, &bd->bd_gl->gl_flags); gfs2_meta_check(sdp, bd->bd_bh); @@ -278,9 +276,6 @@ static void buf_lo_add(struct gfs2_sbd *sdp, struct gfs2_log_element *le) sdp->sd_log_num_buf++; list_add(&le->le_list, &sdp->sd_log_le_buf); tr->tr_num_buf_new++; -out: - gfs2_log_unlock(sdp); - unlock_buffer(bd->bd_bh); } static void buf_lo_before_commit(struct gfs2_sbd *sdp) @@ -611,11 +606,9 @@ static void databuf_lo_add(struct gfs2_sbd *sdp, struct gfs2_log_element *le) struct address_space *mapping = bd->bd_bh->b_page->mapping; struct gfs2_inode *ip = GFS2_I(mapping->host); - lock_buffer(bd->bd_bh); - gfs2_log_lock(sdp); if (tr) { if (!list_empty(&bd->bd_list_tr)) - goto out; + return; tr->tr_touched = 1; if (gfs2_is_jdata(ip)) { tr->tr_num_buf++; @@ -623,7 +616,7 @@ static void databuf_lo_add(struct gfs2_sbd *sdp, struct gfs2_log_element *le) } } if (!list_empty(&le->le_list)) - goto out; + return; set_bit(GLF_LFLUSH, &bd->bd_gl->gl_flags); set_bit(GLF_DIRTY, &bd->bd_gl->gl_flags); @@ -635,9 +628,6 @@ static void databuf_lo_add(struct gfs2_sbd *sdp, struct gfs2_log_element *le) } else { list_add_tail(&le->le_list, &sdp->sd_log_le_ordered); } -out: - gfs2_log_unlock(sdp); - unlock_buffer(bd->bd_bh); } static void gfs2_check_magic(struct buffer_head *bh) diff --git a/fs/gfs2/trans.c b/fs/gfs2/trans.c index 86ac75d99d31..6ab2a77e7726 100644 --- a/fs/gfs2/trans.c +++ b/fs/gfs2/trans.c @@ -145,14 +145,22 @@ void gfs2_trans_add_bh(struct gfs2_glock *gl, struct buffer_head *bh, int meta) struct gfs2_sbd *sdp = gl->gl_sbd; struct gfs2_bufdata *bd; + lock_buffer(bh); + gfs2_log_lock(sdp); bd = bh->b_private; if (bd) gfs2_assert(sdp, bd->bd_gl == gl); else { + gfs2_log_unlock(sdp); + unlock_buffer(bh); gfs2_attach_bufdata(gl, bh, meta); bd = bh->b_private; + lock_buffer(bh); + gfs2_log_lock(sdp); } lops_add(sdp, &bd->bd_le); + 
gfs2_log_unlock(sdp); + unlock_buffer(bh); } void gfs2_trans_add_revoke(struct gfs2_sbd *sdp, struct gfs2_bufdata *bd) diff --git a/fs/isofs/export.c b/fs/isofs/export.c index dd4687ff30d0..516eb21895c6 100644 --- a/fs/isofs/export.c +++ b/fs/isofs/export.c @@ -179,7 +179,7 @@ static struct dentry *isofs_fh_to_parent(struct super_block *sb, { struct isofs_fid *ifid = (struct isofs_fid *)fid; - if (fh_type != 2) + if (fh_len < 2 || fh_type != 2) return NULL; return isofs_export_iget(sb, diff --git a/fs/jbd/commit.c b/fs/jbd/commit.c index f2b9a571f4cf..9626bc8cbbd4 100644 --- a/fs/jbd/commit.c +++ b/fs/jbd/commit.c @@ -86,7 +86,12 @@ nope: static void release_data_buffer(struct buffer_head *bh) { if (buffer_freed(bh)) { + WARN_ON_ONCE(buffer_dirty(bh)); clear_buffer_freed(bh); + clear_buffer_mapped(bh); + clear_buffer_new(bh); + clear_buffer_req(bh); + bh->b_bdev = NULL; release_buffer_page(bh); } else put_bh(bh); @@ -853,17 +858,35 @@ restart_loop: * there's no point in keeping a checkpoint record for * it. */ - /* A buffer which has been freed while still being - * journaled by a previous transaction may end up still - * being dirty here, but we want to avoid writing back - * that buffer in the future after the "add to orphan" - * operation been committed, That's not only a performance - * gain, it also stops aliasing problems if the buffer is - * left behind for writeback and gets reallocated for another - * use in a different page. */ - if (buffer_freed(bh) && !jh->b_next_transaction) { - clear_buffer_freed(bh); - clear_buffer_jbddirty(bh); + /* + * A buffer which has been freed while still being journaled by + * a previous transaction. + */ + if (buffer_freed(bh)) { + /* + * If the running transaction is the one containing + * "add to orphan" operation (b_next_transaction != + * NULL), we have to wait for that transaction to + * commit before we can really get rid of the buffer. + * So just clear b_modified to not confuse transaction + * credit accounting and refile the buffer to + * BJ_Forget of the running transaction. If the just + * committed transaction contains "add to orphan" + * operation, we can completely invalidate the buffer + * now. We are rather throughout in that since the + * buffer may be still accessible when blocksize < + * pagesize and it is attached to the last partial + * page. + */ + jh->b_modified = 0; + if (!jh->b_next_transaction) { + clear_buffer_freed(bh); + clear_buffer_jbddirty(bh); + clear_buffer_mapped(bh); + clear_buffer_new(bh); + clear_buffer_req(bh); + bh->b_bdev = NULL; + } } if (buffer_jbddirty(bh)) { diff --git a/fs/jbd/transaction.c b/fs/jbd/transaction.c index b2a7e5244e39..ee959a9fed62 100644 --- a/fs/jbd/transaction.c +++ b/fs/jbd/transaction.c @@ -1845,15 +1845,16 @@ static int __dispose_buffer(struct journal_head *jh, transaction_t *transaction) * We're outside-transaction here. Either or both of j_running_transaction * and j_committing_transaction may be NULL. 
*/ -static int journal_unmap_buffer(journal_t *journal, struct buffer_head *bh) +static int journal_unmap_buffer(journal_t *journal, struct buffer_head *bh, + int partial_page) { transaction_t *transaction; struct journal_head *jh; int may_free = 1; - int ret; BUFFER_TRACE(bh, "entry"); +retry: /* * It is safe to proceed here without the j_list_lock because the * buffers cannot be stolen by try_to_free_buffers as long as we are @@ -1881,10 +1882,18 @@ static int journal_unmap_buffer(journal_t *journal, struct buffer_head *bh) * clear the buffer dirty bit at latest at the moment when the * transaction marking the buffer as freed in the filesystem * structures is committed because from that moment on the - * buffer can be reallocated and used by a different page. + * block can be reallocated and used by a different page. * Since the block hasn't been freed yet but the inode has * already been added to orphan list, it is safe for us to add * the buffer to BJ_Forget list of the newest transaction. + * + * Also we have to clear buffer_mapped flag of a truncated buffer + * because the buffer_head may be attached to the page straddling + * i_size (can happen only when blocksize < pagesize) and thus the + * buffer_head can be reused when the file is extended again. So we end + * up keeping around invalidated buffers attached to transactions' + * BJ_Forget list just to stop checkpointing code from cleaning up + * the transaction this buffer was modified in. */ transaction = jh->b_transaction; if (transaction == NULL) { @@ -1911,13 +1920,9 @@ static int journal_unmap_buffer(journal_t *journal, struct buffer_head *bh) * committed, the buffer won't be needed any * longer. */ JBUFFER_TRACE(jh, "checkpointed: add to BJ_Forget"); - ret = __dispose_buffer(jh, + may_free = __dispose_buffer(jh, journal->j_running_transaction); - journal_put_journal_head(jh); - spin_unlock(&journal->j_list_lock); - jbd_unlock_bh_state(bh); - spin_unlock(&journal->j_state_lock); - return ret; + goto zap_buffer; } else { /* There is no currently-running transaction. So the * orphan record which we wrote for this file must have @@ -1925,13 +1930,9 @@ static int journal_unmap_buffer(journal_t *journal, struct buffer_head *bh) * the committing transaction, if it exists. */ if (journal->j_committing_transaction) { JBUFFER_TRACE(jh, "give to committing trans"); - ret = __dispose_buffer(jh, + may_free = __dispose_buffer(jh, journal->j_committing_transaction); - journal_put_journal_head(jh); - spin_unlock(&journal->j_list_lock); - jbd_unlock_bh_state(bh); - spin_unlock(&journal->j_state_lock); - return ret; + goto zap_buffer; } else { /* The orphan record's transaction has * committed. We can cleanse this buffer */ @@ -1952,10 +1953,26 @@ static int journal_unmap_buffer(journal_t *journal, struct buffer_head *bh) } /* * The buffer is committing, we simply cannot touch - * it. So we just set j_next_transaction to the - * running transaction (if there is one) and mark - * buffer as freed so that commit code knows it should - * clear dirty bits when it is done with the buffer. + * it. If the page is straddling i_size we have to wait + * for commit and try again. + */ + if (partial_page) { + tid_t tid = journal->j_committing_transaction->t_tid; + + journal_put_journal_head(jh); + spin_unlock(&journal->j_list_lock); + jbd_unlock_bh_state(bh); + spin_unlock(&journal->j_state_lock); + unlock_buffer(bh); + log_wait_commit(journal, tid); + lock_buffer(bh); + goto retry; + } + /* + * OK, buffer won't be reachable after truncate. 
We just set + * j_next_transaction to the running transaction (if there is + * one) and mark buffer as freed so that commit code knows it + * should clear dirty bits when it is done with the buffer. */ set_buffer_freed(bh); if (journal->j_running_transaction && buffer_jbddirty(bh)) @@ -1978,6 +1995,14 @@ static int journal_unmap_buffer(journal_t *journal, struct buffer_head *bh) } zap_buffer: + /* + * This is tricky. Although the buffer is truncated, it may be reused + * if blocksize < pagesize and it is attached to the page straddling + * EOF. Since the buffer might have been added to BJ_Forget list of the + * running transaction, journal_get_write_access() won't clear + * b_modified and credit accounting gets confused. So clear b_modified + * here. */ + jh->b_modified = 0; journal_put_journal_head(jh); zap_buffer_no_jh: spin_unlock(&journal->j_list_lock); @@ -2026,7 +2051,8 @@ void journal_invalidatepage(journal_t *journal, if (offset <= curr_off) { /* This block is wholly outside the truncation point */ lock_buffer(bh); - may_free &= journal_unmap_buffer(journal, bh); + may_free &= journal_unmap_buffer(journal, bh, + offset > 0); unlock_buffer(bh); } curr_off = next_off; diff --git a/fs/jbd2/journal.c b/fs/jbd2/journal.c index 9956ac68b72b..e5bfb1150cf5 100644 --- a/fs/jbd2/journal.c +++ b/fs/jbd2/journal.c @@ -1317,6 +1317,11 @@ static void jbd2_mark_journal_empty(journal_t *journal) BUG_ON(!mutex_is_locked(&journal->j_checkpoint_mutex)); read_lock(&journal->j_state_lock); + /* Is it already empty? */ + if (sb->s_start == 0) { + read_unlock(&journal->j_state_lock); + return; + } jbd_debug(1, "JBD2: Marking journal as empty (seq %d)\n", journal->j_tail_sequence); diff --git a/fs/jbd2/transaction.c b/fs/jbd2/transaction.c index ddcd3549c6c2..de8b4cb7c31e 100644 --- a/fs/jbd2/transaction.c +++ b/fs/jbd2/transaction.c @@ -209,7 +209,8 @@ repeat: if (!new_transaction) goto alloc_transaction; write_lock(&journal->j_state_lock); - if (!journal->j_running_transaction) { + if (!journal->j_running_transaction && + !journal->j_barrier_count) { jbd2_get_transaction(journal, new_transaction); new_transaction = NULL; } diff --git a/fs/jffs2/file.c b/fs/jffs2/file.c index db3889ba8818..8608f8779143 100644 --- a/fs/jffs2/file.c +++ b/fs/jffs2/file.c @@ -138,33 +138,39 @@ static int jffs2_write_begin(struct file *filp, struct address_space *mapping, struct page *pg; struct inode *inode = mapping->host; struct jffs2_inode_info *f = JFFS2_INODE_INFO(inode); + struct jffs2_sb_info *c = JFFS2_SB_INFO(inode->i_sb); + struct jffs2_raw_inode ri; + uint32_t alloc_len = 0; pgoff_t index = pos >> PAGE_CACHE_SHIFT; uint32_t pageofs = index << PAGE_CACHE_SHIFT; int ret = 0; + jffs2_dbg(1, "%s()\n", __func__); + + if (pageofs > inode->i_size) { + ret = jffs2_reserve_space(c, sizeof(ri), &alloc_len, + ALLOC_NORMAL, JFFS2_SUMMARY_INODE_SIZE); + if (ret) + return ret; + } + + mutex_lock(&f->sem); pg = grab_cache_page_write_begin(mapping, index, flags); - if (!pg) + if (!pg) { + if (alloc_len) + jffs2_complete_reservation(c); + mutex_unlock(&f->sem); return -ENOMEM; + } *pagep = pg; - jffs2_dbg(1, "%s()\n", __func__); - - if (pageofs > inode->i_size) { + if (alloc_len) { /* Make new hole frag from old EOF to new page */ - struct jffs2_sb_info *c = JFFS2_SB_INFO(inode->i_sb); - struct jffs2_raw_inode ri; struct jffs2_full_dnode *fn; - uint32_t alloc_len; jffs2_dbg(1, "Writing new hole frag 0x%x-0x%x between current EOF and new page\n", (unsigned int)inode->i_size, pageofs); - ret = jffs2_reserve_space(c, sizeof(ri), 
&alloc_len, - ALLOC_NORMAL, JFFS2_SUMMARY_INODE_SIZE); - if (ret) - goto out_page; - - mutex_lock(&f->sem); memset(&ri, 0, sizeof(ri)); ri.magic = cpu_to_je16(JFFS2_MAGIC_BITMASK); @@ -191,7 +197,6 @@ static int jffs2_write_begin(struct file *filp, struct address_space *mapping, if (IS_ERR(fn)) { ret = PTR_ERR(fn); jffs2_complete_reservation(c); - mutex_unlock(&f->sem); goto out_page; } ret = jffs2_add_full_dnode_to_inode(c, f, fn); @@ -206,12 +211,10 @@ static int jffs2_write_begin(struct file *filp, struct address_space *mapping, jffs2_mark_node_obsolete(c, fn->raw); jffs2_free_full_dnode(fn); jffs2_complete_reservation(c); - mutex_unlock(&f->sem); goto out_page; } jffs2_complete_reservation(c); inode->i_size = pageofs; - mutex_unlock(&f->sem); } /* @@ -220,18 +223,18 @@ static int jffs2_write_begin(struct file *filp, struct address_space *mapping, * case of a short-copy. */ if (!PageUptodate(pg)) { - mutex_lock(&f->sem); ret = jffs2_do_readpage_nolock(inode, pg); - mutex_unlock(&f->sem); if (ret) goto out_page; } + mutex_unlock(&f->sem); jffs2_dbg(1, "end write_begin(). pg->flags %lx\n", pg->flags); return ret; out_page: unlock_page(pg); page_cache_release(pg); + mutex_unlock(&f->sem); return ret; } diff --git a/fs/jffs2/nodemgmt.c b/fs/jffs2/nodemgmt.c index 6784d1e7a7eb..ffa8f12c7a54 100644 --- a/fs/jffs2/nodemgmt.c +++ b/fs/jffs2/nodemgmt.c @@ -375,14 +375,16 @@ static int jffs2_do_reserve_space(struct jffs2_sb_info *c, uint32_t minsize, spin_unlock(&c->erase_completion_lock); ret = jffs2_prealloc_raw_node_refs(c, jeb, 1); - if (ret) - return ret; + /* Just lock it again and continue. Nothing much can change because we hold c->alloc_sem anyway. In fact, it's not entirely clear why we hold c->erase_completion_lock in the majority of this function... but that's a question for another (more caffeine-rich) day. 
*/ spin_lock(&c->erase_completion_lock); + if (ret) + return ret; + waste = jeb->free_size; jffs2_link_node_ref(c, jeb, (jeb->offset + c->sector_size - waste) | REF_OBSOLETE, diff --git a/fs/jffs2/wbuf.c b/fs/jffs2/wbuf.c index 74d9be19df3f..6bec5c0bc268 100644 --- a/fs/jffs2/wbuf.c +++ b/fs/jffs2/wbuf.c @@ -1043,10 +1043,10 @@ int jffs2_check_oob_empty(struct jffs2_sb_info *c, ops.datbuf = NULL; ret = mtd_read_oob(c->mtd, jeb->offset, &ops); - if (ret || ops.oobretlen != ops.ooblen) { + if ((ret && !mtd_is_bitflip(ret)) || ops.oobretlen != ops.ooblen) { pr_err("cannot read OOB for EB at %08x, requested %zd bytes, read %zd bytes, error %d\n", jeb->offset, ops.ooblen, ops.oobretlen, ret); - if (!ret) + if (!ret || mtd_is_bitflip(ret)) ret = -EIO; return ret; } @@ -1085,10 +1085,10 @@ int jffs2_check_nand_cleanmarker(struct jffs2_sb_info *c, ops.datbuf = NULL; ret = mtd_read_oob(c->mtd, jeb->offset, &ops); - if (ret || ops.oobretlen != ops.ooblen) { + if ((ret && !mtd_is_bitflip(ret)) || ops.oobretlen != ops.ooblen) { pr_err("cannot read OOB for EB at %08x, requested %zd bytes, read %zd bytes, error %d\n", jeb->offset, ops.ooblen, ops.oobretlen, ret); - if (!ret) + if (!ret || mtd_is_bitflip(ret)) ret = -EIO; return ret; } diff --git a/fs/lockd/clntproc.c b/fs/lockd/clntproc.c index 8392cb85bd54..a3a09877ea62 100644 --- a/fs/lockd/clntproc.c +++ b/fs/lockd/clntproc.c @@ -551,6 +551,9 @@ again: status = nlmclnt_block(block, req, NLMCLNT_POLL_TIMEOUT); if (status < 0) break; + /* Resend the blocking lock request after a server reboot */ + if (resp->status == nlm_lck_denied_grace_period) + continue; if (resp->status != nlm_lck_blocked) break; } diff --git a/fs/lockd/clntxdr.c b/fs/lockd/clntxdr.c index d269ada7670e..982d2676e1f8 100644 --- a/fs/lockd/clntxdr.c +++ b/fs/lockd/clntxdr.c @@ -223,7 +223,7 @@ static void encode_nlm_stat(struct xdr_stream *xdr, { __be32 *p; - BUG_ON(be32_to_cpu(stat) > NLM_LCK_DENIED_GRACE_PERIOD); + WARN_ON_ONCE(be32_to_cpu(stat) > NLM_LCK_DENIED_GRACE_PERIOD); p = xdr_reserve_space(xdr, 4); *p = stat; } diff --git a/fs/lockd/mon.c b/fs/lockd/mon.c index 7ef14b3c5bee..606a8dd8818c 100644 --- a/fs/lockd/mon.c +++ b/fs/lockd/mon.c @@ -40,6 +40,7 @@ struct nsm_args { u32 proc; char *mon_name; + char *nodename; }; struct nsm_res { @@ -94,6 +95,7 @@ static int nsm_mon_unmon(struct nsm_handle *nsm, u32 proc, struct nsm_res *res, .vers = 3, .proc = NLMPROC_NSM_NOTIFY, .mon_name = nsm->sm_mon_name, + .nodename = utsname()->nodename, }; struct rpc_message msg = { .rpc_argp = &args, @@ -430,7 +432,7 @@ static void encode_my_id(struct xdr_stream *xdr, const struct nsm_args *argp) { __be32 *p; - encode_nsm_string(xdr, utsname()->nodename); + encode_nsm_string(xdr, argp->nodename); p = xdr_reserve_space(xdr, 4 + 4 + 4); *p++ = cpu_to_be32(argp->prog); *p++ = cpu_to_be32(argp->vers); diff --git a/fs/lockd/svc.c b/fs/lockd/svc.c index 3250f280a171..58ddc38cfccd 100644 --- a/fs/lockd/svc.c +++ b/fs/lockd/svc.c @@ -251,10 +251,9 @@ out_err: return err; } -static int lockd_up_net(struct net *net) +static int lockd_up_net(struct svc_serv *serv, struct net *net) { struct lockd_net *ln = net_generic(net, lockd_net_id); - struct svc_serv *serv = nlmsvc_rqst->rq_server; int error; if (ln->nlmsvc_users++) @@ -276,10 +275,9 @@ err_rpcb: return error; } -static void lockd_down_net(struct net *net) +static void lockd_down_net(struct svc_serv *serv, struct net *net) { struct lockd_net *ln = net_generic(net, lockd_net_id); - struct svc_serv *serv = nlmsvc_rqst->rq_server; if (ln->nlmsvc_users) 
{ if (--ln->nlmsvc_users == 0) { @@ -307,7 +305,7 @@ int lockd_up(struct net *net) * Check whether we're already up and running. */ if (nlmsvc_rqst) { - error = lockd_up_net(net); + error = lockd_up_net(nlmsvc_rqst->rq_server, net); goto out; } @@ -378,7 +376,7 @@ out: return error; err_start: - lockd_down_net(net); + lockd_down_net(serv, net); goto destroy_and_out; } EXPORT_SYMBOL_GPL(lockd_up); @@ -390,7 +388,7 @@ void lockd_down(struct net *net) { mutex_lock(&nlmsvc_mutex); - lockd_down_net(net); + lockd_down_net(nlmsvc_rqst->rq_server, net); if (nlmsvc_users) { if (--nlmsvc_users) goto out; diff --git a/fs/lockd/svcproc.c b/fs/lockd/svcproc.c index d27aab11f324..d413af338da1 100644 --- a/fs/lockd/svcproc.c +++ b/fs/lockd/svcproc.c @@ -67,7 +67,8 @@ nlmsvc_retrieve_args(struct svc_rqst *rqstp, struct nlm_args *argp, /* Obtain file pointer. Not used by FREE_ALL call. */ if (filp != NULL) { - if ((error = nlm_lookup_file(rqstp, &file, &lock->fh)) != 0) + error = cast_status(nlm_lookup_file(rqstp, &file, &lock->fh)); + if (error != 0) goto no_locks; *filp = file; diff --git a/fs/nfs/blocklayout/blocklayout.c b/fs/nfs/blocklayout/blocklayout.c index 7f6a23f0244e..3a9c2470e4f9 100644 --- a/fs/nfs/blocklayout/blocklayout.c +++ b/fs/nfs/blocklayout/blocklayout.c @@ -162,25 +162,39 @@ static struct bio *bl_alloc_init_bio(int npg, sector_t isect, return bio; } -static struct bio *bl_add_page_to_bio(struct bio *bio, int npg, int rw, +static struct bio *do_add_page_to_bio(struct bio *bio, int npg, int rw, sector_t isect, struct page *page, struct pnfs_block_extent *be, void (*end_io)(struct bio *, int err), - struct parallel_io *par) + struct parallel_io *par, + unsigned int offset, int len) { + isect = isect + (offset >> SECTOR_SHIFT); + dprintk("%s: npg %d rw %d isect %llu offset %u len %d\n", __func__, + npg, rw, (unsigned long long)isect, offset, len); retry: if (!bio) { bio = bl_alloc_init_bio(npg, isect, be, end_io, par); if (!bio) return ERR_PTR(-ENOMEM); } - if (bio_add_page(bio, page, PAGE_CACHE_SIZE, 0) < PAGE_CACHE_SIZE) { + if (bio_add_page(bio, page, len, offset) < len) { bio = bl_submit_bio(rw, bio); goto retry; } return bio; } +static struct bio *bl_add_page_to_bio(struct bio *bio, int npg, int rw, + sector_t isect, struct page *page, + struct pnfs_block_extent *be, + void (*end_io)(struct bio *, int err), + struct parallel_io *par) +{ + return do_add_page_to_bio(bio, npg, rw, isect, page, be, + end_io, par, 0, PAGE_CACHE_SIZE); +} + /* This is basically copied from mpage_end_io_read */ static void bl_end_io_read(struct bio *bio, int err) { @@ -443,6 +457,107 @@ map_block(struct buffer_head *bh, sector_t isect, struct pnfs_block_extent *be) return; } +static void +bl_read_single_end_io(struct bio *bio, int error) +{ + struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1; + struct page *page = bvec->bv_page; + + /* Only one page in bvec */ + unlock_page(page); +} + +static int +bl_do_readpage_sync(struct page *page, struct pnfs_block_extent *be, + unsigned int offset, unsigned int len) +{ + struct bio *bio; + struct page *shadow_page; + sector_t isect; + char *kaddr, *kshadow_addr; + int ret = 0; + + dprintk("%s: offset %u len %u\n", __func__, offset, len); + + shadow_page = alloc_page(GFP_NOFS | __GFP_HIGHMEM); + if (shadow_page == NULL) + return -ENOMEM; + + bio = bio_alloc(GFP_NOIO, 1); + if (bio == NULL) + return -ENOMEM; + + isect = (page->index << PAGE_CACHE_SECTOR_SHIFT) + + (offset / SECTOR_SIZE); + + bio->bi_sector = isect - be->be_f_offset + be->be_v_offset; + 
bio->bi_bdev = be->be_mdev; + bio->bi_end_io = bl_read_single_end_io; + + lock_page(shadow_page); + if (bio_add_page(bio, shadow_page, + SECTOR_SIZE, round_down(offset, SECTOR_SIZE)) == 0) { + unlock_page(shadow_page); + bio_put(bio); + return -EIO; + } + + submit_bio(READ, bio); + wait_on_page_locked(shadow_page); + if (unlikely(!test_bit(BIO_UPTODATE, &bio->bi_flags))) { + ret = -EIO; + } else { + kaddr = kmap_atomic(page); + kshadow_addr = kmap_atomic(shadow_page); + memcpy(kaddr + offset, kshadow_addr + offset, len); + kunmap_atomic(kshadow_addr); + kunmap_atomic(kaddr); + } + __free_page(shadow_page); + bio_put(bio); + + return ret; +} + +static int +bl_read_partial_page_sync(struct page *page, struct pnfs_block_extent *be, + unsigned int dirty_offset, unsigned int dirty_len, + bool full_page) +{ + int ret = 0; + unsigned int start, end; + + if (full_page) { + start = 0; + end = PAGE_CACHE_SIZE; + } else { + start = round_down(dirty_offset, SECTOR_SIZE); + end = round_up(dirty_offset + dirty_len, SECTOR_SIZE); + } + + dprintk("%s: offset %u len %d\n", __func__, dirty_offset, dirty_len); + if (!be) { + zero_user_segments(page, start, dirty_offset, + dirty_offset + dirty_len, end); + if (start == 0 && end == PAGE_CACHE_SIZE && + trylock_page(page)) { + SetPageUptodate(page); + unlock_page(page); + } + return ret; + } + + if (start != dirty_offset) + ret = bl_do_readpage_sync(page, be, start, + dirty_offset - start); + + if (!ret && (dirty_offset + dirty_len < end)) + ret = bl_do_readpage_sync(page, be, dirty_offset + dirty_len, + end - dirty_offset - dirty_len); + + return ret; +} + /* Given an unmapped page, zero it or read in page for COW, page is locked * by caller. */ @@ -476,7 +591,6 @@ init_page_for_write(struct page *page, struct pnfs_block_extent *cow_read) SetPageUptodate(page); cleanup: - bl_put_extent(cow_read); if (bh) free_buffer_head(bh); if (ret) { @@ -547,6 +661,7 @@ bl_write_pagelist(struct nfs_write_data *wdata, int sync) struct parallel_io *par; loff_t offset = wdata->args.offset; size_t count = wdata->args.count; + unsigned int pg_offset, pg_len, saved_len; struct page **pages = wdata->args.pages; struct page *page; pgoff_t index; @@ -651,10 +766,11 @@ next_page: if (!extent_length) { /* We've used up the previous extent */ bl_put_extent(be); + bl_put_extent(cow_read); bio = bl_submit_bio(WRITE, bio); /* Get the next one */ be = bl_find_get_extent(BLK_LSEG2EXT(wdata->lseg), - isect, NULL); + isect, &cow_read); if (!be || !is_writable(be, isect)) { wdata->pnfs_error = -EINVAL; goto out; @@ -671,7 +787,26 @@ next_page: extent_length = be->be_length - (isect - be->be_f_offset); } - if (be->be_state == PNFS_BLOCK_INVALID_DATA) { + + dprintk("%s offset %lld count %Zu\n", __func__, offset, count); + pg_offset = offset & ~PAGE_CACHE_MASK; + if (pg_offset + count > PAGE_CACHE_SIZE) + pg_len = PAGE_CACHE_SIZE - pg_offset; + else + pg_len = count; + + saved_len = pg_len; + if (be->be_state == PNFS_BLOCK_INVALID_DATA && + !bl_is_sector_init(be->be_inval, isect)) { + ret = bl_read_partial_page_sync(pages[i], cow_read, + pg_offset, pg_len, true); + if (ret) { + dprintk("%s bl_read_partial_page_sync fail %d\n", + __func__, ret); + wdata->pnfs_error = ret; + goto out; + } + ret = bl_mark_sectors_init(be->be_inval, isect, PAGE_CACHE_SECTORS); if (unlikely(ret)) { @@ -680,15 +815,33 @@ next_page: wdata->pnfs_error = ret; goto out; } + + /* Expand to full page write */ + pg_offset = 0; + pg_len = PAGE_CACHE_SIZE; + } else if ((pg_offset & (SECTOR_SIZE - 1)) || + (pg_len & (SECTOR_SIZE 
- 1))) { + /* ahh, nasty case. We have to do sync full sector + * read-modify-write cycles. + */ + unsigned int saved_offset = pg_offset; + ret = bl_read_partial_page_sync(pages[i], be, pg_offset, + pg_len, false); + pg_offset = round_down(pg_offset, SECTOR_SIZE); + pg_len = round_up(saved_offset + pg_len, SECTOR_SIZE) + - pg_offset; } - bio = bl_add_page_to_bio(bio, wdata->npages - i, WRITE, + bio = do_add_page_to_bio(bio, wdata->npages - i, WRITE, isect, pages[i], be, - bl_end_io_write, par); + bl_end_io_write, par, + pg_offset, pg_len); if (IS_ERR(bio)) { wdata->pnfs_error = PTR_ERR(bio); bio = NULL; goto out; } + offset += saved_len; + count -= saved_len; isect += PAGE_CACHE_SECTORS; last_isect = isect; extent_length -= PAGE_CACHE_SECTORS; @@ -706,17 +859,16 @@ next_page: } write_done: - wdata->res.count = (last_isect << SECTOR_SHIFT) - (offset); - if (count < wdata->res.count) { - wdata->res.count = count; - } + wdata->res.count = wdata->args.count; out: bl_put_extent(be); + bl_put_extent(cow_read); bl_submit_bio(WRITE, bio); put_parallel(par); return PNFS_ATTEMPTED; out_mds: bl_put_extent(be); + bl_put_extent(cow_read); kfree(par); return PNFS_NOT_ATTEMPTED; } @@ -1003,6 +1155,7 @@ static const struct nfs_pageio_ops bl_pg_write_ops = { static struct pnfs_layoutdriver_type blocklayout_type = { .id = LAYOUT_BLOCK_VOLUME, .name = "LAYOUT_BLOCK_VOLUME", + .owner = THIS_MODULE, .read_pagelist = bl_read_pagelist, .write_pagelist = bl_write_pagelist, .alloc_layout_hdr = bl_alloc_layout_hdr, diff --git a/fs/nfs/blocklayout/blocklayout.h b/fs/nfs/blocklayout/blocklayout.h index 03350690118e..39bb51a8dd18 100644 --- a/fs/nfs/blocklayout/blocklayout.h +++ b/fs/nfs/blocklayout/blocklayout.h @@ -41,6 +41,7 @@ #define PAGE_CACHE_SECTORS (PAGE_CACHE_SIZE >> SECTOR_SHIFT) #define PAGE_CACHE_SECTOR_SHIFT (PAGE_CACHE_SHIFT - SECTOR_SHIFT) +#define SECTOR_SIZE (1 << SECTOR_SHIFT) struct block_mount_id { spinlock_t bm_lock; /* protects list */ diff --git a/fs/nfs/client.c b/fs/nfs/client.c index 60f7e4ec842c..37f6de36973c 100644 --- a/fs/nfs/client.c +++ b/fs/nfs/client.c @@ -694,8 +694,7 @@ static int nfs_create_rpc_client(struct nfs_client *clp, */ static void nfs_destroy_server(struct nfs_server *server) { - if (!(server->flags & NFS_MOUNT_LOCAL_FLOCK) || - !(server->flags & NFS_MOUNT_LOCAL_FCNTL)) + if (server->nlm_host) nlmclnt_done(server->nlm_host); } diff --git a/fs/nfs/dir.c b/fs/nfs/dir.c index 8789210c6905..a0daac7ad508 100644 --- a/fs/nfs/dir.c +++ b/fs/nfs/dir.c @@ -1103,7 +1103,7 @@ static int nfs_lookup_revalidate(struct dentry *dentry, struct nameidata *nd) struct nfs_fattr *fattr = NULL; int error; - if (nd->flags & LOOKUP_RCU) + if (nd && (nd->flags & LOOKUP_RCU)) return -ECHILD; parent = dget_parent(dentry); @@ -1219,11 +1219,14 @@ static int nfs_dentry_delete(const struct dentry *dentry) } +/* Ensure that we revalidate inode->i_nlink */ static void nfs_drop_nlink(struct inode *inode) { spin_lock(&inode->i_lock); - if (inode->i_nlink > 0) - drop_nlink(inode); + /* drop the inode if we're reasonably sure this is the last link */ + if (inode->i_nlink == 1) + clear_nlink(inode); + NFS_I(inode)->cache_validity |= NFS_INO_INVALID_ATTR; spin_unlock(&inode->i_lock); } @@ -1238,8 +1241,8 @@ static void nfs_dentry_iput(struct dentry *dentry, struct inode *inode) NFS_I(inode)->cache_validity |= NFS_INO_INVALID_DATA; if (dentry->d_flags & DCACHE_NFSFS_RENAMED) { - drop_nlink(inode); nfs_complete_unlink(dentry, inode); + nfs_drop_nlink(inode); } iput(inode); } @@ -1502,7 +1505,7 @@ static int 
nfs_open_revalidate(struct dentry *dentry, struct nameidata *nd) struct iattr attr; int openflags, ret = 0; - if (nd->flags & LOOKUP_RCU) + if (nd && (nd->flags & LOOKUP_RCU)) return -ECHILD; inode = dentry->d_inode; @@ -1800,10 +1803,8 @@ static int nfs_safe_remove(struct dentry *dentry) if (inode != NULL) { nfs_inode_return_delegation(inode); error = NFS_PROTO(dir)->remove(dir, &dentry->d_name); - /* The VFS may want to delete this inode */ if (error == 0) nfs_drop_nlink(inode); - nfs_mark_for_revalidate(inode); } else error = NFS_PROTO(dir)->remove(dir, &dentry->d_name); if (error == -ENOENT) diff --git a/fs/nfs/dns_resolve.c b/fs/nfs/dns_resolve.c index b3924b8a6000..786cd6533713 100644 --- a/fs/nfs/dns_resolve.c +++ b/fs/nfs/dns_resolve.c @@ -214,7 +214,7 @@ static int nfs_dns_parse(struct cache_detail *cd, char *buf, int buflen) { char buf1[NFS_DNS_HOSTNAME_MAXLEN+1]; struct nfs_dns_ent key, *item; - unsigned long ttl; + unsigned int ttl; ssize_t len; int ret = -EINVAL; @@ -237,7 +237,8 @@ static int nfs_dns_parse(struct cache_detail *cd, char *buf, int buflen) key.namelen = len; memset(&key.h, 0, sizeof(key.h)); - ttl = get_expiry(&buf); + if (get_uint(&buf, &ttl) < 0) + goto out; if (ttl == 0) goto out; key.h.expiry_time = ttl + seconds_since_boot(); diff --git a/fs/nfs/idmap.c b/fs/nfs/idmap.c index 98d664349f7e..9131b17f3a72 100644 --- a/fs/nfs/idmap.c +++ b/fs/nfs/idmap.c @@ -747,9 +747,8 @@ idmap_pipe_downcall(struct file *filp, const char __user *src, size_t mlen) } if (!(im.im_status & IDMAP_STATUS_SUCCESS)) { - ret = mlen; - complete_request_key(cons, -ENOKEY); - goto out_incomplete; + ret = -ENOKEY; + goto out; } namelen_in = strnlen(im.im_name, IDMAP_NAMESZ); @@ -766,7 +765,6 @@ idmap_pipe_downcall(struct file *filp, const char __user *src, size_t mlen) out: complete_request_key(cons, ret); -out_incomplete: return ret; } diff --git a/fs/nfs/inode.c b/fs/nfs/inode.c index e8bbfa5b3500..edf411988bf3 100644 --- a/fs/nfs/inode.c +++ b/fs/nfs/inode.c @@ -152,7 +152,7 @@ static void nfs_zap_caches_locked(struct inode *inode) nfsi->attrtimeo = NFS_MINATTRTIMEO(inode); nfsi->attrtimeo_timestamp = jiffies; - memset(NFS_COOKIEVERF(inode), 0, sizeof(NFS_COOKIEVERF(inode))); + memset(NFS_I(inode)->cookieverf, 0, sizeof(NFS_I(inode)->cookieverf)); if (S_ISREG(mode) || S_ISDIR(mode) || S_ISLNK(mode)) nfsi->cache_validity |= NFS_INO_INVALID_ATTR|NFS_INO_INVALID_DATA|NFS_INO_INVALID_ACCESS|NFS_INO_INVALID_ACL|NFS_INO_REVAL_PAGECACHE; else diff --git a/fs/nfs/internal.h b/fs/nfs/internal.h index b777bdaba4c5..33aa76fec3aa 100644 --- a/fs/nfs/internal.h +++ b/fs/nfs/internal.h @@ -277,8 +277,9 @@ extern void nfs_sb_active(struct super_block *sb); extern void nfs_sb_deactive(struct super_block *sb); /* namespace.c */ +#define NFS_PATH_CANONICAL 1 extern char *nfs_path(char **p, struct dentry *dentry, - char *buffer, ssize_t buflen); + char *buffer, ssize_t buflen, unsigned flags); extern struct vfsmount *nfs_d_automount(struct path *path); #ifdef CONFIG_NFS_V4 rpc_authflavor_t nfs_find_best_sec(struct nfs4_secinfo_flavors *); @@ -371,7 +372,7 @@ static inline char *nfs_devname(struct dentry *dentry, char *buffer, ssize_t buflen) { char *dummy; - return nfs_path(&dummy, dentry, buffer, buflen); + return nfs_path(&dummy, dentry, buffer, buflen, NFS_PATH_CANONICAL); } /* diff --git a/fs/nfs/mount_clnt.c b/fs/nfs/mount_clnt.c index 8e65c7f1f87c..015f71f8f62c 100644 --- a/fs/nfs/mount_clnt.c +++ b/fs/nfs/mount_clnt.c @@ -181,7 +181,7 @@ int nfs_mount(struct nfs_mount_request *info) else 
msg.rpc_proc = &mnt_clnt->cl_procinfo[MOUNTPROC_MNT]; - status = rpc_call_sync(mnt_clnt, &msg, 0); + status = rpc_call_sync(mnt_clnt, &msg, RPC_TASK_SOFT|RPC_TASK_TIMEOUT); rpc_shutdown_client(mnt_clnt); if (status < 0) diff --git a/fs/nfs/namespace.c b/fs/nfs/namespace.c index d51868e5683c..43275167649a 100644 --- a/fs/nfs/namespace.c +++ b/fs/nfs/namespace.c @@ -37,6 +37,7 @@ static struct vfsmount *nfs_do_submount(struct dentry *dentry, * @dentry - pointer to dentry * @buffer - result buffer * @buflen - length of buffer + * @flags - options (see below) * * Helper function for constructing the server pathname * by arbitrary hashed dentry. @@ -44,8 +45,14 @@ static struct vfsmount *nfs_do_submount(struct dentry *dentry, * This is mainly for use in figuring out the path on the * server side when automounting on top of an existing partition * and in generating /proc/mounts and friends. + * + * Supported flags: + * NFS_PATH_CANONICAL: ensure there is exactly one slash after + * the original device (export) name + * (if unset, the original name is returned verbatim) */ -char *nfs_path(char **p, struct dentry *dentry, char *buffer, ssize_t buflen) +char *nfs_path(char **p, struct dentry *dentry, char *buffer, ssize_t buflen, + unsigned flags) { char *end; int namelen; @@ -78,7 +85,7 @@ rename_retry: rcu_read_unlock(); goto rename_retry; } - if (*end != '/') { + if ((flags & NFS_PATH_CANONICAL) && *end != '/') { if (--buflen < 0) { spin_unlock(&dentry->d_lock); rcu_read_unlock(); @@ -95,9 +102,11 @@ rename_retry: return end; } namelen = strlen(base); - /* Strip off excess slashes in base string */ - while (namelen > 0 && base[namelen - 1] == '/') - namelen--; + if (flags & NFS_PATH_CANONICAL) { + /* Strip off excess slashes in base string */ + while (namelen > 0 && base[namelen - 1] == '/') + namelen--; + } buflen -= namelen; if (buflen < 0) { spin_unlock(&dentry->d_lock); @@ -244,11 +253,31 @@ out_nofree: return mnt; } +static int +nfs_namespace_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat *stat) +{ + if (NFS_FH(dentry->d_inode)->size != 0) + return nfs_getattr(mnt, dentry, stat); + generic_fillattr(dentry->d_inode, stat); + return 0; +} + +static int +nfs_namespace_setattr(struct dentry *dentry, struct iattr *attr) +{ + if (NFS_FH(dentry->d_inode)->size != 0) + return nfs_setattr(dentry, attr); + return -EACCES; +} + const struct inode_operations nfs_mountpoint_inode_operations = { .getattr = nfs_getattr, + .setattr = nfs_setattr, }; const struct inode_operations nfs_referral_inode_operations = { + .getattr = nfs_namespace_getattr, + .setattr = nfs_namespace_setattr, }; static void nfs_expire_automounts(struct work_struct *work) diff --git a/fs/nfs/nfs3proc.c b/fs/nfs/nfs3proc.c index 5242eae6711a..a7a043d272da 100644 --- a/fs/nfs/nfs3proc.c +++ b/fs/nfs/nfs3proc.c @@ -69,7 +69,7 @@ do_proc_get_root(struct rpc_clnt *client, struct nfs_fh *fhandle, nfs_fattr_init(info->fattr); status = rpc_call_sync(client, &msg, 0); dprintk("%s: reply fsinfo: %d\n", __func__, status); - if (!(info->fattr->valid & NFS_ATTR_FATTR)) { + if (status == 0 && !(info->fattr->valid & NFS_ATTR_FATTR)) { msg.rpc_proc = &nfs3_procedures[NFS3PROC_GETATTR]; msg.rpc_resp = info->fattr; status = rpc_call_sync(client, &msg, 0); @@ -644,7 +644,7 @@ nfs3_proc_readdir(struct dentry *dentry, struct rpc_cred *cred, u64 cookie, struct page **pages, unsigned int count, int plus) { struct inode *dir = dentry->d_inode; - __be32 *verf = NFS_COOKIEVERF(dir); + __be32 *verf = NFS_I(dir)->cookieverf; struct 
nfs3_readdirargs arg = { .fh = NFS_FH(dir), .cookie = cookie, diff --git a/fs/nfs/nfs4namespace.c b/fs/nfs/nfs4namespace.c index a7f3dedc4ec7..b604be2dae6a 100644 --- a/fs/nfs/nfs4namespace.c +++ b/fs/nfs/nfs4namespace.c @@ -81,7 +81,8 @@ static char *nfs_path_component(const char *nfspath, const char *end) static char *nfs4_path(struct dentry *dentry, char *buffer, ssize_t buflen) { char *limit; - char *path = nfs_path(&limit, dentry, buffer, buflen); + char *path = nfs_path(&limit, dentry, buffer, buflen, + NFS_PATH_CANONICAL); if (!IS_ERR(path)) { char *path_component = nfs_path_component(path, limit); if (path_component) diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c index d60c2d870ab0..30351877aee2 100644 --- a/fs/nfs/nfs4proc.c +++ b/fs/nfs/nfs4proc.c @@ -307,8 +307,7 @@ static int nfs4_handle_exception(struct nfs_server *server, int errorcode, struc dprintk("%s ERROR: %d Reset session\n", __func__, errorcode); nfs4_schedule_session_recovery(clp->cl_session); - exception->retry = 1; - break; + goto wait_on_recovery; #endif /* defined(CONFIG_NFS_V4_1) */ case -NFS4ERR_FILE_OPEN: if (exception->timeout > HZ) { @@ -1478,9 +1477,11 @@ static void nfs4_open_prepare(struct rpc_task *task, void *calldata) data->timestamp = jiffies; if (nfs4_setup_sequence(data->o_arg.server, &data->o_arg.seq_args, - &data->o_res.seq_res, task)) - return; - rpc_call_start(task); + &data->o_res.seq_res, + task) != 0) + nfs_release_seqid(data->o_arg.seqid); + else + rpc_call_start(task); return; unlock_no_action: rcu_read_unlock(); @@ -2097,9 +2098,10 @@ static void nfs4_close_prepare(struct rpc_task *task, void *data) if (nfs4_setup_sequence(NFS_SERVER(calldata->inode), &calldata->arg.seq_args, &calldata->res.seq_res, - task)) - goto out; - rpc_call_start(task); + task) != 0) + nfs_release_seqid(calldata->arg.seqid); + else + rpc_call_start(task); out: dprintk("%s: done!\n", __func__); } @@ -3150,11 +3152,11 @@ static int _nfs4_proc_readdir(struct dentry *dentry, struct rpc_cred *cred, dentry->d_parent->d_name.name, dentry->d_name.name, (unsigned long long)cookie); - nfs4_setup_readdir(cookie, NFS_COOKIEVERF(dir), dentry, &args); + nfs4_setup_readdir(cookie, NFS_I(dir)->cookieverf, dentry, &args); res.pgbase = args.pgbase; status = nfs4_call_sync(NFS_SERVER(dir)->client, NFS_SERVER(dir), &msg, &args.seq_args, &res.seq_res, 0); if (status >= 0) { - memcpy(NFS_COOKIEVERF(dir), res.verifier.data, NFS4_VERIFIER_SIZE); + memcpy(NFS_I(dir)->cookieverf, res.verifier.data, NFS4_VERIFIER_SIZE); status += args.pgbase; } @@ -4306,6 +4308,7 @@ static void nfs4_locku_done(struct rpc_task *task, void *data) if (nfs4_async_handle_error(task, calldata->server, NULL) == -EAGAIN) rpc_restart_call_prepare(task); } + nfs_release_seqid(calldata->arg.seqid); } static void nfs4_locku_prepare(struct rpc_task *task, void *data) @@ -4322,9 +4325,11 @@ static void nfs4_locku_prepare(struct rpc_task *task, void *data) calldata->timestamp = jiffies; if (nfs4_setup_sequence(calldata->server, &calldata->arg.seq_args, - &calldata->res.seq_res, task)) - return; - rpc_call_start(task); + &calldata->res.seq_res, + task) != 0) + nfs_release_seqid(calldata->arg.seqid); + else + rpc_call_start(task); } static const struct rpc_call_ops nfs4_locku_ops = { @@ -4469,7 +4474,7 @@ static void nfs4_lock_prepare(struct rpc_task *task, void *calldata) /* Do we need to do an open_to_lock_owner? 
*/ if (!(data->arg.lock_seqid->sequence->flags & NFS_SEQID_CONFIRMED)) { if (nfs_wait_on_sequence(data->arg.open_seqid, task) != 0) - return; + goto out_release_lock_seqid; data->arg.open_stateid = &state->stateid; data->arg.new_lock_owner = 1; data->res.open_seqid = data->arg.open_seqid; @@ -4478,10 +4483,15 @@ static void nfs4_lock_prepare(struct rpc_task *task, void *calldata) data->timestamp = jiffies; if (nfs4_setup_sequence(data->server, &data->arg.seq_args, - &data->res.seq_res, task)) + &data->res.seq_res, + task) == 0) { + rpc_call_start(task); return; - rpc_call_start(task); - dprintk("%s: done!, ret = %d\n", __func__, data->rpc_status); + } + nfs_release_seqid(data->arg.open_seqid); +out_release_lock_seqid: + nfs_release_seqid(data->arg.lock_seqid); + dprintk("%s: done!, ret = %d\n", __func__, task->tk_status); } static void nfs4_recover_lock_prepare(struct rpc_task *task, void *calldata) @@ -5729,13 +5739,26 @@ static void nfs41_sequence_prepare(struct rpc_task *task, void *data) rpc_call_start(task); } +static void nfs41_sequence_prepare_privileged(struct rpc_task *task, void *data) +{ + rpc_task_set_priority(task, RPC_PRIORITY_PRIVILEGED); + nfs41_sequence_prepare(task, data); +} + static const struct rpc_call_ops nfs41_sequence_ops = { .rpc_call_done = nfs41_sequence_call_done, .rpc_call_prepare = nfs41_sequence_prepare, .rpc_release = nfs41_sequence_release, }; -static struct rpc_task *_nfs41_proc_sequence(struct nfs_client *clp, struct rpc_cred *cred) +static const struct rpc_call_ops nfs41_sequence_privileged_ops = { + .rpc_call_done = nfs41_sequence_call_done, + .rpc_call_prepare = nfs41_sequence_prepare_privileged, + .rpc_release = nfs41_sequence_release, +}; + +static struct rpc_task *_nfs41_proc_sequence(struct nfs_client *clp, struct rpc_cred *cred, + const struct rpc_call_ops *seq_ops) { struct nfs4_sequence_data *calldata; struct rpc_message msg = { @@ -5745,7 +5768,7 @@ static struct rpc_task *_nfs41_proc_sequence(struct nfs_client *clp, struct rpc_ struct rpc_task_setup task_setup_data = { .rpc_client = clp->cl_rpcclient, .rpc_message = &msg, - .callback_ops = &nfs41_sequence_ops, + .callback_ops = seq_ops, .flags = RPC_TASK_ASYNC | RPC_TASK_SOFT, }; @@ -5772,7 +5795,7 @@ static int nfs41_proc_async_sequence(struct nfs_client *clp, struct rpc_cred *cr if ((renew_flags & NFS4_RENEW_TIMEOUT) == 0) return 0; - task = _nfs41_proc_sequence(clp, cred); + task = _nfs41_proc_sequence(clp, cred, &nfs41_sequence_ops); if (IS_ERR(task)) ret = PTR_ERR(task); else @@ -5786,7 +5809,7 @@ static int nfs4_proc_sequence(struct nfs_client *clp, struct rpc_cred *cred) struct rpc_task *task; int ret; - task = _nfs41_proc_sequence(clp, cred); + task = _nfs41_proc_sequence(clp, cred, &nfs41_sequence_privileged_ops); if (IS_ERR(task)) { ret = PTR_ERR(task); goto out; @@ -5966,11 +5989,58 @@ static void nfs4_layoutget_done(struct rpc_task *task, void *calldata) dprintk("<-- %s\n", __func__); } +static size_t max_response_pages(struct nfs_server *server) +{ + u32 max_resp_sz = server->nfs_client->cl_session->fc_attrs.max_resp_sz; + return nfs_page_array_len(0, max_resp_sz); +} + +static void nfs4_free_pages(struct page **pages, size_t size) +{ + int i; + + if (!pages) + return; + + for (i = 0; i < size; i++) { + if (!pages[i]) + break; + __free_page(pages[i]); + } + kfree(pages); +} + +static struct page **nfs4_alloc_pages(size_t size, gfp_t gfp_flags) +{ + struct page **pages; + int i; + + pages = kcalloc(size, sizeof(struct page *), gfp_flags); + if (!pages) { + dprintk("%s: can't alloc 
array of %zu pages\n", __func__, size); + return NULL; + } + + for (i = 0; i < size; i++) { + pages[i] = alloc_page(gfp_flags); + if (!pages[i]) { + dprintk("%s: failed to allocate page\n", __func__); + nfs4_free_pages(pages, size); + return NULL; + } + } + + return pages; +} + static void nfs4_layoutget_release(void *calldata) { struct nfs4_layoutget *lgp = calldata; + struct nfs_server *server = NFS_SERVER(lgp->args.inode); + size_t max_pages = max_response_pages(server); dprintk("--> %s\n", __func__); + nfs4_free_pages(lgp->args.layout.pages, max_pages); put_nfs_open_context(lgp->args.ctx); kfree(calldata); dprintk("<-- %s\n", __func__); @@ -5982,9 +6052,10 @@ static const struct rpc_call_ops nfs4_layoutget_call_ops = { .rpc_release = nfs4_layoutget_release, }; -int nfs4_proc_layoutget(struct nfs4_layoutget *lgp) +int nfs4_proc_layoutget(struct nfs4_layoutget *lgp, gfp_t gfp_flags) { struct nfs_server *server = NFS_SERVER(lgp->args.inode); + size_t max_pages = max_response_pages(server); struct rpc_task *task; struct rpc_message msg = { .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LAYOUTGET], @@ -6002,6 +6073,13 @@ int nfs4_proc_layoutget(struct nfs4_layoutget *lgp) dprintk("--> %s\n", __func__); + lgp->args.layout.pages = nfs4_alloc_pages(max_pages, gfp_flags); + if (!lgp->args.layout.pages) { + nfs4_layoutget_release(lgp); + return -ENOMEM; + } + lgp->args.layout.pglen = max_pages * PAGE_SIZE; + lgp->res.layoutp = &lgp->args.layout; lgp->res.seq_res.sr_slot = NULL; nfs41_init_sequence(&lgp->args.seq_args, &lgp->res.seq_res, 0); @@ -6047,12 +6125,8 @@ static void nfs4_layoutreturn_done(struct rpc_task *task, void *calldata) return; } spin_lock(&lo->plh_inode->i_lock); - if (task->tk_status == 0) { - if (lrp->res.lrs_present) { - pnfs_set_layout_stateid(lo, &lrp->res.stateid, true); - } else - BUG_ON(!list_empty(&lo->plh_segs)); - } + if (task->tk_status == 0 && lrp->res.lrs_present) + pnfs_set_layout_stateid(lo, &lrp->res.stateid, true); lo->plh_block_lgets--; spin_unlock(&lo->plh_inode->i_lock); dprintk("<-- %s\n", __func__); diff --git a/fs/nfs/nfs4xdr.c b/fs/nfs/nfs4xdr.c index c54aae364bee..c8ac9a1461c2 100644 --- a/fs/nfs/nfs4xdr.c +++ b/fs/nfs/nfs4xdr.c @@ -6081,7 +6081,8 @@ static int nfs4_xdr_dec_open(struct rpc_rqst *rqstp, struct xdr_stream *xdr, status = decode_open(xdr, res); if (status) goto out; - if (decode_getfh(xdr, &res->fh) != 0) + status = decode_getfh(xdr, &res->fh); + if (status) goto out; if (decode_getfattr(xdr, res->f_attr, res->server) != 0) goto out; diff --git a/fs/nfs/objlayout/objio_osd.c b/fs/nfs/objlayout/objio_osd.c index 1afe74c42c8a..65538f569f42 100644 --- a/fs/nfs/objlayout/objio_osd.c +++ b/fs/nfs/objlayout/objio_osd.c @@ -589,6 +589,7 @@ static struct pnfs_layoutdriver_type objlayout_type = { .flags = PNFS_LAYOUTRET_ON_SETATTR | PNFS_LAYOUTRET_ON_ERROR, + .owner = THIS_MODULE, .alloc_layout_hdr = objlayout_alloc_layout_hdr, .free_layout_hdr = objlayout_free_layout_hdr, diff --git a/fs/nfs/pnfs.c b/fs/nfs/pnfs.c index 38512bcd2e98..059e2c350ad7 100644 --- a/fs/nfs/pnfs.c +++ b/fs/nfs/pnfs.c @@ -574,9 +574,6 @@ send_layoutget(struct pnfs_layout_hdr *lo, struct nfs_server *server = NFS_SERVER(ino); struct nfs4_layoutget *lgp; struct pnfs_layout_segment *lseg = NULL; - struct page **pages = NULL; - int i; - u32 max_resp_sz, max_pages; dprintk("--> %s\n", __func__); @@ -585,20 +582,6 @@ send_layoutget(struct pnfs_layout_hdr *lo, if (lgp == NULL) return NULL; - /* allocate pages for xdr post processing */ - max_resp_sz = 
server->nfs_client->cl_session->fc_attrs.max_resp_sz; - max_pages = nfs_page_array_len(0, max_resp_sz); - - pages = kcalloc(max_pages, sizeof(struct page *), gfp_flags); - if (!pages) - goto out_err_free; - - for (i = 0; i < max_pages; i++) { - pages[i] = alloc_page(gfp_flags); - if (!pages[i]) - goto out_err_free; - } - lgp->args.minlength = PAGE_CACHE_SIZE; if (lgp->args.minlength > range->length) lgp->args.minlength = range->length; @@ -607,39 +590,19 @@ send_layoutget(struct pnfs_layout_hdr *lo, lgp->args.type = server->pnfs_curr_ld->id; lgp->args.inode = ino; lgp->args.ctx = get_nfs_open_context(ctx); - lgp->args.layout.pages = pages; - lgp->args.layout.pglen = max_pages * PAGE_SIZE; lgp->lsegpp = &lseg; lgp->gfp_flags = gfp_flags; /* Synchronously retrieve layout information from server and * store in lseg. */ - nfs4_proc_layoutget(lgp); + nfs4_proc_layoutget(lgp, gfp_flags); if (!lseg) { /* remember that LAYOUTGET failed and suspend trying */ set_bit(lo_fail_bit(range->iomode), &lo->plh_flags); } - /* free xdr pages */ - for (i = 0; i < max_pages; i++) - __free_page(pages[i]); - kfree(pages); - return lseg; - -out_err_free: - /* free any allocated xdr pages, lgp as it's not used */ - if (pages) { - for (i = 0; i < max_pages; i++) { - if (!pages[i]) - break; - __free_page(pages[i]); - } - kfree(pages); - } - kfree(lgp); - return NULL; } /* Initiates a LAYOUTRETURN(FILE) */ diff --git a/fs/nfs/pnfs.h b/fs/nfs/pnfs.h index 442ebf68eeec..8d95818330cc 100644 --- a/fs/nfs/pnfs.h +++ b/fs/nfs/pnfs.h @@ -161,7 +161,7 @@ extern int nfs4_proc_getdevicelist(struct nfs_server *server, struct pnfs_devicelist *devlist); extern int nfs4_proc_getdeviceinfo(struct nfs_server *server, struct pnfs_device *dev); -extern int nfs4_proc_layoutget(struct nfs4_layoutget *lgp); +extern int nfs4_proc_layoutget(struct nfs4_layoutget *lgp, gfp_t gfp_flags); extern int nfs4_proc_layoutreturn(struct nfs4_layoutreturn *lrp); /* pnfs.c */ diff --git a/fs/nfs/super.c b/fs/nfs/super.c index 4ac7fca7e4bf..c252161e7b1d 100644 --- a/fs/nfs/super.c +++ b/fs/nfs/super.c @@ -812,7 +812,7 @@ static int nfs_show_devname(struct seq_file *m, struct dentry *root) int err = 0; if (!page) return -ENOMEM; - devname = nfs_path(&dummy, root, page, PAGE_SIZE); + devname = nfs_path(&dummy, root, page, PAGE_SIZE, 0); if (IS_ERR(devname)) err = PTR_ERR(devname); else @@ -1138,7 +1138,7 @@ static int nfs_get_option_str(substring_t args[], char **option) { kfree(*option); *option = match_strdup(args); - return !option; + return !*option; } static int nfs_get_option_ul(substring_t args[], unsigned long *option) @@ -1886,6 +1886,7 @@ static int nfs_validate_mount_data(void *options, memcpy(sap, &data->addr, sizeof(data->addr)); args->nfs_server.addrlen = sizeof(data->addr); + args->nfs_server.port = ntohs(data->addr.sin_port); if (!nfs_verify_server_address(sap)) goto out_no_address; @@ -2598,6 +2599,7 @@ static int nfs4_validate_mount_data(void *options, return -EFAULT; if (!nfs_verify_server_address(sap)) goto out_no_address; + args->nfs_server.port = ntohs(((struct sockaddr_in *)sap)->sin_port); if (data->auth_flavourlen) { if (data->auth_flavourlen > 1) @@ -3146,4 +3148,6 @@ static struct dentry *nfs4_referral_mount(struct file_system_type *fs_type, return res; } +MODULE_ALIAS("nfs4"); + #endif /* CONFIG_NFS_V4 */ diff --git a/fs/nfsd/export.c b/fs/nfsd/export.c index 8e9689abbc0c..e9a020f283f7 100644 --- a/fs/nfsd/export.c +++ b/fs/nfsd/export.c @@ -401,7 +401,7 @@ fsloc_parse(char **mesg, char *buf, struct nfsd4_fs_locations *fsloc) 
int migrated, i, err; /* listsize */ - err = get_int(mesg, &fsloc->locations_count); + err = get_uint(mesg, &fsloc->locations_count); if (err) return err; if (fsloc->locations_count > MAX_FS_LOCATIONS) @@ -459,7 +459,7 @@ static int secinfo_parse(char **mesg, char *buf, struct svc_export *exp) return -EINVAL; for (f = exp->ex_flavors; f < exp->ex_flavors + listsize; f++) { - err = get_int(mesg, &f->pseudoflavor); + err = get_uint(mesg, &f->pseudoflavor); if (err) return err; /* @@ -468,7 +468,7 @@ static int secinfo_parse(char **mesg, char *buf, struct svc_export *exp) * problem at export time instead of when a client fails * to authenticate. */ - err = get_int(mesg, &f->flags); + err = get_uint(mesg, &f->flags); if (err) return err; /* Only some flags are allowed to differ between flavors: */ diff --git a/fs/nfsd/nfs4idmap.c b/fs/nfsd/nfs4idmap.c index 322d11ce06a4..01b090d135b7 100644 --- a/fs/nfsd/nfs4idmap.c +++ b/fs/nfsd/nfs4idmap.c @@ -581,7 +581,7 @@ numeric_name_to_id(struct svc_rqst *rqstp, int type, const char *name, u32 namel /* Just to make sure it's null-terminated: */ memcpy(buf, name, namelen); buf[namelen] = '\0'; - ret = kstrtouint(name, 10, id); + ret = kstrtouint(buf, 10, id); return ret == 0; } diff --git a/fs/nfsd/nfs4proc.c b/fs/nfsd/nfs4proc.c index 987e719fbae8..dd0308d65f66 100644 --- a/fs/nfsd/nfs4proc.c +++ b/fs/nfsd/nfs4proc.c @@ -194,6 +194,7 @@ static __be32 do_open_lookup(struct svc_rqst *rqstp, struct svc_fh *current_fh, struct nfsd4_open *open) { struct svc_fh *resfh; + int accmode; __be32 status; resfh = kmalloc(sizeof(struct svc_fh), GFP_KERNEL); @@ -253,9 +254,10 @@ do_open_lookup(struct svc_rqst *rqstp, struct svc_fh *current_fh, struct nfsd4_o /* set reply cache */ fh_copy_shallow(&open->op_openowner->oo_owner.so_replay.rp_openfh, &resfh->fh_handle); - if (!open->op_created) - status = do_open_permission(rqstp, resfh, open, - NFSD_MAY_NOP); + accmode = NFSD_MAY_NOP; + if (open->op_created) + accmode |= NFSD_MAY_OWNER_OVERRIDE; + status = do_open_permission(rqstp, resfh, open, accmode); set_change_info(&open->op_cinfo, current_fh); fh_dup2(current_fh, resfh); out: diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c index e79c24e232f4..abd785e5fd2d 100644 --- a/fs/nfsd/nfs4state.c +++ b/fs/nfsd/nfs4state.c @@ -1053,6 +1053,8 @@ free_client(struct nfs4_client *clp) put_group_info(clp->cl_cred.cr_group_info); kfree(clp->cl_principal); kfree(clp->cl_name.data); + idr_remove_all(&clp->cl_stateids); + idr_destroy(&clp->cl_stateids); kfree(clp); } @@ -2356,7 +2358,7 @@ nfsd4_init_slabs(void) if (openowner_slab == NULL) goto out_nomem; lockowner_slab = kmem_cache_create("nfsd4_lockowners", - sizeof(struct nfs4_openowner), 0, 0, NULL); + sizeof(struct nfs4_lockowner), 0, 0, NULL); if (lockowner_slab == NULL) goto out_nomem; file_slab = kmem_cache_create("nfsd4_files", @@ -3783,6 +3785,7 @@ nfsd4_close(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate, memcpy(&close->cl_stateid, &stp->st_stid.sc_stateid, sizeof(stateid_t)); nfsd4_close_open_stateid(stp); + release_last_closed_stateid(oo); oo->oo_last_closed_stid = stp; /* place unused nfs4_stateowners on so_close_lru list to be diff --git a/fs/nfsd/nfs4xdr.c b/fs/nfsd/nfs4xdr.c index 283d15e9f46d..967d68eef282 100644 --- a/fs/nfsd/nfs4xdr.c +++ b/fs/nfsd/nfs4xdr.c @@ -2920,11 +2920,16 @@ nfsd4_encode_read(struct nfsd4_compoundres *resp, __be32 nfserr, len = maxcount; v = 0; while (len > 0) { - pn = resp->rqstp->rq_resused++; + pn = resp->rqstp->rq_resused; + if (!resp->rqstp->rq_respages[pn]) { /* 
ran out of pages */ + maxcount -= len; + break; + } resp->rqstp->rq_vec[v].iov_base = page_address(resp->rqstp->rq_respages[pn]); resp->rqstp->rq_vec[v].iov_len = len < PAGE_SIZE ? len : PAGE_SIZE; + resp->rqstp->rq_resused++; v++; len -= PAGE_SIZE; } @@ -2970,6 +2975,8 @@ nfsd4_encode_readlink(struct nfsd4_compoundres *resp, __be32 nfserr, struct nfsd return nfserr; if (resp->xbuf->page_len) return nfserr_resource; + if (!resp->rqstp->rq_respages[resp->rqstp->rq_resused]) + return nfserr_resource; page = page_address(resp->rqstp->rq_respages[resp->rqstp->rq_resused++]); @@ -3019,6 +3026,8 @@ nfsd4_encode_readdir(struct nfsd4_compoundres *resp, __be32 nfserr, struct nfsd4 return nfserr; if (resp->xbuf->page_len) return nfserr_resource; + if (!resp->rqstp->rq_respages[resp->rqstp->rq_resused]) + return nfserr_resource; RESERVE_SPACE(NFS4_VERIFIER_SIZE); savep = p; diff --git a/fs/nfsd/nfsctl.c b/fs/nfsd/nfsctl.c index 3ab12eb2967f..d014727fc8f2 100644 --- a/fs/nfsd/nfsctl.c +++ b/fs/nfsd/nfsctl.c @@ -663,9 +663,7 @@ static ssize_t __write_ports_addfd(char *buf) err = svc_addsock(nfsd_serv, fd, buf, SIMPLE_TRANSACTION_LIMIT); if (err < 0) { - if (nfsd_serv->sv_nrthreads == 1) - svc_shutdown_net(nfsd_serv, net); - svc_destroy(nfsd_serv); + nfsd_destroy(net); return err; } @@ -734,9 +732,7 @@ out_close: svc_xprt_put(xprt); } out_err: - if (nfsd_serv->sv_nrthreads == 1) - svc_shutdown_net(nfsd_serv, net); - svc_destroy(nfsd_serv); + nfsd_destroy(net); return err; } diff --git a/fs/nfsd/nfsd.h b/fs/nfsd/nfsd.h index 1671429ffa66..1336a6512cdc 100644 --- a/fs/nfsd/nfsd.h +++ b/fs/nfsd/nfsd.h @@ -73,6 +73,17 @@ int nfsd_nrpools(void); int nfsd_get_nrthreads(int n, int *); int nfsd_set_nrthreads(int n, int *); +static inline void nfsd_destroy(struct net *net) +{ + int destroy = (nfsd_serv->sv_nrthreads == 1); + + if (destroy) + svc_shutdown_net(nfsd_serv, net); + svc_destroy(nfsd_serv); + if (destroy) + nfsd_serv = NULL; +} + #if defined(CONFIG_NFSD_V2_ACL) || defined(CONFIG_NFSD_V3_ACL) #ifdef CONFIG_NFSD_V2_ACL extern struct svc_version nfsd_acl_version2; diff --git a/fs/nfsd/nfssvc.c b/fs/nfsd/nfssvc.c index bcda12a3f082..53459b055cfc 100644 --- a/fs/nfsd/nfssvc.c +++ b/fs/nfsd/nfssvc.c @@ -254,8 +254,6 @@ static void nfsd_shutdown(void) static void nfsd_last_thread(struct svc_serv *serv, struct net *net) { - /* When last nfsd thread exits we need to do some clean-up */ - nfsd_serv = NULL; nfsd_shutdown(); svc_rpcb_cleanup(serv, net); @@ -332,6 +330,7 @@ static int nfsd_get_default_max_blksize(void) int nfsd_create_serv(void) { int error; + struct net *net = current->nsproxy->net_ns; WARN_ON(!mutex_is_locked(&nfsd_mutex)); if (nfsd_serv) { @@ -346,7 +345,7 @@ int nfsd_create_serv(void) if (nfsd_serv == NULL) return -ENOMEM; - error = svc_bind(nfsd_serv, current->nsproxy->net_ns); + error = svc_bind(nfsd_serv, net); if (error < 0) { svc_destroy(nfsd_serv); return error; @@ -427,11 +426,7 @@ int nfsd_set_nrthreads(int n, int *nthreads) if (err) break; } - - if (nfsd_serv->sv_nrthreads == 1) - svc_shutdown_net(nfsd_serv, net); - svc_destroy(nfsd_serv); - + nfsd_destroy(net); return err; } @@ -478,9 +473,7 @@ out_shutdown: if (error < 0 && !nfsd_up_before) nfsd_shutdown(); out_destroy: - if (nfsd_serv->sv_nrthreads == 1) - svc_shutdown_net(nfsd_serv, net); - svc_destroy(nfsd_serv); /* Release server */ + nfsd_destroy(net); /* Release server */ out: mutex_unlock(&nfsd_mutex); return error; @@ -563,12 +556,13 @@ nfsd(void *vrqstp) nfsdstats.th_cnt --; out: - if (rqstp->rq_server->sv_nrthreads == 1) - 
svc_shutdown_net(rqstp->rq_server, &init_net); + rqstp->rq_server = NULL; /* Release the thread */ svc_exit_thread(rqstp); + nfsd_destroy(&init_net); + /* Release module */ mutex_unlock(&nfsd_mutex); module_put_and_exit(0); @@ -656,7 +650,7 @@ nfsd_dispatch(struct svc_rqst *rqstp, __be32 *statp) } /* Store reply in cache. */ - nfsd_cache_update(rqstp, proc->pc_cachetype, statp + 1); + nfsd_cache_update(rqstp, rqstp->rq_cachetype, statp + 1); return 1; } @@ -682,9 +676,7 @@ int nfsd_pool_stats_release(struct inode *inode, struct file *file) mutex_lock(&nfsd_mutex); /* this function really, really should have been called svc_put() */ - if (nfsd_serv->sv_nrthreads == 1) - svc_shutdown_net(nfsd_serv, net); - svc_destroy(nfsd_serv); + nfsd_destroy(net); mutex_unlock(&nfsd_mutex); return ret; } diff --git a/fs/nfsd/vfs.c b/fs/nfsd/vfs.c index 568666156ea4..f03160106b95 100644 --- a/fs/nfsd/vfs.c +++ b/fs/nfsd/vfs.c @@ -1477,13 +1477,19 @@ do_nfsd_create(struct svc_rqst *rqstp, struct svc_fh *fhp, case NFS3_CREATE_EXCLUSIVE: if ( dchild->d_inode->i_mtime.tv_sec == v_mtime && dchild->d_inode->i_atime.tv_sec == v_atime - && dchild->d_inode->i_size == 0 ) + && dchild->d_inode->i_size == 0 ) { + if (created) + *created = 1; break; + } case NFS4_CREATE_EXCLUSIVE4_1: if ( dchild->d_inode->i_mtime.tv_sec == v_mtime && dchild->d_inode->i_atime.tv_sec == v_atime - && dchild->d_inode->i_size == 0 ) + && dchild->d_inode->i_size == 0 ) { + if (created) + *created = 1; goto set_attr; + } /* fallthru */ case NFS3_CREATE_GUARDED: err = nfserr_exist; diff --git a/fs/nilfs2/ioctl.c b/fs/nilfs2/ioctl.c index 6fe98ed58545..372949338915 100644 --- a/fs/nilfs2/ioctl.c +++ b/fs/nilfs2/ioctl.c @@ -666,8 +666,11 @@ static int nilfs_ioctl_clean_segments(struct inode *inode, struct file *filp, if (ret < 0) printk(KERN_ERR "NILFS: GC failed during preparation: " "cannot read source blocks: err=%d\n", ret); - else + else { + if (nilfs_sb_need_update(nilfs)) + set_nilfs_discontinued(nilfs); ret = nilfs_clean_segments(inode->i_sb, argv, kbufs); + } nilfs_remove_all_gcinodes(nilfs); clear_nilfs_gc_running(nilfs); diff --git a/fs/notify/fanotify/fanotify.c b/fs/notify/fanotify/fanotify.c index f35794b97e8e..a50636025364 100644 --- a/fs/notify/fanotify/fanotify.c +++ b/fs/notify/fanotify/fanotify.c @@ -21,6 +21,7 @@ static bool should_merge(struct fsnotify_event *old, struct fsnotify_event *new) if ((old->path.mnt == new->path.mnt) && (old->path.dentry == new->path.dentry)) return true; + break; case (FSNOTIFY_EVENT_NONE): return true; default: diff --git a/fs/notify/inotify/inotify_user.c b/fs/notify/inotify/inotify_user.c index 8445fbc8985c..6f292dd53c6d 100644 --- a/fs/notify/inotify/inotify_user.c +++ b/fs/notify/inotify/inotify_user.c @@ -579,8 +579,6 @@ static int inotify_update_existing_watch(struct fsnotify_group *group, /* don't allow invalid bits: we don't want flags set */ mask = inotify_arg_to_mask(arg); - if (unlikely(!(mask & IN_ALL_EVENTS))) - return -EINVAL; fsn_mark = fsnotify_find_inode_mark(group, inode); if (!fsn_mark) @@ -632,8 +630,6 @@ static int inotify_new_watch(struct fsnotify_group *group, /* don't allow invalid bits: we don't want flags set */ mask = inotify_arg_to_mask(arg); - if (unlikely(!(mask & IN_ALL_EVENTS))) - return -EINVAL; tmp_i_mark = kmem_cache_alloc(inotify_inode_mark_cachep, GFP_KERNEL); if (unlikely(!tmp_i_mark)) diff --git a/fs/ocfs2/aops.c b/fs/ocfs2/aops.c index 657743254eb9..340bd02839be 100644 --- a/fs/ocfs2/aops.c +++ b/fs/ocfs2/aops.c @@ -593,9 +593,9 @@ static void 
ocfs2_dio_end_io(struct kiocb *iocb, level = ocfs2_iocb_rw_locked_level(iocb); ocfs2_rw_unlock(inode, level); + inode_dio_done(inode); if (is_async) aio_complete(iocb, ret, 0); - inode_dio_done(inode); } /* diff --git a/fs/ocfs2/dlmglue.c b/fs/ocfs2/dlmglue.c index 81a4cd22f80b..231eab2b2d07 100644 --- a/fs/ocfs2/dlmglue.c +++ b/fs/ocfs2/dlmglue.c @@ -2545,6 +2545,7 @@ int ocfs2_super_lock(struct ocfs2_super *osb, * everything is up to the caller :) */ status = ocfs2_should_refresh_lock_res(lockres); if (status < 0) { + ocfs2_cluster_unlock(osb, lockres, level); mlog_errno(status); goto bail; } @@ -2553,8 +2554,10 @@ int ocfs2_super_lock(struct ocfs2_super *osb, ocfs2_complete_lock_res_refresh(lockres, status); - if (status < 0) + if (status < 0) { + ocfs2_cluster_unlock(osb, lockres, level); mlog_errno(status); + } ocfs2_track_lock_refresh(lockres); } bail: diff --git a/fs/ocfs2/suballoc.c b/fs/ocfs2/suballoc.c index f169da4624fd..b7e74b580c0f 100644 --- a/fs/ocfs2/suballoc.c +++ b/fs/ocfs2/suballoc.c @@ -642,7 +642,7 @@ ocfs2_block_group_alloc_discontig(handle_t *handle, * cluster groups will be staying in cache for the duration of * this operation. */ - ac->ac_allow_chain_relink = 0; + ac->ac_disable_chain_relink = 1; /* Claim the first region */ status = ocfs2_block_group_claim_bits(osb, handle, ac, min_bits, @@ -1823,7 +1823,7 @@ static int ocfs2_search_chain(struct ocfs2_alloc_context *ac, * Do this *after* figuring out how many bits we're taking out * of our target group. */ - if (ac->ac_allow_chain_relink && + if (!ac->ac_disable_chain_relink && (prev_group_bh) && (ocfs2_block_group_reasonably_empty(bg, res->sr_bits))) { status = ocfs2_relink_block_group(handle, alloc_inode, @@ -1928,7 +1928,6 @@ static int ocfs2_claim_suballoc_bits(struct ocfs2_alloc_context *ac, victim = ocfs2_find_victim_chain(cl); ac->ac_chain = victim; - ac->ac_allow_chain_relink = 1; status = ocfs2_search_chain(ac, handle, bits_wanted, min_bits, res, &bits_left); @@ -1947,7 +1946,7 @@ static int ocfs2_claim_suballoc_bits(struct ocfs2_alloc_context *ac, * searching each chain in order. Don't allow chain relinking * because we only calculate enough journal credits for one * relink per alloc. 
*/ - ac->ac_allow_chain_relink = 0; + ac->ac_disable_chain_relink = 1; for (i = 0; i < le16_to_cpu(cl->cl_next_free_rec); i ++) { if (i == victim) continue; diff --git a/fs/ocfs2/suballoc.h b/fs/ocfs2/suballoc.h index b8afabfeede4..a36d0aa50911 100644 --- a/fs/ocfs2/suballoc.h +++ b/fs/ocfs2/suballoc.h @@ -49,7 +49,7 @@ struct ocfs2_alloc_context { /* these are used by the chain search */ u16 ac_chain; - int ac_allow_chain_relink; + int ac_disable_chain_relink; group_search_t *ac_group_search; u64 ac_last_group; diff --git a/fs/ocfs2/xattr.c b/fs/ocfs2/xattr.c index 0ba9ea1e7961..2e3ea308c144 100644 --- a/fs/ocfs2/xattr.c +++ b/fs/ocfs2/xattr.c @@ -7189,7 +7189,7 @@ int ocfs2_init_security_and_acl(struct inode *dir, struct buffer_head *dir_bh = NULL; ret = ocfs2_init_security_get(inode, dir, qstr, NULL); - if (!ret) { + if (ret) { mlog_errno(ret); goto leave; } diff --git a/fs/open.c b/fs/open.c index 3f1108b19143..cf1d34fc5e69 100644 --- a/fs/open.c +++ b/fs/open.c @@ -882,9 +882,10 @@ static inline int build_open_flags(int flags, umode_t mode, struct open_flags *o int lookup_flags = 0; int acc_mode; - if (!(flags & O_CREAT)) - mode = 0; - op->mode = mode; + if (flags & O_CREAT) + op->mode = (mode & S_IALLUGO) | S_IFREG; + else + op->mode = 0; /* Must never be set by userspace */ flags &= ~FMODE_NONOTIFY; diff --git a/fs/proc/page.c b/fs/proc/page.c index 7fcd0d60a968..b8730d9ebaee 100644 --- a/fs/proc/page.c +++ b/fs/proc/page.c @@ -115,7 +115,13 @@ u64 stable_page_flags(struct page *page) u |= 1 << KPF_COMPOUND_TAIL; if (PageHuge(page)) u |= 1 << KPF_HUGE; - else if (PageTransCompound(page)) + /* + * PageTransCompound can be true for non-huge compound pages (slab + * pages or pages allocated by drivers with __GFP_COMP) because it + * just checks PG_head/PG_tail, so we need to check PageLRU to make + * sure a given page is a thp, not a non-huge compound page. + */ + else if (PageTransCompound(page) && PageLRU(compound_trans_head(page))) u |= 1 << KPF_THP; /* diff --git a/fs/proc/proc_sysctl.c b/fs/proc/proc_sysctl.c index 21d836f40292..ab5352101db5 100644 --- a/fs/proc/proc_sysctl.c +++ b/fs/proc/proc_sysctl.c @@ -462,9 +462,6 @@ static struct dentry *proc_sys_lookup(struct inode *dir, struct dentry *dentry, err = ERR_PTR(-ENOMEM); inode = proc_sys_make_inode(dir->i_sb, h ? 
h : head, p); - if (h) - sysctl_head_finish(h); - if (!inode) goto out; @@ -473,6 +470,8 @@ static struct dentry *proc_sys_lookup(struct inode *dir, struct dentry *dentry, d_add(dentry, inode); out: + if (h) + sysctl_head_finish(h); sysctl_head_finish(head); return err; } diff --git a/fs/proc/stat.c b/fs/proc/stat.c index 6c91b17cb5fd..98bf0c2738d6 100644 --- a/fs/proc/stat.c +++ b/fs/proc/stat.c @@ -45,10 +45,13 @@ static cputime64_t get_iowait_time(int cpu) static u64 get_idle_time(int cpu) { - u64 idle, idle_time = get_cpu_idle_time_us(cpu, NULL); + u64 idle, idle_time = -1ULL; + + if (cpu_online(cpu)) + idle_time = get_cpu_idle_time_us(cpu, NULL); if (idle_time == -1ULL) - /* !NO_HZ so we can rely on cpustat.idle */ + /* !NO_HZ or cpu offline so we can rely on cpustat.idle */ idle = kcpustat_cpu(cpu).cpustat[CPUTIME_IDLE]; else idle = usecs_to_cputime64(idle_time); @@ -58,10 +61,13 @@ static u64 get_idle_time(int cpu) static u64 get_iowait_time(int cpu) { - u64 iowait, iowait_time = get_cpu_iowait_time_us(cpu, NULL); + u64 iowait, iowait_time = -1ULL; + + if (cpu_online(cpu)) + iowait_time = get_cpu_iowait_time_us(cpu, NULL); if (iowait_time == -1ULL) - /* !NO_HZ so we can rely on cpustat.iowait */ + /* !NO_HZ or cpu offline so we can rely on cpustat.iowait */ iowait = kcpustat_cpu(cpu).cpustat[CPUTIME_IOWAIT]; else iowait = usecs_to_cputime64(iowait_time); diff --git a/fs/pstore/platform.c b/fs/pstore/platform.c index 82c585f715e3..4a66a5c4a63f 100644 --- a/fs/pstore/platform.c +++ b/fs/pstore/platform.c @@ -88,6 +88,27 @@ static const char *get_reason_str(enum kmsg_dump_reason reason) } } +bool pstore_cannot_block_path(enum kmsg_dump_reason reason) +{ + /* + * In case of NMI path, pstore shouldn't be blocked + * regardless of reason. + */ + if (in_nmi()) + return true; + + switch (reason) { + /* In panic case, other cpus are stopped by smp_send_stop(). */ + case KMSG_DUMP_PANIC: + /* Emergency restart shouldn't be blocked by spin lock. */ + case KMSG_DUMP_EMERG: + return true; + default: + return false; + } +} +EXPORT_SYMBOL_GPL(pstore_cannot_block_path); + /* * callback from kmsg_dump. (s2,l2) has the most recently * written bytes, older bytes are in (s1,l1). Save as much @@ -111,10 +132,12 @@ static void pstore_dump(struct kmsg_dumper *dumper, why = get_reason_str(reason); - if (in_nmi()) { - is_locked = spin_trylock(&psinfo->buf_lock); - if (!is_locked) - pr_err("pstore dump routine blocked in NMI, may corrupt error record\n"); + if (pstore_cannot_block_path(reason)) { + is_locked = spin_trylock_irqsave(&psinfo->buf_lock, flags); + if (!is_locked) { + pr_err("pstore dump routine blocked in %s path, may corrupt error record\n" + , in_nmi() ? 
"NMI" : why); + } } else spin_lock_irqsave(&psinfo->buf_lock, flags); oopscount++; @@ -145,9 +168,9 @@ static void pstore_dump(struct kmsg_dumper *dumper, total += l1_cpy + l2_cpy; part++; } - if (in_nmi()) { + if (pstore_cannot_block_path(reason)) { if (is_locked) - spin_unlock(&psinfo->buf_lock); + spin_unlock_irqrestore(&psinfo->buf_lock, flags); } else spin_unlock_irqrestore(&psinfo->buf_lock, flags); } diff --git a/fs/reiserfs/inode.c b/fs/reiserfs/inode.c index 494c315c7417..c11db5134b72 100644 --- a/fs/reiserfs/inode.c +++ b/fs/reiserfs/inode.c @@ -1573,8 +1573,10 @@ struct dentry *reiserfs_fh_to_dentry(struct super_block *sb, struct fid *fid, reiserfs_warning(sb, "reiserfs-13077", "nfsd/reiserfs, fhtype=%d, len=%d - odd", fh_type, fh_len); - fh_type = 5; + fh_type = fh_len; } + if (fh_len < 2) + return NULL; return reiserfs_get_dentry(sb, fid->raw[0], fid->raw[1], (fh_type == 3 || fh_type >= 5) ? fid->raw[2] : 0); @@ -1583,6 +1585,8 @@ struct dentry *reiserfs_fh_to_dentry(struct super_block *sb, struct fid *fid, struct dentry *reiserfs_fh_to_parent(struct super_block *sb, struct fid *fid, int fh_len, int fh_type) { + if (fh_type > fh_len) + fh_type = fh_len; if (fh_type < 4) return NULL; @@ -1784,8 +1788,9 @@ int reiserfs_new_inode(struct reiserfs_transaction_handle *th, BUG_ON(!th->t_trans_id); - dquot_initialize(inode); + reiserfs_write_unlock(inode->i_sb); err = dquot_alloc_inode(inode); + reiserfs_write_lock(inode->i_sb); if (err) goto out_end_trans; if (!dir->i_nlink) { @@ -1981,8 +1986,10 @@ int reiserfs_new_inode(struct reiserfs_transaction_handle *th, out_end_trans: journal_end(th, th->t_super, th->t_blocks_allocated); + reiserfs_write_unlock(inode->i_sb); /* Drop can be outside and it needs more credits so it's better to have it outside */ dquot_drop(inode); + reiserfs_write_lock(inode->i_sb); inode->i_flags |= S_NOQUOTA; make_bad_inode(inode); @@ -3105,10 +3112,9 @@ int reiserfs_setattr(struct dentry *dentry, struct iattr *attr) /* must be turned off for recursive notify_change calls */ ia_valid = attr->ia_valid &= ~(ATTR_KILL_SUID|ATTR_KILL_SGID); - depth = reiserfs_write_lock_once(inode->i_sb); if (is_quota_modification(inode, attr)) dquot_initialize(inode); - + depth = reiserfs_write_lock_once(inode->i_sb); if (attr->ia_valid & ATTR_SIZE) { /* version 2 items will be caught by the s_maxbytes check ** done for us in vmtruncate @@ -3172,7 +3178,9 @@ int reiserfs_setattr(struct dentry *dentry, struct iattr *attr) error = journal_begin(&th, inode->i_sb, jbegin_count); if (error) goto out; + reiserfs_write_unlock_once(inode->i_sb, depth); error = dquot_transfer(inode, attr); + depth = reiserfs_write_lock_once(inode->i_sb); if (error) { journal_end(&th, inode->i_sb, jbegin_count); goto out; diff --git a/fs/reiserfs/stree.c b/fs/reiserfs/stree.c index f8afa4b162b8..2f40a4c70a4d 100644 --- a/fs/reiserfs/stree.c +++ b/fs/reiserfs/stree.c @@ -1968,7 +1968,9 @@ int reiserfs_paste_into_item(struct reiserfs_transaction_handle *th, struct tree key2type(&(key->on_disk_key))); #endif + reiserfs_write_unlock(inode->i_sb); retval = dquot_alloc_space_nodirty(inode, pasted_size); + reiserfs_write_lock(inode->i_sb); if (retval) { pathrelse(search_path); return retval; @@ -2061,9 +2063,11 @@ int reiserfs_insert_item(struct reiserfs_transaction_handle *th, "reiserquota insert_item(): allocating %u id=%u type=%c", quota_bytes, inode->i_uid, head2type(ih)); #endif + reiserfs_write_unlock(inode->i_sb); /* We can't dirty inode here. 
It would be immediately written but * appropriate stat item isn't inserted yet... */ retval = dquot_alloc_space_nodirty(inode, quota_bytes); + reiserfs_write_lock(inode->i_sb); if (retval) { pathrelse(path); return retval; diff --git a/fs/reiserfs/super.c b/fs/reiserfs/super.c index 8b7616ef06d8..8169be93ac0f 100644 --- a/fs/reiserfs/super.c +++ b/fs/reiserfs/super.c @@ -256,7 +256,9 @@ static int finish_unfinished(struct super_block *s) retval = remove_save_link_only(s, &save_link_key, 0); continue; } + reiserfs_write_unlock(s); dquot_initialize(inode); + reiserfs_write_lock(s); if (truncate && S_ISDIR(inode->i_mode)) { /* We got a truncate request for a dir which is impossible. @@ -1292,7 +1294,7 @@ static int reiserfs_remount(struct super_block *s, int *mount_flags, char *arg) kfree(qf_names[i]); #endif err = -EINVAL; - goto out_err; + goto out_unlock; } #ifdef CONFIG_QUOTA handle_quota_files(s, qf_names, &qfmt); @@ -1336,7 +1338,7 @@ static int reiserfs_remount(struct super_block *s, int *mount_flags, char *arg) if (blocks) { err = reiserfs_resize(s, blocks); if (err != 0) - goto out_err; + goto out_unlock; } if (*mount_flags & MS_RDONLY) { @@ -1346,9 +1348,15 @@ static int reiserfs_remount(struct super_block *s, int *mount_flags, char *arg) /* it is read-only already */ goto out_ok; + /* + * Drop write lock. Quota will retake it when needed and lock + * ordering requires calling dquot_suspend() without it. + */ + reiserfs_write_unlock(s); err = dquot_suspend(s, -1); if (err < 0) goto out_err; + reiserfs_write_lock(s); /* try to remount file system with read-only permissions */ if (sb_umount_state(rs) == REISERFS_VALID_FS @@ -1358,7 +1366,7 @@ static int reiserfs_remount(struct super_block *s, int *mount_flags, char *arg) err = journal_begin(&th, s, 10); if (err) - goto out_err; + goto out_unlock; /* Mounting a rw partition read-only. */ reiserfs_prepare_for_journal(s, SB_BUFFER_WITH_SB(s), 1); @@ -1373,7 +1381,7 @@ static int reiserfs_remount(struct super_block *s, int *mount_flags, char *arg) if (reiserfs_is_journal_aborted(journal)) { err = journal->j_errno; - goto out_err; + goto out_unlock; } handle_data_mode(s, mount_options); @@ -1382,7 +1390,7 @@ static int reiserfs_remount(struct super_block *s, int *mount_flags, char *arg) s->s_flags &= ~MS_RDONLY; /* now it is safe to call journal_begin */ err = journal_begin(&th, s, 10); if (err) - goto out_err; + goto out_unlock; /* Mount a partition which is read-only, read-write */ reiserfs_prepare_for_journal(s, SB_BUFFER_WITH_SB(s), 1); @@ -1399,11 +1407,17 @@ static int reiserfs_remount(struct super_block *s, int *mount_flags, char *arg) SB_JOURNAL(s)->j_must_wait = 1; err = journal_end(&th, s, 10); if (err) - goto out_err; + goto out_unlock; s->s_dirt = 0; if (!(*mount_flags & MS_RDONLY)) { + /* + * Drop write lock. Quota will retake it when needed and lock + * ordering requires calling dquot_resume() without it. 
+ */ + reiserfs_write_unlock(s); dquot_resume(s, -1); + reiserfs_write_lock(s); finish_unfinished(s); reiserfs_xattr_init(s, *mount_flags); } @@ -1413,9 +1427,10 @@ out_ok: reiserfs_write_unlock(s); return 0; +out_unlock: + reiserfs_write_unlock(s); out_err: kfree(new_opts); - reiserfs_write_unlock(s); return err; } @@ -2049,13 +2064,15 @@ static int reiserfs_write_dquot(struct dquot *dquot) REISERFS_QUOTA_TRANS_BLOCKS(dquot->dq_sb)); if (ret) goto out; + reiserfs_write_unlock(dquot->dq_sb); ret = dquot_commit(dquot); + reiserfs_write_lock(dquot->dq_sb); err = journal_end(&th, dquot->dq_sb, REISERFS_QUOTA_TRANS_BLOCKS(dquot->dq_sb)); if (!ret && err) ret = err; - out: +out: reiserfs_write_unlock(dquot->dq_sb); return ret; } @@ -2071,13 +2088,15 @@ static int reiserfs_acquire_dquot(struct dquot *dquot) REISERFS_QUOTA_INIT_BLOCKS(dquot->dq_sb)); if (ret) goto out; + reiserfs_write_unlock(dquot->dq_sb); ret = dquot_acquire(dquot); + reiserfs_write_lock(dquot->dq_sb); err = journal_end(&th, dquot->dq_sb, REISERFS_QUOTA_INIT_BLOCKS(dquot->dq_sb)); if (!ret && err) ret = err; - out: +out: reiserfs_write_unlock(dquot->dq_sb); return ret; } @@ -2091,19 +2110,21 @@ static int reiserfs_release_dquot(struct dquot *dquot) ret = journal_begin(&th, dquot->dq_sb, REISERFS_QUOTA_DEL_BLOCKS(dquot->dq_sb)); + reiserfs_write_unlock(dquot->dq_sb); if (ret) { /* Release dquot anyway to avoid endless cycle in dqput() */ dquot_release(dquot); goto out; } ret = dquot_release(dquot); + reiserfs_write_lock(dquot->dq_sb); err = journal_end(&th, dquot->dq_sb, REISERFS_QUOTA_DEL_BLOCKS(dquot->dq_sb)); if (!ret && err) ret = err; - out: reiserfs_write_unlock(dquot->dq_sb); +out: return ret; } @@ -2128,11 +2149,13 @@ static int reiserfs_write_info(struct super_block *sb, int type) ret = journal_begin(&th, sb, 2); if (ret) goto out; + reiserfs_write_unlock(sb); ret = dquot_commit_info(sb, type); + reiserfs_write_lock(sb); err = journal_end(&th, sb, 2); if (!ret && err) ret = err; - out: +out: reiserfs_write_unlock(sb); return ret; } @@ -2157,8 +2180,11 @@ static int reiserfs_quota_on(struct super_block *sb, int type, int format_id, struct reiserfs_transaction_handle th; int opt = type == USRQUOTA ? REISERFS_USRQUOTA : REISERFS_GRPQUOTA; - if (!(REISERFS_SB(sb)->s_mount_opt & (1 << opt))) - return -EINVAL; + reiserfs_write_lock(sb); + if (!(REISERFS_SB(sb)->s_mount_opt & (1 << opt))) { + err = -EINVAL; + goto out; + } /* Quotafile not on the same filesystem? */ if (path->dentry->d_sb != sb) { @@ -2200,8 +2226,10 @@ static int reiserfs_quota_on(struct super_block *sb, int type, int format_id, if (err) goto out; } - err = dquot_quota_on(sb, type, format_id, path); + reiserfs_write_unlock(sb); + return dquot_quota_on(sb, type, format_id, path); out: + reiserfs_write_unlock(sb); return err; } @@ -2275,7 +2303,9 @@ static ssize_t reiserfs_quota_write(struct super_block *sb, int type, tocopy = sb->s_blocksize - offset < towrite ? 
sb->s_blocksize - offset : towrite; tmp_bh.b_state = 0; + reiserfs_write_lock(sb); err = reiserfs_get_block(inode, blk, &tmp_bh, GET_BLOCK_CREATE); + reiserfs_write_unlock(sb); if (err) goto out; if (offset || tocopy != sb->s_blocksize) @@ -2291,10 +2321,12 @@ static ssize_t reiserfs_quota_write(struct super_block *sb, int type, flush_dcache_page(bh->b_page); set_buffer_uptodate(bh); unlock_buffer(bh); + reiserfs_write_lock(sb); reiserfs_prepare_for_journal(sb, bh, 1); journal_mark_dirty(current->journal_info, sb, bh); if (!journal_quota) reiserfs_add_ordered_list(inode, bh); + reiserfs_write_unlock(sb); brelse(bh); offset = 0; towrite -= tocopy; diff --git a/fs/splice.c b/fs/splice.c index 5cac690f8103..bed6a3c29355 100644 --- a/fs/splice.c +++ b/fs/splice.c @@ -696,8 +696,10 @@ static int pipe_to_sendpage(struct pipe_inode_info *pipe, return -EINVAL; more = (sd->flags & SPLICE_F_MORE) ? MSG_MORE : 0; - if (sd->len < sd->total_len) + + if (sd->len < sd->total_len && pipe->nrbufs > 1) more |= MSG_SENDPAGE_NOTLAST; + return file->f_op->sendpage(file, buf->page, buf->offset, sd->len, &pos, more); } diff --git a/fs/stat.c b/fs/stat.c index c733dc5753ae..dc6d0be300ca 100644 --- a/fs/stat.c +++ b/fs/stat.c @@ -57,7 +57,7 @@ EXPORT_SYMBOL(vfs_getattr); int vfs_fstat(unsigned int fd, struct kstat *stat) { - struct file *f = fget(fd); + struct file *f = fget_raw(fd); int error = -EBADF; if (f) { diff --git a/fs/sysfs/dir.c b/fs/sysfs/dir.c index 35a36d39fa2c..a545d819b495 100644 --- a/fs/sysfs/dir.c +++ b/fs/sysfs/dir.c @@ -457,20 +457,18 @@ int __sysfs_add_one(struct sysfs_addrm_cxt *acxt, struct sysfs_dirent *sd) /** * sysfs_pathname - return full path to sysfs dirent * @sd: sysfs_dirent whose path we want - * @path: caller allocated buffer + * @path: caller allocated buffer of size PATH_MAX * * Gives the name "/" to the sysfs_root entry; any path returned * is relative to wherever sysfs is mounted. - * - * XXX: does no error checking on @path size */ static char *sysfs_pathname(struct sysfs_dirent *sd, char *path) { if (sd->s_parent) { sysfs_pathname(sd->s_parent, path); - strcat(path, "/"); + strlcat(path, "/", PATH_MAX); } - strcat(path, sd->s_name); + strlcat(path, sd->s_name, PATH_MAX); return path; } @@ -503,9 +501,11 @@ int sysfs_add_one(struct sysfs_addrm_cxt *acxt, struct sysfs_dirent *sd) char *path = kzalloc(PATH_MAX, GFP_KERNEL); WARN(1, KERN_WARNING "sysfs: cannot create duplicate filename '%s'\n", - (path == NULL) ? sd->s_name : - strcat(strcat(sysfs_pathname(acxt->parent_sd, path), "/"), - sd->s_name)); + (path == NULL) ? sd->s_name + : (sysfs_pathname(acxt->parent_sd, path), + strlcat(path, "/", PATH_MAX), + strlcat(path, sd->s_name, PATH_MAX), + path)); kfree(path); } diff --git a/fs/ubifs/debug.h b/fs/ubifs/debug.h index 9f717655df18..28e3de1892b9 100644 --- a/fs/ubifs/debug.h +++ b/fs/ubifs/debug.h @@ -170,7 +170,7 @@ struct ubifs_global_debug_info { #define ubifs_dbg_msg(type, fmt, ...) \ pr_debug("UBIFS DBG " type ": " fmt "\n", ##__VA_ARGS__) -#define DBG_KEY_BUF_LEN 32 +#define DBG_KEY_BUF_LEN 48 #define ubifs_dbg_msg_key(type, key, fmt, ...) 
do { \ char __tmp_key_buf[DBG_KEY_BUF_LEN]; \ pr_debug("UBIFS DBG " type ": " fmt "%s\n", ##__VA_ARGS__, \ diff --git a/fs/ubifs/dir.c index ec9f1870ab7f..8640a12f0650 100644 --- a/fs/ubifs/dir.c +++ b/fs/ubifs/dir.c @@ -977,7 +977,7 @@ static int ubifs_rename(struct inode *old_dir, struct dentry *old_dentry, struct ubifs_budget_req ino_req = { .dirtied_ino = 1, .dirtied_ino_d = ALIGN(old_inode_ui->data_len, 8) }; struct timespec time; - unsigned int saved_nlink; + unsigned int uninitialized_var(saved_nlink); /* * Budget request settings: deletion direntry, new direntry, removing diff --git a/fs/ubifs/find.c index 2559d174e004..5dc48cacb5a0 100644 --- a/fs/ubifs/find.c +++ b/fs/ubifs/find.c @@ -681,8 +681,16 @@ int ubifs_find_free_leb_for_idx(struct ubifs_info *c) if (!lprops) { lprops = ubifs_fast_find_freeable(c); if (!lprops) { - ubifs_assert(c->freeable_cnt == 0); - if (c->lst.empty_lebs - c->lst.taken_empty_lebs > 0) { + /* + * The first condition means the following: go scan the + * LPT if there are uncategorized lprops, which means + * there may be freeable LEBs there (UBIFS does not + * store the information about freeable LEBs in the + * master node). + */ + if (c->in_a_category_cnt != c->main_lebs || + c->lst.empty_lebs - c->lst.taken_empty_lebs > 0) { + ubifs_assert(c->freeable_cnt == 0); lprops = scan_for_leb_for_idx(c); if (IS_ERR(lprops)) { err = PTR_ERR(lprops); diff --git a/fs/ubifs/lprops.c index f8a181e647cc..ea9d49164ffa 100644 --- a/fs/ubifs/lprops.c +++ b/fs/ubifs/lprops.c @@ -300,8 +300,11 @@ void ubifs_add_to_cat(struct ubifs_info *c, struct ubifs_lprops *lprops, default: ubifs_assert(0); } + lprops->flags &= ~LPROPS_CAT_MASK; lprops->flags |= cat; + c->in_a_category_cnt += 1; + ubifs_assert(c->in_a_category_cnt <= c->main_lebs); } /** @@ -334,6 +337,9 @@ static void ubifs_remove_from_cat(struct ubifs_info *c, default: ubifs_assert(0); } + + c->in_a_category_cnt -= 1; + ubifs_assert(c->in_a_category_cnt >= 0); } /** diff --git a/fs/ubifs/ubifs.h index 93d59aceaaef..4971cb23b6c8 100644 --- a/fs/ubifs/ubifs.h +++ b/fs/ubifs/ubifs.h @@ -1184,6 +1184,8 @@ struct ubifs_debug_info; * @freeable_list: list of freeable non-index LEBs (free + dirty == @leb_size) * @frdi_idx_list: list of freeable index LEBs (free + dirty == @leb_size) * @freeable_cnt: number of freeable LEBs in @freeable_list + * @in_a_category_cnt: count of lprops which are in a certain category, which + * basically means that they were loaded from the flash * * @ltab_lnum: LEB number of LPT's own lprops table * @ltab_offs: offset of LPT's own lprops table @@ -1413,6 +1415,7 @@ struct ubifs_info { struct list_head freeable_list; struct list_head frdi_idx_list; int freeable_cnt; + int in_a_category_cnt; int ltab_lnum; int ltab_offs; diff --git a/fs/udf/file.c index 7f3f7ba3df6e..d1c6093fd3d3 100644 --- a/fs/udf/file.c +++ b/fs/udf/file.c @@ -39,20 +39,24 @@ #include "udf_i.h" #include "udf_sb.h" -static int udf_adinicb_readpage(struct file *file, struct page *page) +static void __udf_adinicb_readpage(struct page *page) { struct inode *inode = page->mapping->host; char *kaddr; struct udf_inode_info *iinfo = UDF_I(inode); - BUG_ON(!PageLocked(page)); - kaddr = kmap(page); - memset(kaddr, 0, PAGE_CACHE_SIZE); memcpy(kaddr, iinfo->i_ext.i_data + iinfo->i_lenEAttr, inode->i_size); + memset(kaddr + inode->i_size, 0, PAGE_CACHE_SIZE - inode->i_size); flush_dcache_page(page); SetPageUptodate(page); kunmap(page); +} + +static int
udf_adinicb_readpage(struct file *file, struct page *page) +{ + BUG_ON(!PageLocked(page)); + __udf_adinicb_readpage(page); unlock_page(page); return 0; @@ -77,6 +81,25 @@ static int udf_adinicb_writepage(struct page *page, return 0; } +static int udf_adinicb_write_begin(struct file *file, + struct address_space *mapping, loff_t pos, + unsigned len, unsigned flags, struct page **pagep, + void **fsdata) +{ + struct page *page; + + if (WARN_ON_ONCE(pos >= PAGE_CACHE_SIZE)) + return -EIO; + page = grab_cache_page_write_begin(mapping, 0, flags); + if (!page) + return -ENOMEM; + *pagep = page; + + if (!PageUptodate(page) && len != PAGE_CACHE_SIZE) + __udf_adinicb_readpage(page); + return 0; +} + static int udf_adinicb_write_end(struct file *file, struct address_space *mapping, loff_t pos, unsigned len, unsigned copied, @@ -98,8 +121,8 @@ static int udf_adinicb_write_end(struct file *file, const struct address_space_operations udf_adinicb_aops = { .readpage = udf_adinicb_readpage, .writepage = udf_adinicb_writepage, - .write_begin = simple_write_begin, - .write_end = udf_adinicb_write_end, + .write_begin = udf_adinicb_write_begin, + .write_end = udf_adinicb_write_end, }; static ssize_t udf_file_aio_write(struct kiocb *iocb, const struct iovec *iov, diff --git a/fs/udf/inode.c b/fs/udf/inode.c index 7d7528008359..aa70035fb402 100644 --- a/fs/udf/inode.c +++ b/fs/udf/inode.c @@ -574,6 +574,7 @@ static sector_t inode_getblk(struct inode *inode, sector_t block, struct udf_inode_info *iinfo = UDF_I(inode); int goal = 0, pgoal = iinfo->i_location.logicalBlockNum; int lastblock = 0; + bool isBeyondEOF; *err = 0; *new = 0; @@ -653,7 +654,7 @@ static sector_t inode_getblk(struct inode *inode, sector_t block, /* Are we beyond EOF? */ if (etype == -1) { int ret; - + isBeyondEOF = 1; if (count) { if (c) laarr[0] = laarr[1]; @@ -696,6 +697,7 @@ static sector_t inode_getblk(struct inode *inode, sector_t block, endnum = c + 1; lastblock = 1; } else { + isBeyondEOF = 0; endnum = startnum = ((count > 2) ? 
2 : count); /* if the current extent is in position 0, @@ -738,10 +740,13 @@ static sector_t inode_getblk(struct inode *inode, sector_t block, goal, err); if (!newblocknum) { brelse(prev_epos.bh); + brelse(cur_epos.bh); + brelse(next_epos.bh); *err = -ENOSPC; return 0; } - iinfo->i_lenExtents += inode->i_sb->s_blocksize; + if (isBeyondEOF) + iinfo->i_lenExtents += inode->i_sb->s_blocksize; } /* if the extent the requsted block is located in contains multiple @@ -768,6 +773,8 @@ static sector_t inode_getblk(struct inode *inode, sector_t block, udf_update_extents(inode, laarr, startnum, endnum, &prev_epos); brelse(prev_epos.bh); + brelse(cur_epos.bh); + brelse(next_epos.bh); newblock = udf_get_pblock(inode->i_sb, newblocknum, iinfo->i_location.partitionReferenceNum, 0); diff --git a/fs/udf/super.c b/fs/udf/super.c index e660ffdd91b6..4988a8afcc8f 100644 --- a/fs/udf/super.c +++ b/fs/udf/super.c @@ -1287,6 +1287,7 @@ static int udf_load_logicalvol(struct super_block *sb, sector_t block, udf_err(sb, "error loading logical volume descriptor: " "Partition table too long (%u > %lu)\n", table_len, sb->s_blocksize - sizeof(*lvd)); + ret = 1; goto out_bh; } @@ -1331,8 +1332,10 @@ static int udf_load_logicalvol(struct super_block *sb, sector_t block, UDF_ID_SPARABLE, strlen(UDF_ID_SPARABLE))) { if (udf_load_sparable_map(sb, map, - (struct sparablePartitionMap *)gpm) < 0) + (struct sparablePartitionMap *)gpm) < 0) { + ret = 1; goto out_bh; + } } else if (!strncmp(upm2->partIdent.ident, UDF_ID_METADATA, strlen(UDF_ID_METADATA))) { diff --git a/fs/xfs/xfs_aops.c b/fs/xfs/xfs_aops.c index 0dbb9e70fe21..7a978c7be842 100644 --- a/fs/xfs/xfs_aops.c +++ b/fs/xfs/xfs_aops.c @@ -89,11 +89,11 @@ xfs_destroy_ioend( } if (ioend->io_iocb) { + inode_dio_done(ioend->io_inode); if (ioend->io_isasync) { aio_complete(ioend->io_iocb, ioend->io_error ? ioend->io_error : ioend->io_result, 0); } - inode_dio_done(ioend->io_inode); } mempool_free(ioend, xfs_ioend_pool); diff --git a/fs/xfs/xfs_buf.c b/fs/xfs/xfs_buf.c index 6819b5163e33..bb76128b5f47 100644 --- a/fs/xfs/xfs_buf.c +++ b/fs/xfs/xfs_buf.c @@ -1165,9 +1165,14 @@ xfs_buf_bio_end_io( { xfs_buf_t *bp = (xfs_buf_t *)bio->bi_private; - xfs_buf_ioerror(bp, -error); + /* + * don't overwrite existing errors - otherwise we can lose errors on + * buffers that require multiple bios to complete. + */ + if (!bp->b_error) + xfs_buf_ioerror(bp, -error); - if (!error && xfs_buf_is_vmapped(bp) && (bp->b_flags & XBF_READ)) + if (!bp->b_error && xfs_buf_is_vmapped(bp) && (bp->b_flags & XBF_READ)) invalidate_kernel_vmap_range(bp->b_addr, xfs_buf_vmap_len(bp)); _xfs_buf_ioend(bp, 1); @@ -1243,6 +1248,11 @@ next_chunk: if (size) goto next_chunk; } else { + /* + * This is guaranteed not to be the last io reference count + * because the caller (xfs_buf_iorequest) holds a count itself. 
+ */ + atomic_dec(&bp->b_io_remaining); xfs_buf_ioerror(bp, EIO); bio_put(bio); } diff --git a/fs/xfs/xfs_export.c b/fs/xfs/xfs_export.c index 558910f5e3c0..5703fb85f6bb 100644 --- a/fs/xfs/xfs_export.c +++ b/fs/xfs/xfs_export.c @@ -195,6 +195,9 @@ xfs_fs_fh_to_parent(struct super_block *sb, struct fid *fid, struct xfs_fid64 *fid64 = (struct xfs_fid64 *)fid; struct inode *inode = NULL; + if (fh_len < xfs_fileid_length(fileid_type)) + return NULL; + switch (fileid_type) { case FILEID_INO32_GEN_PARENT: inode = xfs_nfs_get_inode(sb, fid->i32.parent_ino, diff --git a/fs/xfs/xfs_log_recover.c b/fs/xfs/xfs_log_recover.c index 8ecad5bad66c..0abb1623a7cb 100644 --- a/fs/xfs/xfs_log_recover.c +++ b/fs/xfs/xfs_log_recover.c @@ -3514,7 +3514,7 @@ xlog_do_recovery_pass( * - order is important. */ error = xlog_bread_offset(log, 0, - bblks - split_bblks, hbp, + bblks - split_bblks, dbp, offset + BBTOB(split_bblks)); if (error) goto bread_err2; diff --git a/include/asm-generic/mutex-xchg.h b/include/asm-generic/mutex-xchg.h index 580a6d35c700..c04e0db8a2d6 100644 --- a/include/asm-generic/mutex-xchg.h +++ b/include/asm-generic/mutex-xchg.h @@ -26,7 +26,13 @@ static inline void __mutex_fastpath_lock(atomic_t *count, void (*fail_fn)(atomic_t *)) { if (unlikely(atomic_xchg(count, 0) != 1)) - fail_fn(count); + /* + * We failed to acquire the lock, so mark it contended + * to ensure that any waiting tasks are woken up by the + * unlock slow path. + */ + if (likely(atomic_xchg(count, -1) != 1)) + fail_fn(count); } /** @@ -43,7 +49,8 @@ static inline int __mutex_fastpath_lock_retval(atomic_t *count, int (*fail_fn)(atomic_t *)) { if (unlikely(atomic_xchg(count, 0) != 1)) - return fail_fn(count); + if (likely(atomic_xchg(count, -1) != 1)) + return fail_fn(count); return 0; } diff --git a/include/asm-generic/tlb.h b/include/asm-generic/tlb.h index f96a5b58a975..979ed15724a2 100644 --- a/include/asm-generic/tlb.h +++ b/include/asm-generic/tlb.h @@ -78,6 +78,14 @@ struct mmu_gather_batch { #define MAX_GATHER_BATCH \ ((PAGE_SIZE - sizeof(struct mmu_gather_batch)) / sizeof(void *)) +/* + * Limit the maximum number of mmu_gather batches to reduce a risk of soft + * lockups for non-preemptible kernels on huge machines when a lot of memory + * is zapped during unmapping. + * 10K pages freed at once should be safe even without a preemption point. + */ +#define MAX_GATHER_BATCH_COUNT (10000UL/MAX_GATHER_BATCH) + /* struct mmu_gather is an opaque type used by the mm code for passing around * any data needed by arch specific code for tlb_remove_page. */ @@ -94,6 +102,7 @@ struct mmu_gather { struct mmu_gather_batch *active; struct mmu_gather_batch local; struct page *__pages[MMU_GATHER_BUNDLE]; + unsigned int batch_count; }; #define HAVE_GENERIC_MMU_GATHER diff --git a/include/drm/drm_mode.h b/include/drm/drm_mode.h index 4a0aae38e160..9242310b47cd 100644 --- a/include/drm/drm_mode.h +++ b/include/drm/drm_mode.h @@ -343,8 +343,9 @@ struct drm_mode_mode_cmd { struct drm_mode_modeinfo mode; }; -#define DRM_MODE_CURSOR_BO (1<<0) -#define DRM_MODE_CURSOR_MOVE (1<<1) +#define DRM_MODE_CURSOR_BO 0x01 +#define DRM_MODE_CURSOR_MOVE 0x02 +#define DRM_MODE_CURSOR_FLAGS 0x03 /* * depending on the value in flags different members are used. 
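/*
 * Editorial sketch, not part of any patch above: a user-space model of the
 * xchg-based mutex fastpath change in asm-generic/mutex-xchg.h, written with
 * C11 atomics. All names here are illustrative. count == 1 means unlocked,
 * 0 means locked with no waiters, -1 means locked and possibly contended;
 * the second exchange to -1 is what guarantees the eventual unlock takes the
 * slow path and wakes any waiter instead of leaving it asleep.
 */
#include <stdatomic.h>
#include <stdio.h>

static atomic_int count = 1;
static int slowpath_calls;

static void fastpath_lock_sketch(void)
{
    if (atomic_exchange(&count, 0) != 1) {
        /* Lock was not free: re-mark it contended before blocking. */
        if (atomic_exchange(&count, -1) != 1)
            slowpath_calls++;   /* stands in for fail_fn(count) */
    }
}

int main(void)
{
    fastpath_lock_sketch();     /* uncontended: count 1 -> 0, fast path only */
    fastpath_lock_sketch();     /* contended: count 0 -> -1, slow path taken */
    printf("count=%d slowpath_calls=%d\n",
           atomic_load(&count), slowpath_calls);
    return 0;
}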
diff --git a/include/drm/drm_pciids.h b/include/drm/drm_pciids.h index 53392e8f9564..7207a99bdf86 100644 --- a/include/drm/drm_pciids.h +++ b/include/drm/drm_pciids.h @@ -209,9 +209,12 @@ {0x1002, 0x6788, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TAHITI|RADEON_NEW_MEMMAP}, \ {0x1002, 0x678A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TAHITI|RADEON_NEW_MEMMAP}, \ {0x1002, 0x6790, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TAHITI|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x6791, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TAHITI|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x6792, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TAHITI|RADEON_NEW_MEMMAP}, \ {0x1002, 0x6798, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TAHITI|RADEON_NEW_MEMMAP}, \ {0x1002, 0x6799, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TAHITI|RADEON_NEW_MEMMAP}, \ {0x1002, 0x679A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TAHITI|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x679B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TAHITI|RADEON_NEW_MEMMAP}, \ {0x1002, 0x679E, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TAHITI|RADEON_NEW_MEMMAP}, \ {0x1002, 0x679F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TAHITI|RADEON_NEW_MEMMAP}, \ {0x1002, 0x6800, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PITCAIRN|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ @@ -221,6 +224,7 @@ {0x1002, 0x6808, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PITCAIRN|RADEON_NEW_MEMMAP}, \ {0x1002, 0x6809, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PITCAIRN|RADEON_NEW_MEMMAP}, \ {0x1002, 0x6810, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PITCAIRN|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x6811, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PITCAIRN|RADEON_NEW_MEMMAP}, \ {0x1002, 0x6816, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PITCAIRN|RADEON_NEW_MEMMAP}, \ {0x1002, 0x6817, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PITCAIRN|RADEON_NEW_MEMMAP}, \ {0x1002, 0x6818, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PITCAIRN|RADEON_NEW_MEMMAP}, \ diff --git a/include/linux/auto_fs.h b/include/linux/auto_fs.h index da64e15004b6..6cdabb422de6 100644 --- a/include/linux/auto_fs.h +++ b/include/linux/auto_fs.h @@ -31,25 +31,16 @@ #define AUTOFS_MIN_PROTO_VERSION AUTOFS_PROTO_VERSION /* - * Architectures where both 32- and 64-bit binaries can be executed - * on 64-bit kernels need this. This keeps the structure format - * uniform, and makes sure the wait_queue_token isn't too big to be - * passed back down to the kernel. - * - * This assumes that on these architectures: - * mode 32 bit 64 bit - * ------------------------- - * int 32 bit 32 bit - * long 32 bit 64 bit - * - * If so, 32-bit user-space code should be backwards compatible. + * The wait_queue_token (autofs_wqt_t) is part of a structure which is passed + * back to the kernel via ioctl from userspace. On architectures where 32- and + * 64-bit userspace binaries can be executed it's important that the size of + * autofs_wqt_t stays constant between 32- and 64-bit Linux kernels so that we + * do not break the binary ABI interface by changing the structure size. 
*/ - -#if defined(__sparc__) || defined(__mips__) || defined(__x86_64__) \ - || defined(__powerpc__) || defined(__s390__) -typedef unsigned int autofs_wqt_t; -#else +#if defined(__ia64__) || defined(__alpha__) /* pure 64bit architectures */ typedef unsigned long autofs_wqt_t; +#else +typedef unsigned int autofs_wqt_t; #endif /* Packet types */ diff --git a/include/linux/binfmts.h b/include/linux/binfmts.h index 366422bc1633..eb53e15ede68 100644 --- a/include/linux/binfmts.h +++ b/include/linux/binfmts.h @@ -128,6 +128,7 @@ extern int setup_arg_pages(struct linux_binprm * bprm, unsigned long stack_top, int executable_stack); extern int bprm_mm_init(struct linux_binprm *bprm); +extern int bprm_change_interp(char *interp, struct linux_binprm *bprm); extern int copy_strings_kernel(int argc, const char *const *argv, struct linux_binprm *bprm); extern int prepare_bprm_creds(struct linux_binprm *bprm); diff --git a/include/linux/ceph/auth.h b/include/linux/ceph/auth.h index aa13392a7efb..d4080f309b56 100644 --- a/include/linux/ceph/auth.h +++ b/include/linux/ceph/auth.h @@ -14,6 +14,14 @@ struct ceph_auth_client; struct ceph_authorizer; +struct ceph_auth_handshake { + struct ceph_authorizer *authorizer; + void *authorizer_buf; + size_t authorizer_buf_len; + void *authorizer_reply_buf; + size_t authorizer_reply_buf_len; +}; + struct ceph_auth_client_ops { const char *name; @@ -43,9 +51,7 @@ struct ceph_auth_client_ops { * the response to authenticate the service. */ int (*create_authorizer)(struct ceph_auth_client *ac, int peer_type, - struct ceph_authorizer **a, - void **buf, size_t *len, - void **reply_buf, size_t *reply_len); + struct ceph_auth_handshake *auth); int (*verify_authorizer_reply)(struct ceph_auth_client *ac, struct ceph_authorizer *a, size_t len); void (*destroy_authorizer)(struct ceph_auth_client *ac, diff --git a/include/linux/ceph/libceph.h b/include/linux/ceph/libceph.h index e71d683982a6..893420b78d0a 100644 --- a/include/linux/ceph/libceph.h +++ b/include/linux/ceph/libceph.h @@ -49,7 +49,6 @@ struct ceph_options { struct ceph_entity_addr my_addr; int mount_timeout; int osd_idle_ttl; - int osd_timeout; int osd_keepalive_timeout; /* @@ -69,7 +68,6 @@ struct ceph_options { * defaults */ #define CEPH_MOUNT_TIMEOUT_DEFAULT 60 -#define CEPH_OSD_TIMEOUT_DEFAULT 60 /* seconds */ #define CEPH_OSD_KEEPALIVE_DEFAULT 5 #define CEPH_OSD_IDLE_TTL_DEFAULT 60 @@ -132,7 +130,7 @@ struct ceph_client { u32 supported_features; u32 required_features; - struct ceph_messenger *msgr; /* messenger instance */ + struct ceph_messenger msgr; /* messenger instance */ struct ceph_mon_client monc; struct ceph_osd_client osdc; diff --git a/include/linux/ceph/messenger.h b/include/linux/ceph/messenger.h index 3bff047f6b0f..189ae0637634 100644 --- a/include/linux/ceph/messenger.h +++ b/include/linux/ceph/messenger.h @@ -25,15 +25,12 @@ struct ceph_connection_operations { void (*dispatch) (struct ceph_connection *con, struct ceph_msg *m); /* authorize an outgoing connection */ - int (*get_authorizer) (struct ceph_connection *con, - void **buf, int *len, int *proto, - void **reply_buf, int *reply_len, int force_new); + struct ceph_auth_handshake *(*get_authorizer) ( + struct ceph_connection *con, + int *proto, int force_new); int (*verify_authorizer_reply) (struct ceph_connection *con, int len); int (*invalidate_authorizer)(struct ceph_connection *con); - /* protocol version mismatch */ - void (*bad_proto) (struct ceph_connection *con); - /* there was some error on the socket (disconnect, whatever) */ void 
(*fault) (struct ceph_connection *con); @@ -53,6 +50,7 @@ struct ceph_messenger { struct ceph_entity_inst inst; /* my name+address */ struct ceph_entity_addr my_enc_addr; + atomic_t stopping; bool nocrc; /* @@ -80,7 +78,10 @@ struct ceph_msg { unsigned nr_pages; /* size of page array */ unsigned page_alignment; /* io offset in first page */ struct ceph_pagelist *pagelist; /* instead of pages */ + + struct ceph_connection *con; struct list_head list_head; + struct kref kref; struct bio *bio; /* instead of pages/pagelist */ struct bio *bio_iter; /* bio iterator */ @@ -106,23 +107,6 @@ struct ceph_msg_pos { #define MAX_DELAY_INTERVAL (5 * 60 * HZ) /* - * ceph_connection state bit flags - */ -#define LOSSYTX 0 /* we can close channel or drop messages on errors */ -#define CONNECTING 1 -#define NEGOTIATING 2 -#define KEEPALIVE_PENDING 3 -#define WRITE_PENDING 4 /* we have data ready to send */ -#define STANDBY 8 /* no outgoing messages, socket closed. we keep - * the ceph_connection around to maintain shared - * state with the peer. */ -#define CLOSED 10 /* we've closed the connection */ -#define SOCK_CLOSED 11 /* socket state changed to closed */ -#define OPENING 13 /* open connection w/ (possibly new) peer */ -#define DEAD 14 /* dead, about to kfree */ -#define BACKOFF 15 - -/* * A single connection with another host. * * We maintain a queue of outgoing messages, and some session state to @@ -131,18 +115,22 @@ struct ceph_msg_pos { */ struct ceph_connection { void *private; - atomic_t nref; const struct ceph_connection_operations *ops; struct ceph_messenger *msgr; + + atomic_t sock_state; struct socket *sock; - unsigned long state; /* connection state (see flags above) */ + struct ceph_entity_addr peer_addr; /* peer address */ + struct ceph_entity_addr peer_addr_for_me; + + unsigned long flags; + unsigned long state; const char *error_msg; /* error message, if any */ - struct ceph_entity_addr peer_addr; /* peer address */ struct ceph_entity_name peer_name; /* peer name */ - struct ceph_entity_addr peer_addr_for_me; + unsigned peer_features; u32 connect_seq; /* identify the most recent connection attempt for this connection, client */ @@ -163,16 +151,8 @@ struct ceph_connection { /* connection negotiation temps */ char in_banner[CEPH_BANNER_MAX_LEN]; - union { - struct { /* outgoing connection */ - struct ceph_msg_connect out_connect; - struct ceph_msg_connect_reply in_reply; - }; - struct { /* incoming */ - struct ceph_msg_connect in_connect; - struct ceph_msg_connect_reply out_reply; - }; - }; + struct ceph_msg_connect out_connect; + struct ceph_msg_connect_reply in_reply; struct ceph_entity_addr actual_peer_addr; /* message out temps */ @@ -215,24 +195,26 @@ extern int ceph_msgr_init(void); extern void ceph_msgr_exit(void); extern void ceph_msgr_flush(void); -extern struct ceph_messenger *ceph_messenger_create( - struct ceph_entity_addr *myaddr, - u32 features, u32 required); -extern void ceph_messenger_destroy(struct ceph_messenger *); +extern void ceph_messenger_init(struct ceph_messenger *msgr, + struct ceph_entity_addr *myaddr, + u32 supported_features, + u32 required_features, + bool nocrc); -extern void ceph_con_init(struct ceph_messenger *msgr, - struct ceph_connection *con); +extern void ceph_con_init(struct ceph_connection *con, void *private, + const struct ceph_connection_operations *ops, + struct ceph_messenger *msgr); extern void ceph_con_open(struct ceph_connection *con, + __u8 entity_type, __u64 entity_num, struct ceph_entity_addr *addr); extern bool ceph_con_opened(struct 
ceph_connection *con); extern void ceph_con_close(struct ceph_connection *con); extern void ceph_con_send(struct ceph_connection *con, struct ceph_msg *msg); -extern void ceph_con_revoke(struct ceph_connection *con, struct ceph_msg *msg); -extern void ceph_con_revoke_message(struct ceph_connection *con, - struct ceph_msg *msg); + +extern void ceph_msg_revoke(struct ceph_msg *msg); +extern void ceph_msg_revoke_incoming(struct ceph_msg *msg); + extern void ceph_con_keepalive(struct ceph_connection *con); -extern struct ceph_connection *ceph_con_get(struct ceph_connection *con); -extern void ceph_con_put(struct ceph_connection *con); extern struct ceph_msg *ceph_msg_new(int type, int front_len, gfp_t flags, bool can_fail); diff --git a/include/linux/ceph/mon_client.h b/include/linux/ceph/mon_client.h index 545f85917780..2113e3850a4e 100644 --- a/include/linux/ceph/mon_client.h +++ b/include/linux/ceph/mon_client.h @@ -70,7 +70,7 @@ struct ceph_mon_client { bool hunting; int cur_mon; /* last monitor i contacted */ unsigned long sub_sent, sub_renew_after; - struct ceph_connection *con; + struct ceph_connection con; bool have_fsid; /* pending generic requests */ diff --git a/include/linux/ceph/msgpool.h b/include/linux/ceph/msgpool.h index a362605f9368..09fa96b43436 100644 --- a/include/linux/ceph/msgpool.h +++ b/include/linux/ceph/msgpool.h @@ -11,10 +11,11 @@ struct ceph_msgpool { const char *name; mempool_t *pool; + int type; /* preallocated message type */ int front_len; /* preallocated payload size */ }; -extern int ceph_msgpool_init(struct ceph_msgpool *pool, +extern int ceph_msgpool_init(struct ceph_msgpool *pool, int type, int front_len, int size, bool blocking, const char *name); extern void ceph_msgpool_destroy(struct ceph_msgpool *pool); diff --git a/include/linux/ceph/osd_client.h b/include/linux/ceph/osd_client.h index 7c05ac202d90..d9b880e977e6 100644 --- a/include/linux/ceph/osd_client.h +++ b/include/linux/ceph/osd_client.h @@ -6,9 +6,10 @@ #include <linux/mempool.h> #include <linux/rbtree.h> -#include "types.h" -#include "osdmap.h" -#include "messenger.h" +#include <linux/ceph/types.h> +#include <linux/ceph/osdmap.h> +#include <linux/ceph/messenger.h> +#include <linux/ceph/auth.h> /* * Maximum object name size @@ -40,9 +41,7 @@ struct ceph_osd { struct list_head o_requests; struct list_head o_linger_requests; struct list_head o_osd_lru; - struct ceph_authorizer *o_authorizer; - void *o_authorizer_buf, *o_authorizer_reply_buf; - size_t o_authorizer_buf_len, o_authorizer_reply_buf_len; + struct ceph_auth_handshake o_auth; unsigned long lru_ttl; int o_marked_for_keepalive; struct list_head o_keepalive_item; @@ -208,7 +207,7 @@ extern void ceph_osdc_handle_reply(struct ceph_osd_client *osdc, extern void ceph_osdc_handle_map(struct ceph_osd_client *osdc, struct ceph_msg *msg); -extern void ceph_calc_raw_layout(struct ceph_osd_client *osdc, +extern int ceph_calc_raw_layout(struct ceph_osd_client *osdc, struct ceph_file_layout *layout, u64 snapid, u64 off, u64 *plen, u64 *bno, diff --git a/include/linux/ceph/osdmap.h b/include/linux/ceph/osdmap.h index ba4c205cbb01..11db454f7865 100644 --- a/include/linux/ceph/osdmap.h +++ b/include/linux/ceph/osdmap.h @@ -111,9 +111,9 @@ extern struct ceph_osdmap *osdmap_apply_incremental(void **p, void *end, extern void ceph_osdmap_destroy(struct ceph_osdmap *map); /* calculate mapping of a file extent to an object */ -extern void ceph_calc_file_object_mapping(struct ceph_file_layout *layout, - u64 off, u64 *plen, - u64 *bno, u64 *oxoff, u64 *oxlen); 
+extern int ceph_calc_file_object_mapping(struct ceph_file_layout *layout, + u64 off, u64 *plen, + u64 *bno, u64 *oxoff, u64 *oxlen); /* calculate mapping of object to a placement group */ extern int ceph_calc_object_layout(struct ceph_object_layout *ol, diff --git a/include/linux/console.h b/include/linux/console.h index 7201ce4280ca..f59e9428d06b 100644 --- a/include/linux/console.h +++ b/include/linux/console.h @@ -77,7 +77,9 @@ extern const struct consw prom_con; /* SPARC PROM console */ int con_is_bound(const struct consw *csw); int register_con_driver(const struct consw *csw, int first, int last); int unregister_con_driver(const struct consw *csw); +int do_unregister_con_driver(const struct consw *csw); int take_over_console(const struct consw *sw, int first, int last, int deflt); +int do_take_over_console(const struct consw *sw, int first, int last, int deflt); void give_up_console(const struct consw *sw); #ifdef CONFIG_HW_CONSOLE int con_debug_enter(struct vc_data *vc); diff --git a/include/linux/crush/crush.h b/include/linux/crush/crush.h index 97e435b191f4..e7a8c9055cb2 100644 --- a/include/linux/crush/crush.h +++ b/include/linux/crush/crush.h @@ -168,7 +168,7 @@ struct crush_map { /* crush.c */ -extern int crush_get_bucket_item_weight(struct crush_bucket *b, int pos); +extern int crush_get_bucket_item_weight(const struct crush_bucket *b, int pos); extern void crush_calc_parents(struct crush_map *map); extern void crush_destroy_bucket_uniform(struct crush_bucket_uniform *b); extern void crush_destroy_bucket_list(struct crush_bucket_list *b); @@ -177,4 +177,9 @@ extern void crush_destroy_bucket_straw(struct crush_bucket_straw *b); extern void crush_destroy_bucket(struct crush_bucket *b); extern void crush_destroy(struct crush_map *map); +static inline int crush_calc_tree_node(int i) +{ + return ((i+1) << 1)-1; +} + #endif diff --git a/include/linux/crush/mapper.h b/include/linux/crush/mapper.h index c46b99c18bb0..9322ab8bccd8 100644 --- a/include/linux/crush/mapper.h +++ b/include/linux/crush/mapper.h @@ -10,11 +10,11 @@ #include "crush.h" -extern int crush_find_rule(struct crush_map *map, int pool, int type, int size); -extern int crush_do_rule(struct crush_map *map, +extern int crush_find_rule(const struct crush_map *map, int ruleset, int type, int size); +extern int crush_do_rule(const struct crush_map *map, int ruleno, int x, int *result, int result_max, int forcefeed, /* -1 for none */ - __u32 *weights); + const __u32 *weights); #endif diff --git a/include/linux/dcache.h b/include/linux/dcache.h index 7e11f1418203..1332df02d8f2 100644 --- a/include/linux/dcache.h +++ b/include/linux/dcache.h @@ -191,6 +191,8 @@ struct dentry_operations { #define DCACHE_MANAGED_DENTRY \ (DCACHE_MOUNTED|DCACHE_NEED_AUTOMOUNT|DCACHE_MANAGE_TRANSIT) +#define DCACHE_DENTRY_KILLED 0x100000 + extern seqlock_t rename_lock; static inline int dname_external(struct dentry *dentry) diff --git a/include/linux/efi.h b/include/linux/efi.h index ec45ccd8708a..eee8b0b190ea 100644 --- a/include/linux/efi.h +++ b/include/linux/efi.h @@ -496,6 +496,11 @@ extern void efi_map_pal_code (void); extern void efi_memmap_walk (efi_freemem_callback_t callback, void *arg); extern void efi_gettimeofday (struct timespec *ts); extern void efi_enter_virtual_mode (void); /* switch EFI to virtual mode, if possible */ +#ifdef CONFIG_X86 +extern void efi_free_boot_services(void); +#else +static inline void efi_free_boot_services(void) {} +#endif extern u64 efi_get_iobase (void); extern u32 efi_mem_type (unsigned long phys_addr); 
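/*
 * Editorial sketch, not part of any patch above: worked values for the
 * crush_calc_tree_node() helper added to include/linux/crush/crush.h.
 * ((i + 1) << 1) - 1 is just 2*i + 1, so item i of a CRUSH "tree" bucket
 * lands on the odd node numbers 1, 3, 5, ... of the implicit binary tree;
 * reading these as the items' positions in that tree is an assumption of
 * this sketch, not something the patch itself spells out.
 */
#include <stdio.h>

static int crush_calc_tree_node_sketch(int i)
{
    return ((i + 1) << 1) - 1;
}

int main(void)
{
    for (int i = 0; i < 4; i++)
        printf("item %d -> node %d\n", i, crush_calc_tree_node_sketch(i));
    /* item 0 -> node 1, item 1 -> node 3, item 2 -> node 5, item 3 -> node 7 */
    return 0;
}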
extern u64 efi_mem_attributes (unsigned long phys_addr); @@ -534,18 +539,30 @@ extern int __init efi_setup_pcdp_console(char *); #endif /* - * We play games with efi_enabled so that the compiler will, if possible, remove - * EFI-related code altogether. + * We play games with efi_enabled so that the compiler will, if + * possible, remove EFI-related code altogether. */ +#define EFI_BOOT 0 /* Were we booted from EFI? */ +#define EFI_SYSTEM_TABLES 1 /* Can we use EFI system tables? */ +#define EFI_CONFIG_TABLES 2 /* Can we use EFI config tables? */ +#define EFI_RUNTIME_SERVICES 3 /* Can we use runtime services? */ +#define EFI_MEMMAP 4 /* Can we use EFI memory map? */ +#define EFI_64BIT 5 /* Is the firmware 64-bit? */ + #ifdef CONFIG_EFI # ifdef CONFIG_X86 - extern int efi_enabled; - extern bool efi_64bit; +extern int efi_enabled(int facility); # else -# define efi_enabled 1 +static inline int efi_enabled(int facility) +{ + return 1; +} # endif #else -# define efi_enabled 0 +static inline int efi_enabled(int facility) +{ + return 0; +} #endif /* diff --git a/include/linux/freezer.h index a628084eea3c..b79e4d879d22 100644 --- a/include/linux/freezer.h +++ b/include/linux/freezer.h @@ -86,28 +86,62 @@ static inline bool cgroup_freezing(struct task_struct *task) */ -/* Tell the freezer not to count the current task as freezable. */ +/** + * freezer_do_not_count - tell freezer to ignore %current + * + * Tell freezers to ignore the current task when determining whether the + * target frozen state is reached. IOW, the current task will be + * considered frozen enough by freezers. + * + * The caller shouldn't do anything which isn't allowed for a frozen task + * until freezer_cont() is called. Usually, freezer[_do_not]_count() pair + * wrap a scheduling operation and nothing much else. + */ static inline void freezer_do_not_count(void) { current->flags |= PF_FREEZER_SKIP; } -/* - * Tell the freezer to count the current task as freezable again and try to - * freeze it. +/** + * freezer_count - tell freezer to stop ignoring %current + * + * Undo freezer_do_not_count(). It tells freezers that %current should be + * considered again and tries to freeze if freezing condition is already in + * effect. */ static inline void freezer_count(void) { current->flags &= ~PF_FREEZER_SKIP; + /* + * If freezing is in progress, the following paired with smp_mb() + * in freezer_should_skip() ensures that either we see %true + * freezing() or freezer_should_skip() sees !PF_FREEZER_SKIP. + */ + smp_mb(); try_to_freeze(); } -/* - * Check if the task should be counted as freezable by the freezer +/** + * freezer_should_skip - whether to skip a task when determining frozen + * state is reached + * @p: task in question + * + * This function is used by freezers after establishing %true freezing() to + * test whether a task should be skipped when determining the target frozen + * state is reached. IOW, if this function returns %true, @p is considered + * frozen enough. */ -static inline int freezer_should_skip(struct task_struct *p) +static inline bool freezer_should_skip(struct task_struct *p) { - return !!(p->flags & PF_FREEZER_SKIP); + /* + * The following smp_mb() paired with the one in freezer_count() + * ensures that either freezer_count() sees %true freezing() or we + * see cleared %PF_FREEZER_SKIP and return %false. This makes it + * impossible for a task to slip frozen state testing after + * clearing %PF_FREEZER_SKIP.
+ */ + smp_mb(); + return p->flags & PF_FREEZER_SKIP; } /* diff --git a/include/linux/if_vlan.h b/include/linux/if_vlan.h index a810987cb80e..9b0c6142eaed 100644 --- a/include/linux/if_vlan.h +++ b/include/linux/if_vlan.h @@ -82,6 +82,8 @@ static inline int is_vlan_dev(struct net_device *dev) } #define vlan_tx_tag_present(__skb) ((__skb)->vlan_tci & VLAN_TAG_PRESENT) +#define vlan_tx_nonzero_tag_present(__skb) \ + (vlan_tx_tag_present(__skb) && ((__skb)->vlan_tci & VLAN_VID_MASK)) #define vlan_tx_tag_get(__skb) ((__skb)->vlan_tci & ~VLAN_TAG_PRESENT) #if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE) @@ -91,7 +93,7 @@ extern struct net_device *__vlan_find_dev_deep(struct net_device *real_dev, extern struct net_device *vlan_dev_real_dev(const struct net_device *dev); extern u16 vlan_dev_vlan_id(const struct net_device *dev); -extern bool vlan_do_receive(struct sk_buff **skb, bool last_handler); +extern bool vlan_do_receive(struct sk_buff **skb); extern struct sk_buff *vlan_untag(struct sk_buff *skb); extern int vlan_vid_add(struct net_device *dev, unsigned short vid); @@ -120,10 +122,8 @@ static inline u16 vlan_dev_vlan_id(const struct net_device *dev) return 0; } -static inline bool vlan_do_receive(struct sk_buff **skb, bool last_handler) +static inline bool vlan_do_receive(struct sk_buff **skb) { - if (((*skb)->vlan_tci & VLAN_VID_MASK) && last_handler) - (*skb)->pkt_type = PACKET_OTHERHOST; return false; } @@ -327,7 +327,7 @@ static inline void vlan_set_encap_proto(struct sk_buff *skb, struct vlan_hdr *vhdr) { __be16 proto; - unsigned char *rawp; + unsigned short *rawp; /* * Was a VLAN packet, grab the encapsulated protocol, which the layer @@ -340,8 +340,8 @@ static inline void vlan_set_encap_proto(struct sk_buff *skb, return; } - rawp = skb->data; - if (*(unsigned short *) rawp == 0xFFFF) + rawp = (unsigned short *)(vhdr + 1); + if (*rawp == 0xFFFF) /* * This is a magic hack to spot IPX packets. Older Novell * breaks the protocol design and runs IPX over 802.3 without diff --git a/include/linux/init_task.h b/include/linux/init_task.h index e4baff5f7ff4..e7bafa432aa3 100644 --- a/include/linux/init_task.h +++ b/include/linux/init_task.h @@ -123,8 +123,17 @@ extern struct group_info init_groups; extern struct cred init_cred; +extern struct task_group root_task_group; + +#ifdef CONFIG_CGROUP_SCHED +# define INIT_CGROUP_SCHED(tsk) \ + .sched_task_group = &root_task_group, +#else +# define INIT_CGROUP_SCHED(tsk) +#endif + #ifdef CONFIG_PERF_EVENTS -# define INIT_PERF_EVENTS(tsk) \ +# define INIT_PERF_EVENTS(tsk) \ .perf_event_mutex = \ __MUTEX_INITIALIZER(tsk.perf_event_mutex), \ .perf_event_list = LIST_HEAD_INIT(tsk.perf_event_list), @@ -161,6 +170,7 @@ extern struct cred init_cred; }, \ .tasks = LIST_HEAD_INIT(tsk.tasks), \ INIT_PUSHABLE_TASKS(tsk) \ + INIT_CGROUP_SCHED(tsk) \ .ptraced = LIST_HEAD_INIT(tsk.ptraced), \ .ptrace_entry = LIST_HEAD_INIT(tsk.ptrace_entry), \ .real_parent = &tsk, \ diff --git a/include/linux/kobject.h b/include/linux/kobject.h index fc615a97e2d3..1e57449395b1 100644 --- a/include/linux/kobject.h +++ b/include/linux/kobject.h @@ -224,7 +224,7 @@ static inline int kobject_uevent_env(struct kobject *kobj, static inline __printf(2, 3) int add_uevent_var(struct kobj_uevent_env *env, const char *format, ...) 
-{ return 0; } +{ return -ENOMEM; } static inline int kobject_action_type(const char *buf, size_t count, enum kobject_action *type) diff --git a/include/linux/ktime.h b/include/linux/ktime.h index 603bec2913b0..06177ba10a16 100644 --- a/include/linux/ktime.h +++ b/include/linux/ktime.h @@ -58,13 +58,6 @@ union ktime { typedef union ktime ktime_t; /* Kill this */ -#define KTIME_MAX ((s64)~((u64)1 << 63)) -#if (BITS_PER_LONG == 64) -# define KTIME_SEC_MAX (KTIME_MAX / NSEC_PER_SEC) -#else -# define KTIME_SEC_MAX LONG_MAX -#endif - /* * ktime_t definitions when using the 64-bit scalar representation: */ diff --git a/include/linux/memblock.h b/include/linux/memblock.h index 19dc455b4f3d..c948c440ea2c 100644 --- a/include/linux/memblock.h +++ b/include/linux/memblock.h @@ -57,6 +57,7 @@ int memblock_add(phys_addr_t base, phys_addr_t size); int memblock_remove(phys_addr_t base, phys_addr_t size); int memblock_free(phys_addr_t base, phys_addr_t size); int memblock_reserve(phys_addr_t base, phys_addr_t size); +void memblock_trim_memory(phys_addr_t align); #ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP void __next_mem_pfn_range(int *idx, int nid, unsigned long *out_start_pfn, diff --git a/include/linux/memory.h b/include/linux/memory.h index 1ac7f6e405f9..ff9a9f8e0ed9 100644 --- a/include/linux/memory.h +++ b/include/linux/memory.h @@ -19,7 +19,7 @@ #include <linux/compiler.h> #include <linux/mutex.h> -#define MIN_MEMORY_BLOCK_SIZE (1 << SECTION_SIZE_BITS) +#define MIN_MEMORY_BLOCK_SIZE (1UL << SECTION_SIZE_BITS) struct memory_block { unsigned long start_section_nr; diff --git a/include/linux/mempolicy.h b/include/linux/mempolicy.h index 7c727a90d70d..fe07e5a90569 100644 --- a/include/linux/mempolicy.h +++ b/include/linux/mempolicy.h @@ -137,16 +137,6 @@ static inline void mpol_cond_put(struct mempolicy *pol) __mpol_put(pol); } -extern struct mempolicy *__mpol_cond_copy(struct mempolicy *tompol, - struct mempolicy *frompol); -static inline struct mempolicy *mpol_cond_copy(struct mempolicy *tompol, - struct mempolicy *frompol) -{ - if (!frompol) - return frompol; - return __mpol_cond_copy(tompol, frompol); -} - extern struct mempolicy *__mpol_dup(struct mempolicy *pol); static inline struct mempolicy *mpol_dup(struct mempolicy *pol) { @@ -188,7 +178,7 @@ struct sp_node { struct shared_policy { struct rb_root root; - spinlock_t lock; + struct mutex mutex; }; void mpol_shared_policy_init(struct shared_policy *sp, struct mempolicy *mpol); @@ -270,12 +260,6 @@ static inline void mpol_cond_put(struct mempolicy *pol) { } -static inline struct mempolicy *mpol_cond_copy(struct mempolicy *to, - struct mempolicy *from) -{ - return from; -} - static inline void mpol_get(struct mempolicy *pol) { } diff --git a/include/linux/mmc/card.h b/include/linux/mmc/card.h index 22d11242eb93..1289e9f3cb4b 100644 --- a/include/linux/mmc/card.h +++ b/include/linux/mmc/card.h @@ -251,6 +251,7 @@ struct mmc_card { #define MMC_QUIRK_BLK_NO_CMD23 (1<<7) /* Avoid CMD23 for regular multiblock */ #define MMC_QUIRK_BROKEN_BYTE_MODE_512 (1<<8) /* Avoid sending 512 bytes in */ #define MMC_QUIRK_LONG_READ_TIME (1<<9) /* Data read time > CSD says */ +#define MMC_QUIRK_SEC_ERASE_TRIM_BROKEN (1<<10) /* Skip secure for erase/trim */ /* byte mode */ unsigned int poweroff_notify_state; /* eMMC4.5 notify feature */ #define MMC_NO_POWER_NOTIFICATION 0 diff --git a/include/linux/mmu_notifier.h b/include/linux/mmu_notifier.h index 1d1b1e13f79f..ee2baf034fc4 100644 --- a/include/linux/mmu_notifier.h +++ b/include/linux/mmu_notifier.h @@ -4,6 +4,7 @@ 
#include <linux/list.h> #include <linux/spinlock.h> #include <linux/mm_types.h> +#include <linux/srcu.h> struct mmu_notifier; struct mmu_notifier_ops; diff --git a/include/linux/mtd/nand.h b/include/linux/mtd/nand.h index 1482340d3d9f..248351344718 100644 --- a/include/linux/mtd/nand.h +++ b/include/linux/mtd/nand.h @@ -215,9 +215,6 @@ typedef enum { #define NAND_SUBPAGE_READ(chip) ((chip->ecc.mode == NAND_ECC_SOFT) \ && (chip->page_shift > 9)) -/* Mask to zero out the chip options, which come from the id table */ -#define NAND_CHIPOPTIONS_MSK (0x0000ffff & ~NAND_NO_AUTOINCR) - /* Non chip related options */ /* This option skips the bbt scan during initialization. */ #define NAND_SKIP_BBTSCAN 0x00010000 diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h index 33900a53c990..e517695704cc 100644 --- a/include/linux/netdevice.h +++ b/include/linux/netdevice.h @@ -1279,6 +1279,8 @@ struct net_device { /* for setting kernel sock attribute on TCP connection setup */ #define GSO_MAX_SIZE 65536 unsigned int gso_max_size; +#define GSO_MAX_SEGS 65535 + u16 gso_max_segs; #ifdef CONFIG_DCB /* Data Center Bridging netlink ops */ @@ -1494,6 +1496,8 @@ struct packet_type { struct sk_buff **(*gro_receive)(struct sk_buff **head, struct sk_buff *skb); int (*gro_complete)(struct sk_buff *skb); + bool (*id_match)(struct packet_type *ptype, + struct sock *sk); void *af_packet_priv; struct list_head list; }; diff --git a/include/linux/netfilter/ipset/ip_set_timeout.h b/include/linux/netfilter/ipset/ip_set_timeout.h index 47923205a4ad..41d9cfa08167 100644 --- a/include/linux/netfilter/ipset/ip_set_timeout.h +++ b/include/linux/netfilter/ipset/ip_set_timeout.h @@ -30,6 +30,10 @@ ip_set_timeout_uget(struct nlattr *tb) { unsigned int timeout = ip_set_get_h32(tb); + /* Normalize to fit into jiffies */ + if (timeout > UINT_MAX/MSEC_PER_SEC) + timeout = UINT_MAX/MSEC_PER_SEC; + /* Userspace supplied TIMEOUT parameter: adjust crazy size */ return timeout == IPSET_NO_TIMEOUT ? 
IPSET_NO_TIMEOUT - 1 : timeout; } diff --git a/include/linux/netlink.h b/include/linux/netlink.h index a2092f582a78..b23e9cd6a888 100644 --- a/include/linux/netlink.h +++ b/include/linux/netlink.h @@ -153,6 +153,7 @@ struct nlattr { #include <linux/capability.h> #include <linux/skbuff.h> +#include <linux/export.h> struct net; @@ -226,6 +227,8 @@ struct netlink_callback { struct netlink_callback *cb); int (*done)(struct netlink_callback *cb); void *data; + /* the module that dump function belong to */ + struct module *module; u16 family; u16 min_dump_alloc; unsigned int prev_seq, seq; @@ -251,14 +254,24 @@ __nlmsg_put(struct sk_buff *skb, u32 pid, u32 seq, int type, int len, int flags) struct netlink_dump_control { int (*dump)(struct sk_buff *skb, struct netlink_callback *); - int (*done)(struct netlink_callback*); + int (*done)(struct netlink_callback *); void *data; + struct module *module; u16 min_dump_alloc; }; -extern int netlink_dump_start(struct sock *ssk, struct sk_buff *skb, - const struct nlmsghdr *nlh, - struct netlink_dump_control *control); +extern int __netlink_dump_start(struct sock *ssk, struct sk_buff *skb, + const struct nlmsghdr *nlh, + struct netlink_dump_control *control); +static inline int netlink_dump_start(struct sock *ssk, struct sk_buff *skb, + const struct nlmsghdr *nlh, + struct netlink_dump_control *control) +{ + if (!control->module) + control->module = THIS_MODULE; + + return __netlink_dump_start(ssk, skb, nlh, control); +} #define NL_NONROOT_RECV 0x1 diff --git a/include/linux/nfs_fs.h b/include/linux/nfs_fs.h index 52a1bdb4ee2b..941d688c2034 100644 --- a/include/linux/nfs_fs.h +++ b/include/linux/nfs_fs.h @@ -264,11 +264,6 @@ static inline const struct nfs_rpc_ops *NFS_PROTO(const struct inode *inode) return NFS_SERVER(inode)->nfs_client->rpc_ops; } -static inline __be32 *NFS_COOKIEVERF(const struct inode *inode) -{ - return NFS_I(inode)->cookieverf; -} - static inline unsigned NFS_MINATTRTIMEO(const struct inode *inode) { struct nfs_server *nfss = NFS_SERVER(inode); diff --git a/include/linux/page-flags.h b/include/linux/page-flags.h index c88d2a9451af..4dabf0fe527e 100644 --- a/include/linux/page-flags.h +++ b/include/linux/page-flags.h @@ -361,7 +361,7 @@ static inline void ClearPageCompound(struct page *page) * pages on the LRU and/or pagecache. 
*/ TESTPAGEFLAG(Compound, compound) -__PAGEFLAG(Head, compound) +__SETPAGEFLAG(Head, compound) __CLEARPAGEFLAG(Head, compound) /* * PG_reclaim is used in combination with PG_compound to mark the @@ -373,8 +373,14 @@ __PAGEFLAG(Head, compound) * PG_compound & PG_reclaim => Tail page * PG_compound & ~PG_reclaim => Head page */ +#define PG_head_mask ((1L << PG_compound)) #define PG_head_tail_mask ((1L << PG_compound) | (1L << PG_reclaim)) +static inline int PageHead(struct page *page) +{ + return ((page->flags & PG_head_tail_mask) == PG_head_mask); +} + static inline int PageTail(struct page *page) { return ((page->flags & PG_head_tail_mask) == PG_head_tail_mask); diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h index 3329965ed63f..d7dbf4ec89d2 100644 --- a/include/linux/pci_ids.h +++ b/include/linux/pci_ids.h @@ -1565,6 +1565,7 @@ #define PCI_DEVICE_ID_RICOH_RL5C476 0x0476 #define PCI_DEVICE_ID_RICOH_RL5C478 0x0478 #define PCI_DEVICE_ID_RICOH_R5C822 0x0822 +#define PCI_DEVICE_ID_RICOH_R5CE822 0xe822 #define PCI_DEVICE_ID_RICOH_R5CE823 0xe823 #define PCI_DEVICE_ID_RICOH_R5C832 0x0832 #define PCI_DEVICE_ID_RICOH_R5C843 0x0843 @@ -1846,7 +1847,6 @@ #define PCI_DEVICE_ID_SIIG_8S_20x_650 0x2081 #define PCI_DEVICE_ID_SIIG_8S_20x_850 0x2082 #define PCI_SUBDEVICE_ID_SIIG_QUARTET_SERIAL 0x2050 -#define PCI_SUBDEVICE_ID_SIIG_DUAL_SERIAL 0x2530 #define PCI_VENDOR_ID_RADISYS 0x1331 @@ -2148,7 +2148,7 @@ #define PCI_DEVICE_ID_TIGON3_5704S 0x16a8 #define PCI_DEVICE_ID_NX2_57800_VF 0x16a9 #define PCI_DEVICE_ID_NX2_5706S 0x16aa -#define PCI_DEVICE_ID_NX2_57840_MF 0x16ab +#define PCI_DEVICE_ID_NX2_57840_MF 0x16a4 #define PCI_DEVICE_ID_NX2_5708S 0x16ac #define PCI_DEVICE_ID_NX2_57840_VF 0x16ad #define PCI_DEVICE_ID_NX2_57810_MF 0x16ae diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h index ddbb6a901f65..f18d537fd4ba 100644 --- a/include/linux/perf_event.h +++ b/include/linux/perf_event.h @@ -925,7 +925,7 @@ struct perf_event { struct hw_perf_event hw; struct perf_event_context *ctx; - struct file *filp; + atomic_long_t refcount; /* * These accumulate total time (in nanoseconds) that children diff --git a/include/linux/pstore.h b/include/linux/pstore.h index e1461e143be2..318cca1dee9b 100644 --- a/include/linux/pstore.h +++ b/include/linux/pstore.h @@ -54,12 +54,18 @@ struct pstore_info { #ifdef CONFIG_PSTORE extern int pstore_register(struct pstore_info *); +extern bool pstore_cannot_block_path(enum kmsg_dump_reason reason); #else static inline int pstore_register(struct pstore_info *psi) { return -ENODEV; } +static inline bool +pstore_cannot_block_path(enum kmsg_dump_reason reason) +{ + return false; +} #endif #endif /*_LINUX_PSTORE_H*/ diff --git a/include/linux/quota.h b/include/linux/quota.h index c09fa042b5ea..ffd8607ca4be 100644 --- a/include/linux/quota.h +++ b/include/linux/quota.h @@ -417,6 +417,7 @@ struct quota_module_name { #define INIT_QUOTA_MODULE_NAMES {\ {QFMT_VFS_OLD, "quota_v1"},\ {QFMT_VFS_V0, "quota_v2"},\ + {QFMT_VFS_V1, "quota_v2"},\ {0, NULL}} #endif /* __KERNEL__ */ diff --git a/include/linux/sched.h b/include/linux/sched.h index 3d18f24f50b8..43ae5a574fdb 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h @@ -1280,6 +1280,9 @@ struct task_struct { const struct sched_class *sched_class; struct sched_entity se; struct sched_rt_entity rt; +#ifdef CONFIG_CGROUP_SCHED + struct task_group *sched_task_group; +#endif #ifdef CONFIG_PREEMPT_NOTIFIERS /* list of struct preempt_notifier: */ @@ -2696,7 +2699,16 @@ static inline void 
thread_group_cputime_init(struct signal_struct *sig) extern void recalc_sigpending_and_wake(struct task_struct *t); extern void recalc_sigpending(void); -extern void signal_wake_up(struct task_struct *t, int resume_stopped); +extern void signal_wake_up_state(struct task_struct *t, unsigned int state); + +static inline void signal_wake_up(struct task_struct *t, bool resume) +{ + signal_wake_up_state(t, resume ? TASK_WAKEKILL : 0); +} +static inline void ptrace_signal_wake_up(struct task_struct *t, bool resume) +{ + signal_wake_up_state(t, resume ? __TASK_TRACED : 0); +} /* * Wrappers for p->thread_info->cpu access. No-op on UP. @@ -2748,7 +2760,7 @@ extern int sched_group_set_rt_period(struct task_group *tg, extern long sched_group_rt_period(struct task_group *tg); extern int sched_rt_can_attach(struct task_group *tg, struct task_struct *tsk); #endif -#endif +#endif /* CONFIG_CGROUP_SCHED */ extern int task_can_switch_user(struct user_struct *up, struct task_struct *tsk); diff --git a/include/linux/snmp.h b/include/linux/snmp.h index 2e68f5ba0389..a33f70fa1bfd 100644 --- a/include/linux/snmp.h +++ b/include/linux/snmp.h @@ -208,7 +208,6 @@ enum LINUX_MIB_TCPDSACKOFOSENT, /* TCPDSACKOfoSent */ LINUX_MIB_TCPDSACKRECV, /* TCPDSACKRecv */ LINUX_MIB_TCPDSACKOFORECV, /* TCPDSACKOfoRecv */ - LINUX_MIB_TCPABORTONSYN, /* TCPAbortOnSyn */ LINUX_MIB_TCPABORTONDATA, /* TCPAbortOnData */ LINUX_MIB_TCPABORTONCLOSE, /* TCPAbortOnClose */ LINUX_MIB_TCPABORTONMEMORY, /* TCPAbortOnMemory */ @@ -234,6 +233,8 @@ enum LINUX_MIB_TCPREQQFULLDROP, /* TCPReqQFullDrop */ LINUX_MIB_TCPRETRANSFAIL, /* TCPRetransFail */ LINUX_MIB_TCPRCVCOALESCE, /* TCPRcvCoalesce */ + LINUX_MIB_TCPCHALLENGEACK, /* TCPChallengeACK */ + LINUX_MIB_TCPSYNCHALLENGE, /* TCPSYNChallenge */ __LINUX_MIB_MAX }; diff --git a/include/linux/sunrpc/cache.h b/include/linux/sunrpc/cache.h index f5fd6160dbca..dd2bb84526a4 100644 --- a/include/linux/sunrpc/cache.h +++ b/include/linux/sunrpc/cache.h @@ -228,6 +228,22 @@ static inline int get_int(char **bpp, int *anint) return 0; } +static inline int get_uint(char **bpp, unsigned int *anint) +{ + char buf[50]; + int len = qword_get(bpp, buf, sizeof(buf)); + + if (len < 0) + return -EINVAL; + if (len == 0) + return -ENOENT; + + if (kstrtouint(buf, 0, anint)) + return -EINVAL; + + return 0; +} + /* * timestamps kept in the cache are expressed in seconds * since boot. 
This is the best for measuring differences in diff --git a/include/linux/sunrpc/xprt.h b/include/linux/sunrpc/xprt.h index 77d278defa70..005b507d6d74 100644 --- a/include/linux/sunrpc/xprt.h +++ b/include/linux/sunrpc/xprt.h @@ -114,6 +114,7 @@ struct rpc_xprt_ops { void (*set_buffer_size)(struct rpc_xprt *xprt, size_t sndsize, size_t rcvsize); int (*reserve_xprt)(struct rpc_xprt *xprt, struct rpc_task *task); void (*release_xprt)(struct rpc_xprt *xprt, struct rpc_task *task); + void (*alloc_slot)(struct rpc_xprt *xprt, struct rpc_task *task); void (*rpcbind)(struct rpc_task *task); void (*set_port)(struct rpc_xprt *xprt, unsigned short port); void (*connect)(struct rpc_task *task); @@ -279,6 +280,8 @@ void xprt_connect(struct rpc_task *task); void xprt_reserve(struct rpc_task *task); int xprt_reserve_xprt(struct rpc_xprt *xprt, struct rpc_task *task); int xprt_reserve_xprt_cong(struct rpc_xprt *xprt, struct rpc_task *task); +void xprt_alloc_slot(struct rpc_xprt *xprt, struct rpc_task *task); +void xprt_lock_and_alloc_slot(struct rpc_xprt *xprt, struct rpc_task *task); int xprt_prepare_transmit(struct rpc_task *task); void xprt_transmit(struct rpc_task *task); void xprt_end_transmit(struct rpc_task *task); diff --git a/include/linux/syslog.h b/include/linux/syslog.h index 38911391a139..ce4c66556059 100644 --- a/include/linux/syslog.h +++ b/include/linux/syslog.h @@ -47,6 +47,12 @@ #define SYSLOG_FROM_CALL 0 #define SYSLOG_FROM_FILE 1 +/* + * Syslog priority (PRI) maximum length in char : '<[0-9]{1,3}>' + * See RFC5424 for details +*/ +#define SYSLOG_PRI_MAX_LENGTH 5 + int do_syslog(int type, char __user *buf, int count, bool from_file); #endif /* _LINUX_SYSLOG_H */ diff --git a/include/linux/time.h b/include/linux/time.h index 8da51299a7d5..03dce74e217c 100644 --- a/include/linux/time.h +++ b/include/linux/time.h @@ -107,11 +107,36 @@ static inline struct timespec timespec_sub(struct timespec lhs, return ts_delta; } +#define KTIME_MAX ((s64)~((u64)1 << 63)) +#if (BITS_PER_LONG == 64) +# define KTIME_SEC_MAX (KTIME_MAX / NSEC_PER_SEC) +#else +# define KTIME_SEC_MAX LONG_MAX +#endif + /* * Returns true if the timespec is norm, false if denorm: */ -#define timespec_valid(ts) \ - (((ts)->tv_sec >= 0) && (((unsigned long) (ts)->tv_nsec) < NSEC_PER_SEC)) +static inline bool timespec_valid(const struct timespec *ts) +{ + /* Dates before 1970 are bogus */ + if (ts->tv_sec < 0) + return false; + /* Can't have more nanoseconds then a second */ + if ((unsigned long)ts->tv_nsec >= NSEC_PER_SEC) + return false; + return true; +} + +static inline bool timespec_valid_strict(const struct timespec *ts) +{ + if (!timespec_valid(ts)) + return false; + /* Disallow values that could overflow ktime_t */ + if ((unsigned long long)ts->tv_sec >= KTIME_SEC_MAX) + return false; + return true; +} extern void read_persistent_clock(struct timespec *ts); extern void read_boot_clock(struct timespec *ts); diff --git a/include/linux/usb/audio.h b/include/linux/usb/audio.h index a54b8255d75f..6f8b026b3396 100644 --- a/include/linux/usb/audio.h +++ b/include/linux/usb/audio.h @@ -384,14 +384,16 @@ static inline __u8 uac_processing_unit_iProcessing(struct uac_processing_unit_de int protocol) { __u8 control_size = uac_processing_unit_bControlSize(desc, protocol); - return desc->baSourceID[desc->bNrInPins + control_size]; + return *(uac_processing_unit_bmControls(desc, protocol) + + control_size); } static inline __u8 *uac_processing_unit_specific(struct uac_processing_unit_descriptor *desc, int protocol) { __u8 control_size = 
uac_processing_unit_bControlSize(desc, protocol); - return &desc->baSourceID[desc->bNrInPins + control_size + 1]; + return uac_processing_unit_bmControls(desc, protocol) + + control_size + 1; } /* 4.5.2 Class-Specific AS Interface Descriptor */ diff --git a/include/linux/usb/ch9.h b/include/linux/usb/ch9.h index af21f3115919..091fdb7b9d5f 100644 --- a/include/linux/usb/ch9.h +++ b/include/linux/usb/ch9.h @@ -150,6 +150,12 @@ #define USB_INTRF_FUNC_SUSPEND_LP (1 << (8 + 0)) #define USB_INTRF_FUNC_SUSPEND_RW (1 << (8 + 1)) +/* + * Interface status, Figure 9-5 USB 3.0 spec + */ +#define USB_INTRF_STAT_FUNC_RW_CAP 1 +#define USB_INTRF_STAT_FUNC_RW 2 + #define USB_ENDPOINT_HALT 0 /* IN/OUT will STALL */ /* Bit array elements as returned by the USB_REQ_GET_STATUS request. */ diff --git a/include/linux/vt_kern.h b/include/linux/vt_kern.h index e33d77f15bda..57eeb1454558 100644 --- a/include/linux/vt_kern.h +++ b/include/linux/vt_kern.h @@ -47,6 +47,7 @@ int con_set_cmap(unsigned char __user *cmap); int con_get_cmap(unsigned char __user *cmap); void scrollback(struct vc_data *vc, int lines); void scrollfront(struct vc_data *vc, int lines); +void clear_buffer_attributes(struct vc_data *vc); void update_region(struct vc_data *vc, unsigned long start, int count); void redraw_screen(struct vc_data *vc, int is_switch); #define update_screen(x) redraw_screen(x, 0) @@ -131,6 +132,8 @@ void vt_event_post(unsigned int event, unsigned int old, unsigned int new); int vt_waitactive(int n); void change_console(struct vc_data *new_vc); void reset_vc(struct vc_data *vc); +extern int do_unbind_con_driver(const struct consw *csw, int first, int last, + int deflt); extern int unbind_con_driver(const struct consw *csw, int first, int last, int deflt); int vty_init(const struct file_operations *console_fops); diff --git a/include/linux/xfrm.h b/include/linux/xfrm.h index 22e61fdf75a2..28e493b5b94c 100644 --- a/include/linux/xfrm.h +++ b/include/linux/xfrm.h @@ -84,6 +84,8 @@ struct xfrm_replay_state { __u32 bitmap; }; +#define XFRMA_REPLAY_ESN_MAX 4096 + struct xfrm_replay_state_esn { unsigned int bmp_len; __u32 oseq; diff --git a/include/net/bluetooth/smp.h b/include/net/bluetooth/smp.h index 7b3acdd29134..1bf9bec8e4fe 100644 --- a/include/net/bluetooth/smp.h +++ b/include/net/bluetooth/smp.h @@ -136,7 +136,7 @@ struct smp_chan { }; /* SMP Commands */ -int smp_conn_security(struct l2cap_conn *conn, __u8 sec_level); +int smp_conn_security(struct hci_conn *hcon, __u8 sec_level); int smp_sig_channel(struct l2cap_conn *conn, struct sk_buff *skb); int smp_distribute_keys(struct l2cap_conn *conn, __u8 force); int smp_user_confirm_reply(struct hci_conn *conn, u16 mgmt_op, __le32 passkey); diff --git a/include/net/cfg80211.h b/include/net/cfg80211.h index ef9d5dbec00e..ea9692bd438b 100644 --- a/include/net/cfg80211.h +++ b/include/net/cfg80211.h @@ -2489,6 +2489,15 @@ unsigned int ieee80211_get_hdrlen_from_skb(const struct sk_buff *skb); unsigned int __attribute_const__ ieee80211_hdrlen(__le16 fc); /** + * ieee80211_get_mesh_hdrlen - get mesh extension header length + * @meshhdr: the mesh extension header, only the flags field + * (first byte) will be accessed + * Returns the length of the extension header, which is always at + * least 6 bytes and at most 18 if address 5 and 6 are present. 
+ */ +unsigned int ieee80211_get_mesh_hdrlen(struct ieee80211s_hdr *meshhdr); + +/** * DOC: Data path helpers * * In addition to generic utilities, cfg80211 also offers diff --git a/include/net/inet6_hashtables.h b/include/net/inet6_hashtables.h index 00cbb4384c79..2da45ce8acba 100644 --- a/include/net/inet6_hashtables.h +++ b/include/net/inet6_hashtables.h @@ -28,16 +28,16 @@ struct inet_hashinfo; -/* I have no idea if this is a good hash for v6 or not. -DaveM */ static inline unsigned int inet6_ehashfn(struct net *net, const struct in6_addr *laddr, const u16 lport, const struct in6_addr *faddr, const __be16 fport) { - u32 ports = (lport ^ (__force u16)fport); + u32 ports = (((u32)lport) << 16) | (__force u32)fport; return jhash_3words((__force u32)laddr->s6_addr32[3], - (__force u32)faddr->s6_addr32[3], - ports, inet_ehash_secret + net_hash_mix(net)); + ipv6_addr_jhash(faddr), + ports, + inet_ehash_secret + net_hash_mix(net)); } static inline int inet6_sk_ehashfn(const struct sock *sk) diff --git a/include/net/inet_connection_sock.h b/include/net/inet_connection_sock.h index dbf9aab34c82..e9b05ded948b 100644 --- a/include/net/inet_connection_sock.h +++ b/include/net/inet_connection_sock.h @@ -317,6 +317,7 @@ extern void inet_csk_reqsk_queue_prune(struct sock *parent, const unsigned long max_rto); extern void inet_csk_destroy_sock(struct sock *sk); +extern void inet_csk_prepare_forced_close(struct sock *sk); /* * LISTEN is a special case for poll.. diff --git a/include/net/inet_sock.h b/include/net/inet_sock.h index ae17e1352d7e..8cd2e1d0ec32 100644 --- a/include/net/inet_sock.h +++ b/include/net/inet_sock.h @@ -202,6 +202,7 @@ static inline void inet_sk_copy_descendant(struct sock *sk_to, extern int inet_sk_rebuild_header(struct sock *sk); extern u32 inet_ehash_secret; +extern u32 ipv6_hash_secret; extern void build_ehash_secret(void); static inline unsigned int inet_ehashfn(struct net *net, diff --git a/include/net/ip_vs.h b/include/net/ip_vs.h index 72522f087375..2389959464c3 100644 --- a/include/net/ip_vs.h +++ b/include/net/ip_vs.h @@ -1356,7 +1356,7 @@ static inline void ip_vs_notrack(struct sk_buff *skb) struct nf_conn *ct = nf_ct_get(skb, &ctinfo); if (!ct || !nf_ct_is_untracked(ct)) { - nf_reset(skb); + nf_conntrack_put(skb->nfct); skb->nfct = &nf_ct_untracked_get()->ct_general; skb->nfctinfo = IP_CT_NEW; nf_conntrack_get(skb->nfct); diff --git a/include/net/ipv6.h b/include/net/ipv6.h index e4170a22fc6f..12a1bd2e9879 100644 --- a/include/net/ipv6.h +++ b/include/net/ipv6.h @@ -15,6 +15,7 @@ #include <linux/ipv6.h> #include <linux/hardirq.h> +#include <linux/jhash.h> #include <net/if_inet6.h> #include <net/ndisc.h> #include <net/flow.h> @@ -390,6 +391,17 @@ struct ip6_create_arg { void ip6_frag_init(struct inet_frag_queue *q, void *a); int ip6_frag_match(struct inet_frag_queue *q, void *a); +/* more secured version of ipv6_addr_hash() */ +static inline u32 ipv6_addr_jhash(const struct in6_addr *a) +{ + u32 v = (__force u32)a->s6_addr32[0] ^ (__force u32)a->s6_addr32[1]; + + return jhash_3words(v, + (__force u32)a->s6_addr32[2], + (__force u32)a->s6_addr32[3], + ipv6_hash_secret); +} + static inline int ipv6_addr_any(const struct in6_addr *a) { return (a->s6_addr32[0] | a->s6_addr32[1] | diff --git a/include/net/netfilter/nf_conntrack_ecache.h b/include/net/netfilter/nf_conntrack_ecache.h index a88fb6939387..ea6f8a48ffe5 100644 --- a/include/net/netfilter/nf_conntrack_ecache.h +++ b/include/net/netfilter/nf_conntrack_ecache.h @@ -18,6 +18,7 @@ struct nf_conntrack_ecache { u16 
ctmask; /* bitmask of ct events to be delivered */ u16 expmask; /* bitmask of expect events to be delivered */ u32 pid; /* netlink pid of destroyer */ + struct timer_list timeout; }; static inline struct nf_conntrack_ecache * diff --git a/include/net/scm.h b/include/net/scm.h index d456f4c71a32..0c0017ce23bb 100644 --- a/include/net/scm.h +++ b/include/net/scm.h @@ -71,9 +71,11 @@ static __inline__ void scm_destroy(struct scm_cookie *scm) } static __inline__ int scm_send(struct socket *sock, struct msghdr *msg, - struct scm_cookie *scm) + struct scm_cookie *scm, bool forcecreds) { memset(scm, 0, sizeof(*scm)); + if (forcecreds) + scm_set_cred(scm, task_tgid(current), current_cred()); unix_get_peersec_dgram(sock, scm); if (msg->msg_controllen <= 0) return 0; diff --git a/include/net/sock.h b/include/net/sock.h index 5a0a58ac4126..59a8947a1944 100644 --- a/include/net/sock.h +++ b/include/net/sock.h @@ -216,6 +216,7 @@ struct cg_proto; * @sk_route_nocaps: forbidden route capabilities (e.g NETIF_F_GSO_MASK) * @sk_gso_type: GSO type (e.g. %SKB_GSO_TCPV4) * @sk_gso_max_size: Maximum GSO segment size to build + * @sk_gso_max_segs: Maximum number of GSO segments * @sk_lingertime: %SO_LINGER l_linger setting * @sk_backlog: always used with the per-socket spinlock held * @sk_callback_lock: used with the callbacks in the end of this struct @@ -335,6 +336,7 @@ struct sock { netdev_features_t sk_route_nocaps; int sk_gso_type; unsigned int sk_gso_max_size; + u16 sk_gso_max_segs; int sk_rcvlowat; unsigned long sk_lingertime; struct sk_buff_head sk_error_queue; @@ -942,7 +944,7 @@ static inline void sk_refcnt_debug_dec(struct sock *sk) sk->sk_prot->name, sk, atomic_read(&sk->sk_prot->socks)); } -inline void sk_refcnt_debug_release(const struct sock *sk) +static inline void sk_refcnt_debug_release(const struct sock *sk) { if (atomic_read(&sk->sk_refcnt) != 1) printk(KERN_DEBUG "Destruction of the %s socket %p delayed, refcnt=%d\n", diff --git a/include/net/tcp.h b/include/net/tcp.h index 50660b3e72b8..ed5b0c70d86a 100644 --- a/include/net/tcp.h +++ b/include/net/tcp.h @@ -252,6 +252,7 @@ extern int sysctl_tcp_max_ssthresh; extern int sysctl_tcp_cookie_size; extern int sysctl_tcp_thin_linear_timeouts; extern int sysctl_tcp_thin_dupack; +extern int sysctl_tcp_challenge_ack_limit; extern atomic_long_t tcp_memory_allocated; extern struct percpu_counter tcp_sockets_allocated; diff --git a/include/net/xfrm.h b/include/net/xfrm.h index 96239e78e621..9f7e94ba6696 100644 --- a/include/net/xfrm.h +++ b/include/net/xfrm.h @@ -269,6 +269,9 @@ struct xfrm_replay { int (*check)(struct xfrm_state *x, struct sk_buff *skb, __be32 net_seq); + int (*recheck)(struct xfrm_state *x, + struct sk_buff *skb, + __be32 net_seq); void (*notify)(struct xfrm_state *x, int event); int (*overflow)(struct xfrm_state *x, struct sk_buff *skb); }; diff --git a/include/rdma/rdma_netlink.h b/include/rdma/rdma_netlink.h index 3c5363ab867b..bd3d8b24b420 100644 --- a/include/rdma/rdma_netlink.h +++ b/include/rdma/rdma_netlink.h @@ -39,6 +39,7 @@ struct rdma_cm_id_stats { struct ibnl_client_cbs { int (*dump)(struct sk_buff *skb, struct netlink_callback *nlcb); + struct module *module; }; int ibnl_init(void); diff --git a/include/sound/core.h b/include/sound/core.h index bc056687f647..93896ad1fcdd 100644 --- a/include/sound/core.h +++ b/include/sound/core.h @@ -132,6 +132,7 @@ struct snd_card { int shutdown; /* this card is going down */ int free_on_last_close; /* free in context of file_release */ wait_queue_head_t shutdown_sleep; + atomic_t 
refcount; /* refcount for disconnection */ struct device *dev; /* device assigned to this card */ struct device *card_dev; /* cardX object for sysfs */ @@ -189,6 +190,7 @@ struct snd_minor { const struct file_operations *f_ops; /* file operations */ void *private_data; /* private data for f_ops->open */ struct device *dev; /* device for sysfs */ + struct snd_card *card_ptr; /* assigned card instance */ }; /* return a device pointer linked to each sound device as a parent */ @@ -295,6 +297,7 @@ int snd_card_info_done(void); int snd_component_add(struct snd_card *card, const char *component); int snd_card_file_add(struct snd_card *card, struct file *file); int snd_card_file_remove(struct snd_card *card, struct file *file); +void snd_card_unref(struct snd_card *card); #define snd_card_set_dev(card, devptr) ((card)->dev = (devptr)) diff --git a/include/target/target_core_base.h b/include/target/target_core_base.h index 3ad5b33ee328..569c28236b09 100644 --- a/include/target/target_core_base.h +++ b/include/target/target_core_base.h @@ -779,6 +779,8 @@ struct se_subsystem_dev { }; struct se_device { +#define SE_DEV_LINK_MAGIC 0xfeeddeef + u32 dev_link_magic; /* RELATIVE TARGET PORT IDENTIFER Counter */ u16 dev_rpti_counter; /* Used for SAM Task Attribute ordering */ @@ -869,6 +871,8 @@ struct se_port_stat_grps { }; struct se_lun { +#define SE_LUN_LINK_MAGIC 0xffff7771 + u32 lun_link_magic; /* See transport_lun_status_table */ enum transport_lun_status_table lun_status; u32 lun_access; diff --git a/include/trace/events/kmem.h b/include/trace/events/kmem.h index 5f889f16b0c8..08fa27244da7 100644 --- a/include/trace/events/kmem.h +++ b/include/trace/events/kmem.h @@ -214,7 +214,7 @@ TRACE_EVENT(mm_page_alloc, TP_printk("page=%p pfn=%lu order=%d migratetype=%d gfp_flags=%s", __entry->page, - page_to_pfn(__entry->page), + __entry->page ? page_to_pfn(__entry->page) : 0, __entry->order, __entry->migratetype, show_gfp_flags(__entry->gfp_flags)) @@ -240,7 +240,7 @@ DECLARE_EVENT_CLASS(mm_page, TP_printk("page=%p pfn=%lu order=%u migratetype=%d percpu_refill=%d", __entry->page, - page_to_pfn(__entry->page), + __entry->page ? 
page_to_pfn(__entry->page) : 0, __entry->order, __entry->migratetype, __entry->order == 0) diff --git a/include/trace/events/xen.h b/include/trace/events/xen.h index 92f1a796829e..348c4febdcbb 100644 --- a/include/trace/events/xen.h +++ b/include/trace/events/xen.h @@ -377,6 +377,14 @@ DECLARE_EVENT_CLASS(xen_mmu_pgd, DEFINE_XEN_MMU_PGD_EVENT(xen_mmu_pgd_pin); DEFINE_XEN_MMU_PGD_EVENT(xen_mmu_pgd_unpin); +TRACE_EVENT(xen_mmu_flush_tlb_all, + TP_PROTO(int x), + TP_ARGS(x), + TP_STRUCT__entry(__array(char, x, 0)), + TP_fast_assign((void)x), + TP_printk("%s", "") + ); + TRACE_EVENT(xen_mmu_flush_tlb, TP_PROTO(int x), TP_ARGS(x), diff --git a/include/xen/grant_table.h b/include/xen/grant_table.h index 15f8a00ff003..f0037a89f7db 100644 --- a/include/xen/grant_table.h +++ b/include/xen/grant_table.h @@ -185,6 +185,7 @@ int gnttab_map_refs(struct gnttab_map_grant_ref *map_ops, struct gnttab_map_grant_ref *kmap_ops, struct page **pages, unsigned int count); int gnttab_unmap_refs(struct gnttab_unmap_grant_ref *unmap_ops, - struct page **pages, unsigned int count, bool clear_pte); + struct gnttab_map_grant_ref *kunmap_ops, + struct page **pages, unsigned int count); #endif /* __ASM_GNTTAB_H__ */ diff --git a/init/main.c b/init/main.c index b08c5f75974f..02c138465780 100644 --- a/init/main.c +++ b/init/main.c @@ -602,7 +602,7 @@ asmlinkage void __init start_kernel(void) pidmap_init(); anon_vma_init(); #ifdef CONFIG_X86 - if (efi_enabled) + if (efi_enabled(EFI_RUNTIME_SERVICES)) efi_enter_virtual_mode(); #endif thread_info_cache_init(); @@ -630,6 +630,9 @@ asmlinkage void __init start_kernel(void) acpi_early_init(); /* before LAPIC and SMP init */ sfi_init_late(); + if (efi_enabled(EFI_RUNTIME_SERVICES)) + efi_free_boot_services(); + ftrace_init(); /* Do the rest non-__init'ed, we're now alive */ diff --git a/kernel/async.c b/kernel/async.c index bd0c168a3bbe..32d8dc960263 100644 --- a/kernel/async.c +++ b/kernel/async.c @@ -86,6 +86,13 @@ static async_cookie_t __lowest_in_progress(struct list_head *running) { struct async_entry *entry; + if (!running) { /* just check the entry count */ + if (atomic_read(&entry_count)) + return 0; /* smaller than any cookie */ + else + return next_cookie; + } + if (!list_empty(running)) { entry = list_first_entry(running, struct async_entry, list); @@ -236,9 +243,7 @@ EXPORT_SYMBOL_GPL(async_schedule_domain); */ void async_synchronize_full(void) { - do { - async_synchronize_cookie(next_cookie); - } while (!list_empty(&async_running) || !list_empty(&async_pending)); + async_synchronize_cookie_domain(next_cookie, NULL); } EXPORT_SYMBOL_GPL(async_synchronize_full); @@ -258,7 +263,7 @@ EXPORT_SYMBOL_GPL(async_synchronize_full_domain); /** * async_synchronize_cookie_domain - synchronize asynchronous function calls within a certain domain with cookie checkpointing * @cookie: async_cookie_t to use as checkpoint - * @running: running list to synchronize on + * @running: running list to synchronize on, NULL indicates all lists * * This function waits until all asynchronous function calls for the * synchronization domain specified by the running list @list submitted diff --git a/kernel/audit_tree.c b/kernel/audit_tree.c index 5bf0790497e7..31fdc480b5c6 100644 --- a/kernel/audit_tree.c +++ b/kernel/audit_tree.c @@ -250,7 +250,6 @@ static void untag_chunk(struct node *p) spin_unlock(&hash_lock); spin_unlock(&entry->lock); fsnotify_destroy_mark(entry); - fsnotify_put_mark(entry); goto out; } @@ -259,7 +258,7 @@ static void untag_chunk(struct node *p) 
fsnotify_duplicate_mark(&new->mark, entry); if (fsnotify_add_mark(&new->mark, new->mark.group, new->mark.i.inode, NULL, 1)) { - free_chunk(new); + fsnotify_put_mark(&new->mark); goto Fallback; } @@ -293,7 +292,6 @@ static void untag_chunk(struct node *p) spin_unlock(&hash_lock); spin_unlock(&entry->lock); fsnotify_destroy_mark(entry); - fsnotify_put_mark(entry); goto out; Fallback: @@ -322,7 +320,7 @@ static int create_chunk(struct inode *inode, struct audit_tree *tree) entry = &chunk->mark; if (fsnotify_add_mark(entry, audit_tree_group, inode, NULL, 0)) { - free_chunk(chunk); + fsnotify_put_mark(entry); return -ENOSPC; } @@ -332,6 +330,7 @@ static int create_chunk(struct inode *inode, struct audit_tree *tree) spin_unlock(&hash_lock); chunk->dead = 1; spin_unlock(&entry->lock); + fsnotify_get_mark(entry); fsnotify_destroy_mark(entry); fsnotify_put_mark(entry); return 0; @@ -396,7 +395,7 @@ static int tag_chunk(struct inode *inode, struct audit_tree *tree) fsnotify_duplicate_mark(chunk_entry, old_entry); if (fsnotify_add_mark(chunk_entry, chunk_entry->group, chunk_entry->i.inode, NULL, 1)) { spin_unlock(&old_entry->lock); - free_chunk(chunk); + fsnotify_put_mark(chunk_entry); fsnotify_put_mark(old_entry); return -ENOSPC; } @@ -412,6 +411,7 @@ static int tag_chunk(struct inode *inode, struct audit_tree *tree) spin_unlock(&chunk_entry->lock); spin_unlock(&old_entry->lock); + fsnotify_get_mark(chunk_entry); fsnotify_destroy_mark(chunk_entry); fsnotify_put_mark(chunk_entry); @@ -445,7 +445,6 @@ static int tag_chunk(struct inode *inode, struct audit_tree *tree) spin_unlock(&old_entry->lock); fsnotify_destroy_mark(old_entry); fsnotify_put_mark(old_entry); /* pair to fsnotify_find mark_entry */ - fsnotify_put_mark(old_entry); /* and kill it */ return 0; } diff --git a/kernel/cgroup.c b/kernel/cgroup.c index 145deeb69bc3..ad447684aa47 100644 --- a/kernel/cgroup.c +++ b/kernel/cgroup.c @@ -386,10 +386,22 @@ static void free_css_set_work(struct work_struct *work) struct cgroup *cgrp = link->cgrp; list_del(&link->cg_link_list); list_del(&link->cgrp_link_list); - if (atomic_dec_and_test(&cgrp->count)) { + + + /* + * We may not be holding cgroup_mutex, and if cgrp->count is + * dropped to 0 the cgroup can be destroyed at any time, hence + * rcu_read_lock is used to keep it alive. + */ + rcu_read_lock(); + if (atomic_dec_and_test(&cgrp->count) && + notify_on_release(cgrp)) { check_for_release(cgrp); cgroup_wakeup_rmdir_waiter(cgrp); } + + rcu_read_unlock(); + kfree(link); } write_unlock(&css_set_lock); @@ -1878,9 +1890,8 @@ static void cgroup_task_migrate(struct cgroup *cgrp, struct cgroup *oldcgrp, * trading it for newcg is protected by cgroup_mutex, we're safe to drop * it here; it will be freed under RCU. */ - put_css_set(oldcg); - set_bit(CGRP_RELEASABLE, &oldcgrp->flags); + put_css_set(oldcg); } /** @@ -2623,9 +2634,7 @@ static int cgroup_create_dir(struct cgroup *cgrp, struct dentry *dentry, dentry->d_fsdata = cgrp; inc_nlink(parent->d_inode); rcu_assign_pointer(cgrp->dentry, dentry); - dget(dentry); } - dput(dentry); return error; } @@ -4533,31 +4542,20 @@ static const struct file_operations proc_cgroupstats_operations = { * * A pointer to the shared css_set was automatically copied in * fork.c by dup_task_struct(). However, we ignore that copy, since - * it was not made under the protection of RCU, cgroup_mutex or - * threadgroup_change_begin(), so it might no longer be a valid - * cgroup pointer. 
cgroup_attach_task() might have already changed - * current->cgroups, allowing the previously referenced cgroup - * group to be removed and freed. - * - * Outside the pointer validity we also need to process the css_set - * inheritance between threadgoup_change_begin() and - * threadgoup_change_end(), this way there is no leak in any process - * wide migration performed by cgroup_attach_proc() that could otherwise - * miss a thread because it is too early or too late in the fork stage. + * it was not made under the protection of RCU or cgroup_mutex, so + * might no longer be a valid cgroup pointer. cgroup_attach_task() might + * have already changed current->cgroups, allowing the previously + * referenced cgroup group to be removed and freed. * * At the point that cgroup_fork() is called, 'current' is the parent * task, and the passed argument 'child' points to the child task. */ void cgroup_fork(struct task_struct *child) { - /* - * We don't need to task_lock() current because current->cgroups - * can't be changed concurrently here. The parent obviously hasn't - * exited and called cgroup_exit(), and we are synchronized against - * cgroup migration through threadgroup_change_begin(). - */ + task_lock(current); child->cgroups = current->cgroups; get_css_set(child->cgroups); + task_unlock(current); INIT_LIST_HEAD(&child->cg_list); } @@ -4610,19 +4608,10 @@ void cgroup_post_fork(struct task_struct *child) */ if (use_task_css_set_links) { write_lock(&css_set_lock); - if (list_empty(&child->cg_list)) { - /* - * It's safe to use child->cgroups without task_lock() - * here because we are protected through - * threadgroup_change_begin() against concurrent - * css_set change in cgroup_task_migrate(). Also - * the task can't exit at that point until - * wake_up_new_task() is called, so we are protected - * against cgroup_exit() setting child->cgroup to - * init_css_set. - */ + task_lock(child); + if (list_empty(&child->cg_list)) list_add(&child->cg_list, &child->cgroups->tasks); - } + task_unlock(child); write_unlock(&css_set_lock); } } diff --git a/kernel/cpuset.c b/kernel/cpuset.c index 48b90d30797f..de11f2f560de 100644 --- a/kernel/cpuset.c +++ b/kernel/cpuset.c @@ -2100,6 +2100,9 @@ static void scan_for_empty_cpusets(struct cpuset *root) * (of no affect) on systems that are actively using CPU hotplug * but making no active use of cpusets. * + * The only exception to this is suspend/resume, where we don't + * modify cpusets at all. + * * This routine ensures that top_cpuset.cpus_allowed tracks * cpu_active_mask on each CPU hotplug (cpuhp) event. * @@ -2511,8 +2514,16 @@ void cpuset_print_task_mems_allowed(struct task_struct *tsk) dentry = task_cs(tsk)->css.cgroup->dentry; spin_lock(&cpuset_buffer_lock); - snprintf(cpuset_name, CPUSET_NAME_LEN, - dentry ? 
(const char *)dentry->d_name.name : "/"); + + if (!dentry) { + strcpy(cpuset_name, "/"); + } else { + spin_lock(&dentry->d_lock); + strlcpy(cpuset_name, (const char *)dentry->d_name.name, + CPUSET_NAME_LEN); + spin_unlock(&dentry->d_lock); + } + nodelist_scnprintf(cpuset_nodelist, CPUSET_NODELIST_LEN, tsk->mems_allowed); printk(KERN_INFO "%s cpuset=%s mems_allowed=%s\n", diff --git a/kernel/debug/kdb/kdb_io.c b/kernel/debug/kdb/kdb_io.c index 18a4cb33c52b..81b31bba4ae5 100644 --- a/kernel/debug/kdb/kdb_io.c +++ b/kernel/debug/kdb/kdb_io.c @@ -560,6 +560,7 @@ int vkdb_printf(const char *fmt, va_list ap) { int diag; int linecount; + int colcount; int logging, saved_loglevel = 0; int saved_trap_printk; int got_printf_lock = 0; @@ -592,6 +593,10 @@ int vkdb_printf(const char *fmt, va_list ap) if (diag || linecount <= 1) linecount = 24; + diag = kdbgetintenv("COLUMNS", &colcount); + if (diag || colcount <= 1) + colcount = 80; + diag = kdbgetintenv("LOGGING", &logging); if (diag) logging = 0; @@ -698,7 +703,7 @@ kdb_printit: gdbstub_msg_write(kdb_buffer, retlen); } else { if (dbg_io_ops && !dbg_io_ops->is_console) { - len = strlen(kdb_buffer); + len = retlen; cp = kdb_buffer; while (len--) { dbg_io_ops->write_char(*cp); @@ -717,11 +722,29 @@ kdb_printit: printk(KERN_INFO "%s", kdb_buffer); } - if (KDB_STATE(PAGER) && strchr(kdb_buffer, '\n')) - kdb_nextline++; + if (KDB_STATE(PAGER)) { + /* + * Check printed string to decide how to bump the + * kdb_nextline to control when the more prompt should + * show up. + */ + int got = 0; + len = retlen; + while (len--) { + if (kdb_buffer[len] == '\n') { + kdb_nextline++; + got = 0; + } else if (kdb_buffer[len] == '\r') { + got = 0; + } else { + got++; + } + } + kdb_nextline += got / (colcount + 1); + } /* check for having reached the LINES number of printed lines */ - if (kdb_nextline == linecount) { + if (kdb_nextline >= linecount) { char buf1[16] = ""; #if defined(CONFIG_SMP) char buf2[32]; @@ -784,7 +807,7 @@ kdb_printit: kdb_grepping_flag = 0; kdb_printf("\n"); } else if (buf1[0] == ' ') { - kdb_printf("\n"); + kdb_printf("\r"); suspend_grep = 1; /* for this recursion */ } else if (buf1[0] == '\n') { kdb_nextline = linecount - 1; diff --git a/kernel/events/core.c b/kernel/events/core.c index fd126f82b57c..228fdb042fad 100644 --- a/kernel/events/core.c +++ b/kernel/events/core.c @@ -2929,12 +2929,12 @@ EXPORT_SYMBOL_GPL(perf_event_release_kernel); /* * Called when the last reference to the file is gone. 
*/ -static int perf_release(struct inode *inode, struct file *file) +static void put_event(struct perf_event *event) { - struct perf_event *event = file->private_data; struct task_struct *owner; - file->private_data = NULL; + if (!atomic_long_dec_and_test(&event->refcount)) + return; rcu_read_lock(); owner = ACCESS_ONCE(event->owner); @@ -2969,7 +2969,13 @@ static int perf_release(struct inode *inode, struct file *file) put_task_struct(owner); } - return perf_event_release_kernel(event); + perf_event_release_kernel(event); +} + +static int perf_release(struct inode *inode, struct file *file) +{ + put_event(file->private_data); + return 0; } u64 perf_event_read_value(struct perf_event *event, u64 *enabled, u64 *running) @@ -3222,7 +3228,7 @@ unlock: static const struct file_operations perf_fops; -static struct perf_event *perf_fget_light(int fd, int *fput_needed) +static struct file *perf_fget_light(int fd, int *fput_needed) { struct file *file; @@ -3236,7 +3242,7 @@ static struct perf_event *perf_fget_light(int fd, int *fput_needed) return ERR_PTR(-EBADF); } - return file->private_data; + return file; } static int perf_event_set_output(struct perf_event *event, @@ -3268,19 +3274,21 @@ static long perf_ioctl(struct file *file, unsigned int cmd, unsigned long arg) case PERF_EVENT_IOC_SET_OUTPUT: { + struct file *output_file = NULL; struct perf_event *output_event = NULL; int fput_needed = 0; int ret; if (arg != -1) { - output_event = perf_fget_light(arg, &fput_needed); - if (IS_ERR(output_event)) - return PTR_ERR(output_event); + output_file = perf_fget_light(arg, &fput_needed); + if (IS_ERR(output_file)) + return PTR_ERR(output_file); + output_event = output_file->private_data; } ret = perf_event_set_output(event, output_event); if (output_event) - fput_light(output_event->filp, fput_needed); + fput_light(output_file, fput_needed); return ret; } @@ -5920,6 +5928,7 @@ perf_event_alloc(struct perf_event_attr *attr, int cpu, mutex_init(&event->mmap_mutex); + atomic_long_set(&event->refcount, 1); event->cpu = cpu; event->attr = *attr; event->group_leader = group_leader; @@ -6230,12 +6239,12 @@ SYSCALL_DEFINE5(perf_event_open, return event_fd; if (group_fd != -1) { - group_leader = perf_fget_light(group_fd, &fput_needed); - if (IS_ERR(group_leader)) { - err = PTR_ERR(group_leader); + group_file = perf_fget_light(group_fd, &fput_needed); + if (IS_ERR(group_file)) { + err = PTR_ERR(group_file); goto err_fd; } - group_file = group_leader->filp; + group_leader = group_file->private_data; if (flags & PERF_FLAG_FD_OUTPUT) output_event = group_leader; if (flags & PERF_FLAG_FD_NO_GROUP) @@ -6370,7 +6379,6 @@ SYSCALL_DEFINE5(perf_event_open, put_ctx(gctx); } - event->filp = event_file; WARN_ON_ONCE(ctx->parent_ctx); mutex_lock(&ctx->mutex); @@ -6460,7 +6468,6 @@ perf_event_create_kernel_counter(struct perf_event_attr *attr, int cpu, goto err_free; } - event->filp = NULL; WARN_ON_ONCE(ctx->parent_ctx); mutex_lock(&ctx->mutex); perf_install_in_context(ctx, event, cpu); @@ -6509,7 +6516,7 @@ static void sync_child_event(struct perf_event *child_event, * Release the parent event, if this was the last * reference to it. 
*/ - fput(parent_event->filp); + put_event(parent_event); } static void @@ -6585,9 +6592,8 @@ static void perf_event_exit_task_context(struct task_struct *child, int ctxn) * * __perf_event_exit_task() * sync_child_event() - * fput(parent_event->filp) - * perf_release() - * mutex_lock(&ctx->mutex) + * put_event() + * mutex_lock(&ctx->mutex) * * But since its the parent context it won't be the same instance. */ @@ -6655,7 +6661,7 @@ static void perf_free_event(struct perf_event *event, list_del_init(&event->child_list); mutex_unlock(&parent->child_mutex); - fput(parent->filp); + put_event(parent); perf_group_detach(event); list_del_event(event, ctx); @@ -6735,6 +6741,12 @@ inherit_event(struct perf_event *parent_event, NULL, NULL); if (IS_ERR(child_event)) return child_event; + + if (!atomic_long_inc_not_zero(&parent_event->refcount)) { + free_event(child_event); + return NULL; + } + get_ctx(child_ctx); /* @@ -6776,14 +6788,6 @@ inherit_event(struct perf_event *parent_event, raw_spin_unlock_irqrestore(&child_ctx->lock, flags); /* - * Get a reference to the parent filp - we will fput it - * when the child event exits. This is safe to do because - * we are in the parent and we know that the filp still - * exists and has a nonzero count: - */ - atomic_long_inc(&parent_event->filp->f_count); - - /* * Link this into the parent event's child list */ WARN_ON_ONCE(parent_event->ctx->parent_ctx); diff --git a/kernel/futex.c b/kernel/futex.c index 3717e7b306e0..887943044d46 100644 --- a/kernel/futex.c +++ b/kernel/futex.c @@ -716,7 +716,7 @@ static int futex_lock_pi_atomic(u32 __user *uaddr, struct futex_hash_bucket *hb, struct futex_pi_state **ps, struct task_struct *task, int set_waiters) { - int lock_taken, ret, ownerdied = 0; + int lock_taken, ret, force_take = 0; u32 uval, newval, curval, vpid = task_pid_vnr(task); retry: @@ -755,17 +755,15 @@ retry: newval = curval | FUTEX_WAITERS; /* - * There are two cases, where a futex might have no owner (the - * owner TID is 0): OWNER_DIED. We take over the futex in this - * case. We also do an unconditional take over, when the owner - * of the futex died. - * - * This is safe as we are protected by the hash bucket lock ! + * Should we force take the futex? See below. */ - if (unlikely(ownerdied || !(curval & FUTEX_TID_MASK))) { - /* Keep the OWNER_DIED bit */ + if (unlikely(force_take)) { + /* + * Keep the OWNER_DIED and the WAITERS bit and set the + * new TID value. + */ newval = (curval & ~FUTEX_TID_MASK) | vpid; - ownerdied = 0; + force_take = 0; lock_taken = 1; } @@ -775,7 +773,7 @@ retry: goto retry; /* - * We took the lock due to owner died take over. + * We took the lock due to forced take over. */ if (unlikely(lock_taken)) return 1; @@ -790,20 +788,25 @@ retry: switch (ret) { case -ESRCH: /* - * No owner found for this futex. Check if the - * OWNER_DIED bit is set to figure out whether - * this is a robust futex or not. + * We failed to find an owner for this + * futex. So we have no pi_state to block + * on. This can happen in two cases: + * + * 1) The owner died + * 2) A stale FUTEX_WAITERS bit + * + * Re-read the futex value. */ if (get_futex_value_locked(&curval, uaddr)) return -EFAULT; /* - * We simply start over in case of a robust - * futex. The code above will take the futex - * and return happy. + * If the owner died or we have a stale + * WAITERS bit the owner TID in the user space + * futex is 0. 
*/ - if (curval & FUTEX_OWNER_DIED) { - ownerdied = 1; + if (!(curval & FUTEX_TID_MASK)) { + force_take = 1; goto retry; } default: @@ -840,6 +843,9 @@ static void wake_futex(struct futex_q *q) { struct task_struct *p = q->task; + if (WARN(q->pi_state || q->rt_waiter, "refusing to wake PI futex\n")) + return; + /* * We set q->lock_ptr = NULL _before_ we wake up the task. If * a non-futex wake up happens on another CPU then the task @@ -1075,6 +1081,10 @@ retry_private: plist_for_each_entry_safe(this, next, head, list) { if (match_futex (&this->key, &key1)) { + if (this->pi_state || this->rt_waiter) { + ret = -EINVAL; + goto out_unlock; + } wake_futex(this); if (++ret >= nr_wake) break; @@ -1087,6 +1097,10 @@ retry_private: op_ret = 0; plist_for_each_entry_safe(this, next, head, list) { if (match_futex (&this->key, &key2)) { + if (this->pi_state || this->rt_waiter) { + ret = -EINVAL; + goto out_unlock; + } wake_futex(this); if (++op_ret >= nr_wake2) break; @@ -1095,6 +1109,7 @@ retry_private: ret += op_ret; } +out_unlock: double_unlock_hb(hb1, hb2); out_put_keys: put_futex_key(&key2); @@ -1384,9 +1399,13 @@ retry_private: /* * FUTEX_WAIT_REQEUE_PI and FUTEX_CMP_REQUEUE_PI should always * be paired with each other and no other futex ops. + * + * We should never be requeueing a futex_q with a pi_state, + * which is awaiting a futex_unlock_pi(). */ if ((requeue_pi && !this->rt_waiter) || - (!requeue_pi && this->rt_waiter)) { + (!requeue_pi && this->rt_waiter) || + this->pi_state) { ret = -EINVAL; break; } @@ -2452,8 +2471,6 @@ SYSCALL_DEFINE3(get_robust_list, int, pid, if (!futex_cmpxchg_enabled) return -ENOSYS; - WARN_ONCE(1, "deprecated: get_robust_list will be deleted in 2013.\n"); - rcu_read_lock(); ret = -ESRCH; diff --git a/kernel/futex_compat.c b/kernel/futex_compat.c index 83e368b005fc..a9642d528630 100644 --- a/kernel/futex_compat.c +++ b/kernel/futex_compat.c @@ -142,8 +142,6 @@ compat_sys_get_robust_list(int pid, compat_uptr_t __user *head_ptr, if (!futex_cmpxchg_enabled) return -ENOSYS; - WARN_ONCE(1, "deprecated: get_robust_list will be deleted in 2013.\n"); - rcu_read_lock(); ret = -ESRCH; diff --git a/kernel/hrtimer.c b/kernel/hrtimer.c index 6db7a5ed52b5..cdd5607c0ceb 100644 --- a/kernel/hrtimer.c +++ b/kernel/hrtimer.c @@ -640,21 +640,9 @@ static inline void hrtimer_init_hres(struct hrtimer_cpu_base *base) * and expiry check is done in the hrtimer_interrupt or in the softirq. */ static inline int hrtimer_enqueue_reprogram(struct hrtimer *timer, - struct hrtimer_clock_base *base, - int wakeup) + struct hrtimer_clock_base *base) { - if (base->cpu_base->hres_active && hrtimer_reprogram(timer, base)) { - if (wakeup) { - raw_spin_unlock(&base->cpu_base->lock); - raise_softirq_irqoff(HRTIMER_SOFTIRQ); - raw_spin_lock(&base->cpu_base->lock); - } else - __raise_softirq_irqoff(HRTIMER_SOFTIRQ); - - return 1; - } - - return 0; + return base->cpu_base->hres_active && hrtimer_reprogram(timer, base); } static inline ktime_t hrtimer_update_base(struct hrtimer_cpu_base *base) @@ -735,8 +723,7 @@ static inline int hrtimer_switch_to_hres(void) { return 0; } static inline void hrtimer_force_reprogram(struct hrtimer_cpu_base *base, int skip_equal) { } static inline int hrtimer_enqueue_reprogram(struct hrtimer *timer, - struct hrtimer_clock_base *base, - int wakeup) + struct hrtimer_clock_base *base) { return 0; } @@ -995,8 +982,21 @@ int __hrtimer_start_range_ns(struct hrtimer *timer, ktime_t tim, * * XXX send_remote_softirq() ? 
*/ - if (leftmost && new_base->cpu_base == &__get_cpu_var(hrtimer_bases)) - hrtimer_enqueue_reprogram(timer, new_base, wakeup); + if (leftmost && new_base->cpu_base == &__get_cpu_var(hrtimer_bases) + && hrtimer_enqueue_reprogram(timer, new_base)) { + if (wakeup) { + /* + * We need to drop cpu_base->lock to avoid a + * lock ordering issue vs. rq->lock. + */ + raw_spin_unlock(&new_base->cpu_base->lock); + raise_softirq_irqoff(HRTIMER_SOFTIRQ); + local_irq_restore(flags); + return ret; + } else { + __raise_softirq_irqoff(HRTIMER_SOFTIRQ); + } + } unlock_hrtimer_base(timer, &flags); diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c index b9d1d83ec381..7684920f4f66 100644 --- a/kernel/irq/manage.c +++ b/kernel/irq/manage.c @@ -708,6 +708,7 @@ static void irq_thread_check_affinity(struct irq_desc *desc, struct irqaction *action) { cpumask_var_t mask; + bool valid = true; if (!test_and_clear_bit(IRQTF_AFFINITY, &action->thread_flags)) return; @@ -722,10 +723,18 @@ irq_thread_check_affinity(struct irq_desc *desc, struct irqaction *action) } raw_spin_lock_irq(&desc->lock); - cpumask_copy(mask, desc->irq_data.affinity); + /* + * This code is triggered unconditionally. Check the affinity + * mask pointer. For CPU_MASK_OFFSTACK=n this is optimized out. + */ + if (desc->irq_data.affinity) + cpumask_copy(mask, desc->irq_data.affinity); + else + valid = false; raw_spin_unlock_irq(&desc->lock); - set_cpus_allowed_ptr(current, mask); + if (valid) + set_cpus_allowed_ptr(current, mask); free_cpumask_var(mask); } #else @@ -933,6 +942,16 @@ __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new) */ get_task_struct(t); new->thread = t; + /* + * Tell the thread to set its affinity. This is + * important for shared interrupt handlers as we do + * not invoke setup_affinity() for the secondary + * handlers as everything is already set up. Even for + * interrupts marked with IRQF_NO_BALANCE this is + * correct as we want the thread to move to the cpu(s) + * on which the requesting code placed the interrupt. + */ + set_bit(IRQTF_AFFINITY, &new->thread_flags); } if (!alloc_cpumask_var(&mask, GFP_KERNEL)) { diff --git a/kernel/irq/spurious.c b/kernel/irq/spurious.c index 611cd6003c45..7b5f012bde9d 100644 --- a/kernel/irq/spurious.c +++ b/kernel/irq/spurious.c @@ -80,13 +80,11 @@ static int try_one_irq(int irq, struct irq_desc *desc, bool force) /* * All handlers must agree on IRQF_SHARED, so we test just the - * first. Check for action->next as well. + * first. */ action = desc->action; if (!action || !(action->flags & IRQF_SHARED) || - (action->flags & __IRQF_TIMER) || - (action->handler(irq, action->dev_id) == IRQ_HANDLED) || - !action->next) + (action->flags & __IRQF_TIMER)) goto out; /* Already running on another processor */ @@ -104,6 +102,7 @@ static int try_one_irq(int irq, struct irq_desc *desc, bool force) do { if (handle_irq_event(desc) == IRQ_HANDLED) ret = IRQ_HANDLED; + /* Make sure that there is still a valid action */ action = desc->action; } while ((desc->istate & IRQS_PENDING) && action); desc->istate &= ~IRQS_POLL_INPROGRESS; diff --git a/kernel/module.c b/kernel/module.c index b084bf116fc4..b0d9eb4f076a 100644 --- a/kernel/module.c +++ b/kernel/module.c @@ -2273,12 +2273,17 @@ static void layout_symtab(struct module *mod, struct load_info *info) src = (void *)info->hdr + symsect->sh_offset; nsrc = symsect->sh_size / sizeof(*src); + /* strtab always starts with a nul, so offset 0 is the empty string. 
*/ + strtab_size = 1; + /* Compute total space required for the core symbols' strtab. */ - for (ndst = i = strtab_size = 1; i < nsrc; ++i, ++src) - if (is_core_symbol(src, info->sechdrs, info->hdr->e_shnum)) { - strtab_size += strlen(&info->strtab[src->st_name]) + 1; + for (ndst = i = 0; i < nsrc; i++) { + if (i == 0 || + is_core_symbol(src+i, info->sechdrs, info->hdr->e_shnum)) { + strtab_size += strlen(&info->strtab[src[i].st_name])+1; ndst++; } + } /* Append room for core symbols at end of core part. */ info->symoffs = ALIGN(mod->core_size, symsect->sh_addralign ?: 1); @@ -2312,15 +2317,15 @@ static void add_kallsyms(struct module *mod, const struct load_info *info) mod->core_symtab = dst = mod->module_core + info->symoffs; mod->core_strtab = s = mod->module_core + info->stroffs; src = mod->symtab; - *dst = *src; *s++ = 0; - for (ndst = i = 1; i < mod->num_symtab; ++i, ++src) { - if (!is_core_symbol(src, info->sechdrs, info->hdr->e_shnum)) - continue; - - dst[ndst] = *src; - dst[ndst++].st_name = s - mod->core_strtab; - s += strlcpy(s, &mod->strtab[src->st_name], KSYM_NAME_LEN) + 1; + for (ndst = i = 0; i < mod->num_symtab; i++) { + if (i == 0 || + is_core_symbol(src+i, info->sechdrs, info->hdr->e_shnum)) { + dst[ndst] = src[i]; + dst[ndst++].st_name = s - mod->core_strtab; + s += strlcpy(s, &mod->strtab[src[i].st_name], + KSYM_NAME_LEN) + 1; + } } mod->core_num_syms = ndst; } @@ -2729,6 +2734,10 @@ static int check_module_license_and_versions(struct module *mod) if (strcmp(mod->name, "driverloader") == 0) add_taint_module(mod, TAINT_PROPRIETARY_MODULE); + /* lve claims to be GPL but upstream won't provide source */ + if (strcmp(mod->name, "lve") == 0) + add_taint_module(mod, TAINT_PROPRIETARY_MODULE); + #ifdef CONFIG_MODVERSIONS if ((mod->num_syms && !mod->crcs) || (mod->num_gpl_syms && !mod->gpl_crcs) diff --git a/kernel/posix-cpu-timers.c b/kernel/posix-cpu-timers.c index 125cb67daa21..acbb79c9092c 100644 --- a/kernel/posix-cpu-timers.c +++ b/kernel/posix-cpu-timers.c @@ -1422,8 +1422,10 @@ static int do_cpu_nanosleep(const clockid_t which_clock, int flags, while (!signal_pending(current)) { if (timer.it.cpu.expires.sched == 0) { /* - * Our timer fired and was reset. + * Our timer fired and was reset, below + * deletion can not fail. */ + posix_cpu_timer_del(&timer); spin_unlock_irq(&timer.it_lock); return 0; } @@ -1441,9 +1443,26 @@ static int do_cpu_nanosleep(const clockid_t which_clock, int flags, * We were interrupted by a signal. */ sample_to_timespec(which_clock, timer.it.cpu.expires, rqtp); - posix_cpu_timer_set(&timer, 0, &zero_it, it); + error = posix_cpu_timer_set(&timer, 0, &zero_it, it); + if (!error) { + /* + * Timer is now unarmed, deletion can not fail. + */ + posix_cpu_timer_del(&timer); + } spin_unlock_irq(&timer.it_lock); + while (error == TIMER_RETRY) { + /* + * We need to handle case when timer was or is in the + * middle of firing. In other cases we already freed + * resources. + */ + spin_lock_irq(&timer.it_lock); + error = posix_cpu_timer_del(&timer); + spin_unlock_irq(&timer.it_lock); + } + if ((it->it_value.tv_sec | it->it_value.tv_nsec) == 0) { /* * It actually did fire already. 
diff --git a/kernel/posix-timers.c b/kernel/posix-timers.c index 69185ae6b701..e885be1e8a11 100644 --- a/kernel/posix-timers.c +++ b/kernel/posix-timers.c @@ -639,6 +639,13 @@ static struct k_itimer *__lock_timer(timer_t timer_id, unsigned long *flags) { struct k_itimer *timr; + /* + * timer_t could be any type >= int and we want to make sure any + * @timer_id outside positive int range fails lookup. + */ + if ((unsigned long long)timer_id > INT_MAX) + return NULL; + rcu_read_lock(); timr = idr_find(&posix_timers_id, (int)timer_id); if (timr) { diff --git a/kernel/printk.c b/kernel/printk.c index 7a8b101b237a..c518d79d9150 100644 --- a/kernel/printk.c +++ b/kernel/printk.c @@ -689,8 +689,19 @@ static void call_console_drivers(unsigned start, unsigned end) start_print = start; while (cur_index != end) { if (msg_level < 0 && ((end - cur_index) > 2)) { + /* + * prepare buf_prefix, as a contiguous array, + * to be processed by log_prefix function + */ + char buf_prefix[SYSLOG_PRI_MAX_LENGTH+1]; + unsigned i; + for (i = 0; i < ((end - cur_index)) && (i < SYSLOG_PRI_MAX_LENGTH); i++) { + buf_prefix[i] = LOG_BUF(cur_index + i); + } + buf_prefix[i] = '\0'; /* force '\0' as last string character */ + /* strip log prefix */ - cur_index += log_prefix(&LOG_BUF(cur_index), &msg_level, NULL); + cur_index += log_prefix((const char *)&buf_prefix, &msg_level, NULL); start_print = cur_index; } while (cur_index != end) { diff --git a/kernel/ptrace.c b/kernel/ptrace.c index ee8d49b9c309..daf4394d1aba 100644 --- a/kernel/ptrace.c +++ b/kernel/ptrace.c @@ -117,11 +117,45 @@ void __ptrace_unlink(struct task_struct *child) * TASK_KILLABLE sleeps. */ if (child->jobctl & JOBCTL_STOP_PENDING || task_is_traced(child)) - signal_wake_up(child, task_is_traced(child)); + ptrace_signal_wake_up(child, true); spin_unlock(&child->sighand->siglock); } +/* Ensure that nothing can wake it up, even SIGKILL */ +static bool ptrace_freeze_traced(struct task_struct *task) +{ + bool ret = false; + + /* Lockless, nobody but us can set this flag */ + if (task->jobctl & JOBCTL_LISTENING) + return ret; + + spin_lock_irq(&task->sighand->siglock); + if (task_is_traced(task) && !__fatal_signal_pending(task)) { + task->state = __TASK_TRACED; + ret = true; + } + spin_unlock_irq(&task->sighand->siglock); + + return ret; +} + +static void ptrace_unfreeze_traced(struct task_struct *task) +{ + if (task->state != __TASK_TRACED) + return; + + WARN_ON(!task->ptrace || task->parent != current); + + spin_lock_irq(&task->sighand->siglock); + if (__fatal_signal_pending(task)) + wake_up_state(task, __TASK_TRACED); + else + task->state = TASK_TRACED; + spin_unlock_irq(&task->sighand->siglock); +} + /** * ptrace_check_attach - check whether ptracee is ready for ptrace operation * @child: ptracee to check for @@ -151,24 +185,29 @@ int ptrace_check_attach(struct task_struct *child, bool ignore_state) * be changed by us so it's not changing right after this. */ read_lock(&tasklist_lock); - if ((child->ptrace & PT_PTRACED) && child->parent == current) { + if (child->ptrace && child->parent == current) { + WARN_ON(child->state == __TASK_TRACED); /* * child->sighand can't be NULL, release_task() * does ptrace_unlink() before __exit_signal(). 
*/ - spin_lock_irq(&child->sighand->siglock); - WARN_ON_ONCE(task_is_stopped(child)); - if (ignore_state || (task_is_traced(child) && - !(child->jobctl & JOBCTL_LISTENING))) + if (ignore_state || ptrace_freeze_traced(child)) ret = 0; - spin_unlock_irq(&child->sighand->siglock); } read_unlock(&tasklist_lock); - if (!ret && !ignore_state) - ret = wait_task_inactive(child, TASK_TRACED) ? 0 : -ESRCH; + if (!ret && !ignore_state) { + if (!wait_task_inactive(child, __TASK_TRACED)) { + /* + * This can only happen if may_ptrace_stop() fails and + * ptrace_stop() changes ->state back to TASK_RUNNING, + * so we should not worry about leaking __TASK_TRACED. + */ + WARN_ON(child->state == __TASK_TRACED); + ret = -ESRCH; + } + } - /* All systems go.. */ return ret; } @@ -311,7 +350,7 @@ static int ptrace_attach(struct task_struct *task, long request, */ if (task_is_stopped(task) && task_set_jobctl_pending(task, JOBCTL_TRAP_STOP | JOBCTL_TRAPPING)) - signal_wake_up(task, 1); + signal_wake_up_state(task, __TASK_STOPPED); spin_unlock(&task->sighand->siglock); @@ -728,7 +767,7 @@ int ptrace_request(struct task_struct *child, long request, * tracee into STOP. */ if (likely(task_set_jobctl_pending(child, JOBCTL_TRAP_STOP))) - signal_wake_up(child, child->jobctl & JOBCTL_LISTENING); + ptrace_signal_wake_up(child, child->jobctl & JOBCTL_LISTENING); unlock_task_sighand(child, &flags); ret = 0; @@ -754,7 +793,7 @@ int ptrace_request(struct task_struct *child, long request, * start of this trap and now. Trigger re-trap. */ if (child->jobctl & JOBCTL_TRAP_NOTIFY) - signal_wake_up(child, true); + ptrace_signal_wake_up(child, true); ret = 0; } unlock_task_sighand(child, &flags); @@ -891,6 +930,8 @@ SYSCALL_DEFINE4(ptrace, long, request, long, pid, unsigned long, addr, goto out_put_task_struct; ret = arch_ptrace(child, request, addr, data); + if (ret || request != PTRACE_DETACH) + ptrace_unfreeze_traced(child); out_put_task_struct: put_task_struct(child); @@ -1030,8 +1071,11 @@ asmlinkage long compat_sys_ptrace(compat_long_t request, compat_long_t pid, ret = ptrace_check_attach(child, request == PTRACE_KILL || request == PTRACE_INTERRUPT); - if (!ret) + if (!ret) { ret = compat_arch_ptrace(child, request, addr, data); + if (ret || request != PTRACE_DETACH) + ptrace_unfreeze_traced(child); + } out_put_task_struct: put_task_struct(child); diff --git a/kernel/rcutree.c b/kernel/rcutree.c index d0c5baf1ab18..62c5e9cf6fcb 100644 --- a/kernel/rcutree.c +++ b/kernel/rcutree.c @@ -202,13 +202,13 @@ DEFINE_PER_CPU(struct rcu_dynticks, rcu_dynticks) = { .dynticks = ATOMIC_INIT(1), }; -static int blimit = 10; /* Maximum callbacks per rcu_do_batch. */ -static int qhimark = 10000; /* If this many pending, ignore blimit. */ -static int qlowmark = 100; /* Once only this many pending, use blimit. */ +static long blimit = 10; /* Maximum callbacks per rcu_do_batch. */ +static long qhimark = 10000; /* If this many pending, ignore blimit. */ +static long qlowmark = 100; /* Once only this many pending, use blimit. */ -module_param(blimit, int, 0); -module_param(qhimark, int, 0); -module_param(qlowmark, int, 0); +module_param(blimit, long, 0); +module_param(qhimark, long, 0); +module_param(qlowmark, long, 0); int rcu_cpu_stall_suppress __read_mostly; /* 1 = suppress stall warnings. 
*/ int rcu_cpu_stall_timeout __read_mostly = CONFIG_RCU_CPU_STALL_TIMEOUT; @@ -295,7 +295,9 @@ cpu_has_callbacks_ready_to_invoke(struct rcu_data *rdp) static int cpu_needs_another_gp(struct rcu_state *rsp, struct rcu_data *rdp) { - return *rdp->nxttail[RCU_DONE_TAIL] && !rcu_gp_in_progress(rsp); + return *rdp->nxttail[RCU_DONE_TAIL + + ACCESS_ONCE(rsp->completed) != rdp->completed] && + !rcu_gp_in_progress(rsp); } /* @@ -1474,7 +1476,7 @@ static void rcu_do_batch(struct rcu_state *rsp, struct rcu_data *rdp) { unsigned long flags; struct rcu_head *next, *list, **tail; - int bl, count, count_lazy; + long bl, count, count_lazy; /* If no callbacks are ready, just return.*/ if (!cpu_has_callbacks_ready_to_invoke(rdp)) { diff --git a/kernel/resource.c b/kernel/resource.c index 7e8ea66a8c01..bfe96b83497c 100644 --- a/kernel/resource.c +++ b/kernel/resource.c @@ -758,6 +758,7 @@ static void __init __reserve_region_with_split(struct resource *root, struct resource *parent = root; struct resource *conflict; struct resource *res = kzalloc(sizeof(*res), GFP_ATOMIC); + struct resource *next_res = NULL; if (!res) return; @@ -767,21 +768,46 @@ static void __init __reserve_region_with_split(struct resource *root, res->end = end; res->flags = IORESOURCE_BUSY; - conflict = __request_resource(parent, res); - if (!conflict) - return; + while (1) { - /* failed, split and try again */ - kfree(res); + conflict = __request_resource(parent, res); + if (!conflict) { + if (!next_res) + break; + res = next_res; + next_res = NULL; + continue; + } - /* conflict covered whole area */ - if (conflict->start <= start && conflict->end >= end) - return; + /* conflict covered whole area */ + if (conflict->start <= res->start && + conflict->end >= res->end) { + kfree(res); + WARN_ON(next_res); + break; + } + + /* failed, split and try again */ + if (conflict->start > res->start) { + end = res->end; + res->end = conflict->start - 1; + if (conflict->end < end) { + next_res = kzalloc(sizeof(*next_res), + GFP_ATOMIC); + if (!next_res) { + kfree(res); + break; + } + next_res->name = name; + next_res->start = conflict->end + 1; + next_res->end = end; + next_res->flags = IORESOURCE_BUSY; + } + } else { + res->start = conflict->end + 1; + } + } - if (conflict->start > start) - __reserve_region_with_split(root, start, conflict->start-1, name); - if (conflict->end < end) - __reserve_region_with_split(root, conflict->end+1, end, name); } void __init reserve_region_with_split(struct resource *root, diff --git a/kernel/sched/auto_group.c b/kernel/sched/auto_group.c index 0984a21076a3..15f60d01198b 100644 --- a/kernel/sched/auto_group.c +++ b/kernel/sched/auto_group.c @@ -143,15 +143,11 @@ autogroup_move_group(struct task_struct *p, struct autogroup *ag) p->signal->autogroup = autogroup_kref_get(ag); - if (!ACCESS_ONCE(sysctl_sched_autogroup_enabled)) - goto out; - t = p; do { sched_move_task(t); } while_each_thread(p, t); -out: unlock_task_sighand(p, &flags); autogroup_kref_put(prev); } diff --git a/kernel/sched/auto_group.h b/kernel/sched/auto_group.h index 8bd047142816..443232ebbb53 100644 --- a/kernel/sched/auto_group.h +++ b/kernel/sched/auto_group.h @@ -4,11 +4,6 @@ #include <linux/rwsem.h> struct autogroup { - /* - * reference doesn't mean how many thread attach to this - * autogroup now. It just stands for the number of task - * could use this autogroup. 
- */ struct kref kref; struct task_group *tg; struct rw_semaphore lock; diff --git a/kernel/sched/core.c b/kernel/sched/core.c index 974c9b2c0754..8319ecf82868 100644 --- a/kernel/sched/core.c +++ b/kernel/sched/core.c @@ -1098,7 +1098,7 @@ void set_task_cpu(struct task_struct *p, unsigned int new_cpu) * a task's CPU. ->pi_lock for waking tasks, rq->lock for runnable tasks. * * sched_move_task() holds both and thus holding either pins the cgroup, - * see set_task_rq(). + * see task_group(). * * Furthermore, all task_rq users should acquire both locks, see * task_rq_lock(). @@ -1688,7 +1688,8 @@ out: */ int wake_up_process(struct task_struct *p) { - return try_to_wake_up(p, TASK_ALL, 0); + WARN_ON(task_is_stopped_or_traced(p)); + return try_to_wake_up(p, TASK_NORMAL, 0); } EXPORT_SYMBOL(wake_up_process); @@ -3111,6 +3112,20 @@ void thread_group_times(struct task_struct *p, cputime_t *ut, cputime_t *st) # define nsecs_to_cputime(__nsecs) nsecs_to_jiffies(__nsecs) #endif +static cputime_t scale_utime(cputime_t utime, cputime_t rtime, cputime_t total) +{ + u64 temp = (__force u64) rtime; + + temp *= (__force u64) utime; + + if (sizeof(cputime_t) == 4) + temp = div_u64(temp, (__force u32) total); + else + temp = div64_u64(temp, (__force u64) total); + + return (__force cputime_t) temp; +} + void task_times(struct task_struct *p, cputime_t *ut, cputime_t *st) { cputime_t rtime, utime = p->utime, total = utime + p->stime; @@ -3120,13 +3135,9 @@ void task_times(struct task_struct *p, cputime_t *ut, cputime_t *st) */ rtime = nsecs_to_cputime(p->se.sum_exec_runtime); - if (total) { - u64 temp = (__force u64) rtime; - - temp *= (__force u64) utime; - do_div(temp, (__force u32) total); - utime = (__force cputime_t) temp; - } else + if (total) + utime = scale_utime(utime, rtime, total); + else utime = rtime; /* @@ -3153,13 +3164,9 @@ void thread_group_times(struct task_struct *p, cputime_t *ut, cputime_t *st) total = cputime.utime + cputime.stime; rtime = nsecs_to_cputime(cputime.sum_exec_runtime); - if (total) { - u64 temp = (__force u64) rtime; - - temp *= (__force u64) cputime.utime; - do_div(temp, (__force u32) total); - utime = (__force cputime_t) temp; - } else + if (total) + utime = scale_utime(cputime.utime, rtime, total); + else utime = rtime; sig->prev_utime = max(sig->prev_utime, utime); @@ -6958,34 +6965,66 @@ int __init sched_create_sysfs_power_savings_entries(struct device *dev) } #endif /* CONFIG_SCHED_MC || CONFIG_SCHED_SMT */ +static int num_cpus_frozen; /* used to mark begin/end of suspend/resume */ + /* * Update cpusets according to cpu_active mask. If cpusets are * disabled, cpuset_update_active_cpus() becomes a simple wrapper * around partition_sched_domains(). + * + * If we come here as part of a suspend/resume, don't touch cpusets because we + * want to restore it back to its original state upon resume anyway. */ static int cpuset_cpu_active(struct notifier_block *nfb, unsigned long action, void *hcpu) { - switch (action & ~CPU_TASKS_FROZEN) { + switch (action) { + case CPU_ONLINE_FROZEN: + case CPU_DOWN_FAILED_FROZEN: + + /* + * num_cpus_frozen tracks how many CPUs are involved in suspend + * resume sequence. As long as this is not the last online + * operation in the resume sequence, just build a single sched + * domain, ignoring cpusets. + */ + num_cpus_frozen--; + if (likely(num_cpus_frozen)) { + partition_sched_domains(1, NULL, NULL); + break; + } + + /* + * This is the last CPU online operation. 
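The new scale_utime() helper above does the utime * rtime / total scaling entirely in 64 bits and picks div_u64() or div64_u64() by the width of cputime_t, so the intermediate product no longer overflows the old 32-bit do_div() path. A standalone sketch of the same arithmetic (plain C, values hypothetical):

#include <stdio.h>
#include <stdint.h>

/*
 * Scale utime so that utime/total == scaled/rtime.  The multiply is
 * done in 64 bits first; a 32-bit intermediate overflows long before
 * realistic tick counts are reached.
 */
static uint64_t scale_time(uint64_t utime, uint64_t rtime, uint64_t total)
{
	return total ? (utime * rtime) / total : rtime;
}

int main(void)
{
	uint64_t utime = 3000000, stime = 1000000;	/* hypothetical tick counts */
	uint64_t total = utime + stime;
	uint64_t rtime = 4100000;			/* actual runtime */

	printf("scaled utime = %llu\n",
	       (unsigned long long)scale_time(utime, rtime, total));
	return 0;
}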
So fall through and + * restore the original sched domains by considering the + * cpuset configurations. + */ + case CPU_ONLINE: case CPU_DOWN_FAILED: cpuset_update_active_cpus(); - return NOTIFY_OK; + break; default: return NOTIFY_DONE; } + return NOTIFY_OK; } static int cpuset_cpu_inactive(struct notifier_block *nfb, unsigned long action, void *hcpu) { - switch (action & ~CPU_TASKS_FROZEN) { + switch (action) { case CPU_DOWN_PREPARE: cpuset_update_active_cpus(); - return NOTIFY_OK; + break; + case CPU_DOWN_PREPARE_FROZEN: + num_cpus_frozen++; + partition_sched_domains(1, NULL, NULL); + break; default: return NOTIFY_DONE; } + return NOTIFY_OK; } void __init sched_init_smp(void) @@ -7038,6 +7077,7 @@ int in_sched_functions(unsigned long addr) #ifdef CONFIG_CGROUP_SCHED struct task_group root_task_group; +LIST_HEAD(task_groups); #endif DECLARE_PER_CPU(cpumask_var_t, load_balance_tmpmask); @@ -7458,6 +7498,7 @@ void sched_destroy_group(struct task_group *tg) */ void sched_move_task(struct task_struct *tsk) { + struct task_group *tg; int on_rq, running; unsigned long flags; struct rq *rq; @@ -7472,6 +7513,12 @@ void sched_move_task(struct task_struct *tsk) if (unlikely(running)) tsk->sched_class->put_prev_task(rq, tsk); + tg = container_of(task_subsys_state_check(tsk, cpu_cgroup_subsys_id, + lockdep_is_held(&tsk->sighand->siglock)), + struct task_group, css); + tg = autogroup_task_group(tsk, tg); + tsk->sched_task_group = tg; + #ifdef CONFIG_FAIR_GROUP_SCHED if (tsk->sched_class->task_move_group) tsk->sched_class->task_move_group(tsk, on_rq); diff --git a/kernel/sched/rt.c b/kernel/sched/rt.c index be427c5bc4d7..3d4b1e2bda58 100644 --- a/kernel/sched/rt.c +++ b/kernel/sched/rt.c @@ -560,7 +560,7 @@ static inline struct rt_bandwidth *sched_rt_bandwidth(struct rt_rq *rt_rq) static int do_balance_runtime(struct rt_rq *rt_rq) { struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq); - struct root_domain *rd = cpu_rq(smp_processor_id())->rd; + struct root_domain *rd = rq_of_rt_rq(rt_rq)->rd; int i, weight, more = 0; u64 rt_period; diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h index 99589411f980..4d166ee753db 100644 --- a/kernel/sched/sched.h +++ b/kernel/sched/sched.h @@ -80,7 +80,7 @@ extern struct mutex sched_domains_mutex; struct cfs_rq; struct rt_rq; -static LIST_HEAD(task_groups); +extern struct list_head task_groups; struct cfs_bandwidth { #ifdef CONFIG_CFS_BANDWIDTH @@ -541,22 +541,19 @@ DECLARE_PER_CPU(int, sd_llc_id); /* * Return the group to which this tasks belongs. * - * We use task_subsys_state_check() and extend the RCU verification with - * pi->lock and rq->lock because cpu_cgroup_attach() holds those locks for each - * task it moves into the cgroup. Therefore by holding either of those locks, - * we pin the task to the current cgroup. + * We cannot use task_subsys_state() and friends because the cgroup + * subsystem changes that value before the cgroup_subsys::attach() method + * is called, therefore we cannot pin it and might observe the wrong value. + * + * The same is true for autogroup's p->signal->autogroup->tg, the autogroup + * core changes this before calling sched_move_task(). + * + * Instead we use a 'copy' which is updated from sched_move_task() while + * holding both task_struct::pi_lock and rq::lock. 
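The task_group() rewrite described above stops peeking at cgroup internals and instead returns a cached copy, p->sched_task_group, which sched_move_task() updates while holding both pi_lock and rq->lock. A loose pthread sketch of that cached-copy pattern (all names invented; build with -pthread):

#include <pthread.h>
#include <stdio.h>

struct group { const char *name; };

struct task {
	pthread_mutex_t pi_lock;	/* stand-in for p->pi_lock  */
	pthread_mutex_t rq_lock;	/* stand-in for rq->lock    */
	struct group *cached_group;	/* like p->sched_task_group */
};

/* Writer: update the cached pointer only with both locks held. */
static void move_task(struct task *t, struct group *g)
{
	pthread_mutex_lock(&t->pi_lock);
	pthread_mutex_lock(&t->rq_lock);
	t->cached_group = g;
	pthread_mutex_unlock(&t->rq_lock);
	pthread_mutex_unlock(&t->pi_lock);
}

/* Reader: holding either lock pins the value, so this is a plain load. */
static struct group *group_of(struct task *t)
{
	return t->cached_group;
}

int main(void)
{
	struct group root = { "root" }, ag = { "autogroup" };
	struct task t = { PTHREAD_MUTEX_INITIALIZER,
			  PTHREAD_MUTEX_INITIALIZER, &root };

	printf("%s\n", group_of(&t)->name);
	move_task(&t, &ag);
	printf("%s\n", group_of(&t)->name);
	return 0;
}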
*/ static inline struct task_group *task_group(struct task_struct *p) { - struct task_group *tg; - struct cgroup_subsys_state *css; - - css = task_subsys_state_check(p, cpu_cgroup_subsys_id, - lockdep_is_held(&p->pi_lock) || - lockdep_is_held(&task_rq(p)->lock)); - tg = container_of(css, struct task_group, css); - - return autogroup_task_group(p, tg); + return p->sched_task_group; } /* Change a task's cfs_rq and parent entity if it moves across CPUs/groups */ diff --git a/kernel/sched/stop_task.c b/kernel/sched/stop_task.c index 7b386e86fd23..da5eb5bed84a 100644 --- a/kernel/sched/stop_task.c +++ b/kernel/sched/stop_task.c @@ -27,8 +27,10 @@ static struct task_struct *pick_next_task_stop(struct rq *rq) { struct task_struct *stop = rq->stop; - if (stop && stop->on_rq) + if (stop && stop->on_rq) { + stop->se.exec_start = rq->clock_task; return stop; + } return NULL; } @@ -52,6 +54,21 @@ static void yield_task_stop(struct rq *rq) static void put_prev_task_stop(struct rq *rq, struct task_struct *prev) { + struct task_struct *curr = rq->curr; + u64 delta_exec; + + delta_exec = rq->clock_task - curr->se.exec_start; + if (unlikely((s64)delta_exec < 0)) + delta_exec = 0; + + schedstat_set(curr->se.statistics.exec_max, + max(curr->se.statistics.exec_max, delta_exec)); + + curr->se.sum_exec_runtime += delta_exec; + account_group_exec_runtime(curr, delta_exec); + + curr->se.exec_start = rq->clock_task; + cpuacct_charge(curr, delta_exec); } static void task_tick_stop(struct rq *rq, struct task_struct *curr, int queued) @@ -60,6 +77,9 @@ static void task_tick_stop(struct rq *rq, struct task_struct *curr, int queued) static void set_curr_task_stop(struct rq *rq) { + struct task_struct *stop = rq->stop; + + stop->se.exec_start = rq->clock_task; } static void switched_to_stop(struct rq *rq, struct task_struct *p) diff --git a/kernel/signal.c b/kernel/signal.c index 1dd48f7e7d6f..563b7711d234 100644 --- a/kernel/signal.c +++ b/kernel/signal.c @@ -677,23 +677,17 @@ int dequeue_signal(struct task_struct *tsk, sigset_t *mask, siginfo_t *info) * No need to set need_resched since signal event passing * goes through ->blocked */ -void signal_wake_up(struct task_struct *t, int resume) +void signal_wake_up_state(struct task_struct *t, unsigned int state) { - unsigned int mask; - set_tsk_thread_flag(t, TIF_SIGPENDING); - /* - * For SIGKILL, we want to wake it up in the stopped/traced/killable + * TASK_WAKEKILL also means wake it up in the stopped/traced/killable * case. We don't check t->state here because there is a race with it * executing another processor and just now entering stopped state. * By using wake_up_state, we ensure the process will wake up and * handle its death signal. */ - mask = TASK_INTERRUPTIBLE; - if (resume) - mask |= TASK_WAKEKILL; - if (!wake_up_state(t, mask)) + if (!wake_up_state(t, state | TASK_INTERRUPTIBLE)) kick_process(t); } @@ -842,7 +836,7 @@ static void ptrace_trap_notify(struct task_struct *t) assert_spin_locked(&t->sighand->siglock); task_set_jobctl_pending(t, JOBCTL_TRAP_NOTIFY); - signal_wake_up(t, t->jobctl & JOBCTL_LISTENING); + ptrace_signal_wake_up(t, t->jobctl & JOBCTL_LISTENING); } /* @@ -1808,6 +1802,10 @@ static inline int may_ptrace_stop(void) * If SIGKILL was already sent before the caller unlocked * ->siglock we must see ->core_state != NULL. Otherwise it * is safe to enter schedule(). + * + * This is almost outdated, a task with the pending SIGKILL can't + * block in TASK_TRACED. But PTRACE_EVENT_EXIT can be reported + * after SIGKILL was already dequeued. 
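put_prev_task_stop() above starts charging runtime to the stop task: delta = clock - exec_start, clamped at zero, folded into sum_exec_runtime and the exec_max statistic, then exec_start is re-armed. The same bookkeeping in miniature (userspace sketch, names invented):

#include <stdio.h>
#include <stdint.h>

struct se_stats {
	uint64_t exec_start;		/* timestamp when we went on CPU */
	uint64_t sum_exec_runtime;	/* total time spent running      */
	uint64_t exec_max;		/* longest single stretch seen   */
};

static void account(struct se_stats *se, uint64_t now)
{
	int64_t delta = (int64_t)(now - se->exec_start);

	if (delta < 0)			/* clock anomaly: charge nothing */
		delta = 0;
	if ((uint64_t)delta > se->exec_max)
		se->exec_max = delta;
	se->sum_exec_runtime += delta;
	se->exec_start = now;		/* re-arm for the next slice     */
}

int main(void)
{
	struct se_stats se = { .exec_start = 1000 };

	account(&se, 1700);		/* ran for 700 units */
	account(&se, 1900);		/* ran for 200 more  */
	printf("sum=%llu max=%llu\n",
	       (unsigned long long)se.sum_exec_runtime,
	       (unsigned long long)se.exec_max);
	return 0;
}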
*/ if (unlikely(current->mm->core_state) && unlikely(current->mm == current->parent->mm)) @@ -1933,6 +1931,7 @@ static void ptrace_stop(int exit_code, int why, int clear_code, siginfo_t *info) if (gstop_done) do_notify_parent_cldstop(current, false, why); + /* tasklist protects us from ptrace_freeze_traced() */ __set_current_state(TASK_RUNNING); if (clear_code) current->exit_code = 0; diff --git a/kernel/smp.c b/kernel/smp.c index 2f8b10ecf759..d5f3238ec458 100644 --- a/kernel/smp.c +++ b/kernel/smp.c @@ -31,6 +31,7 @@ struct call_function_data { struct call_single_data csd; atomic_t refs; cpumask_var_t cpumask; + cpumask_var_t cpumask_ipi; }; static DEFINE_PER_CPU_SHARED_ALIGNED(struct call_function_data, cfd_data); @@ -54,6 +55,9 @@ hotplug_cfd(struct notifier_block *nfb, unsigned long action, void *hcpu) if (!zalloc_cpumask_var_node(&cfd->cpumask, GFP_KERNEL, cpu_to_node(cpu))) return notifier_from_errno(-ENOMEM); + if (!zalloc_cpumask_var_node(&cfd->cpumask_ipi, GFP_KERNEL, + cpu_to_node(cpu))) + return notifier_from_errno(-ENOMEM); break; #ifdef CONFIG_HOTPLUG_CPU @@ -63,6 +67,7 @@ hotplug_cfd(struct notifier_block *nfb, unsigned long action, void *hcpu) case CPU_DEAD: case CPU_DEAD_FROZEN: free_cpumask_var(cfd->cpumask); + free_cpumask_var(cfd->cpumask_ipi); break; #endif }; @@ -524,6 +529,12 @@ void smp_call_function_many(const struct cpumask *mask, return; } + /* + * After we put an entry into the list, data->cpumask + * may be cleared again when another CPU sends another IPI for + * a SMP function call, so data->cpumask will be zero. + */ + cpumask_copy(data->cpumask_ipi, data->cpumask); raw_spin_lock_irqsave(&call_function.lock, flags); /* * Place entry at the _HEAD_ of the list, so that any cpu still @@ -547,7 +558,7 @@ void smp_call_function_many(const struct cpumask *mask, smp_mb(); /* Send a message to all CPUs in the map */ - arch_send_call_function_ipi_mask(data->cpumask); + arch_send_call_function_ipi_mask(data->cpumask_ipi); /* Optionally wait for the CPUs to complete */ if (wait) diff --git a/kernel/sys.c b/kernel/sys.c index e7006eb6c1e4..b0003db7fea0 100644 --- a/kernel/sys.c +++ b/kernel/sys.c @@ -365,6 +365,7 @@ EXPORT_SYMBOL(unregister_reboot_notifier); void kernel_restart(char *cmd) { kernel_restart_prepare(cmd); + disable_nonboot_cpus(); if (!cmd) printk(KERN_EMERG "Restarting system.\n"); else @@ -1179,15 +1180,16 @@ DECLARE_RWSEM(uts_sem); * Work around broken programs that cannot handle "Linux 3.0". * Instead we map 3.x to 2.6.40+x, so e.g. 3.0 would be 2.6.40 */ -static int override_release(char __user *release, int len) +static int override_release(char __user *release, size_t len) { int ret = 0; - char buf[65]; if (current->personality & UNAME26) { - char *rest = UTS_RELEASE; + const char *rest = UTS_RELEASE; + char buf[65] = { 0 }; int ndots = 0; unsigned v; + size_t copy; while (*rest) { if (*rest == '.' 
&& ++ndots >= 3) @@ -1197,8 +1199,9 @@ static int override_release(char __user *release, int len) rest++; } v = ((LINUX_VERSION_CODE >> 8) & 0xff) + 40; - snprintf(buf, len, "2.6.%u%s", v, rest); - ret = copy_to_user(release, buf, len); + copy = clamp_t(size_t, len, 1, sizeof(buf)); + copy = scnprintf(buf, copy, "2.6.%u%s", v, rest); + ret = copy_to_user(release, buf, copy + 1); } return ret; } diff --git a/kernel/sysctl_binary.c b/kernel/sysctl_binary.c index a650694883a1..9f9aa3205973 100644 --- a/kernel/sysctl_binary.c +++ b/kernel/sysctl_binary.c @@ -1194,9 +1194,10 @@ static ssize_t bin_dn_node_address(struct file *file, /* Convert the decnet address to binary */ result = -EIO; - nodep = strchr(buf, '.') + 1; + nodep = strchr(buf, '.'); if (!nodep) goto out; + ++nodep; area = simple_strtoul(buf, NULL, 10); node = simple_strtoul(nodep, NULL, 10); diff --git a/kernel/time/tick-sched.c b/kernel/time/tick-sched.c index fd4e160aa9c4..e60347797359 100644 --- a/kernel/time/tick-sched.c +++ b/kernel/time/tick-sched.c @@ -145,6 +145,7 @@ static void tick_nohz_update_jiffies(ktime_t now) tick_do_update_jiffies64(now); local_irq_restore(flags); + calc_load_exit_idle(); touch_softlockup_watchdog(); } diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c index da680a659f72..fad5c9eee531 100644 --- a/kernel/time/timekeeping.c +++ b/kernel/time/timekeeping.c @@ -385,7 +385,7 @@ int do_settimeofday(const struct timespec *tv) struct timespec ts_delta; unsigned long flags; - if ((unsigned long)tv->tv_nsec >= NSEC_PER_SEC) + if (!timespec_valid_strict(tv)) return -EINVAL; write_seqlock_irqsave(&timekeeper.lock, flags); @@ -420,6 +420,8 @@ EXPORT_SYMBOL(do_settimeofday); int timekeeping_inject_offset(struct timespec *ts) { unsigned long flags; + struct timespec tmp; + int ret = 0; if ((unsigned long)ts->tv_nsec >= NSEC_PER_SEC) return -EINVAL; @@ -428,10 +430,17 @@ int timekeeping_inject_offset(struct timespec *ts) timekeeping_forward_now(); + tmp = timespec_add(timekeeper.xtime, *ts); + if (!timespec_valid_strict(&tmp)) { + ret = -EINVAL; + goto error; + } + timekeeper.xtime = timespec_add(timekeeper.xtime, *ts); timekeeper.wall_to_monotonic = timespec_sub(timekeeper.wall_to_monotonic, *ts); +error: /* even if we error out, we forwarded the time, so call update */ timekeeping_update(true); write_sequnlock_irqrestore(&timekeeper.lock, flags); @@ -439,7 +448,7 @@ int timekeeping_inject_offset(struct timespec *ts) /* signal hrtimers about time change */ clock_was_set(); - return 0; + return ret; } EXPORT_SYMBOL(timekeeping_inject_offset); @@ -599,7 +608,20 @@ void __init timekeeping_init(void) struct timespec now, boot; read_persistent_clock(&now); + if (!timespec_valid_strict(&now)) { + pr_warn("WARNING: Persistent clock returned invalid value!\n" + " Check your CMOS/BIOS settings.\n"); + now.tv_sec = 0; + now.tv_nsec = 0; + } + read_boot_clock(&boot); + if (!timespec_valid_strict(&boot)) { + pr_warn("WARNING: Boot clock returned invalid value!\n" + " Check your CMOS/BIOS settings.\n"); + boot.tv_sec = 0; + boot.tv_nsec = 0; + } seqlock_init(&timekeeper.lock); @@ -645,7 +667,7 @@ static void update_sleep_time(struct timespec t) */ static void __timekeeping_inject_sleeptime(struct timespec *delta) { - if (!timespec_valid(delta)) { + if (!timespec_valid_strict(delta)) { printk(KERN_WARNING "__timekeeping_inject_sleeptime: Invalid " "sleep delta value!\n"); return; @@ -992,7 +1014,7 @@ static cycle_t logarithmic_accumulation(cycle_t offset, int shift) } /* Accumulate raw time */ - raw_nsecs = 
timekeeper.raw_interval << shift; + raw_nsecs = (u64)timekeeper.raw_interval << shift; raw_nsecs += timekeeper.raw_time.tv_nsec; if (raw_nsecs >= NSEC_PER_SEC) { u64 raw_secs = raw_nsecs; @@ -1035,9 +1057,12 @@ static void update_wall_time(void) #else offset = (clock->read(clock) - clock->cycle_last) & clock->mask; #endif + /* Check if there's really nothing to do */ + if (offset < timekeeper.cycle_interval) + goto out; + timekeeper.xtime_nsec = (s64)timekeeper.xtime.tv_nsec << timekeeper.shift; - /* * With NO_HZ we may have to accumulate many cycle_intervals * (think "ticks") worth of time at once. To do this efficiently, diff --git a/kernel/timeconst.pl b/kernel/timeconst.pl index eb51d76e058a..3f42652a6a37 100644 --- a/kernel/timeconst.pl +++ b/kernel/timeconst.pl @@ -369,10 +369,8 @@ if ($hz eq '--can') { die "Usage: $0 HZ\n"; } - @val = @{$canned_values{$hz}}; - if (!defined(@val)) { - @val = compute_values($hz); - } + $cv = $canned_values{$hz}; + @val = defined($cv) ? @$cv : compute_values($hz); output($hz, @val); } exit 0; diff --git a/kernel/timer.c b/kernel/timer.c index a297ffcf888e..6dfdb72828ff 100644 --- a/kernel/timer.c +++ b/kernel/timer.c @@ -63,6 +63,7 @@ EXPORT_SYMBOL(jiffies_64); #define TVR_SIZE (1 << TVR_BITS) #define TVN_MASK (TVN_SIZE - 1) #define TVR_MASK (TVR_SIZE - 1) +#define MAX_TVAL ((unsigned long)((1ULL << (TVR_BITS + 4*TVN_BITS)) - 1)) struct tvec { struct list_head vec[TVN_SIZE]; @@ -356,11 +357,12 @@ static void internal_add_timer(struct tvec_base *base, struct timer_list *timer) vec = base->tv1.vec + (base->timer_jiffies & TVR_MASK); } else { int i; - /* If the timeout is larger than 0xffffffff on 64-bit - * architectures then we use the maximum timeout: + /* If the timeout is larger than MAX_TVAL (on 64-bit + * architectures or with CONFIG_BASE_SMALL=1) then we + * use the maximum timeout. 
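The logarithmic_accumulation() fix above casts raw_interval to u64 before the shift; without the cast the shift happens in the 32-bit type and the high bits are already gone when the result is widened. A short demonstration (values hypothetical):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint32_t raw_interval = 10000000;	/* hypothetical ns per interval */
	int shift = 10;				/* NO_HZ can accumulate many    */

	uint64_t lost  = raw_interval << shift;		  /* shifted as 32-bit */
	uint64_t right = (uint64_t)raw_interval << shift; /* widened first     */

	printf("without cast: %llu\nwith cast:    %llu\n",
	       (unsigned long long)lost, (unsigned long long)right);
	return 0;
}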
*/ - if (idx > 0xffffffffUL) { - idx = 0xffffffffUL; + if (idx > MAX_TVAL) { + idx = MAX_TVAL; expires = idx + base->timer_jiffies; } i = (expires >> (TVR_BITS + 3 * TVN_BITS)) & TVN_MASK; diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c index 0fa92f677c92..4a86e6404085 100644 --- a/kernel/trace/ftrace.c +++ b/kernel/trace/ftrace.c @@ -2368,7 +2368,7 @@ static void reset_iter_read(struct ftrace_iterator *iter) { iter->pos = 0; iter->func_pos = 0; - iter->flags &= ~(FTRACE_ITER_PRINTALL & FTRACE_ITER_HASH); + iter->flags &= ~(FTRACE_ITER_PRINTALL | FTRACE_ITER_HASH); } static void *t_start(struct seq_file *m, loff_t *pos) @@ -3841,35 +3841,49 @@ static void ftrace_init_module(struct module *mod, ftrace_process_locs(mod, start, end); } -static int ftrace_module_notify(struct notifier_block *self, - unsigned long val, void *data) +static int ftrace_module_notify_enter(struct notifier_block *self, + unsigned long val, void *data) { struct module *mod = data; - switch (val) { - case MODULE_STATE_COMING: + if (val == MODULE_STATE_COMING) ftrace_init_module(mod, mod->ftrace_callsites, mod->ftrace_callsites + mod->num_ftrace_callsites); - break; - case MODULE_STATE_GOING: + return 0; +} + +static int ftrace_module_notify_exit(struct notifier_block *self, + unsigned long val, void *data) +{ + struct module *mod = data; + + if (val == MODULE_STATE_GOING) ftrace_release_mod(mod); - break; - } return 0; } #else -static int ftrace_module_notify(struct notifier_block *self, - unsigned long val, void *data) +static int ftrace_module_notify_enter(struct notifier_block *self, + unsigned long val, void *data) +{ + return 0; +} +static int ftrace_module_notify_exit(struct notifier_block *self, + unsigned long val, void *data) { return 0; } #endif /* CONFIG_MODULES */ -struct notifier_block ftrace_module_nb = { - .notifier_call = ftrace_module_notify, - .priority = 0, +struct notifier_block ftrace_module_enter_nb = { + .notifier_call = ftrace_module_notify_enter, + .priority = INT_MAX, /* Run before anything that can use kprobes */ +}; + +struct notifier_block ftrace_module_exit_nb = { + .notifier_call = ftrace_module_notify_exit, + .priority = INT_MIN, /* Run after anything that can remove kprobes */ }; extern unsigned long __start_mcount_loc[]; @@ -3903,9 +3917,13 @@ void __init ftrace_init(void) __start_mcount_loc, __stop_mcount_loc); - ret = register_module_notifier(&ftrace_module_nb); + ret = register_module_notifier(&ftrace_module_enter_nb); + if (ret) + pr_warning("Failed to register trace ftrace module enter notifier\n"); + + ret = register_module_notifier(&ftrace_module_exit_nb); if (ret) - pr_warning("Failed to register trace ftrace module notifier\n"); + pr_warning("Failed to register trace ftrace module exit notifier\n"); set_ftrace_early_filters(); diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c index cf8d11e91efd..28667834181e 100644 --- a/kernel/trace/ring_buffer.c +++ b/kernel/trace/ring_buffer.c @@ -2708,7 +2708,7 @@ unsigned long ring_buffer_oldest_event_ts(struct ring_buffer *buffer, int cpu) unsigned long flags; struct ring_buffer_per_cpu *cpu_buffer; struct buffer_page *bpage; - unsigned long ret; + unsigned long ret = 0; if (!cpumask_test_cpu(cpu, buffer->cpumask)) return 0; @@ -2723,7 +2723,8 @@ unsigned long ring_buffer_oldest_event_ts(struct ring_buffer *buffer, int cpu) bpage = cpu_buffer->reader_page; else bpage = rb_set_head_page(cpu_buffer); - ret = bpage->page->time_stamp; + if (bpage) + ret = bpage->page->time_stamp; 
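The reset_iter_read() change above fixes a classic flag-clearing slip: flags &= ~(A & B) only clears bits common to A and B (usually none), while flags &= ~(A | B) clears both. A quick check with stand-in bit values:

#include <stdio.h>

#define ITER_PRINTALL	0x1	/* stand-ins for the FTRACE_ITER_* bits */
#define ITER_HASH	0x2

int main(void)
{
	unsigned int wrong = 0x7, right = 0x7;

	wrong &= ~(ITER_PRINTALL & ITER_HASH);	/* A & B == 0, ~0 clears nothing */
	right &= ~(ITER_PRINTALL | ITER_HASH);	/* clears both bits              */

	printf("wrong=%#x right=%#x\n", wrong, right);
	return 0;
}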
raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags); return ret; @@ -3030,6 +3031,8 @@ rb_get_reader_page(struct ring_buffer_per_cpu *cpu_buffer) * Splice the empty reader page into the list around the head. */ reader = rb_set_head_page(cpu_buffer); + if (!reader) + goto out; cpu_buffer->reader_page->list.next = rb_list_head(reader->list.next); cpu_buffer->reader_page->list.prev = reader->list.prev; diff --git a/kernel/watchdog.c b/kernel/watchdog.c index df30ee08bdd4..991aa9381a44 100644 --- a/kernel/watchdog.c +++ b/kernel/watchdog.c @@ -112,7 +112,7 @@ static unsigned long get_timestamp(int this_cpu) return cpu_clock(this_cpu) >> 30LL; /* 2^30 ~= 10^9 */ } -static unsigned long get_sample_period(void) +static u64 get_sample_period(void) { /* * convert watchdog_thresh from seconds to ns @@ -121,7 +121,7 @@ static unsigned long get_sample_period(void) * and hard thresholds) to increment before the * hardlockup detector generates a warning */ - return get_softlockup_thresh() * (NSEC_PER_SEC / 5); + return get_softlockup_thresh() * ((u64)NSEC_PER_SEC / 5); } /* Commands for resetting the watchdog */ diff --git a/kernel/workqueue.c b/kernel/workqueue.c index 7593a9e871de..c1e3432cacae 100644 --- a/kernel/workqueue.c +++ b/kernel/workqueue.c @@ -1141,8 +1141,8 @@ int queue_delayed_work_on(int cpu, struct workqueue_struct *wq, if (!test_and_set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work))) { unsigned int lcpu; - BUG_ON(timer_pending(timer)); - BUG_ON(!list_empty(&work->entry)); + WARN_ON_ONCE(timer_pending(timer)); + WARN_ON_ONCE(!list_empty(&work->entry)); timer_stats_timer_set_start_info(&dwork->timer); @@ -1988,7 +1988,9 @@ __acquires(&gcwq->lock) spin_unlock_irq(&gcwq->lock); + smp_wmb(); /* paired with test_and_set_bit(PENDING) */ work_clear_pending(work); + lock_map_acquire_read(&cwq->wq->lockdep_map); lock_map_acquire(&lockdep_map); trace_workqueue_execute_start(work); @@ -2172,8 +2174,10 @@ static int rescuer_thread(void *__wq) repeat: set_current_state(TASK_INTERRUPTIBLE); - if (kthread_should_stop()) + if (kthread_should_stop()) { + __set_current_state(TASK_RUNNING); return 0; + } /* * See whether any cpu is asking for help. 
Unbounded @@ -3488,18 +3492,17 @@ static int __devinit workqueue_cpu_down_callback(struct notifier_block *nfb, #ifdef CONFIG_SMP struct work_for_cpu { - struct completion completion; + struct work_struct work; long (*fn)(void *); void *arg; long ret; }; -static int do_work_for_cpu(void *_wfc) +static void work_for_cpu_fn(struct work_struct *work) { - struct work_for_cpu *wfc = _wfc; + struct work_for_cpu *wfc = container_of(work, struct work_for_cpu, work); + wfc->ret = wfc->fn(wfc->arg); - complete(&wfc->completion); - return 0; } /** @@ -3514,19 +3517,11 @@ static int do_work_for_cpu(void *_wfc) */ long work_on_cpu(unsigned int cpu, long (*fn)(void *), void *arg) { - struct task_struct *sub_thread; - struct work_for_cpu wfc = { - .completion = COMPLETION_INITIALIZER_ONSTACK(wfc.completion), - .fn = fn, - .arg = arg, - }; + struct work_for_cpu wfc = { .fn = fn, .arg = arg }; - sub_thread = kthread_create(do_work_for_cpu, &wfc, "work_for_cpu"); - if (IS_ERR(sub_thread)) - return PTR_ERR(sub_thread); - kthread_bind(sub_thread, cpu); - wake_up_process(sub_thread); - wait_for_completion(&wfc.completion); + INIT_WORK_ONSTACK(&wfc.work, work_for_cpu_fn); + schedule_work_on(cpu, &wfc.work); + flush_work(&wfc.work); return wfc.ret; } EXPORT_SYMBOL_GPL(work_on_cpu); diff --git a/lib/atomic64.c b/lib/atomic64.c index 978537809d84..08a4f068e61e 100644 --- a/lib/atomic64.c +++ b/lib/atomic64.c @@ -31,7 +31,11 @@ static union { raw_spinlock_t lock; char pad[L1_CACHE_BYTES]; -} atomic64_lock[NR_LOCKS] __cacheline_aligned_in_smp; +} atomic64_lock[NR_LOCKS] __cacheline_aligned_in_smp = { + [0 ... (NR_LOCKS - 1)] = { + .lock = __RAW_SPIN_LOCK_UNLOCKED(atomic64_lock.lock), + }, +}; static inline raw_spinlock_t *lock_addr(const atomic64_t *v) { @@ -173,14 +177,3 @@ int atomic64_add_unless(atomic64_t *v, long long a, long long u) return ret; } EXPORT_SYMBOL(atomic64_add_unless); - -static int init_atomic64_lock(void) -{ - int i; - - for (i = 0; i < NR_LOCKS; ++i) - raw_spin_lock_init(&atomic64_lock[i].lock); - return 0; -} - -pure_initcall(init_atomic64_lock); diff --git a/lib/digsig.c b/lib/digsig.c index 286d558033e2..dc2be7ed1765 100644 --- a/lib/digsig.c +++ b/lib/digsig.c @@ -162,10 +162,14 @@ static int digsig_verify_rsa(struct key *key, memset(out1, 0, head); memcpy(out1 + head, p, l); + kfree(p); + err = pkcs_1_v1_5_decode_emsa(out1, len, mblen, out2, &len); + if (err) + goto err; - if (!err && len == hlen) - err = memcmp(out2, h, hlen); + if (len != hlen || memcmp(out2, h, hlen)) + err = -EINVAL; err: mpi_free(in); diff --git a/lib/gcd.c b/lib/gcd.c index cce4f3cd14b3..3657f129d7b8 100644 --- a/lib/gcd.c +++ b/lib/gcd.c @@ -9,6 +9,9 @@ unsigned long gcd(unsigned long a, unsigned long b) if (a < b) swap(a, b); + + if (!b) + return a; while ((r = a % b) != 0) { a = b; b = r; diff --git a/lib/genalloc.c b/lib/genalloc.c index b6c4de49e224..e900e1f85926 100644 --- a/lib/genalloc.c +++ b/lib/genalloc.c @@ -176,7 +176,7 @@ int gen_pool_add_virt(struct gen_pool *pool, unsigned long virt, phys_addr_t phy struct gen_pool_chunk *chunk; int nbits = size >> pool->min_alloc_order; int nbytes = sizeof(struct gen_pool_chunk) + - (nbits + BITS_PER_BYTE - 1) / BITS_PER_BYTE; + BITS_TO_LONGS(nbits) * sizeof(long); chunk = kmalloc_node(nbytes, GFP_KERNEL | __GFP_ZERO, nid); if (unlikely(chunk == NULL)) diff --git a/lib/idr.c b/lib/idr.c index 4046e29c0a99..e90d2d05d784 100644 --- a/lib/idr.c +++ b/lib/idr.c @@ -625,7 +625,14 @@ void *idr_get_next(struct idr *idp, int *nextidp) return p; } - id += 1 << n; + /* + * 
Proceed to the next layer at the current level. Unlike + * idr_for_each(), @id isn't guaranteed to be aligned to + * layer boundary at this point and adding 1 << n may + * incorrectly skip IDs. Make sure we jump to the + * beginning of the next layer using round_up(). + */ + id = round_up(id + 1, 1 << n); while (n < fls(id)) { n += IDR_BITS; p = *--paa; diff --git a/lib/mpi/longlong.h b/lib/mpi/longlong.h index 29f98624ef93..280405b47e71 100644 --- a/lib/mpi/longlong.h +++ b/lib/mpi/longlong.h @@ -703,7 +703,14 @@ do { \ ************** MIPS ***************** ***************************************/ #if defined(__mips__) && W_TYPE_SIZE == 32 -#if __GNUC__ > 2 || __GNUC_MINOR__ >= 7 +#if __GNUC__ >= 4 && __GNUC_MINOR__ >= 4 +#define umul_ppmm(w1, w0, u, v) \ +do { \ + UDItype __ll = (UDItype)(u) * (v); \ + w1 = __ll >> 32; \ + w0 = __ll; \ +} while (0) +#elif __GNUC__ > 2 || __GNUC_MINOR__ >= 7 #define umul_ppmm(w1, w0, u, v) \ __asm__ ("multu %2,%3" \ : "=l" ((USItype)(w0)), \ @@ -728,7 +735,15 @@ do { \ ************** MIPS/64 ************** ***************************************/ #if (defined(__mips) && __mips >= 3) && W_TYPE_SIZE == 64 -#if __GNUC__ > 2 || __GNUC_MINOR__ >= 7 +#if __GNUC__ >= 4 && __GNUC_MINOR__ >= 4 +#define umul_ppmm(w1, w0, u, v) \ +do { \ + typedef unsigned int __ll_UTItype __attribute__((mode(TI))); \ + __ll_UTItype __ll = (__ll_UTItype)(u) * (v); \ + w1 = __ll >> 64; \ + w0 = __ll; \ +} while (0) +#elif __GNUC__ > 2 || __GNUC_MINOR__ >= 7 #define umul_ppmm(w1, w0, u, v) \ __asm__ ("dmultu %2,%3" \ : "=l" ((UDItype)(w0)), \ diff --git a/mm/bootmem.c b/mm/bootmem.c index 0131170c9d54..53cf62b186b6 100644 --- a/mm/bootmem.c +++ b/mm/bootmem.c @@ -766,13 +766,17 @@ void * __init alloc_bootmem_section(unsigned long size, unsigned long section_nr) { bootmem_data_t *bdata; - unsigned long pfn, goal; + unsigned long pfn, goal, limit; pfn = section_nr_to_pfn(section_nr); goal = pfn << PAGE_SHIFT; + limit = section_nr_to_pfn(section_nr + 1) << PAGE_SHIFT; bdata = &bootmem_node_data[early_pfn_to_nid(pfn)]; - return alloc_bootmem_core(bdata, size, SMP_CACHE_BYTES, goal, 0); + if (goal + size > limit) + limit = 0; + + return alloc_bootmem_core(bdata, size, SMP_CACHE_BYTES, goal, limit); } #endif diff --git a/mm/compaction.c b/mm/compaction.c index eede98104e9f..10f9bf96f7f0 100644 --- a/mm/compaction.c +++ b/mm/compaction.c @@ -840,7 +840,7 @@ static int compact_node(int nid) } /* Compact all nodes in the system */ -static int compact_nodes(void) +static void compact_nodes(void) { int nid; @@ -849,8 +849,6 @@ static int compact_nodes(void) for_each_online_node(nid) compact_node(nid); - - return COMPACT_COMPLETE; } /* The written value is actually unused, all memory is compacted */ @@ -861,7 +859,7 @@ int sysctl_compaction_handler(struct ctl_table *table, int write, void __user *buffer, size_t *length, loff_t *ppos) { if (write) - return compact_nodes(); + compact_nodes(); return 0; } diff --git a/mm/dmapool.c b/mm/dmapool.c index c5ab33bca0a8..da1b0f0b8709 100644 --- a/mm/dmapool.c +++ b/mm/dmapool.c @@ -50,7 +50,6 @@ struct dma_pool { /* the pool */ size_t allocation; size_t boundary; char name[32]; - wait_queue_head_t waitq; struct list_head pools; }; @@ -62,8 +61,6 @@ struct dma_page { /* cacheable header for 'allocation' bytes */ unsigned int offset; }; -#define POOL_TIMEOUT_JIFFIES ((100 /* msec */ * HZ) / 1000) - static DEFINE_MUTEX(pools_lock); static ssize_t @@ -172,7 +169,6 @@ struct dma_pool *dma_pool_create(const char *name, struct device *dev, retval->size = size; 
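The idr_get_next() fix above replaces id += 1 << n with id = round_up(id + 1, 1 << n), because id may sit in the middle of a layer and the plain add would jump past valid IDs at the start of the next layer. A small sketch of the macro and the difference (values invented):

#include <stdio.h>

/* Same idea as the kernel's round_up() for power-of-two alignment. */
#define round_up(x, y)	((((x) - 1) | ((y) - 1)) + 1)

int main(void)
{
	unsigned int id = 37;		/* hypothetical unaligned id */
	unsigned int step = 1 << 5;	/* this layer covers 32 ids  */

	printf("id += step      -> %u (skips ids 64..68)\n", id + step);
	printf("round_up(id+1)  -> %u (start of the next layer)\n",
	       round_up(id + 1, step));
	return 0;
}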
retval->boundary = boundary; retval->allocation = allocation; - init_waitqueue_head(&retval->waitq); if (dev) { int ret; @@ -227,7 +223,6 @@ static struct dma_page *pool_alloc_page(struct dma_pool *pool, gfp_t mem_flags) memset(page->vaddr, POOL_POISON_FREED, pool->allocation); #endif pool_initialise_page(pool, page); - list_add(&page->page_list, &pool->page_list); page->in_use = 0; page->offset = 0; } else { @@ -315,30 +310,21 @@ void *dma_pool_alloc(struct dma_pool *pool, gfp_t mem_flags, might_sleep_if(mem_flags & __GFP_WAIT); spin_lock_irqsave(&pool->lock, flags); - restart: list_for_each_entry(page, &pool->page_list, page_list) { if (page->offset < pool->allocation) goto ready; } - page = pool_alloc_page(pool, GFP_ATOMIC); - if (!page) { - if (mem_flags & __GFP_WAIT) { - DECLARE_WAITQUEUE(wait, current); - __set_current_state(TASK_UNINTERRUPTIBLE); - __add_wait_queue(&pool->waitq, &wait); - spin_unlock_irqrestore(&pool->lock, flags); + /* pool_alloc_page() might sleep, so temporarily drop &pool->lock */ + spin_unlock_irqrestore(&pool->lock, flags); - schedule_timeout(POOL_TIMEOUT_JIFFIES); + page = pool_alloc_page(pool, mem_flags); + if (!page) + return NULL; - spin_lock_irqsave(&pool->lock, flags); - __remove_wait_queue(&pool->waitq, &wait); - goto restart; - } - retval = NULL; - goto done; - } + spin_lock_irqsave(&pool->lock, flags); + list_add(&page->page_list, &pool->page_list); ready: page->in_use++; offset = page->offset; @@ -348,7 +334,6 @@ void *dma_pool_alloc(struct dma_pool *pool, gfp_t mem_flags, #ifdef DMAPOOL_DEBUG memset(retval, POOL_POISON_ALLOCATED, pool->size); #endif - done: spin_unlock_irqrestore(&pool->lock, flags); return retval; } @@ -435,8 +420,6 @@ void dma_pool_free(struct dma_pool *pool, void *vaddr, dma_addr_t dma) page->in_use--; *(int *)vaddr = page->offset; page->offset = offset; - if (waitqueue_active(&pool->waitq)) - wake_up_locked(&pool->waitq); /* * Resist a temptation to do * if (!is_page_busy(page)) pool_free_page(pool, page); diff --git a/mm/fadvise.c b/mm/fadvise.c index 469491e0af79..dcb98722a22f 100644 --- a/mm/fadvise.c +++ b/mm/fadvise.c @@ -17,6 +17,7 @@ #include <linux/fadvise.h> #include <linux/writeback.h> #include <linux/syscalls.h> +#include <linux/swap.h> #include <asm/unistd.h> @@ -124,9 +125,22 @@ SYSCALL_DEFINE(fadvise64_64)(int fd, loff_t offset, loff_t len, int advice) start_index = (offset+(PAGE_CACHE_SIZE-1)) >> PAGE_CACHE_SHIFT; end_index = (endbyte >> PAGE_CACHE_SHIFT); - if (end_index >= start_index) - invalidate_mapping_pages(mapping, start_index, + if (end_index >= start_index) { + unsigned long count = invalidate_mapping_pages(mapping, + start_index, end_index); + + /* + * If fewer pages were invalidated than expected then + * it is possible that some of the pages were on + * a per-cpu pagevec for a remote CPU. Drain all + * pagevecs and try again. 
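dma_pool_alloc() above now drops pool->lock around the page allocation, which may sleep, and only links the new page into the pool list after re-taking the lock; the old wait-queue retry loop goes away. A rough pthread analog of that shape (all names invented; build with -pthread):

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct page { struct page *next; int free_slots; };
struct pool { pthread_mutex_t lock; struct page *pages; };

static struct page *pool_alloc(struct pool *p)
{
	struct page *pg;

	pthread_mutex_lock(&p->lock);
	for (pg = p->pages; pg; pg = pg->next)
		if (pg->free_slots)
			goto ready;

	/* Nothing free: the allocation may sleep, so drop the lock. */
	pthread_mutex_unlock(&p->lock);
	pg = calloc(1, sizeof(*pg));
	if (!pg)
		return NULL;
	pg->free_slots = 8;

	pthread_mutex_lock(&p->lock);
	pg->next = p->pages;		/* link in only once the lock is back */
	p->pages = pg;
ready:
	pg->free_slots--;
	pthread_mutex_unlock(&p->lock);
	return pg;
}

int main(void)
{
	struct pool p = { PTHREAD_MUTEX_INITIALIZER, NULL };

	printf("%p\n", (void *)pool_alloc(&p));
	printf("%p\n", (void *)pool_alloc(&p));
	return 0;
}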
+ */ + if (count < (end_index - start_index + 1)) { + lru_add_drain_all(); + invalidate_mapping_pages(mapping, start_index, end_index); + } + } break; default: ret = -EINVAL; diff --git a/mm/huge_memory.c b/mm/huge_memory.c index f0e5306eeb55..caf15b6fa753 100644 --- a/mm/huge_memory.c +++ b/mm/huge_memory.c @@ -950,6 +950,8 @@ int do_huge_pmd_wp_page(struct mm_struct *mm, struct vm_area_struct *vma, count_vm_event(THP_FAULT_FALLBACK); ret = do_huge_pmd_wp_page_fallback(mm, vma, address, pmd, orig_pmd, page, haddr); + if (ret & VM_FAULT_OOM) + split_huge_page(page); put_page(page); goto out; } @@ -957,6 +959,7 @@ int do_huge_pmd_wp_page(struct mm_struct *mm, struct vm_area_struct *vma, if (unlikely(mem_cgroup_newpage_charge(new_page, mm, GFP_KERNEL))) { put_page(new_page); + split_huge_page(page); put_page(page); ret |= VM_FAULT_OOM; goto out; diff --git a/mm/hugetlb.c b/mm/hugetlb.c index a799df59d310..c384e0901f9c 100644 --- a/mm/hugetlb.c +++ b/mm/hugetlb.c @@ -2431,7 +2431,8 @@ static int unmap_ref_private(struct mm_struct *mm, struct vm_area_struct *vma, * from page cache lookup which is in HPAGE_SIZE units. */ address = address & huge_page_mask(h); - pgoff = vma_hugecache_offset(h, vma, address); + pgoff = ((address - vma->vm_start) >> PAGE_SHIFT) + + vma->vm_pgoff; mapping = vma->vm_file->f_dentry->d_inode->i_mapping; /* diff --git a/mm/memblock.c b/mm/memblock.c index 280d3d7835d6..11e5bd174f3c 100644 --- a/mm/memblock.c +++ b/mm/memblock.c @@ -908,6 +908,30 @@ int __init_memblock memblock_is_region_reserved(phys_addr_t base, phys_addr_t si return memblock_overlaps_region(&memblock.reserved, base, size) >= 0; } +void __init_memblock memblock_trim_memory(phys_addr_t align) +{ + int i; + phys_addr_t start, end, orig_start, orig_end; + struct memblock_type *mem = &memblock.memory; + + for (i = 0; i < mem->cnt; i++) { + orig_start = mem->regions[i].base; + orig_end = mem->regions[i].base + mem->regions[i].size; + start = round_up(orig_start, align); + end = round_down(orig_end, align); + + if (start == orig_start && end == orig_end) + continue; + + if (start < end) { + mem->regions[i].base = start; + mem->regions[i].size = end - start; + } else { + memblock_remove_region(mem, i); + i--; + } + } +} void __init_memblock memblock_set_current_limit(phys_addr_t limit) { diff --git a/mm/memcontrol.c b/mm/memcontrol.c index 7685d4a0b3ce..81c275b3afed 100644 --- a/mm/memcontrol.c +++ b/mm/memcontrol.c @@ -1489,17 +1489,26 @@ static int mem_cgroup_count_children(struct mem_cgroup *memcg) u64 mem_cgroup_get_limit(struct mem_cgroup *memcg) { u64 limit; - u64 memsw; limit = res_counter_read_u64(&memcg->res, RES_LIMIT); - limit += total_swap_pages << PAGE_SHIFT; - memsw = res_counter_read_u64(&memcg->memsw, RES_LIMIT); /* - * If memsw is finite and limits the amount of swap space available - * to this memcg, return that limit. + * Do not consider swap space if we cannot swap due to swappiness */ - return min(limit, memsw); + if (mem_cgroup_swappiness(memcg)) { + u64 memsw; + + limit += total_swap_pages << PAGE_SHIFT; + memsw = res_counter_read_u64(&memcg->memsw, RES_LIMIT); + + /* + * If memsw is finite and limits the amount of swap space + * available to this memcg, return that limit. 
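memblock_trim_memory() above rounds each region's start up and its end down to the requested alignment and removes regions that vanish entirely. The same trimming as a standalone program (regions hypothetical):

#include <stdio.h>
#include <stdint.h>

#define ALIGN_UP(x, a)		(((x) + (a) - 1) & ~((uint64_t)(a) - 1))
#define ALIGN_DOWN(x, a)	((x) & ~((uint64_t)(a) - 1))

int main(void)
{
	/* hypothetical regions: { base, size } */
	uint64_t base[] = { 0x0, 0x100c00 };
	uint64_t size[] = { 0x9fc00, 0x200 };
	uint64_t align = 0x1000;

	for (int i = 0; i < 2; i++) {
		uint64_t start = ALIGN_UP(base[i], align);
		uint64_t end   = ALIGN_DOWN(base[i] + size[i], align);

		if (start < end)
			printf("region %d: [%#llx-%#llx)\n", i,
			       (unsigned long long)start,
			       (unsigned long long)end);
		else
			printf("region %d: dropped, smaller than one %#llx unit\n",
			       i, (unsigned long long)align);
	}
	return 0;
}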
+ */ + limit = min(limit, memsw); + } + + return limit; } static unsigned long mem_cgroup_reclaim(struct mem_cgroup *memcg, diff --git a/mm/memory-failure.c b/mm/memory-failure.c index 9299eea77f72..b68c8f66e4d3 100644 --- a/mm/memory-failure.c +++ b/mm/memory-failure.c @@ -1481,9 +1481,17 @@ int soft_offline_page(struct page *page, int flags) { int ret; unsigned long pfn = page_to_pfn(page); + struct page *hpage = compound_trans_head(page); if (PageHuge(page)) return soft_offline_huge_page(page, flags); + if (PageTransHuge(hpage)) { + if (PageAnon(hpage) && unlikely(split_huge_page(hpage))) { + pr_info("soft offline: %#lx: failed to split THP\n", + pfn); + return -EBUSY; + } + } ret = get_any_page(page, pfn, flags); if (ret < 0) diff --git a/mm/memory.c b/mm/memory.c index 6105f475fa86..2f42aab50e9b 100644 --- a/mm/memory.c +++ b/mm/memory.c @@ -182,10 +182,14 @@ static int tlb_next_batch(struct mmu_gather *tlb) return 1; } + if (tlb->batch_count == MAX_GATHER_BATCH_COUNT) + return 0; + batch = (void *)__get_free_pages(GFP_NOWAIT | __GFP_NOWARN, 0); if (!batch) return 0; + tlb->batch_count++; batch->next = NULL; batch->nr = 0; batch->max = MAX_GATHER_BATCH; @@ -212,6 +216,7 @@ void tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, bool fullmm) tlb->local.nr = 0; tlb->local.max = ARRAY_SIZE(tlb->__pages); tlb->active = &tlb->local; + tlb->batch_count = 0; #ifdef CONFIG_HAVE_RCU_TABLE_FREE tlb->batch = NULL; @@ -3489,6 +3494,7 @@ int handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma, if (unlikely(is_vm_hugetlb_page(vma))) return hugetlb_fault(mm, vma, address, flags); +retry: pgd = pgd_offset(mm, address); pud = pud_alloc(mm, pgd, address); if (!pud) @@ -3502,13 +3508,24 @@ int handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma, pmd, flags); } else { pmd_t orig_pmd = *pmd; + int ret; + barrier(); if (pmd_trans_huge(orig_pmd)) { if (flags & FAULT_FLAG_WRITE && !pmd_write(orig_pmd) && - !pmd_trans_splitting(orig_pmd)) - return do_huge_pmd_wp_page(mm, vma, address, - pmd, orig_pmd); + !pmd_trans_splitting(orig_pmd)) { + ret = do_huge_pmd_wp_page(mm, vma, address, pmd, + orig_pmd); + /* + * If COW results in an oom, the huge pmd will + * have been split, so retry the fault on the + * pte for a smaller charge. + */ + if (unlikely(ret & VM_FAULT_OOM)) + goto retry; + return ret; + } return 0; } } diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c index fc898cb4fe8f..77ad30613a3d 100644 --- a/mm/memory_hotplug.c +++ b/mm/memory_hotplug.c @@ -127,9 +127,6 @@ static void register_page_bootmem_info_section(unsigned long start_pfn) struct mem_section *ms; struct page *page, *memmap; - if (!pfn_valid(start_pfn)) - return; - section_nr = pfn_to_section_nr(start_pfn); ms = __nr_to_section(section_nr); @@ -188,9 +185,16 @@ void register_page_bootmem_info_node(struct pglist_data *pgdat) end_pfn = pfn + pgdat->node_spanned_pages; /* register_section info */ - for (; pfn < end_pfn; pfn += PAGES_PER_SECTION) - register_page_bootmem_info_section(pfn); - + for (; pfn < end_pfn; pfn += PAGES_PER_SECTION) { + /* + * Some platforms can assign the same pfn to multiple nodes - on + * node0 as well as nodeN. To avoid registering a pfn against + * multiple nodes we check that this pfn does not already + * reside in some other node. 
+ */ + if (pfn_valid(pfn) && (pfn_to_nid(pfn) == node)) + register_page_bootmem_info_section(pfn); + } } #endif /* !CONFIG_SPARSEMEM_VMEMMAP */ diff --git a/mm/mempolicy.c b/mm/mempolicy.c index bf5b485ecd31..82f1b027ba1c 100644 --- a/mm/mempolicy.c +++ b/mm/mempolicy.c @@ -607,6 +607,42 @@ check_range(struct mm_struct *mm, unsigned long start, unsigned long end, return first; } +/* + * Apply policy to a single VMA + * This must be called with the mmap_sem held for writing. + */ +static int vma_replace_policy(struct vm_area_struct *vma, + struct mempolicy *pol) +{ + int err; + struct mempolicy *old; + struct mempolicy *new; + + pr_debug("vma %lx-%lx/%lx vm_ops %p vm_file %p set_policy %p\n", + vma->vm_start, vma->vm_end, vma->vm_pgoff, + vma->vm_ops, vma->vm_file, + vma->vm_ops ? vma->vm_ops->set_policy : NULL); + + new = mpol_dup(pol); + if (IS_ERR(new)) + return PTR_ERR(new); + + if (vma->vm_ops && vma->vm_ops->set_policy) { + err = vma->vm_ops->set_policy(vma, new); + if (err) + goto err_out; + } + + old = vma->vm_policy; + vma->vm_policy = new; /* protected by mmap_sem */ + mpol_put(old); + + return 0; + err_out: + mpol_put(new); + return err; +} + /* Step 2: apply policy to a range and do splits. */ static int mbind_range(struct mm_struct *mm, unsigned long start, unsigned long end, struct mempolicy *new_pol) @@ -655,23 +691,9 @@ static int mbind_range(struct mm_struct *mm, unsigned long start, if (err) goto out; } - - /* - * Apply policy to a single VMA. The reference counting of - * policy for vma_policy linkages has already been handled by - * vma_merge and split_vma as necessary. If this is a shared - * policy then ->set_policy will increment the reference count - * for an sp node. - */ - pr_debug("vma %lx-%lx/%lx vm_ops %p vm_file %p set_policy %p\n", - vma->vm_start, vma->vm_end, vma->vm_pgoff, - vma->vm_ops, vma->vm_file, - vma->vm_ops ? vma->vm_ops->set_policy : NULL); - if (vma->vm_ops && vma->vm_ops->set_policy) { - err = vma->vm_ops->set_policy(vma, new_pol); - if (err) - goto out; - } + err = vma_replace_policy(vma, new_pol); + if (err) + goto out; } out: @@ -1510,8 +1532,18 @@ struct mempolicy *get_vma_policy(struct task_struct *task, addr); if (vpol) pol = vpol; - } else if (vma->vm_policy) + } else if (vma->vm_policy) { pol = vma->vm_policy; + + /* + * shmem_alloc_page() passes MPOL_F_SHARED policy with + * a pseudo vma whose vma->vm_ops=NULL. Take a reference + * count on these policies which will be dropped by + * mpol_cond_put() later + */ + if (mpol_needs_cond_ref(pol)) + mpol_get(pol); + } } if (!pol) pol = &default_policy; @@ -1977,28 +2009,6 @@ struct mempolicy *__mpol_dup(struct mempolicy *old) return new; } -/* - * If *frompol needs [has] an extra ref, copy *frompol to *tompol , - * eliminate the * MPOL_F_* flags that require conditional ref and - * [NOTE!!!] drop the extra ref. Not safe to reference *frompol directly - * after return. Use the returned value. - * - * Allows use of a mempolicy for, e.g., multiple allocations with a single - * policy lookup, even if the policy needs/has extra ref on lookup. - * shmem_readahead needs this. 
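vma_replace_policy() above follows a dup-then-swap shape: duplicate the incoming policy, let the optional set_policy hook veto it, and only then publish the copy and drop the old reference; on failure only the copy is released and the old policy stays in place. A toy refcounted version of that pattern (names invented):

#include <stdio.h>
#include <stdlib.h>

struct pol { int ref; int mode; };

static struct pol *pol_dup(const struct pol *src)
{
	struct pol *p = malloc(sizeof(*p));

	if (p) {
		*p = *src;
		p->ref = 1;
	}
	return p;
}

static void pol_put(struct pol *p)
{
	if (p && --p->ref == 0)
		free(p);
}

/*
 * Replace *slot with a private copy of 'want'.  The optional validate
 * hook may veto the copy; on any failure *slot is left untouched and
 * only the copy is dropped.
 */
static int replace_policy(struct pol **slot, const struct pol *want,
			  int (*validate)(const struct pol *))
{
	struct pol *copy = pol_dup(want);

	if (!copy)
		return -1;
	if (validate && validate(copy)) {
		pol_put(copy);
		return -1;
	}
	pol_put(*slot);		/* publish the copy, drop the old reference */
	*slot = copy;
	return 0;
}

static int accept_all(const struct pol *p) { (void)p; return 0; }

int main(void)
{
	struct pol first  = { 1, 1 };
	struct pol second = { 1, 2 };
	struct pol *cur = pol_dup(&first);
	int err = replace_policy(&cur, &second, accept_all);

	printf("err=%d mode=%d\n", err, cur->mode);
	pol_put(cur);
	return 0;
}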
- */ -struct mempolicy *__mpol_cond_copy(struct mempolicy *tompol, - struct mempolicy *frompol) -{ - if (!mpol_needs_cond_ref(frompol)) - return frompol; - - *tompol = *frompol; - tompol->flags &= ~MPOL_F_SHARED; /* copy doesn't need unref */ - __mpol_put(frompol); - return tompol; -} - /* Slow path of a mempolicy comparison */ bool __mpol_equal(struct mempolicy *a, struct mempolicy *b) { @@ -2035,7 +2045,7 @@ bool __mpol_equal(struct mempolicy *a, struct mempolicy *b) */ /* lookup first element intersecting start-end */ -/* Caller holds sp->lock */ +/* Caller holds sp->mutex */ static struct sp_node * sp_lookup(struct shared_policy *sp, unsigned long start, unsigned long end) { @@ -2099,36 +2109,50 @@ mpol_shared_policy_lookup(struct shared_policy *sp, unsigned long idx) if (!sp->root.rb_node) return NULL; - spin_lock(&sp->lock); + mutex_lock(&sp->mutex); sn = sp_lookup(sp, idx, idx+1); if (sn) { mpol_get(sn->policy); pol = sn->policy; } - spin_unlock(&sp->lock); + mutex_unlock(&sp->mutex); return pol; } +static void sp_free(struct sp_node *n) +{ + mpol_put(n->policy); + kmem_cache_free(sn_cache, n); +} + static void sp_delete(struct shared_policy *sp, struct sp_node *n) { pr_debug("deleting %lx-l%lx\n", n->start, n->end); rb_erase(&n->nd, &sp->root); - mpol_put(n->policy); - kmem_cache_free(sn_cache, n); + sp_free(n); } static struct sp_node *sp_alloc(unsigned long start, unsigned long end, struct mempolicy *pol) { - struct sp_node *n = kmem_cache_alloc(sn_cache, GFP_KERNEL); + struct sp_node *n; + struct mempolicy *newpol; + n = kmem_cache_alloc(sn_cache, GFP_KERNEL); if (!n) return NULL; + + newpol = mpol_dup(pol); + if (IS_ERR(newpol)) { + kmem_cache_free(sn_cache, n); + return NULL; + } + newpol->flags |= MPOL_F_SHARED; + n->start = start; n->end = end; - mpol_get(pol); - pol->flags |= MPOL_F_SHARED; /* for unref */ - n->policy = pol; + n->policy = newpol; + return n; } @@ -2136,10 +2160,10 @@ static struct sp_node *sp_alloc(unsigned long start, unsigned long end, static int shared_policy_replace(struct shared_policy *sp, unsigned long start, unsigned long end, struct sp_node *new) { - struct sp_node *n, *new2 = NULL; + struct sp_node *n; + int ret = 0; -restart: - spin_lock(&sp->lock); + mutex_lock(&sp->mutex); n = sp_lookup(sp, start, end); /* Take care of old policies in the same range. */ while (n && n->start < end) { @@ -2152,16 +2176,14 @@ restart: } else { /* Old policy spanning whole new range. 
*/ if (n->end > end) { + struct sp_node *new2; + new2 = sp_alloc(end, n->end, n->policy); if (!new2) { - spin_unlock(&sp->lock); - new2 = sp_alloc(end, n->end, n->policy); - if (!new2) - return -ENOMEM; - goto restart; + ret = -ENOMEM; + goto out; } n->end = start; sp_insert(sp, new2); - new2 = NULL; break; } else n->end = start; @@ -2172,12 +2194,9 @@ restart: } if (new) sp_insert(sp, new); - spin_unlock(&sp->lock); - if (new2) { - mpol_put(new2->policy); - kmem_cache_free(sn_cache, new2); - } - return 0; +out: + mutex_unlock(&sp->mutex); + return ret; } /** @@ -2195,7 +2214,7 @@ void mpol_shared_policy_init(struct shared_policy *sp, struct mempolicy *mpol) int ret; sp->root = RB_ROOT; /* empty tree == default mempolicy */ - spin_lock_init(&sp->lock); + mutex_init(&sp->mutex); if (mpol) { struct vm_area_struct pvma; @@ -2249,7 +2268,7 @@ int mpol_set_shared_policy(struct shared_policy *info, } err = shared_policy_replace(info, vma->vm_pgoff, vma->vm_pgoff+sz, new); if (err && new) - kmem_cache_free(sn_cache, new); + sp_free(new); return err; } @@ -2261,16 +2280,14 @@ void mpol_free_shared_policy(struct shared_policy *p) if (!p->root.rb_node) return; - spin_lock(&p->lock); + mutex_lock(&p->mutex); next = rb_first(&p->root); while (next) { n = rb_entry(next, struct sp_node, nd); next = rb_next(&n->nd); - rb_erase(&n->nd, &p->root); - mpol_put(n->policy); - kmem_cache_free(sn_cache, n); + sp_delete(p, n); } - spin_unlock(&p->lock); + mutex_unlock(&p->mutex); } /* assumes fs == KERNEL_DS */ @@ -2327,8 +2344,7 @@ void numa_default_policy(void) */ /* - * "local" is pseudo-policy: MPOL_PREFERRED with MPOL_F_LOCAL flag - * Used only for mpol_parse_str() and mpol_to_str() + * "local" is implemented internally by MPOL_PREFERRED with MPOL_F_LOCAL flag. */ #define MPOL_LOCAL MPOL_MAX static const char * const policy_modes[] = @@ -2343,28 +2359,21 @@ static const char * const policy_modes[] = #ifdef CONFIG_TMPFS /** - * mpol_parse_str - parse string to mempolicy + * mpol_parse_str - parse string to mempolicy, for tmpfs mpol mount option. * @str: string containing mempolicy to parse * @mpol: pointer to struct mempolicy pointer, returned on success. - * @no_context: flag whether to "contextualize" the mempolicy + * @unused: redundant argument, to be removed later. * * Format of input: * <mode>[=<flags>][:<nodelist>] * - * if @no_context is true, save the input nodemask in w.user_nodemask in - * the returned mempolicy. This will be used to "clone" the mempolicy in - * a specific context [cpuset] at a later time. Used to parse tmpfs mpol - * mount option. Note that if 'static' or 'relative' mode flags were - * specified, the input nodemask will already have been saved. Saving - * it again is redundant, but safe. 
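mpol_parse_str() documented above accepts "<mode>[=<flags>][:<nodelist>]" and, as in the function itself, locates the separators with strchr() before carving the string up. A tiny standalone illustration (input value hypothetical, error handling omitted):

#include <stdio.h>
#include <string.h>

int main(void)
{
	char str[] = "prefer=relative:0-3";	/* hypothetical tmpfs mpol= value */
	char *nodelist = strchr(str, ':');
	char *flags = strchr(str, '=');

	if (nodelist)
		*nodelist++ = '\0';	/* terminate the mode[=flags] part */
	if (flags)
		*flags++ = '\0';	/* terminate the mode part         */

	printf("mode=%s flags=%s nodelist=%s\n", str,
	       flags ? flags : "(none)", nodelist ? nodelist : "(none)");
	return 0;
}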
- * * On success, returns 0, else 1 */ -int mpol_parse_str(char *str, struct mempolicy **mpol, int no_context) +int mpol_parse_str(char *str, struct mempolicy **mpol, int unused) { struct mempolicy *new = NULL; unsigned short mode; - unsigned short uninitialized_var(mode_flags); + unsigned short mode_flags; nodemask_t nodes; char *nodelist = strchr(str, ':'); char *flags = strchr(str, '='); @@ -2452,24 +2461,23 @@ int mpol_parse_str(char *str, struct mempolicy **mpol, int no_context) if (IS_ERR(new)) goto out; - if (no_context) { - /* save for contextualization */ - new->w.user_nodemask = nodes; - } else { - int ret; - NODEMASK_SCRATCH(scratch); - if (scratch) { - task_lock(current); - ret = mpol_set_nodemask(new, &nodes, scratch); - task_unlock(current); - } else - ret = -ENOMEM; - NODEMASK_SCRATCH_FREE(scratch); - if (ret) { - mpol_put(new); - goto out; - } - } + /* + * Save nodes for mpol_to_str() to show the tmpfs mount options + * for /proc/mounts, /proc/pid/mounts and /proc/pid/mountinfo. + */ + if (mode != MPOL_PREFERRED) + new->v.nodes = nodes; + else if (nodelist) + new->v.preferred_node = first_node(nodes); + else + new->flags |= MPOL_F_LOCAL; + + /* + * Save nodes for contextualization: this will be used to "clone" + * the mempolicy in a specific context [cpuset] at a later time. + */ + new->w.user_nodemask = nodes; + err = 0; out: @@ -2489,13 +2497,13 @@ out: * @buffer: to contain formatted mempolicy string * @maxlen: length of @buffer * @pol: pointer to mempolicy to be formatted - * @no_context: "context free" mempolicy - use nodemask in w.user_nodemask + * @unused: redundant argument, to be removed later. * * Convert a mempolicy into a string. * Returns the number of characters in buffer (if positive) * or an error (negative) */ -int mpol_to_str(char *buffer, int maxlen, struct mempolicy *pol, int no_context) +int mpol_to_str(char *buffer, int maxlen, struct mempolicy *pol, int unused) { char *p = buffer; int l; @@ -2521,7 +2529,7 @@ int mpol_to_str(char *buffer, int maxlen, struct mempolicy *pol, int no_context) case MPOL_PREFERRED: nodes_clear(nodes); if (flags & MPOL_F_LOCAL) - mode = MPOL_LOCAL; /* pseudo-policy */ + mode = MPOL_LOCAL; else node_set(pol->v.preferred_node, nodes); break; @@ -2529,14 +2537,11 @@ int mpol_to_str(char *buffer, int maxlen, struct mempolicy *pol, int no_context) case MPOL_BIND: /* Fall through */ case MPOL_INTERLEAVE: - if (no_context) - nodes = pol->w.user_nodemask; - else - nodes = pol->v.nodes; + nodes = pol->v.nodes; break; default: - BUG(); + return -EINVAL; } l = strlen(policy_modes[mode]); diff --git a/mm/mmu_notifier.c b/mm/mmu_notifier.c index 862b60822d9f..8d1ca2de4080 100644 --- a/mm/mmu_notifier.c +++ b/mm/mmu_notifier.c @@ -14,10 +14,14 @@ #include <linux/export.h> #include <linux/mm.h> #include <linux/err.h> +#include <linux/srcu.h> #include <linux/rcupdate.h> #include <linux/sched.h> #include <linux/slab.h> +/* global SRCU for all MMs */ +static struct srcu_struct srcu; + /* * This function can't run concurrently against mmu_notifier_register * because mm->mm_users > 0 during mmu_notifier_register and exit_mmap @@ -25,58 +29,61 @@ * in parallel despite there being no task using this mm any more, * through the vmas outside of the exit_mmap context, such as with * vmtruncate. This serializes against mmu_notifier_unregister with - * the mmu_notifier_mm->lock in addition to RCU and it serializes - * against the other mmu notifiers with RCU. 
struct mmu_notifier_mm + * the mmu_notifier_mm->lock in addition to SRCU and it serializes + * against the other mmu notifiers with SRCU. struct mmu_notifier_mm * can't go away from under us as exit_mmap holds an mm_count pin * itself. */ void __mmu_notifier_release(struct mm_struct *mm) { struct mmu_notifier *mn; - struct hlist_node *n; + int id; /* - * RCU here will block mmu_notifier_unregister until - * ->release returns. + * srcu_read_lock() here will block synchronize_srcu() in + * mmu_notifier_unregister() until all registered + * ->release() callouts this function makes have + * returned. */ - rcu_read_lock(); - hlist_for_each_entry_rcu(mn, n, &mm->mmu_notifier_mm->list, hlist) - /* - * if ->release runs before mmu_notifier_unregister it - * must be handled as it's the only way for the driver - * to flush all existing sptes and stop the driver - * from establishing any more sptes before all the - * pages in the mm are freed. - */ - if (mn->ops->release) - mn->ops->release(mn, mm); - rcu_read_unlock(); - + id = srcu_read_lock(&srcu); spin_lock(&mm->mmu_notifier_mm->lock); while (unlikely(!hlist_empty(&mm->mmu_notifier_mm->list))) { mn = hlist_entry(mm->mmu_notifier_mm->list.first, struct mmu_notifier, hlist); + /* - * We arrived before mmu_notifier_unregister so - * mmu_notifier_unregister will do nothing other than - * to wait ->release to finish and - * mmu_notifier_unregister to return. + * Unlink. This will prevent mmu_notifier_unregister() + * from also making the ->release() callout. */ hlist_del_init_rcu(&mn->hlist); + spin_unlock(&mm->mmu_notifier_mm->lock); + + /* + * Clear sptes. (see 'release' description in mmu_notifier.h) + */ + if (mn->ops->release) + mn->ops->release(mn, mm); + + spin_lock(&mm->mmu_notifier_mm->lock); } spin_unlock(&mm->mmu_notifier_mm->lock); /* - * synchronize_rcu here prevents mmu_notifier_release to - * return to exit_mmap (which would proceed freeing all pages - * in the mm) until the ->release method returns, if it was - * invoked by mmu_notifier_unregister. - * - * The mmu_notifier_mm can't go away from under us because one - * mm_count is hold by exit_mmap. + * All callouts to ->release() which we have done are complete. + * Allow synchronize_srcu() in mmu_notifier_unregister() to complete + */ + srcu_read_unlock(&srcu, id); + + /* + * mmu_notifier_unregister() may have unlinked a notifier and may + * still be calling out to it. Additionally, other notifiers + * may have been active via vmtruncate() et. al. Block here + * to ensure that all notifier callouts for this mm have been + * completed and the sptes are really cleaned up before returning + * to exit_mmap(). 
*/ - synchronize_rcu(); + synchronize_srcu(&srcu); } /* @@ -89,14 +96,14 @@ int __mmu_notifier_clear_flush_young(struct mm_struct *mm, { struct mmu_notifier *mn; struct hlist_node *n; - int young = 0; + int young = 0, id; - rcu_read_lock(); + id = srcu_read_lock(&srcu); hlist_for_each_entry_rcu(mn, n, &mm->mmu_notifier_mm->list, hlist) { if (mn->ops->clear_flush_young) young |= mn->ops->clear_flush_young(mn, mm, address); } - rcu_read_unlock(); + srcu_read_unlock(&srcu, id); return young; } @@ -106,9 +113,9 @@ int __mmu_notifier_test_young(struct mm_struct *mm, { struct mmu_notifier *mn; struct hlist_node *n; - int young = 0; + int young = 0, id; - rcu_read_lock(); + id = srcu_read_lock(&srcu); hlist_for_each_entry_rcu(mn, n, &mm->mmu_notifier_mm->list, hlist) { if (mn->ops->test_young) { young = mn->ops->test_young(mn, mm, address); @@ -116,7 +123,7 @@ int __mmu_notifier_test_young(struct mm_struct *mm, break; } } - rcu_read_unlock(); + srcu_read_unlock(&srcu, id); return young; } @@ -126,8 +133,9 @@ void __mmu_notifier_change_pte(struct mm_struct *mm, unsigned long address, { struct mmu_notifier *mn; struct hlist_node *n; + int id; - rcu_read_lock(); + id = srcu_read_lock(&srcu); hlist_for_each_entry_rcu(mn, n, &mm->mmu_notifier_mm->list, hlist) { if (mn->ops->change_pte) mn->ops->change_pte(mn, mm, address, pte); @@ -138,7 +146,7 @@ void __mmu_notifier_change_pte(struct mm_struct *mm, unsigned long address, else if (mn->ops->invalidate_page) mn->ops->invalidate_page(mn, mm, address); } - rcu_read_unlock(); + srcu_read_unlock(&srcu, id); } void __mmu_notifier_invalidate_page(struct mm_struct *mm, @@ -146,13 +154,14 @@ void __mmu_notifier_invalidate_page(struct mm_struct *mm, { struct mmu_notifier *mn; struct hlist_node *n; + int id; - rcu_read_lock(); + id = srcu_read_lock(&srcu); hlist_for_each_entry_rcu(mn, n, &mm->mmu_notifier_mm->list, hlist) { if (mn->ops->invalidate_page) mn->ops->invalidate_page(mn, mm, address); } - rcu_read_unlock(); + srcu_read_unlock(&srcu, id); } void __mmu_notifier_invalidate_range_start(struct mm_struct *mm, @@ -160,13 +169,14 @@ void __mmu_notifier_invalidate_range_start(struct mm_struct *mm, { struct mmu_notifier *mn; struct hlist_node *n; + int id; - rcu_read_lock(); + id = srcu_read_lock(&srcu); hlist_for_each_entry_rcu(mn, n, &mm->mmu_notifier_mm->list, hlist) { if (mn->ops->invalidate_range_start) mn->ops->invalidate_range_start(mn, mm, start, end); } - rcu_read_unlock(); + srcu_read_unlock(&srcu, id); } void __mmu_notifier_invalidate_range_end(struct mm_struct *mm, @@ -174,13 +184,14 @@ void __mmu_notifier_invalidate_range_end(struct mm_struct *mm, { struct mmu_notifier *mn; struct hlist_node *n; + int id; - rcu_read_lock(); + id = srcu_read_lock(&srcu); hlist_for_each_entry_rcu(mn, n, &mm->mmu_notifier_mm->list, hlist) { if (mn->ops->invalidate_range_end) mn->ops->invalidate_range_end(mn, mm, start, end); } - rcu_read_unlock(); + srcu_read_unlock(&srcu, id); } static int do_mmu_notifier_register(struct mmu_notifier *mn, @@ -192,6 +203,12 @@ static int do_mmu_notifier_register(struct mmu_notifier *mn, BUG_ON(atomic_read(&mm->mm_users) <= 0); + /* + * Verify that mmu_notifier_init() already run and the global srcu is + * initialized. 
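The mmu_notifier conversion above trades RCU for one global SRCU: the call-outs run between srcu_read_lock() and srcu_read_unlock(), and unregister/release wait for them with synchronize_srcu(), which is what lets ->release() block. A very loose userspace analog, with a pthread rwlock standing in for SRCU (the rdlock side may sleep, the wrlock side waits for it; build with -pthread):

#include <pthread.h>
#include <stdio.h>
#include <unistd.h>

static pthread_rwlock_t srcu_like = PTHREAD_RWLOCK_INITIALIZER;

static void *notifier_callout(void *unused)
{
	(void)unused;
	pthread_rwlock_rdlock(&srcu_like);	/* srcu_read_lock() analog */
	printf("->release() call-out running (allowed to sleep)\n");
	sleep(1);
	pthread_rwlock_unlock(&srcu_like);	/* srcu_read_unlock() analog */
	return NULL;
}

int main(void)
{
	pthread_t t;

	pthread_create(&t, NULL, notifier_callout, NULL);
	usleep(100 * 1000);			/* let the call-out start */

	pthread_rwlock_wrlock(&srcu_like);	/* synchronize_srcu() analog: */
	pthread_rwlock_unlock(&srcu_like);	/* waits for sleeping readers */
	printf("all call-outs finished; safe to tear down\n");

	pthread_join(t, NULL);
	return 0;
}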
+ */ + BUG_ON(!srcu.per_cpu_ref); + ret = -ENOMEM; mmu_notifier_mm = kmalloc(sizeof(struct mmu_notifier_mm), GFP_KERNEL); if (unlikely(!mmu_notifier_mm)) @@ -274,8 +291,8 @@ void __mmu_notifier_mm_destroy(struct mm_struct *mm) /* * This releases the mm_count pin automatically and frees the mm * structure if it was the last user of it. It serializes against - * running mmu notifiers with RCU and against mmu_notifier_unregister - * with the unregister lock + RCU. All sptes must be dropped before + * running mmu notifiers with SRCU and against mmu_notifier_unregister + * with the unregister lock + SRCU. All sptes must be dropped before * calling mmu_notifier_unregister. ->release or any other notifier * method may be invoked concurrently with mmu_notifier_unregister, * and only after mmu_notifier_unregister returned we're guaranteed @@ -285,35 +302,43 @@ void mmu_notifier_unregister(struct mmu_notifier *mn, struct mm_struct *mm) { BUG_ON(atomic_read(&mm->mm_count) <= 0); + spin_lock(&mm->mmu_notifier_mm->lock); if (!hlist_unhashed(&mn->hlist)) { - /* - * RCU here will force exit_mmap to wait ->release to finish - * before freeing the pages. - */ - rcu_read_lock(); + int id; /* - * exit_mmap will block in mmu_notifier_release to - * guarantee ->release is called before freeing the - * pages. + * Ensure we synchronize up with __mmu_notifier_release(). */ + id = srcu_read_lock(&srcu); + + hlist_del_rcu(&mn->hlist); + spin_unlock(&mm->mmu_notifier_mm->lock); + if (mn->ops->release) mn->ops->release(mn, mm); - rcu_read_unlock(); - spin_lock(&mm->mmu_notifier_mm->lock); - hlist_del_rcu(&mn->hlist); + /* + * Allow __mmu_notifier_release() to complete. + */ + srcu_read_unlock(&srcu, id); + } else spin_unlock(&mm->mmu_notifier_mm->lock); - } /* - * Wait any running method to finish, of course including - * ->release if it was run by mmu_notifier_relase instead of us. + * Wait for any running method to finish, including ->release() if it + * was run by __mmu_notifier_release() instead of us. */ - synchronize_rcu(); + synchronize_srcu(&srcu); BUG_ON(atomic_read(&mm->mm_count) <= 0); mmdrop(mm); } EXPORT_SYMBOL_GPL(mmu_notifier_unregister); + +static int __init mmu_notifier_init(void) +{ + return init_srcu_struct(&srcu); +} + +module_init(mmu_notifier_init); diff --git a/mm/page-writeback.c b/mm/page-writeback.c index 26adea8ca2e7..bc8465f579a8 100644 --- a/mm/page-writeback.c +++ b/mm/page-writeback.c @@ -187,6 +187,18 @@ static unsigned long highmem_dirtyable_memory(unsigned long total) zone_reclaimable_pages(z) - z->dirty_balance_reserve; } /* + * Unreclaimable memory (kernel memory or anonymous memory + * without swap) can bring down the dirtyable pages below + * the zone's dirty balance reserve and the above calculation + * will underflow. However we still want to add in nodes + * which are below threshold (negative values) to get a more + * accurate calculation but make sure that the total never + * underflows. + */ + if ((long)x < 0) + x = 0; + + /* * Make sure that the number of highmem pages is never larger * than the number of the total dirtyable memory. 
This can only * occur in very strange VM situations but we want to make sure @@ -208,8 +220,8 @@ unsigned long global_dirtyable_memory(void) { unsigned long x; - x = global_page_state(NR_FREE_PAGES) + global_reclaimable_pages() - - dirty_balance_reserve; + x = global_page_state(NR_FREE_PAGES) + global_reclaimable_pages(); + x -= min(x, dirty_balance_reserve); if (!vm_highmem_is_dirtyable) x -= highmem_dirtyable_memory(x); @@ -276,9 +288,12 @@ static unsigned long zone_dirtyable_memory(struct zone *zone) * highmem zone can hold its share of dirty pages, so we don't * care about vm_highmem_is_dirtyable here. */ - return zone_page_state(zone, NR_FREE_PAGES) + - zone_reclaimable_pages(zone) - - zone->dirty_balance_reserve; + unsigned long nr_pages = zone_page_state(zone, NR_FREE_PAGES) + + zone_reclaimable_pages(zone); + + /* don't allow this to underflow */ + nr_pages -= min(nr_pages, zone->dirty_balance_reserve); + return nr_pages; } /** diff --git a/mm/page_alloc.c b/mm/page_alloc.c index 0f20f1c354ef..ef868e299932 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c @@ -581,7 +581,7 @@ static inline void __free_one_page(struct page *page, combined_idx = buddy_idx & page_idx; higher_page = page + (combined_idx - page_idx); buddy_idx = __find_buddy_index(combined_idx, order + 1); - higher_buddy = page + (buddy_idx - combined_idx); + higher_buddy = higher_page + (buddy_idx - combined_idx); if (page_is_buddy(higher_page, higher_buddy, order + 1)) { list_add_tail(&page->lru, &zone->free_area[order].free_list[migratetype]); @@ -4273,10 +4273,11 @@ static void __meminit calculate_node_totalpages(struct pglist_data *pgdat, * round what is now in bits to nearest long in bits, then return it in * bytes. */ -static unsigned long __init usemap_size(unsigned long zonesize) +static unsigned long __init usemap_size(unsigned long zone_start_pfn, unsigned long zonesize) { unsigned long usemapsize; + zonesize += zone_start_pfn & (pageblock_nr_pages-1); usemapsize = roundup(zonesize, pageblock_nr_pages); usemapsize = usemapsize >> pageblock_order; usemapsize *= NR_PAGEBLOCK_BITS; @@ -4286,17 +4287,19 @@ static unsigned long __init usemap_size(unsigned long zonesize) } static void __init setup_usemap(struct pglist_data *pgdat, - struct zone *zone, unsigned long zonesize) + struct zone *zone, + unsigned long zone_start_pfn, + unsigned long zonesize) { - unsigned long usemapsize = usemap_size(zonesize); + unsigned long usemapsize = usemap_size(zone_start_pfn, zonesize); zone->pageblock_flags = NULL; if (usemapsize) zone->pageblock_flags = alloc_bootmem_node_nopanic(pgdat, usemapsize); } #else -static inline void setup_usemap(struct pglist_data *pgdat, - struct zone *zone, unsigned long zonesize) {} +static inline void setup_usemap(struct pglist_data *pgdat, struct zone *zone, + unsigned long zone_start_pfn, unsigned long zonesize) {} #endif /* CONFIG_SPARSEMEM */ #ifdef CONFIG_HUGETLB_PAGE_SIZE_VARIABLE @@ -4424,7 +4427,7 @@ static void __paginginit free_area_init_core(struct pglist_data *pgdat, continue; set_pageblock_order(pageblock_default_order()); - setup_usemap(pgdat, zone, size); + setup_usemap(pgdat, zone, zone_start_pfn, size); ret = init_currently_empty_zone(zone, zone_start_pfn, size, MEMMAP_EARLY); BUG_ON(ret); @@ -5409,7 +5412,7 @@ static inline int pfn_to_bitidx(struct zone *zone, unsigned long pfn) pfn &= (PAGES_PER_SECTION-1); return (pfn >> pageblock_order) * NR_PAGEBLOCK_BITS; #else - pfn = pfn - zone->zone_start_pfn; + pfn = pfn - round_down(zone->zone_start_pfn, pageblock_nr_pages); return 
(pfn >> pageblock_order) * NR_PAGEBLOCK_BITS; #endif /* CONFIG_SPARSEMEM */ } diff --git a/mm/rmap.c b/mm/rmap.c index 5b5ad584ffb7..bfca52c96999 100644 --- a/mm/rmap.c +++ b/mm/rmap.c @@ -56,6 +56,7 @@ #include <linux/mmu_notifier.h> #include <linux/migrate.h> #include <linux/hugetlb.h> +#include <linux/backing-dev.h> #include <asm/tlbflush.h> @@ -977,11 +978,8 @@ int page_mkclean(struct page *page) if (page_mapped(page)) { struct address_space *mapping = page_mapping(page); - if (mapping) { + if (mapping) ret = page_mkclean_file(mapping, page); - if (page_test_and_clear_dirty(page_to_pfn(page), 1)) - ret = 1; - } } return ret; @@ -1167,6 +1165,7 @@ void page_add_file_rmap(struct page *page) */ void page_remove_rmap(struct page *page) { + struct address_space *mapping = page_mapping(page); bool anon = PageAnon(page); bool locked; unsigned long flags; @@ -1189,8 +1188,19 @@ void page_remove_rmap(struct page *page) * this if the page is anon, so about to be freed; but perhaps * not if it's in swapcache - there might be another pte slot * containing the swap entry, but page not yet written to swap. + * + * And we can skip it on file pages, so long as the filesystem + * participates in dirty tracking; but need to catch shm and tmpfs + * and ramfs pages which have been modified since creation by read + * fault. + * + * Note that mapping must be decided above, before decrementing + * mapcount (which luckily provides a barrier): once page is unmapped, + * it could be truncated and page->mapping reset to NULL at any moment. + * Note also that we are relying on page_mapping(page) to set mapping + * to &swapper_space when PageSwapCache(page). */ - if ((!anon || PageSwapCache(page)) && + if (mapping && !mapping_cap_account_dirty(mapping) && page_test_and_clear_dirty(page_to_pfn(page), 1)) set_page_dirty(page); /* diff --git a/mm/shmem.c b/mm/shmem.c index 2910f0d58c46..448956691e6b 100644 --- a/mm/shmem.c +++ b/mm/shmem.c @@ -595,7 +595,7 @@ static void shmem_evict_inode(struct inode *inode) kfree(xattr->name); kfree(xattr); } - BUG_ON(inode->i_blocks); + WARN_ON(inode->i_blocks); shmem_free_inode(inode->i_sb); end_writeback(inode); } @@ -798,24 +798,28 @@ static struct mempolicy *shmem_get_sbmpol(struct shmem_sb_info *sbinfo) static struct page *shmem_swapin(swp_entry_t swap, gfp_t gfp, struct shmem_inode_info *info, pgoff_t index) { - struct mempolicy mpol, *spol; struct vm_area_struct pvma; - - spol = mpol_cond_copy(&mpol, - mpol_shared_policy_lookup(&info->policy, index)); + struct page *page; /* Create a pseudo vma that just contains the policy */ pvma.vm_start = 0; pvma.vm_pgoff = index; pvma.vm_ops = NULL; - pvma.vm_policy = spol; - return swapin_readahead(swap, gfp, &pvma, 0); + pvma.vm_policy = mpol_shared_policy_lookup(&info->policy, index); + + page = swapin_readahead(swap, gfp, &pvma, 0); + + /* Drop reference taken by mpol_shared_policy_lookup() */ + mpol_cond_put(pvma.vm_policy); + + return page; } static struct page *shmem_alloc_page(gfp_t gfp, struct shmem_inode_info *info, pgoff_t index) { struct vm_area_struct pvma; + struct page *page; /* Create a pseudo vma that just contains the policy */ pvma.vm_start = 0; @@ -823,10 +827,12 @@ static struct page *shmem_alloc_page(gfp_t gfp, pvma.vm_ops = NULL; pvma.vm_policy = mpol_shared_policy_lookup(&info->policy, index); - /* - * alloc_page_vma() will drop the shared policy reference - */ - return alloc_page_vma(gfp, &pvma, 0); + page = alloc_page_vma(gfp, &pvma, 0); + + /* Drop reference taken by mpol_shared_policy_lookup() */ + 
mpol_cond_put(pvma.vm_policy); + + return page; } #else /* !CONFIG_NUMA */ #ifdef CONFIG_TMPFS @@ -2018,12 +2024,14 @@ static struct dentry *shmem_fh_to_dentry(struct super_block *sb, { struct inode *inode; struct dentry *dentry = NULL; - u64 inum = fid->raw[2]; - inum = (inum << 32) | fid->raw[1]; + u64 inum; if (fh_len < 3) return NULL; + inum = fid->raw[2]; + inum = (inum << 32) | fid->raw[1]; + inode = ilookup5(sb, (unsigned long)(inum + fid->raw[0]), shmem_match, fid->raw); if (inode) { @@ -2169,6 +2177,7 @@ static int shmem_remount_fs(struct super_block *sb, int *flags, char *data) unsigned long inodes; int error = -EINVAL; + config.mpol = NULL; if (shmem_parse_options(data, &config, true)) return error; @@ -2193,8 +2202,13 @@ static int shmem_remount_fs(struct super_block *sb, int *flags, char *data) sbinfo->max_inodes = config.max_inodes; sbinfo->free_inodes = config.max_inodes - inodes; - mpol_put(sbinfo->mpol); - sbinfo->mpol = config.mpol; /* transfers initial ref */ + /* + * Preserve previous mempolicy unless mpol remount option was specified. + */ + if (config.mpol) { + mpol_put(sbinfo->mpol); + sbinfo->mpol = config.mpol; /* transfers initial ref */ + } out: spin_unlock(&sbinfo->stat_lock); return error; diff --git a/mm/slab.c b/mm/slab.c index e901a36e2520..da2bb689a008 100644 --- a/mm/slab.c +++ b/mm/slab.c @@ -1685,9 +1685,6 @@ void __init kmem_cache_init_late(void) g_cpucache_up = LATE; - /* Annotate slab for lockdep -- annotate the malloc caches */ - init_lock_keys(); - /* 6) resize the head arrays to their final sizes */ mutex_lock(&cache_chain_mutex); list_for_each_entry(cachep, &cache_chain, next) @@ -1695,6 +1692,9 @@ void __init kmem_cache_init_late(void) BUG(); mutex_unlock(&cache_chain_mutex); + /* Annotate slab for lockdep -- annotate the malloc caches */ + init_lock_keys(); + /* Done! 
*/ g_cpucache_up = FULL; diff --git a/mm/sparse.c b/mm/sparse.c index a8bc7d364deb..290dba25a7ed 100644 --- a/mm/sparse.c +++ b/mm/sparse.c @@ -619,7 +619,7 @@ static void __kfree_section_memmap(struct page *memmap, unsigned long nr_pages) { return; /* XXX: Not implemented yet */ } -static void free_map_bootmem(struct page *page, unsigned long nr_pages) +static void free_map_bootmem(struct page *memmap, unsigned long nr_pages) { } #else @@ -660,10 +660,11 @@ static void __kfree_section_memmap(struct page *memmap, unsigned long nr_pages) get_order(sizeof(struct page) * nr_pages)); } -static void free_map_bootmem(struct page *page, unsigned long nr_pages) +static void free_map_bootmem(struct page *memmap, unsigned long nr_pages) { unsigned long maps_section_nr, removing_section_nr, i; unsigned long magic; + struct page *page = virt_to_page(memmap); for (i = 0; i < nr_pages; i++, page++) { magic = (unsigned long) page->lru.next; @@ -712,13 +713,10 @@ static void free_section_usemap(struct page *memmap, unsigned long *usemap) */ if (memmap) { - struct page *memmap_page; - memmap_page = virt_to_page(memmap); - nr_pages = PAGE_ALIGN(PAGES_PER_SECTION * sizeof(struct page)) >> PAGE_SHIFT; - free_map_bootmem(memmap_page, nr_pages); + free_map_bootmem(memmap, nr_pages); } } diff --git a/mm/truncate.c b/mm/truncate.c index 61a183b89df6..4224627695ba 100644 --- a/mm/truncate.c +++ b/mm/truncate.c @@ -394,11 +394,12 @@ invalidate_complete_page2(struct address_space *mapping, struct page *page) if (page_has_private(page) && !try_to_release_page(page, GFP_KERNEL)) return 0; + clear_page_mlock(page); + spin_lock_irq(&mapping->tree_lock); if (PageDirty(page)) goto failed; - clear_page_mlock(page); BUG_ON(page_has_private(page)); __delete_from_page_cache(page); spin_unlock_irq(&mapping->tree_lock); diff --git a/mm/vmscan.c b/mm/vmscan.c index be5bc0af2e76..e6ca5051c021 100644 --- a/mm/vmscan.c +++ b/mm/vmscan.c @@ -1983,10 +1983,10 @@ static void get_scan_count(struct mem_cgroup_zone *mz, struct scan_control *sc, * proportional to the fraction of recently scanned pages on * each list that were recently referenced and in active use. 
*/ - ap = (anon_prio + 1) * (reclaim_stat->recent_scanned[0] + 1); + ap = anon_prio * (reclaim_stat->recent_scanned[0] + 1); ap /= reclaim_stat->recent_rotated[0] + 1; - fp = (file_prio + 1) * (reclaim_stat->recent_scanned[1] + 1); + fp = file_prio * (reclaim_stat->recent_scanned[1] + 1); fp /= reclaim_stat->recent_rotated[1] + 1; spin_unlock_irq(&mz->zone->lru_lock); @@ -1999,7 +1999,7 @@ out: unsigned long scan; scan = zone_nr_lru_pages(mz, lru); - if (priority || noswap) { + if (priority || noswap || !vmscan_swappiness(mz, sc)) { scan >>= priority; if (!scan && force_scan) scan = SWAP_CLUSTER_MAX; @@ -3128,6 +3128,8 @@ static int kswapd(void *p) &balanced_classzone_idx); } } + + current->reclaim_state = NULL; return 0; } diff --git a/net/8021q/vlan_core.c b/net/8021q/vlan_core.c index 4d39d802be2c..912613c566cb 100644 --- a/net/8021q/vlan_core.c +++ b/net/8021q/vlan_core.c @@ -5,7 +5,7 @@ #include <linux/export.h> #include "vlan.h" -bool vlan_do_receive(struct sk_buff **skbp, bool last_handler) +bool vlan_do_receive(struct sk_buff **skbp) { struct sk_buff *skb = *skbp; u16 vlan_id = skb->vlan_tci & VLAN_VID_MASK; @@ -13,14 +13,8 @@ bool vlan_do_receive(struct sk_buff **skbp, bool last_handler) struct vlan_pcpu_stats *rx_stats; vlan_dev = vlan_find_dev(skb->dev, vlan_id); - if (!vlan_dev) { - /* Only the last call to vlan_do_receive() should change - * pkt_type to PACKET_OTHERHOST - */ - if (vlan_id && last_handler) - skb->pkt_type = PACKET_OTHERHOST; + if (!vlan_dev) return false; - } skb = *skbp = skb_share_check(skb, GFP_ATOMIC); if (unlikely(!skb)) @@ -106,7 +100,6 @@ static struct sk_buff *vlan_reorder_header(struct sk_buff *skb) return NULL; memmove(skb->data - ETH_HLEN, skb->data - VLAN_ETH_HLEN, 2 * ETH_ALEN); skb->mac_header += VLAN_HLEN; - skb_reset_mac_len(skb); return skb; } @@ -140,6 +133,8 @@ struct sk_buff *vlan_untag(struct sk_buff *skb) skb_reset_network_header(skb); skb_reset_transport_header(skb); + skb_reset_mac_len(skb); + return skb; err_free: diff --git a/net/atm/common.c b/net/atm/common.c index b4b44dbed645..0c0ad930a632 100644 --- a/net/atm/common.c +++ b/net/atm/common.c @@ -812,6 +812,7 @@ int vcc_getsockopt(struct socket *sock, int level, int optname, if (!vcc->dev || !test_bit(ATM_VF_ADDR, &vcc->flags)) return -ENOTCONN; + memset(&pvc, 0, sizeof(pvc)); pvc.sap_family = AF_ATMPVC; pvc.sap_addr.itf = vcc->dev->number; pvc.sap_addr.vpi = vcc->vpi; diff --git a/net/atm/pvc.c b/net/atm/pvc.c index 3a734919c36c..ae0324021407 100644 --- a/net/atm/pvc.c +++ b/net/atm/pvc.c @@ -95,6 +95,7 @@ static int pvc_getname(struct socket *sock, struct sockaddr *sockaddr, return -ENOTCONN; *sockaddr_len = sizeof(struct sockaddr_atmpvc); addr = (struct sockaddr_atmpvc *)sockaddr; + memset(addr, 0, sizeof(*addr)); addr->sap_family = AF_ATMPVC; addr->sap_addr.itf = vcc->dev->number; addr->sap_addr.vpi = vcc->vpi; diff --git a/net/batman-adv/bat_iv_ogm.c b/net/batman-adv/bat_iv_ogm.c index a6d5d63fb6ad..fa701b6f7948 100644 --- a/net/batman-adv/bat_iv_ogm.c +++ b/net/batman-adv/bat_iv_ogm.c @@ -72,7 +72,7 @@ static unsigned long bat_iv_ogm_emit_send_time(const struct bat_priv *bat_priv) { return jiffies + msecs_to_jiffies( atomic_read(&bat_priv->orig_interval) - - JITTER + (random32() % 2*JITTER)); + JITTER + (random32() % (2*JITTER))); } /* when do we schedule a ogm packet to be sent */ diff --git a/net/bluetooth/hci_conn.c b/net/bluetooth/hci_conn.c index ed0b87e33b2f..7a201173329e 100644 --- a/net/bluetooth/hci_conn.c +++ b/net/bluetooth/hci_conn.c @@ -42,6 +42,7 @@ #include 
<net/bluetooth/bluetooth.h> #include <net/bluetooth/hci_core.h> +#include <net/bluetooth/smp.h> static void hci_le_connect(struct hci_conn *conn) { @@ -672,6 +673,9 @@ int hci_conn_security(struct hci_conn *conn, __u8 sec_level, __u8 auth_type) { BT_DBG("conn %p", conn); + if (conn->type == LE_LINK) + return smp_conn_security(conn, sec_level); + /* For sdp we don't need the link key. */ if (sec_level == BT_SECURITY_SDP) return 1; diff --git a/net/bluetooth/hci_core.c b/net/bluetooth/hci_core.c index 748cdf553328..a1365e065799 100644 --- a/net/bluetooth/hci_core.c +++ b/net/bluetooth/hci_core.c @@ -766,6 +766,8 @@ static int hci_dev_do_close(struct hci_dev *hdev) cancel_work_sync(&hdev->le_scan); + cancel_delayed_work(&hdev->power_off); + hci_req_cancel(hdev, ENODEV); hci_req_lock(hdev); @@ -1880,6 +1882,8 @@ void hci_unregister_dev(struct hci_dev *hdev) for (i = 0; i < NUM_REASSEMBLY; i++) kfree_skb(hdev->reassembly[i]); + cancel_work_sync(&hdev->power_on); + if (!test_bit(HCI_INIT, &hdev->flags) && !test_bit(HCI_SETUP, &hdev->dev_flags)) { hci_dev_lock(hdev); diff --git a/net/bluetooth/hci_event.c b/net/bluetooth/hci_event.c index 626318c122f4..aba4fccd768a 100755 --- a/net/bluetooth/hci_event.c +++ b/net/bluetooth/hci_event.c @@ -1357,6 +1357,9 @@ static bool hci_resolve_next_name(struct hci_dev *hdev) return false; e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY, NAME_NEEDED); + if (!e) + return false; + if (hci_resolve_name(hdev, e) == 0) { e->name_state = NAME_PENDING; return true; @@ -1385,12 +1388,20 @@ static void hci_check_pending_name(struct hci_dev *hdev, struct hci_conn *conn, return; e = hci_inquiry_cache_lookup_resolve(hdev, bdaddr, NAME_PENDING); - if (e) { + /* If the device was not found in a list of found devices names of which + * are pending. 
there is no need to continue resolving a next name as it + * will be done upon receiving another Remote Name Request Complete + * Event */ + if (!e) + return; + + list_del(&e->list); + if (name) { e->name_state = NAME_KNOWN; - list_del(&e->list); - if (name) - mgmt_remote_name(hdev, bdaddr, ACL_LINK, 0x00, - e->data.rssi, name, name_len); + mgmt_remote_name(hdev, bdaddr, ACL_LINK, 0x00, + e->data.rssi, name, name_len); + } else { + e->name_state = NAME_NOT_KNOWN; } if (hci_resolve_next_name(hdev)) @@ -1749,7 +1760,12 @@ static inline void hci_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *s if (conn->type == ACL_LINK) { conn->state = BT_CONFIG; hci_conn_hold(conn); - conn->disc_timeout = HCI_DISCONN_TIMEOUT; + + if (!conn->out && !hci_conn_ssp_enabled(conn) && + !hci_find_link_key(hdev, &ev->bdaddr)) + conn->disc_timeout = HCI_PAIRING_TIMEOUT; + else + conn->disc_timeout = HCI_DISCONN_TIMEOUT; } else conn->state = BT_CONNECTED; @@ -2361,7 +2377,7 @@ static inline void hci_cmd_complete_evt(struct hci_dev *hdev, struct sk_buff *sk if (ev->opcode != HCI_OP_NOP) del_timer(&hdev->cmd_timer); - if (ev->ncmd) { + if (ev->ncmd && !test_bit(HCI_RESET, &hdev->flags)) { atomic_set(&hdev->cmd_cnt, 1); if (!skb_queue_empty(&hdev->cmd_q)) queue_work(hdev->workqueue, &hdev->cmd_work); diff --git a/net/bluetooth/hci_sock.c b/net/bluetooth/hci_sock.c index 5914623f426a..bedc768c8cdf 100644 --- a/net/bluetooth/hci_sock.c +++ b/net/bluetooth/hci_sock.c @@ -706,6 +706,7 @@ static int hci_sock_getname(struct socket *sock, struct sockaddr *addr, int *add *addr_len = sizeof(*haddr); haddr->hci_family = AF_BLUETOOTH; haddr->hci_dev = hdev->id; + haddr->hci_channel= 0; release_sock(sk); return 0; @@ -1016,6 +1017,7 @@ static int hci_sock_getsockopt(struct socket *sock, int level, int optname, char { struct hci_filter *f = &hci_pi(sk)->filter; + memset(&uf, 0, sizeof(uf)); uf.type_mask = f->type_mask; uf.opcode = f->opcode; uf.event_mask[0] = *((u32 *) f->event_mask + 0); diff --git a/net/bluetooth/hidp/core.c b/net/bluetooth/hidp/core.c index d478be11d562..7a0d9840866d 100644 --- a/net/bluetooth/hidp/core.c +++ b/net/bluetooth/hidp/core.c @@ -949,7 +949,7 @@ static int hidp_setup_hid(struct hidp_session *session, hid->version = req->version; hid->country = req->country; - strncpy(hid->name, req->name, 128); + strncpy(hid->name, req->name, sizeof(req->name) - 1); strncpy(hid->phys, batostr(&bt_sk(session->ctrl_sock->sk)->src), 64); strncpy(hid->uniq, batostr(&bt_sk(session->ctrl_sock->sk)->dst), 64); diff --git a/net/bluetooth/l2cap_core.c b/net/bluetooth/l2cap_core.c index 0939c7295a64..e49f200c68e5 100644 --- a/net/bluetooth/l2cap_core.c +++ b/net/bluetooth/l2cap_core.c @@ -893,6 +893,7 @@ static void l2cap_le_conn_ready(struct l2cap_conn *conn) sk = chan->sk; hci_conn_hold(conn->hcon); + conn->hcon->disc_timeout = HCI_DISCONN_TIMEOUT; bacpy(&bt_sk(sk)->src, conn->src); bacpy(&bt_sk(sk)->dst, conn->dst); @@ -936,14 +937,15 @@ static void l2cap_chan_ready(struct l2cap_chan *chan) static void l2cap_conn_ready(struct l2cap_conn *conn) { struct l2cap_chan *chan; + struct hci_conn *hcon = conn->hcon; BT_DBG("conn %p", conn); - if (!conn->hcon->out && conn->hcon->type == LE_LINK) + if (!hcon->out && hcon->type == LE_LINK) l2cap_le_conn_ready(conn); - if (conn->hcon->out && conn->hcon->type == LE_LINK) - smp_conn_security(conn, conn->hcon->pending_sec_level); + if (hcon->out && hcon->type == LE_LINK) + smp_conn_security(hcon, hcon->pending_sec_level); mutex_lock(&conn->chan_lock); @@ -951,8 +953,8 @@ static 
void l2cap_conn_ready(struct l2cap_conn *conn) l2cap_chan_lock(chan); - if (conn->hcon->type == LE_LINK) { - if (smp_conn_security(conn, chan->sec_level)) + if (hcon->type == LE_LINK) { + if (smp_conn_security(hcon, chan->sec_level)) l2cap_chan_ready(chan); } else if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) { @@ -2583,12 +2585,14 @@ static void l2cap_conf_rfc_get(struct l2cap_chan *chan, void *rsp, int len) while (len >= L2CAP_CONF_OPT_SIZE) { len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val); - switch (type) { - case L2CAP_CONF_RFC: - if (olen == sizeof(rfc)) - memcpy(&rfc, (void *)val, olen); - goto done; - } + if (type != L2CAP_CONF_RFC) + continue; + + if (olen != sizeof(rfc)) + break; + + memcpy(&rfc, (void *)val, olen); + goto done; } /* Use sane default values in case a misbehaving remote device diff --git a/net/bluetooth/l2cap_sock.c b/net/bluetooth/l2cap_sock.c index 7ce5ab54ba5b..882c29679810 100644 --- a/net/bluetooth/l2cap_sock.c +++ b/net/bluetooth/l2cap_sock.c @@ -242,6 +242,7 @@ static int l2cap_sock_getname(struct socket *sock, struct sockaddr *addr, int *l BT_DBG("sock %p, sk %p", sock, sk); + memset(la, 0, sizeof(struct sockaddr_l2)); addr->sa_family = AF_BLUETOOTH; *len = sizeof(struct sockaddr_l2); @@ -587,7 +588,7 @@ static int l2cap_sock_setsockopt(struct socket *sock, int level, int optname, ch break; } - if (smp_conn_security(conn, sec.level)) + if (smp_conn_security(conn->hcon, sec.level)) break; sk->sk_state = BT_CONFIG; chan->state = BT_CONFIG; diff --git a/net/bluetooth/mgmt.c b/net/bluetooth/mgmt.c index c05c3a69c372..00879b8215e7 100644 --- a/net/bluetooth/mgmt.c +++ b/net/bluetooth/mgmt.c @@ -2801,6 +2801,22 @@ int mgmt_powered(struct hci_dev *hdev, u8 powered) if (scan) hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan); + if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) { + u8 ssp = 1; + + hci_send_cmd(hdev, HCI_OP_WRITE_SSP_MODE, 1, &ssp); + } + + if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) { + struct hci_cp_write_le_host_supported cp; + + cp.le = 1; + cp.simul = !!(hdev->features[6] & LMP_SIMUL_LE_BR); + + hci_send_cmd(hdev, HCI_OP_WRITE_LE_HOST_SUPPORTED, + sizeof(cp), &cp); + } + update_class(hdev); update_name(hdev, hdev->dev_name); update_eir(hdev); diff --git a/net/bluetooth/rfcomm/sock.c b/net/bluetooth/rfcomm/sock.c index a55a43e9f70e..8d1edd7207df 100644 --- a/net/bluetooth/rfcomm/sock.c +++ b/net/bluetooth/rfcomm/sock.c @@ -485,7 +485,7 @@ static int rfcomm_sock_accept(struct socket *sock, struct socket *newsock, int f long timeo; int err = 0; - lock_sock(sk); + lock_sock_nested(sk, SINGLE_DEPTH_NESTING); if (sk->sk_type != SOCK_STREAM) { err = -EINVAL; @@ -522,7 +522,7 @@ static int rfcomm_sock_accept(struct socket *sock, struct socket *newsock, int f release_sock(sk); timeo = schedule_timeout(timeo); - lock_sock(sk); + lock_sock_nested(sk, SINGLE_DEPTH_NESTING); } __set_current_state(TASK_RUNNING); remove_wait_queue(sk_sleep(sk), &wait); @@ -546,6 +546,7 @@ static int rfcomm_sock_getname(struct socket *sock, struct sockaddr *addr, int * BT_DBG("sock %p, sk %p", sock, sk); + memset(sa, 0, sizeof(*sa)); sa->rc_family = AF_BLUETOOTH; sa->rc_channel = rfcomm_pi(sk)->channel; if (peer) @@ -836,6 +837,7 @@ static int rfcomm_sock_getsockopt(struct socket *sock, int level, int optname, c } sec.level = rfcomm_pi(sk)->sec_level; + sec.key_size = 0; len = min_t(unsigned int, len, sizeof(sec)); if (copy_to_user(optval, (char *) &sec, len)) diff --git a/net/bluetooth/rfcomm/tty.c b/net/bluetooth/rfcomm/tty.c index 4bf54b377255..95a0f60fc6e0 
100644 --- a/net/bluetooth/rfcomm/tty.c +++ b/net/bluetooth/rfcomm/tty.c @@ -467,7 +467,7 @@ static int rfcomm_get_dev_list(void __user *arg) size = sizeof(*dl) + dev_num * sizeof(*di); - dl = kmalloc(size, GFP_KERNEL); + dl = kzalloc(size, GFP_KERNEL); if (!dl) return -ENOMEM; diff --git a/net/bluetooth/smp.c b/net/bluetooth/smp.c index deb119875fd9..605156f13899 100644 --- a/net/bluetooth/smp.c +++ b/net/bluetooth/smp.c @@ -31,6 +31,8 @@ #define SMP_TIMEOUT msecs_to_jiffies(30000) +#define AUTH_REQ_MASK 0x07 + static inline void swap128(u8 src[16], u8 dst[16]) { int i; @@ -229,7 +231,7 @@ static void build_pairing_cmd(struct l2cap_conn *conn, req->max_key_size = SMP_MAX_ENC_KEY_SIZE; req->init_key_dist = 0; req->resp_key_dist = dist_keys; - req->auth_req = authreq; + req->auth_req = (authreq & AUTH_REQ_MASK); return; } @@ -238,7 +240,7 @@ static void build_pairing_cmd(struct l2cap_conn *conn, rsp->max_key_size = SMP_MAX_ENC_KEY_SIZE; rsp->init_key_dist = 0; rsp->resp_key_dist = req->resp_key_dist & dist_keys; - rsp->auth_req = authreq; + rsp->auth_req = (authreq & AUTH_REQ_MASK); } static u8 check_enc_key_size(struct l2cap_conn *conn, __u8 max_key_size) @@ -266,10 +268,10 @@ static void smp_failure(struct l2cap_conn *conn, u8 reason, u8 send) mgmt_auth_failed(conn->hcon->hdev, conn->dst, hcon->type, hcon->dst_type, reason); - if (test_and_clear_bit(HCI_CONN_LE_SMP_PEND, &conn->hcon->flags)) { - cancel_delayed_work_sync(&conn->security_timer); + cancel_delayed_work_sync(&conn->security_timer); + + if (test_and_clear_bit(HCI_CONN_LE_SMP_PEND, &conn->hcon->flags)) smp_chan_destroy(conn); - } } #define JUST_WORKS 0x00 @@ -753,9 +755,9 @@ static u8 smp_cmd_security_req(struct l2cap_conn *conn, struct sk_buff *skb) return 0; } -int smp_conn_security(struct l2cap_conn *conn, __u8 sec_level) +int smp_conn_security(struct hci_conn *hcon, __u8 sec_level) { - struct hci_conn *hcon = conn->hcon; + struct l2cap_conn *conn = hcon->l2cap_data; struct smp_chan *smp = conn->smp_chan; __u8 authreq; @@ -850,6 +852,19 @@ int smp_sig_channel(struct l2cap_conn *conn, struct sk_buff *skb) skb_pull(skb, sizeof(code)); + /* + * The SMP context must be initialized for all other PDUs except + * pairing and security requests. If we get any other PDU when + * not initialized simply disconnect (done if this function + * returns an error). + */ + if (code != SMP_CMD_PAIRING_REQ && code != SMP_CMD_SECURITY_REQ && + !conn->smp_chan) { + BT_ERR("Unexpected SMP command 0x%02x. 
Disconnecting.", code); + kfree_skb(skb); + return -ENOTSUPP; + } + switch (code) { case SMP_CMD_PAIRING_REQ: reason = smp_cmd_pairing_req(conn, skb); diff --git a/net/bridge/br_netfilter.c b/net/bridge/br_netfilter.c index d7f49b63ab0f..e54ef82fdad7 100644 --- a/net/bridge/br_netfilter.c +++ b/net/bridge/br_netfilter.c @@ -254,6 +254,9 @@ static int br_parse_ip_options(struct sk_buff *skb) struct net_device *dev = skb->dev; u32 len; + if (!pskb_may_pull(skb, sizeof(struct iphdr))) + goto inhdr_error; + iph = ip_hdr(skb); opt = &(IPCB(skb)->opt); diff --git a/net/bridge/br_stp_bpdu.c b/net/bridge/br_stp_bpdu.c index e16aade51ae0..718cbe8ad42b 100644 --- a/net/bridge/br_stp_bpdu.c +++ b/net/bridge/br_stp_bpdu.c @@ -16,6 +16,7 @@ #include <linux/etherdevice.h> #include <linux/llc.h> #include <linux/slab.h> +#include <linux/pkt_sched.h> #include <net/net_namespace.h> #include <net/llc.h> #include <net/llc_pdu.h> @@ -40,6 +41,7 @@ static void br_send_bpdu(struct net_bridge_port *p, skb->dev = p->dev; skb->protocol = htons(ETH_P_802_2); + skb->priority = TC_PRIO_CONTROL; skb_reserve(skb, LLC_RESERVE); memcpy(__skb_put(skb, length), data, length); diff --git a/net/can/bcm.c b/net/can/bcm.c index 151b7730c12c..3910c1fefd04 100644 --- a/net/can/bcm.c +++ b/net/can/bcm.c @@ -1084,6 +1084,9 @@ static int bcm_rx_setup(struct bcm_msg_head *msg_head, struct msghdr *msg, op->sk = sk; op->ifindex = ifindex; + /* ifindex for timeout events w/o previous frame reception */ + op->rx_ifindex = ifindex; + /* initialize uninitialized (kzalloc) structure */ hrtimer_init(&op->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); op->timer.function = bcm_rx_timeout_handler; diff --git a/net/ceph/auth_none.c b/net/ceph/auth_none.c index 214c2bb43d62..925ca583c09c 100644 --- a/net/ceph/auth_none.c +++ b/net/ceph/auth_none.c @@ -59,9 +59,7 @@ static int handle_reply(struct ceph_auth_client *ac, int result, */ static int ceph_auth_none_create_authorizer( struct ceph_auth_client *ac, int peer_type, - struct ceph_authorizer **a, - void **buf, size_t *len, - void **reply_buf, size_t *reply_len) + struct ceph_auth_handshake *auth) { struct ceph_auth_none_info *ai = ac->private; struct ceph_none_authorizer *au = &ai->au; @@ -82,11 +80,12 @@ static int ceph_auth_none_create_authorizer( dout("built authorizer len %d\n", au->buf_len); } - *a = (struct ceph_authorizer *)au; - *buf = au->buf; - *len = au->buf_len; - *reply_buf = au->reply_buf; - *reply_len = sizeof(au->reply_buf); + auth->authorizer = (struct ceph_authorizer *) au; + auth->authorizer_buf = au->buf; + auth->authorizer_buf_len = au->buf_len; + auth->authorizer_reply_buf = au->reply_buf; + auth->authorizer_reply_buf_len = sizeof (au->reply_buf); + return 0; bad2: diff --git a/net/ceph/auth_x.c b/net/ceph/auth_x.c index 1587dc6010c6..a16bf14eb027 100644 --- a/net/ceph/auth_x.c +++ b/net/ceph/auth_x.c @@ -526,9 +526,7 @@ static int ceph_x_handle_reply(struct ceph_auth_client *ac, int result, static int ceph_x_create_authorizer( struct ceph_auth_client *ac, int peer_type, - struct ceph_authorizer **a, - void **buf, size_t *len, - void **reply_buf, size_t *reply_len) + struct ceph_auth_handshake *auth) { struct ceph_x_authorizer *au; struct ceph_x_ticket_handler *th; @@ -548,11 +546,12 @@ static int ceph_x_create_authorizer( return ret; } - *a = (struct ceph_authorizer *)au; - *buf = au->buf->vec.iov_base; - *len = au->buf->vec.iov_len; - *reply_buf = au->reply_buf; - *reply_len = sizeof(au->reply_buf); + auth->authorizer = (struct ceph_authorizer *) au; + auth->authorizer_buf = 
au->buf->vec.iov_base; + auth->authorizer_buf_len = au->buf->vec.iov_len; + auth->authorizer_reply_buf = au->reply_buf; + auth->authorizer_reply_buf_len = sizeof (au->reply_buf); + return 0; } diff --git a/net/ceph/ceph_common.c b/net/ceph/ceph_common.c index cc913193d992..b11448f052f4 100644 --- a/net/ceph/ceph_common.c +++ b/net/ceph/ceph_common.c @@ -83,7 +83,6 @@ int ceph_check_fsid(struct ceph_client *client, struct ceph_fsid *fsid) return -1; } } else { - pr_info("client%lld fsid %pU\n", ceph_client_id(client), fsid); memcpy(&client->fsid, fsid, sizeof(*fsid)); } return 0; @@ -305,7 +304,6 @@ ceph_parse_options(char *options, const char *dev_name, /* start with defaults */ opt->flags = CEPH_OPT_DEFAULT; - opt->osd_timeout = CEPH_OSD_TIMEOUT_DEFAULT; opt->osd_keepalive_timeout = CEPH_OSD_KEEPALIVE_DEFAULT; opt->mount_timeout = CEPH_MOUNT_TIMEOUT_DEFAULT; /* seconds */ opt->osd_idle_ttl = CEPH_OSD_IDLE_TTL_DEFAULT; /* seconds */ @@ -391,7 +389,7 @@ ceph_parse_options(char *options, const char *dev_name, /* misc */ case Opt_osdtimeout: - opt->osd_timeout = intval; + pr_warning("ignoring deprecated osdtimeout option\n"); break; case Opt_osdkeepalivetimeout: opt->osd_keepalive_timeout = intval; @@ -468,19 +466,15 @@ struct ceph_client *ceph_create_client(struct ceph_options *opt, void *private, /* msgr */ if (ceph_test_opt(client, MYIP)) myaddr = &client->options->my_addr; - client->msgr = ceph_messenger_create(myaddr, - client->supported_features, - client->required_features); - if (IS_ERR(client->msgr)) { - err = PTR_ERR(client->msgr); - goto fail; - } - client->msgr->nocrc = ceph_test_opt(client, NOCRC); + ceph_messenger_init(&client->msgr, myaddr, + client->supported_features, + client->required_features, + ceph_test_opt(client, NOCRC)); /* subsystems */ err = ceph_monc_init(&client->monc, client); if (err < 0) - goto fail_msgr; + goto fail; err = ceph_osdc_init(&client->osdc, client); if (err < 0) goto fail_monc; @@ -489,8 +483,6 @@ struct ceph_client *ceph_create_client(struct ceph_options *opt, void *private, fail_monc: ceph_monc_stop(&client->monc); -fail_msgr: - ceph_messenger_destroy(client->msgr); fail: kfree(client); return ERR_PTR(err); @@ -501,22 +493,15 @@ void ceph_destroy_client(struct ceph_client *client) { dout("destroy_client %p\n", client); + atomic_set(&client->msgr.stopping, 1); + /* unmount */ ceph_osdc_stop(&client->osdc); - /* - * make sure osd connections close out before destroying the - * auth module, which is needed to free those connections' - * ceph_authorizers. 
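Note: both authorizer implementations above now fill a single struct ceph_auth_handshake instead of returning five separate out-parameters. A rough sketch of a call site, assuming the field names shown in the hunks; the wrapper function itself is illustrative only and not part of the patch:

    /* Illustrative caller: all authorizer buffers travel together in *auth. */
    static int example_build_handshake(struct ceph_auth_client *ac, int peer_type,
                                       struct ceph_auth_handshake *auth)
    {
        int ret;

        ret = ac->ops->create_authorizer(ac, peer_type, auth);
        if (ret)
            return ret;

        pr_debug("authorizer len %zu, reply buf len %zu\n",
                 auth->authorizer_buf_len, auth->authorizer_reply_buf_len);
        return 0;
    }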
- */ - ceph_msgr_flush(); - ceph_monc_stop(&client->monc); ceph_debugfs_client_cleanup(client); - ceph_messenger_destroy(client->msgr); - ceph_destroy_options(client->options); kfree(client); diff --git a/net/ceph/crush/crush.c b/net/ceph/crush/crush.c index d6ebb13a18a4..fbda0521a006 100644 --- a/net/ceph/crush/crush.c +++ b/net/ceph/crush/crush.c @@ -26,9 +26,9 @@ const char *crush_bucket_alg_name(int alg) * @b: bucket pointer * @p: item index in bucket */ -int crush_get_bucket_item_weight(struct crush_bucket *b, int p) +int crush_get_bucket_item_weight(const struct crush_bucket *b, int p) { - if (p >= b->size) + if ((__u32)p >= b->size) return 0; switch (b->alg) { @@ -37,9 +37,7 @@ int crush_get_bucket_item_weight(struct crush_bucket *b, int p) case CRUSH_BUCKET_LIST: return ((struct crush_bucket_list *)b)->item_weights[p]; case CRUSH_BUCKET_TREE: - if (p & 1) - return ((struct crush_bucket_tree *)b)->node_weights[p]; - return 0; + return ((struct crush_bucket_tree *)b)->node_weights[crush_calc_tree_node(p)]; case CRUSH_BUCKET_STRAW: return ((struct crush_bucket_straw *)b)->item_weights[p]; } @@ -87,6 +85,8 @@ void crush_destroy_bucket_list(struct crush_bucket_list *b) void crush_destroy_bucket_tree(struct crush_bucket_tree *b) { + kfree(b->h.perm); + kfree(b->h.items); kfree(b->node_weights); kfree(b); } @@ -124,10 +124,9 @@ void crush_destroy_bucket(struct crush_bucket *b) */ void crush_destroy(struct crush_map *map) { - int b; - /* buckets */ if (map->buckets) { + __s32 b; for (b = 0; b < map->max_buckets; b++) { if (map->buckets[b] == NULL) continue; @@ -138,6 +137,7 @@ void crush_destroy(struct crush_map *map) /* rules */ if (map->rules) { + __u32 b; for (b = 0; b < map->max_rules; b++) kfree(map->rules[b]); kfree(map->rules); diff --git a/net/ceph/crush/mapper.c b/net/ceph/crush/mapper.c index b79747c4b645..00baad5d3bde 100644 --- a/net/ceph/crush/mapper.c +++ b/net/ceph/crush/mapper.c @@ -32,9 +32,9 @@ * @type: storage ruleset type (user defined) * @size: output set size */ -int crush_find_rule(struct crush_map *map, int ruleset, int type, int size) +int crush_find_rule(const struct crush_map *map, int ruleset, int type, int size) { - int i; + __u32 i; for (i = 0; i < map->max_rules; i++) { if (map->rules[i] && @@ -72,7 +72,7 @@ static int bucket_perm_choose(struct crush_bucket *bucket, unsigned i, s; /* start a new permutation if @x has changed */ - if (bucket->perm_x != x || bucket->perm_n == 0) { + if (bucket->perm_x != (__u32)x || bucket->perm_n == 0) { dprintk("bucket %d new x=%d\n", bucket->id, x); bucket->perm_x = x; @@ -152,8 +152,8 @@ static int bucket_list_choose(struct crush_bucket_list *bucket, return bucket->h.items[i]; } - BUG_ON(1); - return 0; + dprintk("bad list sums for bucket %d\n", bucket->h.id); + return bucket->h.items[0]; } @@ -219,7 +219,7 @@ static int bucket_tree_choose(struct crush_bucket_tree *bucket, static int bucket_straw_choose(struct crush_bucket_straw *bucket, int x, int r) { - int i; + __u32 i; int high = 0; __u64 high_draw = 0; __u64 draw; @@ -239,6 +239,7 @@ static int bucket_straw_choose(struct crush_bucket_straw *bucket, static int crush_bucket_choose(struct crush_bucket *in, int x, int r) { dprintk(" crush_bucket_choose %d x=%d r=%d\n", in->id, x, r); + BUG_ON(in->size == 0); switch (in->alg) { case CRUSH_BUCKET_UNIFORM: return bucket_uniform_choose((struct crush_bucket_uniform *)in, @@ -253,7 +254,7 @@ static int crush_bucket_choose(struct crush_bucket *in, int x, int r) return bucket_straw_choose((struct crush_bucket_straw *)in, x, r); 
default: - BUG_ON(1); + dprintk("unknown bucket %d alg %d\n", in->id, in->alg); return in->items[0]; } } @@ -262,7 +263,7 @@ static int crush_bucket_choose(struct crush_bucket *in, int x, int r) * true if device is marked "out" (failed, fully offloaded) * of the cluster */ -static int is_out(struct crush_map *map, __u32 *weight, int item, int x) +static int is_out(const struct crush_map *map, const __u32 *weight, int item, int x) { if (weight[item] >= 0x10000) return 0; @@ -287,16 +288,16 @@ static int is_out(struct crush_map *map, __u32 *weight, int item, int x) * @recurse_to_leaf: true if we want one device under each item of given type * @out2: second output vector for leaf items (if @recurse_to_leaf) */ -static int crush_choose(struct crush_map *map, +static int crush_choose(const struct crush_map *map, struct crush_bucket *bucket, - __u32 *weight, + const __u32 *weight, int x, int numrep, int type, int *out, int outpos, int firstn, int recurse_to_leaf, int *out2) { int rep; - int ftotal, flocal; + unsigned int ftotal, flocal; int retry_descent, retry_bucket, skip_rep; struct crush_bucket *in = bucket; int r; @@ -304,7 +305,7 @@ static int crush_choose(struct crush_map *map, int item = 0; int itemtype; int collide, reject; - const int orig_tries = 5; /* attempts before we fall back to search */ + const unsigned int orig_tries = 5; /* attempts before we fall back to search */ dprintk("CHOOSE%s bucket %d x %d outpos %d numrep %d\n", recurse_to_leaf ? "_LEAF" : "", bucket->id, x, outpos, numrep); @@ -325,7 +326,7 @@ static int crush_choose(struct crush_map *map, r = rep; if (in->alg == CRUSH_BUCKET_UNIFORM) { /* be careful */ - if (firstn || numrep >= in->size) + if (firstn || (__u32)numrep >= in->size) /* r' = r + f_total */ r += ftotal; else if (in->size % numrep == 0) @@ -354,7 +355,11 @@ static int crush_choose(struct crush_map *map, item = bucket_perm_choose(in, x, r); else item = crush_bucket_choose(in, x, r); - BUG_ON(item >= map->max_devices); + if (item >= map->max_devices) { + dprintk(" bad item %d\n", item); + skip_rep = 1; + break; + } /* desired type? */ if (item < 0) @@ -365,8 +370,12 @@ static int crush_choose(struct crush_map *map, /* keep going? 
*/ if (itemtype != type) { - BUG_ON(item >= 0 || - (-1-item) >= map->max_buckets); + if (item >= 0 || + (-1-item) >= map->max_buckets) { + dprintk(" bad item type %d\n", type); + skip_rep = 1; + break; + } in = map->buckets[-1-item]; retry_bucket = 1; continue; @@ -415,7 +424,7 @@ reject: if (collide && flocal < 3) /* retry locally a few times */ retry_bucket = 1; - else if (flocal < in->size + orig_tries) + else if (flocal <= in->size + orig_tries) /* exhaustive bucket search */ retry_bucket = 1; else if (ftotal < 20) @@ -425,7 +434,7 @@ reject: /* else give up */ skip_rep = 1; dprintk(" reject %d collide %d " - "ftotal %d flocal %d\n", + "ftotal %u flocal %u\n", reject, collide, ftotal, flocal); } @@ -456,9 +465,9 @@ reject: * @result_max: maximum result size * @force: force initial replica choice; -1 for none */ -int crush_do_rule(struct crush_map *map, +int crush_do_rule(const struct crush_map *map, int ruleno, int x, int *result, int result_max, - int force, __u32 *weight) + int force, const __u32 *weight) { int result_len; int force_context[CRUSH_MAX_DEPTH]; @@ -473,12 +482,15 @@ int crush_do_rule(struct crush_map *map, int osize; int *tmp; struct crush_rule *rule; - int step; + __u32 step; int i, j; int numrep; int firstn; - BUG_ON(ruleno >= map->max_rules); + if ((__u32)ruleno >= map->max_rules) { + dprintk(" bad ruleno %d\n", ruleno); + return 0; + } rule = map->rules[ruleno]; result_len = 0; @@ -488,7 +500,8 @@ int crush_do_rule(struct crush_map *map, /* * determine hierarchical context of force, if any. note * that this may or may not correspond to the specific types - * referenced by the crush rule. + * referenced by the crush rule. it will also only affect + * the first descent (TAKE). */ if (force >= 0 && force < map->max_devices && @@ -527,7 +540,8 @@ int crush_do_rule(struct crush_map *map, firstn = 1; case CRUSH_RULE_CHOOSE_LEAF_INDEP: case CRUSH_RULE_CHOOSE_INDEP: - BUG_ON(wsize == 0); + if (wsize == 0) + break; recurse_to_leaf = rule->steps[step].op == @@ -596,7 +610,9 @@ int crush_do_rule(struct crush_map *map, break; default: - BUG_ON(1); + dprintk(" unknown op %d at step %d\n", + curstep->op, step); + break; } } return result_len; diff --git a/net/ceph/crypto.c b/net/ceph/crypto.c index b780cb7947dd..9da7fdd3cd8a 100644 --- a/net/ceph/crypto.c +++ b/net/ceph/crypto.c @@ -466,6 +466,7 @@ void ceph_key_destroy(struct key *key) { struct ceph_crypto_key *ckey = key->payload.data; ceph_crypto_key_destroy(ckey); + kfree(ckey); } struct key_type key_type_ceph = { diff --git a/net/ceph/crypto.h b/net/ceph/crypto.h index 1919d1550d75..3572dc518bc9 100644 --- a/net/ceph/crypto.h +++ b/net/ceph/crypto.h @@ -16,7 +16,8 @@ struct ceph_crypto_key { static inline void ceph_crypto_key_destroy(struct ceph_crypto_key *key) { - kfree(key->key); + if (key) + kfree(key->key); } extern int ceph_crypto_key_clone(struct ceph_crypto_key *dst, diff --git a/net/ceph/debugfs.c b/net/ceph/debugfs.c index 27d4ea315d12..680978d00446 100644 --- a/net/ceph/debugfs.c +++ b/net/ceph/debugfs.c @@ -189,6 +189,9 @@ int ceph_debugfs_client_init(struct ceph_client *client) snprintf(name, sizeof(name), "%pU.client%lld", &client->fsid, client->monc.auth->global_id); + dout("ceph_debugfs_client_init %p %s\n", client, name); + + BUG_ON(client->debugfs_dir); client->debugfs_dir = debugfs_create_dir(name, ceph_debugfs_dir); if (!client->debugfs_dir) goto out; @@ -234,6 +237,7 @@ out: void ceph_debugfs_client_cleanup(struct ceph_client *client) { + dout("ceph_debugfs_client_cleanup %p\n", client); 
debugfs_remove(client->debugfs_osdmap); debugfs_remove(client->debugfs_monmap); debugfs_remove(client->osdc.debugfs_file); diff --git a/net/ceph/messenger.c b/net/ceph/messenger.c index f0993af2ae4d..ba1037ceb496 100644 --- a/net/ceph/messenger.c +++ b/net/ceph/messenger.c @@ -29,6 +29,74 @@ * the sender. */ +/* + * We track the state of the socket on a given connection using + * values defined below. The transition to a new socket state is + * handled by a function which verifies we aren't coming from an + * unexpected state. + * + * -------- + * | NEW* | transient initial state + * -------- + * | con_sock_state_init() + * v + * ---------- + * | CLOSED | initialized, but no socket (and no + * ---------- TCP connection) + * ^ \ + * | \ con_sock_state_connecting() + * | ---------------------- + * | \ + * + con_sock_state_closed() \ + * |+--------------------------- \ + * | \ \ \ + * | ----------- \ \ + * | | CLOSING | socket event; \ \ + * | ----------- await close \ \ + * | ^ \ | + * | | \ | + * | + con_sock_state_closing() \ | + * | / \ | | + * | / --------------- | | + * | / \ v v + * | / -------------- + * | / -----------------| CONNECTING | socket created, TCP + * | | / -------------- connect initiated + * | | | con_sock_state_connected() + * | | v + * ------------- + * | CONNECTED | TCP connection established + * ------------- + * + * State values for ceph_connection->sock_state; NEW is assumed to be 0. + */ + +#define CON_SOCK_STATE_NEW 0 /* -> CLOSED */ +#define CON_SOCK_STATE_CLOSED 1 /* -> CONNECTING */ +#define CON_SOCK_STATE_CONNECTING 2 /* -> CONNECTED or -> CLOSING */ +#define CON_SOCK_STATE_CONNECTED 3 /* -> CLOSING or -> CLOSED */ +#define CON_SOCK_STATE_CLOSING 4 /* -> CLOSED */ + +/* + * connection states + */ +#define CON_STATE_CLOSED 1 /* -> PREOPEN */ +#define CON_STATE_PREOPEN 2 /* -> CONNECTING, CLOSED */ +#define CON_STATE_CONNECTING 3 /* -> NEGOTIATING, CLOSED */ +#define CON_STATE_NEGOTIATING 4 /* -> OPEN, CLOSED */ +#define CON_STATE_OPEN 5 /* -> STANDBY, CLOSED */ +#define CON_STATE_STANDBY 6 /* -> PREOPEN, CLOSED */ + +/* + * ceph_connection flag bits + */ +#define CON_FLAG_LOSSYTX 0 /* we can close channel or drop + * messages on errors */ +#define CON_FLAG_KEEPALIVE_PENDING 1 /* we need to send a keepalive */ +#define CON_FLAG_WRITE_PENDING 2 /* we have data ready to send */ +#define CON_FLAG_SOCK_CLOSED 3 /* socket state changed to closed */ +#define CON_FLAG_BACKOFF 4 /* need to retry queuing delayed work */ + /* static tag bytes (protocol control messages) */ static char tag_msg = CEPH_MSGR_TAG_MSG; static char tag_ack = CEPH_MSGR_TAG_ACK; @@ -147,72 +215,130 @@ void ceph_msgr_flush(void) } EXPORT_SYMBOL(ceph_msgr_flush); +/* Connection socket state transition functions */ + +static void con_sock_state_init(struct ceph_connection *con) +{ + int old_state; + + old_state = atomic_xchg(&con->sock_state, CON_SOCK_STATE_CLOSED); + if (WARN_ON(old_state != CON_SOCK_STATE_NEW)) + printk("%s: unexpected old state %d\n", __func__, old_state); + dout("%s con %p sock %d -> %d\n", __func__, con, old_state, + CON_SOCK_STATE_CLOSED); +} + +static void con_sock_state_connecting(struct ceph_connection *con) +{ + int old_state; + + old_state = atomic_xchg(&con->sock_state, CON_SOCK_STATE_CONNECTING); + if (WARN_ON(old_state != CON_SOCK_STATE_CLOSED)) + printk("%s: unexpected old state %d\n", __func__, old_state); + dout("%s con %p sock %d -> %d\n", __func__, con, old_state, + CON_SOCK_STATE_CONNECTING); +} + +static void con_sock_state_connected(struct ceph_connection *con) 
+{ + int old_state; + + old_state = atomic_xchg(&con->sock_state, CON_SOCK_STATE_CONNECTED); + if (WARN_ON(old_state != CON_SOCK_STATE_CONNECTING)) + printk("%s: unexpected old state %d\n", __func__, old_state); + dout("%s con %p sock %d -> %d\n", __func__, con, old_state, + CON_SOCK_STATE_CONNECTED); +} + +static void con_sock_state_closing(struct ceph_connection *con) +{ + int old_state; + + old_state = atomic_xchg(&con->sock_state, CON_SOCK_STATE_CLOSING); + if (WARN_ON(old_state != CON_SOCK_STATE_CONNECTING && + old_state != CON_SOCK_STATE_CONNECTED && + old_state != CON_SOCK_STATE_CLOSING)) + printk("%s: unexpected old state %d\n", __func__, old_state); + dout("%s con %p sock %d -> %d\n", __func__, con, old_state, + CON_SOCK_STATE_CLOSING); +} + +static void con_sock_state_closed(struct ceph_connection *con) +{ + int old_state; + + old_state = atomic_xchg(&con->sock_state, CON_SOCK_STATE_CLOSED); + if (WARN_ON(old_state != CON_SOCK_STATE_CONNECTED && + old_state != CON_SOCK_STATE_CLOSING && + old_state != CON_SOCK_STATE_CONNECTING && + old_state != CON_SOCK_STATE_CLOSED)) + printk("%s: unexpected old state %d\n", __func__, old_state); + dout("%s con %p sock %d -> %d\n", __func__, con, old_state, + CON_SOCK_STATE_CLOSED); +} /* * socket callback functions */ /* data available on socket, or listen socket received a connect */ -static void ceph_data_ready(struct sock *sk, int count_unused) +static void ceph_sock_data_ready(struct sock *sk, int count_unused) { struct ceph_connection *con = sk->sk_user_data; + if (atomic_read(&con->msgr->stopping)) { + return; + } if (sk->sk_state != TCP_CLOSE_WAIT) { - dout("ceph_data_ready on %p state = %lu, queueing work\n", + dout("%s on %p state = %lu, queueing work\n", __func__, con, con->state); queue_con(con); } } /* socket has buffer space for writing */ -static void ceph_write_space(struct sock *sk) +static void ceph_sock_write_space(struct sock *sk) { struct ceph_connection *con = sk->sk_user_data; /* only queue to workqueue if there is data we want to write, * and there is sufficient space in the socket buffer to accept - * more data. clear SOCK_NOSPACE so that ceph_write_space() + * more data. clear SOCK_NOSPACE so that ceph_sock_write_space() * doesn't get called again until try_write() fills the socket * buffer. See net/ipv4/tcp_input.c:tcp_check_space() * and net/core/stream.c:sk_stream_write_space(). 
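Note: the con_sock_state_*() helpers introduced above all share one idiom: atomically install the new socket state and warn if the previous state was not an expected predecessor. A minimal generic sketch of that idiom; the helper name and the single-predecessor check are simplifications of the per-transition variants in the patch:

    #include <linux/atomic.h>
    #include <linux/kernel.h>

    static void sock_state_transition(atomic_t *sock_state, int new_state,
                                      int expected_old)
    {
        int old_state;

        old_state = atomic_xchg(sock_state, new_state);
        if (WARN_ON(old_state != expected_old))
            pr_err("%s: unexpected old state %d\n", __func__, old_state);
    }

    /* e.g. CLOSED -> CONNECTING when a connect is initiated:
     *   sock_state_transition(&con->sock_state, CON_SOCK_STATE_CONNECTING,
     *                         CON_SOCK_STATE_CLOSED);
     */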
*/ - if (test_bit(WRITE_PENDING, &con->state)) { + if (test_bit(CON_FLAG_WRITE_PENDING, &con->flags)) { if (sk_stream_wspace(sk) >= sk_stream_min_wspace(sk)) { - dout("ceph_write_space %p queueing write work\n", con); + dout("%s %p queueing write work\n", __func__, con); clear_bit(SOCK_NOSPACE, &sk->sk_socket->flags); queue_con(con); } } else { - dout("ceph_write_space %p nothing to write\n", con); + dout("%s %p nothing to write\n", __func__, con); } } /* socket's state has changed */ -static void ceph_state_change(struct sock *sk) +static void ceph_sock_state_change(struct sock *sk) { struct ceph_connection *con = sk->sk_user_data; - dout("ceph_state_change %p state = %lu sk_state = %u\n", + dout("%s %p state = %lu sk_state = %u\n", __func__, con, con->state, sk->sk_state); - if (test_bit(CLOSED, &con->state)) - return; - switch (sk->sk_state) { case TCP_CLOSE: - dout("ceph_state_change TCP_CLOSE\n"); + dout("%s TCP_CLOSE\n", __func__); case TCP_CLOSE_WAIT: - dout("ceph_state_change TCP_CLOSE_WAIT\n"); - if (test_and_set_bit(SOCK_CLOSED, &con->state) == 0) { - if (test_bit(CONNECTING, &con->state)) - con->error_msg = "connection failed"; - else - con->error_msg = "socket closed"; - queue_con(con); - } + dout("%s TCP_CLOSE_WAIT\n", __func__); + con_sock_state_closing(con); + set_bit(CON_FLAG_SOCK_CLOSED, &con->flags); + queue_con(con); break; case TCP_ESTABLISHED: - dout("ceph_state_change TCP_ESTABLISHED\n"); + dout("%s TCP_ESTABLISHED\n", __func__); + con_sock_state_connected(con); queue_con(con); break; default: /* Everything else is uninteresting */ @@ -228,9 +354,9 @@ static void set_sock_callbacks(struct socket *sock, { struct sock *sk = sock->sk; sk->sk_user_data = con; - sk->sk_data_ready = ceph_data_ready; - sk->sk_write_space = ceph_write_space; - sk->sk_state_change = ceph_state_change; + sk->sk_data_ready = ceph_sock_data_ready; + sk->sk_write_space = ceph_sock_write_space; + sk->sk_state_change = ceph_sock_state_change; } @@ -262,6 +388,7 @@ static int ceph_tcp_connect(struct ceph_connection *con) dout("connect %s\n", ceph_pr_addr(&con->peer_addr.in_addr)); + con_sock_state_connecting(con); ret = sock->ops->connect(sock, (struct sockaddr *)paddr, sizeof(*paddr), O_NONBLOCK); if (ret == -EINPROGRESS) { @@ -277,7 +404,6 @@ static int ceph_tcp_connect(struct ceph_connection *con) return ret; } con->sock = sock; - return 0; } @@ -333,16 +459,24 @@ static int ceph_tcp_sendpage(struct socket *sock, struct page *page, */ static int con_close_socket(struct ceph_connection *con) { - int rc; + int rc = 0; dout("con_close_socket on %p sock %p\n", con, con->sock); - if (!con->sock) - return 0; - set_bit(SOCK_CLOSED, &con->state); - rc = con->sock->ops->shutdown(con->sock, SHUT_RDWR); - sock_release(con->sock); - con->sock = NULL; - clear_bit(SOCK_CLOSED, &con->state); + if (con->sock) { + rc = con->sock->ops->shutdown(con->sock, SHUT_RDWR); + sock_release(con->sock); + con->sock = NULL; + } + + /* + * Forcibly clear the SOCK_CLOSED flag. It gets set + * independent of the connection mutex, and we could have + * received a socket close event before we had the chance to + * shut the socket down. 
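Note: the socket callback above records a close event by setting CON_FLAG_SOCK_CLOSED and queueing work, while con_close_socket() clears the flag to discard stale events. The consumer of the flag is outside the hunks shown here (presumably the connection worker), so the following is only a sketch of how such a flag is drained exactly once:

    /* Illustrative consumer; the real check is not in the hunks shown here. */
    static bool example_sock_closed(struct ceph_connection *con)
    {
        return test_and_clear_bit(CON_FLAG_SOCK_CLOSED, &con->flags);
    }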
+ */ + clear_bit(CON_FLAG_SOCK_CLOSED, &con->flags); + + con_sock_state_closed(con); return rc; } @@ -353,6 +487,10 @@ static int con_close_socket(struct ceph_connection *con) static void ceph_msg_remove(struct ceph_msg *msg) { list_del_init(&msg->list_head); + BUG_ON(msg->con == NULL); + msg->con->ops->put(msg->con); + msg->con = NULL; + ceph_msg_put(msg); } static void ceph_msg_remove_list(struct list_head *head) @@ -368,12 +506,16 @@ static void reset_connection(struct ceph_connection *con) { /* reset connection, out_queue, msg_ and connect_seq */ /* discard existing out_queue and msg_seq */ + dout("reset_connection %p\n", con); ceph_msg_remove_list(&con->out_queue); ceph_msg_remove_list(&con->out_sent); if (con->in_msg) { + BUG_ON(con->in_msg->con != con); + con->in_msg->con = NULL; ceph_msg_put(con->in_msg); con->in_msg = NULL; + con->ops->put(con); } con->connect_seq = 0; @@ -391,32 +533,44 @@ static void reset_connection(struct ceph_connection *con) */ void ceph_con_close(struct ceph_connection *con) { + mutex_lock(&con->mutex); dout("con_close %p peer %s\n", con, ceph_pr_addr(&con->peer_addr.in_addr)); - set_bit(CLOSED, &con->state); /* in case there's queued work */ - clear_bit(STANDBY, &con->state); /* avoid connect_seq bump */ - clear_bit(LOSSYTX, &con->state); /* so we retry next connect */ - clear_bit(KEEPALIVE_PENDING, &con->state); - clear_bit(WRITE_PENDING, &con->state); - mutex_lock(&con->mutex); + con->state = CON_STATE_CLOSED; + + clear_bit(CON_FLAG_LOSSYTX, &con->flags); /* so we retry next connect */ + clear_bit(CON_FLAG_KEEPALIVE_PENDING, &con->flags); + clear_bit(CON_FLAG_WRITE_PENDING, &con->flags); + clear_bit(CON_FLAG_KEEPALIVE_PENDING, &con->flags); + clear_bit(CON_FLAG_BACKOFF, &con->flags); + reset_connection(con); con->peer_global_seq = 0; cancel_delayed_work(&con->work); + con_close_socket(con); mutex_unlock(&con->mutex); - queue_con(con); } EXPORT_SYMBOL(ceph_con_close); /* * Reopen a closed connection, with a new peer address. */ -void ceph_con_open(struct ceph_connection *con, struct ceph_entity_addr *addr) +void ceph_con_open(struct ceph_connection *con, + __u8 entity_type, __u64 entity_num, + struct ceph_entity_addr *addr) { + mutex_lock(&con->mutex); dout("con_open %p %s\n", con, ceph_pr_addr(&addr->in_addr)); - set_bit(OPENING, &con->state); - clear_bit(CLOSED, &con->state); + + WARN_ON(con->state != CON_STATE_CLOSED); + con->state = CON_STATE_PREOPEN; + + con->peer_name.type = (__u8) entity_type; + con->peer_name.num = cpu_to_le64(entity_num); + memcpy(&con->peer_addr, addr, sizeof(*addr)); con->delay = 0; /* reset backoff memory */ + mutex_unlock(&con->mutex); queue_con(con); } EXPORT_SYMBOL(ceph_con_open); @@ -430,42 +584,26 @@ bool ceph_con_opened(struct ceph_connection *con) } /* - * generic get/put - */ -struct ceph_connection *ceph_con_get(struct ceph_connection *con) -{ - int nref = __atomic_add_unless(&con->nref, 1, 0); - - dout("con_get %p nref = %d -> %d\n", con, nref, nref + 1); - - return nref ? con : NULL; -} - -void ceph_con_put(struct ceph_connection *con) -{ - int nref = atomic_dec_return(&con->nref); - - BUG_ON(nref < 0); - if (nref == 0) { - BUG_ON(con->sock); - kfree(con); - } - dout("con_put %p nref = %d -> %d\n", con, nref + 1, nref); -} - -/* * initialize a new connection. 
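Note: with the reworked API above, a connection is embedded in its owner, initialized once with its private pointer and ops table, and then (re)opened with an explicit peer identity. A sketch of a caller, assuming a hypothetical owner struct and ops table (my_client, my_con_ops); the real mon/osd client callers are outside this section:

    struct my_client {
        struct ceph_connection con;
        /* ... owner state ... */
    };

    /* get/put/dispatch callbacks must be provided; elided in this sketch. */
    static const struct ceph_connection_operations my_con_ops;

    static void example_open_session(struct my_client *client,
                                     struct ceph_messenger *msgr,
                                     struct ceph_entity_addr *addr, u64 osd_num)
    {
        ceph_con_init(&client->con, client, &my_con_ops, msgr);
        ceph_con_open(&client->con, CEPH_ENTITY_TYPE_OSD, osd_num, addr);
    }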
*/ -void ceph_con_init(struct ceph_messenger *msgr, struct ceph_connection *con) +void ceph_con_init(struct ceph_connection *con, void *private, + const struct ceph_connection_operations *ops, + struct ceph_messenger *msgr) { dout("con_init %p\n", con); memset(con, 0, sizeof(*con)); - atomic_set(&con->nref, 1); + con->private = private; + con->ops = ops; con->msgr = msgr; + + con_sock_state_init(con); + mutex_init(&con->mutex); INIT_LIST_HEAD(&con->out_queue); INIT_LIST_HEAD(&con->out_sent); INIT_DELAYED_WORK(&con->work, con_work); + + con->state = CON_STATE_CLOSED; } EXPORT_SYMBOL(ceph_con_init); @@ -486,14 +624,14 @@ static u32 get_global_seq(struct ceph_messenger *msgr, u32 gt) return ret; } -static void ceph_con_out_kvec_reset(struct ceph_connection *con) +static void con_out_kvec_reset(struct ceph_connection *con) { con->out_kvec_left = 0; con->out_kvec_bytes = 0; con->out_kvec_cur = &con->out_kvec[0]; } -static void ceph_con_out_kvec_add(struct ceph_connection *con, +static void con_out_kvec_add(struct ceph_connection *con, size_t size, void *data) { int index; @@ -507,6 +645,53 @@ static void ceph_con_out_kvec_add(struct ceph_connection *con, con->out_kvec_bytes += size; } +#ifdef CONFIG_BLOCK +static void init_bio_iter(struct bio *bio, struct bio **iter, int *seg) +{ + if (!bio) { + *iter = NULL; + *seg = 0; + return; + } + *iter = bio; + *seg = bio->bi_idx; +} + +static void iter_bio_next(struct bio **bio_iter, int *seg) +{ + if (*bio_iter == NULL) + return; + + BUG_ON(*seg >= (*bio_iter)->bi_vcnt); + + (*seg)++; + if (*seg == (*bio_iter)->bi_vcnt) + init_bio_iter((*bio_iter)->bi_next, bio_iter, seg); +} +#endif + +static void prepare_write_message_data(struct ceph_connection *con) +{ + struct ceph_msg *msg = con->out_msg; + + BUG_ON(!msg); + BUG_ON(!msg->hdr.data_len); + + /* initialize page iterator */ + con->out_msg_pos.page = 0; + if (msg->pages) + con->out_msg_pos.page_pos = msg->page_alignment; + else + con->out_msg_pos.page_pos = 0; +#ifdef CONFIG_BLOCK + if (msg->bio) + init_bio_iter(msg->bio, &msg->bio_iter, &msg->bio_seg); +#endif + con->out_msg_pos.data_pos = 0; + con->out_msg_pos.did_page_crc = false; + con->out_more = 1; /* data + footer will follow */ +} + /* * Prepare footer for currently outgoing message, and finish things * off. Assumes out_kvec* are already valid.. we just add on to the end. @@ -516,6 +701,8 @@ static void prepare_write_message_footer(struct ceph_connection *con) struct ceph_msg *m = con->out_msg; int v = con->out_kvec_left; + m->footer.flags |= CEPH_MSG_FOOTER_COMPLETE; + dout("prepare_write_message_footer %p\n", con); con->out_kvec_is_msg = true; con->out_kvec[v].iov_base = &m->footer; @@ -534,7 +721,7 @@ static void prepare_write_message(struct ceph_connection *con) struct ceph_msg *m; u32 crc; - ceph_con_out_kvec_reset(con); + con_out_kvec_reset(con); con->out_kvec_is_msg = true; con->out_msg_done = false; @@ -542,14 +729,16 @@ static void prepare_write_message(struct ceph_connection *con) * TCP packet that's a good thing. 
*/ if (con->in_seq > con->in_seq_acked) { con->in_seq_acked = con->in_seq; - ceph_con_out_kvec_add(con, sizeof (tag_ack), &tag_ack); + con_out_kvec_add(con, sizeof (tag_ack), &tag_ack); con->out_temp_ack = cpu_to_le64(con->in_seq_acked); - ceph_con_out_kvec_add(con, sizeof (con->out_temp_ack), + con_out_kvec_add(con, sizeof (con->out_temp_ack), &con->out_temp_ack); } + BUG_ON(list_empty(&con->out_queue)); m = list_first_entry(&con->out_queue, struct ceph_msg, list_head); con->out_msg = m; + BUG_ON(m->con != con); /* put message on sent list */ ceph_msg_get(m); @@ -572,18 +761,18 @@ static void prepare_write_message(struct ceph_connection *con) BUG_ON(le32_to_cpu(m->hdr.front_len) != m->front.iov_len); /* tag + hdr + front + middle */ - ceph_con_out_kvec_add(con, sizeof (tag_msg), &tag_msg); - ceph_con_out_kvec_add(con, sizeof (m->hdr), &m->hdr); - ceph_con_out_kvec_add(con, m->front.iov_len, m->front.iov_base); + con_out_kvec_add(con, sizeof (tag_msg), &tag_msg); + con_out_kvec_add(con, sizeof (m->hdr), &m->hdr); + con_out_kvec_add(con, m->front.iov_len, m->front.iov_base); if (m->middle) - ceph_con_out_kvec_add(con, m->middle->vec.iov_len, + con_out_kvec_add(con, m->middle->vec.iov_len, m->middle->vec.iov_base); /* fill in crc (except data pages), footer */ crc = crc32c(0, &m->hdr, offsetof(struct ceph_msg_header, crc)); con->out_msg->hdr.crc = cpu_to_le32(crc); - con->out_msg->footer.flags = CEPH_MSG_FOOTER_COMPLETE; + con->out_msg->footer.flags = 0; crc = crc32c(0, m->front.iov_base, m->front.iov_len); con->out_msg->footer.front_crc = cpu_to_le32(crc); @@ -593,28 +782,19 @@ static void prepare_write_message(struct ceph_connection *con) con->out_msg->footer.middle_crc = cpu_to_le32(crc); } else con->out_msg->footer.middle_crc = 0; - con->out_msg->footer.data_crc = 0; - dout("prepare_write_message front_crc %u data_crc %u\n", + dout("%s front_crc %u middle_crc %u\n", __func__, le32_to_cpu(con->out_msg->footer.front_crc), le32_to_cpu(con->out_msg->footer.middle_crc)); /* is there a data payload? */ - if (le32_to_cpu(m->hdr.data_len) > 0) { - /* initialize page iterator */ - con->out_msg_pos.page = 0; - if (m->pages) - con->out_msg_pos.page_pos = m->page_alignment; - else - con->out_msg_pos.page_pos = 0; - con->out_msg_pos.data_pos = 0; - con->out_msg_pos.did_page_crc = false; - con->out_more = 1; /* data + footer will follow */ - } else { + con->out_msg->footer.data_crc = 0; + if (m->hdr.data_len) + prepare_write_message_data(con); + else /* no, queue up footer too and be done */ prepare_write_message_footer(con); - } - set_bit(WRITE_PENDING, &con->state); + set_bit(CON_FLAG_WRITE_PENDING, &con->flags); } /* @@ -626,16 +806,16 @@ static void prepare_write_ack(struct ceph_connection *con) con->in_seq_acked, con->in_seq); con->in_seq_acked = con->in_seq; - ceph_con_out_kvec_reset(con); + con_out_kvec_reset(con); - ceph_con_out_kvec_add(con, sizeof (tag_ack), &tag_ack); + con_out_kvec_add(con, sizeof (tag_ack), &tag_ack); con->out_temp_ack = cpu_to_le64(con->in_seq_acked); - ceph_con_out_kvec_add(con, sizeof (con->out_temp_ack), + con_out_kvec_add(con, sizeof (con->out_temp_ack), &con->out_temp_ack); con->out_more = 1; /* more will follow.. eventually.. 
*/ - set_bit(WRITE_PENDING, &con->state); + set_bit(CON_FLAG_WRITE_PENDING, &con->flags); } /* @@ -644,63 +824,60 @@ static void prepare_write_ack(struct ceph_connection *con) static void prepare_write_keepalive(struct ceph_connection *con) { dout("prepare_write_keepalive %p\n", con); - ceph_con_out_kvec_reset(con); - ceph_con_out_kvec_add(con, sizeof (tag_keepalive), &tag_keepalive); - set_bit(WRITE_PENDING, &con->state); + con_out_kvec_reset(con); + con_out_kvec_add(con, sizeof (tag_keepalive), &tag_keepalive); + set_bit(CON_FLAG_WRITE_PENDING, &con->flags); } /* * Connection negotiation. */ -static int prepare_connect_authorizer(struct ceph_connection *con) +static struct ceph_auth_handshake *get_connect_authorizer(struct ceph_connection *con, + int *auth_proto) { - void *auth_buf; - int auth_len = 0; - int auth_protocol = 0; + struct ceph_auth_handshake *auth; + if (!con->ops->get_authorizer) { + con->out_connect.authorizer_protocol = CEPH_AUTH_UNKNOWN; + con->out_connect.authorizer_len = 0; + return NULL; + } + + /* Can't hold the mutex while getting authorizer */ mutex_unlock(&con->mutex); - if (con->ops->get_authorizer) - con->ops->get_authorizer(con, &auth_buf, &auth_len, - &auth_protocol, &con->auth_reply_buf, - &con->auth_reply_buf_len, - con->auth_retry); + auth = con->ops->get_authorizer(con, auth_proto, con->auth_retry); mutex_lock(&con->mutex); - if (test_bit(CLOSED, &con->state) || - test_bit(OPENING, &con->state)) - return -EAGAIN; - - con->out_connect.authorizer_protocol = cpu_to_le32(auth_protocol); - con->out_connect.authorizer_len = cpu_to_le32(auth_len); + if (IS_ERR(auth)) + return auth; + if (con->state != CON_STATE_NEGOTIATING) + return ERR_PTR(-EAGAIN); - if (auth_len) - ceph_con_out_kvec_add(con, auth_len, auth_buf); - - return 0; + con->auth_reply_buf = auth->authorizer_reply_buf; + con->auth_reply_buf_len = auth->authorizer_reply_buf_len; + return auth; } /* * We connected to a peer and are saying hello. 
*/ -static void prepare_write_banner(struct ceph_messenger *msgr, - struct ceph_connection *con) +static void prepare_write_banner(struct ceph_connection *con) { - ceph_con_out_kvec_reset(con); - ceph_con_out_kvec_add(con, strlen(CEPH_BANNER), CEPH_BANNER); - ceph_con_out_kvec_add(con, sizeof (msgr->my_enc_addr), - &msgr->my_enc_addr); + con_out_kvec_add(con, strlen(CEPH_BANNER), CEPH_BANNER); + con_out_kvec_add(con, sizeof (con->msgr->my_enc_addr), + &con->msgr->my_enc_addr); con->out_more = 0; - set_bit(WRITE_PENDING, &con->state); + set_bit(CON_FLAG_WRITE_PENDING, &con->flags); } -static int prepare_write_connect(struct ceph_messenger *msgr, - struct ceph_connection *con, - int include_banner) +static int prepare_write_connect(struct ceph_connection *con) { unsigned global_seq = get_global_seq(con->msgr, 0); int proto; + int auth_proto; + struct ceph_auth_handshake *auth; switch (con->peer_name.type) { case CEPH_ENTITY_TYPE_MON: @@ -719,23 +896,32 @@ static int prepare_write_connect(struct ceph_messenger *msgr, dout("prepare_write_connect %p cseq=%d gseq=%d proto=%d\n", con, con->connect_seq, global_seq, proto); - con->out_connect.features = cpu_to_le64(msgr->supported_features); + con->out_connect.features = cpu_to_le64(con->msgr->supported_features); con->out_connect.host_type = cpu_to_le32(CEPH_ENTITY_TYPE_CLIENT); con->out_connect.connect_seq = cpu_to_le32(con->connect_seq); con->out_connect.global_seq = cpu_to_le32(global_seq); con->out_connect.protocol_version = cpu_to_le32(proto); con->out_connect.flags = 0; - if (include_banner) - prepare_write_banner(msgr, con); - else - ceph_con_out_kvec_reset(con); - ceph_con_out_kvec_add(con, sizeof (con->out_connect), &con->out_connect); + auth_proto = CEPH_AUTH_UNKNOWN; + auth = get_connect_authorizer(con, &auth_proto); + if (IS_ERR(auth)) + return PTR_ERR(auth); + + con->out_connect.authorizer_protocol = cpu_to_le32(auth_proto); + con->out_connect.authorizer_len = auth ? + cpu_to_le32(auth->authorizer_buf_len) : 0; + + con_out_kvec_add(con, sizeof (con->out_connect), + &con->out_connect); + if (auth && auth->authorizer_buf_len) + con_out_kvec_add(con, auth->authorizer_buf_len, + auth->authorizer_buf); con->out_more = 0; - set_bit(WRITE_PENDING, &con->state); + set_bit(CON_FLAG_WRITE_PENDING, &con->flags); - return prepare_connect_authorizer(con); + return 0; } /* @@ -781,30 +967,34 @@ out: return ret; /* done! 
*/ } -#ifdef CONFIG_BLOCK -static void init_bio_iter(struct bio *bio, struct bio **iter, int *seg) +static void out_msg_pos_next(struct ceph_connection *con, struct page *page, + size_t len, size_t sent, bool in_trail) { - if (!bio) { - *iter = NULL; - *seg = 0; - return; - } - *iter = bio; - *seg = bio->bi_idx; -} + struct ceph_msg *msg = con->out_msg; -static void iter_bio_next(struct bio **bio_iter, int *seg) -{ - if (*bio_iter == NULL) - return; + BUG_ON(!msg); + BUG_ON(!sent); - BUG_ON(*seg >= (*bio_iter)->bi_vcnt); + con->out_msg_pos.data_pos += sent; + con->out_msg_pos.page_pos += sent; + if (sent < len) + return; - (*seg)++; - if (*seg == (*bio_iter)->bi_vcnt) - init_bio_iter((*bio_iter)->bi_next, bio_iter, seg); -} + BUG_ON(sent != len); + con->out_msg_pos.page_pos = 0; + con->out_msg_pos.page++; + con->out_msg_pos.did_page_crc = false; + if (in_trail) + list_move_tail(&page->lru, + &msg->trail->head); + else if (msg->pagelist) + list_move_tail(&page->lru, + &msg->pagelist->head); +#ifdef CONFIG_BLOCK + else if (msg->bio) + iter_bio_next(&msg->bio_iter, &msg->bio_seg); #endif +} /* * Write as much message data payload as we can. If we finish, queue @@ -821,41 +1011,36 @@ static int write_partial_msg_pages(struct ceph_connection *con) bool do_datacrc = !con->msgr->nocrc; int ret; int total_max_write; - int in_trail = 0; - size_t trail_len = (msg->trail ? msg->trail->length : 0); + bool in_trail = false; + const size_t trail_len = (msg->trail ? msg->trail->length : 0); + const size_t trail_off = data_len - trail_len; dout("write_partial_msg_pages %p msg %p page %d/%d offset %d\n", - con, con->out_msg, con->out_msg_pos.page, con->out_msg->nr_pages, + con, msg, con->out_msg_pos.page, msg->nr_pages, con->out_msg_pos.page_pos); -#ifdef CONFIG_BLOCK - if (msg->bio && !msg->bio_iter) - init_bio_iter(msg->bio, &msg->bio_iter, &msg->bio_seg); -#endif - + /* + * Iterate through each page that contains data to be + * written, and send as much as possible for each. + * + * If we are calculating the data crc (the default), we will + * need to map the page. If we have no pages, they have + * been revoked, so use the zero page. + */ while (data_len > con->out_msg_pos.data_pos) { struct page *page = NULL; int max_write = PAGE_SIZE; int bio_offset = 0; - total_max_write = data_len - trail_len - - con->out_msg_pos.data_pos; - - /* - * if we are calculating the data crc (the default), we need - * to map the page. if our pages[] has been revoked, use the - * zero page. - */ - - /* have we reached the trail part of the data? 
*/ - if (con->out_msg_pos.data_pos >= data_len - trail_len) { - in_trail = 1; + in_trail = in_trail || con->out_msg_pos.data_pos >= trail_off; + if (!in_trail) + total_max_write = trail_off - con->out_msg_pos.data_pos; + if (in_trail) { total_max_write = data_len - con->out_msg_pos.data_pos; page = list_first_entry(&msg->trail->head, struct page, lru); - max_write = PAGE_SIZE; } else if (msg->pages) { page = msg->pages[con->out_msg_pos.page]; } else if (msg->pagelist) { @@ -878,52 +1063,32 @@ static int write_partial_msg_pages(struct ceph_connection *con) if (do_datacrc && !con->out_msg_pos.did_page_crc) { void *base; - u32 crc; - u32 tmpcrc = le32_to_cpu(con->out_msg->footer.data_crc); + u32 crc = le32_to_cpu(msg->footer.data_crc); char *kaddr; kaddr = kmap(page); BUG_ON(kaddr == NULL); base = kaddr + con->out_msg_pos.page_pos + bio_offset; - crc = crc32c(tmpcrc, base, len); - con->out_msg->footer.data_crc = cpu_to_le32(crc); + crc = crc32c(crc, base, len); + kunmap(page); + msg->footer.data_crc = cpu_to_le32(crc); con->out_msg_pos.did_page_crc = true; } ret = ceph_tcp_sendpage(con->sock, page, con->out_msg_pos.page_pos + bio_offset, len, 1); - - if (do_datacrc) - kunmap(page); - if (ret <= 0) goto out; - con->out_msg_pos.data_pos += ret; - con->out_msg_pos.page_pos += ret; - if (ret == len) { - con->out_msg_pos.page_pos = 0; - con->out_msg_pos.page++; - con->out_msg_pos.did_page_crc = false; - if (in_trail) - list_move_tail(&page->lru, - &msg->trail->head); - else if (msg->pagelist) - list_move_tail(&page->lru, - &msg->pagelist->head); -#ifdef CONFIG_BLOCK - else if (msg->bio) - iter_bio_next(&msg->bio_iter, &msg->bio_seg); -#endif - } + out_msg_pos_next(con, page, len, (size_t) ret, in_trail); } dout("write_partial_msg_pages %p msg %p done\n", con, msg); /* prepare and queue up footer, too */ if (!do_datacrc) - con->out_msg->footer.flags |= CEPH_MSG_FOOTER_NOCRC; - ceph_con_out_kvec_reset(con); + msg->footer.flags |= CEPH_MSG_FOOTER_NOCRC; + con_out_kvec_reset(con); prepare_write_message_footer(con); ret = 1; out: @@ -992,11 +1157,10 @@ static int prepare_read_message(struct ceph_connection *con) static int read_partial(struct ceph_connection *con, - int *to, int size, void *object) + int end, int size, void *object) { - *to += size; - while (con->in_base_pos < *to) { - int left = *to - con->in_base_pos; + while (con->in_base_pos < end) { + int left = end - con->in_base_pos; int have = size - left; int ret = ceph_tcp_recvmsg(con->sock, object + have, left); if (ret <= 0) @@ -1012,37 +1176,52 @@ static int read_partial(struct ceph_connection *con, */ static int read_partial_banner(struct ceph_connection *con) { - int ret, to = 0; + int size; + int end; + int ret; dout("read_partial_banner %p at %d\n", con, con->in_base_pos); /* peer's banner */ - ret = read_partial(con, &to, strlen(CEPH_BANNER), con->in_banner); + size = strlen(CEPH_BANNER); + end = size; + ret = read_partial(con, end, size, con->in_banner); if (ret <= 0) goto out; - ret = read_partial(con, &to, sizeof(con->actual_peer_addr), - &con->actual_peer_addr); + + size = sizeof (con->actual_peer_addr); + end += size; + ret = read_partial(con, end, size, &con->actual_peer_addr); if (ret <= 0) goto out; - ret = read_partial(con, &to, sizeof(con->peer_addr_for_me), - &con->peer_addr_for_me); + + size = sizeof (con->peer_addr_for_me); + end += size; + ret = read_partial(con, end, size, &con->peer_addr_for_me); if (ret <= 0) goto out; + out: return ret; } static int read_partial_connect(struct ceph_connection *con) { - int ret, to = 
0; + int size; + int end; + int ret; dout("read_partial_connect %p at %d\n", con, con->in_base_pos); - ret = read_partial(con, &to, sizeof(con->in_reply), &con->in_reply); + size = sizeof (con->in_reply); + end = size; + ret = read_partial(con, end, size, &con->in_reply); if (ret <= 0) goto out; - ret = read_partial(con, &to, le32_to_cpu(con->in_reply.authorizer_len), - con->auth_reply_buf); + + size = le32_to_cpu(con->in_reply.authorizer_len); + end += size; + ret = read_partial(con, end, size, con->auth_reply_buf); if (ret <= 0) goto out; @@ -1321,22 +1500,9 @@ static int process_banner(struct ceph_connection *con) ceph_pr_addr(&con->msgr->inst.addr.in_addr)); } - set_bit(NEGOTIATING, &con->state); - prepare_read_connect(con); return 0; } -static void fail_protocol(struct ceph_connection *con) -{ - reset_connection(con); - set_bit(CLOSED, &con->state); /* in case there's queued work */ - - mutex_unlock(&con->mutex); - if (con->ops->bad_proto) - con->ops->bad_proto(con); - mutex_lock(&con->mutex); -} - static int process_connect(struct ceph_connection *con) { u64 sup_feat = con->msgr->supported_features; @@ -1354,7 +1520,7 @@ static int process_connect(struct ceph_connection *con) ceph_pr_addr(&con->peer_addr.in_addr), sup_feat, server_feat, server_feat & ~sup_feat); con->error_msg = "missing required protocol features"; - fail_protocol(con); + reset_connection(con); return -1; case CEPH_MSGR_TAG_BADPROTOVER: @@ -1365,7 +1531,7 @@ static int process_connect(struct ceph_connection *con) le32_to_cpu(con->out_connect.protocol_version), le32_to_cpu(con->in_reply.protocol_version)); con->error_msg = "protocol version mismatch"; - fail_protocol(con); + reset_connection(con); return -1; case CEPH_MSGR_TAG_BADAUTHORIZER: @@ -1377,7 +1543,8 @@ static int process_connect(struct ceph_connection *con) return -1; } con->auth_retry = 1; - ret = prepare_write_connect(con->msgr, con, 0); + con_out_kvec_reset(con); + ret = prepare_write_connect(con); if (ret < 0) return ret; prepare_read_connect(con); @@ -1392,12 +1559,15 @@ static int process_connect(struct ceph_connection *con) * dropped messages. */ dout("process_connect got RESET peer seq %u\n", - le32_to_cpu(con->in_connect.connect_seq)); + le32_to_cpu(con->in_reply.connect_seq)); pr_err("%s%lld %s connection reset\n", ENTITY_NAME(con->peer_name), ceph_pr_addr(&con->peer_addr.in_addr)); reset_connection(con); - prepare_write_connect(con->msgr, con, 0); + con_out_kvec_reset(con); + ret = prepare_write_connect(con); + if (ret < 0) + return ret; prepare_read_connect(con); /* Tell ceph about it. */ @@ -1406,8 +1576,7 @@ static int process_connect(struct ceph_connection *con) if (con->ops->peer_reset) con->ops->peer_reset(con); mutex_lock(&con->mutex); - if (test_bit(CLOSED, &con->state) || - test_bit(OPENING, &con->state)) + if (con->state != CON_STATE_NEGOTIATING) return -EAGAIN; break; @@ -1416,11 +1585,14 @@ static int process_connect(struct ceph_connection *con) * If we sent a smaller connect_seq than the peer has, try * again with a larger value. 
*/ - dout("process_connect got RETRY my seq = %u, peer_seq = %u\n", + dout("process_connect got RETRY_SESSION my seq %u, peer %u\n", le32_to_cpu(con->out_connect.connect_seq), - le32_to_cpu(con->in_connect.connect_seq)); - con->connect_seq = le32_to_cpu(con->in_connect.connect_seq); - prepare_write_connect(con->msgr, con, 0); + le32_to_cpu(con->in_reply.connect_seq)); + con->connect_seq = le32_to_cpu(con->in_reply.connect_seq); + con_out_kvec_reset(con); + ret = prepare_write_connect(con); + if (ret < 0) + return ret; prepare_read_connect(con); break; @@ -1431,10 +1603,13 @@ static int process_connect(struct ceph_connection *con) */ dout("process_connect got RETRY_GLOBAL my %u peer_gseq %u\n", con->peer_global_seq, - le32_to_cpu(con->in_connect.global_seq)); + le32_to_cpu(con->in_reply.global_seq)); get_global_seq(con->msgr, - le32_to_cpu(con->in_connect.global_seq)); - prepare_write_connect(con->msgr, con, 0); + le32_to_cpu(con->in_reply.global_seq)); + con_out_kvec_reset(con); + ret = prepare_write_connect(con); + if (ret < 0) + return ret; prepare_read_connect(con); break; @@ -1446,10 +1621,13 @@ static int process_connect(struct ceph_connection *con) ceph_pr_addr(&con->peer_addr.in_addr), req_feat, server_feat, req_feat & ~server_feat); con->error_msg = "missing required protocol features"; - fail_protocol(con); + reset_connection(con); return -1; } - clear_bit(CONNECTING, &con->state); + + WARN_ON(con->state != CON_STATE_NEGOTIATING); + con->state = CON_STATE_OPEN; + con->peer_global_seq = le32_to_cpu(con->in_reply.global_seq); con->connect_seq++; con->peer_features = server_feat; @@ -1461,7 +1639,9 @@ static int process_connect(struct ceph_connection *con) le32_to_cpu(con->in_reply.connect_seq)); if (con->in_reply.flags & CEPH_MSG_CONNECT_LOSSY) - set_bit(LOSSYTX, &con->state); + set_bit(CON_FLAG_LOSSYTX, &con->flags); + + con->delay = 0; /* reset backoff memory */ prepare_read_tag(con); break; @@ -1491,10 +1671,10 @@ static int process_connect(struct ceph_connection *con) */ static int read_partial_ack(struct ceph_connection *con) { - int to = 0; + int size = sizeof (con->in_temp_ack); + int end = size; - return read_partial(con, &to, sizeof(con->in_temp_ack), - &con->in_temp_ack); + return read_partial(con, end, size, &con->in_temp_ack); } @@ -1547,10 +1727,7 @@ static int read_partial_message_section(struct ceph_connection *con, return 1; } -static struct ceph_msg *ceph_alloc_msg(struct ceph_connection *con, - struct ceph_msg_header *hdr, - int *skip); - +static int ceph_con_in_msg_alloc(struct ceph_connection *con, int *skip); static int read_partial_message_pages(struct ceph_connection *con, struct page **pages, @@ -1593,9 +1770,6 @@ static int read_partial_message_bio(struct ceph_connection *con, void *p; int ret, left; - if (IS_ERR(bv)) - return PTR_ERR(bv); - left = min((int)(data_len - con->in_msg_pos.data_pos), (int)(bv->bv_len - con->in_msg_pos.page_pos)); @@ -1627,26 +1801,22 @@ static int read_partial_message_bio(struct ceph_connection *con, static int read_partial_message(struct ceph_connection *con) { struct ceph_msg *m = con->in_msg; + int size; + int end; int ret; - int to, left; unsigned front_len, middle_len, data_len; bool do_datacrc = !con->msgr->nocrc; - int skip; u64 seq; u32 crc; dout("read_partial_message con %p msg %p\n", con, m); /* header */ - while (con->in_base_pos < sizeof(con->in_hdr)) { - left = sizeof(con->in_hdr) - con->in_base_pos; - ret = ceph_tcp_recvmsg(con->sock, - (char *)&con->in_hdr + con->in_base_pos, - left); - if (ret <= 0) - return ret; 
- con->in_base_pos += ret; - } + size = sizeof (con->in_hdr); + end = size; + ret = read_partial(con, end, size, &con->in_hdr); + if (ret <= 0) + return ret; crc = crc32c(0, &con->in_hdr, offsetof(struct ceph_msg_header, crc)); if (cpu_to_le32(crc) != con->in_hdr.crc) { @@ -1686,10 +1856,13 @@ static int read_partial_message(struct ceph_connection *con) /* allocate message? */ if (!con->in_msg) { + int skip = 0; + dout("got hdr type %d front %d data %d\n", con->in_hdr.type, con->in_hdr.front_len, con->in_hdr.data_len); - skip = 0; - con->in_msg = ceph_alloc_msg(con, &con->in_hdr, &skip); + ret = ceph_con_in_msg_alloc(con, &skip); + if (ret < 0) + return ret; if (skip) { /* skip this message */ dout("alloc_msg said skip message\n"); @@ -1700,11 +1873,9 @@ static int read_partial_message(struct ceph_connection *con) con->in_seq++; return 0; } - if (!con->in_msg) { - con->error_msg = - "error allocating memory for incoming message"; - return -ENOMEM; - } + + BUG_ON(!con->in_msg); + BUG_ON(con->in_msg->con != con); m = con->in_msg; m->front.iov_len = 0; /* haven't read it yet */ if (m->middle) @@ -1716,6 +1887,11 @@ static int read_partial_message(struct ceph_connection *con) else con->in_msg_pos.page_pos = 0; con->in_msg_pos.data_pos = 0; + +#ifdef CONFIG_BLOCK + if (m->bio) + init_bio_iter(m->bio, &m->bio_iter, &m->bio_seg); +#endif } /* front */ @@ -1732,10 +1908,6 @@ static int read_partial_message(struct ceph_connection *con) if (ret <= 0) return ret; } -#ifdef CONFIG_BLOCK - if (m->bio && !m->bio_iter) - init_bio_iter(m->bio, &m->bio_iter, &m->bio_seg); -#endif /* (page) data */ while (con->in_msg_pos.data_pos < data_len) { @@ -1746,7 +1918,7 @@ static int read_partial_message(struct ceph_connection *con) return ret; #ifdef CONFIG_BLOCK } else if (m->bio) { - + BUG_ON(!m->bio_iter); ret = read_partial_message_bio(con, &m->bio_iter, &m->bio_seg, data_len, do_datacrc); @@ -1759,16 +1931,12 @@ static int read_partial_message(struct ceph_connection *con) } /* footer */ - to = sizeof(m->hdr) + sizeof(m->footer); - while (con->in_base_pos < to) { - left = to - con->in_base_pos; - ret = ceph_tcp_recvmsg(con->sock, (char *)&m->footer + - (con->in_base_pos - sizeof(m->hdr)), - left); - if (ret <= 0) - return ret; - con->in_base_pos += ret; - } + size = sizeof (m->footer); + end += size; + ret = read_partial(con, end, size, &m->footer); + if (ret <= 0) + return ret; + dout("read_partial_message got msg %p %d (%u) + %d (%u) + %d (%u)\n", m, front_len, m->footer.front_crc, middle_len, m->footer.middle_crc, data_len, m->footer.data_crc); @@ -1804,8 +1972,11 @@ static void process_message(struct ceph_connection *con) { struct ceph_msg *msg; + BUG_ON(con->in_msg->con != con); + con->in_msg->con = NULL; msg = con->in_msg; con->in_msg = NULL; + con->ops->put(con); /* if first message, set peer_name */ if (con->peer_name.type == 0) @@ -1825,7 +1996,6 @@ static void process_message(struct ceph_connection *con) con->ops->dispatch(con, msg); mutex_lock(&con->mutex); - prepare_read_tag(con); } @@ -1835,21 +2005,21 @@ static void process_message(struct ceph_connection *con) */ static int try_write(struct ceph_connection *con) { - struct ceph_messenger *msgr = con->msgr; int ret = 1; - dout("try_write start %p state %lu nref %d\n", con, con->state, - atomic_read(&con->nref)); + dout("try_write start %p state %lu\n", con, con->state); more: dout("try_write out_kvec_bytes %d\n", con->out_kvec_bytes); /* open the socket first? 
*/ - if (con->sock == NULL) { - prepare_write_connect(msgr, con, 1); + if (con->state == CON_STATE_PREOPEN) { + BUG_ON(con->sock); + con->state = CON_STATE_CONNECTING; + + con_out_kvec_reset(con); + prepare_write_banner(con); prepare_read_banner(con); - set_bit(CONNECTING, &con->state); - clear_bit(NEGOTIATING, &con->state); BUG_ON(con->in_msg); con->in_tag = CEPH_MSGR_TAG_READY; @@ -1896,7 +2066,7 @@ more_kvec: } do_next: - if (!test_bit(CONNECTING, &con->state)) { + if (con->state == CON_STATE_OPEN) { /* is anything else pending? */ if (!list_empty(&con->out_queue)) { prepare_write_message(con); @@ -1906,14 +2076,15 @@ do_next: prepare_write_ack(con); goto more; } - if (test_and_clear_bit(KEEPALIVE_PENDING, &con->state)) { + if (test_and_clear_bit(CON_FLAG_KEEPALIVE_PENDING, + &con->flags)) { prepare_write_keepalive(con); goto more; } } /* Nothing to do! */ - clear_bit(WRITE_PENDING, &con->state); + clear_bit(CON_FLAG_WRITE_PENDING, &con->flags); dout("try_write nothing else to write.\n"); ret = 0; out: @@ -1930,38 +2101,45 @@ static int try_read(struct ceph_connection *con) { int ret = -1; - if (!con->sock) - return 0; - - if (test_bit(STANDBY, &con->state)) +more: + dout("try_read start on %p state %lu\n", con, con->state); + if (con->state != CON_STATE_CONNECTING && + con->state != CON_STATE_NEGOTIATING && + con->state != CON_STATE_OPEN) return 0; - dout("try_read start on %p\n", con); + BUG_ON(!con->sock); -more: dout("try_read tag %d in_base_pos %d\n", (int)con->in_tag, con->in_base_pos); - /* - * process_connect and process_message drop and re-take - * con->mutex. make sure we handle a racing close or reopen. - */ - if (test_bit(CLOSED, &con->state) || - test_bit(OPENING, &con->state)) { - ret = -EAGAIN; + if (con->state == CON_STATE_CONNECTING) { + dout("try_read connecting\n"); + ret = read_partial_banner(con); + if (ret <= 0) + goto out; + ret = process_banner(con); + if (ret < 0) + goto out; + + con->state = CON_STATE_NEGOTIATING; + + /* + * Received banner is good, exchange connection info. + * Do not reset out_kvec, as sending our banner raced + * with receiving peer banner after connect completed. + */ + ret = prepare_write_connect(con); + if (ret < 0) + goto out; + prepare_read_connect(con); + + /* Send connection info before awaiting response */ goto out; } - if (test_bit(CONNECTING, &con->state)) { - if (!test_bit(NEGOTIATING, &con->state)) { - dout("try_read connecting\n"); - ret = read_partial_banner(con); - if (ret <= 0) - goto out; - ret = process_banner(con); - if (ret < 0) - goto out; - } + if (con->state == CON_STATE_NEGOTIATING) { + dout("try_read negotiating\n"); ret = read_partial_connect(con); if (ret <= 0) goto out; @@ -1971,6 +2149,8 @@ more: goto more; } + WARN_ON(con->state != CON_STATE_OPEN); + if (con->in_base_pos < 0) { /* * skipping + discarding content. 
@@ -2004,7 +2184,8 @@ more: prepare_read_ack(con); break; case CEPH_MSGR_TAG_CLOSE: - set_bit(CLOSED, &con->state); /* fixme */ + con_close_socket(con); + con->state = CON_STATE_CLOSED; goto out; default: goto bad_tag; @@ -2027,6 +2208,8 @@ more: if (con->in_tag == CEPH_MSGR_TAG_READY) goto more; process_message(con); + if (con->state == CON_STATE_OPEN) + prepare_read_tag(con); goto more; } if (con->in_tag == CEPH_MSGR_TAG_ACK) { @@ -2055,12 +2238,6 @@ bad_tag: */ static void queue_con(struct ceph_connection *con) { - if (test_bit(DEAD, &con->state)) { - dout("queue_con %p ignoring: DEAD\n", - con); - return; - } - if (!con->ops->get(con)) { dout("queue_con %p ref count 0\n", con); return; @@ -2074,6 +2251,35 @@ static void queue_con(struct ceph_connection *con) } } +static bool con_sock_closed(struct ceph_connection *con) +{ + if (!test_and_clear_bit(CON_FLAG_SOCK_CLOSED, &con->flags)) + return false; + +#define CASE(x) \ + case CON_STATE_ ## x: \ + con->error_msg = "socket closed (con state " #x ")"; \ + break; + + switch (con->state) { + CASE(CLOSED); + CASE(PREOPEN); + CASE(CONNECTING); + CASE(NEGOTIATING); + CASE(OPEN); + CASE(STANDBY); + default: + pr_warning("%s con %p unrecognized state %lu\n", + __func__, con, con->state); + con->error_msg = "unrecognized con state"; + BUG(); + break; + } +#undef CASE + + return true; +} + /* * Do some work on a connection. Drop a connection ref when we're done. */ @@ -2085,7 +2291,10 @@ static void con_work(struct work_struct *work) mutex_lock(&con->mutex); restart: - if (test_and_clear_bit(BACKOFF, &con->state)) { + if (con_sock_closed(con)) + goto fault; + + if (test_and_clear_bit(CON_FLAG_BACKOFF, &con->flags)) { dout("con_work %p backing off\n", con); if (queue_delayed_work(ceph_msgr_wq, &con->work, round_jiffies_relative(con->delay))) { @@ -2093,41 +2302,42 @@ restart: mutex_unlock(&con->mutex); return; } else { - con->ops->put(con); dout("con_work %p FAILED to back off %lu\n", con, con->delay); + set_bit(CON_FLAG_BACKOFF, &con->flags); } + goto done; } - if (test_bit(STANDBY, &con->state)) { + if (con->state == CON_STATE_STANDBY) { dout("con_work %p STANDBY\n", con); goto done; } - if (test_bit(CLOSED, &con->state)) { /* e.g. 
if we are replaced */ - dout("con_work CLOSED\n"); - con_close_socket(con); + if (con->state == CON_STATE_CLOSED) { + dout("con_work %p CLOSED\n", con); + BUG_ON(con->sock); goto done; } - if (test_and_clear_bit(OPENING, &con->state)) { - /* reopen w/ new peer */ + if (con->state == CON_STATE_PREOPEN) { dout("con_work OPENING\n"); - con_close_socket(con); + BUG_ON(con->sock); } - if (test_and_clear_bit(SOCK_CLOSED, &con->state)) - goto fault; - ret = try_read(con); if (ret == -EAGAIN) goto restart; - if (ret < 0) + if (ret < 0) { + con->error_msg = "socket error on read"; goto fault; + } ret = try_write(con); if (ret == -EAGAIN) goto restart; - if (ret < 0) + if (ret < 0) { + con->error_msg = "socket error on write"; goto fault; + } done: mutex_unlock(&con->mutex); @@ -2136,7 +2346,6 @@ done_unlocked: return; fault: - mutex_unlock(&con->mutex); ceph_fault(con); /* error/fault path */ goto done_unlocked; } @@ -2147,26 +2356,31 @@ fault: * exponential backoff */ static void ceph_fault(struct ceph_connection *con) + __releases(con->mutex) { - pr_err("%s%lld %s %s\n", ENTITY_NAME(con->peer_name), + pr_warning("%s%lld %s %s\n", ENTITY_NAME(con->peer_name), ceph_pr_addr(&con->peer_addr.in_addr), con->error_msg); dout("fault %p state %lu to peer %s\n", con, con->state, ceph_pr_addr(&con->peer_addr.in_addr)); - if (test_bit(LOSSYTX, &con->state)) { - dout("fault on LOSSYTX channel\n"); - goto out; - } - - mutex_lock(&con->mutex); - if (test_bit(CLOSED, &con->state)) - goto out_unlock; + WARN_ON(con->state != CON_STATE_CONNECTING && + con->state != CON_STATE_NEGOTIATING && + con->state != CON_STATE_OPEN); con_close_socket(con); + if (test_bit(CON_FLAG_LOSSYTX, &con->flags)) { + dout("fault on LOSSYTX channel, marking CLOSED\n"); + con->state = CON_STATE_CLOSED; + goto out_unlock; + } + if (con->in_msg) { + BUG_ON(con->in_msg->con != con); + con->in_msg->con = NULL; ceph_msg_put(con->in_msg); con->in_msg = NULL; + con->ops->put(con); } /* Requeue anything that hasn't been acked */ @@ -2175,12 +2389,13 @@ static void ceph_fault(struct ceph_connection *con) /* If there are no messages queued or keepalive pending, place * the connection in a STANDBY state */ if (list_empty(&con->out_queue) && - !test_bit(KEEPALIVE_PENDING, &con->state)) { + !test_bit(CON_FLAG_KEEPALIVE_PENDING, &con->flags)) { dout("fault %p setting STANDBY clearing WRITE_PENDING\n", con); - clear_bit(WRITE_PENDING, &con->state); - set_bit(STANDBY, &con->state); + clear_bit(CON_FLAG_WRITE_PENDING, &con->flags); + con->state = CON_STATE_STANDBY; } else { /* retry after a delay. */ + con->state = CON_STATE_PREOPEN; if (con->delay == 0) con->delay = BASE_DELAY_INTERVAL; else if (con->delay < MAX_DELAY_INTERVAL) @@ -2201,13 +2416,12 @@ static void ceph_fault(struct ceph_connection *con) * that when con_work restarts we schedule the * delay then. */ - set_bit(BACKOFF, &con->state); + set_bit(CON_FLAG_BACKOFF, &con->flags); } } out_unlock: mutex_unlock(&con->mutex); -out: /* * in case we faulted due to authentication, invalidate our * current tickets so that we can get new ones. 
@@ -2224,18 +2438,14 @@ out: /* - * create a new messenger instance + * initialize a new messenger instance */ -struct ceph_messenger *ceph_messenger_create(struct ceph_entity_addr *myaddr, - u32 supported_features, - u32 required_features) +void ceph_messenger_init(struct ceph_messenger *msgr, + struct ceph_entity_addr *myaddr, + u32 supported_features, + u32 required_features, + bool nocrc) { - struct ceph_messenger *msgr; - - msgr = kzalloc(sizeof(*msgr), GFP_KERNEL); - if (msgr == NULL) - return ERR_PTR(-ENOMEM); - msgr->supported_features = supported_features; msgr->required_features = required_features; @@ -2248,30 +2458,23 @@ struct ceph_messenger *ceph_messenger_create(struct ceph_entity_addr *myaddr, msgr->inst.addr.type = 0; get_random_bytes(&msgr->inst.addr.nonce, sizeof(msgr->inst.addr.nonce)); encode_my_addr(msgr); + msgr->nocrc = nocrc; - dout("messenger_create %p\n", msgr); - return msgr; -} -EXPORT_SYMBOL(ceph_messenger_create); + atomic_set(&msgr->stopping, 0); -void ceph_messenger_destroy(struct ceph_messenger *msgr) -{ - dout("destroy %p\n", msgr); - kfree(msgr); - dout("destroyed messenger %p\n", msgr); + dout("%s %p\n", __func__, msgr); } -EXPORT_SYMBOL(ceph_messenger_destroy); +EXPORT_SYMBOL(ceph_messenger_init); static void clear_standby(struct ceph_connection *con) { /* come back from STANDBY? */ - if (test_and_clear_bit(STANDBY, &con->state)) { - mutex_lock(&con->mutex); + if (con->state == CON_STATE_STANDBY) { dout("clear_standby %p and ++connect_seq\n", con); + con->state = CON_STATE_PREOPEN; con->connect_seq++; - WARN_ON(test_bit(WRITE_PENDING, &con->state)); - WARN_ON(test_bit(KEEPALIVE_PENDING, &con->state)); - mutex_unlock(&con->mutex); + WARN_ON(test_bit(CON_FLAG_WRITE_PENDING, &con->flags)); + WARN_ON(test_bit(CON_FLAG_KEEPALIVE_PENDING, &con->flags)); } } @@ -2280,21 +2483,24 @@ static void clear_standby(struct ceph_connection *con) */ void ceph_con_send(struct ceph_connection *con, struct ceph_msg *msg) { - if (test_bit(CLOSED, &con->state)) { - dout("con_send %p closed, dropping %p\n", con, msg); - ceph_msg_put(msg); - return; - } - /* set src+dst */ msg->hdr.src = con->msgr->inst.name; - BUG_ON(msg->front.iov_len != le32_to_cpu(msg->hdr.front_len)); - msg->needs_out_seq = true; - /* queue */ mutex_lock(&con->mutex); + + if (con->state == CON_STATE_CLOSED) { + dout("con_send %p closed, dropping %p\n", con, msg); + ceph_msg_put(msg); + mutex_unlock(&con->mutex); + return; + } + + BUG_ON(msg->con != NULL); + msg->con = con->ops->get(con); + BUG_ON(msg->con == NULL); + BUG_ON(!list_empty(&msg->list_head)); list_add_tail(&msg->list_head, &con->out_queue); dout("----- %p to %s%lld %d=%s len %d+%d+%d -----\n", msg, @@ -2303,12 +2509,13 @@ void ceph_con_send(struct ceph_connection *con, struct ceph_msg *msg) le32_to_cpu(msg->hdr.front_len), le32_to_cpu(msg->hdr.middle_len), le32_to_cpu(msg->hdr.data_len)); + + clear_standby(con); mutex_unlock(&con->mutex); /* if there wasn't anything waiting to send before, queue * new work */ - clear_standby(con); - if (test_and_set_bit(WRITE_PENDING, &con->state) == 0) + if (test_and_set_bit(CON_FLAG_WRITE_PENDING, &con->flags) == 0) queue_con(con); } EXPORT_SYMBOL(ceph_con_send); @@ -2316,24 +2523,34 @@ EXPORT_SYMBOL(ceph_con_send); /* * Revoke a message that was previously queued for send */ -void ceph_con_revoke(struct ceph_connection *con, struct ceph_msg *msg) +void ceph_msg_revoke(struct ceph_msg *msg) { + struct ceph_connection *con = msg->con; + + if (!con) + return; /* Message not in our possession */ + 
mutex_lock(&con->mutex); if (!list_empty(&msg->list_head)) { - dout("con_revoke %p msg %p - was on queue\n", con, msg); + dout("%s %p msg %p - was on queue\n", __func__, con, msg); list_del_init(&msg->list_head); - ceph_msg_put(msg); + BUG_ON(msg->con == NULL); + msg->con->ops->put(msg->con); + msg->con = NULL; msg->hdr.seq = 0; + + ceph_msg_put(msg); } if (con->out_msg == msg) { - dout("con_revoke %p msg %p - was sending\n", con, msg); + dout("%s %p msg %p - was sending\n", __func__, con, msg); con->out_msg = NULL; if (con->out_kvec_is_msg) { con->out_skip = con->out_kvec_bytes; con->out_kvec_is_msg = false; } - ceph_msg_put(msg); msg->hdr.seq = 0; + + ceph_msg_put(msg); } mutex_unlock(&con->mutex); } @@ -2341,17 +2558,27 @@ void ceph_con_revoke(struct ceph_connection *con, struct ceph_msg *msg) /* * Revoke a message that we may be reading data into */ -void ceph_con_revoke_message(struct ceph_connection *con, struct ceph_msg *msg) +void ceph_msg_revoke_incoming(struct ceph_msg *msg) { + struct ceph_connection *con; + + BUG_ON(msg == NULL); + if (!msg->con) { + dout("%s msg %p null con\n", __func__, msg); + + return; /* Message not in our possession */ + } + + con = msg->con; mutex_lock(&con->mutex); - if (con->in_msg && con->in_msg == msg) { + if (con->in_msg == msg) { unsigned front_len = le32_to_cpu(con->in_hdr.front_len); unsigned middle_len = le32_to_cpu(con->in_hdr.middle_len); unsigned data_len = le32_to_cpu(con->in_hdr.data_len); /* skip rest of message */ - dout("con_revoke_pages %p msg %p revoked\n", con, msg); - con->in_base_pos = con->in_base_pos - + dout("%s %p msg %p revoked\n", __func__, con, msg); + con->in_base_pos = con->in_base_pos - sizeof(struct ceph_msg_header) - front_len - middle_len - @@ -2362,8 +2589,8 @@ void ceph_con_revoke_message(struct ceph_connection *con, struct ceph_msg *msg) con->in_tag = CEPH_MSGR_TAG_READY; con->in_seq++; } else { - dout("con_revoke_pages %p msg %p pages %p no-op\n", - con, con->in_msg, msg); + dout("%s %p in_msg %p msg %p no-op\n", + __func__, con, con->in_msg, msg); } mutex_unlock(&con->mutex); } @@ -2374,9 +2601,11 @@ void ceph_con_revoke_message(struct ceph_connection *con, struct ceph_msg *msg) void ceph_con_keepalive(struct ceph_connection *con) { dout("con_keepalive %p\n", con); + mutex_lock(&con->mutex); clear_standby(con); - if (test_and_set_bit(KEEPALIVE_PENDING, &con->state) == 0 && - test_and_set_bit(WRITE_PENDING, &con->state) == 0) + mutex_unlock(&con->mutex); + if (test_and_set_bit(CON_FLAG_KEEPALIVE_PENDING, &con->flags) == 0 && + test_and_set_bit(CON_FLAG_WRITE_PENDING, &con->flags) == 0) queue_con(con); } EXPORT_SYMBOL(ceph_con_keepalive); @@ -2395,6 +2624,8 @@ struct ceph_msg *ceph_msg_new(int type, int front_len, gfp_t flags, if (m == NULL) goto out; kref_init(&m->kref); + + m->con = NULL; INIT_LIST_HEAD(&m->list_head); m->hdr.tid = 0; @@ -2490,46 +2721,78 @@ static int ceph_alloc_middle(struct ceph_connection *con, struct ceph_msg *msg) } /* - * Generic message allocator, for incoming messages. + * Allocate a message for receiving an incoming message on a + * connection, and save the result in con->in_msg. Uses the + * connection's private alloc_msg op if available. + * + * Returns 0 on success, or a negative error code. + * + * On success, if we set *skip = 1: + * - the next message should be skipped and ignored. + * - con->in_msg == NULL + * or if we set *skip = 0: + * - con->in_msg is non-null. 
+ * On error (ENOMEM, EAGAIN, ...), + * - con->in_msg == NULL */ -static struct ceph_msg *ceph_alloc_msg(struct ceph_connection *con, - struct ceph_msg_header *hdr, - int *skip) +static int ceph_con_in_msg_alloc(struct ceph_connection *con, int *skip) { + struct ceph_msg_header *hdr = &con->in_hdr; int type = le16_to_cpu(hdr->type); int front_len = le32_to_cpu(hdr->front_len); int middle_len = le32_to_cpu(hdr->middle_len); - struct ceph_msg *msg = NULL; - int ret; + int ret = 0; + + BUG_ON(con->in_msg != NULL); if (con->ops->alloc_msg) { + struct ceph_msg *msg; + mutex_unlock(&con->mutex); msg = con->ops->alloc_msg(con, hdr, skip); mutex_lock(&con->mutex); - if (!msg || *skip) - return NULL; + if (con->state != CON_STATE_OPEN) { + if (msg) + ceph_msg_put(msg); + return -EAGAIN; + } + con->in_msg = msg; + if (con->in_msg) { + con->in_msg->con = con->ops->get(con); + BUG_ON(con->in_msg->con == NULL); + } + if (*skip) { + con->in_msg = NULL; + return 0; + } + if (!con->in_msg) { + con->error_msg = + "error allocating memory for incoming message"; + return -ENOMEM; + } } - if (!msg) { - *skip = 0; - msg = ceph_msg_new(type, front_len, GFP_NOFS, false); - if (!msg) { + if (!con->in_msg) { + con->in_msg = ceph_msg_new(type, front_len, GFP_NOFS, false); + if (!con->in_msg) { pr_err("unable to allocate msg type %d len %d\n", type, front_len); - return NULL; + return -ENOMEM; } - msg->page_alignment = le16_to_cpu(hdr->data_off); + con->in_msg->con = con->ops->get(con); + BUG_ON(con->in_msg->con == NULL); + con->in_msg->page_alignment = le16_to_cpu(hdr->data_off); } - memcpy(&msg->hdr, &con->in_hdr, sizeof(con->in_hdr)); + memcpy(&con->in_msg->hdr, &con->in_hdr, sizeof(con->in_hdr)); - if (middle_len && !msg->middle) { - ret = ceph_alloc_middle(con, msg); + if (middle_len && !con->in_msg->middle) { + ret = ceph_alloc_middle(con, con->in_msg); if (ret < 0) { - ceph_msg_put(msg); - return NULL; + ceph_msg_put(con->in_msg); + con->in_msg = NULL; } } - return msg; + return ret; } diff --git a/net/ceph/mon_client.c b/net/ceph/mon_client.c index 1845cde26227..89a6409b4e1d 100644 --- a/net/ceph/mon_client.c +++ b/net/ceph/mon_client.c @@ -106,9 +106,9 @@ static void __send_prepared_auth_request(struct ceph_mon_client *monc, int len) monc->pending_auth = 1; monc->m_auth->front.iov_len = len; monc->m_auth->hdr.front_len = cpu_to_le32(len); - ceph_con_revoke(monc->con, monc->m_auth); + ceph_msg_revoke(monc->m_auth); ceph_msg_get(monc->m_auth); /* keep our ref */ - ceph_con_send(monc->con, monc->m_auth); + ceph_con_send(&monc->con, monc->m_auth); } /* @@ -117,8 +117,11 @@ static void __send_prepared_auth_request(struct ceph_mon_client *monc, int len) static void __close_session(struct ceph_mon_client *monc) { dout("__close_session closing mon%d\n", monc->cur_mon); - ceph_con_revoke(monc->con, monc->m_auth); - ceph_con_close(monc->con); + ceph_msg_revoke(monc->m_auth); + ceph_msg_revoke_incoming(monc->m_auth_reply); + ceph_msg_revoke(monc->m_subscribe); + ceph_msg_revoke_incoming(monc->m_subscribe_ack); + ceph_con_close(&monc->con); monc->cur_mon = -1; monc->pending_auth = 0; ceph_auth_reset(monc->auth); @@ -142,9 +145,8 @@ static int __open_session(struct ceph_mon_client *monc) monc->want_next_osdmap = !!monc->want_next_osdmap; dout("open_session mon%d opening\n", monc->cur_mon); - monc->con->peer_name.type = CEPH_ENTITY_TYPE_MON; - monc->con->peer_name.num = cpu_to_le64(monc->cur_mon); - ceph_con_open(monc->con, + ceph_con_open(&monc->con, + CEPH_ENTITY_TYPE_MON, monc->cur_mon, 
&monc->monmap->mon_inst[monc->cur_mon].addr); /* initiatiate authentication handshake */ @@ -226,8 +228,8 @@ static void __send_subscribe(struct ceph_mon_client *monc) msg->front.iov_len = p - msg->front.iov_base; msg->hdr.front_len = cpu_to_le32(msg->front.iov_len); - ceph_con_revoke(monc->con, msg); - ceph_con_send(monc->con, ceph_msg_get(msg)); + ceph_msg_revoke(msg); + ceph_con_send(&monc->con, ceph_msg_get(msg)); monc->sub_sent = jiffies | 1; /* never 0 */ } @@ -247,7 +249,7 @@ static void handle_subscribe_ack(struct ceph_mon_client *monc, if (monc->hunting) { pr_info("mon%d %s session established\n", monc->cur_mon, - ceph_pr_addr(&monc->con->peer_addr.in_addr)); + ceph_pr_addr(&monc->con.peer_addr.in_addr)); monc->hunting = false; } dout("handle_subscribe_ack after %d seconds\n", seconds); @@ -309,6 +311,17 @@ int ceph_monc_open_session(struct ceph_mon_client *monc) EXPORT_SYMBOL(ceph_monc_open_session); /* + * We require the fsid and global_id in order to initialize our + * debugfs dir. + */ +static bool have_debugfs_info(struct ceph_mon_client *monc) +{ + dout("have_debugfs_info fsid %d globalid %lld\n", + (int)monc->client->have_fsid, monc->auth->global_id); + return monc->client->have_fsid && monc->auth->global_id > 0; +} + +/* * The monitor responds with mount ack indicate mount success. The * included client ticket allows the client to talk to MDSs and OSDs. */ @@ -318,9 +331,12 @@ static void ceph_monc_handle_map(struct ceph_mon_client *monc, struct ceph_client *client = monc->client; struct ceph_monmap *monmap = NULL, *old = monc->monmap; void *p, *end; + int had_debugfs_info, init_debugfs = 0; mutex_lock(&monc->mutex); + had_debugfs_info = have_debugfs_info(monc); + dout("handle_monmap\n"); p = msg->front.iov_base; end = p + msg->front.iov_len; @@ -342,12 +358,22 @@ static void ceph_monc_handle_map(struct ceph_mon_client *monc, if (!client->have_fsid) { client->have_fsid = true; + if (!had_debugfs_info && have_debugfs_info(monc)) { + pr_info("client%lld fsid %pU\n", + ceph_client_id(monc->client), + &monc->client->fsid); + init_debugfs = 1; + } mutex_unlock(&monc->mutex); - /* - * do debugfs initialization without mutex to avoid - * creating a locking dependency - */ - ceph_debugfs_client_init(client); + + if (init_debugfs) { + /* + * do debugfs initialization without mutex to avoid + * creating a locking dependency + */ + ceph_debugfs_client_init(monc->client); + } + goto out_unlocked; } out: @@ -439,6 +465,7 @@ static struct ceph_msg *get_generic_reply(struct ceph_connection *con, m = NULL; } else { dout("get_generic_reply %lld got %p\n", tid, req->reply); + *skip = 0; m = ceph_msg_get(req->reply); /* * we don't need to track the connection reading into @@ -461,7 +488,7 @@ static int do_generic_request(struct ceph_mon_client *monc, req->request->hdr.tid = cpu_to_le64(req->tid); __insert_generic_request(monc, req); monc->num_generic_requests++; - ceph_con_send(monc->con, ceph_msg_get(req->request)); + ceph_con_send(&monc->con, ceph_msg_get(req->request)); mutex_unlock(&monc->mutex); err = wait_for_completion_interruptible(&req->completion); @@ -684,8 +711,9 @@ static void __resend_generic_request(struct ceph_mon_client *monc) for (p = rb_first(&monc->generic_request_tree); p; p = rb_next(p)) { req = rb_entry(p, struct ceph_mon_generic_request, node); - ceph_con_revoke(monc->con, req->request); - ceph_con_send(monc->con, ceph_msg_get(req->request)); + ceph_msg_revoke(req->request); + ceph_msg_revoke_incoming(req->reply); + ceph_con_send(&monc->con, 
ceph_msg_get(req->request)); } } @@ -705,7 +733,7 @@ static void delayed_work(struct work_struct *work) __close_session(monc); __open_session(monc); /* continue hunting */ } else { - ceph_con_keepalive(monc->con); + ceph_con_keepalive(&monc->con); __validate_auth(monc); @@ -760,19 +788,12 @@ int ceph_monc_init(struct ceph_mon_client *monc, struct ceph_client *cl) goto out; /* connection */ - monc->con = kmalloc(sizeof(*monc->con), GFP_KERNEL); - if (!monc->con) - goto out_monmap; - ceph_con_init(monc->client->msgr, monc->con); - monc->con->private = monc; - monc->con->ops = &mon_con_ops; - /* authentication */ monc->auth = ceph_auth_init(cl->options->name, cl->options->key); if (IS_ERR(monc->auth)) { err = PTR_ERR(monc->auth); - goto out_con; + goto out_monmap; } monc->auth->want_keys = CEPH_ENTITY_TYPE_AUTH | CEPH_ENTITY_TYPE_MON | @@ -801,6 +822,9 @@ int ceph_monc_init(struct ceph_mon_client *monc, struct ceph_client *cl) if (!monc->m_auth) goto out_auth_reply; + ceph_con_init(&monc->con, monc, &mon_con_ops, + &monc->client->msgr); + monc->cur_mon = -1; monc->hunting = true; monc->sub_renew_after = jiffies; @@ -824,8 +848,6 @@ out_subscribe_ack: ceph_msg_put(monc->m_subscribe_ack); out_auth: ceph_auth_destroy(monc->auth); -out_con: - monc->con->ops->put(monc->con); out_monmap: kfree(monc->monmap); out: @@ -841,12 +863,16 @@ void ceph_monc_stop(struct ceph_mon_client *monc) mutex_lock(&monc->mutex); __close_session(monc); - monc->con->private = NULL; - monc->con->ops->put(monc->con); - monc->con = NULL; - mutex_unlock(&monc->mutex); + /* + * flush msgr queue before we destroy ourselves to ensure that: + * - any work that references our embedded con is finished. + * - any osd_client or other work that may reference an authorizer + * finishes before we shut down the auth subsystem. 
+ */ + ceph_msgr_flush(); + ceph_auth_destroy(monc->auth); ceph_msg_put(monc->m_auth); @@ -863,8 +889,10 @@ static void handle_auth_reply(struct ceph_mon_client *monc, { int ret; int was_auth = 0; + int had_debugfs_info, init_debugfs = 0; mutex_lock(&monc->mutex); + had_debugfs_info = have_debugfs_info(monc); if (monc->auth->ops) was_auth = monc->auth->ops->is_authenticated(monc->auth); monc->pending_auth = 0; @@ -880,14 +908,29 @@ static void handle_auth_reply(struct ceph_mon_client *monc, } else if (!was_auth && monc->auth->ops->is_authenticated(monc->auth)) { dout("authenticated, starting session\n"); - monc->client->msgr->inst.name.type = CEPH_ENTITY_TYPE_CLIENT; - monc->client->msgr->inst.name.num = + monc->client->msgr.inst.name.type = CEPH_ENTITY_TYPE_CLIENT; + monc->client->msgr.inst.name.num = cpu_to_le64(monc->auth->global_id); __send_subscribe(monc); __resend_generic_request(monc); } + + if (!had_debugfs_info && have_debugfs_info(monc)) { + pr_info("client%lld fsid %pU\n", + ceph_client_id(monc->client), + &monc->client->fsid); + init_debugfs = 1; + } mutex_unlock(&monc->mutex); + + if (init_debugfs) { + /* + * do debugfs initialization without mutex to avoid + * creating a locking dependency + */ + ceph_debugfs_client_init(monc->client); + } } static int __validate_auth(struct ceph_mon_client *monc) @@ -992,6 +1035,8 @@ static struct ceph_msg *mon_alloc_msg(struct ceph_connection *con, case CEPH_MSG_MDS_MAP: case CEPH_MSG_OSD_MAP: m = ceph_msg_new(type, front_len, GFP_NOFS, false); + if (!m) + return NULL; /* ENOMEM--return skip == 0 */ break; } @@ -1021,7 +1066,7 @@ static void mon_fault(struct ceph_connection *con) if (!monc->hunting) pr_info("mon%d %s session lost, " "hunting for new mon\n", monc->cur_mon, - ceph_pr_addr(&monc->con->peer_addr.in_addr)); + ceph_pr_addr(&monc->con.peer_addr.in_addr)); __close_session(monc); if (!monc->hunting) { @@ -1036,9 +1081,23 @@ out: mutex_unlock(&monc->mutex); } +/* + * We can ignore refcounting on the connection struct, as all references + * will come from the messenger workqueue, which is drained prior to + * mon_client destruction. 
+ */ +static struct ceph_connection *con_get(struct ceph_connection *con) +{ + return con; +} + +static void con_put(struct ceph_connection *con) +{ +} + static const struct ceph_connection_operations mon_con_ops = { - .get = ceph_con_get, - .put = ceph_con_put, + .get = con_get, + .put = con_put, .dispatch = dispatch, .fault = mon_fault, .alloc_msg = mon_alloc_msg, diff --git a/net/ceph/msgpool.c b/net/ceph/msgpool.c index 11d5f4196a73..ddec1c10ac80 100644 --- a/net/ceph/msgpool.c +++ b/net/ceph/msgpool.c @@ -12,7 +12,7 @@ static void *msgpool_alloc(gfp_t gfp_mask, void *arg) struct ceph_msgpool *pool = arg; struct ceph_msg *msg; - msg = ceph_msg_new(0, pool->front_len, gfp_mask, true); + msg = ceph_msg_new(pool->type, pool->front_len, gfp_mask, true); if (!msg) { dout("msgpool_alloc %s failed\n", pool->name); } else { @@ -32,10 +32,11 @@ static void msgpool_free(void *element, void *arg) ceph_msg_put(msg); } -int ceph_msgpool_init(struct ceph_msgpool *pool, +int ceph_msgpool_init(struct ceph_msgpool *pool, int type, int front_len, int size, bool blocking, const char *name) { dout("msgpool %s init\n", name); + pool->type = type; pool->front_len = front_len; pool->pool = mempool_create(size, msgpool_alloc, msgpool_free, pool); if (!pool->pool) @@ -61,7 +62,7 @@ struct ceph_msg *ceph_msgpool_get(struct ceph_msgpool *pool, WARN_ON(1); /* try to alloc a fresh message */ - return ceph_msg_new(0, front_len, GFP_NOFS, false); + return ceph_msg_new(pool->type, front_len, GFP_NOFS, false); } msg = mempool_alloc(pool->pool, GFP_NOFS); diff --git a/net/ceph/osd_client.c b/net/ceph/osd_client.c index 5e254055c910..b16dfa25e750 100644 --- a/net/ceph/osd_client.c +++ b/net/ceph/osd_client.c @@ -52,7 +52,7 @@ static int op_has_extent(int op) op == CEPH_OSD_OP_WRITE); } -void ceph_calc_raw_layout(struct ceph_osd_client *osdc, +int ceph_calc_raw_layout(struct ceph_osd_client *osdc, struct ceph_file_layout *layout, u64 snapid, u64 off, u64 *plen, u64 *bno, @@ -62,12 +62,15 @@ void ceph_calc_raw_layout(struct ceph_osd_client *osdc, struct ceph_osd_request_head *reqhead = req->r_request->front.iov_base; u64 orig_len = *plen; u64 objoff, objlen; /* extent in object */ + int r; reqhead->snapid = cpu_to_le64(snapid); /* object extent? */ - ceph_calc_file_object_mapping(layout, off, plen, bno, - &objoff, &objlen); + r = ceph_calc_file_object_mapping(layout, off, plen, bno, + &objoff, &objlen); + if (r < 0) + return r; if (*plen < orig_len) dout(" skipping last %llu, final file extent %llu~%llu\n", orig_len - *plen, off, *plen); @@ -83,7 +86,7 @@ void ceph_calc_raw_layout(struct ceph_osd_client *osdc, dout("calc_layout bno=%llx %llu~%llu (%d pages)\n", *bno, objoff, objlen, req->r_num_pages); - + return 0; } EXPORT_SYMBOL(ceph_calc_raw_layout); @@ -112,20 +115,25 @@ EXPORT_SYMBOL(ceph_calc_raw_layout); * * fill osd op in request message. 
*/ -static void calc_layout(struct ceph_osd_client *osdc, - struct ceph_vino vino, - struct ceph_file_layout *layout, - u64 off, u64 *plen, - struct ceph_osd_request *req, - struct ceph_osd_req_op *op) +static int calc_layout(struct ceph_osd_client *osdc, + struct ceph_vino vino, + struct ceph_file_layout *layout, + u64 off, u64 *plen, + struct ceph_osd_request *req, + struct ceph_osd_req_op *op) { u64 bno; + int r; - ceph_calc_raw_layout(osdc, layout, vino.snap, off, - plen, &bno, req, op); + r = ceph_calc_raw_layout(osdc, layout, vino.snap, off, + plen, &bno, req, op); + if (r < 0) + return r; snprintf(req->r_oid, sizeof(req->r_oid), "%llx.%08llx", vino.ino, bno); req->r_oid_len = strlen(req->r_oid); + + return r; } /* @@ -139,15 +147,14 @@ void ceph_osdc_release_request(struct kref *kref) if (req->r_request) ceph_msg_put(req->r_request); - if (req->r_reply) - ceph_msg_put(req->r_reply); if (req->r_con_filling_msg) { - dout("release_request revoking pages %p from con %p\n", + dout("%s revoking pages %p from con %p\n", __func__, req->r_pages, req->r_con_filling_msg); - ceph_con_revoke_message(req->r_con_filling_msg, - req->r_reply); - ceph_con_put(req->r_con_filling_msg); + ceph_msg_revoke_incoming(req->r_reply); + req->r_con_filling_msg->ops->put(req->r_con_filling_msg); } + if (req->r_reply) + ceph_msg_put(req->r_reply); if (req->r_own_pages) ceph_release_page_vector(req->r_pages, req->r_num_pages); @@ -214,6 +221,7 @@ struct ceph_osd_request *ceph_osdc_alloc_request(struct ceph_osd_client *osdc, kref_init(&req->r_kref); init_completion(&req->r_completion); init_completion(&req->r_safe_completion); + RB_CLEAR_NODE(&req->r_node); INIT_LIST_HEAD(&req->r_unsafe_item); INIT_LIST_HEAD(&req->r_linger_item); INIT_LIST_HEAD(&req->r_linger_osd); @@ -243,6 +251,7 @@ struct ceph_osd_request *ceph_osdc_alloc_request(struct ceph_osd_client *osdc, } ceph_pagelist_init(req->r_trail); } + /* create request message; allow space for oid */ msg_size += MAX_OBJ_NAME_SIZE; if (snapc) @@ -256,7 +265,6 @@ struct ceph_osd_request *ceph_osdc_alloc_request(struct ceph_osd_client *osdc, return NULL; } - msg->hdr.type = cpu_to_le16(CEPH_MSG_OSD_OP); memset(msg->front.iov_base, 0, msg->front.iov_len); req->r_request = msg; @@ -278,7 +286,7 @@ static void osd_req_encode_op(struct ceph_osd_request *req, { dst->op = cpu_to_le16(src->op); - switch (dst->op) { + switch (src->op) { case CEPH_OSD_OP_READ: case CEPH_OSD_OP_WRITE: dst->extent.offset = @@ -454,6 +462,7 @@ struct ceph_osd_request *ceph_osdc_new_request(struct ceph_osd_client *osdc, { struct ceph_osd_req_op ops[3]; struct ceph_osd_request *req; + int r; ops[0].op = opcode; ops[0].extent.truncate_seq = truncate_seq; @@ -472,10 +481,12 @@ struct ceph_osd_request *ceph_osdc_new_request(struct ceph_osd_client *osdc, use_mempool, GFP_NOFS, NULL, NULL); if (!req) - return NULL; + return ERR_PTR(-ENOMEM); /* calculate max write size */ - calc_layout(osdc, vino, layout, off, plen, req, ops); + r = calc_layout(osdc, vino, layout, off, plen, req, ops); + if (r < 0) + return ERR_PTR(r); req->r_file_layout = *layout; /* keep a copy */ /* in case it differs from natural (file) alignment that @@ -568,7 +579,7 @@ static void __kick_osd_requests(struct ceph_osd_client *osdc, dout("__kick_osd_requests osd%d\n", osd->o_osd); err = __reset_osd(osdc, osd); - if (err == -EAGAIN) + if (err) return; list_for_each_entry(req, &osd->o_requests, r_osd_item) { @@ -595,14 +606,6 @@ static void __kick_osd_requests(struct ceph_osd_client *osdc, } } -static void kick_osd_requests(struct 
ceph_osd_client *osdc, - struct ceph_osd *kickosd) -{ - mutex_lock(&osdc->request_mutex); - __kick_osd_requests(osdc, kickosd); - mutex_unlock(&osdc->request_mutex); -} - /* * If the osd connection drops, we need to resubmit all requests. */ @@ -616,7 +619,9 @@ static void osd_reset(struct ceph_connection *con) dout("osd_reset osd%d\n", osd->o_osd); osdc = osd->o_osdc; down_read(&osdc->map_sem); - kick_osd_requests(osdc, osd); + mutex_lock(&osdc->request_mutex); + __kick_osd_requests(osdc, osd); + mutex_unlock(&osdc->request_mutex); send_queued(osdc); up_read(&osdc->map_sem); } @@ -624,7 +629,7 @@ static void osd_reset(struct ceph_connection *con) /* * Track open sessions with osds. */ -static struct ceph_osd *create_osd(struct ceph_osd_client *osdc) +static struct ceph_osd *create_osd(struct ceph_osd_client *osdc, int onum) { struct ceph_osd *osd; @@ -634,15 +639,14 @@ static struct ceph_osd *create_osd(struct ceph_osd_client *osdc) atomic_set(&osd->o_ref, 1); osd->o_osdc = osdc; + osd->o_osd = onum; + RB_CLEAR_NODE(&osd->o_node); INIT_LIST_HEAD(&osd->o_requests); INIT_LIST_HEAD(&osd->o_linger_requests); INIT_LIST_HEAD(&osd->o_osd_lru); osd->o_incarnation = 1; - ceph_con_init(osdc->client->msgr, &osd->o_con); - osd->o_con.private = osd; - osd->o_con.ops = &osd_con_ops; - osd->o_con.peer_name.type = CEPH_ENTITY_TYPE_OSD; + ceph_con_init(&osd->o_con, osd, &osd_con_ops, &osdc->client->msgr); INIT_LIST_HEAD(&osd->o_keepalive_item); return osd; @@ -664,11 +668,11 @@ static void put_osd(struct ceph_osd *osd) { dout("put_osd %p %d -> %d\n", osd, atomic_read(&osd->o_ref), atomic_read(&osd->o_ref) - 1); - if (atomic_dec_and_test(&osd->o_ref)) { + if (atomic_dec_and_test(&osd->o_ref) && osd->o_auth.authorizer) { struct ceph_auth_client *ac = osd->o_osdc->client->monc.auth; - if (osd->o_authorizer) - ac->ops->destroy_authorizer(ac, osd->o_authorizer); + if (ac->ops && ac->ops->destroy_authorizer) + ac->ops->destroy_authorizer(ac, osd->o_auth.authorizer); kfree(osd); } } @@ -740,6 +744,7 @@ static int __reset_osd(struct ceph_osd_client *osdc, struct ceph_osd *osd) if (list_empty(&osd->o_requests) && list_empty(&osd->o_linger_requests)) { __remove_osd(osdc, osd); + ret = -ENODEV; } else if (memcmp(&osdc->osdmap->osd_addr[osd->o_osd], &osd->o_con.peer_addr, sizeof(osd->o_con.peer_addr)) == 0 && @@ -752,7 +757,8 @@ static int __reset_osd(struct ceph_osd_client *osdc, struct ceph_osd *osd) ret = -EAGAIN; } else { ceph_con_close(&osd->o_con); - ceph_con_open(&osd->o_con, &osdc->osdmap->osd_addr[osd->o_osd]); + ceph_con_open(&osd->o_con, CEPH_ENTITY_TYPE_OSD, osd->o_osd, + &osdc->osdmap->osd_addr[osd->o_osd]); osd->o_incarnation++; } return ret; @@ -841,13 +847,19 @@ static void register_request(struct ceph_osd_client *osdc, static void __unregister_request(struct ceph_osd_client *osdc, struct ceph_osd_request *req) { + if (RB_EMPTY_NODE(&req->r_node)) { + dout("__unregister_request %p tid %lld not registered\n", + req, req->r_tid); + return; + } + dout("__unregister_request %p tid %lld\n", req, req->r_tid); rb_erase(&req->r_node, &osdc->requests); osdc->num_requests--; if (req->r_osd) { /* make sure the original request isn't in flight. 
*/ - ceph_con_revoke(&req->r_osd->o_con, req->r_request); + ceph_msg_revoke(req->r_request); list_del_init(&req->r_osd_item); if (list_empty(&req->r_osd->o_requests) && @@ -859,9 +871,9 @@ static void __unregister_request(struct ceph_osd_client *osdc, req->r_osd = NULL; } + list_del_init(&req->r_req_lru_item); ceph_osdc_put_request(req); - list_del_init(&req->r_req_lru_item); if (osdc->num_requests == 0) { dout(" no requests, canceling timeout\n"); __cancel_osd_timeout(osdc); @@ -874,7 +886,7 @@ static void __unregister_request(struct ceph_osd_client *osdc, static void __cancel_request(struct ceph_osd_request *req) { if (req->r_sent && req->r_osd) { - ceph_con_revoke(&req->r_osd->o_con, req->r_request); + ceph_msg_revoke(req->r_request); req->r_sent = 0; } } @@ -884,15 +896,17 @@ static void __register_linger_request(struct ceph_osd_client *osdc, { dout("__register_linger_request %p\n", req); list_add_tail(&req->r_linger_item, &osdc->req_linger); - list_add_tail(&req->r_linger_osd, &req->r_osd->o_linger_requests); + if (req->r_osd) + list_add_tail(&req->r_linger_osd, + &req->r_osd->o_linger_requests); } static void __unregister_linger_request(struct ceph_osd_client *osdc, struct ceph_osd_request *req) { dout("__unregister_linger_request %p\n", req); + list_del_init(&req->r_linger_item); if (req->r_osd) { - list_del_init(&req->r_linger_item); list_del_init(&req->r_linger_osd); if (list_empty(&req->r_osd->o_requests) && @@ -992,18 +1006,18 @@ static int __map_request(struct ceph_osd_client *osdc, req->r_osd = __lookup_osd(osdc, o); if (!req->r_osd && o >= 0) { err = -ENOMEM; - req->r_osd = create_osd(osdc); + req->r_osd = create_osd(osdc, o); if (!req->r_osd) { list_move(&req->r_req_lru_item, &osdc->req_notarget); goto out; } dout("map_request osd %p is osd%d\n", req->r_osd, o); - req->r_osd->o_osd = o; - req->r_osd->o_con.peer_name.num = cpu_to_le64(o); __insert_osd(osdc, req->r_osd); - ceph_con_open(&req->r_osd->o_con, &osdc->osdmap->osd_addr[o]); + ceph_con_open(&req->r_osd->o_con, + CEPH_ENTITY_TYPE_OSD, o, + &osdc->osdmap->osd_addr[o]); } if (req->r_osd) { @@ -1071,12 +1085,10 @@ static void handle_timeout(struct work_struct *work) { struct ceph_osd_client *osdc = container_of(work, struct ceph_osd_client, timeout_work.work); - struct ceph_osd_request *req, *last_req = NULL; + struct ceph_osd_request *req; struct ceph_osd *osd; - unsigned long timeout = osdc->client->options->osd_timeout * HZ; unsigned long keepalive = osdc->client->options->osd_keepalive_timeout * HZ; - unsigned long last_stamp = 0; struct list_head slow_osds; dout("timeout\n"); down_read(&osdc->map_sem); @@ -1086,37 +1098,6 @@ static void handle_timeout(struct work_struct *work) mutex_lock(&osdc->request_mutex); /* - * reset osds that appear to be _really_ unresponsive. this - * is a failsafe measure.. we really shouldn't be getting to - * this point if the system is working properly. the monitors - * should mark the osd as failed and we should find out about - * it from an updated osd map. - */ - while (timeout && !list_empty(&osdc->req_lru)) { - req = list_entry(osdc->req_lru.next, struct ceph_osd_request, - r_req_lru_item); - - /* hasn't been long enough since we sent it? */ - if (time_before(jiffies, req->r_stamp + timeout)) - break; - - /* hasn't been long enough since it was acked? 
*/ - if (req->r_request->ack_stamp == 0 || - time_before(jiffies, req->r_request->ack_stamp + timeout)) - break; - - BUG_ON(req == last_req && req->r_stamp == last_stamp); - last_req = req; - last_stamp = req->r_stamp; - - osd = req->r_osd; - BUG_ON(!osd); - pr_warning(" tid %llu timed out on osd%d, will reset osd\n", - req->r_tid, osd->o_osd); - __kick_osd_requests(osdc, osd); - } - - /* * ping osds that are a bit slow. this ensures that if there * is a break in the TCP connection we will notice, and reopen * a connection with that osd (from the fault callback). @@ -1210,7 +1191,7 @@ static void handle_reply(struct ceph_osd_client *osdc, struct ceph_msg *msg, if (req->r_con_filling_msg == con && req->r_reply == msg) { dout(" dropping con_filling_msg ref %p\n", con); req->r_con_filling_msg = NULL; - ceph_con_put(con); + con->ops->put(con); } if (!req->r_got_reply) { @@ -1287,7 +1268,7 @@ static void reset_changed_osds(struct ceph_osd_client *osdc) * Requeue requests whose mapping to an OSD has changed. If requests map to * no osd, request a new map. * - * Caller should hold map_sem for read and request_mutex. + * Caller should hold map_sem for read. */ static void kick_requests(struct ceph_osd_client *osdc, int force_resend) { @@ -1298,8 +1279,27 @@ static void kick_requests(struct ceph_osd_client *osdc, int force_resend) dout("kick_requests %s\n", force_resend ? " (force resend)" : ""); mutex_lock(&osdc->request_mutex); - for (p = rb_first(&osdc->requests); p; p = rb_next(p)) { + for (p = rb_first(&osdc->requests); p; ) { req = rb_entry(p, struct ceph_osd_request, r_node); + p = rb_next(p); + + /* + * For linger requests that have not yet been + * registered, move them to the linger list; they'll + * be sent to the osd in the loop below. Unregister + * the request before re-registering it as a linger + * request to ensure the __map_request() below + * will decide it needs to be sent. + */ + if (req->r_linger && list_empty(&req->r_linger_item)) { + dout("%p tid %llu restart on osd%d\n", + req, req->r_tid, + req->r_osd ? req->r_osd->o_osd : -1); + __unregister_request(osdc, req); + __register_linger_request(osdc, req); + continue; + } + err = __map_request(osdc, req, force_resend); if (err < 0) continue; /* error */ @@ -1307,10 +1307,12 @@ static void kick_requests(struct ceph_osd_client *osdc, int force_resend) dout("%p tid %llu maps to no osd\n", req, req->r_tid); needmap++; /* request a newer map */ } else if (err > 0) { - dout("%p tid %llu requeued on osd%d\n", req, req->r_tid, - req->r_osd ? req->r_osd->o_osd : -1); - if (!req->r_linger) + if (!req->r_linger) { + dout("%p tid %llu requeued on osd%d\n", req, + req->r_tid, + req->r_osd ? req->r_osd->o_osd : -1); req->r_flags |= CEPH_OSD_FLAG_RETRY; + } } } @@ -1319,6 +1321,7 @@ static void kick_requests(struct ceph_osd_client *osdc, int force_resend) dout("linger req=%p req->r_osd=%p\n", req, req->r_osd); err = __map_request(osdc, req, force_resend); + dout("__map_request returned %d\n", err); if (err == 0) continue; /* no change and no osd was specified */ if (err < 0) @@ -1331,8 +1334,8 @@ static void kick_requests(struct ceph_osd_client *osdc, int force_resend) dout("kicking lingering %p tid %llu osd%d\n", req, req->r_tid, req->r_osd ? 
req->r_osd->o_osd : -1); - __unregister_linger_request(osdc, req); __register_request(osdc, req); + __unregister_linger_request(osdc, req); } mutex_unlock(&osdc->request_mutex); @@ -1340,6 +1343,7 @@ static void kick_requests(struct ceph_osd_client *osdc, int force_resend) dout("%d requests for down osds, need new map\n", needmap); ceph_monc_request_next_osdmap(&osdc->client->monc); } + reset_changed_osds(osdc); } @@ -1385,7 +1389,7 @@ void ceph_osdc_handle_map(struct ceph_osd_client *osdc, struct ceph_msg *msg) epoch, maplen); newmap = osdmap_apply_incremental(&p, next, osdc->osdmap, - osdc->client->msgr); + &osdc->client->msgr); if (IS_ERR(newmap)) { err = PTR_ERR(newmap); goto bad; @@ -1396,7 +1400,6 @@ void ceph_osdc_handle_map(struct ceph_osd_client *osdc, struct ceph_msg *msg) osdc->osdmap = newmap; } kick_requests(osdc, 0); - reset_changed_osds(osdc); } else { dout("ignoring incremental map %u len %d\n", epoch, maplen); @@ -1566,6 +1569,7 @@ int ceph_osdc_create_event(struct ceph_osd_client *osdc, event->data = data; event->osdc = osdc; INIT_LIST_HEAD(&event->osd_node); + RB_CLEAR_NODE(&event->node); kref_init(&event->kref); /* one ref for us */ kref_get(&event->kref); /* one ref for the caller */ init_completion(&event->completion); @@ -1833,11 +1837,12 @@ int ceph_osdc_init(struct ceph_osd_client *osdc, struct ceph_client *client) if (!osdc->req_mempool) goto out; - err = ceph_msgpool_init(&osdc->msgpool_op, OSD_OP_FRONT_LEN, 10, true, + err = ceph_msgpool_init(&osdc->msgpool_op, CEPH_MSG_OSD_OP, + OSD_OP_FRONT_LEN, 10, true, "osd_op"); if (err < 0) goto out_mempool; - err = ceph_msgpool_init(&osdc->msgpool_op_reply, + err = ceph_msgpool_init(&osdc->msgpool_op_reply, CEPH_MSG_OSD_OPREPLY, OSD_OPREPLY_FRONT_LEN, 10, true, "osd_op_reply"); if (err < 0) @@ -1896,8 +1901,8 @@ int ceph_osdc_readpages(struct ceph_osd_client *osdc, CEPH_OSD_OP_READ, CEPH_OSD_FLAG_READ, NULL, 0, truncate_seq, truncate_size, NULL, false, 1, page_align); - if (!req) - return -ENOMEM; + if (IS_ERR(req)) + return PTR_ERR(req); /* it may be a short read due to an object boundary */ req->r_pages = pages; @@ -1939,8 +1944,8 @@ int ceph_osdc_writepages(struct ceph_osd_client *osdc, struct ceph_vino vino, snapc, do_sync, truncate_seq, truncate_size, mtime, nofail, 1, page_align); - if (!req) - return -ENOMEM; + if (IS_ERR(req)) + return PTR_ERR(req); /* it may be a short write due to an object boundary */ req->r_pages = pages; @@ -2019,10 +2024,10 @@ static struct ceph_msg *get_reply(struct ceph_connection *con, } if (req->r_con_filling_msg) { - dout("get_reply revoking msg %p from old con %p\n", + dout("%s revoking msg %p from old con %p\n", __func__, req->r_reply, req->r_con_filling_msg); - ceph_con_revoke_message(req->r_con_filling_msg, req->r_reply); - ceph_con_put(req->r_con_filling_msg); + ceph_msg_revoke_incoming(req->r_reply); + req->r_con_filling_msg->ops->put(req->r_con_filling_msg); req->r_con_filling_msg = NULL; } @@ -2057,7 +2062,7 @@ static struct ceph_msg *get_reply(struct ceph_connection *con, #endif } *skip = 0; - req->r_con_filling_msg = ceph_con_get(con); + req->r_con_filling_msg = con->ops->get(con); dout("get_reply tid %lld %p\n", tid, m); out: @@ -2074,6 +2079,7 @@ static struct ceph_msg *alloc_msg(struct ceph_connection *con, int type = le16_to_cpu(hdr->type); int front = le32_to_cpu(hdr->front_len); + *skip = 0; switch (type) { case CEPH_MSG_OSD_MAP: case CEPH_MSG_WATCH_NOTIFY: @@ -2108,37 +2114,32 @@ static void put_osd_con(struct ceph_connection *con) /* * authentication */ -static int 
get_authorizer(struct ceph_connection *con, - void **buf, int *len, int *proto, - void **reply_buf, int *reply_len, int force_new) +/* + * Note: returned pointer is the address of a structure that's + * managed separately. Caller must *not* attempt to free it. + */ +static struct ceph_auth_handshake *get_authorizer(struct ceph_connection *con, + int *proto, int force_new) { struct ceph_osd *o = con->private; struct ceph_osd_client *osdc = o->o_osdc; struct ceph_auth_client *ac = osdc->client->monc.auth; - int ret = 0; + struct ceph_auth_handshake *auth = &o->o_auth; - if (force_new && o->o_authorizer) { - ac->ops->destroy_authorizer(ac, o->o_authorizer); - o->o_authorizer = NULL; - } - if (o->o_authorizer == NULL) { - ret = ac->ops->create_authorizer( - ac, CEPH_ENTITY_TYPE_OSD, - &o->o_authorizer, - &o->o_authorizer_buf, - &o->o_authorizer_buf_len, - &o->o_authorizer_reply_buf, - &o->o_authorizer_reply_buf_len); + if (force_new && auth->authorizer) { + if (ac->ops && ac->ops->destroy_authorizer) + ac->ops->destroy_authorizer(ac, auth->authorizer); + auth->authorizer = NULL; + } + if (!auth->authorizer && ac->ops && ac->ops->create_authorizer) { + int ret = ac->ops->create_authorizer(ac, CEPH_ENTITY_TYPE_OSD, + auth); if (ret) - return ret; + return ERR_PTR(ret); } - *proto = ac->protocol; - *buf = o->o_authorizer_buf; - *len = o->o_authorizer_buf_len; - *reply_buf = o->o_authorizer_reply_buf; - *reply_len = o->o_authorizer_reply_buf_len; - return 0; + + return auth; } @@ -2148,7 +2149,11 @@ static int verify_authorizer_reply(struct ceph_connection *con, int len) struct ceph_osd_client *osdc = o->o_osdc; struct ceph_auth_client *ac = osdc->client->monc.auth; - return ac->ops->verify_authorizer_reply(ac, o->o_authorizer, len); + /* + * XXX If ac->ops or ac->ops->verify_authorizer_reply is null, + * XXX which do we do: succeed or fail? 
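/*
 * Editorial sketch, not part of the patch: several hunks in this series
 * (ceph_osdc_new_request(), the readpages/writepages callers, and the
 * get_authorizer() rewrite above) switch from "return NULL on failure" to
 * the kernel's ERR_PTR()/IS_ERR() convention so callers learn *why* the
 * operation failed.  A minimal user-space imitation of that convention,
 * with hypothetical names, might look like this:
 */
#include <errno.h>
#include <stdint.h>

#define SKETCH_MAX_ERRNO 4095

static inline void *sketch_err_ptr(long err)
{
	return (void *)(intptr_t)err;		/* e.g. -ENOMEM encoded in the pointer */
}

static inline int sketch_is_err(const void *ptr)
{
	/* the top SKETCH_MAX_ERRNO addresses are reserved for error codes */
	return (uintptr_t)ptr >= (uintptr_t)-SKETCH_MAX_ERRNO;
}

static inline long sketch_ptr_err(const void *ptr)
{
	return (long)(intptr_t)ptr;		/* recover the negative errno */
}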
+ */ + return ac->ops->verify_authorizer_reply(ac, o->o_auth.authorizer, len); } static int invalidate_authorizer(struct ceph_connection *con) @@ -2157,7 +2162,7 @@ static int invalidate_authorizer(struct ceph_connection *con) struct ceph_osd_client *osdc = o->o_osdc; struct ceph_auth_client *ac = osdc->client->monc.auth; - if (ac->ops->invalidate_authorizer) + if (ac->ops && ac->ops->invalidate_authorizer) ac->ops->invalidate_authorizer(ac, CEPH_ENTITY_TYPE_OSD); return ceph_monc_validate_auth(&osdc->client->monc); diff --git a/net/ceph/osdmap.c b/net/ceph/osdmap.c index 29ad46ec9dcf..7fbe21030f54 100644 --- a/net/ceph/osdmap.c +++ b/net/ceph/osdmap.c @@ -495,15 +495,16 @@ static int __decode_pool_names(void **p, void *end, struct ceph_osdmap *map) ceph_decode_32_safe(p, end, pool, bad); ceph_decode_32_safe(p, end, len, bad); dout(" pool %d len %d\n", pool, len); + ceph_decode_need(p, end, len, bad); pi = __lookup_pg_pool(&map->pg_pools, pool); if (pi) { + char *name = kstrndup(*p, len, GFP_NOFS); + + if (!name) + return -ENOMEM; kfree(pi->name); - pi->name = kmalloc(len + 1, GFP_NOFS); - if (pi->name) { - memcpy(pi->name, *p, len); - pi->name[len] = '\0'; - dout(" name is %s\n", pi->name); - } + pi->name = name; + dout(" name is %s\n", pi->name); } *p += len; } @@ -612,10 +613,12 @@ struct ceph_osdmap *osdmap_decode(void **p, void *end) ceph_decode_32_safe(p, end, max, bad); while (max--) { ceph_decode_need(p, end, 4 + 1 + sizeof(pi->v), bad); + err = -ENOMEM; pi = kzalloc(sizeof(*pi), GFP_NOFS); if (!pi) goto bad; pi->id = ceph_decode_32(p); + err = -EINVAL; ev = ceph_decode_8(p); /* encoding version */ if (ev > CEPH_PG_POOL_VERSION) { pr_warning("got unknown v %d > %d of ceph_pg_pool\n", @@ -631,8 +634,13 @@ struct ceph_osdmap *osdmap_decode(void **p, void *end) __insert_pg_pool(&map->pg_pools, pi); } - if (version >= 5 && __decode_pool_names(p, end, map) < 0) - goto bad; + if (version >= 5) { + err = __decode_pool_names(p, end, map); + if (err < 0) { + dout("fail to decode pool names"); + goto bad; + } + } ceph_decode_32_safe(p, end, map->pool_max, bad); @@ -673,6 +681,9 @@ struct ceph_osdmap *osdmap_decode(void **p, void *end) ceph_decode_need(p, end, sizeof(u32) + sizeof(u64), bad); ceph_decode_copy(p, &pgid, sizeof(pgid)); n = ceph_decode_32(p); + err = -EINVAL; + if (n > (UINT_MAX - sizeof(*pg)) / sizeof(u32)) + goto bad; ceph_decode_need(p, end, n * sizeof(u32), bad); err = -ENOMEM; pg = kmalloc(sizeof(*pg) + n*sizeof(u32), GFP_NOFS); @@ -709,7 +720,7 @@ struct ceph_osdmap *osdmap_decode(void **p, void *end) return map; bad: - dout("osdmap_decode fail\n"); + dout("osdmap_decode fail err %d\n", err); ceph_osdmap_destroy(map); return ERR_PTR(err); } @@ -803,6 +814,7 @@ struct ceph_osdmap *osdmap_apply_incremental(void **p, void *end, if (ev > CEPH_PG_POOL_VERSION) { pr_warning("got unknown v %d > %d of ceph_pg_pool\n", ev, CEPH_PG_POOL_VERSION); + err = -EINVAL; goto bad; } pi = __lookup_pg_pool(&map->pg_pools, pool); @@ -819,8 +831,11 @@ struct ceph_osdmap *osdmap_apply_incremental(void **p, void *end, if (err < 0) goto bad; } - if (version >= 5 && __decode_pool_names(p, end, map) < 0) - goto bad; + if (version >= 5) { + err = __decode_pool_names(p, end, map); + if (err < 0) + goto bad; + } /* old_pool */ ceph_decode_32_safe(p, end, len, bad); @@ -890,13 +905,19 @@ struct ceph_osdmap *osdmap_apply_incremental(void **p, void *end, pglen = ceph_decode_32(p); if (pglen) { - /* insert */ ceph_decode_need(p, end, pglen*sizeof(u32), bad); + + /* removing existing (if any) */ + (void) 
__remove_pg_mapping(&map->pg_temp, pgid); + + /* insert */ + err = -EINVAL; + if (pglen > (UINT_MAX - sizeof(*pg)) / sizeof(u32)) + goto bad; + err = -ENOMEM; pg = kmalloc(sizeof(*pg) + sizeof(u32)*pglen, GFP_NOFS); - if (!pg) { - err = -ENOMEM; + if (!pg) goto bad; - } pg->pgid = pgid; pg->len = pglen; for (j = 0; j < pglen; j++) @@ -940,7 +961,7 @@ bad: * for now, we write only a single su, until we can * pass a stride back to the caller. */ -void ceph_calc_file_object_mapping(struct ceph_file_layout *layout, +int ceph_calc_file_object_mapping(struct ceph_file_layout *layout, u64 off, u64 *plen, u64 *ono, u64 *oxoff, u64 *oxlen) @@ -954,11 +975,17 @@ void ceph_calc_file_object_mapping(struct ceph_file_layout *layout, dout("mapping %llu~%llu osize %u fl_su %u\n", off, *plen, osize, su); + if (su == 0 || sc == 0) + goto invalid; su_per_object = osize / su; + if (su_per_object == 0) + goto invalid; dout("osize %u / su %u = su_per_object %u\n", osize, su, su_per_object); - BUG_ON((su & ~PAGE_MASK) != 0); + if ((su & ~PAGE_MASK) != 0) + goto invalid; + /* bl = *off / su; */ t = off; do_div(t, su); @@ -986,6 +1013,14 @@ void ceph_calc_file_object_mapping(struct ceph_file_layout *layout, *plen = *oxlen; dout(" obj extent %llu~%llu\n", *oxoff, *oxlen); + return 0; + +invalid: + dout(" invalid layout\n"); + *ono = 0; + *oxoff = 0; + *oxlen = 0; + return -EINVAL; } EXPORT_SYMBOL(ceph_calc_file_object_mapping); diff --git a/net/core/datagram.c b/net/core/datagram.c index e4fbfd6e2bd4..da7e0c867cc0 100644 --- a/net/core/datagram.c +++ b/net/core/datagram.c @@ -187,7 +187,7 @@ struct sk_buff *__skb_recv_datagram(struct sock *sk, unsigned flags, skb_queue_walk(queue, skb) { *peeked = skb->peeked; if (flags & MSG_PEEK) { - if (*off >= skb->len) { + if (*off >= skb->len && skb->len) { *off -= skb->len; continue; } diff --git a/net/core/dev.c b/net/core/dev.c index c299416d0e89..eb858dc6ab86 100644 --- a/net/core/dev.c +++ b/net/core/dev.c @@ -1056,6 +1056,8 @@ rollback: */ int dev_set_alias(struct net_device *dev, const char *alias, size_t len) { + char *new_ifalias; + ASSERT_RTNL(); if (len >= IFALIASZ) @@ -1069,9 +1071,10 @@ int dev_set_alias(struct net_device *dev, const char *alias, size_t len) return 0; } - dev->ifalias = krealloc(dev->ifalias, len + 1, GFP_KERNEL); - if (!dev->ifalias) + new_ifalias = krealloc(dev->ifalias, len + 1, GFP_KERNEL); + if (!new_ifalias) return -ENOMEM; + dev->ifalias = new_ifalias; strlcpy(dev->ifalias, alias, len+1); return len; @@ -1638,6 +1641,19 @@ static inline int deliver_skb(struct sk_buff *skb, return pt_prev->func(skb, skb->dev, pt_prev, orig_dev); } +static inline bool skb_loop_sk(struct packet_type *ptype, struct sk_buff *skb) +{ + if (!ptype->af_packet_priv || !skb->sk) + return false; + + if (ptype->id_match) + return ptype->id_match(ptype, skb->sk); + else if ((struct sock *)ptype->af_packet_priv == skb->sk) + return true; + + return false; +} + /* * Support routine. Sends outgoing frames to any network * taps currently in use. 
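/*
 * Editorial sketch, not part of the patch: the osdmap.c hunk above turns
 * ceph_calc_file_object_mapping() into a function that rejects impossible
 * striping layouts (zero stripe unit or count, stripe unit larger than the
 * object, stripe unit not page aligned) instead of hitting BUG_ON().  A
 * hedged user-space analogue of those sanity checks, with hypothetical
 * names:
 */
#include <errno.h>
#include <stdint.h>

#define SKETCH_PAGE_SIZE 4096u

struct sketch_layout {
	uint32_t stripe_unit;	/* bytes per stripe unit */
	uint32_t stripe_count;	/* objects striped across */
	uint32_t object_size;	/* bytes per object */
};

/* Return 0 if the layout can be used for mapping, -EINVAL otherwise. */
static int sketch_layout_valid(const struct sketch_layout *l)
{
	if (l->stripe_unit == 0 || l->stripe_count == 0)
		return -EINVAL;			/* would divide by zero */
	if (l->object_size / l->stripe_unit == 0)
		return -EINVAL;			/* no whole stripe unit fits in an object */
	if (l->stripe_unit % SKETCH_PAGE_SIZE != 0)
		return -EINVAL;			/* patch requires page-aligned stripe units */
	return 0;
}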
@@ -1655,8 +1671,7 @@ static void dev_queue_xmit_nit(struct sk_buff *skb, struct net_device *dev) * they originated from - MvS (miquels@drinkel.ow.org) */ if ((ptype->dev == dev || !ptype->dev) && - (ptype->af_packet_priv == NULL || - (struct sock *)ptype->af_packet_priv != skb->sk)) { + (!skb_loop_sk(ptype, skb))) { if (pt_prev) { deliver_skb(skb2, pt_prev, skb->dev); pt_prev = ptype; @@ -2106,7 +2121,8 @@ static bool can_checksum_protocol(netdev_features_t features, __be16 protocol) static netdev_features_t harmonize_features(struct sk_buff *skb, __be16 protocol, netdev_features_t features) { - if (!can_checksum_protocol(features, protocol)) { + if (skb->ip_summed != CHECKSUM_NONE && + !can_checksum_protocol(features, protocol)) { features &= ~NETIF_F_ALL_CSUM; features &= ~NETIF_F_SG; } else if (illegal_highdma(skb->dev, skb)) { @@ -2121,6 +2137,9 @@ netdev_features_t netif_skb_features(struct sk_buff *skb) __be16 protocol = skb->protocol; netdev_features_t features = skb->dev->features; + if (skb_shinfo(skb)->gso_segs > skb->dev->gso_max_segs) + features &= ~NETIF_F_GSO_MASK; + if (protocol == htons(ETH_P_8021Q)) { struct vlan_ethhdr *veh = (struct vlan_ethhdr *)skb->data; protocol = veh->h_vlan_encapsulated_proto; @@ -2599,15 +2618,16 @@ void __skb_get_rxhash(struct sk_buff *skb) if (!skb_flow_dissect(skb, &keys)) return; - if (keys.ports) { - if ((__force u16)keys.port16[1] < (__force u16)keys.port16[0]) - swap(keys.port16[0], keys.port16[1]); + if (keys.ports) skb->l4_rxhash = 1; - } /* get a consistent hash (same value on both flow directions) */ - if ((__force u32)keys.dst < (__force u32)keys.src) + if (((__force u32)keys.dst < (__force u32)keys.src) || + (((__force u32)keys.dst == (__force u32)keys.src) && + ((__force u16)keys.port16[1] < (__force u16)keys.port16[0]))) { swap(keys.dst, keys.src); + swap(keys.port16[0], keys.port16[1]); + } hash = jhash_3words((__force u32)keys.dst, (__force u32)keys.src, @@ -2743,8 +2763,10 @@ static int get_rps_cpu(struct net_device *dev, struct sk_buff *skb, if (unlikely(tcpu != next_cpu) && (tcpu == RPS_NO_CPU || !cpu_online(tcpu) || ((int)(per_cpu(softnet_data, tcpu).input_queue_head - - rflow->last_qtail)) >= 0)) + rflow->last_qtail)) >= 0)) { + tcpu = next_cpu; rflow = set_rps_cpu(dev, skb, rflow, next_cpu); + } if (tcpu != RPS_NO_CPU && cpu_online(tcpu)) { *rflowp = rflow; @@ -3189,18 +3211,18 @@ another_round: ncls: #endif - rx_handler = rcu_dereference(skb->dev->rx_handler); if (vlan_tx_tag_present(skb)) { if (pt_prev) { ret = deliver_skb(skb, pt_prev, orig_dev); pt_prev = NULL; } - if (vlan_do_receive(&skb, !rx_handler)) + if (vlan_do_receive(&skb)) goto another_round; else if (unlikely(!skb)) goto out; } + rx_handler = rcu_dereference(skb->dev->rx_handler); if (rx_handler) { if (pt_prev) { ret = deliver_skb(skb, pt_prev, orig_dev); @@ -3220,6 +3242,9 @@ ncls: } } + if (vlan_tx_nonzero_tag_present(skb)) + skb->pkt_type = PACKET_OTHERHOST; + /* deliver only exact match when indicated */ null_or_dev = deliver_exact ? 
skb->dev : NULL; @@ -5909,6 +5934,7 @@ struct net_device *alloc_netdev_mqs(int sizeof_priv, const char *name, dev_net_set(dev, &init_net); dev->gso_max_size = GSO_MAX_SIZE; + dev->gso_max_segs = GSO_MAX_SEGS; INIT_LIST_HEAD(&dev->napi_list); INIT_LIST_HEAD(&dev->unreg_list); @@ -6284,7 +6310,8 @@ static struct hlist_head *netdev_create_hash(void) /* Initialize per network namespace state */ static int __net_init netdev_init(struct net *net) { - INIT_LIST_HEAD(&net->dev_base_head); + if (net != &init_net) + INIT_LIST_HEAD(&net->dev_base_head); net->dev_name_head = netdev_create_hash(); if (net->dev_name_head == NULL) diff --git a/net/core/dev_addr_lists.c b/net/core/dev_addr_lists.c index 626698f0db8b..76f6d0b02f28 100644 --- a/net/core/dev_addr_lists.c +++ b/net/core/dev_addr_lists.c @@ -308,7 +308,8 @@ int dev_addr_del(struct net_device *dev, unsigned char *addr, */ ha = list_first_entry(&dev->dev_addrs.list, struct netdev_hw_addr, list); - if (ha->addr == dev->dev_addr && ha->refcount == 1) + if (!memcmp(ha->addr, addr, dev->addr_len) && + ha->type == addr_type && ha->refcount == 1) return -ENOENT; err = __hw_addr_del(&dev->dev_addrs, addr, dev->addr_len, diff --git a/net/core/neighbour.c b/net/core/neighbour.c index 73b90351df5c..ac88107d1bc9 100644 --- a/net/core/neighbour.c +++ b/net/core/neighbour.c @@ -1285,8 +1285,6 @@ int neigh_resolve_output(struct neighbour *neigh, struct sk_buff *skb) if (!dst) goto discard; - __skb_pull(skb, skb_network_offset(skb)); - if (!neigh_event_send(neigh, skb)) { int err; struct net_device *dev = neigh->dev; @@ -1296,6 +1294,7 @@ int neigh_resolve_output(struct neighbour *neigh, struct sk_buff *skb) neigh_hh_init(neigh, dst); do { + __skb_pull(skb, skb_network_offset(skb)); seq = read_seqbegin(&neigh->ha_lock); err = dev_hard_header(skb, dev, ntohs(skb->protocol), neigh->ha, NULL, skb->len); @@ -1326,9 +1325,8 @@ int neigh_connected_output(struct neighbour *neigh, struct sk_buff *skb) unsigned int seq; int err; - __skb_pull(skb, skb_network_offset(skb)); - do { + __skb_pull(skb, skb_network_offset(skb)); seq = read_seqbegin(&neigh->ha_lock); err = dev_hard_header(skb, dev, ntohs(skb->protocol), neigh->ha, NULL, skb->len); diff --git a/net/core/net_namespace.c b/net/core/net_namespace.c index 31a5ae51a45c..dd00b71dd092 100644 --- a/net/core/net_namespace.c +++ b/net/core/net_namespace.c @@ -25,7 +25,9 @@ static DEFINE_MUTEX(net_mutex); LIST_HEAD(net_namespace_list); EXPORT_SYMBOL_GPL(net_namespace_list); -struct net init_net; +struct net init_net = { + .dev_base_head = LIST_HEAD_INIT(init_net.dev_base_head), +}; EXPORT_SYMBOL(init_net); #define INITIAL_NET_GEN_PTRS 13 /* +1 for len +2 for rcu_head */ diff --git a/net/core/pktgen.c b/net/core/pktgen.c index b81369b6ddc0..114d8a9e8570 100644 --- a/net/core/pktgen.c +++ b/net/core/pktgen.c @@ -1802,10 +1802,13 @@ static ssize_t pktgen_thread_write(struct file *file, return -EFAULT; i += len; mutex_lock(&pktgen_thread_lock); - pktgen_add_device(t, f); + ret = pktgen_add_device(t, f); mutex_unlock(&pktgen_thread_lock); - ret = count; - sprintf(pg_result, "OK: add_device=%s", f); + if (!ret) { + ret = count; + sprintf(pg_result, "OK: add_device=%s", f); + } else + sprintf(pg_result, "ERROR: can not add device %s", f); goto out; } @@ -2932,7 +2935,7 @@ static struct sk_buff *fill_packet_ipv6(struct net_device *odev, sizeof(struct ipv6hdr) - sizeof(struct udphdr) - pkt_dev->pkt_overhead; - if (datalen < sizeof(struct pktgen_hdr)) { + if (datalen < 0 || datalen < sizeof(struct pktgen_hdr)) { datalen = 
sizeof(struct pktgen_hdr); if (net_ratelimit()) pr_info("increased datalen to %d\n", datalen); diff --git a/net/core/sock.c b/net/core/sock.c index 0f8402ea434b..4b469e367923 100644 --- a/net/core/sock.c +++ b/net/core/sock.c @@ -644,7 +644,8 @@ set_rcvbuf: case SO_KEEPALIVE: #ifdef CONFIG_INET - if (sk->sk_protocol == IPPROTO_TCP) + if (sk->sk_protocol == IPPROTO_TCP && + sk->sk_type == SOCK_STREAM) tcp_set_keepalive(sk, valbool); #endif sock_valbool_flag(sk, SOCK_KEEPOPEN, valbool); @@ -1411,6 +1412,7 @@ void sk_setup_caps(struct sock *sk, struct dst_entry *dst) } else { sk->sk_route_caps |= NETIF_F_SG | NETIF_F_HW_CSUM; sk->sk_gso_max_size = dst->dev->gso_max_size; + sk->sk_gso_max_segs = dst->dev->gso_max_segs; } } } diff --git a/net/core/sock_diag.c b/net/core/sock_diag.c index b9868e1fd62c..aa74be442dfb 100644 --- a/net/core/sock_diag.c +++ b/net/core/sock_diag.c @@ -126,6 +126,9 @@ static int __sock_diag_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh) if (nlmsg_len(nlh) < sizeof(*req)) return -EINVAL; + if (req->sdiag_family >= AF_MAX) + return -EINVAL; + hndl = sock_diag_lock_handler(req->sdiag_family); if (hndl == NULL) err = -ENOENT; diff --git a/net/dccp/ccid.h b/net/dccp/ccid.h index 75c3582a7678..fb85d371a8de 100644 --- a/net/dccp/ccid.h +++ b/net/dccp/ccid.h @@ -246,7 +246,7 @@ static inline int ccid_hc_rx_getsockopt(struct ccid *ccid, struct sock *sk, u32 __user *optval, int __user *optlen) { int rc = -ENOPROTOOPT; - if (ccid->ccid_ops->ccid_hc_rx_getsockopt != NULL) + if (ccid != NULL && ccid->ccid_ops->ccid_hc_rx_getsockopt != NULL) rc = ccid->ccid_ops->ccid_hc_rx_getsockopt(sk, optname, len, optval, optlen); return rc; @@ -257,7 +257,7 @@ static inline int ccid_hc_tx_getsockopt(struct ccid *ccid, struct sock *sk, u32 __user *optval, int __user *optlen) { int rc = -ENOPROTOOPT; - if (ccid->ccid_ops->ccid_hc_tx_getsockopt != NULL) + if (ccid != NULL && ccid->ccid_ops->ccid_hc_tx_getsockopt != NULL) rc = ccid->ccid_ops->ccid_hc_tx_getsockopt(sk, optname, len, optval, optlen); return rc; diff --git a/net/dccp/ccids/ccid3.c b/net/dccp/ccids/ccid3.c index 70bfaf2d1965..b658f3b8a23c 100644 --- a/net/dccp/ccids/ccid3.c +++ b/net/dccp/ccids/ccid3.c @@ -531,6 +531,7 @@ static int ccid3_hc_tx_getsockopt(struct sock *sk, const int optname, int len, case DCCP_SOCKOPT_CCID_TX_INFO: if (len < sizeof(tfrc)) return -EINVAL; + memset(&tfrc, 0, sizeof(tfrc)); tfrc.tfrctx_x = hc->tx_x; tfrc.tfrctx_x_recv = hc->tx_x_recv; tfrc.tfrctx_x_calc = hc->tx_x_calc; diff --git a/net/dccp/ipv4.c b/net/dccp/ipv4.c index caf6e1734b62..c6f6e425b2e4 100644 --- a/net/dccp/ipv4.c +++ b/net/dccp/ipv4.c @@ -435,8 +435,8 @@ exit: NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENDROPS); return NULL; put_and_exit: - bh_unlock_sock(newsk); - sock_put(newsk); + inet_csk_prepare_forced_close(newsk); + dccp_done(newsk); goto exit; } diff --git a/net/dccp/ipv6.c b/net/dccp/ipv6.c index 4dc588f520e0..aaa8f8bee9e1 100644 --- a/net/dccp/ipv6.c +++ b/net/dccp/ipv6.c @@ -611,7 +611,8 @@ static struct sock *dccp_v6_request_recv_sock(struct sock *sk, newinet->inet_rcv_saddr = LOOPBACK4_IPV6; if (__inet_inherit_port(sk, newsk) < 0) { - sock_put(newsk); + inet_csk_prepare_forced_close(newsk); + dccp_done(newsk); goto out; } __inet6_hash(newsk, NULL); diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c index 0b711659ac74..de5ec03036f1 100644 --- a/net/ipv4/af_inet.c +++ b/net/ipv4/af_inet.c @@ -240,8 +240,12 @@ EXPORT_SYMBOL(inet_listen); u32 inet_ehash_secret __read_mostly; EXPORT_SYMBOL(inet_ehash_secret); +u32 
ipv6_hash_secret __read_mostly; +EXPORT_SYMBOL(ipv6_hash_secret); + /* - * inet_ehash_secret must be set exactly once + * inet_ehash_secret must be set exactly once, and to a non nul value + * ipv6_hash_secret must be set exactly once. */ void build_ehash_secret(void) { @@ -251,7 +255,8 @@ void build_ehash_secret(void) get_random_bytes(&rnd, sizeof(rnd)); } while (rnd == 0); - cmpxchg(&inet_ehash_secret, 0, rnd); + if (cmpxchg(&inet_ehash_secret, 0, rnd) == 0) + get_random_bytes(&ipv6_hash_secret, sizeof(ipv6_hash_secret)); } EXPORT_SYMBOL(build_ehash_secret); diff --git a/net/ipv4/inet_connection_sock.c b/net/ipv4/inet_connection_sock.c index 19d66cefd7d3..3f4043258542 100644 --- a/net/ipv4/inet_connection_sock.c +++ b/net/ipv4/inet_connection_sock.c @@ -659,6 +659,22 @@ void inet_csk_destroy_sock(struct sock *sk) } EXPORT_SYMBOL(inet_csk_destroy_sock); +/* This function allows to force a closure of a socket after the call to + * tcp/dccp_create_openreq_child(). + */ +void inet_csk_prepare_forced_close(struct sock *sk) +{ + /* sk_clone_lock locked the socket and set refcnt to 2 */ + bh_unlock_sock(sk); + sock_put(sk); + + /* The below has to be done to allow calling inet_csk_destroy_sock */ + sock_set_flag(sk, SOCK_DEAD); + percpu_counter_inc(sk->sk_prot->orphan_count); + inet_sk(sk)->inet_num = 0; +} +EXPORT_SYMBOL(inet_csk_prepare_forced_close); + int inet_csk_listen_start(struct sock *sk, const int nr_table_entries) { struct inet_sock *inet = inet_sk(sk); diff --git a/net/ipv4/inet_diag.c b/net/ipv4/inet_diag.c index 8f8db724bfaf..d7b862ad4be4 100644 --- a/net/ipv4/inet_diag.c +++ b/net/ipv4/inet_diag.c @@ -44,6 +44,10 @@ struct inet_diag_entry { u16 dport; u16 family; u16 userlocks; +#if IS_ENABLED(CONFIG_IPV6) + struct in6_addr saddr_storage; /* for IPv4-mapped-IPv6 addresses */ + struct in6_addr daddr_storage; /* for IPv4-mapped-IPv6 addresses */ +#endif }; #define INET_DIAG_PUT(skb, attrtype, attrlen) \ @@ -419,25 +423,31 @@ static int inet_diag_bc_run(const struct nlattr *_bc, break; } - if (cond->prefix_len == 0) - break; - if (op->code == INET_DIAG_BC_S_COND) addr = entry->saddr; else addr = entry->daddr; + if (cond->family != AF_UNSPEC && + cond->family != entry->family) { + if (entry->family == AF_INET6 && + cond->family == AF_INET) { + if (addr[0] == 0 && addr[1] == 0 && + addr[2] == htonl(0xffff) && + bitstring_match(addr + 3, + cond->addr, + cond->prefix_len)) + break; + } + yes = 0; + break; + } + + if (cond->prefix_len == 0) + break; if (bitstring_match(addr, cond->addr, cond->prefix_len)) break; - if (entry->family == AF_INET6 && - cond->family == AF_INET) { - if (addr[0] == 0 && addr[1] == 0 && - addr[2] == htonl(0xffff) && - bitstring_match(addr + 3, cond->addr, - cond->prefix_len)) - break; - } yes = 0; break; } @@ -500,6 +510,55 @@ static int valid_cc(const void *bc, int len, int cc) return 0; } +/* Validate an inet_diag_hostcond. */ +static bool valid_hostcond(const struct inet_diag_bc_op *op, int len, + int *min_len) +{ + int addr_len; + struct inet_diag_hostcond *cond; + + /* Check hostcond space. */ + *min_len += sizeof(struct inet_diag_hostcond); + if (len < *min_len) + return false; + cond = (struct inet_diag_hostcond *)(op + 1); + + /* Check address family and address length. 
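/*
 * Editorial sketch, not part of the patch: the af_inet.c hunk above makes
 * build_ehash_secret() publish inet_ehash_secret exactly once (cmpxchg from
 * zero) and lets only the winner of that race initialise ipv6_hash_secret.
 * A hedged user-space analogue of "initialise exactly once even when called
 * concurrently", using C11 atomics and hypothetical names:
 */
#include <stdatomic.h>
#include <stdint.h>
#include <stdlib.h>

static _Atomic uint32_t primary_secret;	/* 0 means "not initialised yet" */
static uint32_t secondary_secret;	/* written only by the winning caller */

static void sketch_init_secrets_once(void)
{
	uint32_t expected = 0;
	uint32_t rnd;

	do {
		rnd = (uint32_t)random();	/* stand-in for get_random_bytes() */
	} while (rnd == 0);			/* zero is reserved as "unset" */

	/* Only the caller that swaps 0 -> rnd also sets the second secret. */
	if (atomic_compare_exchange_strong(&primary_secret, &expected, rnd))
		secondary_secret = (uint32_t)random();
}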
*/ + switch (cond->family) { + case AF_UNSPEC: + addr_len = 0; + break; + case AF_INET: + addr_len = sizeof(struct in_addr); + break; + case AF_INET6: + addr_len = sizeof(struct in6_addr); + break; + default: + return false; + } + *min_len += addr_len; + if (len < *min_len) + return false; + + /* Check prefix length (in bits) vs address length (in bytes). */ + if (cond->prefix_len > 8 * addr_len) + return false; + + return true; +} + +/* Validate a port comparison operator. */ +static inline bool valid_port_comparison(const struct inet_diag_bc_op *op, + int len, int *min_len) +{ + /* Port comparisons put the port in a follow-on inet_diag_bc_op. */ + *min_len += sizeof(struct inet_diag_bc_op); + if (len < *min_len) + return false; + return true; +} + static int inet_diag_bc_audit(const void *bytecode, int bytecode_len) { const void *bc = bytecode; @@ -507,29 +566,39 @@ static int inet_diag_bc_audit(const void *bytecode, int bytecode_len) while (len > 0) { const struct inet_diag_bc_op *op = bc; + int min_len = sizeof(struct inet_diag_bc_op); //printk("BC: %d %d %d {%d} / %d\n", op->code, op->yes, op->no, op[1].no, len); switch (op->code) { - case INET_DIAG_BC_AUTO: case INET_DIAG_BC_S_COND: case INET_DIAG_BC_D_COND: + if (!valid_hostcond(bc, len, &min_len)) + return -EINVAL; + break; case INET_DIAG_BC_S_GE: case INET_DIAG_BC_S_LE: case INET_DIAG_BC_D_GE: case INET_DIAG_BC_D_LE: - case INET_DIAG_BC_JMP: - if (op->no < 4 || op->no > len + 4 || op->no & 3) - return -EINVAL; - if (op->no < len && - !valid_cc(bytecode, bytecode_len, len - op->no)) + if (!valid_port_comparison(bc, len, &min_len)) return -EINVAL; break; + case INET_DIAG_BC_AUTO: + case INET_DIAG_BC_JMP: case INET_DIAG_BC_NOP: break; default: return -EINVAL; } - if (op->yes < 4 || op->yes > len + 4 || op->yes & 3) + + if (op->code != INET_DIAG_BC_NOP) { + if (op->no < min_len || op->no > len + 4 || op->no & 3) + return -EINVAL; + if (op->no < len && + !valid_cc(bytecode, bytecode_len, len - op->no)) + return -EINVAL; + } + + if (op->yes < min_len || op->yes > len + 4 || op->yes & 3) return -EINVAL; bc += op->yes; len -= op->yes; @@ -586,6 +655,36 @@ static int inet_twsk_diag_dump(struct inet_timewait_sock *tw, cb->nlh->nlmsg_seq, NLM_F_MULTI, cb->nlh); } +/* Get the IPv4, IPv6, or IPv4-mapped-IPv6 local and remote addresses + * from a request_sock. For IPv4-mapped-IPv6 we must map IPv4 to IPv6. 
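/*
 * Editorial sketch, not part of the patch: the inet_diag hunks above have to
 * recognise IPv4-mapped IPv6 addresses (::ffff:a.b.c.d) when an AF_INET
 * byte-code filter is run against an AF_INET6 socket, and to build that form
 * for request sockets.  A minimal user-space illustration of constructing
 * and detecting the mapped form (helper names are hypothetical):
 */
#include <netinet/in.h>
#include <stdbool.h>
#include <stdint.h>
#include <string.h>

static void sketch_map_v4_to_v6(uint32_t v4_be, struct in6_addr *v6)
{
	memset(v6, 0, sizeof(*v6));
	v6->s6_addr[10] = 0xff;
	v6->s6_addr[11] = 0xff;
	memcpy(&v6->s6_addr[12], &v4_be, 4);	/* ::ffff:a.b.c.d */
}

static bool sketch_is_v4_mapped(const struct in6_addr *v6)
{
	static const unsigned char prefix[12] = {
		0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0xff, 0xff
	};
	return memcmp(v6->s6_addr, prefix, sizeof(prefix)) == 0;
}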
+ */ +static inline void inet_diag_req_addrs(const struct sock *sk, + const struct request_sock *req, + struct inet_diag_entry *entry) +{ + struct inet_request_sock *ireq = inet_rsk(req); + +#if IS_ENABLED(CONFIG_IPV6) + if (sk->sk_family == AF_INET6) { + if (req->rsk_ops->family == AF_INET6) { + entry->saddr = inet6_rsk(req)->loc_addr.s6_addr32; + entry->daddr = inet6_rsk(req)->rmt_addr.s6_addr32; + } else if (req->rsk_ops->family == AF_INET) { + ipv6_addr_set_v4mapped(ireq->loc_addr, + &entry->saddr_storage); + ipv6_addr_set_v4mapped(ireq->rmt_addr, + &entry->daddr_storage); + entry->saddr = entry->saddr_storage.s6_addr32; + entry->daddr = entry->daddr_storage.s6_addr32; + } + } else +#endif + { + entry->saddr = &ireq->loc_addr; + entry->daddr = &ireq->rmt_addr; + } +} + static int inet_diag_fill_req(struct sk_buff *skb, struct sock *sk, struct request_sock *req, u32 pid, u32 seq, const struct nlmsghdr *unlh) @@ -624,8 +723,10 @@ static int inet_diag_fill_req(struct sk_buff *skb, struct sock *sk, r->idiag_inode = 0; #if IS_ENABLED(CONFIG_IPV6) if (r->idiag_family == AF_INET6) { - *(struct in6_addr *)r->id.idiag_src = inet6_rsk(req)->loc_addr; - *(struct in6_addr *)r->id.idiag_dst = inet6_rsk(req)->rmt_addr; + struct inet_diag_entry entry; + inet_diag_req_addrs(sk, req, &entry); + memcpy(r->id.idiag_src, entry.saddr, sizeof(struct in6_addr)); + memcpy(r->id.idiag_dst, entry.daddr, sizeof(struct in6_addr)); } #endif nlh->nlmsg_len = skb_tail_pointer(skb) - b; @@ -683,18 +784,7 @@ static int inet_diag_dump_reqs(struct sk_buff *skb, struct sock *sk, continue; if (bc) { - entry.saddr = -#if IS_ENABLED(CONFIG_IPV6) - (entry.family == AF_INET6) ? - inet6_rsk(req)->loc_addr.s6_addr32 : -#endif - &ireq->loc_addr; - entry.daddr = -#if IS_ENABLED(CONFIG_IPV6) - (entry.family == AF_INET6) ? - inet6_rsk(req)->rmt_addr.s6_addr32 : -#endif - &ireq->rmt_addr; + inet_diag_req_addrs(sk, req, &entry); entry.dport = ntohs(ireq->rmt_port); if (!inet_diag_bc_run(bc, &entry)) @@ -875,13 +965,16 @@ static int __inet_diag_dump(struct sk_buff *skb, struct netlink_callback *cb, struct inet_diag_req_v2 *r, struct nlattr *bc) { const struct inet_diag_handler *handler; + int err = 0; handler = inet_diag_lock_handler(r->sdiag_protocol); if (!IS_ERR(handler)) handler->dump(skb, cb, r, bc); + else + err = PTR_ERR(handler); inet_diag_unlock_handler(handler); - return skb->len; + return err ? 
: skb->len; } static int inet_diag_dump(struct sk_buff *skb, struct netlink_callback *cb) diff --git a/net/ipv4/ip_fragment.c b/net/ipv4/ip_fragment.c index 3727e234c884..b7bf6e30adbc 100644 --- a/net/ipv4/ip_fragment.c +++ b/net/ipv4/ip_fragment.c @@ -685,28 +685,27 @@ EXPORT_SYMBOL(ip_defrag); struct sk_buff *ip_check_defrag(struct sk_buff *skb, u32 user) { - const struct iphdr *iph; + struct iphdr iph; u32 len; if (skb->protocol != htons(ETH_P_IP)) return skb; - if (!pskb_may_pull(skb, sizeof(struct iphdr))) + if (!skb_copy_bits(skb, 0, &iph, sizeof(iph))) return skb; - iph = ip_hdr(skb); - if (iph->ihl < 5 || iph->version != 4) + if (iph.ihl < 5 || iph.version != 4) return skb; - if (!pskb_may_pull(skb, iph->ihl*4)) - return skb; - iph = ip_hdr(skb); - len = ntohs(iph->tot_len); - if (skb->len < len || len < (iph->ihl * 4)) + + len = ntohs(iph.tot_len); + if (skb->len < len || len < (iph.ihl * 4)) return skb; - if (ip_is_fragment(ip_hdr(skb))) { + if (ip_is_fragment(&iph)) { skb = skb_share_check(skb, GFP_ATOMIC); if (skb) { + if (!pskb_may_pull(skb, iph.ihl*4)) + return skb; if (pskb_trim_rcsum(skb, len)) return skb; memset(IPCB(skb), 0, sizeof(struct inet_skb_parm)); diff --git a/net/ipv4/ip_sockglue.c b/net/ipv4/ip_sockglue.c index 2fd0fba77124..374828487003 100644 --- a/net/ipv4/ip_sockglue.c +++ b/net/ipv4/ip_sockglue.c @@ -456,19 +456,28 @@ static int do_ip_setsockopt(struct sock *sk, int level, struct inet_sock *inet = inet_sk(sk); int val = 0, err; - if (((1<<optname) & ((1<<IP_PKTINFO) | (1<<IP_RECVTTL) | - (1<<IP_RECVOPTS) | (1<<IP_RECVTOS) | - (1<<IP_RETOPTS) | (1<<IP_TOS) | - (1<<IP_TTL) | (1<<IP_HDRINCL) | - (1<<IP_MTU_DISCOVER) | (1<<IP_RECVERR) | - (1<<IP_ROUTER_ALERT) | (1<<IP_FREEBIND) | - (1<<IP_PASSSEC) | (1<<IP_TRANSPARENT) | - (1<<IP_MINTTL) | (1<<IP_NODEFRAG))) || - optname == IP_UNICAST_IF || - optname == IP_MULTICAST_TTL || - optname == IP_MULTICAST_ALL || - optname == IP_MULTICAST_LOOP || - optname == IP_RECVORIGDSTADDR) { + switch (optname) { + case IP_PKTINFO: + case IP_RECVTTL: + case IP_RECVOPTS: + case IP_RECVTOS: + case IP_RETOPTS: + case IP_TOS: + case IP_TTL: + case IP_HDRINCL: + case IP_MTU_DISCOVER: + case IP_RECVERR: + case IP_ROUTER_ALERT: + case IP_FREEBIND: + case IP_PASSSEC: + case IP_TRANSPARENT: + case IP_MINTTL: + case IP_NODEFRAG: + case IP_UNICAST_IF: + case IP_MULTICAST_TTL: + case IP_MULTICAST_ALL: + case IP_MULTICAST_LOOP: + case IP_RECVORIGDSTADDR: if (optlen >= sizeof(int)) { if (get_user(val, (int __user *) optval)) return -EFAULT; @@ -580,7 +589,7 @@ static int do_ip_setsockopt(struct sock *sk, int level, case IP_TTL: if (optlen < 1) goto e_inval; - if (val != -1 && (val < 0 || val > 255)) + if (val != -1 && (val < 1 || val > 255)) goto e_inval; inet->uc_ttl = val; break; diff --git a/net/ipv4/ipmr.c b/net/ipv4/ipmr.c index 960fbfc3e976..8626b645ec62 100644 --- a/net/ipv4/ipmr.c +++ b/net/ipv4/ipmr.c @@ -124,6 +124,8 @@ static DEFINE_SPINLOCK(mfc_unres_lock); static struct kmem_cache *mrt_cachep __read_mostly; static struct mr_table *ipmr_new_table(struct net *net, u32 id); +static void ipmr_free_table(struct mr_table *mrt); + static int ip_mr_forward(struct net *net, struct mr_table *mrt, struct sk_buff *skb, struct mfc_cache *cache, int local); @@ -131,6 +133,7 @@ static int ipmr_cache_report(struct mr_table *mrt, struct sk_buff *pkt, vifi_t vifi, int assert); static int __ipmr_fill_mroute(struct mr_table *mrt, struct sk_buff *skb, struct mfc_cache *c, struct rtmsg *rtm); +static void mroute_clean_tables(struct mr_table *mrt); 
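/*
 * Editorial sketch, not part of the patch: the ip_check_defrag() rework above
 * copies the IPv4 header into a local struct (skb_copy_bits) instead of
 * assuming the packet data is linear, and only then sanity-checks it.  A
 * user-space analogue of those checks against a raw buffer, with
 * hypothetical names:
 */
#include <arpa/inet.h>
#include <netinet/ip.h>
#include <stdbool.h>
#include <stddef.h>
#include <string.h>

static bool sketch_ipv4_header_ok(const unsigned char *pkt, size_t pkt_len)
{
	struct iphdr iph;

	if (pkt_len < sizeof(iph))
		return false;
	memcpy(&iph, pkt, sizeof(iph));		/* local copy, like skb_copy_bits() */

	if (iph.version != 4 || iph.ihl < 5)
		return false;			/* not IPv4, or bogus header length */

	/* total length must cover the header and fit in the data we hold */
	if (ntohs(iph.tot_len) < (size_t)iph.ihl * 4 ||
	    ntohs(iph.tot_len) > pkt_len)
		return false;

	return true;
}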
static void ipmr_expire_process(unsigned long arg); #ifdef CONFIG_IP_MROUTE_MULTIPLE_TABLES @@ -271,7 +274,7 @@ static void __net_exit ipmr_rules_exit(struct net *net) list_for_each_entry_safe(mrt, next, &net->ipv4.mr_tables, list) { list_del(&mrt->list); - kfree(mrt); + ipmr_free_table(mrt); } fib_rules_unregister(net->ipv4.mr_rules_ops); } @@ -299,7 +302,7 @@ static int __net_init ipmr_rules_init(struct net *net) static void __net_exit ipmr_rules_exit(struct net *net) { - kfree(net->ipv4.mrt); + ipmr_free_table(net->ipv4.mrt); } #endif @@ -336,6 +339,13 @@ static struct mr_table *ipmr_new_table(struct net *net, u32 id) return mrt; } +static void ipmr_free_table(struct mr_table *mrt) +{ + del_timer_sync(&mrt->ipmr_expire_timer); + mroute_clean_tables(mrt); + kfree(mrt); +} + /* Service routines creating virtual interfaces: DVMRP tunnels and PIMREG */ static void ipmr_del_tunnel(struct net_device *dev, struct vifctl *v) diff --git a/net/ipv4/netfilter/nf_nat_sip.c b/net/ipv4/netfilter/nf_nat_sip.c index 57932c43960e..566be2dd73f4 100644 --- a/net/ipv4/netfilter/nf_nat_sip.c +++ b/net/ipv4/netfilter/nf_nat_sip.c @@ -148,7 +148,7 @@ static unsigned int ip_nat_sip(struct sk_buff *skb, unsigned int dataoff, if (ct_sip_parse_header_uri(ct, *dptr, NULL, *datalen, hdr, NULL, &matchoff, &matchlen, &addr, &port) > 0) { - unsigned int matchend, poff, plen, buflen, n; + unsigned int olen, matchend, poff, plen, buflen, n; char buffer[sizeof("nnn.nnn.nnn.nnn:nnnnn")]; /* We're only interested in headers related to this @@ -163,11 +163,12 @@ static unsigned int ip_nat_sip(struct sk_buff *skb, unsigned int dataoff, goto next; } + olen = *datalen; if (!map_addr(skb, dataoff, dptr, datalen, matchoff, matchlen, &addr, port)) return NF_DROP; - matchend = matchoff + matchlen; + matchend = matchoff + matchlen + *datalen - olen; /* The maddr= parameter (RFC 2361) specifies where to send * the reply. */ @@ -501,7 +502,10 @@ static unsigned int ip_nat_sdp_media(struct sk_buff *skb, unsigned int dataoff, ret = nf_ct_expect_related(rtcp_exp); if (ret == 0) break; - else if (ret != -EBUSY) { + else if (ret == -EBUSY) { + nf_ct_unexpect_related(rtp_exp); + continue; + } else if (ret < 0) { nf_ct_unexpect_related(rtp_exp); port = 0; break; diff --git a/net/ipv4/netfilter/nf_nat_standalone.c b/net/ipv4/netfilter/nf_nat_standalone.c index 3828a4229822..da4098f08784 100644 --- a/net/ipv4/netfilter/nf_nat_standalone.c +++ b/net/ipv4/netfilter/nf_nat_standalone.c @@ -194,7 +194,8 @@ nf_nat_out(unsigned int hooknum, if ((ct->tuplehash[dir].tuple.src.u3.ip != ct->tuplehash[!dir].tuple.dst.u3.ip) || - (ct->tuplehash[dir].tuple.src.u.all != + (ct->tuplehash[dir].tuple.dst.protonum != IPPROTO_ICMP && + ct->tuplehash[dir].tuple.src.u.all != ct->tuplehash[!dir].tuple.dst.u.all) ) return ip_xfrm_me_harder(skb) == 0 ? 
ret : NF_DROP; @@ -230,7 +231,8 @@ nf_nat_local_fn(unsigned int hooknum, ret = NF_DROP; } #ifdef CONFIG_XFRM - else if (ct->tuplehash[dir].tuple.dst.u.all != + else if (ct->tuplehash[dir].tuple.dst.protonum != IPPROTO_ICMP && + ct->tuplehash[dir].tuple.dst.u.all != ct->tuplehash[!dir].tuple.src.u.all) if (ip_xfrm_me_harder(skb)) ret = NF_DROP; diff --git a/net/ipv4/ping.c b/net/ipv4/ping.c index 50009c787bcd..c234bda5b801 100644 --- a/net/ipv4/ping.c +++ b/net/ipv4/ping.c @@ -321,8 +321,8 @@ void ping_err(struct sk_buff *skb, u32 info) struct iphdr *iph = (struct iphdr *)skb->data; struct icmphdr *icmph = (struct icmphdr *)(skb->data+(iph->ihl<<2)); struct inet_sock *inet_sock; - int type = icmph->type; - int code = icmph->code; + int type = icmp_hdr(skb)->type; + int code = icmp_hdr(skb)->code; struct net *net = dev_net(skb->dev); struct sock *sk; int harderr; diff --git a/net/ipv4/proc.c b/net/ipv4/proc.c index 8af0d44e4e22..212897517b89 100644 --- a/net/ipv4/proc.c +++ b/net/ipv4/proc.c @@ -232,7 +232,6 @@ static const struct snmp_mib snmp4_net_list[] = { SNMP_MIB_ITEM("TCPDSACKOfoSent", LINUX_MIB_TCPDSACKOFOSENT), SNMP_MIB_ITEM("TCPDSACKRecv", LINUX_MIB_TCPDSACKRECV), SNMP_MIB_ITEM("TCPDSACKOfoRecv", LINUX_MIB_TCPDSACKOFORECV), - SNMP_MIB_ITEM("TCPAbortOnSyn", LINUX_MIB_TCPABORTONSYN), SNMP_MIB_ITEM("TCPAbortOnData", LINUX_MIB_TCPABORTONDATA), SNMP_MIB_ITEM("TCPAbortOnClose", LINUX_MIB_TCPABORTONCLOSE), SNMP_MIB_ITEM("TCPAbortOnMemory", LINUX_MIB_TCPABORTONMEMORY), @@ -258,6 +257,8 @@ static const struct snmp_mib snmp4_net_list[] = { SNMP_MIB_ITEM("TCPReqQFullDrop", LINUX_MIB_TCPREQQFULLDROP), SNMP_MIB_ITEM("TCPRetransFail", LINUX_MIB_TCPRETRANSFAIL), SNMP_MIB_ITEM("TCPRcvCoalesce", LINUX_MIB_TCPRCVCOALESCE), + SNMP_MIB_ITEM("TCPChallengeACK", LINUX_MIB_TCPCHALLENGEACK), + SNMP_MIB_ITEM("TCPSYNChallenge", LINUX_MIB_TCPSYNCHALLENGE), SNMP_MIB_SENTINEL }; diff --git a/net/ipv4/raw.c b/net/ipv4/raw.c index bbd604c68e68..2fe0dc28ea98 100644 --- a/net/ipv4/raw.c +++ b/net/ipv4/raw.c @@ -131,18 +131,20 @@ found: * 0 - deliver * 1 - block */ -static __inline__ int icmp_filter(struct sock *sk, struct sk_buff *skb) +static int icmp_filter(const struct sock *sk, const struct sk_buff *skb) { - int type; + struct icmphdr _hdr; + const struct icmphdr *hdr; - if (!pskb_may_pull(skb, sizeof(struct icmphdr))) + hdr = skb_header_pointer(skb, skb_transport_offset(skb), + sizeof(_hdr), &_hdr); + if (!hdr) return 1; - type = icmp_hdr(skb)->type; - if (type < 32) { + if (hdr->type < 32) { __u32 data = raw_sk(sk)->filter.data; - return ((1 << type) & data) != 0; + return ((1U << hdr->type) & data) != 0; } /* Do not block unknown ICMP types */ diff --git a/net/ipv4/sysctl_net_ipv4.c b/net/ipv4/sysctl_net_ipv4.c index 7a7724da9bff..bf7a604c695c 100644 --- a/net/ipv4/sysctl_net_ipv4.c +++ b/net/ipv4/sysctl_net_ipv4.c @@ -590,6 +590,13 @@ static struct ctl_table ipv4_table[] = { .mode = 0644, .proc_handler = proc_dointvec }, + { + .procname = "tcp_challenge_ack_limit", + .data = &sysctl_tcp_challenge_ack_limit, + .maxlen = sizeof(int), + .mode = 0644, + .proc_handler = proc_dointvec + }, #ifdef CONFIG_NET_DMA { .procname = "tcp_dma_copybreak", diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c index 38d6e4374561..8429ac5eb914 100644 --- a/net/ipv4/tcp.c +++ b/net/ipv4/tcp.c @@ -485,14 +485,12 @@ int tcp_ioctl(struct sock *sk, int cmd, unsigned long arg) !tp->urg_data || before(tp->urg_seq, tp->copied_seq) || !before(tp->urg_seq, tp->rcv_nxt)) { - struct sk_buff *skb; answ = tp->rcv_nxt - tp->copied_seq; - /* Subtract 
1, if FIN is in queue. */ - skb = skb_peek_tail(&sk->sk_receive_queue); - if (answ && skb) - answ -= tcp_hdr(skb)->fin; + /* Subtract 1, if FIN was received */ + if (answ && sock_flag(sk, SOCK_DONE)) + answ--; } else answ = tp->urg_seq - tp->copied_seq; release_sock(sk); @@ -744,7 +742,9 @@ static unsigned int tcp_xmit_size_goal(struct sock *sk, u32 mss_now, old_size_goal + mss_now > xmit_size_goal)) { xmit_size_goal = old_size_goal; } else { - tp->xmit_size_goal_segs = xmit_size_goal / mss_now; + tp->xmit_size_goal_segs = + min_t(u16, xmit_size_goal / mss_now, + sk->sk_gso_max_segs); xmit_size_goal = tp->xmit_size_goal_segs * mss_now; } } @@ -1602,8 +1602,14 @@ int tcp_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg, } #ifdef CONFIG_NET_DMA - if (tp->ucopy.dma_chan) - dma_async_memcpy_issue_pending(tp->ucopy.dma_chan); + if (tp->ucopy.dma_chan) { + if (tp->rcv_wnd == 0 && + !skb_queue_empty(&sk->sk_async_wait_queue)) { + tcp_service_net_dma(sk, true); + tcp_cleanup_rbuf(sk, copied); + } else + dma_async_memcpy_issue_pending(tp->ucopy.dma_chan); + } #endif if (copied >= target) { /* Do not sleep, just process backlog. */ diff --git a/net/ipv4/tcp_cong.c b/net/ipv4/tcp_cong.c index 272a84593c85..69251dde7501 100644 --- a/net/ipv4/tcp_cong.c +++ b/net/ipv4/tcp_cong.c @@ -291,7 +291,8 @@ int tcp_is_cwnd_limited(const struct sock *sk, u32 in_flight) left = tp->snd_cwnd - in_flight; if (sk_can_gso(sk) && left * sysctl_tcp_tso_win_divisor < tp->snd_cwnd && - left * tp->mss_cache < sk->sk_gso_max_size) + left * tp->mss_cache < sk->sk_gso_max_size && + left < sk->sk_gso_max_segs) return 1; return left <= tcp_max_tso_deferred_mss(tp); } diff --git a/net/ipv4/tcp_illinois.c b/net/ipv4/tcp_illinois.c index 813b43a76fec..834857f3c871 100644 --- a/net/ipv4/tcp_illinois.c +++ b/net/ipv4/tcp_illinois.c @@ -313,11 +313,13 @@ static void tcp_illinois_info(struct sock *sk, u32 ext, .tcpv_rttcnt = ca->cnt_rtt, .tcpv_minrtt = ca->base_rtt, }; - u64 t = ca->sum_rtt; - do_div(t, ca->cnt_rtt); - info.tcpv_rtt = t; + if (info.tcpv_rttcnt > 0) { + u64 t = ca->sum_rtt; + do_div(t, info.tcpv_rttcnt); + info.tcpv_rtt = t; + } nla_put(skb, INET_DIAG_VEGASINFO, sizeof(info), &info); } } diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c index 6b017383119e..b40e05b9c451 100644 --- a/net/ipv4/tcp_input.c +++ b/net/ipv4/tcp_input.c @@ -88,6 +88,9 @@ int sysctl_tcp_app_win __read_mostly = 31; int sysctl_tcp_adv_win_scale __read_mostly = 1; EXPORT_SYMBOL(sysctl_tcp_adv_win_scale); +/* rfc5961 challenge ack rate limiting */ +int sysctl_tcp_challenge_ack_limit = 100; + int sysctl_tcp_stdurg __read_mostly; int sysctl_tcp_rfc1337 __read_mostly; int sysctl_tcp_max_orphans __read_mostly = NR_FILE; @@ -3037,13 +3040,14 @@ static void tcp_update_cwnd_in_recovery(struct sock *sk, int newly_acked_sacked, * tcp_xmit_retransmit_queue(). 
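/*
 * Editorial sketch, not part of the patch: the tcp_xmit_size_goal() and
 * tcp_is_cwnd_limited() hunks above clamp how much data is batched per
 * super-packet by the device's gso_max_segs as well as by gso_max_size.
 * The arithmetic reduces to "round the goal down to whole MSS units, but
 * never more than max_segs of them".  Hypothetical user-space form (caller
 * assumed to pass byte_goal >= mss):
 */
#include <stdint.h>

static uint32_t sketch_xmit_size_goal(uint32_t byte_goal, uint32_t mss,
				      uint16_t max_segs)
{
	uint32_t segs = byte_goal / mss;	/* whole MSS units that fit */

	if (segs > max_segs)
		segs = max_segs;		/* device limit on GSO segments */

	return segs * mss;
}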
*/ static void tcp_fastretrans_alert(struct sock *sk, int pkts_acked, - int newly_acked_sacked, bool is_dupack, + int prior_sacked, bool is_dupack, int flag) { struct inet_connection_sock *icsk = inet_csk(sk); struct tcp_sock *tp = tcp_sk(sk); int do_lost = is_dupack || ((flag & FLAG_DATA_SACKED) && (tcp_fackets_out(tp) > tp->reordering)); + int newly_acked_sacked = 0; int fast_rexmit = 0, mib_idx; if (WARN_ON(!tp->packets_out && tp->sacked_out)) @@ -3103,6 +3107,7 @@ static void tcp_fastretrans_alert(struct sock *sk, int pkts_acked, tcp_add_reno_sack(sk); } else do_lost = tcp_try_undo_partial(sk, pkts_acked); + newly_acked_sacked = pkts_acked + tp->sacked_out - prior_sacked; break; case TCP_CA_Loss: if (flag & FLAG_DATA_ACKED) @@ -3124,6 +3129,7 @@ static void tcp_fastretrans_alert(struct sock *sk, int pkts_acked, if (is_dupack) tcp_add_reno_sack(sk); } + newly_acked_sacked = pkts_acked + tp->sacked_out - prior_sacked; if (icsk->icsk_ca_state <= TCP_CA_Disorder) tcp_try_undo_dsack(sk); @@ -3633,6 +3639,11 @@ static int tcp_process_frto(struct sock *sk, int flag) } } else { if (!(flag & FLAG_DATA_ACKED) && (tp->frto_counter == 1)) { + if (!tcp_packets_in_flight(tp)) { + tcp_enter_frto_loss(sk, 2, flag); + return true; + } + /* Prevent sending of new data. */ tp->snd_cwnd = min(tp->snd_cwnd, tcp_packets_in_flight(tp)); @@ -3681,6 +3692,24 @@ static int tcp_process_frto(struct sock *sk, int flag) return 0; } +/* RFC 5961 7 [ACK Throttling] */ +static void tcp_send_challenge_ack(struct sock *sk) +{ + /* unprotected vars, we dont care of overwrites */ + static u32 challenge_timestamp; + static unsigned int challenge_count; + u32 now = jiffies / HZ; + + if (now != challenge_timestamp) { + challenge_timestamp = now; + challenge_count = 0; + } + if (++challenge_count <= sysctl_tcp_challenge_ack_limit) { + NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPCHALLENGEACK); + tcp_send_ack(sk); + } +} + /* This routine deals with incoming acks, but not outgoing ones. */ static int tcp_ack(struct sock *sk, const struct sk_buff *skb, int flag) { @@ -3695,14 +3724,19 @@ static int tcp_ack(struct sock *sk, const struct sk_buff *skb, int flag) int prior_packets; int prior_sacked = tp->sacked_out; int pkts_acked = 0; - int newly_acked_sacked = 0; int frto_cwnd = 0; /* If the ack is older than previous acks * then we can probably ignore it. */ - if (before(ack, prior_snd_una)) + if (before(ack, prior_snd_una)) { + /* RFC 5961 5.2 [Blind Data Injection Attack].[Mitigation] */ + if (before(ack, prior_snd_una - tp->max_window)) { + tcp_send_challenge_ack(sk); + return -1; + } goto old_ack; + } /* If the ack includes data we haven't sent yet, discard * this segment (RFC793 Section 3.9). 
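/*
 * Editorial sketch, not part of the patch: tcp_send_challenge_ack() above
 * rate-limits RFC 5961 challenge ACKs with a one-second bucket and a global
 * counter capped by sysctl_tcp_challenge_ack_limit.  A hedged user-space
 * analogue of that limiter (time() standing in for jiffies/HZ, names
 * hypothetical):
 */
#include <stdbool.h>
#include <time.h>

#define SKETCH_CHALLENGE_ACK_LIMIT 100	/* mirrors the sysctl's default */

static bool sketch_challenge_ack_allowed(void)
{
	static time_t bucket_start;		/* current one-second bucket */
	static unsigned int sent_this_bucket;	/* challenge ACKs sent in it */
	time_t now = time(NULL);

	if (now != bucket_start) {
		bucket_start = now;		/* new second, reset the budget */
		sent_this_bucket = 0;
	}
	return ++sent_this_bucket <= SKETCH_CHALLENGE_ACK_LIMIT;
}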
@@ -3768,8 +3802,6 @@ static int tcp_ack(struct sock *sk, const struct sk_buff *skb, int flag) flag |= tcp_clean_rtx_queue(sk, prior_fackets, prior_snd_una); pkts_acked = prior_packets - tp->packets_out; - newly_acked_sacked = (prior_packets - prior_sacked) - - (tp->packets_out - tp->sacked_out); if (tp->frto_counter) frto_cwnd = tcp_process_frto(sk, flag); @@ -3783,7 +3815,7 @@ static int tcp_ack(struct sock *sk, const struct sk_buff *skb, int flag) tcp_may_raise_cwnd(sk, flag)) tcp_cong_avoid(sk, ack, prior_in_flight); is_dupack = !(flag & (FLAG_SND_UNA_ADVANCED | FLAG_NOT_DUP)); - tcp_fastretrans_alert(sk, pkts_acked, newly_acked_sacked, + tcp_fastretrans_alert(sk, pkts_acked, prior_sacked, is_dupack, flag); } else { if ((flag & FLAG_DATA_ACKED) && !frto_cwnd) @@ -3798,7 +3830,7 @@ static int tcp_ack(struct sock *sk, const struct sk_buff *skb, int flag) no_queue: /* If data was DSACKed, see if we can undo a cwnd reduction. */ if (flag & FLAG_DSACKING_ACK) - tcp_fastretrans_alert(sk, pkts_acked, newly_acked_sacked, + tcp_fastretrans_alert(sk, pkts_acked, prior_sacked, is_dupack, flag); /* If this ack opens up a zero window, clear backoff. It was * being used to time the probes, and is probably far higher than @@ -3818,8 +3850,7 @@ old_ack: */ if (TCP_SKB_CB(skb)->sacked) { flag |= tcp_sacktag_write_queue(sk, skb, prior_snd_una); - newly_acked_sacked = tp->sacked_out - prior_sacked; - tcp_fastretrans_alert(sk, pkts_acked, newly_acked_sacked, + tcp_fastretrans_alert(sk, pkts_acked, prior_sacked, is_dupack, flag); } @@ -5271,8 +5302,8 @@ out: /* Does PAWS and seqno based validation of an incoming segment, flags will * play significant role here. */ -static int tcp_validate_incoming(struct sock *sk, struct sk_buff *skb, - const struct tcphdr *th, int syn_inerr) +static bool tcp_validate_incoming(struct sock *sk, struct sk_buff *skb, + const struct tcphdr *th, int syn_inerr) { const u8 *hash_location; struct tcp_sock *tp = tcp_sk(sk); @@ -5297,38 +5328,48 @@ static int tcp_validate_incoming(struct sock *sk, struct sk_buff *skb, * an acknowledgment should be sent in reply (unless the RST * bit is set, if so drop the segment and return)". */ - if (!th->rst) + if (!th->rst) { + if (th->syn) + goto syn_challenge; tcp_send_dupack(sk, skb); + } goto discard; } /* Step 2: check RST bit */ if (th->rst) { - tcp_reset(sk); + /* RFC 5961 3.2 : + * If sequence number exactly matches RCV.NXT, then + * RESET the connection + * else + * Send a challenge ACK + */ + if (TCP_SKB_CB(skb)->seq == tp->rcv_nxt) + tcp_reset(sk); + else + tcp_send_challenge_ack(sk); goto discard; } - /* ts_recent update must be made after we are sure that the packet - * is in window. - */ - tcp_replace_ts_recent(tp, TCP_SKB_CB(skb)->seq); - /* step 3: check security and precedence [ignored] */ - /* step 4: Check for a SYN in window. 
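/*
 * Editorial sketch, not part of the patch: the tcp_validate_incoming()
 * change above implements RFC 5961 3.2 - an in-window RST only tears the
 * connection down if its sequence number is exactly rcv_nxt; otherwise a
 * challenge ACK is sent instead.  The decision in a hedged stand-alone form
 * with hypothetical names:
 */
#include <stdint.h>

enum sketch_rst_action {
	SKETCH_RST_RESET,	/* exact match: reset the connection */
	SKETCH_RST_CHALLENGE	/* merely in window: send a challenge ACK */
};

static enum sketch_rst_action sketch_handle_in_window_rst(uint32_t seg_seq,
							   uint32_t rcv_nxt)
{
	return (seg_seq == rcv_nxt) ? SKETCH_RST_RESET : SKETCH_RST_CHALLENGE;
}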
*/ - if (th->syn && !before(TCP_SKB_CB(skb)->seq, tp->rcv_nxt)) { + /* step 4: Check for a SYN + * RFC 5691 4.2 : Send a challenge ack + */ + if (th->syn) { +syn_challenge: if (syn_inerr) TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_INERRS); - NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPABORTONSYN); - tcp_reset(sk); - return -1; + NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPSYNCHALLENGE); + tcp_send_challenge_ack(sk); + goto discard; } - return 1; + return true; discard: __kfree_skb(skb); - return 0; + return false; } /* @@ -5358,7 +5399,6 @@ int tcp_rcv_established(struct sock *sk, struct sk_buff *skb, const struct tcphdr *th, unsigned int len) { struct tcp_sock *tp = tcp_sk(sk); - int res; /* * Header prediction. @@ -5538,14 +5578,18 @@ slow_path: * Standard slow path. */ - res = tcp_validate_incoming(sk, skb, th, 1); - if (res <= 0) - return -res; + if (!tcp_validate_incoming(sk, skb, th, 1)) + return 0; step5: if (th->ack && tcp_ack(sk, skb, FLAG_SLOWPATH) < 0) goto discard; + /* ts_recent update must be made after we are sure that the packet + * is in window. + */ + tcp_replace_ts_recent(tp, TCP_SKB_CB(skb)->seq); + tcp_rcv_rtt_measure_ts(sk, skb); /* Process urgent data. */ @@ -5850,7 +5894,6 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb, struct tcp_sock *tp = tcp_sk(sk); struct inet_connection_sock *icsk = inet_csk(sk); int queued = 0; - int res; tp->rx_opt.saw_tstamp = 0; @@ -5905,9 +5948,8 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb, return 0; } - res = tcp_validate_incoming(sk, skb, th, 0); - if (res <= 0) - return -res; + if (!tcp_validate_incoming(sk, skb, th, 0)) + return 0; /* step 5: check the ACK field */ if (th->ack) { @@ -6018,6 +6060,11 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb, } else goto discard; + /* ts_recent update must be made after we are sure that the packet + * is in window. + */ + tcp_replace_ts_recent(tp, TCP_SKB_CB(skb)->seq); + /* step 6: check the URG bit */ tcp_urg(sk, skb, th); diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c index 0cb86ceb652f..76f50e1b53af 100644 --- a/net/ipv4/tcp_ipv4.c +++ b/net/ipv4/tcp_ipv4.c @@ -678,10 +678,11 @@ static void tcp_v4_send_reset(struct sock *sk, struct sk_buff *skb) arg.csumoffset = offsetof(struct tcphdr, check) / 2; arg.flags = (sk && inet_sk(sk)->transparent) ? IP_REPLY_ARG_NOSRCCHECK : 0; /* When socket is gone, all binding information is lost. - * routing might fail in this case. using iif for oif to - * make sure we can deliver it + * routing might fail in this case. No choice here, if we choose to force + * input interface, we will misroute in case of asymmetric route. */ - arg.bound_dev_if = sk ? sk->sk_bound_dev_if : inet_iif(skb); + if (sk) + arg.bound_dev_if = sk->sk_bound_dev_if; net = dev_net(skb_dst(skb)->dev); arg.tos = ip_hdr(skb)->tos; @@ -1523,10 +1524,8 @@ exit: NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENDROPS); return NULL; put_and_exit: - tcp_clear_xmit_timers(newsk); - tcp_cleanup_congestion_control(newsk); - bh_unlock_sock(newsk); - sock_put(newsk); + inet_csk_prepare_forced_close(newsk); + tcp_done(newsk); goto exit; } EXPORT_SYMBOL(tcp_v4_syn_recv_sock); diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c index 7ac6423117ad..2d27e1af9303 100644 --- a/net/ipv4/tcp_output.c +++ b/net/ipv4/tcp_output.c @@ -1318,21 +1318,21 @@ static void tcp_cwnd_validate(struct sock *sk) * when we would be allowed to send the split-due-to-Nagle skb fully. 
*/ static unsigned int tcp_mss_split_point(const struct sock *sk, const struct sk_buff *skb, - unsigned int mss_now, unsigned int cwnd) + unsigned int mss_now, unsigned int max_segs) { const struct tcp_sock *tp = tcp_sk(sk); - u32 needed, window, cwnd_len; + u32 needed, window, max_len; window = tcp_wnd_end(tp) - TCP_SKB_CB(skb)->seq; - cwnd_len = mss_now * cwnd; + max_len = mss_now * max_segs; - if (likely(cwnd_len <= window && skb != tcp_write_queue_tail(sk))) - return cwnd_len; + if (likely(max_len <= window && skb != tcp_write_queue_tail(sk))) + return max_len; needed = min(skb->len, window); - if (cwnd_len <= needed) - return cwnd_len; + if (max_len <= needed) + return max_len; return needed - needed % mss_now; } @@ -1560,7 +1560,8 @@ static int tcp_tso_should_defer(struct sock *sk, struct sk_buff *skb) limit = min(send_win, cong_win); /* If a full-sized TSO skb can be sent, do it. */ - if (limit >= sk->sk_gso_max_size) + if (limit >= min_t(unsigned int, sk->sk_gso_max_size, + sk->sk_gso_max_segs * tp->mss_cache)) goto send_now; /* Middle in queue won't get any more data, full sendable already? */ @@ -1786,7 +1787,9 @@ static int tcp_write_xmit(struct sock *sk, unsigned int mss_now, int nonagle, limit = mss_now; if (tso_segs > 1 && !tcp_urg_mode(tp)) limit = tcp_mss_split_point(sk, skb, mss_now, - cwnd_quota); + min_t(unsigned int, + cwnd_quota, + sk->sk_gso_max_segs)); if (skb->len > limit && unlikely(tso_fragment(sk, skb, limit, mss_now, gfp))) diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c index 7d5cb975cc6f..81e0ad2442c7 100644 --- a/net/ipv6/addrconf.c +++ b/net/ipv6/addrconf.c @@ -493,8 +493,7 @@ static void addrconf_forward_change(struct net *net, __s32 newf) struct net_device *dev; struct inet6_dev *idev; - rcu_read_lock(); - for_each_netdev_rcu(net, dev) { + for_each_netdev(net, dev) { idev = __in6_dev_get(dev); if (idev) { int changed = (!idev->cnf.forwarding) ^ (!newf); @@ -503,7 +502,6 @@ static void addrconf_forward_change(struct net *net, __s32 newf) dev_forward_change(idev); } } - rcu_read_unlock(); } static int addrconf_fixup_forwarding(struct ctl_table *table, int *p, int newf) @@ -795,10 +793,16 @@ static void ipv6_del_addr(struct inet6_ifaddr *ifp) struct in6_addr prefix; struct rt6_info *rt; struct net *net = dev_net(ifp->idev->dev); + struct flowi6 fl6 = {}; + ipv6_addr_prefix(&prefix, &ifp->addr, ifp->prefix_len); - rt = rt6_lookup(net, &prefix, NULL, ifp->idev->dev->ifindex, 1); + fl6.flowi6_oif = ifp->idev->dev->ifindex; + fl6.daddr = prefix; + rt = (struct rt6_info *)ip6_route_lookup(net, &fl6, + RT6_LOOKUP_F_IFACE); - if (rt && addrconf_is_prefix_route(rt)) { + if (rt != net->ipv6.ip6_null_entry && + addrconf_is_prefix_route(rt)) { if (onlink == 0) { ip6_del_rt(rt); rt = NULL; @@ -1732,7 +1736,7 @@ static struct rt6_info *addrconf_get_prefix_route(const struct in6_addr *pfx, continue; if ((rt->rt6i_flags & flags) != flags) continue; - if ((noflags != 0) && ((rt->rt6i_flags & flags) != 0)) + if ((rt->rt6i_flags & noflags) != 0) continue; dst_hold(&rt->dst); break; @@ -3091,14 +3095,15 @@ static struct inet6_ifaddr *if6_get_first(struct seq_file *seq, loff_t pos) struct hlist_node *n; hlist_for_each_entry_rcu_bh(ifa, n, &inet6_addr_lst[state->bucket], addr_lst) { + if (!net_eq(dev_net(ifa->idev->dev), net)) + continue; /* sync with offset */ if (p < state->offset) { p++; continue; } state->offset++; - if (net_eq(dev_net(ifa->idev->dev), net)) - return ifa; + return ifa; } /* prepare for next bucket */ @@ -3116,18 +3121,20 @@ static struct inet6_ifaddr 
*if6_get_next(struct seq_file *seq, struct hlist_node *n = &ifa->addr_lst; hlist_for_each_entry_continue_rcu_bh(ifa, n, addr_lst) { + if (!net_eq(dev_net(ifa->idev->dev), net)) + continue; state->offset++; - if (net_eq(dev_net(ifa->idev->dev), net)) - return ifa; + return ifa; } while (++state->bucket < IN6_ADDR_HSIZE) { state->offset = 0; hlist_for_each_entry_rcu_bh(ifa, n, &inet6_addr_lst[state->bucket], addr_lst) { + if (!net_eq(dev_net(ifa->idev->dev), net)) + continue; state->offset++; - if (net_eq(dev_net(ifa->idev->dev), net)) - return ifa; + return ifa; } } diff --git a/net/ipv6/ip6_fib.c b/net/ipv6/ip6_fib.c index 92bb9cba5c39..c3a007dc37cd 100644 --- a/net/ipv6/ip6_fib.c +++ b/net/ipv6/ip6_fib.c @@ -818,6 +818,10 @@ int fib6_add(struct fib6_node *root, struct rt6_info *rt, struct nl_info *info) offsetof(struct rt6_info, rt6i_src), allow_create, replace_required); + if (IS_ERR(sn)) { + err = PTR_ERR(sn); + sn = NULL; + } if (!sn) { /* If it is failed, discard just allocated root, and then (in st_failure) stale node diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c index 13e5399b1cd9..ce661baa60cb 100644 --- a/net/ipv6/ip6_output.c +++ b/net/ipv6/ip6_output.c @@ -1287,10 +1287,10 @@ int ip6_append_data(struct sock *sk, int getfrag(void *from, char *to, cork->length = 0; sk->sk_sndmsg_page = NULL; sk->sk_sndmsg_off = 0; - exthdrlen = (opt ? opt->opt_flen : 0) - rt->rt6i_nfheader_len; + exthdrlen = (opt ? opt->opt_flen : 0); length += exthdrlen; transhdrlen += exthdrlen; - dst_exthdrlen = rt->dst.header_len; + dst_exthdrlen = rt->dst.header_len - rt->rt6i_nfheader_len; } else { rt = (struct rt6_info *)cork->dst; fl6 = &inet->cork.fl.u.ip6; diff --git a/net/ipv6/ipv6_sockglue.c b/net/ipv6/ipv6_sockglue.c index 63dd1f89ed7d..34c1109d3468 100644 --- a/net/ipv6/ipv6_sockglue.c +++ b/net/ipv6/ipv6_sockglue.c @@ -828,6 +828,7 @@ pref_skip_coa: if (val < 0 || val > 255) goto e_inval; np->min_hopcount = val; + retv = 0; break; case IPV6_DONTFRAG: np->dontfrag = valbool; diff --git a/net/ipv6/mip6.c b/net/ipv6/mip6.c index 7e1e0fbfef21..740c919e0b6c 100644 --- a/net/ipv6/mip6.c +++ b/net/ipv6/mip6.c @@ -84,28 +84,30 @@ static int mip6_mh_len(int type) static int mip6_mh_filter(struct sock *sk, struct sk_buff *skb) { - struct ip6_mh *mh; + struct ip6_mh _hdr; + const struct ip6_mh *mh; - if (!pskb_may_pull(skb, (skb_transport_offset(skb)) + 8) || - !pskb_may_pull(skb, (skb_transport_offset(skb) + - ((skb_transport_header(skb)[1] + 1) << 3)))) + mh = skb_header_pointer(skb, skb_transport_offset(skb), + sizeof(_hdr), &_hdr); + if (!mh) return -1; - mh = (struct ip6_mh *)skb_transport_header(skb); + if (((mh->ip6mh_hdrlen + 1) << 3) > skb->len) + return -1; if (mh->ip6mh_hdrlen < mip6_mh_len(mh->ip6mh_type)) { LIMIT_NETDEBUG(KERN_DEBUG "mip6: MH message too short: %d vs >=%d\n", mh->ip6mh_hdrlen, mip6_mh_len(mh->ip6mh_type)); - mip6_param_prob(skb, 0, ((&mh->ip6mh_hdrlen) - - skb_network_header(skb))); + mip6_param_prob(skb, 0, offsetof(struct ip6_mh, ip6mh_hdrlen) + + skb_network_header_len(skb)); return -1; } if (mh->ip6mh_proto != IPPROTO_NONE) { LIMIT_NETDEBUG(KERN_DEBUG "mip6: MH invalid payload proto = %d\n", mh->ip6mh_proto); - mip6_param_prob(skb, 0, ((&mh->ip6mh_proto) - - skb_network_header(skb))); + mip6_param_prob(skb, 0, offsetof(struct ip6_mh, ip6mh_proto) + + skb_network_header_len(skb)); return -1; } diff --git a/net/ipv6/ndisc.c b/net/ipv6/ndisc.c index 176b469322ac..843d6ebc525e 100644 --- a/net/ipv6/ndisc.c +++ b/net/ipv6/ndisc.c @@ -593,7 +593,7 @@ static void 
ndisc_send_unsol_na(struct net_device *dev) { struct inet6_dev *idev; struct inet6_ifaddr *ifa; - struct in6_addr mcaddr; + struct in6_addr mcaddr = IN6ADDR_LINKLOCAL_ALLNODES_INIT; idev = in6_dev_get(dev); if (!idev) @@ -601,7 +601,6 @@ static void ndisc_send_unsol_na(struct net_device *dev) read_lock_bh(&idev->lock); list_for_each_entry(ifa, &idev->addr_list, if_list) { - addrconf_addr_solict_mult(&ifa->addr, &mcaddr); ndisc_send_na(dev, NULL, &mcaddr, &ifa->addr, /*router=*/ !!idev->cnf.forwarding, /*solicited=*/ false, /*override=*/ true, diff --git a/net/ipv6/raw.c b/net/ipv6/raw.c index 5bddea778840..3ee28700de4c 100644 --- a/net/ipv6/raw.c +++ b/net/ipv6/raw.c @@ -107,21 +107,20 @@ found: * 0 - deliver * 1 - block */ -static __inline__ int icmpv6_filter(struct sock *sk, struct sk_buff *skb) +static int icmpv6_filter(const struct sock *sk, const struct sk_buff *skb) { - struct icmp6hdr *icmph; - struct raw6_sock *rp = raw6_sk(sk); - - if (pskb_may_pull(skb, sizeof(struct icmp6hdr))) { - __u32 *data = &rp->filter.data[0]; - int bit_nr; + struct icmp6hdr *_hdr; + const struct icmp6hdr *hdr; - icmph = (struct icmp6hdr *) skb->data; - bit_nr = icmph->icmp6_type; + hdr = skb_header_pointer(skb, skb_transport_offset(skb), + sizeof(_hdr), &_hdr); + if (hdr) { + const __u32 *data = &raw6_sk(sk)->filter.data[0]; + unsigned int type = hdr->icmp6_type; - return (data[bit_nr >> 5] & (1 << (bit_nr & 31))) != 0; + return (data[type >> 5] & (1U << (type & 31))) != 0; } - return 0; + return 1; } #if defined(CONFIG_IPV6_MIP6) || defined(CONFIG_IPV6_MIP6_MODULE) diff --git a/net/ipv6/route.c b/net/ipv6/route.c index c4920ca83f5f..493490f052a0 100644 --- a/net/ipv6/route.c +++ b/net/ipv6/route.c @@ -203,7 +203,7 @@ static struct dst_ops ip6_dst_blackhole_ops = { }; static const u32 ip6_template_metrics[RTAX_MAX] = { - [RTAX_HOPLIMIT - 1] = 255, + [RTAX_HOPLIMIT - 1] = 0, }; static struct rt6_info ip6_null_entry_template = { @@ -846,7 +846,8 @@ restart: dst_hold(&rt->dst); read_unlock_bh(&table->tb6_lock); - if (!dst_get_neighbour_noref_raw(&rt->dst) && !(rt->rt6i_flags & RTF_NONEXTHOP)) + if (!dst_get_neighbour_noref_raw(&rt->dst) && + !(rt->rt6i_flags & (RTF_NONEXTHOP | RTF_LOCAL))) nrt = rt6_alloc_cow(rt, &fl6->daddr, &fl6->saddr); else if (!(rt->dst.flags & DST_HOST)) nrt = rt6_alloc_clone(rt, &fl6->daddr); @@ -1135,7 +1136,7 @@ struct dst_entry *icmp6_dst_alloc(struct net_device *dev, rt->rt6i_dst.addr = fl6->daddr; rt->rt6i_dst.plen = 128; rt->rt6i_idev = idev; - dst_metric_set(&rt->dst, RTAX_HOPLIMIT, 255); + dst_metric_set(&rt->dst, RTAX_HOPLIMIT, 0); spin_lock_bh(&icmp6_dst_lock); rt->dst.next = icmp6_dst_gc_list; @@ -1485,17 +1486,18 @@ static int __ip6_del_rt(struct rt6_info *rt, struct nl_info *info) struct fib6_table *table; struct net *net = dev_net(rt->dst.dev); - if (rt == net->ipv6.ip6_null_entry) - return -ENOENT; + if (rt == net->ipv6.ip6_null_entry) { + err = -ENOENT; + goto out; + } table = rt->rt6i_table; write_lock_bh(&table->tb6_lock); - err = fib6_del(rt, info); - dst_release(&rt->dst); - write_unlock_bh(&table->tb6_lock); +out: + dst_release(&rt->dst); return err; } diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c index 98256cf72f9d..3889e0204183 100644 --- a/net/ipv6/tcp_ipv6.c +++ b/net/ipv6/tcp_ipv6.c @@ -896,7 +896,8 @@ static void tcp_v6_send_response(struct sk_buff *skb, u32 seq, u32 ack, u32 win, __tcp_v6_send_check(buff, &fl6.saddr, &fl6.daddr); fl6.flowi6_proto = IPPROTO_TCP; - fl6.flowi6_oif = inet6_iif(skb); + if (ipv6_addr_type(&fl6.daddr) & 
IPV6_ADDR_LINKLOCAL) + fl6.flowi6_oif = inet6_iif(skb); fl6.fl6_dport = t1->dest; fl6.fl6_sport = t1->source; security_skb_classify_flow(skb, flowi6_to_flowi(&fl6)); @@ -1410,7 +1411,8 @@ static struct sock * tcp_v6_syn_recv_sock(struct sock *sk, struct sk_buff *skb, #endif if (__inet_inherit_port(sk, newsk) < 0) { - sock_put(newsk); + inet_csk_prepare_forced_close(newsk); + tcp_done(newsk); goto out; } __inet6_hash(newsk, NULL); diff --git a/net/l2tp/l2tp_core.c b/net/l2tp/l2tp_core.c index 89ff8c67943e..7501b22b9c59 100644 --- a/net/l2tp/l2tp_core.c +++ b/net/l2tp/l2tp_core.c @@ -1253,11 +1253,10 @@ static void l2tp_tunnel_free(struct l2tp_tunnel *tunnel) /* Remove from tunnel list */ spin_lock_bh(&pn->l2tp_tunnel_list_lock); list_del_rcu(&tunnel->list); + kfree_rcu(tunnel, rcu); spin_unlock_bh(&pn->l2tp_tunnel_list_lock); - synchronize_rcu(); atomic_dec(&l2tp_tunnel_count); - kfree(tunnel); } /* Create a socket for the tunnel, if one isn't set up by diff --git a/net/l2tp/l2tp_core.h b/net/l2tp/l2tp_core.h index a16a48e79fab..439379484bfc 100644 --- a/net/l2tp/l2tp_core.h +++ b/net/l2tp/l2tp_core.h @@ -157,6 +157,7 @@ struct l2tp_tunnel_cfg { struct l2tp_tunnel { int magic; /* Should be L2TP_TUNNEL_MAGIC */ + struct rcu_head rcu; rwlock_t hlist_lock; /* protect session_hlist */ struct hlist_head session_hlist[L2TP_HASH_SIZE]; /* hashed list of sessions, diff --git a/net/l2tp/l2tp_eth.c b/net/l2tp/l2tp_eth.c index 7446038e6b42..ab9a293ad034 100644 --- a/net/l2tp/l2tp_eth.c +++ b/net/l2tp/l2tp_eth.c @@ -132,7 +132,7 @@ static void l2tp_eth_dev_recv(struct l2tp_session *session, struct sk_buff *skb, printk("\n"); } - if (!pskb_may_pull(skb, sizeof(ETH_HLEN))) + if (!pskb_may_pull(skb, ETH_HLEN)) goto error; secpath_reset(skb); @@ -269,6 +269,7 @@ static int l2tp_eth_create(struct net *net, u32 tunnel_id, u32 session_id, u32 p out_del_dev: free_netdev(dev); + spriv->dev = NULL; out_del_session: l2tp_session_delete(session); out: diff --git a/net/llc/af_llc.c b/net/llc/af_llc.c index b9bef2c75026..df08d7779e1d 100644 --- a/net/llc/af_llc.c +++ b/net/llc/af_llc.c @@ -971,14 +971,13 @@ static int llc_ui_getname(struct socket *sock, struct sockaddr *uaddr, struct sockaddr_llc sllc; struct sock *sk = sock->sk; struct llc_sock *llc = llc_sk(sk); - int rc = 0; + int rc = -EBADF; memset(&sllc, 0, sizeof(sllc)); lock_sock(sk); if (sock_flag(sk, SOCK_ZAPPED)) goto out; *uaddrlen = sizeof(sllc); - memset(uaddr, 0, *uaddrlen); if (peer) { rc = -ENOTCONN; if (sk->sk_state != TCP_ESTABLISHED) diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c index 677d65929780..944334869596 100644 --- a/net/mac80211/cfg.c +++ b/net/mac80211/cfg.c @@ -151,7 +151,17 @@ static int ieee80211_add_key(struct wiphy *wiphy, struct net_device *dev, sta = sta_info_get(sdata, mac_addr); else sta = sta_info_get_bss(sdata, mac_addr); - if (!sta) { + /* + * The ASSOC test makes sure the driver is ready to + * receive the key. When wpa_supplicant has roamed + * using FT, it attempts to set the key before + * association has completed, this rejects that attempt + * so it will set the key again after assocation. + * + * TODO: accept the key if we have a station entry and + * add it to the device after the station. 
+ */ + if (!sta || !test_sta_flag(sta, WLAN_STA_ASSOC)) { ieee80211_key_free(sdata->local, key); err = -ENOENT; goto out_unlock; diff --git a/net/mac80211/ibss.c b/net/mac80211/ibss.c index cef7c29214a8..50191a30207c 100644 --- a/net/mac80211/ibss.c +++ b/net/mac80211/ibss.c @@ -664,8 +664,8 @@ static void ieee80211_sta_merge_ibss(struct ieee80211_sub_if_data *sdata) printk(KERN_DEBUG "%s: No active IBSS STAs - trying to scan for other " "IBSS networks with same SSID (merge)\n", sdata->name); - ieee80211_request_internal_scan(sdata, - ifibss->ssid, ifibss->ssid_len, NULL); + ieee80211_request_ibss_scan(sdata, ifibss->ssid, ifibss->ssid_len, + NULL); } static void ieee80211_sta_create_ibss(struct ieee80211_sub_if_data *sdata) @@ -772,9 +772,8 @@ static void ieee80211_sta_find_ibss(struct ieee80211_sub_if_data *sdata) printk(KERN_DEBUG "%s: Trigger new scan to find an IBSS to " "join\n", sdata->name); - ieee80211_request_internal_scan(sdata, - ifibss->ssid, ifibss->ssid_len, - ifibss->fixed_channel ? ifibss->channel : NULL); + ieee80211_request_ibss_scan(sdata, ifibss->ssid, + ifibss->ssid_len, chan); } else { int interval = IEEE80211_SCAN_INTERVAL; @@ -1110,7 +1109,7 @@ int ieee80211_ibss_join(struct ieee80211_sub_if_data *sdata, sdata->u.ibss.state = IEEE80211_IBSS_MLME_SEARCH; sdata->u.ibss.ibss_join_req = jiffies; - memcpy(sdata->u.ibss.ssid, params->ssid, IEEE80211_MAX_SSID_LEN); + memcpy(sdata->u.ibss.ssid, params->ssid, params->ssid_len); sdata->u.ibss.ssid_len = params->ssid_len; mutex_unlock(&sdata->u.ibss.mtx); @@ -1153,10 +1152,6 @@ int ieee80211_ibss_leave(struct ieee80211_sub_if_data *sdata) mutex_lock(&sdata->u.ibss.mtx); - sdata->u.ibss.state = IEEE80211_IBSS_MLME_SEARCH; - memset(sdata->u.ibss.bssid, 0, ETH_ALEN); - sdata->u.ibss.ssid_len = 0; - active_ibss = ieee80211_sta_active_ibss(sdata); if (!active_ibss && !is_zero_ether_addr(ifibss->bssid)) { @@ -1177,6 +1172,10 @@ int ieee80211_ibss_leave(struct ieee80211_sub_if_data *sdata) } } + ifibss->state = IEEE80211_IBSS_MLME_SEARCH; + memset(ifibss->bssid, 0, ETH_ALEN); + ifibss->ssid_len = 0; + sta_info_flush(sdata->local, sdata); spin_lock_bh(&ifibss->incomplete_lock); diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h index a68ef6219833..e4b6910d745d 100644 --- a/net/mac80211/ieee80211_i.h +++ b/net/mac80211/ieee80211_i.h @@ -1239,9 +1239,9 @@ void ieee80211_mesh_rx_queued_mgmt(struct ieee80211_sub_if_data *sdata, /* scan/BSS handling */ void ieee80211_scan_work(struct work_struct *work); -int ieee80211_request_internal_scan(struct ieee80211_sub_if_data *sdata, - const u8 *ssid, u8 ssid_len, - struct ieee80211_channel *chan); +int ieee80211_request_ibss_scan(struct ieee80211_sub_if_data *sdata, + const u8 *ssid, u8 ssid_len, + struct ieee80211_channel *chan); int ieee80211_request_scan(struct ieee80211_sub_if_data *sdata, struct cfg80211_scan_request *req); void ieee80211_scan_cancel(struct ieee80211_local *local); @@ -1270,10 +1270,8 @@ int ieee80211_request_sched_scan_stop(struct ieee80211_sub_if_data *sdata); void ieee80211_sched_scan_stopped_work(struct work_struct *work); /* off-channel helpers */ -void ieee80211_offchannel_stop_vifs(struct ieee80211_local *local, - bool offchannel_ps_enable); -void ieee80211_offchannel_return(struct ieee80211_local *local, - bool offchannel_ps_disable); +void ieee80211_offchannel_stop_vifs(struct ieee80211_local *local); +void ieee80211_offchannel_return(struct ieee80211_local *local); void ieee80211_hw_roc_setup(struct ieee80211_local *local); /* interface handling 
*/ @@ -1303,6 +1301,8 @@ netdev_tx_t ieee80211_monitor_start_xmit(struct sk_buff *skb, struct net_device *dev); netdev_tx_t ieee80211_subif_start_xmit(struct sk_buff *skb, struct net_device *dev); +void ieee80211_purge_tx_queue(struct ieee80211_hw *hw, + struct sk_buff_head *skbs); /* HT */ bool ieee80111_cfg_override_disables_ht40(struct ieee80211_sub_if_data *sdata); diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c index 25be6831ff09..abc31d7bd2a5 100644 --- a/net/mac80211/mlme.c +++ b/net/mac80211/mlme.c @@ -3232,6 +3232,8 @@ int ieee80211_mgd_auth(struct ieee80211_sub_if_data *sdata, goto out_unlock; err_clear: + memset(ifmgd->bssid, 0, ETH_ALEN); + ieee80211_bss_info_change_notify(sdata, BSS_CHANGED_BSSID); ifmgd->auth_data = NULL; err_free: kfree(auth_data); @@ -3410,6 +3412,8 @@ int ieee80211_mgd_assoc(struct ieee80211_sub_if_data *sdata, err = 0; goto out; err_clear: + memset(ifmgd->bssid, 0, ETH_ALEN); + ieee80211_bss_info_change_notify(sdata, BSS_CHANGED_BSSID); ifmgd->assoc_data = NULL; err_free: kfree(assoc_data); diff --git a/net/mac80211/offchannel.c b/net/mac80211/offchannel.c index 935aa4b6deee..c22f0748f4f6 100644 --- a/net/mac80211/offchannel.c +++ b/net/mac80211/offchannel.c @@ -103,8 +103,7 @@ static void ieee80211_offchannel_ps_disable(struct ieee80211_sub_if_data *sdata) ieee80211_sta_reset_conn_monitor(sdata); } -void ieee80211_offchannel_stop_vifs(struct ieee80211_local *local, - bool offchannel_ps_enable) +void ieee80211_offchannel_stop_vifs(struct ieee80211_local *local) { struct ieee80211_sub_if_data *sdata; @@ -129,8 +128,7 @@ void ieee80211_offchannel_stop_vifs(struct ieee80211_local *local, if (sdata->vif.type != NL80211_IFTYPE_MONITOR) { netif_tx_stop_all_queues(sdata->dev); - if (offchannel_ps_enable && - (sdata->vif.type == NL80211_IFTYPE_STATION) && + if (sdata->vif.type == NL80211_IFTYPE_STATION && sdata->u.mgd.associated) ieee80211_offchannel_ps_enable(sdata, true); } @@ -138,8 +136,7 @@ void ieee80211_offchannel_stop_vifs(struct ieee80211_local *local, mutex_unlock(&local->iflist_mtx); } -void ieee80211_offchannel_return(struct ieee80211_local *local, - bool offchannel_ps_disable) +void ieee80211_offchannel_return(struct ieee80211_local *local) { struct ieee80211_sub_if_data *sdata; @@ -152,11 +149,9 @@ void ieee80211_offchannel_return(struct ieee80211_local *local, continue; /* Tell AP we're back */ - if (offchannel_ps_disable && - sdata->vif.type == NL80211_IFTYPE_STATION) { - if (sdata->u.mgd.associated) - ieee80211_offchannel_ps_disable(sdata); - } + if (sdata->vif.type == NL80211_IFTYPE_STATION && + sdata->u.mgd.associated) + ieee80211_offchannel_ps_disable(sdata); if (sdata->vif.type != NL80211_IFTYPE_MONITOR) { /* diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c index c9b508ea9d6b..8ce9feb13010 100644 --- a/net/mac80211/rx.c +++ b/net/mac80211/rx.c @@ -513,6 +513,11 @@ ieee80211_rx_mesh_check(struct ieee80211_rx_data *rx) if (ieee80211_is_action(hdr->frame_control)) { u8 category; + + /* make sure category field is present */ + if (rx->skb->len < IEEE80211_MIN_ACTION_SIZE) + return RX_DROP_MONITOR; + mgmt = (struct ieee80211_mgmt *)hdr; category = mgmt->u.action.category; if (category != WLAN_CATEGORY_MESH_ACTION && @@ -869,14 +874,16 @@ ieee80211_rx_h_check(struct ieee80211_rx_data *rx) */ if (rx->sta && rx->sdata->vif.type == NL80211_IFTYPE_STATION && ieee80211_is_data_present(hdr->frame_control)) { - u16 ethertype; - u8 *payload; - - payload = rx->skb->data + - ieee80211_hdrlen(hdr->frame_control); - ethertype = (payload[6] << 8) | 
payload[7]; - if (cpu_to_be16(ethertype) == - rx->sdata->control_port_protocol) + unsigned int hdrlen; + __be16 ethertype; + + hdrlen = ieee80211_hdrlen(hdr->frame_control); + + if (rx->skb->len < hdrlen + 8) + return RX_DROP_MONITOR; + + skb_copy_bits(rx->skb, hdrlen + 6, &ethertype, 2); + if (ethertype == rx->sdata->control_port_protocol) return RX_CONTINUE; } @@ -1465,11 +1472,14 @@ ieee80211_rx_h_defragment(struct ieee80211_rx_data *rx) hdr = (struct ieee80211_hdr *)rx->skb->data; fc = hdr->frame_control; + + if (ieee80211_is_ctl(fc)) + return RX_CONTINUE; + sc = le16_to_cpu(hdr->seq_ctrl); frag = sc & IEEE80211_SCTL_FRAG; if (likely((!ieee80211_has_morefrags(fc) && frag == 0) || - (rx->skb)->len < 24 || is_multicast_ether_addr(hdr->addr1))) { /* not fragmented */ goto out; @@ -1892,6 +1902,20 @@ ieee80211_rx_h_mesh_fwding(struct ieee80211_rx_data *rx) hdr = (struct ieee80211_hdr *) skb->data; hdrlen = ieee80211_hdrlen(hdr->frame_control); + + /* make sure fixed part of mesh header is there, also checks skb len */ + if (!pskb_may_pull(rx->skb, hdrlen + 6)) + return RX_DROP_MONITOR; + + mesh_hdr = (struct ieee80211s_hdr *) (skb->data + hdrlen); + + /* make sure full mesh header is there, also checks skb len */ + if (!pskb_may_pull(rx->skb, + hdrlen + ieee80211_get_mesh_hdrlen(mesh_hdr))) + return RX_DROP_MONITOR; + + /* reload pointers */ + hdr = (struct ieee80211_hdr *) skb->data; mesh_hdr = (struct ieee80211s_hdr *) (skb->data + hdrlen); /* frame is in RMC, don't forward */ @@ -1900,7 +1924,8 @@ ieee80211_rx_h_mesh_fwding(struct ieee80211_rx_data *rx) mesh_rmc_check(hdr->addr3, mesh_hdr, rx->sdata)) return RX_DROP_MONITOR; - if (!ieee80211_is_data(hdr->frame_control)) + if (!ieee80211_is_data(hdr->frame_control) || + !(status->rx_flags & IEEE80211_RX_RA_MATCH)) return RX_CONTINUE; if (!mesh_hdr->ttl) @@ -1914,9 +1939,12 @@ ieee80211_rx_h_mesh_fwding(struct ieee80211_rx_data *rx) if (is_multicast_ether_addr(hdr->addr1)) { mpp_addr = hdr->addr3; proxied_addr = mesh_hdr->eaddr1; - } else { + } else if (mesh_hdr->flags & MESH_FLAGS_AE_A5_A6) { + /* has_a4 already checked in ieee80211_rx_mesh_check */ mpp_addr = hdr->addr4; proxied_addr = mesh_hdr->eaddr2; + } else { + return RX_DROP_MONITOR; } rcu_read_lock(); @@ -1944,9 +1972,6 @@ ieee80211_rx_h_mesh_fwding(struct ieee80211_rx_data *rx) } skb_set_queue_mapping(skb, q); - if (!(status->rx_flags & IEEE80211_RX_RA_MATCH)) - goto out; - if (!--mesh_hdr->ttl) { IEEE80211_IFSTA_MESH_CTR_INC(ifmsh, dropped_frames_ttl); return RX_DROP_MONITOR; @@ -2361,6 +2386,10 @@ ieee80211_rx_h_action(struct ieee80211_rx_data *rx) } break; case WLAN_CATEGORY_SELF_PROTECTED: + if (len < (IEEE80211_MIN_ACTION_SIZE + + sizeof(mgmt->u.action.u.self_prot.action_code))) + break; + switch (mgmt->u.action.u.self_prot.action_code) { case WLAN_SP_MESH_PEERING_OPEN: case WLAN_SP_MESH_PEERING_CLOSE: @@ -2379,6 +2408,10 @@ ieee80211_rx_h_action(struct ieee80211_rx_data *rx) } break; case WLAN_CATEGORY_MESH_ACTION: + if (len < (IEEE80211_MIN_ACTION_SIZE + + sizeof(mgmt->u.action.u.mesh_action.action_code))) + break; + if (!ieee80211_vif_is_mesh(&sdata->vif)) break; if (mesh_action_is_path_sel(mgmt) && @@ -2927,10 +2960,15 @@ static void __ieee80211_rx_handle_packet(struct ieee80211_hw *hw, test_bit(SCAN_SW_SCANNING, &local->scanning))) status->rx_flags |= IEEE80211_RX_IN_SCAN; - if (ieee80211_is_mgmt(fc)) - err = skb_linearize(skb); - else + if (ieee80211_is_mgmt(fc)) { + /* drop frame if too short for header */ + if (skb->len < ieee80211_hdrlen(fc)) + err = -ENOBUFS; + else 
+ err = skb_linearize(skb); + } else { err = !pskb_may_pull(skb, ieee80211_hdrlen(fc)); + } if (err) { dev_kfree_skb(skb); diff --git a/net/mac80211/scan.c b/net/mac80211/scan.c index 9f8412c48960..e7e0e368cc34 100644 --- a/net/mac80211/scan.c +++ b/net/mac80211/scan.c @@ -329,7 +329,7 @@ static void __ieee80211_scan_completed(struct ieee80211_hw *hw, bool aborted, if (!was_hw_scan) { ieee80211_configure_filter(local); drv_sw_scan_complete(local); - ieee80211_offchannel_return(local, true); + ieee80211_offchannel_return(local); } ieee80211_recalc_idle(local); @@ -374,7 +374,7 @@ static int ieee80211_start_sw_scan(struct ieee80211_local *local) local->next_scan_state = SCAN_DECISION; local->scan_channel_idx = 0; - ieee80211_offchannel_stop_vifs(local, true); + ieee80211_offchannel_stop_vifs(local); ieee80211_configure_filter(local); @@ -653,12 +653,8 @@ static void ieee80211_scan_state_suspend(struct ieee80211_local *local, local->scan_channel = NULL; ieee80211_hw_config(local, IEEE80211_CONF_CHANGE_CHANNEL); - /* - * Re-enable vifs and beaconing. Leave PS - * in off-channel state..will put that back - * on-channel at the end of scanning. - */ - ieee80211_offchannel_return(local, false); + /* disable PS */ + ieee80211_offchannel_return(local); #ifndef CONFIG_MAC80211_SCAN_ABORT *next_delay = HZ / 5; @@ -679,8 +675,7 @@ static void ieee80211_scan_state_suspend(struct ieee80211_local *local, static void ieee80211_scan_state_resume(struct ieee80211_local *local, unsigned long *next_delay) { - /* PS already is in off-channel mode */ - ieee80211_offchannel_stop_vifs(local, false); + ieee80211_offchannel_stop_vifs(local); if (local->ops->flush) { drv_flush(local, false); @@ -804,9 +799,9 @@ int ieee80211_request_scan(struct ieee80211_sub_if_data *sdata, return res; } -int ieee80211_request_internal_scan(struct ieee80211_sub_if_data *sdata, - const u8 *ssid, u8 ssid_len, - struct ieee80211_channel *chan) +int ieee80211_request_ibss_scan(struct ieee80211_sub_if_data *sdata, + const u8 *ssid, u8 ssid_len, + struct ieee80211_channel *chan) { struct ieee80211_local *local = sdata->local; int ret = -EBUSY; @@ -820,22 +815,36 @@ int ieee80211_request_internal_scan(struct ieee80211_sub_if_data *sdata, /* fill internal scan request */ if (!chan) { - int i, nchan = 0; + int i, max_n; + int n_ch = 0; for (band = 0; band < IEEE80211_NUM_BANDS; band++) { if (!local->hw.wiphy->bands[band]) continue; - for (i = 0; - i < local->hw.wiphy->bands[band]->n_channels; - i++) { - local->int_scan_req->channels[nchan] = + + max_n = local->hw.wiphy->bands[band]->n_channels; + for (i = 0; i < max_n; i++) { + struct ieee80211_channel *tmp_ch = &local->hw.wiphy->bands[band]->channels[i]; - nchan++; + + if (tmp_ch->flags & (IEEE80211_CHAN_NO_IBSS | + IEEE80211_CHAN_DISABLED)) + continue; + + local->int_scan_req->channels[n_ch] = tmp_ch; + n_ch++; } } - local->int_scan_req->n_channels = nchan; + if (WARN_ON_ONCE(n_ch == 0)) + goto unlock; + + local->int_scan_req->n_channels = n_ch; } else { + if (WARN_ON_ONCE(chan->flags & (IEEE80211_CHAN_NO_IBSS | + IEEE80211_CHAN_DISABLED))) + goto unlock; + local->int_scan_req->channels[0] = chan; local->int_scan_req->n_channels = 1; } diff --git a/net/mac80211/sta_info.c b/net/mac80211/sta_info.c index d93d39b84344..e2e0e0bc6622 100644 --- a/net/mac80211/sta_info.c +++ b/net/mac80211/sta_info.c @@ -738,8 +738,8 @@ int __must_check __sta_info_destroy(struct sta_info *sta) for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) { local->total_ps_buffered -= skb_queue_len(&sta->ps_tx_buf[ac]); - 
__skb_queue_purge(&sta->ps_tx_buf[ac]); - __skb_queue_purge(&sta->tx_filtered[ac]); + ieee80211_purge_tx_queue(&local->hw, &sta->ps_tx_buf[ac]); + ieee80211_purge_tx_queue(&local->hw, &sta->tx_filtered[ac]); } #ifdef CONFIG_MAC80211_MESH @@ -774,7 +774,7 @@ int __must_check __sta_info_destroy(struct sta_info *sta) tid_tx = rcu_dereference_raw(sta->ampdu_mlme.tid_tx[i]); if (!tid_tx) continue; - __skb_queue_purge(&tid_tx->pending); + ieee80211_purge_tx_queue(&local->hw, &tid_tx->pending); kfree(tid_tx); } @@ -844,7 +844,7 @@ void sta_info_init(struct ieee80211_local *local) void sta_info_stop(struct ieee80211_local *local) { - del_timer(&local->sta_cleanup); + del_timer_sync(&local->sta_cleanup); sta_info_flush(local, NULL); } @@ -959,6 +959,7 @@ void ieee80211_sta_ps_deliver_wakeup(struct sta_info *sta) struct ieee80211_local *local = sdata->local; struct sk_buff_head pending; int filtered = 0, buffered = 0, ac; + unsigned long flags; clear_sta_flag(sta, WLAN_STA_SP); @@ -974,12 +975,16 @@ void ieee80211_sta_ps_deliver_wakeup(struct sta_info *sta) for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) { int count = skb_queue_len(&pending), tmp; + spin_lock_irqsave(&sta->tx_filtered[ac].lock, flags); skb_queue_splice_tail_init(&sta->tx_filtered[ac], &pending); + spin_unlock_irqrestore(&sta->tx_filtered[ac].lock, flags); tmp = skb_queue_len(&pending); filtered += tmp - count; count = tmp; + spin_lock_irqsave(&sta->ps_tx_buf[ac].lock, flags); skb_queue_splice_tail_init(&sta->ps_tx_buf[ac], &pending); + spin_unlock_irqrestore(&sta->ps_tx_buf[ac].lock, flags); tmp = skb_queue_len(&pending); buffered += tmp - count; } diff --git a/net/mac80211/status.c b/net/mac80211/status.c index 5f8f89e89d6b..47b117f3f567 100644 --- a/net/mac80211/status.c +++ b/net/mac80211/status.c @@ -660,3 +660,12 @@ void ieee80211_free_txskb(struct ieee80211_hw *hw, struct sk_buff *skb) dev_kfree_skb_any(skb); } EXPORT_SYMBOL(ieee80211_free_txskb); + +void ieee80211_purge_tx_queue(struct ieee80211_hw *hw, + struct sk_buff_head *skbs) +{ + struct sk_buff *skb; + + while ((skb = __skb_dequeue(skbs))) + ieee80211_free_txskb(hw, skb); +} diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c index e76facc69e95..eace7664c805 100644 --- a/net/mac80211/tx.c +++ b/net/mac80211/tx.c @@ -1357,7 +1357,7 @@ static int invoke_tx_handlers(struct ieee80211_tx_data *tx) if (tx->skb) dev_kfree_skb(tx->skb); else - __skb_queue_purge(&tx->skbs); + ieee80211_purge_tx_queue(&tx->local->hw, &tx->skbs); return -1; } else if (unlikely(res == TX_QUEUED)) { I802_DEBUG_INC(tx->local->tx_handlers_queued); @@ -2126,10 +2126,13 @@ netdev_tx_t ieee80211_subif_start_xmit(struct sk_buff *skb, */ void ieee80211_clear_tx_pending(struct ieee80211_local *local) { + struct sk_buff *skb; int i; - for (i = 0; i < local->hw.queues; i++) - skb_queue_purge(&local->pending[i]); + for (i = 0; i < local->hw.queues; i++) { + while ((skb = skb_dequeue(&local->pending[i])) != NULL) + ieee80211_free_txskb(&local->hw, skb); + } } /* diff --git a/net/mac80211/util.c b/net/mac80211/util.c index eb9d7c0529b6..73ef163a0391 100644 --- a/net/mac80211/util.c +++ b/net/mac80211/util.c @@ -592,13 +592,38 @@ u32 ieee802_11_parse_elems_crc(u8 *start, size_t len, break; } - if (id != WLAN_EID_VENDOR_SPECIFIC && - id != WLAN_EID_QUIET && - test_bit(id, seen_elems)) { - elems->parse_error = true; - left -= elen; - pos += elen; - continue; + switch (id) { + case WLAN_EID_SSID: + case WLAN_EID_SUPP_RATES: + case WLAN_EID_FH_PARAMS: + case WLAN_EID_DS_PARAMS: + case WLAN_EID_CF_PARAMS: + case 
WLAN_EID_TIM: + case WLAN_EID_IBSS_PARAMS: + case WLAN_EID_CHALLENGE: + case WLAN_EID_RSN: + case WLAN_EID_ERP_INFO: + case WLAN_EID_EXT_SUPP_RATES: + case WLAN_EID_HT_CAPABILITY: + case WLAN_EID_MESH_ID: + case WLAN_EID_MESH_CONFIG: + case WLAN_EID_PEER_MGMT: + case WLAN_EID_PREQ: + case WLAN_EID_PREP: + case WLAN_EID_PERR: + case WLAN_EID_RANN: + case WLAN_EID_CHANNEL_SWITCH: + case WLAN_EID_EXT_CHANSWITCH_ANN: + case WLAN_EID_COUNTRY: + case WLAN_EID_PWR_CONSTRAINT: + case WLAN_EID_TIMEOUT_INTERVAL: + if (test_bit(id, seen_elems)) { + elems->parse_error = true; + left -= elen; + pos += elen; + continue; + } + break; } if (calc_crc && id < 64 && (filter & (1ULL << id))) @@ -1316,6 +1341,8 @@ int ieee80211_reconfig(struct ieee80211_local *local) list_for_each_entry(sdata, &local->interfaces, list) { if (sdata->vif.type != NL80211_IFTYPE_STATION) continue; + if (!sdata->u.mgd.associated) + continue; ieee80211_send_nullfunc(local, sdata, 0); } diff --git a/net/mac80211/work.c b/net/mac80211/work.c index c6e230efa049..a74f53894d69 100644 --- a/net/mac80211/work.c +++ b/net/mac80211/work.c @@ -148,7 +148,7 @@ static void ieee80211_work_work(struct work_struct *work) } if (!started && !local->tmp_channel) { - ieee80211_offchannel_stop_vifs(local, true); + ieee80211_offchannel_stop_vifs(local); local->tmp_channel = wk->chan; local->tmp_channel_type = wk->chan_type; @@ -220,7 +220,7 @@ static void ieee80211_work_work(struct work_struct *work) local->tmp_channel = NULL; ieee80211_hw_config(local, 0); - ieee80211_offchannel_return(local, true); + ieee80211_offchannel_return(local); /* give connection some time to breathe */ run_again(local, jiffies + HZ/2); diff --git a/net/mac80211/wpa.c b/net/mac80211/wpa.c index 0ae23c60968c..ea6d03bd5d5f 100644 --- a/net/mac80211/wpa.c +++ b/net/mac80211/wpa.c @@ -106,7 +106,8 @@ ieee80211_rx_h_michael_mic_verify(struct ieee80211_rx_data *rx) if (status->flag & RX_FLAG_MMIC_ERROR) goto mic_fail; - if (!(status->flag & RX_FLAG_IV_STRIPPED) && rx->key) + if (!(status->flag & RX_FLAG_IV_STRIPPED) && rx->key && + rx->key->conf.cipher == WLAN_CIPHER_SUITE_TKIP) goto update_iv; return RX_CONTINUE; diff --git a/net/netfilter/ipvs/ip_vs_ctl.c b/net/netfilter/ipvs/ip_vs_ctl.c index f5589987fc80..cbc5bfd8c8e4 100644 --- a/net/netfilter/ipvs/ip_vs_ctl.c +++ b/net/netfilter/ipvs/ip_vs_ctl.c @@ -1521,11 +1521,12 @@ static int ip_vs_dst_event(struct notifier_block *this, unsigned long event, { struct net_device *dev = ptr; struct net *net = dev_net(dev); + struct netns_ipvs *ipvs = net_ipvs(net); struct ip_vs_service *svc; struct ip_vs_dest *dest; unsigned int idx; - if (event != NETDEV_UNREGISTER) + if (event != NETDEV_UNREGISTER || !ipvs) return NOTIFY_DONE; IP_VS_DBG(3, "%s() dev=%s\n", __func__, dev->name); EnterFunction(2); @@ -1551,7 +1552,7 @@ static int ip_vs_dst_event(struct notifier_block *this, unsigned long event, } } - list_for_each_entry(dest, &net_ipvs(net)->dest_trash, n_list) { + list_for_each_entry(dest, &ipvs->dest_trash, n_list) { __ip_vs_dev_reset(dest, dev); } mutex_unlock(&__ip_vs_mutex); @@ -2713,6 +2714,7 @@ do_ip_vs_get_ctl(struct sock *sk, int cmd, void __user *user, int *len) { struct ip_vs_timeout_user t; + memset(&t, 0, sizeof(t)); __ip_vs_get_timeouts(net, &t); if (copy_to_user(user, &t, sizeof(t)) != 0) ret = -EFAULT; diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c index 729f157a0efa..9a171b2445b1 100644 --- a/net/netfilter/nf_conntrack_core.c +++ b/net/netfilter/nf_conntrack_core.c @@ -249,12 +249,15 @@ 
static void death_by_event(unsigned long ul_conntrack) { struct nf_conn *ct = (void *)ul_conntrack; struct net *net = nf_ct_net(ct); + struct nf_conntrack_ecache *ecache = nf_ct_ecache_find(ct); + + BUG_ON(ecache == NULL); if (nf_conntrack_event(IPCT_DESTROY, ct) < 0) { /* bad luck, let's retry again */ - ct->timeout.expires = jiffies + + ecache->timeout.expires = jiffies + (random32() % net->ct.sysctl_events_retry_timeout); - add_timer(&ct->timeout); + add_timer(&ecache->timeout); return; } /* we've got the event delivered, now it's dying */ @@ -268,6 +271,9 @@ static void death_by_event(unsigned long ul_conntrack) void nf_ct_insert_dying_list(struct nf_conn *ct) { struct net *net = nf_ct_net(ct); + struct nf_conntrack_ecache *ecache = nf_ct_ecache_find(ct); + + BUG_ON(ecache == NULL); /* add this conntrack to the dying list */ spin_lock_bh(&nf_conntrack_lock); @@ -275,10 +281,10 @@ void nf_ct_insert_dying_list(struct nf_conn *ct) &net->ct.dying); spin_unlock_bh(&nf_conntrack_lock); /* set a new timer to retry event delivery */ - setup_timer(&ct->timeout, death_by_event, (unsigned long)ct); - ct->timeout.expires = jiffies + + setup_timer(&ecache->timeout, death_by_event, (unsigned long)ct); + ecache->timeout.expires = jiffies + (random32() % net->ct.sysctl_events_retry_timeout); - add_timer(&ct->timeout); + add_timer(&ecache->timeout); } EXPORT_SYMBOL_GPL(nf_ct_insert_dying_list); diff --git a/net/netfilter/nf_conntrack_expect.c b/net/netfilter/nf_conntrack_expect.c index 4147ba3f653c..e41ec849120a 100644 --- a/net/netfilter/nf_conntrack_expect.c +++ b/net/netfilter/nf_conntrack_expect.c @@ -361,23 +361,6 @@ static void evict_oldest_expect(struct nf_conn *master, } } -static inline int refresh_timer(struct nf_conntrack_expect *i) -{ - struct nf_conn_help *master_help = nfct_help(i->master); - const struct nf_conntrack_expect_policy *p; - - if (!del_timer(&i->timeout)) - return 0; - - p = &rcu_dereference_protected( - master_help->helper, - lockdep_is_held(&nf_conntrack_lock) - )->expect_policy[i->class]; - i->timeout.expires = jiffies + p->timeout * HZ; - add_timer(&i->timeout); - return 1; -} - static inline int __nf_ct_expect_check(struct nf_conntrack_expect *expect) { const struct nf_conntrack_expect_policy *p; @@ -386,7 +369,7 @@ static inline int __nf_ct_expect_check(struct nf_conntrack_expect *expect) struct nf_conn_help *master_help = nfct_help(master); struct nf_conntrack_helper *helper; struct net *net = nf_ct_exp_net(expect); - struct hlist_node *n; + struct hlist_node *n, *next; unsigned int h; int ret = 1; @@ -395,12 +378,12 @@ static inline int __nf_ct_expect_check(struct nf_conntrack_expect *expect) goto out; } h = nf_ct_expect_dst_hash(&expect->tuple); - hlist_for_each_entry(i, n, &net->ct.expect_hash[h], hnode) { + hlist_for_each_entry_safe(i, n, next, &net->ct.expect_hash[h], hnode) { if (expect_matches(i, expect)) { - /* Refresh timer: if it's dying, ignore.. 
*/ - if (refresh_timer(i)) { - ret = 0; - goto out; + if (del_timer(&i->timeout)) { + nf_ct_unlink_expect(i); + nf_ct_expect_put(i); + break; } } else if (expect_clash(i, expect)) { ret = -EBUSY; diff --git a/net/netfilter/nf_conntrack_proto_tcp.c b/net/netfilter/nf_conntrack_proto_tcp.c index 0d07a1dcf605..e0221238e749 100644 --- a/net/netfilter/nf_conntrack_proto_tcp.c +++ b/net/netfilter/nf_conntrack_proto_tcp.c @@ -158,21 +158,18 @@ static const u8 tcp_conntracks[2][6][TCP_CONNTRACK_MAX] = { * sCL -> sSS */ /* sNO, sSS, sSR, sES, sFW, sCW, sLA, sTW, sCL, sS2 */ -/*synack*/ { sIV, sIV, sIG, sIG, sIG, sIG, sIG, sIG, sIG, sSR }, +/*synack*/ { sIV, sIV, sSR, sIV, sIV, sIV, sIV, sIV, sIV, sSR }, /* * sNO -> sIV Too late and no reason to do anything * sSS -> sIV Client can't send SYN and then SYN/ACK * sS2 -> sSR SYN/ACK sent to SYN2 in simultaneous open - * sSR -> sIG - * sES -> sIG Error: SYNs in window outside the SYN_SENT state - * are errors. Receiver will reply with RST - * and close the connection. - * Or we are not in sync and hold a dead connection. - * sFW -> sIG - * sCW -> sIG - * sLA -> sIG - * sTW -> sIG - * sCL -> sIG + * sSR -> sSR Late retransmitted SYN/ACK in simultaneous open + * sES -> sIV Invalid SYN/ACK packets sent by the client + * sFW -> sIV + * sCW -> sIV + * sLA -> sIV + * sTW -> sIV + * sCL -> sIV */ /* sNO, sSS, sSR, sES, sFW, sCW, sLA, sTW, sCL, sS2 */ /*fin*/ { sIV, sIV, sFW, sFW, sLA, sLA, sLA, sTW, sCL, sIV }, @@ -627,15 +624,9 @@ static bool tcp_in_window(const struct nf_conn *ct, ack = sack = receiver->td_end; } - if (seq == end - && (!tcph->rst - || (seq == 0 && state->state == TCP_CONNTRACK_SYN_SENT))) + if (tcph->rst && seq == 0 && state->state == TCP_CONNTRACK_SYN_SENT) /* - * Packets contains no data: we assume it is valid - * and check the ack value only. - * However RST segments are always validated by their - * SEQ number, except when seq == 0 (reset sent answering - * SYN. + * RST sent answering SYN. */ seq = end = sender->td_end; diff --git a/net/netfilter/xt_hashlimit.c b/net/netfilter/xt_hashlimit.c index d95f9c963cde..2195eb0727a3 100644 --- a/net/netfilter/xt_hashlimit.c +++ b/net/netfilter/xt_hashlimit.c @@ -389,8 +389,7 @@ static void htable_put(struct xt_hashlimit_htable *hinfo) #define CREDITS_PER_JIFFY POW2_BELOW32(MAX_CPJ) /* Precision saver. */ -static inline u_int32_t -user2credits(u_int32_t user) +static u32 user2credits(u32 user) { /* If multiplying would overflow... 
*/ if (user > 0xFFFFFFFF / (HZ*CREDITS_PER_JIFFY)) @@ -400,7 +399,7 @@ user2credits(u_int32_t user) return (user * HZ * CREDITS_PER_JIFFY) / XT_HASHLIMIT_SCALE; } -static inline void rateinfo_recalc(struct dsthash_ent *dh, unsigned long now) +static void rateinfo_recalc(struct dsthash_ent *dh, unsigned long now) { dh->rateinfo.credit += (now - dh->rateinfo.prev) * CREDITS_PER_JIFFY; if (dh->rateinfo.credit > dh->rateinfo.credit_cap) @@ -535,8 +534,7 @@ hashlimit_mt(const struct sk_buff *skb, struct xt_action_param *par) dh->rateinfo.prev = jiffies; dh->rateinfo.credit = user2credits(hinfo->cfg.avg * hinfo->cfg.burst); - dh->rateinfo.credit_cap = user2credits(hinfo->cfg.avg * - hinfo->cfg.burst); + dh->rateinfo.credit_cap = dh->rateinfo.credit; dh->rateinfo.cost = user2credits(hinfo->cfg.avg); } else { /* update expiration timeout */ diff --git a/net/netfilter/xt_limit.c b/net/netfilter/xt_limit.c index 32b7a579a032..a4c1e4528cac 100644 --- a/net/netfilter/xt_limit.c +++ b/net/netfilter/xt_limit.c @@ -88,8 +88,7 @@ limit_mt(const struct sk_buff *skb, struct xt_action_param *par) } /* Precision saver. */ -static u_int32_t -user2credits(u_int32_t user) +static u32 user2credits(u32 user) { /* If multiplying would overflow... */ if (user > 0xFFFFFFFF / (HZ*CREDITS_PER_JIFFY)) @@ -118,12 +117,12 @@ static int limit_mt_check(const struct xt_mtchk_param *par) /* For SMP, we only want to use one set of state. */ r->master = priv; + /* User avg in seconds * XT_LIMIT_SCALE: convert to jiffies * + 128. */ + priv->prev = jiffies; + priv->credit = user2credits(r->avg * r->burst); /* Credits full. */ if (r->cost == 0) { - /* User avg in seconds * XT_LIMIT_SCALE: convert to jiffies * - 128. */ - priv->prev = jiffies; - priv->credit = user2credits(r->avg * r->burst); /* Credits full. */ - r->credit_cap = user2credits(r->avg * r->burst); /* Credits full. */ + r->credit_cap = priv->credit; /* Credits full. 
*/ r->cost = user2credits(r->avg); } return 0; diff --git a/net/netfilter/xt_set.c b/net/netfilter/xt_set.c index 0ec8138aa470..c6f7db720d84 100644 --- a/net/netfilter/xt_set.c +++ b/net/netfilter/xt_set.c @@ -16,6 +16,7 @@ #include <linux/netfilter/x_tables.h> #include <linux/netfilter/xt_set.h> +#include <linux/netfilter/ipset/ip_set_timeout.h> MODULE_LICENSE("GPL"); MODULE_AUTHOR("Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>"); @@ -44,6 +45,14 @@ const struct ip_set_adt_opt n = { \ .cmdflags = cfs, \ .timeout = t, \ } +#define ADT_MOPT(n, f, d, fs, cfs, t) \ +struct ip_set_adt_opt n = { \ + .family = f, \ + .dim = d, \ + .flags = fs, \ + .cmdflags = cfs, \ + .timeout = t, \ +} /* Revision 0 interface: backward compatible with netfilter/iptables */ @@ -296,11 +305,15 @@ static unsigned int set_target_v2(struct sk_buff *skb, const struct xt_action_param *par) { const struct xt_set_info_target_v2 *info = par->targinfo; - ADT_OPT(add_opt, par->family, info->add_set.dim, - info->add_set.flags, info->flags, info->timeout); + ADT_MOPT(add_opt, par->family, info->add_set.dim, + info->add_set.flags, info->flags, info->timeout); ADT_OPT(del_opt, par->family, info->del_set.dim, info->del_set.flags, 0, UINT_MAX); + /* Normalize to fit into jiffies */ + if (add_opt.timeout != IPSET_NO_TIMEOUT && + add_opt.timeout > UINT_MAX/MSEC_PER_SEC) + add_opt.timeout = UINT_MAX/MSEC_PER_SEC; if (info->add_set.index != IPSET_INVALID_ID) ip_set_add(info->add_set.index, skb, par, &add_opt); if (info->del_set.index != IPSET_INVALID_ID) diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c index faa48f70b7c9..9017e3ef8fee 100644 --- a/net/netlink/af_netlink.c +++ b/net/netlink/af_netlink.c @@ -137,6 +137,8 @@ static void netlink_destroy_callback(struct netlink_callback *cb); static DEFINE_RWLOCK(nl_table_lock); static atomic_t nl_table_users = ATOMIC_INIT(0); +#define nl_deref_protected(X) rcu_dereference_protected(X, lockdep_is_held(&nl_table_lock)); + static ATOMIC_NOTIFIER_HEAD(netlink_chain); static inline u32 netlink_group_mask(u32 group) @@ -156,6 +158,8 @@ static void netlink_sock_destruct(struct sock *sk) if (nlk->cb) { if (nlk->cb->done) nlk->cb->done(nlk->cb); + + module_put(nlk->cb->module); netlink_destroy_callback(nlk->cb); } @@ -330,6 +334,11 @@ netlink_update_listeners(struct sock *sk) struct hlist_node *node; unsigned long mask; unsigned int i; + struct listeners *listeners; + + listeners = nl_deref_protected(tbl->listeners); + if (!listeners) + return; for (i = 0; i < NLGRPLONGS(tbl->groups); i++) { mask = 0; @@ -337,7 +346,7 @@ netlink_update_listeners(struct sock *sk) if (i < NLGRPLONGS(nlk_sk(sk)->ngroups)) mask |= nlk_sk(sk)->groups[i]; } - tbl->listeners->masks[i] = mask; + listeners->masks[i] = mask; } /* this function is only called with the netlink table "grabbed", which * makes sure updates are visible before bind or setsockopt return. 
*/ @@ -518,7 +527,11 @@ static int netlink_release(struct socket *sock) if (netlink_is_kernel(sk)) { BUG_ON(nl_table[sk->sk_protocol].registered == 0); if (--nl_table[sk->sk_protocol].registered == 0) { - kfree(nl_table[sk->sk_protocol].listeners); + struct listeners *old; + + old = nl_deref_protected(nl_table[sk->sk_protocol].listeners); + RCU_INIT_POINTER(nl_table[sk->sk_protocol].listeners, NULL); + kfree_rcu(old, rcu); nl_table[sk->sk_protocol].module = NULL; nl_table[sk->sk_protocol].registered = 0; } @@ -948,7 +961,7 @@ int netlink_has_listeners(struct sock *sk, unsigned int group) rcu_read_lock(); listeners = rcu_dereference(nl_table[sk->sk_protocol].listeners); - if (group - 1 < nl_table[sk->sk_protocol].groups) + if (listeners && group - 1 < nl_table[sk->sk_protocol].groups) res = test_bit(group - 1, listeners->masks); rcu_read_unlock(); @@ -1329,7 +1342,7 @@ static int netlink_sendmsg(struct kiocb *kiocb, struct socket *sock, if (NULL == siocb->scm) siocb->scm = &scm; - err = scm_send(sock, msg, siocb->scm); + err = scm_send(sock, msg, siocb->scm, true); if (err < 0) return err; @@ -1340,7 +1353,8 @@ static int netlink_sendmsg(struct kiocb *kiocb, struct socket *sock, dst_pid = addr->nl_pid; dst_group = ffs(addr->nl_groups); err = -EPERM; - if (dst_group && !netlink_capable(sock, NL_NONROOT_SEND)) + if ((dst_group || dst_pid) && + !netlink_capable(sock, NL_NONROOT_SEND)) goto out; } else { dst_pid = nlk->dst_pid; @@ -1579,7 +1593,7 @@ int __netlink_change_ngroups(struct sock *sk, unsigned int groups) new = kzalloc(sizeof(*new) + NLGRPSZ(groups), GFP_ATOMIC); if (!new) return -ENOMEM; - old = rcu_dereference_protected(tbl->listeners, 1); + old = nl_deref_protected(tbl->listeners); memcpy(new->masks, old->masks, NLGRPSZ(tbl->groups)); rcu_assign_pointer(tbl->listeners, new); @@ -1727,6 +1741,7 @@ static int netlink_dump(struct sock *sk) nlk->cb = NULL; mutex_unlock(nlk->cb_mutex); + module_put(cb->module); netlink_destroy_callback(cb); return 0; @@ -1736,9 +1751,9 @@ errout_skb: return err; } -int netlink_dump_start(struct sock *ssk, struct sk_buff *skb, - const struct nlmsghdr *nlh, - struct netlink_dump_control *control) +int __netlink_dump_start(struct sock *ssk, struct sk_buff *skb, + const struct nlmsghdr *nlh, + struct netlink_dump_control *control) { struct netlink_callback *cb; struct sock *sk; @@ -1753,6 +1768,7 @@ int netlink_dump_start(struct sock *ssk, struct sk_buff *skb, cb->done = control->done; cb->nlh = nlh; cb->data = control->data; + cb->module = control->module; cb->min_dump_alloc = control->min_dump_alloc; atomic_inc(&skb->users); cb->skb = skb; @@ -1763,19 +1779,28 @@ int netlink_dump_start(struct sock *ssk, struct sk_buff *skb, return -ECONNREFUSED; } nlk = nlk_sk(sk); - /* A dump is in progress... */ + mutex_lock(nlk->cb_mutex); + /* A dump is in progress... 
*/ if (nlk->cb) { mutex_unlock(nlk->cb_mutex); netlink_destroy_callback(cb); - sock_put(sk); - return -EBUSY; + ret = -EBUSY; + goto out; } + /* add reference of module which cb->dump belongs to */ + if (!try_module_get(cb->module)) { + mutex_unlock(nlk->cb_mutex); + netlink_destroy_callback(cb); + ret = -EPROTONOSUPPORT; + goto out; + } + nlk->cb = cb; mutex_unlock(nlk->cb_mutex); ret = netlink_dump(sk); - +out: sock_put(sk); if (ret) @@ -1786,7 +1811,7 @@ int netlink_dump_start(struct sock *ssk, struct sk_buff *skb, */ return -EINTR; } -EXPORT_SYMBOL(netlink_dump_start); +EXPORT_SYMBOL(__netlink_dump_start); void netlink_ack(struct sk_buff *in_skb, struct nlmsghdr *nlh, int err) { @@ -2115,6 +2140,7 @@ static void __init netlink_add_usersock_entry(void) rcu_assign_pointer(nl_table[NETLINK_USERSOCK].listeners, listeners); nl_table[NETLINK_USERSOCK].module = THIS_MODULE; nl_table[NETLINK_USERSOCK].registered = 1; + nl_table[NETLINK_USERSOCK].nl_nonroot = NL_NONROOT_SEND; netlink_table_ungrab(); } diff --git a/net/netrom/af_netrom.c b/net/netrom/af_netrom.c index 06592d8b4a2b..1b9024ee963c 100644 --- a/net/netrom/af_netrom.c +++ b/net/netrom/af_netrom.c @@ -1169,7 +1169,12 @@ static int nr_recvmsg(struct kiocb *iocb, struct socket *sock, msg->msg_flags |= MSG_TRUNC; } - skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied); + er = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied); + if (er < 0) { + skb_free_datagram(sk, skb); + release_sock(sk); + return er; + } if (sax != NULL) { sax->sax25_family = AF_NETROM; diff --git a/net/nfc/llcp/llcp.c b/net/nfc/llcp/llcp.c index 17a578f641f1..c40112c39e15 100644 --- a/net/nfc/llcp/llcp.c +++ b/net/nfc/llcp/llcp.c @@ -966,7 +966,7 @@ int nfc_llcp_register_device(struct nfc_dev *ndev) local->remote_lto = LLCP_DEFAULT_LTO; local->remote_rw = LLCP_DEFAULT_RW; - list_add(&llcp_devices, &local->list); + list_add(&local->list, &llcp_devices); return 0; diff --git a/net/openvswitch/vport-internal_dev.c b/net/openvswitch/vport-internal_dev.c index b6b1d7daa3cb..ce5348f5f601 100644 --- a/net/openvswitch/vport-internal_dev.c +++ b/net/openvswitch/vport-internal_dev.c @@ -24,6 +24,9 @@ #include <linux/ethtool.h> #include <linux/skbuff.h> +#include <net/dst.h> +#include <net/xfrm.h> + #include "datapath.h" #include "vport-internal_dev.h" #include "vport-netdev.h" @@ -209,6 +212,11 @@ static int internal_dev_recv(struct vport *vport, struct sk_buff *skb) int len; len = skb->len; + + skb_dst_drop(skb); + nf_reset(skb); + secpath_reset(skb); + skb->dev = netdev; skb->pkt_type = PACKET_HOST; skb->protocol = eth_type_trans(skb, netdev); diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c index 4f2c0df79563..38ca5e07d520 100644 --- a/net/packet/af_packet.c +++ b/net/packet/af_packet.c @@ -1280,6 +1280,14 @@ static void __fanout_unlink(struct sock *sk, struct packet_sock *po) spin_unlock(&f->lock); } +bool match_fanout_group(struct packet_type *ptype, struct sock * sk) +{ + if (ptype->af_packet_priv == (void*)((struct packet_sock *)sk)->fanout) + return true; + + return false; +} + static int fanout_add(struct sock *sk, u16 id, u16 type_flags) { struct packet_sock *po = pkt_sk(sk); @@ -1332,6 +1340,7 @@ static int fanout_add(struct sock *sk, u16 id, u16 type_flags) match->prot_hook.dev = po->prot_hook.dev; match->prot_hook.func = packet_rcv_fanout; match->prot_hook.af_packet_priv = match; + match->prot_hook.id_match = match_fanout_group; dev_add_pack(&match->prot_hook); list_add(&match->list, &fanout_list); } @@ -1943,7 +1952,6 @@ static void 
tpacket_destruct_skb(struct sk_buff *skb) if (likely(po->tx_ring.pg_vec)) { ph = skb_shinfo(skb)->destructor_arg; - BUG_ON(__packet_get_status(po, ph) != TP_STATUS_SENDING); BUG_ON(atomic_read(&po->tx_ring.pending) == 0); atomic_dec(&po->tx_ring.pending); __packet_set_status(po, ph, TP_STATUS_AVAILABLE); @@ -2442,13 +2450,15 @@ static int packet_release(struct socket *sock) packet_flush_mclist(sk); - memset(&req_u, 0, sizeof(req_u)); - - if (po->rx_ring.pg_vec) + if (po->rx_ring.pg_vec) { + memset(&req_u, 0, sizeof(req_u)); packet_set_ring(sk, &req_u, 1, 0); + } - if (po->tx_ring.pg_vec) + if (po->tx_ring.pg_vec) { + memset(&req_u, 0, sizeof(req_u)); packet_set_ring(sk, &req_u, 1, 1); + } fanout_release(sk); diff --git a/net/rds/recv.c b/net/rds/recv.c index 5c6e9f132026..9f0f17cf6bf9 100644 --- a/net/rds/recv.c +++ b/net/rds/recv.c @@ -410,6 +410,8 @@ int rds_recvmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg, rdsdebug("size %zu flags 0x%x timeo %ld\n", size, msg_flags, timeo); + msg->msg_namelen = 0; + if (msg_flags & MSG_OOB) goto out; @@ -485,6 +487,7 @@ int rds_recvmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg, sin->sin_port = inc->i_hdr.h_sport; sin->sin_addr.s_addr = inc->i_saddr; memset(sin->sin_zero, 0, sizeof(sin->sin_zero)); + msg->msg_namelen = sizeof(*sin); } break; } diff --git a/net/rds/send.c b/net/rds/send.c index 96531d4033a2..88eace57dd6b 100644 --- a/net/rds/send.c +++ b/net/rds/send.c @@ -1122,7 +1122,7 @@ rds_send_pong(struct rds_connection *conn, __be16 dport) rds_stats_inc(s_send_pong); if (!test_bit(RDS_LL_SEND_FULL, &conn->c_flags)) - rds_send_xmit(conn); + queue_delayed_work(rds_wq, &conn->c_send_w, 0); rds_message_put(rm); return 0; diff --git a/net/sched/act_gact.c b/net/sched/act_gact.c index b77f5a06a658..bdacd8df318c 100644 --- a/net/sched/act_gact.c +++ b/net/sched/act_gact.c @@ -67,6 +67,9 @@ static int tcf_gact_init(struct nlattr *nla, struct nlattr *est, struct tcf_common *pc; int ret = 0; int err; +#ifdef CONFIG_GACT_PROB + struct tc_gact_p *p_parm = NULL; +#endif if (nla == NULL) return -EINVAL; @@ -82,6 +85,12 @@ static int tcf_gact_init(struct nlattr *nla, struct nlattr *est, #ifndef CONFIG_GACT_PROB if (tb[TCA_GACT_PROB] != NULL) return -EOPNOTSUPP; +#else + if (tb[TCA_GACT_PROB]) { + p_parm = nla_data(tb[TCA_GACT_PROB]); + if (p_parm->ptype >= MAX_RAND) + return -EINVAL; + } #endif pc = tcf_hash_check(parm->index, a, bind, &gact_hash_info); @@ -103,8 +112,7 @@ static int tcf_gact_init(struct nlattr *nla, struct nlattr *est, spin_lock_bh(&gact->tcf_lock); gact->tcf_action = parm->action; #ifdef CONFIG_GACT_PROB - if (tb[TCA_GACT_PROB] != NULL) { - struct tc_gact_p *p_parm = nla_data(tb[TCA_GACT_PROB]); + if (p_parm) { gact->tcfg_paction = p_parm->paction; gact->tcfg_pval = p_parm->pval; gact->tcfg_ptype = p_parm->ptype; @@ -133,7 +141,7 @@ static int tcf_gact(struct sk_buff *skb, const struct tc_action *a, spin_lock(&gact->tcf_lock); #ifdef CONFIG_GACT_PROB - if (gact->tcfg_ptype && gact_rand[gact->tcfg_ptype] != NULL) + if (gact->tcfg_ptype) action = gact_rand[gact->tcfg_ptype](gact); else action = gact->tcf_action; diff --git a/net/sched/sch_cbq.c b/net/sched/sch_cbq.c index 24d94c097b35..599f67ada1ed 100644 --- a/net/sched/sch_cbq.c +++ b/net/sched/sch_cbq.c @@ -250,10 +250,11 @@ cbq_classify(struct sk_buff *skb, struct Qdisc *sch, int *qerr) else if ((cl = defmap[res.classid & TC_PRIO_MAX]) == NULL) cl = defmap[TC_PRIO_BESTEFFORT]; - if (cl == NULL || cl->level >= head->level) + if (cl == NULL) goto fallback; } 
- + if (cl->level >= head->level) + goto fallback; #ifdef CONFIG_NET_CLS_ACT switch (result) { case TC_ACT_QUEUED: diff --git a/net/sched/sch_htb.c b/net/sched/sch_htb.c index 29b942ce9e82..f08b9166119b 100644 --- a/net/sched/sch_htb.c +++ b/net/sched/sch_htb.c @@ -876,7 +876,7 @@ ok: q->now = psched_get_time(); start_at = jiffies; - next_event = q->now + 5 * PSCHED_TICKS_PER_SEC; + next_event = q->now + 5LLU * PSCHED_TICKS_PER_SEC; for (level = 0; level < TC_HTB_MAXDEPTH; level++) { /* common case optimization - skip event handler quickly */ diff --git a/net/sched/sch_qfq.c b/net/sched/sch_qfq.c index e68cb440756a..cdd474afbbd2 100644 --- a/net/sched/sch_qfq.c +++ b/net/sched/sch_qfq.c @@ -830,7 +830,10 @@ static void qfq_update_start(struct qfq_sched *q, struct qfq_class *cl) if (mask) { struct qfq_group *next = qfq_ffs(q, mask); if (qfq_gt(roundedF, next->F)) { - cl->S = next->F; + if (qfq_gt(limit, next->F)) + cl->S = next->F; + else /* preserve timestamp correctness */ + cl->S = limit; return; } } diff --git a/net/sctp/chunk.c b/net/sctp/chunk.c index 6c8556459a75..0018b653cb1f 100644 --- a/net/sctp/chunk.c +++ b/net/sctp/chunk.c @@ -183,7 +183,7 @@ struct sctp_datamsg *sctp_datamsg_from_user(struct sctp_association *asoc, msg = sctp_datamsg_new(GFP_KERNEL); if (!msg) - return NULL; + return ERR_PTR(-ENOMEM); /* Note: Calculate this outside of the loop, so that all fragments * have the same expiration. @@ -280,11 +280,14 @@ struct sctp_datamsg *sctp_datamsg_from_user(struct sctp_association *asoc, chunk = sctp_make_datafrag_empty(asoc, sinfo, len, frag, 0); - if (!chunk) + if (!chunk) { + err = -ENOMEM; goto errout; + } + err = sctp_user_addto_chunk(chunk, offset, len, msgh->msg_iov); if (err < 0) - goto errout; + goto errout_chunk_free; offset += len; @@ -315,8 +318,10 @@ struct sctp_datamsg *sctp_datamsg_from_user(struct sctp_association *asoc, chunk = sctp_make_datafrag_empty(asoc, sinfo, over, frag, 0); - if (!chunk) + if (!chunk) { + err = -ENOMEM; goto errout; + } err = sctp_user_addto_chunk(chunk, offset, over,msgh->msg_iov); @@ -324,7 +329,7 @@ struct sctp_datamsg *sctp_datamsg_from_user(struct sctp_association *asoc, __skb_pull(chunk->skb, (__u8 *)chunk->chunk_hdr - (__u8 *)chunk->skb->data); if (err < 0) - goto errout; + goto errout_chunk_free; sctp_datamsg_assign(msg, chunk); list_add_tail(&chunk->frag_list, &msg->chunks); @@ -332,6 +337,9 @@ struct sctp_datamsg *sctp_datamsg_from_user(struct sctp_association *asoc, return msg; +errout_chunk_free: + sctp_chunk_free(chunk); + errout: list_for_each_safe(pos, temp, &msg->chunks) { list_del_init(pos); @@ -339,7 +347,7 @@ errout: sctp_chunk_free(chunk); } sctp_datamsg_put(msg); - return NULL; + return ERR_PTR(err); } /* Check whether this message has expired. */ diff --git a/net/sctp/endpointola.c b/net/sctp/endpointola.c index 68a385d7c3bd..58cd035dcd20 100644 --- a/net/sctp/endpointola.c +++ b/net/sctp/endpointola.c @@ -248,6 +248,8 @@ void sctp_endpoint_free(struct sctp_endpoint *ep) /* Final destructor for endpoint. */ static void sctp_endpoint_destroy(struct sctp_endpoint *ep) { + int i; + SCTP_ASSERT(ep->base.dead, "Endpoint is not dead", return); /* Free up the HMAC transform. 
*/ @@ -270,6 +272,9 @@ static void sctp_endpoint_destroy(struct sctp_endpoint *ep) sctp_inq_free(&ep->base.inqueue); sctp_bind_addr_free(&ep->base.bind_addr); + for (i = 0; i < SCTP_HOW_MANY_SECRETS; ++i) + memset(&ep->secret_key[i], 0, SCTP_SECRET_SIZE); + /* Remove and free the port */ if (sctp_sk(ep->base.sk)->bind_hash) sctp_put_port(ep->base.sk); diff --git a/net/sctp/output.c b/net/sctp/output.c index 8fc4dcd294ab..32ba8d0e50e2 100644 --- a/net/sctp/output.c +++ b/net/sctp/output.c @@ -334,6 +334,25 @@ finish: return retval; } +static void sctp_packet_release_owner(struct sk_buff *skb) +{ + sk_free(skb->sk); +} + +static void sctp_packet_set_owner_w(struct sk_buff *skb, struct sock *sk) +{ + skb_orphan(skb); + skb->sk = sk; + skb->destructor = sctp_packet_release_owner; + + /* + * The data chunks have already been accounted for in sctp_sendmsg(), + * therefore only reserve a single byte to keep socket around until + * the packet has been transmitted. + */ + atomic_inc(&sk->sk_wmem_alloc); +} + /* All packets are sent to the network through this function from * sctp_outq_tail(). * @@ -375,7 +394,7 @@ int sctp_packet_transmit(struct sctp_packet *packet) /* Set the owning socket so that we know where to get the * destination IP address. */ - skb_set_owner_w(nskb, sk); + sctp_packet_set_owner_w(nskb, sk); if (!sctp_transport_dst_check(tp)) { sctp_transport_route(tp, NULL, sctp_sk(sk)); diff --git a/net/sctp/outqueue.c b/net/sctp/outqueue.c index cfeb1d4a1ee6..96eb168a1f47 100644 --- a/net/sctp/outqueue.c +++ b/net/sctp/outqueue.c @@ -223,7 +223,7 @@ void sctp_outq_init(struct sctp_association *asoc, struct sctp_outq *q) /* Free the outqueue structure and any related pending chunks. */ -void sctp_outq_teardown(struct sctp_outq *q) +static void __sctp_outq_teardown(struct sctp_outq *q) { struct sctp_transport *transport; struct list_head *lchunk, *temp; @@ -276,8 +276,6 @@ void sctp_outq_teardown(struct sctp_outq *q) sctp_chunk_free(chunk); } - q->error = 0; - /* Throw away any leftover control chunks. */ list_for_each_entry_safe(chunk, tmp, &q->control_chunk_list, list) { list_del_init(&chunk->list); @@ -285,11 +283,17 @@ void sctp_outq_teardown(struct sctp_outq *q) } } +void sctp_outq_teardown(struct sctp_outq *q) +{ + __sctp_outq_teardown(q); + sctp_outq_init(q->asoc, q); +} + /* Free the outqueue structure and any related pending chunks. */ void sctp_outq_free(struct sctp_outq *q) { /* Throw away leftover chunks. */ - sctp_outq_teardown(q); + __sctp_outq_teardown(q); /* If we were kmalloc()'d, free the memory. */ if (q->malloced) diff --git a/net/sctp/sm_sideeffect.c b/net/sctp/sm_sideeffect.c index 1ff51c9d18d5..2fdb05d8aac8 100644 --- a/net/sctp/sm_sideeffect.c +++ b/net/sctp/sm_sideeffect.c @@ -1610,8 +1610,9 @@ static int sctp_cmd_interpreter(sctp_event_t event_type, asoc->outqueue.outstanding_bytes; sackh.num_gap_ack_blocks = 0; sackh.num_dup_tsns = 0; + chunk->subh.sack_hdr = &sackh; sctp_add_cmd_sf(commands, SCTP_CMD_PROCESS_SACK, - SCTP_SACKH(&sackh)); + SCTP_CHUNK(chunk)); break; case SCTP_CMD_DISCARD_PACKET: diff --git a/net/sctp/socket.c b/net/sctp/socket.c index dba20d6e3247..9fd05edef190 100644 --- a/net/sctp/socket.c +++ b/net/sctp/socket.c @@ -1908,8 +1908,8 @@ SCTP_STATIC int sctp_sendmsg(struct kiocb *iocb, struct sock *sk, /* Break the message into multiple chunks of maximum size. 
*/ datamsg = sctp_datamsg_from_user(asoc, sinfo, msg, msg_len); - if (!datamsg) { - err = -ENOMEM; + if (IS_ERR(datamsg)) { + err = PTR_ERR(datamsg); goto out_free; } @@ -3375,7 +3375,7 @@ static int sctp_setsockopt_auth_key(struct sock *sk, ret = sctp_auth_set_key(sctp_sk(sk)->ep, asoc, authkey); out: - kfree(authkey); + kzfree(authkey); return ret; } diff --git a/net/socket.c b/net/socket.c index 573b26152a30..dab317686ad3 100644 --- a/net/socket.c +++ b/net/socket.c @@ -2605,7 +2605,7 @@ static int do_siocgstamp(struct net *net, struct socket *sock, err = sock_do_ioctl(net, sock, cmd, (unsigned long)&ktv); set_fs(old_fs); if (!err) - err = compat_put_timeval(up, &ktv); + err = compat_put_timeval(&ktv, up); return err; } @@ -2621,7 +2621,7 @@ static int do_siocgstampns(struct net *net, struct socket *sock, err = sock_do_ioctl(net, sock, cmd, (unsigned long)&kts); set_fs(old_fs); if (!err) - err = compat_put_timespec(up, &kts); + err = compat_put_timespec(&kts, up); return err; } @@ -2658,6 +2658,7 @@ static int dev_ifconf(struct net *net, struct compat_ifconf __user *uifc32) if (copy_from_user(&ifc32, uifc32, sizeof(struct compat_ifconf))) return -EFAULT; + memset(&ifc, 0, sizeof(ifc)); if (ifc32.ifcbuf == 0) { ifc32.ifc_len = 0; ifc.ifc_len = 0; diff --git a/net/sunrpc/cache.c b/net/sunrpc/cache.c index de0b0f39d9d8..76cb304f3f1a 100644 --- a/net/sunrpc/cache.c +++ b/net/sunrpc/cache.c @@ -1406,11 +1406,11 @@ static ssize_t read_flush(struct file *file, char __user *buf, size_t count, loff_t *ppos, struct cache_detail *cd) { - char tbuf[20]; + char tbuf[22]; unsigned long p = *ppos; size_t len; - sprintf(tbuf, "%lu\n", convert_to_wallclock(cd->flush_time)); + snprintf(tbuf, sizeof(tbuf), "%lu\n", convert_to_wallclock(cd->flush_time)); len = strlen(tbuf); if (p >= len) return 0; diff --git a/net/sunrpc/clnt.c b/net/sunrpc/clnt.c index 57f2731d957a..a28a2111297e 100644 --- a/net/sunrpc/clnt.c +++ b/net/sunrpc/clnt.c @@ -236,7 +236,7 @@ static struct rpc_clnt *rpc_get_client_for_event(struct net *net, int event) spin_lock(&sn->rpc_client_lock); list_for_each_entry(clnt, &sn->all_clients, cl_clients) { if (clnt->cl_program->pipe_dir_name == NULL) - break; + continue; if (rpc_clnt_skip_event(clnt, event)) continue; if (atomic_inc_not_zero(&clnt->cl_count) == 0) diff --git a/net/sunrpc/rpc_pipe.c b/net/sunrpc/rpc_pipe.c index faa078f74b27..b8bda4434c05 100644 --- a/net/sunrpc/rpc_pipe.c +++ b/net/sunrpc/rpc_pipe.c @@ -1157,14 +1157,19 @@ static void rpc_kill_sb(struct super_block *sb) struct sunrpc_net *sn = net_generic(net, sunrpc_net_id); mutex_lock(&sn->pipefs_sb_lock); + if (sn->pipefs_sb != sb) { + mutex_unlock(&sn->pipefs_sb_lock); + goto out; + } sn->pipefs_sb = NULL; mutex_unlock(&sn->pipefs_sb_lock); - put_net(net); dprintk("RPC: sending pipefs UMOUNT notification for net %p%s\n", net, NET_NAME(net)); blocking_notifier_call_chain(&rpc_pipefs_notifier_list, RPC_PIPEFS_UMOUNT, sb); + put_net(net); +out: kill_litter_super(sb); } diff --git a/net/sunrpc/sched.c b/net/sunrpc/sched.c index eda32ae7dec4..85b9235fbee2 100644 --- a/net/sunrpc/sched.c +++ b/net/sunrpc/sched.c @@ -915,16 +915,35 @@ struct rpc_task *rpc_new_task(const struct rpc_task_setup *setup_data) return task; } +/* + * rpc_free_task - release rpc task and perform cleanups + * + * Note that we free up the rpc_task _after_ rpc_release_calldata() + * in order to work around a workqueue dependency issue. 
+ * + * Tejun Heo states: + * "Workqueue currently considers two work items to be the same if they're + * on the same address and won't execute them concurrently - ie. it + * makes a work item which is queued again while being executed wait + * for the previous execution to complete. + * + * If a work function frees the work item, and then waits for an event + * which should be performed by another work item and *that* work item + * recycles the freed work item, it can create a false dependency loop. + * There really is no reliable way to detect this short of verifying + * every memory free." + * + */ static void rpc_free_task(struct rpc_task *task) { - const struct rpc_call_ops *tk_ops = task->tk_ops; - void *calldata = task->tk_calldata; + unsigned short tk_flags = task->tk_flags; + + rpc_release_calldata(task->tk_ops, task->tk_calldata); - if (task->tk_flags & RPC_TASK_DYNAMIC) { + if (tk_flags & RPC_TASK_DYNAMIC) { dprintk("RPC: %5u freeing task\n", task->tk_pid); mempool_free(task, rpc_task_mempool); } - rpc_release_calldata(tk_ops, calldata); } static void rpc_async_release(struct work_struct *work) @@ -934,8 +953,7 @@ static void rpc_async_release(struct work_struct *work) static void rpc_release_resources_task(struct rpc_task *task) { - if (task->tk_rqstp) - xprt_release(task); + xprt_release(task); if (task->tk_msg.rpc_cred) { put_rpccred(task->tk_msg.rpc_cred); task->tk_msg.rpc_cred = NULL; diff --git a/net/sunrpc/svc_xprt.c b/net/sunrpc/svc_xprt.c index 4bda09d7e1a4..aec7dbb9d3dd 100644 --- a/net/sunrpc/svc_xprt.c +++ b/net/sunrpc/svc_xprt.c @@ -316,7 +316,6 @@ static bool svc_xprt_has_something_to_do(struct svc_xprt *xprt) */ void svc_xprt_enqueue(struct svc_xprt *xprt) { - struct svc_serv *serv = xprt->xpt_server; struct svc_pool *pool; struct svc_rqst *rqstp; int cpu; @@ -362,8 +361,6 @@ void svc_xprt_enqueue(struct svc_xprt *xprt) rqstp, rqstp->rq_xprt); rqstp->rq_xprt = xprt; svc_xprt_get(xprt); - rqstp->rq_reserved = serv->sv_max_mesg; - atomic_add(rqstp->rq_reserved, &xprt->xpt_reserved); pool->sp_stats.threads_woken++; wake_up(&rqstp->rq_wait); } else { @@ -643,8 +640,6 @@ int svc_recv(struct svc_rqst *rqstp, long timeout) if (xprt) { rqstp->rq_xprt = xprt; svc_xprt_get(xprt); - rqstp->rq_reserved = serv->sv_max_mesg; - atomic_add(rqstp->rq_reserved, &xprt->xpt_reserved); /* As there is a shortage of threads and this request * had to be queued, don't allow the thread to wait so @@ -741,6 +736,8 @@ int svc_recv(struct svc_rqst *rqstp, long timeout) else len = xprt->xpt_ops->xpo_recvfrom(rqstp); dprintk("svc: got len=%d\n", len); + rqstp->rq_reserved = serv->sv_max_mesg; + atomic_add(rqstp->rq_reserved, &xprt->xpt_reserved); } svc_xprt_received(xprt); @@ -797,7 +794,8 @@ int svc_send(struct svc_rqst *rqstp) /* Grab mutex to serialize outgoing data. 
*/ mutex_lock(&xprt->xpt_mutex); - if (test_bit(XPT_DEAD, &xprt->xpt_flags)) + if (test_bit(XPT_DEAD, &xprt->xpt_flags) + || test_bit(XPT_CLOSE, &xprt->xpt_flags)) len = -ENOTCONN; else len = xprt->xpt_ops->xpo_sendto(rqstp); @@ -819,7 +817,6 @@ static void svc_age_temp_xprts(unsigned long closure) struct svc_serv *serv = (struct svc_serv *)closure; struct svc_xprt *xprt; struct list_head *le, *next; - LIST_HEAD(to_be_aged); dprintk("svc_age_temp_xprts\n"); @@ -840,25 +837,15 @@ static void svc_age_temp_xprts(unsigned long closure) if (atomic_read(&xprt->xpt_ref.refcount) > 1 || test_bit(XPT_BUSY, &xprt->xpt_flags)) continue; - svc_xprt_get(xprt); - list_move(le, &to_be_aged); + list_del_init(le); set_bit(XPT_CLOSE, &xprt->xpt_flags); set_bit(XPT_DETACHED, &xprt->xpt_flags); - } - spin_unlock_bh(&serv->sv_lock); - - while (!list_empty(&to_be_aged)) { - le = to_be_aged.next; - /* fiddling the xpt_list node is safe 'cos we're XPT_DETACHED */ - list_del_init(le); - xprt = list_entry(le, struct svc_xprt, xpt_list); - dprintk("queuing xprt %p for closing\n", xprt); /* a thread will dequeue and close it soon */ svc_xprt_enqueue(xprt); - svc_xprt_put(xprt); } + spin_unlock_bh(&serv->sv_lock); mod_timer(&serv->sv_temptimer, jiffies + svc_conn_age_period * HZ); } diff --git a/net/sunrpc/svcsock.c b/net/sunrpc/svcsock.c index 824d32fb3121..f190ea96f112 100644 --- a/net/sunrpc/svcsock.c +++ b/net/sunrpc/svcsock.c @@ -1137,9 +1137,9 @@ static int svc_tcp_recvfrom(struct svc_rqst *rqstp) if (len >= 0) svsk->sk_tcplen += len; if (len != want) { + svc_tcp_save_pages(svsk, rqstp); if (len < 0 && len != -EAGAIN) goto err_other; - svc_tcp_save_pages(svsk, rqstp); dprintk("svc: incomplete TCP record (%d of %d)\n", svsk->sk_tcplen, svsk->sk_reclen); goto err_noclose; diff --git a/net/sunrpc/xprt.c b/net/sunrpc/xprt.c index da72492360b8..feea4741edda 100644 --- a/net/sunrpc/xprt.c +++ b/net/sunrpc/xprt.c @@ -969,11 +969,11 @@ static bool xprt_dynamic_free_slot(struct rpc_xprt *xprt, struct rpc_rqst *req) return false; } -static void xprt_alloc_slot(struct rpc_task *task) +void xprt_alloc_slot(struct rpc_xprt *xprt, struct rpc_task *task) { - struct rpc_xprt *xprt = task->tk_xprt; struct rpc_rqst *req; + spin_lock(&xprt->reserve_lock); if (!list_empty(&xprt->free)) { req = list_entry(xprt->free.next, struct rpc_rqst, rq_list); list_del(&req->rq_list); @@ -994,12 +994,29 @@ static void xprt_alloc_slot(struct rpc_task *task) default: task->tk_status = -EAGAIN; } + spin_unlock(&xprt->reserve_lock); return; out_init_req: task->tk_status = 0; task->tk_rqstp = req; xprt_request_init(task, xprt); + spin_unlock(&xprt->reserve_lock); +} +EXPORT_SYMBOL_GPL(xprt_alloc_slot); + +void xprt_lock_and_alloc_slot(struct rpc_xprt *xprt, struct rpc_task *task) +{ + /* Note: grabbing the xprt_lock_write() ensures that we throttle + * new slot allocation if the transport is congested (i.e. when + * reconnecting a stream transport or when out of socket write + * buffer space). + */ + if (xprt_lock_write(xprt, task)) { + xprt_alloc_slot(xprt, task); + xprt_release_write(xprt, task); + } } +EXPORT_SYMBOL_GPL(xprt_lock_and_alloc_slot); static void xprt_free_slot(struct rpc_xprt *xprt, struct rpc_rqst *req) { @@ -1083,20 +1100,9 @@ void xprt_reserve(struct rpc_task *task) if (task->tk_rqstp != NULL) return; - /* Note: grabbing the xprt_lock_write() here is not strictly needed, - * but ensures that we throttle new slot allocation if the transport - * is congested (e.g. if reconnecting or if we're out of socket - * write buffer space). 
- */ task->tk_timeout = 0; task->tk_status = -EAGAIN; - if (!xprt_lock_write(xprt, task)) - return; - - spin_lock(&xprt->reserve_lock); - xprt_alloc_slot(task); - spin_unlock(&xprt->reserve_lock); - xprt_release_write(xprt, task); + xprt->ops->alloc_slot(xprt, task); } static inline __be32 xprt_alloc_xid(struct rpc_xprt *xprt) @@ -1133,10 +1139,18 @@ static void xprt_request_init(struct rpc_task *task, struct rpc_xprt *xprt) void xprt_release(struct rpc_task *task) { struct rpc_xprt *xprt; - struct rpc_rqst *req; + struct rpc_rqst *req = task->tk_rqstp; - if (!(req = task->tk_rqstp)) + if (req == NULL) { + if (task->tk_client) { + rcu_read_lock(); + xprt = rcu_dereference(task->tk_client->cl_xprt); + if (xprt->snd_task == task) + xprt_release_write(xprt, task); + rcu_read_unlock(); + } return; + } xprt = req->rq_xprt; if (task->tk_ops->rpc_count_stats != NULL) diff --git a/net/sunrpc/xprtrdma/transport.c b/net/sunrpc/xprtrdma/transport.c index 06cdbff79e4a..5d9202dc7cb1 100644 --- a/net/sunrpc/xprtrdma/transport.c +++ b/net/sunrpc/xprtrdma/transport.c @@ -713,6 +713,7 @@ static void xprt_rdma_print_stats(struct rpc_xprt *xprt, struct seq_file *seq) static struct rpc_xprt_ops xprt_rdma_procs = { .reserve_xprt = xprt_rdma_reserve_xprt, .release_xprt = xprt_release_xprt_cong, /* sunrpc/xprt.c */ + .alloc_slot = xprt_alloc_slot, .release_request = xprt_release_rqst_cong, /* ditto */ .set_retrans_timeout = xprt_set_retrans_timeout_def, /* ditto */ .rpcbind = rpcb_getport_async, /* sunrpc/rpcb_clnt.c */ diff --git a/net/sunrpc/xprtsock.c b/net/sunrpc/xprtsock.c index b88c6bf657ba..79064471cd01 100644 --- a/net/sunrpc/xprtsock.c +++ b/net/sunrpc/xprtsock.c @@ -254,7 +254,6 @@ struct sock_xprt { void (*old_data_ready)(struct sock *, int); void (*old_state_change)(struct sock *); void (*old_write_space)(struct sock *); - void (*old_error_report)(struct sock *); }; /* @@ -737,10 +736,10 @@ static int xs_tcp_send_request(struct rpc_task *task) dprintk("RPC: sendmsg returned unrecognized error %d\n", -status); case -ECONNRESET: - case -EPIPE: xs_tcp_shutdown(xprt); case -ECONNREFUSED: case -ENOTCONN: + case -EPIPE: clear_bit(SOCK_ASYNC_NOSPACE, &transport->sock->flags); } @@ -781,7 +780,6 @@ static void xs_save_old_callbacks(struct sock_xprt *transport, struct sock *sk) transport->old_data_ready = sk->sk_data_ready; transport->old_state_change = sk->sk_state_change; transport->old_write_space = sk->sk_write_space; - transport->old_error_report = sk->sk_error_report; } static void xs_restore_old_callbacks(struct sock_xprt *transport, struct sock *sk) @@ -789,7 +787,6 @@ static void xs_restore_old_callbacks(struct sock_xprt *transport, struct sock *s sk->sk_data_ready = transport->old_data_ready; sk->sk_state_change = transport->old_state_change; sk->sk_write_space = transport->old_write_space; - sk->sk_error_report = transport->old_error_report; } static void xs_reset_transport(struct sock_xprt *transport) @@ -1028,6 +1025,16 @@ static void xs_udp_data_ready(struct sock *sk, int len) read_unlock_bh(&sk->sk_callback_lock); } +/* + * Helper function to force a TCP close if the server is sending + * junk and/or it has put us in CLOSE_WAIT + */ +static void xs_tcp_force_close(struct rpc_xprt *xprt) +{ + set_bit(XPRT_CONNECTION_CLOSE, &xprt->state); + xprt_force_disconnect(xprt); +} + static inline void xs_tcp_read_fraghdr(struct rpc_xprt *xprt, struct xdr_skb_reader *desc) { struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt); @@ -1054,7 +1061,7 @@ static inline void 
xs_tcp_read_fraghdr(struct rpc_xprt *xprt, struct xdr_skb_rea /* Sanity check of the record length */ if (unlikely(transport->tcp_reclen < 8)) { dprintk("RPC: invalid TCP record fragment length\n"); - xprt_force_disconnect(xprt); + xs_tcp_force_close(xprt); return; } dprintk("RPC: reading TCP record fragment of length %d\n", @@ -1135,7 +1142,7 @@ static inline void xs_tcp_read_calldir(struct sock_xprt *transport, break; default: dprintk("RPC: invalid request message type\n"); - xprt_force_disconnect(&transport->xprt); + xs_tcp_force_close(&transport->xprt); } xs_tcp_check_fraghdr(transport); } @@ -1455,12 +1462,19 @@ static void xs_tcp_cancel_linger_timeout(struct rpc_xprt *xprt) xprt_clear_connecting(xprt); } -static void xs_sock_mark_closed(struct rpc_xprt *xprt) +static void xs_sock_reset_connection_flags(struct rpc_xprt *xprt) { smp_mb__before_clear_bit(); + clear_bit(XPRT_CONNECTION_ABORT, &xprt->state); + clear_bit(XPRT_CONNECTION_CLOSE, &xprt->state); clear_bit(XPRT_CLOSE_WAIT, &xprt->state); clear_bit(XPRT_CLOSING, &xprt->state); smp_mb__after_clear_bit(); +} + +static void xs_sock_mark_closed(struct rpc_xprt *xprt) +{ + xs_sock_reset_connection_flags(xprt); /* Mark transport as closed and wake up all pending tasks */ xprt_disconnect_done(xprt); } @@ -1515,8 +1529,9 @@ static void xs_tcp_state_change(struct sock *sk) break; case TCP_CLOSE_WAIT: /* The server initiated a shutdown of the socket */ - xprt_force_disconnect(xprt); xprt->connect_cookie++; + clear_bit(XPRT_CONNECTED, &xprt->state); + xs_tcp_force_close(xprt); case TCP_CLOSING: /* * If the server closed down the connection, make sure that @@ -1540,25 +1555,6 @@ static void xs_tcp_state_change(struct sock *sk) read_unlock_bh(&sk->sk_callback_lock); } -/** - * xs_error_report - callback mainly for catching socket errors - * @sk: socket - */ -static void xs_error_report(struct sock *sk) -{ - struct rpc_xprt *xprt; - - read_lock_bh(&sk->sk_callback_lock); - if (!(xprt = xprt_from_sock(sk))) - goto out; - dprintk("RPC: %s client %p...\n" - "RPC: error %d\n", - __func__, xprt, sk->sk_err); - xprt_wake_pending_tasks(xprt, -EAGAIN); -out: - read_unlock_bh(&sk->sk_callback_lock); -} - static void xs_write_space(struct sock *sk) { struct socket *sock; @@ -1858,7 +1854,6 @@ static int xs_local_finish_connecting(struct rpc_xprt *xprt, sk->sk_user_data = xprt; sk->sk_data_ready = xs_local_data_ready; sk->sk_write_space = xs_udp_write_space; - sk->sk_error_report = xs_error_report; sk->sk_allocation = GFP_ATOMIC; xprt_clear_connected(xprt); @@ -1947,7 +1942,6 @@ static void xs_udp_finish_connecting(struct rpc_xprt *xprt, struct socket *sock) sk->sk_user_data = xprt; sk->sk_data_ready = xs_udp_data_ready; sk->sk_write_space = xs_udp_write_space; - sk->sk_error_report = xs_error_report; sk->sk_no_check = UDP_CSUM_NORCV; sk->sk_allocation = GFP_ATOMIC; @@ -2015,10 +2009,8 @@ static void xs_abort_connection(struct sock_xprt *transport) any.sa_family = AF_UNSPEC; result = kernel_connect(transport->sock, &any, sizeof(any), 0); if (!result) - xs_sock_mark_closed(&transport->xprt); - else - dprintk("RPC: AF_UNSPEC connect return code %d\n", - result); + xs_sock_reset_connection_flags(&transport->xprt); + dprintk("RPC: AF_UNSPEC connect return code %d\n", result); } static void xs_tcp_reuse_connection(struct sock_xprt *transport) @@ -2063,7 +2055,6 @@ static int xs_tcp_finish_connecting(struct rpc_xprt *xprt, struct socket *sock) sk->sk_data_ready = xs_tcp_data_ready; sk->sk_state_change = xs_tcp_state_change; sk->sk_write_space = 
xs_tcp_write_space; - sk->sk_error_report = xs_error_report; sk->sk_allocation = GFP_ATOMIC; /* socket options */ @@ -2159,8 +2150,7 @@ static void xs_tcp_setup_socket(struct work_struct *work) /* We're probably in TIME_WAIT. Get rid of existing socket, * and retry */ - set_bit(XPRT_CONNECTION_CLOSE, &xprt->state); - xprt_force_disconnect(xprt); + xs_tcp_force_close(xprt); break; case -ECONNREFUSED: case -ECONNRESET: @@ -2433,6 +2423,7 @@ static void bc_destroy(struct rpc_xprt *xprt) static struct rpc_xprt_ops xs_local_ops = { .reserve_xprt = xprt_reserve_xprt, .release_xprt = xs_tcp_release_xprt, + .alloc_slot = xprt_alloc_slot, .rpcbind = xs_local_rpcbind, .set_port = xs_local_set_port, .connect = xs_connect, @@ -2449,6 +2440,7 @@ static struct rpc_xprt_ops xs_udp_ops = { .set_buffer_size = xs_udp_set_buffer_size, .reserve_xprt = xprt_reserve_xprt_cong, .release_xprt = xprt_release_xprt_cong, + .alloc_slot = xprt_alloc_slot, .rpcbind = rpcb_getport_async, .set_port = xs_set_port, .connect = xs_connect, @@ -2466,6 +2458,7 @@ static struct rpc_xprt_ops xs_udp_ops = { static struct rpc_xprt_ops xs_tcp_ops = { .reserve_xprt = xprt_reserve_xprt, .release_xprt = xs_tcp_release_xprt, + .alloc_slot = xprt_lock_and_alloc_slot, .rpcbind = rpcb_getport_async, .set_port = xs_set_port, .connect = xs_connect, @@ -2485,6 +2478,7 @@ static struct rpc_xprt_ops xs_tcp_ops = { static struct rpc_xprt_ops bc_tcp_ops = { .reserve_xprt = xprt_reserve_xprt, .release_xprt = xprt_release_xprt, + .alloc_slot = xprt_alloc_slot, .rpcbind = xs_local_rpcbind, .buf_alloc = bc_malloc, .buf_free = bc_free, diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c index d510353ef431..109e30beaa69 100644 --- a/net/unix/af_unix.c +++ b/net/unix/af_unix.c @@ -1446,7 +1446,7 @@ static int unix_dgram_sendmsg(struct kiocb *kiocb, struct socket *sock, if (NULL == siocb->scm) siocb->scm = &tmp_scm; wait_for_unix_gc(); - err = scm_send(sock, msg, siocb->scm); + err = scm_send(sock, msg, siocb->scm, false); if (err < 0) return err; @@ -1607,7 +1607,7 @@ static int unix_stream_sendmsg(struct kiocb *kiocb, struct socket *sock, if (NULL == siocb->scm) siocb->scm = &tmp_scm; wait_for_unix_gc(); - err = scm_send(sock, msg, siocb->scm); + err = scm_send(sock, msg, siocb->scm, false); if (err < 0) return err; diff --git a/net/wireless/core.c b/net/wireless/core.c index bb5302dd592d..7917c74b25b2 100644 --- a/net/wireless/core.c +++ b/net/wireless/core.c @@ -552,8 +552,7 @@ int wiphy_register(struct wiphy *wiphy) for (i = 0; i < sband->n_channels; i++) { sband->channels[i].orig_flags = sband->channels[i].flags; - sband->channels[i].orig_mag = - sband->channels[i].max_antenna_gain; + sband->channels[i].orig_mag = INT_MAX; sband->channels[i].orig_mpwr = sband->channels[i].max_power; sband->channels[i].band = band; diff --git a/net/wireless/reg.c b/net/wireless/reg.c index 460af03d8149..4dc83474db2e 100644 --- a/net/wireless/reg.c +++ b/net/wireless/reg.c @@ -134,9 +134,8 @@ static const struct ieee80211_regdomain world_regdom = { .reg_rules = { /* IEEE 802.11b/g, channels 1..11 */ REG_RULE(2412-10, 2462+10, 40, 6, 20, 0), - /* IEEE 802.11b/g, channels 12..13. No HT40 - * channel fits here. */ - REG_RULE(2467-10, 2472+10, 20, 6, 20, + /* IEEE 802.11b/g, channels 12..13. 
*/ + REG_RULE(2467-10, 2472+10, 40, 6, 20, NL80211_RRF_PASSIVE_SCAN | NL80211_RRF_NO_IBSS), /* IEEE 802.11 channel 14 - Only JP enables @@ -340,6 +339,9 @@ static void reg_regdb_search(struct work_struct *work) struct reg_regdb_search_request *request; const struct ieee80211_regdomain *curdom, *regdom; int i, r; + bool set_reg = false; + + mutex_lock(&cfg80211_mutex); mutex_lock(®_regdb_search_mutex); while (!list_empty(®_regdb_search_list)) { @@ -355,9 +357,7 @@ static void reg_regdb_search(struct work_struct *work) r = reg_copy_regd(®dom, curdom); if (r) break; - mutex_lock(&cfg80211_mutex); - set_regdom(regdom); - mutex_unlock(&cfg80211_mutex); + set_reg = true; break; } } @@ -365,6 +365,11 @@ static void reg_regdb_search(struct work_struct *work) kfree(request); } mutex_unlock(®_regdb_search_mutex); + + if (set_reg) + set_regdom(regdom); + + mutex_unlock(&cfg80211_mutex); } static DECLARE_WORK(reg_regdb_work, reg_regdb_search); diff --git a/net/wireless/util.c b/net/wireless/util.c index d835377b4da7..d22dce7b8b80 100644 --- a/net/wireless/util.c +++ b/net/wireless/util.c @@ -290,23 +290,21 @@ unsigned int ieee80211_get_hdrlen_from_skb(const struct sk_buff *skb) } EXPORT_SYMBOL(ieee80211_get_hdrlen_from_skb); -static int ieee80211_get_mesh_hdrlen(struct ieee80211s_hdr *meshhdr) +unsigned int ieee80211_get_mesh_hdrlen(struct ieee80211s_hdr *meshhdr) { int ae = meshhdr->flags & MESH_FLAGS_AE; - /* 7.1.3.5a.2 */ + /* 802.11-2012, 8.2.4.7.3 */ switch (ae) { + default: case 0: return 6; case MESH_FLAGS_AE_A4: return 12; case MESH_FLAGS_AE_A5_A6: return 18; - case (MESH_FLAGS_AE_A4 | MESH_FLAGS_AE_A5_A6): - return 24; - default: - return 6; } } +EXPORT_SYMBOL(ieee80211_get_mesh_hdrlen); int ieee80211_data_to_8023(struct sk_buff *skb, const u8 *addr, enum nl80211_iftype iftype) @@ -354,6 +352,8 @@ int ieee80211_data_to_8023(struct sk_buff *skb, const u8 *addr, /* make sure meshdr->flags is on the linear part */ if (!pskb_may_pull(skb, hdrlen + 1)) return -1; + if (meshdr->flags & MESH_FLAGS_AE_A4) + return -1; if (meshdr->flags & MESH_FLAGS_AE_A5_A6) { skb_copy_bits(skb, hdrlen + offsetof(struct ieee80211s_hdr, eaddr1), @@ -378,6 +378,8 @@ int ieee80211_data_to_8023(struct sk_buff *skb, const u8 *addr, /* make sure meshdr->flags is on the linear part */ if (!pskb_may_pull(skb, hdrlen + 1)) return -1; + if (meshdr->flags & MESH_FLAGS_AE_A5_A6) + return -1; if (meshdr->flags & MESH_FLAGS_AE_A4) skb_copy_bits(skb, hdrlen + offsetof(struct ieee80211s_hdr, eaddr1), diff --git a/net/xfrm/xfrm_input.c b/net/xfrm/xfrm_input.c index 54a0dc2e2f8d..ab2bb42fe094 100644 --- a/net/xfrm/xfrm_input.c +++ b/net/xfrm/xfrm_input.c @@ -212,7 +212,7 @@ resume: /* only the first xfrm gets the encap type */ encap_type = 0; - if (async && x->repl->check(x, skb, seq)) { + if (async && x->repl->recheck(x, skb, seq)) { XFRM_INC_STATS(net, LINUX_MIB_XFRMINSTATESEQERROR); goto drop_unlock; } diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c index a15d2a03172a..71c80c763d57 100644 --- a/net/xfrm/xfrm_policy.c +++ b/net/xfrm/xfrm_policy.c @@ -1761,7 +1761,7 @@ static struct dst_entry *make_blackhole(struct net *net, u16 family, if (!afinfo) { dst_release(dst_orig); - ret = ERR_PTR(-EINVAL); + return ERR_PTR(-EINVAL); } else { ret = afinfo->blackhole_route(net, dst_orig); } diff --git a/net/xfrm/xfrm_replay.c b/net/xfrm/xfrm_replay.c index 2f6d11d04a2b..3efb07d3eb27 100644 --- a/net/xfrm/xfrm_replay.c +++ b/net/xfrm/xfrm_replay.c @@ -420,6 +420,18 @@ err: return -EINVAL; } +static int 
xfrm_replay_recheck_esn(struct xfrm_state *x, + struct sk_buff *skb, __be32 net_seq) +{ + if (unlikely(XFRM_SKB_CB(skb)->seq.input.hi != + htonl(xfrm_replay_seqhi(x, net_seq)))) { + x->stats.replay_window++; + return -EINVAL; + } + + return xfrm_replay_check_esn(x, skb, net_seq); +} + static void xfrm_replay_advance_esn(struct xfrm_state *x, __be32 net_seq) { unsigned int bitnr, nr, i; @@ -479,6 +491,7 @@ static void xfrm_replay_advance_esn(struct xfrm_state *x, __be32 net_seq) static struct xfrm_replay xfrm_replay_legacy = { .advance = xfrm_replay_advance, .check = xfrm_replay_check, + .recheck = xfrm_replay_check, .notify = xfrm_replay_notify, .overflow = xfrm_replay_overflow, }; @@ -486,6 +499,7 @@ static struct xfrm_replay xfrm_replay_legacy = { static struct xfrm_replay xfrm_replay_bmp = { .advance = xfrm_replay_advance_bmp, .check = xfrm_replay_check_bmp, + .recheck = xfrm_replay_check_bmp, .notify = xfrm_replay_notify_bmp, .overflow = xfrm_replay_overflow_bmp, }; @@ -493,6 +507,7 @@ static struct xfrm_replay xfrm_replay_bmp = { static struct xfrm_replay xfrm_replay_esn = { .advance = xfrm_replay_advance_esn, .check = xfrm_replay_check_esn, + .recheck = xfrm_replay_recheck_esn, .notify = xfrm_replay_notify_bmp, .overflow = xfrm_replay_overflow_esn, }; diff --git a/net/xfrm/xfrm_user.c b/net/xfrm/xfrm_user.c index 7128dde0fe1a..c8b903df943f 100644 --- a/net/xfrm/xfrm_user.c +++ b/net/xfrm/xfrm_user.c @@ -123,9 +123,21 @@ static inline int verify_replay(struct xfrm_usersa_info *p, struct nlattr **attrs) { struct nlattr *rt = attrs[XFRMA_REPLAY_ESN_VAL]; + struct xfrm_replay_state_esn *rs; - if ((p->flags & XFRM_STATE_ESN) && !rt) - return -EINVAL; + if (p->flags & XFRM_STATE_ESN) { + if (!rt) + return -EINVAL; + + rs = nla_data(rt); + + if (rs->bmp_len > XFRMA_REPLAY_ESN_MAX / sizeof(rs->bmp[0]) / 8) + return -EINVAL; + + if (nla_len(rt) < xfrm_replay_state_esn_len(rs) && + nla_len(rt) != sizeof(*rs)) + return -EINVAL; + } if (!rt) return 0; @@ -370,14 +382,15 @@ static inline int xfrm_replay_verify_len(struct xfrm_replay_state_esn *replay_es struct nlattr *rp) { struct xfrm_replay_state_esn *up; + int ulen; if (!replay_esn || !rp) return 0; up = nla_data(rp); + ulen = xfrm_replay_state_esn_len(up); - if (xfrm_replay_state_esn_len(replay_esn) != - xfrm_replay_state_esn_len(up)) + if (nla_len(rp) < ulen || xfrm_replay_state_esn_len(replay_esn) != ulen) return -EINVAL; return 0; @@ -388,22 +401,28 @@ static int xfrm_alloc_replay_state_esn(struct xfrm_replay_state_esn **replay_esn struct nlattr *rta) { struct xfrm_replay_state_esn *p, *pp, *up; + int klen, ulen; if (!rta) return 0; up = nla_data(rta); + klen = xfrm_replay_state_esn_len(up); + ulen = nla_len(rta) >= klen ? 
klen : sizeof(*up); - p = kmemdup(up, xfrm_replay_state_esn_len(up), GFP_KERNEL); + p = kzalloc(klen, GFP_KERNEL); if (!p) return -ENOMEM; - pp = kmemdup(up, xfrm_replay_state_esn_len(up), GFP_KERNEL); + pp = kzalloc(klen, GFP_KERNEL); if (!pp) { kfree(p); return -ENOMEM; } + memcpy(p, up, ulen); + memcpy(pp, up, ulen); + *replay_esn = p; *preplay_esn = pp; @@ -442,10 +461,11 @@ static void copy_from_user_state(struct xfrm_state *x, struct xfrm_usersa_info * * somehow made shareable and move it to xfrm_state.c - JHS * */ -static void xfrm_update_ae_params(struct xfrm_state *x, struct nlattr **attrs) +static void xfrm_update_ae_params(struct xfrm_state *x, struct nlattr **attrs, + int update_esn) { struct nlattr *rp = attrs[XFRMA_REPLAY_VAL]; - struct nlattr *re = attrs[XFRMA_REPLAY_ESN_VAL]; + struct nlattr *re = update_esn ? attrs[XFRMA_REPLAY_ESN_VAL] : NULL; struct nlattr *lt = attrs[XFRMA_LTIME_VAL]; struct nlattr *et = attrs[XFRMA_ETIMER_THRESH]; struct nlattr *rt = attrs[XFRMA_REPLAY_THRESH]; @@ -555,7 +575,7 @@ static struct xfrm_state *xfrm_state_construct(struct net *net, goto error; /* override default values from above */ - xfrm_update_ae_params(x, attrs); + xfrm_update_ae_params(x, attrs, 0); return x; @@ -689,6 +709,7 @@ out: static void copy_to_user_state(struct xfrm_state *x, struct xfrm_usersa_info *p) { + memset(p, 0, sizeof(*p)); memcpy(&p->id, &x->id, sizeof(p->id)); memcpy(&p->sel, &x->sel, sizeof(p->sel)); memcpy(&p->lft, &x->lft, sizeof(p->lft)); @@ -742,7 +763,7 @@ static int copy_to_user_auth(struct xfrm_algo_auth *auth, struct sk_buff *skb) return -EMSGSIZE; algo = nla_data(nla); - strcpy(algo->alg_name, auth->alg_name); + strncpy(algo->alg_name, auth->alg_name, sizeof(algo->alg_name)); memcpy(algo->alg_key, auth->alg_key, (auth->alg_key_len + 7) / 8); algo->alg_key_len = auth->alg_key_len; @@ -862,6 +883,7 @@ static struct sk_buff *xfrm_state_netlink(struct sk_buff *in_skb, { struct xfrm_dump_info info; struct sk_buff *skb; + int err; skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_ATOMIC); if (!skb) @@ -872,9 +894,10 @@ static struct sk_buff *xfrm_state_netlink(struct sk_buff *in_skb, info.nlmsg_seq = seq; info.nlmsg_flags = 0; - if (dump_one_state(x, 0, &info)) { + err = dump_one_state(x, 0, &info); + if (err) { kfree_skb(skb); - return NULL; + return ERR_PTR(err); } return skb; @@ -1297,6 +1320,7 @@ static void copy_from_user_policy(struct xfrm_policy *xp, struct xfrm_userpolicy static void copy_to_user_policy(struct xfrm_policy *xp, struct xfrm_userpolicy_info *p, int dir) { + memset(p, 0, sizeof(*p)); memcpy(&p->sel, &xp->selector, sizeof(p->sel)); memcpy(&p->lft, &xp->lft, sizeof(p->lft)); memcpy(&p->curlft, &xp->curlft, sizeof(p->curlft)); @@ -1401,6 +1425,7 @@ static int copy_to_user_tmpl(struct xfrm_policy *xp, struct sk_buff *skb) struct xfrm_user_tmpl *up = &vec[i]; struct xfrm_tmpl *kp = &xp->xfrm_vec[i]; + memset(up, 0, sizeof(*up)); memcpy(&up->id, &kp->id, sizeof(up->id)); up->family = kp->encap_family; memcpy(&up->saddr, &kp->saddr, sizeof(up->saddr)); @@ -1529,6 +1554,7 @@ static struct sk_buff *xfrm_policy_netlink(struct sk_buff *in_skb, { struct xfrm_dump_info info; struct sk_buff *skb; + int err; skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL); if (!skb) @@ -1539,9 +1565,10 @@ static struct sk_buff *xfrm_policy_netlink(struct sk_buff *in_skb, info.nlmsg_seq = seq; info.nlmsg_flags = 0; - if (dump_one_policy(xp, dir, 0, &info) < 0) { + err = dump_one_policy(xp, dir, 0, &info); + if (err) { kfree_skb(skb); - return NULL; + return ERR_PTR(err); } return 
skb; @@ -1794,7 +1821,7 @@ static int xfrm_new_ae(struct sk_buff *skb, struct nlmsghdr *nlh, goto out; spin_lock_bh(&x->lock); - xfrm_update_ae_params(x, attrs); + xfrm_update_ae_params(x, attrs, 1); spin_unlock_bh(&x->lock); c.event = nlh->nlmsg_type; diff --git a/scripts/Kbuild.include b/scripts/Kbuild.include index 6a3ee981931d..978416dd31ca 100644 --- a/scripts/Kbuild.include +++ b/scripts/Kbuild.include @@ -98,24 +98,24 @@ try-run = $(shell set -e; \ # Usage: cflags-y += $(call as-option,-Wa$(comma)-isa=foo,) as-option = $(call try-run,\ - $(CC) $(KBUILD_CFLAGS) $(1) -c -xassembler /dev/null -o "$$TMP",$(1),$(2)) + $(CC) $(KBUILD_CFLAGS) $(1) -c -x assembler /dev/null -o "$$TMP",$(1),$(2)) # as-instr # Usage: cflags-y += $(call as-instr,instr,option1,option2) as-instr = $(call try-run,\ - printf "%b\n" "$(1)" | $(CC) $(KBUILD_AFLAGS) -c -xassembler -o "$$TMP" -,$(2),$(3)) + printf "%b\n" "$(1)" | $(CC) $(KBUILD_AFLAGS) -c -x assembler -o "$$TMP" -,$(2),$(3)) # cc-option # Usage: cflags-y += $(call cc-option,-march=winchip-c6,-march=i586) cc-option = $(call try-run,\ - $(CC) $(KBUILD_CPPFLAGS) $(KBUILD_CFLAGS) $(1) -c -xc /dev/null -o "$$TMP",$(1),$(2)) + $(CC) $(KBUILD_CPPFLAGS) $(KBUILD_CFLAGS) $(1) -c -x c /dev/null -o "$$TMP",$(1),$(2)) # cc-option-yn # Usage: flag := $(call cc-option-yn,-march=winchip-c6) cc-option-yn = $(call try-run,\ - $(CC) $(KBUILD_CPPFLAGS) $(KBUILD_CFLAGS) $(1) -c -xc /dev/null -o "$$TMP",y,n) + $(CC) $(KBUILD_CPPFLAGS) $(KBUILD_CFLAGS) $(1) -c -x c /dev/null -o "$$TMP",y,n) # cc-option-align # Prefix align with either -falign or -malign @@ -125,7 +125,7 @@ cc-option-align = $(subst -functions=0,,\ # cc-disable-warning # Usage: cflags-y += $(call cc-disable-warning,unused-but-set-variable) cc-disable-warning = $(call try-run,\ - $(CC) $(KBUILD_CPPFLAGS) $(KBUILD_CFLAGS) -W$(strip $(1)) -c -xc /dev/null -o "$$TMP",-Wno-$(strip $(1))) + $(CC) $(KBUILD_CPPFLAGS) $(KBUILD_CFLAGS) -W$(strip $(1)) -c -x c /dev/null -o "$$TMP",-Wno-$(strip $(1))) # cc-version # Usage gcc-ver := $(call cc-version) @@ -143,7 +143,7 @@ cc-ifversion = $(shell [ $(call cc-version, $(CC)) $(1) $(2) ] && echo $(3)) # cc-ldoption # Usage: ldflags += $(call cc-ldoption, -Wl$(comma)--hash-style=both) cc-ldoption = $(call try-run,\ - $(CC) $(1) -nostdlib -xc /dev/null -o "$$TMP",$(1),$(2)) + $(CC) $(1) -nostdlib -x c /dev/null -o "$$TMP",$(1),$(2)) # ld-option # Usage: LDFLAGS += $(call ld-option, -X) @@ -209,7 +209,7 @@ endif # >$< substitution to preserve $ when reloading .cmd file # note: when using inline perl scripts [perl -e '...$$t=1;...'] # in $(cmd_xxx) double $$ your perl vars -make-cmd = $(subst \#,\\\#,$(subst $$,$$$$,$(call escsq,$(cmd_$(1))))) +make-cmd = $(subst \\,\\\\,$(subst \#,\\\#,$(subst $$,$$$$,$(call escsq,$(cmd_$(1)))))) # Find any prerequisites that is newer than target or that does not exist. # PHONY targets skipped in both cases. 
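Context for the scripts/Kbuild.include hunks above: as-option, cc-option, cc-option-yn, cc-disable-warning and cc-ldoption all rely on the same try-run probe, which compiles an empty input (/dev/null) with the candidate flag and emits that flag only when the compiler accepts it, falling back to the alternative otherwise; the hunks switch the language selector from the joined "-xc"/"-xassembler" spelling to the separated "-x c"/"-x assembler" form (and tighten make-cmd quoting). A minimal stand-alone sketch of that probe in shell, for illustration only; cc_option and the example flags below are hypothetical names, not part of the patch:

# Hypothetical stand-alone equivalent of the cc-option probe:
# print the candidate flag if the compiler accepts it, else the fallback.
CC=${CC:-gcc}
cc_option() {    # usage: cc_option <candidate-flag> [fallback-flag]
    if $CC "$1" -c -x c /dev/null -o /dev/null 2>/dev/null; then
        echo "$1"
    else
        echo "${2:-}"
    fi
}

# Example: prefer -march=winchip-c6, fall back to -march=i586.
CFLAGS="$CFLAGS $(cc_option -march=winchip-c6 -march=i586)"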
diff --git a/scripts/gcc-version.sh b/scripts/gcc-version.sh index debecb5561c4..7f2126df91f2 100644 --- a/scripts/gcc-version.sh +++ b/scripts/gcc-version.sh @@ -22,10 +22,10 @@ if [ ${#compiler} -eq 0 ]; then exit 1 fi -MAJOR=$(echo __GNUC__ | $compiler -E -xc - | tail -n 1) -MINOR=$(echo __GNUC_MINOR__ | $compiler -E -xc - | tail -n 1) +MAJOR=$(echo __GNUC__ | $compiler -E -x c - | tail -n 1) +MINOR=$(echo __GNUC_MINOR__ | $compiler -E -x c - | tail -n 1) if [ "x$with_patchlevel" != "x" ] ; then - PATCHLEVEL=$(echo __GNUC_PATCHLEVEL__ | $compiler -E -xc - | tail -n 1) + PATCHLEVEL=$(echo __GNUC_PATCHLEVEL__ | $compiler -E -x c - | tail -n 1) printf "%02d%02d%02d\\n" $MAJOR $MINOR $PATCHLEVEL else printf "%02d%02d\\n" $MAJOR $MINOR diff --git a/scripts/gcc-x86_32-has-stack-protector.sh b/scripts/gcc-x86_32-has-stack-protector.sh index 29493dc4528d..12dbd0b11ea4 100644 --- a/scripts/gcc-x86_32-has-stack-protector.sh +++ b/scripts/gcc-x86_32-has-stack-protector.sh @@ -1,6 +1,6 @@ #!/bin/sh -echo "int foo(void) { char X[200]; return 3; }" | $* -S -xc -c -O0 -fstack-protector - -o - 2> /dev/null | grep -q "%gs" +echo "int foo(void) { char X[200]; return 3; }" | $* -S -x c -c -O0 -fstack-protector - -o - 2> /dev/null | grep -q "%gs" if [ "$?" -eq "0" ] ; then echo y else diff --git a/scripts/gcc-x86_64-has-stack-protector.sh b/scripts/gcc-x86_64-has-stack-protector.sh index afaec618b395..973e8c141567 100644 --- a/scripts/gcc-x86_64-has-stack-protector.sh +++ b/scripts/gcc-x86_64-has-stack-protector.sh @@ -1,6 +1,6 @@ #!/bin/sh -echo "int foo(void) { char X[200]; return 3; }" | $* -S -xc -c -O0 -mcmodel=kernel -fstack-protector - -o - 2> /dev/null | grep -q "%gs" +echo "int foo(void) { char X[200]; return 3; }" | $* -S -x c -c -O0 -mcmodel=kernel -fstack-protector - -o - 2> /dev/null | grep -q "%gs" if [ "$?" -eq "0" ] ; then echo y else diff --git a/scripts/kconfig/check.sh b/scripts/kconfig/check.sh index fa59cbf9d62c..854d9c7c675c 100755 --- a/scripts/kconfig/check.sh +++ b/scripts/kconfig/check.sh @@ -1,6 +1,6 @@ #!/bin/sh # Needed for systems without gettext -$* -xc -o /dev/null - > /dev/null 2>&1 << EOF +$* -x c -o /dev/null - > /dev/null 2>&1 << EOF #include <libintl.h> int main() { diff --git a/scripts/kconfig/lxdialog/check-lxdialog.sh b/scripts/kconfig/lxdialog/check-lxdialog.sh index 82cc3a85e7f8..50df490fe1de 100644 --- a/scripts/kconfig/lxdialog/check-lxdialog.sh +++ b/scripts/kconfig/lxdialog/check-lxdialog.sh @@ -38,7 +38,7 @@ trap "rm -f $tmp" 0 1 2 3 15 # Check if we can link to ncurses check() { - $cc -xc - -o $tmp 2>/dev/null <<'EOF' + $cc -x c - -o $tmp 2>/dev/null <<'EOF' #include CURSES_LOC main() {} EOF diff --git a/scripts/kconfig/streamline_config.pl b/scripts/kconfig/streamline_config.pl index bccf07ddd0b6..3346f4236ebe 100644 --- a/scripts/kconfig/streamline_config.pl +++ b/scripts/kconfig/streamline_config.pl @@ -463,6 +463,8 @@ while(<CIN>) { if (defined($configs{$1})) { if ($localyesconfig) { $setconfigs{$1} = 'y'; + print "$1=y\n"; + next; } else { $setconfigs{$1} = $2; } diff --git a/scripts/package/buildtar b/scripts/package/buildtar index 8a7b15598ea9..d0d748e72915 100644 --- a/scripts/package/buildtar +++ b/scripts/package/buildtar @@ -109,7 +109,7 @@ esac if tar --owner=root --group=root --help >/dev/null 2>&1; then opts="--owner=root --group=root" fi - tar cf - . 
$opts | ${compress} > "${tarball}${file_ext}" + tar cf - boot/* lib/* $opts | ${compress} > "${tarball}${file_ext}" ) echo "Tarball successfully created in ${tarball}${file_ext}" diff --git a/security/integrity/evm/evm_crypto.c b/security/integrity/evm/evm_crypto.c index 49a464f5595b..62fa2c574986 100644 --- a/security/integrity/evm/evm_crypto.c +++ b/security/integrity/evm/evm_crypto.c @@ -205,9 +205,9 @@ int evm_update_evmxattr(struct dentry *dentry, const char *xattr_name, rc = __vfs_setxattr_noperm(dentry, XATTR_NAME_EVM, &xattr_data, sizeof(xattr_data), 0); - } - else if (rc == -ENODATA) + } else if (rc == -ENODATA && inode->i_op->removexattr) { rc = inode->i_op->removexattr(dentry, XATTR_NAME_EVM); + } return rc; } diff --git a/security/selinux/netnode.c b/security/selinux/netnode.c index 86365857c088..04aa5c8adf74 100644 --- a/security/selinux/netnode.c +++ b/security/selinux/netnode.c @@ -174,7 +174,8 @@ static void sel_netnode_insert(struct sel_netnode *node) if (sel_netnode_hash[idx].size == SEL_NETNODE_HASH_BKT_LIMIT) { struct sel_netnode *tail; tail = list_entry( - rcu_dereference(sel_netnode_hash[idx].list.prev), + rcu_dereference_protected(sel_netnode_hash[idx].list.prev, + lockdep_is_held(&sel_netnode_lock)), struct sel_netnode, list); list_del_rcu(&tail->list); kfree_rcu(tail, rcu); diff --git a/security/yama/yama_lsm.c b/security/yama/yama_lsm.c index 573723843a04..1d6bf24c1172 100644 --- a/security/yama/yama_lsm.c +++ b/security/yama/yama_lsm.c @@ -138,7 +138,7 @@ static int yama_task_prctl(int option, unsigned long arg2, unsigned long arg3, if (arg2 == 0) { yama_ptracer_del(NULL, myself); rc = 0; - } else if (arg2 == PR_SET_PTRACER_ANY) { + } else if (arg2 == PR_SET_PTRACER_ANY || (int)arg2 == -1) { rc = yama_ptracer_add(NULL, myself); } else { struct task_struct *tracer; diff --git a/sound/arm/pxa2xx-ac97-lib.c b/sound/arm/pxa2xx-ac97-lib.c index 48d7c0aa5073..bd3ba8812e13 100644 --- a/sound/arm/pxa2xx-ac97-lib.c +++ b/sound/arm/pxa2xx-ac97-lib.c @@ -18,6 +18,7 @@ #include <linux/delay.h> #include <linux/module.h> #include <linux/io.h> +#include <linux/gpio.h> #include <sound/ac97_codec.h> #include <sound/pxa2xx-lib.h> @@ -148,6 +149,8 @@ static inline void pxa_ac97_warm_pxa27x(void) static inline void pxa_ac97_cold_pxa27x(void) { + unsigned int timeout; + GCR &= GCR_COLD_RST; /* clear everything but nCRST */ GCR &= ~GCR_COLD_RST; /* then assert nCRST */ @@ -157,8 +160,10 @@ static inline void pxa_ac97_cold_pxa27x(void) clk_enable(ac97conf_clk); udelay(5); clk_disable(ac97conf_clk); - GCR = GCR_COLD_RST; - udelay(50); + GCR = GCR_COLD_RST | GCR_WARM_RST; + timeout = 100; /* wait for the codec-ready bit to be set */ + while (!((GSR | gsr_bits) & (GSR_PCR | GSR_SCR)) && timeout--) + mdelay(1); } #endif @@ -340,8 +345,21 @@ int __devinit pxa2xx_ac97_hw_probe(struct platform_device *dev) } if (cpu_is_pxa27x()) { - /* Use GPIO 113 as AC97 Reset on Bulverde */ + /* + * This gpio is needed for a work-around to a bug in the ac97 + * controller during warm reset. The direction and level is set + * here so that it is an output driven high when switching from + * AC97_nRESET alt function to generic gpio. 
+ */ + ret = gpio_request_one(reset_gpio, GPIOF_OUT_INIT_HIGH, + "pxa27x ac97 reset"); + if (ret < 0) { + pr_err("%s: gpio_request_one() failed: %d\n", + __func__, ret); + goto err_conf; + } pxa27x_assert_ac97reset(reset_gpio, 0); + ac97conf_clk = clk_get(&dev->dev, "AC97CONFCLK"); if (IS_ERR(ac97conf_clk)) { ret = PTR_ERR(ac97conf_clk); @@ -384,6 +402,8 @@ EXPORT_SYMBOL_GPL(pxa2xx_ac97_hw_probe); void pxa2xx_ac97_hw_remove(struct platform_device *dev) { + if (cpu_is_pxa27x()) + gpio_free(reset_gpio); GCR |= GCR_ACLINK_OFF; free_irq(IRQ_AC97, NULL); if (ac97conf_clk) { diff --git a/sound/core/compress_offload.c b/sound/core/compress_offload.c index a68aed7fce02..a58cf3594130 100644 --- a/sound/core/compress_offload.c +++ b/sound/core/compress_offload.c @@ -102,12 +102,15 @@ static int snd_compr_open(struct inode *inode, struct file *f) if (dirn != compr->direction) { pr_err("this device doesn't support this direction\n"); + snd_card_unref(compr->card); return -EINVAL; } data = kzalloc(sizeof(*data), GFP_KERNEL); - if (!data) + if (!data) { + snd_card_unref(compr->card); return -ENOMEM; + } data->stream.ops = compr->ops; data->stream.direction = dirn; data->stream.private_data = compr->private_data; @@ -115,6 +118,7 @@ static int snd_compr_open(struct inode *inode, struct file *f) runtime = kzalloc(sizeof(*runtime), GFP_KERNEL); if (!runtime) { kfree(data); + snd_card_unref(compr->card); return -ENOMEM; } runtime->state = SNDRV_PCM_STATE_OPEN; @@ -128,7 +132,8 @@ static int snd_compr_open(struct inode *inode, struct file *f) kfree(runtime); kfree(data); } - return ret; + snd_card_unref(compr->card); + return 0; } static int snd_compr_free(struct inode *inode, struct file *f) diff --git a/sound/core/control.c b/sound/core/control.c index 2487a6bb1c54..daa4fc8872f3 100644 --- a/sound/core/control.c +++ b/sound/core/control.c @@ -86,6 +86,7 @@ static int snd_ctl_open(struct inode *inode, struct file *file) write_lock_irqsave(&card->ctl_files_rwlock, flags); list_add_tail(&ctl->list, &card->ctl_files); write_unlock_irqrestore(&card->ctl_files_rwlock, flags); + snd_card_unref(card); return 0; __error: @@ -93,6 +94,8 @@ static int snd_ctl_open(struct inode *inode, struct file *file) __error2: snd_card_file_remove(card, file); __error1: + if (card) + snd_card_unref(card); return err; } @@ -1433,6 +1436,8 @@ static ssize_t snd_ctl_read(struct file *file, char __user *buffer, spin_unlock_irq(&ctl->read_lock); schedule(); remove_wait_queue(&ctl->change_sleep, &wait); + if (ctl->card->shutdown) + return -ENODEV; if (signal_pending(current)) return -ERESTARTSYS; spin_lock_irq(&ctl->read_lock); diff --git a/sound/core/hwdep.c b/sound/core/hwdep.c index 75ea16f35b1a..3f7f6628cf7b 100644 --- a/sound/core/hwdep.c +++ b/sound/core/hwdep.c @@ -100,8 +100,10 @@ static int snd_hwdep_open(struct inode *inode, struct file * file) if (hw == NULL) return -ENODEV; - if (!try_module_get(hw->card->module)) + if (!try_module_get(hw->card->module)) { + snd_card_unref(hw->card); return -EFAULT; + } init_waitqueue_entry(&wait, current); add_wait_queue(&hw->open_wait, &wait); @@ -129,6 +131,10 @@ static int snd_hwdep_open(struct inode *inode, struct file * file) mutex_unlock(&hw->open_mutex); schedule(); mutex_lock(&hw->open_mutex); + if (hw->card->shutdown) { + err = -ENODEV; + break; + } if (signal_pending(current)) { err = -ERESTARTSYS; break; @@ -148,6 +154,7 @@ static int snd_hwdep_open(struct inode *inode, struct file * file) mutex_unlock(&hw->open_mutex); if (err < 0) module_put(hw->card->module); + 
snd_card_unref(hw->card); return err; } @@ -459,12 +466,15 @@ static int snd_hwdep_dev_disconnect(struct snd_device *device) mutex_unlock(®ister_mutex); return -EINVAL; } + mutex_lock(&hwdep->open_mutex); + wake_up(&hwdep->open_wait); #ifdef CONFIG_SND_OSSEMUL if (hwdep->ossreg) snd_unregister_oss_device(hwdep->oss_type, hwdep->card, hwdep->device); #endif snd_unregister_device(SNDRV_DEVICE_TYPE_HWDEP, hwdep->card, hwdep->device); list_del_init(&hwdep->list); + mutex_unlock(&hwdep->open_mutex); mutex_unlock(®ister_mutex); return 0; } diff --git a/sound/core/init.c b/sound/core/init.c index d8ec849af128..7b012d15c2cf 100644 --- a/sound/core/init.c +++ b/sound/core/init.c @@ -213,6 +213,7 @@ int snd_card_create(int idx, const char *xid, spin_lock_init(&card->files_lock); INIT_LIST_HEAD(&card->files_list); init_waitqueue_head(&card->shutdown_sleep); + atomic_set(&card->refcount, 0); #ifdef CONFIG_PM mutex_init(&card->power_lock); init_waitqueue_head(&card->power_sleep); @@ -446,21 +447,36 @@ static int snd_card_do_free(struct snd_card *card) return 0; } +/** + * snd_card_unref - release the reference counter + * @card: the card instance + * + * Decrements the reference counter. When it reaches to zero, wake up + * the sleeper and call the destructor if needed. + */ +void snd_card_unref(struct snd_card *card) +{ + if (atomic_dec_and_test(&card->refcount)) { + wake_up(&card->shutdown_sleep); + if (card->free_on_last_close) + snd_card_do_free(card); + } +} +EXPORT_SYMBOL(snd_card_unref); + int snd_card_free_when_closed(struct snd_card *card) { - int free_now = 0; - int ret = snd_card_disconnect(card); - if (ret) - return ret; + int ret; - spin_lock(&card->files_lock); - if (list_empty(&card->files_list)) - free_now = 1; - else - card->free_on_last_close = 1; - spin_unlock(&card->files_lock); + atomic_inc(&card->refcount); + ret = snd_card_disconnect(card); + if (ret) { + atomic_dec(&card->refcount); + return ret; + } - if (free_now) + card->free_on_last_close = 1; + if (atomic_dec_and_test(&card->refcount)) snd_card_do_free(card); return 0; } @@ -474,7 +490,7 @@ int snd_card_free(struct snd_card *card) return ret; /* wait, until all devices are ready for the free operation */ - wait_event(card->shutdown_sleep, list_empty(&card->files_list)); + wait_event(card->shutdown_sleep, !atomic_read(&card->refcount)); snd_card_do_free(card); return 0; } @@ -886,6 +902,7 @@ int snd_card_file_add(struct snd_card *card, struct file *file) return -ENODEV; } list_add(&mfile->list, &card->files_list); + atomic_inc(&card->refcount); spin_unlock(&card->files_lock); return 0; } @@ -908,7 +925,6 @@ EXPORT_SYMBOL(snd_card_file_add); int snd_card_file_remove(struct snd_card *card, struct file *file) { struct snd_monitor_file *mfile, *found = NULL; - int last_close = 0; spin_lock(&card->files_lock); list_for_each_entry(mfile, &card->files_list, list) { @@ -923,19 +939,13 @@ int snd_card_file_remove(struct snd_card *card, struct file *file) break; } } - if (list_empty(&card->files_list)) - last_close = 1; spin_unlock(&card->files_lock); - if (last_close) { - wake_up(&card->shutdown_sleep); - if (card->free_on_last_close) - snd_card_do_free(card); - } if (!found) { snd_printk(KERN_ERR "ALSA card file remove problem (%p)\n", file); return -ENOENT; } kfree(found); + snd_card_unref(card); return 0; } diff --git a/sound/core/oss/mixer_oss.c b/sound/core/oss/mixer_oss.c index 18297f7f2c55..c353768854e6 100644 --- a/sound/core/oss/mixer_oss.c +++ b/sound/core/oss/mixer_oss.c @@ -52,14 +52,19 @@ static int 
snd_mixer_oss_open(struct inode *inode, struct file *file) SNDRV_OSS_DEVICE_TYPE_MIXER); if (card == NULL) return -ENODEV; - if (card->mixer_oss == NULL) + if (card->mixer_oss == NULL) { + snd_card_unref(card); return -ENODEV; + } err = snd_card_file_add(card, file); - if (err < 0) + if (err < 0) { + snd_card_unref(card); return err; + } fmixer = kzalloc(sizeof(*fmixer), GFP_KERNEL); if (fmixer == NULL) { snd_card_file_remove(card, file); + snd_card_unref(card); return -ENOMEM; } fmixer->card = card; @@ -68,8 +73,10 @@ static int snd_mixer_oss_open(struct inode *inode, struct file *file) if (!try_module_get(card->module)) { kfree(fmixer); snd_card_file_remove(card, file); + snd_card_unref(card); return -EFAULT; } + snd_card_unref(card); return 0; } diff --git a/sound/core/oss/pcm_oss.c b/sound/core/oss/pcm_oss.c index 08fde0060fd9..4c1cc51772e6 100644 --- a/sound/core/oss/pcm_oss.c +++ b/sound/core/oss/pcm_oss.c @@ -2441,6 +2441,10 @@ static int snd_pcm_oss_open(struct inode *inode, struct file *file) mutex_unlock(&pcm->open_mutex); schedule(); mutex_lock(&pcm->open_mutex); + if (pcm->card->shutdown) { + err = -ENODEV; + break; + } if (signal_pending(current)) { err = -ERESTARTSYS; break; @@ -2450,6 +2454,7 @@ static int snd_pcm_oss_open(struct inode *inode, struct file *file) mutex_unlock(&pcm->open_mutex); if (err < 0) goto __error; + snd_card_unref(pcm->card); return err; __error: @@ -2457,6 +2462,8 @@ static int snd_pcm_oss_open(struct inode *inode, struct file *file) __error2: snd_card_file_remove(pcm->card, file); __error1: + if (pcm) + snd_card_unref(pcm->card); return err; } diff --git a/sound/core/pcm.c b/sound/core/pcm.c index 1a3070b4e5b5..e30e1be30a21 100644 --- a/sound/core/pcm.c +++ b/sound/core/pcm.c @@ -1086,11 +1086,19 @@ static int snd_pcm_dev_disconnect(struct snd_device *device) if (list_empty(&pcm->list)) goto unlock; + mutex_lock(&pcm->open_mutex); + wake_up(&pcm->open_wait); list_del_init(&pcm->list); for (cidx = 0; cidx < 2; cidx++) - for (substream = pcm->streams[cidx].substream; substream; substream = substream->next) - if (substream->runtime) + for (substream = pcm->streams[cidx].substream; substream; substream = substream->next) { + snd_pcm_stream_lock_irq(substream); + if (substream->runtime) { substream->runtime->status->state = SNDRV_PCM_STATE_DISCONNECTED; + wake_up(&substream->runtime->sleep); + wake_up(&substream->runtime->tsleep); + } + snd_pcm_stream_unlock_irq(substream); + } list_for_each_entry(notify, &snd_pcm_notify_list, list) { notify->n_disconnect(pcm); } @@ -1106,6 +1114,7 @@ static int snd_pcm_dev_disconnect(struct snd_device *device) } snd_unregister_device(devtype, pcm->card, pcm->device); } + mutex_unlock(&pcm->open_mutex); unlock: mutex_unlock(®ister_mutex); return 0; diff --git a/sound/core/pcm_native.c b/sound/core/pcm_native.c index 3fe99e644eb8..d535b341aa82 100644 --- a/sound/core/pcm_native.c +++ b/sound/core/pcm_native.c @@ -369,6 +369,14 @@ static int period_to_usecs(struct snd_pcm_runtime *runtime) return usecs; } +static void snd_pcm_set_state(struct snd_pcm_substream *substream, int state) +{ + snd_pcm_stream_lock_irq(substream); + if (substream->runtime->status->state != SNDRV_PCM_STATE_DISCONNECTED) + substream->runtime->status->state = state; + snd_pcm_stream_unlock_irq(substream); +} + static int snd_pcm_hw_params(struct snd_pcm_substream *substream, struct snd_pcm_hw_params *params) { @@ -452,7 +460,7 @@ static int snd_pcm_hw_params(struct snd_pcm_substream *substream, runtime->boundary *= 2; 
snd_pcm_timer_resolution_change(substream); - runtime->status->state = SNDRV_PCM_STATE_SETUP; + snd_pcm_set_state(substream, SNDRV_PCM_STATE_SETUP); if (pm_qos_request_active(&substream->latency_pm_qos_req)) pm_qos_remove_request(&substream->latency_pm_qos_req); @@ -464,7 +472,7 @@ static int snd_pcm_hw_params(struct snd_pcm_substream *substream, /* hardware might be unusable from this time, so we force application to retry to set the correct hardware parameter settings */ - runtime->status->state = SNDRV_PCM_STATE_OPEN; + snd_pcm_set_state(substream, SNDRV_PCM_STATE_OPEN); if (substream->ops->hw_free != NULL) substream->ops->hw_free(substream); return err; @@ -512,7 +520,7 @@ static int snd_pcm_hw_free(struct snd_pcm_substream *substream) return -EBADFD; if (substream->ops->hw_free) result = substream->ops->hw_free(substream); - runtime->status->state = SNDRV_PCM_STATE_OPEN; + snd_pcm_set_state(substream, SNDRV_PCM_STATE_OPEN); pm_qos_remove_request(&substream->latency_pm_qos_req); return result; } @@ -1320,7 +1328,7 @@ static void snd_pcm_post_prepare(struct snd_pcm_substream *substream, int state) { struct snd_pcm_runtime *runtime = substream->runtime; runtime->control->appl_ptr = runtime->status->hw_ptr; - runtime->status->state = SNDRV_PCM_STATE_PREPARED; + snd_pcm_set_state(substream, SNDRV_PCM_STATE_PREPARED); } static struct action_ops snd_pcm_action_prepare = { @@ -1500,6 +1508,10 @@ static int snd_pcm_drain(struct snd_pcm_substream *substream, down_read(&snd_pcm_link_rwsem); snd_pcm_stream_lock_irq(substream); remove_wait_queue(&to_check->sleep, &wait); + if (card->shutdown) { + result = -ENODEV; + break; + } if (tout == 0) { if (substream->runtime->status->state == SNDRV_PCM_STATE_SUSPENDED) result = -ESTRPIPE; @@ -1623,6 +1635,7 @@ static int snd_pcm_link(struct snd_pcm_substream *substream, int fd) write_unlock_irq(&snd_pcm_link_rwlock); up_write(&snd_pcm_link_rwsem); _nolock: + snd_card_unref(substream1->pcm->card); fput(file); if (res < 0) kfree(group); @@ -2097,7 +2110,10 @@ static int snd_pcm_playback_open(struct inode *inode, struct file *file) return err; pcm = snd_lookup_minor_data(iminor(inode), SNDRV_DEVICE_TYPE_PCM_PLAYBACK); - return snd_pcm_open(file, pcm, SNDRV_PCM_STREAM_PLAYBACK); + err = snd_pcm_open(file, pcm, SNDRV_PCM_STREAM_PLAYBACK); + if (pcm) + snd_card_unref(pcm->card); + return err; } static int snd_pcm_capture_open(struct inode *inode, struct file *file) @@ -2108,7 +2124,10 @@ static int snd_pcm_capture_open(struct inode *inode, struct file *file) return err; pcm = snd_lookup_minor_data(iminor(inode), SNDRV_DEVICE_TYPE_PCM_CAPTURE); - return snd_pcm_open(file, pcm, SNDRV_PCM_STREAM_CAPTURE); + err = snd_pcm_open(file, pcm, SNDRV_PCM_STREAM_CAPTURE); + if (pcm) + snd_card_unref(pcm->card); + return err; } static int snd_pcm_open(struct file *file, struct snd_pcm *pcm, int stream) @@ -2145,6 +2164,10 @@ static int snd_pcm_open(struct file *file, struct snd_pcm *pcm, int stream) mutex_unlock(&pcm->open_mutex); schedule(); mutex_lock(&pcm->open_mutex); + if (pcm->card->shutdown) { + err = -ENODEV; + break; + } if (signal_pending(current)) { err = -ERESTARTSYS; break; diff --git a/sound/core/rawmidi.c b/sound/core/rawmidi.c index ebf6e49ad3d4..1bb95aeea084 100644 --- a/sound/core/rawmidi.c +++ b/sound/core/rawmidi.c @@ -379,8 +379,10 @@ static int snd_rawmidi_open(struct inode *inode, struct file *file) if (rmidi == NULL) return -ENODEV; - if (!try_module_get(rmidi->card->module)) + if (!try_module_get(rmidi->card->module)) { + snd_card_unref(rmidi->card); 
return -ENXIO; + } mutex_lock(&rmidi->open_mutex); card = rmidi->card; @@ -422,6 +424,10 @@ static int snd_rawmidi_open(struct inode *inode, struct file *file) mutex_unlock(&rmidi->open_mutex); schedule(); mutex_lock(&rmidi->open_mutex); + if (rmidi->card->shutdown) { + err = -ENODEV; + break; + } if (signal_pending(current)) { err = -ERESTARTSYS; break; @@ -440,6 +446,7 @@ static int snd_rawmidi_open(struct inode *inode, struct file *file) #endif file->private_data = rawmidi_file; mutex_unlock(&rmidi->open_mutex); + snd_card_unref(rmidi->card); return 0; __error: @@ -447,6 +454,7 @@ static int snd_rawmidi_open(struct inode *inode, struct file *file) __error_card: mutex_unlock(&rmidi->open_mutex); module_put(rmidi->card->module); + snd_card_unref(rmidi->card); return err; } @@ -991,6 +999,8 @@ static ssize_t snd_rawmidi_read(struct file *file, char __user *buf, size_t coun spin_unlock_irq(&runtime->lock); schedule(); remove_wait_queue(&runtime->sleep, &wait); + if (rfile->rmidi->card->shutdown) + return -ENODEV; if (signal_pending(current)) return result > 0 ? result : -ERESTARTSYS; if (!runtime->avail) @@ -1234,6 +1244,8 @@ static ssize_t snd_rawmidi_write(struct file *file, const char __user *buf, spin_unlock_irq(&runtime->lock); timeout = schedule_timeout(30 * HZ); remove_wait_queue(&runtime->sleep, &wait); + if (rfile->rmidi->card->shutdown) + return -ENODEV; if (signal_pending(current)) return result > 0 ? result : -ERESTARTSYS; if (!runtime->avail && !timeout) @@ -1609,9 +1621,20 @@ static int snd_rawmidi_dev_register(struct snd_device *device) static int snd_rawmidi_dev_disconnect(struct snd_device *device) { struct snd_rawmidi *rmidi = device->device_data; + int dir; mutex_lock(&register_mutex); + mutex_lock(&rmidi->open_mutex); + wake_up(&rmidi->open_wait); list_del_init(&rmidi->list); + for (dir = 0; dir < 2; dir++) { + struct snd_rawmidi_substream *s; + list_for_each_entry(s, &rmidi->streams[dir].substreams, list) { + if (s->runtime) + wake_up(&s->runtime->sleep); + } + } + #ifdef CONFIG_SND_OSSEMUL if (rmidi->ossreg) { if ((int)rmidi->device == midi_map[rmidi->card->number]) { @@ -1626,6 +1649,7 @@ static int snd_rawmidi_dev_disconnect(struct snd_device *device) } #endif /* CONFIG_SND_OSSEMUL */ snd_unregister_device(SNDRV_DEVICE_TYPE_RAWMIDI, rmidi->card, rmidi->device); + mutex_unlock(&rmidi->open_mutex); mutex_unlock(&register_mutex); return 0; } diff --git a/sound/core/sound.c b/sound/core/sound.c index 28f35593a750..3700d96fb3fb 100644 --- a/sound/core/sound.c +++ b/sound/core/sound.c @@ -99,6 +99,10 @@ static void snd_request_other(int minor) * * Checks that a minor device with the specified type is registered, and returns * its user data pointer. + * + * This function increments the reference counter of the card instance + * if an associated instance with the given minor number and type is found. + * The caller must call snd_card_unref() appropriately later.
*/ void *snd_lookup_minor_data(unsigned int minor, int type) { @@ -109,9 +113,11 @@ void *snd_lookup_minor_data(unsigned int minor, int type) return NULL; mutex_lock(&sound_mutex); mreg = snd_minors[minor]; - if (mreg && mreg->type == type) + if (mreg && mreg->type == type) { private_data = mreg->private_data; - else + if (private_data && mreg->card_ptr) + atomic_inc(&mreg->card_ptr->refcount); + } else private_data = NULL; mutex_unlock(&sound_mutex); return private_data; @@ -276,6 +282,7 @@ int snd_register_device_for_dev(int type, struct snd_card *card, int dev, preg->device = dev; preg->f_ops = f_ops; preg->private_data = private_data; + preg->card_ptr = card; mutex_lock(&sound_mutex); #ifdef CONFIG_SND_DYNAMIC_MINORS minor = snd_find_free_minor(type); diff --git a/sound/core/sound_oss.c b/sound/core/sound_oss.c index c70092043061..ec86009141d0 100644 --- a/sound/core/sound_oss.c +++ b/sound/core/sound_oss.c @@ -40,6 +40,9 @@ static struct snd_minor *snd_oss_minors[SNDRV_OSS_MINORS]; static DEFINE_MUTEX(sound_oss_mutex); +/* NOTE: This function increments the refcount of the associated card like + * snd_lookup_minor_data(); the caller must call snd_card_unref() appropriately + */ void *snd_lookup_oss_minor_data(unsigned int minor, int type) { struct snd_minor *mreg; @@ -49,9 +52,11 @@ void *snd_lookup_oss_minor_data(unsigned int minor, int type) return NULL; mutex_lock(&sound_oss_mutex); mreg = snd_oss_minors[minor]; - if (mreg && mreg->type == type) + if (mreg && mreg->type == type) { private_data = mreg->private_data; - else + if (private_data && mreg->card_ptr) + atomic_inc(&mreg->card_ptr->refcount); + } else private_data = NULL; mutex_unlock(&sound_oss_mutex); return private_data; @@ -123,6 +128,7 @@ int snd_register_oss_device(int type, struct snd_card *card, int dev, preg->device = dev; preg->f_ops = f_ops; preg->private_data = private_data; + preg->card_ptr = card; mutex_lock(&sound_oss_mutex); snd_oss_minors[minor] = preg; minor_unit = SNDRV_MINOR_OSS_DEVICE(minor); diff --git a/sound/drivers/aloop.c b/sound/drivers/aloop.c index ad079b63b8ba..bdc963e722ad 100644 --- a/sound/drivers/aloop.c +++ b/sound/drivers/aloop.c @@ -119,6 +119,7 @@ struct loopback_pcm { unsigned int period_size_frac; unsigned long last_jiffies; struct timer_list timer; + spinlock_t timer_lock; }; static struct platform_device *devices[SNDRV_CARDS]; @@ -169,6 +170,7 @@ static void loopback_timer_start(struct loopback_pcm *dpcm) unsigned long tick; unsigned int rate_shift = get_rate_shift(dpcm); + spin_lock(&dpcm->timer_lock); if (rate_shift != dpcm->pcm_rate_shift) { dpcm->pcm_rate_shift = rate_shift; dpcm->period_size_frac = frac_pos(dpcm, dpcm->pcm_period_size); @@ -181,12 +183,15 @@ static void loopback_timer_start(struct loopback_pcm *dpcm) tick = (tick + dpcm->pcm_bps - 1) / dpcm->pcm_bps; dpcm->timer.expires = jiffies + tick; add_timer(&dpcm->timer); + spin_unlock(&dpcm->timer_lock); } static inline void loopback_timer_stop(struct loopback_pcm *dpcm) { + spin_lock(&dpcm->timer_lock); del_timer(&dpcm->timer); dpcm->timer.expires = 0; + spin_unlock(&dpcm->timer_lock); } #define CABLE_VALID_PLAYBACK (1 << SNDRV_PCM_STREAM_PLAYBACK) @@ -659,6 +664,7 @@ static int loopback_open(struct snd_pcm_substream *substream) dpcm->substream = substream; setup_timer(&dpcm->timer, loopback_timer_function, (unsigned long)dpcm); + spin_lock_init(&dpcm->timer_lock); cable = loopback->cables[substream->number][dev]; if (!cable) { diff --git a/sound/pci/ac97/ac97_codec.c b/sound/pci/ac97/ac97_codec.c index 
9473fca9681d..8b0f99688303 100644 --- a/sound/pci/ac97/ac97_codec.c +++ b/sound/pci/ac97/ac97_codec.c @@ -1271,6 +1271,8 @@ static int snd_ac97_cvol_new(struct snd_card *card, char *name, int reg, unsigne tmp.index = ac97->num; kctl = snd_ctl_new1(&tmp, ac97); } + if (!kctl) + return -ENOMEM; if (reg >= AC97_PHONE && reg <= AC97_PCM) set_tlv_db_scale(kctl, db_scale_5bit_12db_max); else diff --git a/sound/pci/ali5451/ali5451.c b/sound/pci/ali5451/ali5451.c index bdd6164e9c7e..131fd1f5db05 100644 --- a/sound/pci/ali5451/ali5451.c +++ b/sound/pci/ali5451/ali5451.c @@ -1435,7 +1435,7 @@ static snd_pcm_uframes_t snd_ali_pointer(struct snd_pcm_substream *substream) spin_lock(&codec->reg_lock); if (!pvoice->running) { - spin_unlock_irq(&codec->reg_lock); + spin_unlock(&codec->reg_lock); return 0; } outb(pvoice->number, ALI_REG(codec, ALI_GC_CIR)); diff --git a/sound/pci/emu10k1/emu10k1_main.c b/sound/pci/emu10k1/emu10k1_main.c index 754924081d0a..a78fdf466fa7 100644 --- a/sound/pci/emu10k1/emu10k1_main.c +++ b/sound/pci/emu10k1/emu10k1_main.c @@ -1416,6 +1416,15 @@ static struct snd_emu_chip_details emu_chip_details[] = { .ca0108_chip = 1, .spk71 = 1, .emu_model = EMU_MODEL_EMU1010B}, /* EMU 1010 new revision */ + /* Tested by Maxim Kachur <mcdebugger@duganet.ru> 17th Oct 2012. */ + /* This is MAEM8986, 0202 is MAEM8980 */ + {.vendor = 0x1102, .device = 0x0008, .subsystem = 0x40071102, + .driver = "Audigy2", .name = "E-mu 1010 PCIe [MAEM8986]", + .id = "EMU1010", + .emu10k2_chip = 1, + .ca0108_chip = 1, + .spk71 = 1, + .emu_model = EMU_MODEL_EMU1010B}, /* EMU 1010 PCIe */ /* Tested by James@superbug.co.uk 8th July 2005. */ /* This is MAEM8810, 0202 is MAEM8820 */ {.vendor = 0x1102, .device = 0x0004, .subsystem = 0x40011102, diff --git a/sound/pci/hda/hda_codec.c b/sound/pci/hda/hda_codec.c index e525a32f1f53..f60328dc99bf 100644 --- a/sound/pci/hda/hda_codec.c +++ b/sound/pci/hda/hda_codec.c @@ -2279,6 +2279,7 @@ int snd_hda_codec_reset(struct hda_codec *codec) } if (codec->patch_ops.free) codec->patch_ops.free(codec); + memset(&codec->patch_ops, 0, sizeof(codec->patch_ops)); snd_hda_jack_tbl_clear(codec); codec->proc_widget_hook = NULL; codec->spec = NULL; @@ -2292,7 +2293,6 @@ int snd_hda_codec_reset(struct hda_codec *codec) codec->num_pcms = 0; codec->pcm_info = NULL; codec->preset = NULL; - memset(&codec->patch_ops, 0, sizeof(codec->patch_ops)); codec->slave_dig_outs = NULL; codec->spdif_status_reset = 0; module_put(codec->owner); diff --git a/sound/pci/hda/hda_proc.c b/sound/pci/hda/hda_proc.c index e59e2f059b6e..0074aee6d393 100644 --- a/sound/pci/hda/hda_proc.c +++ b/sound/pci/hda/hda_proc.c @@ -412,7 +412,7 @@ static void print_digital_conv(struct snd_info_buffer *buffer, if (digi1 & AC_DIG1_EMPHASIS) snd_iprintf(buffer, " Preemphasis"); if (digi1 & AC_DIG1_COPYRIGHT) - snd_iprintf(buffer, " Copyright"); + snd_iprintf(buffer, " Non-Copyright"); if (digi1 & AC_DIG1_NONAUDIO) snd_iprintf(buffer, " Non-Audio"); if (digi1 & AC_DIG1_PROFESSIONAL) diff --git a/sound/pci/hda/patch_analog.c b/sound/pci/hda/patch_analog.c index 7143393927da..e23ad3f045fc 100644 --- a/sound/pci/hda/patch_analog.c +++ b/sound/pci/hda/patch_analog.c @@ -544,6 +544,7 @@ static int ad198x_build_pcms(struct hda_codec *codec) if (spec->multiout.dig_out_nid) { info++; codec->num_pcms++; + codec->spdif_status_reset = 1; info->name = "AD198x Digital"; info->pcm_type = HDA_PCM_TYPE_SPDIF; info->stream[SNDRV_PCM_STREAM_PLAYBACK] = ad198x_pcm_digital_playback; diff --git a/sound/pci/hda/patch_ca0132.c 
b/sound/pci/hda/patch_ca0132.c index 21d91d580da8..70b4f02da2c0 100644 --- a/sound/pci/hda/patch_ca0132.c +++ b/sound/pci/hda/patch_ca0132.c @@ -276,6 +276,10 @@ static int _add_switch(struct hda_codec *codec, hda_nid_t nid, const char *pfx, int type = dir ? HDA_INPUT : HDA_OUTPUT; struct snd_kcontrol_new knew = HDA_CODEC_MUTE_MONO(namestr, nid, chan, 0, type); + if ((query_amp_caps(codec, nid, type) & AC_AMPCAP_MUTE) == 0) { + snd_printdd("Skipping '%s %s Switch' (no mute on node 0x%x)\n", pfx, dirstr[dir], nid); + return 0; + } sprintf(namestr, "%s %s Switch", pfx, dirstr[dir]); return snd_hda_ctl_add(codec, nid, snd_ctl_new1(&knew, codec)); } @@ -287,6 +291,10 @@ static int _add_volume(struct hda_codec *codec, hda_nid_t nid, const char *pfx, int type = dir ? HDA_INPUT : HDA_OUTPUT; struct snd_kcontrol_new knew = HDA_CODEC_VOLUME_MONO(namestr, nid, chan, 0, type); + if ((query_amp_caps(codec, nid, type) & AC_AMPCAP_NUM_STEPS) == 0) { + snd_printdd("Skipping '%s %s Volume' (no amp on node 0x%x)\n", pfx, dirstr[dir], nid); + return 0; + } sprintf(namestr, "%s %s Volume", pfx, dirstr[dir]); return snd_hda_ctl_add(codec, nid, snd_ctl_new1(&knew, codec)); } diff --git a/sound/pci/hda/patch_cirrus.c b/sound/pci/hda/patch_cirrus.c index c83ccdba1e5a..3605fbb6ef15 100644 --- a/sound/pci/hda/patch_cirrus.c +++ b/sound/pci/hda/patch_cirrus.c @@ -95,8 +95,8 @@ enum { #define CS420X_VENDOR_NID 0x11 #define CS_DIG_OUT1_PIN_NID 0x10 #define CS_DIG_OUT2_PIN_NID 0x15 -#define CS_DMIC1_PIN_NID 0x12 -#define CS_DMIC2_PIN_NID 0x0e +#define CS_DMIC1_PIN_NID 0x0e +#define CS_DMIC2_PIN_NID 0x12 /* coef indices */ #define IDX_SPDIF_STAT 0x0000 @@ -460,6 +460,7 @@ static int parse_output(struct hda_codec *codec) memcpy(cfg->speaker_pins, cfg->line_out_pins, sizeof(cfg->speaker_pins)); cfg->line_outs = 0; + memset(cfg->line_out_pins, 0, sizeof(cfg->line_out_pins)); } return 0; @@ -1084,14 +1085,18 @@ static void init_input(struct hda_codec *codec) cs_automic(codec); coef = 0x000a; /* ADC1/2 - Digital and Analog Soft Ramp */ + cs_vendor_coef_set(codec, IDX_ADC_CFG, coef); + + coef = cs_vendor_coef_get(codec, IDX_BEEP_CFG); if (is_active_pin(codec, CS_DMIC2_PIN_NID)) - coef |= 0x0500; /* DMIC2 2 chan on, GPIO1 off */ + coef |= 1 << 4; /* DMIC2 2 chan on, GPIO1 off */ if (is_active_pin(codec, CS_DMIC1_PIN_NID)) - coef |= 0x1800; /* DMIC1 2 chan on, GPIO0 off + coef |= 1 << 3; /* DMIC1 2 chan on, GPIO0 off * No effect if SPDIF_OUT2 is * selected in IDX_SPDIF_CTL. 
*/ - cs_vendor_coef_set(codec, IDX_ADC_CFG, coef); + + cs_vendor_coef_set(codec, IDX_BEEP_CFG, coef); } else { if (spec->mic_detect) cs_automic(codec); @@ -1112,7 +1117,7 @@ static const struct hda_verb cs_coef_init_verbs[] = { | 0x0400 /* Disable Coefficient Auto increment */ )}, /* Beep */ - {0x11, AC_VERB_SET_COEF_INDEX, IDX_DAC_CFG}, + {0x11, AC_VERB_SET_COEF_INDEX, IDX_BEEP_CFG}, {0x11, AC_VERB_SET_PROC_COEF, 0x0007}, /* Enable Beep thru DAC1/2/3 */ {} /* terminator */ @@ -1427,7 +1432,7 @@ static int patch_cs420x(struct hda_codec *codec) return 0; error: - kfree(codec->spec); + cs_free(codec); codec->spec = NULL; return err; } @@ -1984,7 +1989,7 @@ static int patch_cs4210(struct hda_codec *codec) return 0; error: - kfree(codec->spec); + cs_free(codec); codec->spec = NULL; return err; } @@ -2009,7 +2014,7 @@ static int patch_cs4213(struct hda_codec *codec) return 0; error: - kfree(codec->spec); + cs_free(codec); codec->spec = NULL; return err; } diff --git a/sound/pci/hda/patch_hdmi.c b/sound/pci/hda/patch_hdmi.c index 1b3b8c8b0226..f943889afbec 100644 --- a/sound/pci/hda/patch_hdmi.c +++ b/sound/pci/hda/patch_hdmi.c @@ -962,8 +962,12 @@ static int hdmi_pcm_open(struct hda_pcm_stream *hinfo, if (!static_hdmi_pcm && (eld->eld_valid || eld->lpcm_sad_ready)) { snd_hdmi_eld_update_pcm_info(eld, hinfo); if (hinfo->channels_min > hinfo->channels_max || - !hinfo->rates || !hinfo->formats) + !hinfo->rates || !hinfo->formats) { + per_cvt->assigned = 0; + hinfo->nid = 0; + snd_hda_spdif_ctls_unassign(codec, pin_idx); return -ENODEV; + } } /* Store the updated parameters */ @@ -1027,6 +1031,7 @@ static void hdmi_present_sense(struct hdmi_spec_per_pin *per_pin, int repoll) "HDMI status: Codec=%d Pin=%d Presence_Detect=%d ELD_Valid=%d\n", codec->addr, pin_nid, eld->monitor_present, eld_valid); + eld->eld_valid = false; if (eld_valid) { if (!snd_hdmi_get_eld(eld, codec, pin_nid)) snd_hdmi_show_eld(eld); @@ -1317,6 +1322,9 @@ static int generic_hdmi_build_jack(struct hda_codec *codec, int pin_idx) if (pcmdev > 0) sprintf(hdmi_str + strlen(hdmi_str), ",pcm=%d", pcmdev); + if (!is_jack_detectable(codec, per_pin->pin_nid)) + strncat(hdmi_str, " Phantom", + sizeof(hdmi_str) - strlen(hdmi_str) - 1); return snd_hda_jack_add_kctl(codec, per_pin->pin_nid, hdmi_str, 0); } diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c index 152d91b7e103..adb97d6c504a 100644 --- a/sound/pci/hda/patch_realtek.c +++ b/sound/pci/hda/patch_realtek.c @@ -188,6 +188,7 @@ struct alc_spec { unsigned int vol_in_capsrc:1; /* use capsrc volume (ADC has no vol) */ unsigned int parse_flags; /* passed to snd_hda_parse_pin_defcfg() */ unsigned int shared_mic_hp:1; /* HP/Mic-in sharing */ + unsigned int no_primary_hp:1; /* Don't prefer HP pins to speaker pins */ /* auto-mute control */ int automute_mode; @@ -601,6 +602,8 @@ static void alc_line_automute(struct hda_codec *codec) { struct alc_spec *spec = codec->spec; + if (spec->autocfg.line_out_type == AUTO_PIN_SPEAKER_OUT) + return; /* check LO jack only when it's different from HP */ if (spec->autocfg.line_out_pins[0] == spec->autocfg.hp_pins[0]) return; @@ -2662,8 +2665,10 @@ static const char *alc_get_line_out_pfx(struct alc_spec *spec, int ch, return "PCM"; break; } - if (snd_BUG_ON(ch >= ARRAY_SIZE(channel_name))) + if (ch >= ARRAY_SIZE(channel_name)) { + snd_BUG(); return "PCM"; + } return channel_name[ch]; } @@ -4317,6 +4322,7 @@ static void alc_auto_init_std(struct hda_codec *codec) ((spec)->beep_amp = HDA_COMPOSE_AMP_VAL(nid, 3, idx, dir)) static const 
struct snd_pci_quirk beep_white_list[] = { + SND_PCI_QUIRK(0x1043, 0x103c, "ASUS", 1), SND_PCI_QUIRK(0x1043, 0x829f, "ASUS", 1), SND_PCI_QUIRK(0x1043, 0x83ce, "EeePC", 1), SND_PCI_QUIRK(0x1043, 0x831a, "EeePC", 1), @@ -4365,7 +4371,8 @@ static int alc_parse_auto_config(struct hda_codec *codec, return 0; /* can't find valid BIOS pin config */ } - if (cfg->line_out_type == AUTO_PIN_SPEAKER_OUT && + if (!spec->no_primary_hp && + cfg->line_out_type == AUTO_PIN_SPEAKER_OUT && cfg->line_outs <= cfg->hp_outs) { /* use HP as primary out */ cfg->speaker_outs = cfg->line_outs; @@ -4734,6 +4741,7 @@ static const struct snd_pci_quirk alc880_fixup_tbl[] = { SND_PCI_QUIRK(0x1584, 0x9077, "Uniwill P53", ALC880_FIXUP_VOL_KNOB), SND_PCI_QUIRK(0x161f, 0x203d, "W810", ALC880_FIXUP_W810), SND_PCI_QUIRK(0x161f, 0x205d, "Medion Rim 2150", ALC880_FIXUP_MEDION_RIM), + SND_PCI_QUIRK(0x1631, 0xe011, "PB 13201056", ALC880_FIXUP_6ST), SND_PCI_QUIRK(0x1734, 0x107c, "FSC F1734", ALC880_FIXUP_F1734), SND_PCI_QUIRK(0x1734, 0x1094, "FSC Amilo M1451G", ALC880_FIXUP_FUJITSU), SND_PCI_QUIRK(0x1734, 0x10ac, "FSC AMILO Xi 1526", ALC880_FIXUP_F1734), @@ -5076,6 +5084,7 @@ enum { ALC889_FIXUP_DAC_ROUTE, ALC889_FIXUP_MBP_VREF, ALC889_FIXUP_IMAC91_VREF, + ALC882_FIXUP_NO_PRIMARY_HP, }; static void alc889_fixup_coef(struct hda_codec *codec, @@ -5199,6 +5208,17 @@ static void alc889_fixup_imac91_vref(struct hda_codec *codec, spec->keep_vref_in_automute = 1; } +/* Don't take HP output as primary + * strangely, the speaker output doesn't work on VAIO Z through DAC 0x05 + */ +static void alc882_fixup_no_primary_hp(struct hda_codec *codec, + const struct alc_fixup *fix, int action) +{ + struct alc_spec *spec = codec->spec; + if (action == ALC_FIXUP_ACT_PRE_PROBE) + spec->no_primary_hp = 1; +} + static const struct alc_fixup alc882_fixups[] = { [ALC882_FIXUP_ABIT_AW9D_MAX] = { .type = ALC_FIXUP_PINS, @@ -5381,6 +5401,10 @@ static const struct alc_fixup alc882_fixups[] = { .chained = true, .chain_id = ALC882_FIXUP_GPIO1, }, + [ALC882_FIXUP_NO_PRIMARY_HP] = { + .type = ALC_FIXUP_FUNC, + .v.func = alc882_fixup_no_primary_hp, + }, }; static const struct snd_pci_quirk alc882_fixup_tbl[] = { @@ -5415,6 +5439,8 @@ static const struct snd_pci_quirk alc882_fixup_tbl[] = { SND_PCI_QUIRK(0x1043, 0x1971, "Asus W2JC", ALC882_FIXUP_ASUS_W2JC), SND_PCI_QUIRK(0x1043, 0x835f, "Asus Eee 1601", ALC888_FIXUP_EEE1601), SND_PCI_QUIRK(0x104d, 0x9047, "Sony Vaio TT", ALC889_FIXUP_VAIO_TT), + SND_PCI_QUIRK(0x104d, 0x905a, "Sony Vaio Z", ALC882_FIXUP_NO_PRIMARY_HP), + SND_PCI_QUIRK(0x104d, 0x9043, "Sony Vaio VGC-LN51JGB", ALC882_FIXUP_NO_PRIMARY_HP), /* All Apple entries are in codec SSIDs */ SND_PCI_QUIRK(0x106b, 0x00a0, "MacBookPro 3,1", ALC889_FIXUP_MBP_VREF), @@ -5435,6 +5461,7 @@ static const struct snd_pci_quirk alc882_fixup_tbl[] = { SND_PCI_QUIRK(0x106b, 0x4000, "MacbookPro 5,1", ALC889_FIXUP_IMAC91_VREF), SND_PCI_QUIRK(0x106b, 0x4100, "Macmini 3,1", ALC889_FIXUP_IMAC91_VREF), SND_PCI_QUIRK(0x106b, 0x4200, "Mac Pro 5,1", ALC885_FIXUP_MACPRO_GPIO), + SND_PCI_QUIRK(0x106b, 0x4300, "iMac 9,1", ALC889_FIXUP_IMAC91_VREF), SND_PCI_QUIRK(0x106b, 0x4600, "MacbookPro 5,2", ALC889_FIXUP_IMAC91_VREF), SND_PCI_QUIRK(0x106b, 0x4900, "iMac 9,1 Aluminum", ALC889_FIXUP_IMAC91_VREF), SND_PCI_QUIRK(0x106b, 0x4a00, "Macbook 5,2", ALC889_FIXUP_IMAC91_VREF), @@ -5455,6 +5482,7 @@ static const struct alc_model_fixup alc882_fixup_models[] = { {.id = ALC882_FIXUP_ACER_ASPIRE_4930G, .name = "acer-aspire-4930g"}, {.id = ALC882_FIXUP_ACER_ASPIRE_8930G, .name = "acer-aspire-8930g"}, 
{.id = ALC883_FIXUP_ACER_EAPD, .name = "acer-aspire"}, + {.id = ALC882_FIXUP_NO_PRIMARY_HP, .name = "no-primary-hp"}, {} }; @@ -6160,6 +6188,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = { SND_PCI_QUIRK(0x17aa, 0x21e9, "Thinkpad Edge 15", ALC269_FIXUP_SKU_IGNORE), SND_PCI_QUIRK(0x17aa, 0x21f6, "Thinkpad T530", ALC269_FIXUP_LENOVO_DOCK), SND_PCI_QUIRK(0x17aa, 0x21fa, "Thinkpad X230", ALC269_FIXUP_LENOVO_DOCK), + SND_PCI_QUIRK(0x17aa, 0x21f3, "Thinkpad T430", ALC269_FIXUP_LENOVO_DOCK), SND_PCI_QUIRK(0x17aa, 0x21fb, "Thinkpad T430s", ALC269_FIXUP_LENOVO_DOCK), SND_PCI_QUIRK(0x17aa, 0x2203, "Thinkpad X230 Tablet", ALC269_FIXUP_LENOVO_DOCK), SND_PCI_QUIRK(0x17aa, 0x3bf8, "Quanta FL1", ALC269_FIXUP_PCM_44K), @@ -6287,6 +6316,12 @@ static int patch_alc269(struct hda_codec *codec) if (err < 0) goto error; + alc_pick_fixup(codec, alc269_fixup_models, + alc269_fixup_tbl, alc269_fixups); + alc_apply_fixup(codec, ALC_FIXUP_ACT_PRE_PROBE); + + alc_auto_parse_customize_define(codec); + if (codec->vendor_id == 0x10ec0269) { spec->codec_variant = ALC269_TYPE_ALC269VA; switch (alc_get_coef0(codec) & 0x00f0) { @@ -6311,12 +6346,6 @@ static int patch_alc269(struct hda_codec *codec) alc269_fill_coef(codec); } - alc_pick_fixup(codec, alc269_fixup_models, - alc269_fixup_tbl, alc269_fixups); - alc_apply_fixup(codec, ALC_FIXUP_ACT_PRE_PROBE); - - alc_auto_parse_customize_define(codec); - /* automatic parse from the BIOS config */ err = alc269_parse_auto_config(codec); if (err < 0) @@ -6493,8 +6522,8 @@ static void alc861vd_fixup_dallas(struct hda_codec *codec, const struct alc_fixup *fix, int action) { if (action == ALC_FIXUP_ACT_PRE_PROBE) { - snd_hda_override_pin_caps(codec, 0x18, 0x00001714); - snd_hda_override_pin_caps(codec, 0x19, 0x0000171c); + snd_hda_override_pin_caps(codec, 0x18, 0x00000734); + snd_hda_override_pin_caps(codec, 0x19, 0x0000073c); } } @@ -7009,6 +7038,9 @@ static const struct hda_codec_preset snd_hda_preset_realtek[] = { { .id = 0x10ec0276, .name = "ALC276", .patch = patch_alc269 }, { .id = 0x10ec0280, .name = "ALC280", .patch = patch_alc269 }, { .id = 0x10ec0282, .name = "ALC282", .patch = patch_alc269 }, + { .id = 0x10ec0283, .name = "ALC283", .patch = patch_alc269 }, + { .id = 0x10ec0290, .name = "ALC290", .patch = patch_alc269 }, + { .id = 0x10ec0292, .name = "ALC292", .patch = patch_alc269 }, { .id = 0x10ec0861, .rev = 0x100340, .name = "ALC660", .patch = patch_alc861 }, { .id = 0x10ec0660, .name = "ALC660-VD", .patch = patch_alc861vd }, @@ -7022,6 +7054,7 @@ static const struct hda_codec_preset snd_hda_preset_realtek[] = { .patch = patch_alc662 }, { .id = 0x10ec0663, .name = "ALC663", .patch = patch_alc662 }, { .id = 0x10ec0665, .name = "ALC665", .patch = patch_alc662 }, + { .id = 0x10ec0668, .name = "ALC668", .patch = patch_alc662 }, { .id = 0x10ec0670, .name = "ALC670", .patch = patch_alc662 }, { .id = 0x10ec0680, .name = "ALC680", .patch = patch_alc680 }, { .id = 0x10ec0880, .name = "ALC880", .patch = patch_alc880 }, @@ -7039,6 +7072,7 @@ static const struct hda_codec_preset snd_hda_preset_realtek[] = { { .id = 0x10ec0889, .name = "ALC889", .patch = patch_alc882 }, { .id = 0x10ec0892, .name = "ALC892", .patch = patch_alc662 }, { .id = 0x10ec0899, .name = "ALC898", .patch = patch_alc882 }, + { .id = 0x10ec0900, .name = "ALC1150", .patch = patch_alc882 }, {} /* terminator */ }; diff --git a/sound/pci/hda/patch_sigmatel.c b/sound/pci/hda/patch_sigmatel.c index fd5331244171..137b67f8e69a 100644 --- a/sound/pci/hda/patch_sigmatel.c +++ b/sound/pci/hda/patch_sigmatel.c @@ 
-1072,7 +1072,7 @@ static struct snd_kcontrol_new stac_smux_mixer = { static const char * const slave_pfxs[] = { "Front", "Surround", "Center", "LFE", "Side", - "Headphone", "Speaker", "IEC958", + "Headphone", "Speaker", "IEC958", "PCM", NULL }; @@ -1695,7 +1695,7 @@ static const struct snd_pci_quirk stac92hd83xxx_cfg_tbl[] = { SND_PCI_QUIRK(PCI_VENDOR_ID_HP, 0x1658, "HP", STAC_92HD83XXX_HP_cNB11_INTQUAD), SND_PCI_QUIRK(PCI_VENDOR_ID_HP, 0x1659, - "HP", STAC_92HD83XXX_HP_cNB11_INTQUAD), + "HP Pavilion dv7", STAC_HP_DV7_4000), SND_PCI_QUIRK(PCI_VENDOR_ID_HP, 0x165A, "HP", STAC_92HD83XXX_HP_cNB11_INTQUAD), SND_PCI_QUIRK(PCI_VENDOR_ID_HP, 0x165B, diff --git a/sound/pci/hda/patch_via.c b/sound/pci/hda/patch_via.c index 3998d09bf241..9dafacdb5195 100644 --- a/sound/pci/hda/patch_via.c +++ b/sound/pci/hda/patch_via.c @@ -1868,11 +1868,11 @@ static int via_auto_fill_dac_nids(struct hda_codec *codec) { struct via_spec *spec = codec->spec; const struct auto_pin_cfg *cfg = &spec->autocfg; - int i, dac_num; + int i; hda_nid_t nid; + spec->multiout.num_dacs = 0; spec->multiout.dac_nids = spec->private_dac_nids; - dac_num = 0; for (i = 0; i < cfg->line_outs; i++) { hda_nid_t dac = 0; nid = cfg->line_out_pins[i]; @@ -1883,16 +1883,13 @@ static int via_auto_fill_dac_nids(struct hda_codec *codec) if (!i && parse_output_path(codec, nid, dac, 1, &spec->out_mix_path)) dac = spec->out_mix_path.path[0]; - if (dac) { - spec->private_dac_nids[i] = dac; - dac_num++; - } + if (dac) + spec->private_dac_nids[spec->multiout.num_dacs++] = dac; } if (!spec->out_path[0].depth && spec->out_mix_path.depth) { spec->out_path[0] = spec->out_mix_path; spec->out_mix_path.depth = 0; } - spec->multiout.num_dacs = dac_num; return 0; } @@ -3668,6 +3665,18 @@ static void set_widgets_power_state_vt2002P(struct hda_codec *codec) update_power_state(codec, 0x21, AC_PWRST_D3); } +/* NIDs 0x24 and 0x33 on VT1802 have connections to non-existing NID 0x3e + * Replace this with mixer NID 0x1c + */ +static void fix_vt1802_connections(struct hda_codec *codec) +{ + static hda_nid_t conn_24[] = { 0x14, 0x1c }; + static hda_nid_t conn_33[] = { 0x1c }; + + snd_hda_override_conn_list(codec, 0x24, ARRAY_SIZE(conn_24), conn_24); + snd_hda_override_conn_list(codec, 0x33, ARRAY_SIZE(conn_33), conn_33); +} + /* patch for vt2002P */ static int patch_vt2002P(struct hda_codec *codec) { @@ -3682,6 +3691,8 @@ static int patch_vt2002P(struct hda_codec *codec) spec->aa_mix_nid = 0x21; override_mic_boost(codec, 0x2b, 0, 3, 40); override_mic_boost(codec, 0x29, 0, 3, 40); + if (spec->codec_type == VT1802) + fix_vt1802_connections(codec); add_secret_dac_path(codec); /* automatic parse from the BIOS config */ diff --git a/sound/pci/ice1712/prodigy_hifi.c b/sound/pci/ice1712/prodigy_hifi.c index 764cc93dbca4..075d5aa1fee0 100644 --- a/sound/pci/ice1712/prodigy_hifi.c +++ b/sound/pci/ice1712/prodigy_hifi.c @@ -297,6 +297,7 @@ static int ak4396_dac_vol_put(struct snd_kcontrol *kcontrol, struct snd_ctl_elem } static const DECLARE_TLV_DB_SCALE(db_scale_wm_dac, -12700, 100, 1); +static const DECLARE_TLV_DB_LINEAR(ak4396_db_scale, TLV_DB_GAIN_MUTE, 0); static struct snd_kcontrol_new prodigy_hd2_controls[] __devinitdata = { { @@ -307,7 +308,7 @@ static struct snd_kcontrol_new prodigy_hd2_controls[] __devinitdata = { .info = ak4396_dac_vol_info, .get = ak4396_dac_vol_get, .put = ak4396_dac_vol_put, - .tlv = { .p = db_scale_wm_dac }, + .tlv = { .p = ak4396_db_scale }, }, }; diff --git a/sound/pci/rme32.c b/sound/pci/rme32.c index b4819d5e41db..64da910ac34c 100644 --- 
a/sound/pci/rme32.c +++ b/sound/pci/rme32.c @@ -1017,7 +1017,7 @@ static int snd_rme32_capture_close(struct snd_pcm_substream *substream) spin_lock_irq(&rme32->lock); rme32->capture_substream = NULL; rme32->capture_periodsize = 0; - spin_unlock(&rme32->lock); + spin_unlock_irq(&rme32->lock); return 0; } diff --git a/sound/soc/Makefile b/sound/soc/Makefile index 2feaf376e94b..0f370861bbae 100644 --- a/sound/soc/Makefile +++ b/sound/soc/Makefile @@ -1,8 +1,9 @@ snd-soc-core-objs := soc-core.o soc-dapm.o soc-jack.o soc-cache.o soc-utils.o snd-soc-core-objs += soc-pcm.o soc-io.o -snd-soc-dmaengine-pcm-objs := soc-dmaengine-pcm.o -obj-$(CONFIG_SND_SOC_DMAENGINE_PCM) += snd-soc-dmaengine-pcm.o +ifneq ($(CONFIG_SND_SOC_DMAENGINE_PCM),) +snd-soc-core-objs += soc-dmaengine-pcm.o +endif obj-$(CONFIG_SND_SOC) += snd-soc-core.o obj-$(CONFIG_SND_SOC) += codecs/ diff --git a/sound/soc/codecs/sigmadsp.c b/sound/soc/codecs/sigmadsp.c index 5be42bf56996..4068f2491232 100644 --- a/sound/soc/codecs/sigmadsp.c +++ b/sound/soc/codecs/sigmadsp.c @@ -225,7 +225,7 @@ EXPORT_SYMBOL(process_sigma_firmware); static int sigma_action_write_regmap(void *control_data, const struct sigma_action *sa, size_t len) { - return regmap_raw_write(control_data, le16_to_cpu(sa->addr), + return regmap_raw_write(control_data, be16_to_cpu(sa->addr), sa->payload, len - 2); } diff --git a/sound/soc/codecs/wm2000.c b/sound/soc/codecs/wm2000.c index a75c3766aede..bb9f07037485 100644 --- a/sound/soc/codecs/wm2000.c +++ b/sound/soc/codecs/wm2000.c @@ -190,9 +190,9 @@ static int wm2000_power_up(struct i2c_client *i2c, int analogue) ret = wm2000_read(i2c, WM2000_REG_SPEECH_CLARITY); if (wm2000->speech_clarity) - ret &= ~WM2000_SPEECH_CLARITY; - else ret |= WM2000_SPEECH_CLARITY; + else + ret &= ~WM2000_SPEECH_CLARITY; wm2000_write(i2c, WM2000_REG_SPEECH_CLARITY, ret); wm2000_write(i2c, WM2000_REG_SYS_START0, 0x33); @@ -692,7 +692,7 @@ static int wm2000_resume(struct snd_soc_codec *codec) #endif static const struct regmap_config wm2000_regmap = { - .reg_bits = 8, + .reg_bits = 16, .val_bits = 8, }; diff --git a/sound/soc/codecs/wm2200.c b/sound/soc/codecs/wm2200.c index 32682c1b7cde..9932aacaa5b8 100644 --- a/sound/soc/codecs/wm2200.c +++ b/sound/soc/codecs/wm2200.c @@ -897,8 +897,6 @@ static const char *wm2200_mixer_texts[] = { "EQR", "LHPF1", "LHPF2", - "LHPF3", - "LHPF4", "DSP1.1", "DSP1.2", "DSP1.3", @@ -931,7 +929,6 @@ static int wm2200_mixer_values[] = { 0x25, 0x50, /* EQ */ 0x51, - 0x52, 0x60, /* LHPF1 */ 0x61, /* LHPF2 */ 0x68, /* DSP1 */ @@ -993,9 +990,9 @@ SOC_DOUBLE_R_TLV("IN3 Volume", WM2200_IN3L_CONTROL, WM2200_IN3R_CONTROL, SOC_DOUBLE_R("IN1 Digital Switch", WM2200_ADC_DIGITAL_VOLUME_1L, WM2200_ADC_DIGITAL_VOLUME_1R, WM2200_IN1L_MUTE_SHIFT, 1, 1), -SOC_DOUBLE_R("IN2 Digital Switch", WM2200_ADC_DIGITAL_VOLUME_1L, +SOC_DOUBLE_R("IN2 Digital Switch", WM2200_ADC_DIGITAL_VOLUME_2L, WM2200_ADC_DIGITAL_VOLUME_2R, WM2200_IN2L_MUTE_SHIFT, 1, 1), -SOC_DOUBLE_R("IN3 Digital Switch", WM2200_ADC_DIGITAL_VOLUME_1L, +SOC_DOUBLE_R("IN3 Digital Switch", WM2200_ADC_DIGITAL_VOLUME_3L, WM2200_ADC_DIGITAL_VOLUME_3R, WM2200_IN3L_MUTE_SHIFT, 1, 1), SOC_DOUBLE_R_TLV("IN1 Digital Volume", WM2200_ADC_DIGITAL_VOLUME_1L, @@ -1028,7 +1025,7 @@ SOC_DOUBLE_R_TLV("OUT2 Digital Volume", WM2200_DAC_DIGITAL_VOLUME_2L, WM2200_DAC_DIGITAL_VOLUME_2R, WM2200_OUT2L_VOL_SHIFT, 0x9f, 0, digital_tlv), SOC_DOUBLE("OUT2 Switch", WM2200_PDM_1, WM2200_SPK1L_MUTE_SHIFT, - WM2200_SPK1R_MUTE_SHIFT, 1, 0), + WM2200_SPK1R_MUTE_SHIFT, 1, 1), }; WM2200_MIXER_ENUMS(OUT1L, 
WM2200_OUT1LMIX_INPUT_1_SOURCE); @@ -1380,15 +1377,9 @@ static int wm2200_set_fmt(struct snd_soc_dai *dai, unsigned int fmt) case SND_SOC_DAIFMT_DSP_A: fmt_val = 0; break; - case SND_SOC_DAIFMT_DSP_B: - fmt_val = 1; - break; case SND_SOC_DAIFMT_I2S: fmt_val = 2; break; - case SND_SOC_DAIFMT_LEFT_J: - fmt_val = 3; - break; default: dev_err(codec->dev, "Unsupported DAI format %d\n", fmt & SND_SOC_DAIFMT_FORMAT_MASK); @@ -1440,7 +1431,7 @@ static int wm2200_set_fmt(struct snd_soc_dai *dai, unsigned int fmt) WM2200_AIF1TX_LRCLK_MSTR | WM2200_AIF1TX_LRCLK_INV, lrclk); snd_soc_update_bits(codec, WM2200_AUDIO_IF_1_5, - WM2200_AIF1_FMT_MASK << 1, fmt_val << 1); + WM2200_AIF1_FMT_MASK, fmt_val); return 0; } @@ -2091,6 +2082,7 @@ static __devinit int wm2200_i2c_probe(struct i2c_client *i2c, switch (wm2200->rev) { case 0: + case 1: ret = regmap_register_patch(wm2200->regmap, wm2200_reva_patch, ARRAY_SIZE(wm2200_reva_patch)); if (ret != 0) { diff --git a/sound/soc/codecs/wm5100.c b/sound/soc/codecs/wm5100.c index b9c185ce64e4..a8d03ab5ea2e 100644 --- a/sound/soc/codecs/wm5100.c +++ b/sound/soc/codecs/wm5100.c @@ -1296,15 +1296,9 @@ static int wm5100_set_fmt(struct snd_soc_dai *dai, unsigned int fmt) case SND_SOC_DAIFMT_DSP_A: mask = 0; break; - case SND_SOC_DAIFMT_DSP_B: - mask = 1; - break; case SND_SOC_DAIFMT_I2S: mask = 2; break; - case SND_SOC_DAIFMT_LEFT_J: - mask = 3; - break; default: dev_err(codec->dev, "Unsupported DAI format %d\n", fmt & SND_SOC_DAIFMT_FORMAT_MASK); diff --git a/sound/soc/codecs/wm8978.c b/sound/soc/codecs/wm8978.c index 72d5fdcd3cc2..6c37c7c2327e 100644 --- a/sound/soc/codecs/wm8978.c +++ b/sound/soc/codecs/wm8978.c @@ -783,7 +783,7 @@ static int wm8978_hw_params(struct snd_pcm_substream *substream, wm8978->mclk_idx = -1; f_sel = wm8978->f_mclk; } else { - if (!wm8978->f_pllout) { + if (!wm8978->f_opclk) { /* We only enter here, if OPCLK is not used */ int ret = wm8978_configure_pll(codec); if (ret < 0) diff --git a/sound/soc/codecs/wm9712.c b/sound/soc/codecs/wm9712.c index b342ae50bcd6..757a52aed804 100644 --- a/sound/soc/codecs/wm9712.c +++ b/sound/soc/codecs/wm9712.c @@ -146,7 +146,7 @@ SOC_SINGLE("Playback Attenuate (-6dB) Switch", AC97_MASTER_TONE, 6, 1, 0), SOC_SINGLE("Bass Volume", AC97_MASTER_TONE, 8, 15, 1), SOC_SINGLE("Treble Volume", AC97_MASTER_TONE, 0, 15, 1), -SOC_SINGLE("Capture ADC Switch", AC97_REC_GAIN, 15, 1, 1), +SOC_SINGLE("Capture Switch", AC97_REC_GAIN, 15, 1, 1), SOC_ENUM("Capture Volume Steps", wm9712_enum[6]), SOC_DOUBLE("Capture Volume", AC97_REC_GAIN, 8, 0, 63, 1), SOC_SINGLE("Capture ZC Switch", AC97_REC_GAIN, 7, 1, 0), @@ -272,7 +272,7 @@ SOC_DAPM_ENUM("Route", wm9712_enum[9]); /* Mic select */ static const struct snd_kcontrol_new wm9712_mic_src_controls = -SOC_DAPM_ENUM("Route", wm9712_enum[7]); +SOC_DAPM_ENUM("Mic Source Select", wm9712_enum[7]); /* diff select */ static const struct snd_kcontrol_new wm9712_diff_sel_controls = @@ -291,7 +291,9 @@ SND_SOC_DAPM_MUX("Left Capture Select", SND_SOC_NOPM, 0, 0, &wm9712_capture_selectl_controls), SND_SOC_DAPM_MUX("Right Capture Select", SND_SOC_NOPM, 0, 0, &wm9712_capture_selectr_controls), -SND_SOC_DAPM_MUX("Mic Select Source", SND_SOC_NOPM, 0, 0, +SND_SOC_DAPM_MUX("Left Mic Select Source", SND_SOC_NOPM, 0, 0, + &wm9712_mic_src_controls), +SND_SOC_DAPM_MUX("Right Mic Select Source", SND_SOC_NOPM, 0, 0, &wm9712_mic_src_controls), SND_SOC_DAPM_MUX("Differential Source", SND_SOC_NOPM, 0, 0, &wm9712_diff_sel_controls), @@ -319,6 +321,7 @@ SND_SOC_DAPM_PGA("Out 3 PGA", AC97_INT_PAGING, 5, 1, NULL, 0), 
SND_SOC_DAPM_PGA("Line PGA", AC97_INT_PAGING, 2, 1, NULL, 0), SND_SOC_DAPM_PGA("Phone PGA", AC97_INT_PAGING, 1, 1, NULL, 0), SND_SOC_DAPM_PGA("Mic PGA", AC97_INT_PAGING, 0, 1, NULL, 0), +SND_SOC_DAPM_PGA("Differential Mic", SND_SOC_NOPM, 0, 0, NULL, 0), SND_SOC_DAPM_MICBIAS("Mic Bias", AC97_INT_PAGING, 10, 1), SND_SOC_DAPM_OUTPUT("MONOOUT"), SND_SOC_DAPM_OUTPUT("HPOUTL"), @@ -379,6 +382,18 @@ static const struct snd_soc_dapm_route wm9712_audio_map[] = { {"Mic PGA", NULL, "MIC1"}, {"Mic PGA", NULL, "MIC2"}, + /* microphones */ + {"Differential Mic", NULL, "MIC1"}, + {"Differential Mic", NULL, "MIC2"}, + {"Left Mic Select Source", "Mic 1", "MIC1"}, + {"Left Mic Select Source", "Mic 2", "MIC2"}, + {"Left Mic Select Source", "Stereo", "MIC1"}, + {"Left Mic Select Source", "Differential", "Differential Mic"}, + {"Right Mic Select Source", "Mic 1", "MIC1"}, + {"Right Mic Select Source", "Mic 2", "MIC2"}, + {"Right Mic Select Source", "Stereo", "MIC2"}, + {"Right Mic Select Source", "Differential", "Differential Mic"}, + /* left capture selector */ {"Left Capture Select", "Mic", "MIC1"}, {"Left Capture Select", "Speaker Mixer", "Speaker Mixer"}, diff --git a/sound/soc/omap/mcbsp.c b/sound/soc/omap/mcbsp.c index e5f44440d1b9..fb67772130b5 100644 --- a/sound/soc/omap/mcbsp.c +++ b/sound/soc/omap/mcbsp.c @@ -691,7 +691,7 @@ int omap_mcbsp_6pin_src_mux(struct omap_mcbsp *mcbsp, u8 mux) { const char *signal, *src; - if (mcbsp->pdata->mux_signal) + if (!mcbsp->pdata->mux_signal) return -EINVAL; switch (mux) { diff --git a/sound/soc/omap/omap-abe-twl6040.c b/sound/soc/omap/omap-abe-twl6040.c index 93bb8eee22b3..9c2f090167cc 100644 --- a/sound/soc/omap/omap-abe-twl6040.c +++ b/sound/soc/omap/omap-abe-twl6040.c @@ -181,7 +181,7 @@ static int omap_abe_twl6040_init(struct snd_soc_pcm_runtime *rtd) twl6040_disconnect_pin(dapm, pdata->has_hf, "Ext Spk"); twl6040_disconnect_pin(dapm, pdata->has_ep, "Earphone Spk"); twl6040_disconnect_pin(dapm, pdata->has_aux, "Line Out"); - twl6040_disconnect_pin(dapm, pdata->has_vibra, "Vinrator"); + twl6040_disconnect_pin(dapm, pdata->has_vibra, "Vibrator"); twl6040_disconnect_pin(dapm, pdata->has_hsmic, "Headset Mic"); twl6040_disconnect_pin(dapm, pdata->has_mainmic, "Main Handset Mic"); twl6040_disconnect_pin(dapm, pdata->has_submic, "Sub Handset Mic"); diff --git a/sound/soc/samsung/dma.c b/sound/soc/samsung/dma.c index ddc6cde14e2a..2526ecada5f1 100644 --- a/sound/soc/samsung/dma.c +++ b/sound/soc/samsung/dma.c @@ -34,9 +34,7 @@ static const struct snd_pcm_hardware dma_hardware = { .info = SNDRV_PCM_INFO_INTERLEAVED | SNDRV_PCM_INFO_BLOCK_TRANSFER | SNDRV_PCM_INFO_MMAP | - SNDRV_PCM_INFO_MMAP_VALID | - SNDRV_PCM_INFO_PAUSE | - SNDRV_PCM_INFO_RESUME, + SNDRV_PCM_INFO_MMAP_VALID, .formats = SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_U16_LE | SNDRV_PCM_FMTBIT_U8 | @@ -246,15 +244,11 @@ static int dma_trigger(struct snd_pcm_substream *substream, int cmd) switch (cmd) { case SNDRV_PCM_TRIGGER_START: - case SNDRV_PCM_TRIGGER_RESUME: - case SNDRV_PCM_TRIGGER_PAUSE_RELEASE: prtd->state |= ST_RUNNING; prtd->params->ops->trigger(prtd->params->ch); break; case SNDRV_PCM_TRIGGER_STOP: - case SNDRV_PCM_TRIGGER_SUSPEND: - case SNDRV_PCM_TRIGGER_PAUSE_PUSH: prtd->state &= ~ST_RUNNING; prtd->params->ops->stop(prtd->params->ch); break; diff --git a/sound/soc/sh/fsi.c b/sound/soc/sh/fsi.c index 74ed2dffbffd..91b728774dba 100644 --- a/sound/soc/sh/fsi.c +++ b/sound/soc/sh/fsi.c @@ -20,6 +20,7 @@ #include <linux/sh_dma.h> #include <linux/slab.h> #include <linux/module.h> +#include 
<linux/workqueue.h> #include <sound/soc.h> #include <sound/sh_fsi.h> @@ -199,7 +200,7 @@ struct fsi_stream { */ struct dma_chan *chan; struct sh_dmae_slave slave; /* see fsi_handler_init() */ - struct tasklet_struct tasklet; + struct work_struct work; dma_addr_t dma; }; @@ -968,9 +969,9 @@ static dma_addr_t fsi_dma_get_area(struct fsi_stream *io) return io->dma + samples_to_bytes(runtime, io->buff_sample_pos); } -static void fsi_dma_do_tasklet(unsigned long data) +static void fsi_dma_do_work(struct work_struct *work) { - struct fsi_stream *io = (struct fsi_stream *)data; + struct fsi_stream *io = container_of(work, struct fsi_stream, work); struct fsi_priv *fsi = fsi_stream_to_priv(io); struct dma_chan *chan; struct snd_soc_dai *dai; @@ -1023,7 +1024,7 @@ static void fsi_dma_do_tasklet(unsigned long data) * FIXME * * In DMAEngine case, codec and FSI cannot be started simultaneously - * since FSI is using tasklet. + * since FSI is using the scheduler work queue. * Therefore, in capture case, probably FSI FIFO will have got * overflow error in this point. * in that case, DMA cannot start transfer until error was cleared. @@ -1047,7 +1048,7 @@ static bool fsi_dma_filter(struct dma_chan *chan, void *param) static int fsi_dma_transfer(struct fsi_priv *fsi, struct fsi_stream *io) { - tasklet_schedule(&io->tasklet); + schedule_work(&io->work); return 0; } @@ -1087,14 +1088,14 @@ static int fsi_dma_probe(struct fsi_priv *fsi, struct fsi_stream *io) if (!io->chan) return -EIO; - tasklet_init(&io->tasklet, fsi_dma_do_tasklet, (unsigned long)io); + INIT_WORK(&io->work, fsi_dma_do_work); return 0; } static int fsi_dma_remove(struct fsi_priv *fsi, struct fsi_stream *io) { - tasklet_kill(&io->tasklet); + cancel_work_sync(&io->work); fsi_stream_stop(fsi, io); diff --git a/sound/soc/soc-dapm.c b/sound/soc/soc-dapm.c index f9aa9b50f82b..310ee4addc69 100644 --- a/sound/soc/soc-dapm.c +++ b/sound/soc/soc-dapm.c @@ -3276,7 +3276,7 @@ void snd_soc_dapm_shutdown(struct snd_soc_card *card) { struct snd_soc_codec *codec; - list_for_each_entry(codec, &card->codec_dev_list, list) { + list_for_each_entry(codec, &card->codec_dev_list, card_list) { soc_dapm_shutdown_codec(&codec->dapm); if (codec->dapm.bias_level == SND_SOC_BIAS_STANDBY) snd_soc_dapm_set_bias_level(&codec->dapm, diff --git a/sound/soc/tegra/tegra_alc5632.c b/sound/soc/tegra/tegra_alc5632.c index e45ccd851f6a..76d759ec40a2 100644 --- a/sound/soc/tegra/tegra_alc5632.c +++ b/sound/soc/tegra/tegra_alc5632.c @@ -95,7 +95,6 @@ static struct snd_soc_jack_gpio tegra_alc5632_hp_jack_gpio = { .name = "Headset detection", .report = SND_JACK_HEADSET, .debounce_time = 150, - .invert = 1, }; static const struct snd_soc_dapm_widget tegra_alc5632_dapm_widgets[] = { diff --git a/sound/usb/caiaq/device.c b/sound/usb/caiaq/device.c index 64aed432ae22..7da0d0aa72cb 100644 --- a/sound/usb/caiaq/device.c +++ b/sound/usb/caiaq/device.c @@ -485,7 +485,7 @@ static int __devinit snd_probe(struct usb_interface *intf, const struct usb_device_id *id) { int ret; - struct snd_card *card; + struct snd_card *card = NULL; struct usb_device *device = interface_to_usbdev(intf); ret = create_card(device, intf, &card); diff --git a/sound/usb/card.c b/sound/usb/card.c index 223e4fe8e284..92463daa28fb 100644 --- a/sound/usb/card.c +++ b/sound/usb/card.c @@ -350,7 +350,7 @@ static int snd_usb_audio_create(struct usb_device *dev, int idx, return -ENOMEM; } - mutex_init(&chip->shutdown_mutex); + init_rwsem(&chip->shutdown_rwsem); chip->index = idx; chip->dev = dev; chip->card = card; @@ -575,9 
+575,11 @@ static void snd_usb_audio_disconnect(struct usb_device *dev, return; card = chip->card; - mutex_lock(&register_mutex); - mutex_lock(&chip->shutdown_mutex); + down_write(&chip->shutdown_rwsem); chip->shutdown = 1; + up_write(&chip->shutdown_rwsem); + + mutex_lock(&register_mutex); chip->num_interfaces--; #ifdef CONFIG_SWITCH @@ -599,11 +601,9 @@ static void snd_usb_audio_disconnect(struct usb_device *dev, snd_usb_mixer_disconnect(p); } usb_chip[chip->index] = NULL; - mutex_unlock(&chip->shutdown_mutex); mutex_unlock(&register_mutex); snd_card_free_when_closed(card); } else { - mutex_unlock(&chip->shutdown_mutex); mutex_unlock(&register_mutex); } } @@ -635,16 +635,20 @@ int snd_usb_autoresume(struct snd_usb_audio *chip) { int err = -ENODEV; + down_read(&chip->shutdown_rwsem); if (!chip->shutdown && !chip->probing) err = usb_autopm_get_interface(chip->pm_intf); + up_read(&chip->shutdown_rwsem); return err; } void snd_usb_autosuspend(struct snd_usb_audio *chip) { + down_read(&chip->shutdown_rwsem); if (!chip->shutdown && !chip->probing) usb_autopm_put_interface(chip->pm_intf); + up_read(&chip->shutdown_rwsem); } static int usb_audio_suspend(struct usb_interface *intf, pm_message_t message) diff --git a/sound/usb/card.h b/sound/usb/card.h index da5fa1ac4eda..7932b2ac0dec 100644 --- a/sound/usb/card.h +++ b/sound/usb/card.h @@ -87,6 +87,7 @@ struct snd_usb_substream { struct snd_urb_ctx syncurb[SYNC_URBS]; /* sync urb table */ char *syncbuf; /* sync buffer for all sync URBs */ dma_addr_t sync_dma; /* DMA address of syncbuf */ + unsigned int speed; /* USB_SPEED_XXX */ u64 formats; /* format bitmasks (all or'ed) */ unsigned int num_formats; /* number of supported audio formats (list) */ diff --git a/sound/usb/endpoint.c b/sound/usb/endpoint.c index 08dcce53720b..9ab2b3e22224 100644 --- a/sound/usb/endpoint.c +++ b/sound/usb/endpoint.c @@ -895,7 +895,8 @@ void snd_usb_init_substream(struct snd_usb_stream *as, subs->dev = as->chip->dev; subs->txfr_quirk = as->chip->txfr_quirk; subs->ops = audio_urb_ops[stream]; - if (snd_usb_get_speed(subs->dev) >= USB_SPEED_HIGH) + subs->speed = snd_usb_get_speed(subs->dev); + if (subs->speed >= USB_SPEED_HIGH) subs->ops.prepare_sync = prepare_capture_sync_urb_hs; snd_usb_set_pcm_ops(as->pcm, stream); diff --git a/sound/usb/midi.c b/sound/usb/midi.c index c83f6143c0eb..34b9bb7fe87c 100644 --- a/sound/usb/midi.c +++ b/sound/usb/midi.c @@ -116,6 +116,7 @@ struct snd_usb_midi { struct list_head list; struct timer_list error_timer; spinlock_t disc_lock; + struct rw_semaphore disc_rwsem; struct mutex mutex; u32 usb_id; int next_midi_device; @@ -125,8 +126,10 @@ struct snd_usb_midi { struct snd_usb_midi_in_endpoint *in; } endpoints[MIDI_MAX_ENDPOINTS]; unsigned long input_triggered; - unsigned int opened; + bool autopm_reference; + unsigned int opened[2]; unsigned char disconnected; + unsigned char input_running; struct snd_kcontrol *roland_load_ctl; }; @@ -1032,29 +1035,58 @@ static void update_roland_altsetting(struct snd_usb_midi* umidi) snd_usbmidi_input_start(&umidi->list); } -static void substream_open(struct snd_rawmidi_substream *substream, int open) +static int substream_open(struct snd_rawmidi_substream *substream, int dir, + int open) { struct snd_usb_midi* umidi = substream->rmidi->private_data; struct snd_kcontrol *ctl; + int err; + + down_read(&umidi->disc_rwsem); + if (umidi->disconnected) { + up_read(&umidi->disc_rwsem); + return open ?
-ENODEV : 0; + } mutex_lock(&umidi->mutex); if (open) { - if (umidi->opened++ == 0 && umidi->roland_load_ctl) { - ctl = umidi->roland_load_ctl; - ctl->vd[0].access |= SNDRV_CTL_ELEM_ACCESS_INACTIVE; - snd_ctl_notify(umidi->card, + if (!umidi->opened[0] && !umidi->opened[1]) { + err = usb_autopm_get_interface(umidi->iface); + umidi->autopm_reference = err >= 0; + if (err < 0 && err != -EACCES) { + mutex_unlock(&umidi->mutex); + up_read(&umidi->disc_rwsem); + return -EIO; + } + if (umidi->roland_load_ctl) { + ctl = umidi->roland_load_ctl; + ctl->vd[0].access |= SNDRV_CTL_ELEM_ACCESS_INACTIVE; + snd_ctl_notify(umidi->card, SNDRV_CTL_EVENT_MASK_INFO, &ctl->id); - update_roland_altsetting(umidi); + update_roland_altsetting(umidi); + } } + umidi->opened[dir]++; + if (umidi->opened[1]) + snd_usbmidi_input_start(&umidi->list); } else { - if (--umidi->opened == 0 && umidi->roland_load_ctl) { - ctl = umidi->roland_load_ctl; - ctl->vd[0].access &= ~SNDRV_CTL_ELEM_ACCESS_INACTIVE; - snd_ctl_notify(umidi->card, + umidi->opened[dir]--; + if (!umidi->opened[1]) + snd_usbmidi_input_stop(&umidi->list); + if (!umidi->opened[0] && !umidi->opened[1]) { + if (umidi->roland_load_ctl) { + ctl = umidi->roland_load_ctl; + ctl->vd[0].access &= ~SNDRV_CTL_ELEM_ACCESS_INACTIVE; + snd_ctl_notify(umidi->card, SNDRV_CTL_EVENT_MASK_INFO, &ctl->id); + } + if (umidi->autopm_reference) + usb_autopm_put_interface(umidi->iface); } } mutex_unlock(&umidi->mutex); + up_read(&umidi->disc_rwsem); + return 0; } static int snd_usbmidi_output_open(struct snd_rawmidi_substream *substream) @@ -1062,7 +1094,6 @@ static int snd_usbmidi_output_open(struct snd_rawmidi_substream *substream) struct snd_usb_midi* umidi = substream->rmidi->private_data; struct usbmidi_out_port* port = NULL; int i, j; - int err; for (i = 0; i < MIDI_MAX_ENDPOINTS; ++i) if (umidi->endpoints[i].out) @@ -1075,22 +1106,15 @@ static int snd_usbmidi_output_open(struct snd_rawmidi_substream *substream) snd_BUG(); return -ENXIO; } - err = usb_autopm_get_interface(umidi->iface); - if (err < 0) - return -EIO; + substream->runtime->private_data = port; port->state = STATE_UNKNOWN; - substream_open(substream, 1); - return 0; + return substream_open(substream, 0, 1); } static int snd_usbmidi_output_close(struct snd_rawmidi_substream *substream) { - struct snd_usb_midi* umidi = substream->rmidi->private_data; - - substream_open(substream, 0); - usb_autopm_put_interface(umidi->iface); - return 0; + return substream_open(substream, 0, 0); } static void snd_usbmidi_output_trigger(struct snd_rawmidi_substream *substream, int up) @@ -1143,14 +1167,12 @@ static void snd_usbmidi_output_drain(struct snd_rawmidi_substream *substream) static int snd_usbmidi_input_open(struct snd_rawmidi_substream *substream) { - substream_open(substream, 1); - return 0; + return substream_open(substream, 1, 1); } static int snd_usbmidi_input_close(struct snd_rawmidi_substream *substream) { - substream_open(substream, 0); - return 0; + return substream_open(substream, 1, 0); } static void snd_usbmidi_input_trigger(struct snd_rawmidi_substream *substream, int up) @@ -1399,9 +1421,12 @@ void snd_usbmidi_disconnect(struct list_head* p) * a timer may submit an URB. 
To reliably break the cycle * a flag under lock must be used */ + down_write(&umidi->disc_rwsem); spin_lock_irq(&umidi->disc_lock); umidi->disconnected = 1; spin_unlock_irq(&umidi->disc_lock); + up_write(&umidi->disc_rwsem); + for (i = 0; i < MIDI_MAX_ENDPOINTS; ++i) { struct snd_usb_midi_endpoint* ep = &umidi->endpoints[i]; if (ep->out) @@ -2056,12 +2081,15 @@ void snd_usbmidi_input_stop(struct list_head* p) unsigned int i, j; umidi = list_entry(p, struct snd_usb_midi, list); + if (!umidi->input_running) + return; for (i = 0; i < MIDI_MAX_ENDPOINTS; ++i) { struct snd_usb_midi_endpoint* ep = &umidi->endpoints[i]; if (ep->in) for (j = 0; j < INPUT_URBS; ++j) usb_kill_urb(ep->in->urbs[j]); } + umidi->input_running = 0; } static void snd_usbmidi_input_start_ep(struct snd_usb_midi_in_endpoint* ep) @@ -2086,8 +2114,11 @@ void snd_usbmidi_input_start(struct list_head* p) int i; umidi = list_entry(p, struct snd_usb_midi, list); + if (umidi->input_running || !umidi->opened[1]) + return; for (i = 0; i < MIDI_MAX_ENDPOINTS; ++i) snd_usbmidi_input_start_ep(umidi->endpoints[i].in); + umidi->input_running = 1; } /* @@ -2113,6 +2144,7 @@ int snd_usbmidi_create(struct snd_card *card, umidi->usb_protocol_ops = &snd_usbmidi_standard_ops; init_timer(&umidi->error_timer); spin_lock_init(&umidi->disc_lock); + init_rwsem(&umidi->disc_rwsem); mutex_init(&umidi->mutex); umidi->usb_id = USB_ID(le16_to_cpu(umidi->dev->descriptor.idVendor), le16_to_cpu(umidi->dev->descriptor.idProduct)); @@ -2225,9 +2257,6 @@ int snd_usbmidi_create(struct snd_card *card, } list_add_tail(&umidi->list, midi_list); - - for (i = 0; i < MIDI_MAX_ENDPOINTS; ++i) - snd_usbmidi_input_start_ep(umidi->endpoints[i].in); return 0; } diff --git a/sound/usb/mixer.c b/sound/usb/mixer.c index ab23869c01bb..15b0712b9ad5 100644 --- a/sound/usb/mixer.c +++ b/sound/usb/mixer.c @@ -287,25 +287,32 @@ static int get_ctl_value_v1(struct usb_mixer_elem_info *cval, int request, int v unsigned char buf[2]; int val_len = cval->val_type >= USB_MIXER_S16 ? 
2 : 1; int timeout = 10; - int err; + int idx = 0, err; err = snd_usb_autoresume(cval->mixer->chip); if (err < 0) return -EIO; + down_read(&chip->shutdown_rwsem); while (timeout-- > 0) { + if (chip->shutdown) + break; + idx = snd_usb_ctrl_intf(chip) | (cval->id << 8); if (snd_usb_ctl_msg(chip->dev, usb_rcvctrlpipe(chip->dev, 0), request, USB_RECIP_INTERFACE | USB_TYPE_CLASS | USB_DIR_IN, - validx, snd_usb_ctrl_intf(chip) | (cval->id << 8), - buf, val_len) >= val_len) { + validx, idx, buf, val_len) >= val_len) { *value_ret = convert_signed_value(cval, snd_usb_combine_bytes(buf, val_len)); - snd_usb_autosuspend(cval->mixer->chip); - return 0; + err = 0; + goto out; } } - snd_usb_autosuspend(cval->mixer->chip); snd_printdd(KERN_ERR "cannot get ctl value: req = %#x, wValue = %#x, wIndex = %#x, type = %d\n", - request, validx, snd_usb_ctrl_intf(chip) | (cval->id << 8), cval->val_type); - return -EINVAL; + request, validx, idx, cval->val_type); + err = -EINVAL; + + out: + up_read(&chip->shutdown_rwsem); + snd_usb_autosuspend(cval->mixer->chip); + return err; } static int get_ctl_value_v2(struct usb_mixer_elem_info *cval, int request, int validx, int *value_ret) @@ -313,7 +320,7 @@ static int get_ctl_value_v2(struct usb_mixer_elem_info *cval, int request, int v struct snd_usb_audio *chip = cval->mixer->chip; unsigned char buf[2 + 3*sizeof(__u16)]; /* enough space for one range */ unsigned char *val; - int ret, size; + int idx = 0, ret, size; __u8 bRequest; if (request == UAC_GET_CUR) { @@ -330,16 +337,22 @@ static int get_ctl_value_v2(struct usb_mixer_elem_info *cval, int request, int v if (ret) goto error; - ret = snd_usb_ctl_msg(chip->dev, usb_rcvctrlpipe(chip->dev, 0), bRequest, + down_read(&chip->shutdown_rwsem); + if (chip->shutdown) + ret = -ENODEV; + else { + idx = snd_usb_ctrl_intf(chip) | (cval->id << 8); + ret = snd_usb_ctl_msg(chip->dev, usb_rcvctrlpipe(chip->dev, 0), bRequest, USB_RECIP_INTERFACE | USB_TYPE_CLASS | USB_DIR_IN, - validx, snd_usb_ctrl_intf(chip) | (cval->id << 8), - buf, size); + validx, idx, buf, size); + } + up_read(&chip->shutdown_rwsem); snd_usb_autosuspend(chip); if (ret < 0) { error: snd_printk(KERN_ERR "cannot get ctl value: req = %#x, wValue = %#x, wIndex = %#x, type = %d\n", - request, validx, snd_usb_ctrl_intf(chip) | (cval->id << 8), cval->val_type); + request, validx, idx, cval->val_type); return ret; } @@ -417,7 +430,7 @@ int snd_usb_mixer_set_ctl_value(struct usb_mixer_elem_info *cval, { struct snd_usb_audio *chip = cval->mixer->chip; unsigned char buf[2]; - int val_len, err, timeout = 10; + int idx = 0, val_len, err, timeout = 10; if (cval->mixer->protocol == UAC_VERSION_1) { val_len = cval->val_type >= USB_MIXER_S16 ? 
2 : 1; @@ -440,19 +453,27 @@ int snd_usb_mixer_set_ctl_value(struct usb_mixer_elem_info *cval, err = snd_usb_autoresume(chip); if (err < 0) return -EIO; - while (timeout-- > 0) + down_read(&chip->shutdown_rwsem); + while (timeout-- > 0) { + if (chip->shutdown) + break; + idx = snd_usb_ctrl_intf(chip) | (cval->id << 8); if (snd_usb_ctl_msg(chip->dev, usb_sndctrlpipe(chip->dev, 0), request, USB_RECIP_INTERFACE | USB_TYPE_CLASS | USB_DIR_OUT, - validx, snd_usb_ctrl_intf(chip) | (cval->id << 8), - buf, val_len) >= 0) { - snd_usb_autosuspend(chip); - return 0; + validx, idx, buf, val_len) >= 0) { + err = 0; + goto out; } - snd_usb_autosuspend(chip); + } snd_printdd(KERN_ERR "cannot set ctl value: req = %#x, wValue = %#x, wIndex = %#x, type = %d, data = %#x/%#x\n", - request, validx, snd_usb_ctrl_intf(chip) | (cval->id << 8), cval->val_type, buf[0], buf[1]); - return -EINVAL; + request, validx, idx, cval->val_type, buf[0], buf[1]); + err = -EINVAL; + + out: + up_read(&chip->shutdown_rwsem); + snd_usb_autosuspend(chip); + return err; } static int set_cur_ctl_value(struct usb_mixer_elem_info *cval, int validx, int value) @@ -1218,16 +1239,23 @@ static int parse_audio_feature_unit(struct mixer_build *state, int unitid, void } channels = (hdr->bLength - 7) / csize - 1; bmaControls = hdr->bmaControls; + if (hdr->bLength < 7 + csize) { + snd_printk(KERN_ERR "usbaudio: unit %u: " + "invalid UAC_FEATURE_UNIT descriptor\n", + unitid); + return -EINVAL; + } } else { struct uac2_feature_unit_descriptor *ftr = _ftr; csize = 4; channels = (hdr->bLength - 6) / 4 - 1; bmaControls = ftr->bmaControls; - } - - if (hdr->bLength < 7 || !csize || hdr->bLength < 7 + csize) { - snd_printk(KERN_ERR "usbaudio: unit %u: invalid UAC_FEATURE_UNIT descriptor\n", unitid); - return -EINVAL; + if (hdr->bLength < 6 + csize) { + snd_printk(KERN_ERR "usbaudio: unit %u: " + "invalid UAC_FEATURE_UNIT descriptor\n", + unitid); + return -EINVAL; + } } /* parse the source unit */ @@ -1247,6 +1275,13 @@ static int parse_audio_feature_unit(struct mixer_build *state, int unitid, void /* disable non-functional volume control */ master_bits &= ~UAC_CONTROL_BIT(UAC_FU_VOLUME); break; + case USB_ID(0x1130, 0xf211): + snd_printk(KERN_INFO + "usbmixer: volume control quirk for Tenx TP6911 Audio Headset\n"); + /* disable non-functional volume control */ + channels = 0; + break; + } if (channels > 0) first_ch_bits = snd_usb_combine_bytes(bmaControls + csize, csize); diff --git a/sound/usb/mixer_quirks.c b/sound/usb/mixer_quirks.c index ab125ee0b0f0..38a607a7ae2c 100644 --- a/sound/usb/mixer_quirks.c +++ b/sound/usb/mixer_quirks.c @@ -186,6 +186,11 @@ static int snd_audigy2nx_led_put(struct snd_kcontrol *kcontrol, struct snd_ctl_e if (value > 1) return -EINVAL; changed = value != mixer->audigy2nx_leds[index]; + down_read(&mixer->chip->shutdown_rwsem); + if (mixer->chip->shutdown) { + err = -ENODEV; + goto out; + } if (mixer->chip->usb_id == USB_ID(0x041e, 0x3042)) err = snd_usb_ctl_msg(mixer->chip->dev, usb_sndctrlpipe(mixer->chip->dev, 0), 0x24, @@ -202,6 +207,8 @@ static int snd_audigy2nx_led_put(struct snd_kcontrol *kcontrol, struct snd_ctl_e usb_sndctrlpipe(mixer->chip->dev, 0), 0x24, USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_OTHER, value, index + 2, NULL, 0); + out: + up_read(&mixer->chip->shutdown_rwsem); if (err < 0) return err; mixer->audigy2nx_leds[index] = value; @@ -295,11 +302,16 @@ static void snd_audigy2nx_proc_read(struct snd_info_entry *entry, for (i = 0; jacks[i].name; ++i) { snd_iprintf(buffer, "%s: ", jacks[i].name); - err = 
snd_usb_ctl_msg(mixer->chip->dev, + down_read(&mixer->chip->shutdown_rwsem); + if (mixer->chip->shutdown) + err = 0; + else + err = snd_usb_ctl_msg(mixer->chip->dev, usb_rcvctrlpipe(mixer->chip->dev, 0), UAC_GET_MEM, USB_DIR_IN | USB_TYPE_CLASS | USB_RECIP_INTERFACE, 0, jacks[i].unitid << 8, buf, 3); + up_read(&mixer->chip->shutdown_rwsem); if (err == 3 && (buf[0] == 3 || buf[0] == 6)) snd_iprintf(buffer, "%02x %02x\n", buf[1], buf[2]); else @@ -329,10 +341,15 @@ static int snd_xonar_u1_switch_put(struct snd_kcontrol *kcontrol, else new_status = old_status & ~0x02; changed = new_status != old_status; - err = snd_usb_ctl_msg(mixer->chip->dev, + down_read(&mixer->chip->shutdown_rwsem); + if (mixer->chip->shutdown) + err = -ENODEV; + else + err = snd_usb_ctl_msg(mixer->chip->dev, usb_sndctrlpipe(mixer->chip->dev, 0), 0x08, USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_OTHER, 50, 0, &new_status, 1); + up_read(&mixer->chip->shutdown_rwsem); if (err < 0) return err; mixer->xonar_u1_status = new_status; @@ -371,11 +388,17 @@ static int snd_nativeinstruments_control_get(struct snd_kcontrol *kcontrol, u8 bRequest = (kcontrol->private_value >> 16) & 0xff; u16 wIndex = kcontrol->private_value & 0xffff; u8 tmp; + int ret; - int ret = usb_control_msg(dev, usb_rcvctrlpipe(dev, 0), bRequest, + down_read(&mixer->chip->shutdown_rwsem); + if (mixer->chip->shutdown) + ret = -ENODEV; + else + ret = usb_control_msg(dev, usb_rcvctrlpipe(dev, 0), bRequest, USB_TYPE_VENDOR | USB_RECIP_DEVICE | USB_DIR_IN, 0, cpu_to_le16(wIndex), &tmp, sizeof(tmp), 1000); + up_read(&mixer->chip->shutdown_rwsem); if (ret < 0) { snd_printk(KERN_ERR @@ -396,11 +419,17 @@ static int snd_nativeinstruments_control_put(struct snd_kcontrol *kcontrol, u8 bRequest = (kcontrol->private_value >> 16) & 0xff; u16 wIndex = kcontrol->private_value & 0xffff; u16 wValue = ucontrol->value.integer.value[0]; + int ret; - int ret = usb_control_msg(dev, usb_sndctrlpipe(dev, 0), bRequest, + down_read(&mixer->chip->shutdown_rwsem); + if (mixer->chip->shutdown) + ret = -ENODEV; + else + ret = usb_control_msg(dev, usb_sndctrlpipe(dev, 0), bRequest, USB_TYPE_VENDOR | USB_RECIP_DEVICE | USB_DIR_OUT, cpu_to_le16(wValue), cpu_to_le16(wIndex), NULL, 0, 1000); + up_read(&mixer->chip->shutdown_rwsem); if (ret < 0) { snd_printk(KERN_ERR diff --git a/sound/usb/pcm.c b/sound/usb/pcm.c index 67a4d6dbb32b..4a99f6ca4b66 100644 --- a/sound/usb/pcm.c +++ b/sound/usb/pcm.c @@ -67,6 +67,8 @@ static snd_pcm_uframes_t snd_usb_pcm_pointer(struct snd_pcm_substream *substream unsigned int hwptr_done; subs = (struct snd_usb_substream *)substream->runtime->private_data; + if (subs->stream->chip->shutdown) + return SNDRV_PCM_POS_XRUN; spin_lock(&subs->lock); hwptr_done = subs->hwptr_done; substream->runtime->delay = snd_usb_pcm_delay(subs, @@ -373,8 +375,14 @@ static int snd_usb_hw_params(struct snd_pcm_substream *substream, changed = subs->cur_audiofmt != fmt || subs->period_bytes != params_period_bytes(hw_params) || subs->cur_rate != rate; + + down_read(&subs->stream->chip->shutdown_rwsem); + if (subs->stream->chip->shutdown) { + ret = -ENODEV; + goto unlock; + } if ((ret = set_format(subs, fmt)) < 0) - return ret; + goto unlock; if (subs->cur_rate != rate) { struct usb_host_interface *alts; @@ -383,12 +391,11 @@ static int snd_usb_hw_params(struct snd_pcm_substream *substream, alts = &iface->altsetting[fmt->altset_idx]; ret = snd_usb_init_sample_rate(subs->stream->chip, subs->interface, alts, fmt, rate); if (ret < 0) - return ret; + goto unlock; subs->cur_rate = rate; } if (changed) 
{ - mutex_lock(&subs->stream->chip->shutdown_mutex); /* format changed */ snd_usb_release_substream_urbs(subs, 0); /* influenced: period_bytes, channels, rate, format, */ @@ -396,9 +403,10 @@ static int snd_usb_hw_params(struct snd_pcm_substream *substream, params_rate(hw_params), snd_pcm_format_physical_width(params_format(hw_params)) * params_channels(hw_params)); - mutex_unlock(&subs->stream->chip->shutdown_mutex); } +unlock: + up_read(&subs->stream->chip->shutdown_rwsem); return ret; } @@ -414,9 +422,9 @@ static int snd_usb_hw_free(struct snd_pcm_substream *substream) subs->cur_audiofmt = NULL; subs->cur_rate = 0; subs->period_bytes = 0; - mutex_lock(&subs->stream->chip->shutdown_mutex); + down_read(&subs->stream->chip->shutdown_rwsem); snd_usb_release_substream_urbs(subs, 0); - mutex_unlock(&subs->stream->chip->shutdown_mutex); + up_read(&subs->stream->chip->shutdown_rwsem); return snd_pcm_lib_free_vmalloc_buffer(substream); } @@ -429,12 +437,18 @@ static int snd_usb_pcm_prepare(struct snd_pcm_substream *substream) { struct snd_pcm_runtime *runtime = substream->runtime; struct snd_usb_substream *subs = runtime->private_data; + int ret = 0; if (! subs->cur_audiofmt) { snd_printk(KERN_ERR "usbaudio: no format is specified!\n"); return -ENXIO; } + down_read(&subs->stream->chip->shutdown_rwsem); + if (subs->stream->chip->shutdown) { + ret = -ENODEV; + goto unlock; + } /* some unit conversions in runtime */ subs->maxframesize = bytes_to_frames(runtime, subs->maxpacksize); subs->curframesize = bytes_to_frames(runtime, subs->curpacksize); @@ -447,7 +461,10 @@ static int snd_usb_pcm_prepare(struct snd_pcm_substream *substream) subs->last_frame_number = 0; runtime->delay = 0; - return snd_usb_substream_prepare(subs, runtime); + ret = snd_usb_substream_prepare(subs, runtime); + unlock: + up_read(&subs->stream->chip->shutdown_rwsem); + return ret; } static struct snd_pcm_hardware snd_usb_hardware = @@ -500,7 +517,7 @@ static int hw_check_valid_format(struct snd_usb_substream *subs, return 0; } /* check whether the period time is >= the data packet interval */ - if (snd_usb_get_speed(subs->dev) != USB_SPEED_FULL) { + if (subs->speed != USB_SPEED_FULL) { ptime = 125 * (1 << fp->datainterval); if (ptime > pt->max || (ptime == pt->max && pt->openmax)) { hwc_debug(" > check: ptime %u > max %u\n", ptime, pt->max); @@ -778,7 +795,7 @@ static int setup_hw_info(struct snd_pcm_runtime *runtime, struct snd_usb_substre return err; param_period_time_if_needed = SNDRV_PCM_HW_PARAM_PERIOD_TIME; - if (snd_usb_get_speed(subs->dev) == USB_SPEED_FULL) + if (subs->speed == USB_SPEED_FULL) /* full speed devices have fixed data packet interval */ ptmin = 1000; if (ptmin == 1000) diff --git a/sound/usb/proc.c b/sound/usb/proc.c index 961c9a250686..aef03db3870d 100644 --- a/sound/usb/proc.c +++ b/sound/usb/proc.c @@ -107,7 +107,7 @@ static void proc_dump_substream_formats(struct snd_usb_substream *subs, struct s } snd_iprintf(buffer, "\n"); } - if (snd_usb_get_speed(subs->dev) != USB_SPEED_FULL) + if (subs->speed != USB_SPEED_FULL) snd_iprintf(buffer, " Data packet interval: %d us\n", 125 * (1 << fp->datainterval)); // snd_iprintf(buffer, " Max Packet Size = %d\n", fp->maxpacksize); @@ -128,7 +128,7 @@ static void proc_dump_substream_status(struct snd_usb_substream *subs, struct sn snd_iprintf(buffer, "]\n"); snd_iprintf(buffer, " Packet Size = %d\n", subs->curpacksize); snd_iprintf(buffer, " Momentary freq = %u Hz (%#x.%04x)\n", - snd_usb_get_speed(subs->dev) == USB_SPEED_FULL + subs->speed == USB_SPEED_FULL ? 
get_full_speed_hz(subs->freqm) : get_high_speed_hz(subs->freqm), subs->freqm >> 16, subs->freqm & 0xffff); diff --git a/sound/usb/quirks-table.h b/sound/usb/quirks-table.h index d89ab4c7d44b..fa4c2f7e6aba 100644 --- a/sound/usb/quirks-table.h +++ b/sound/usb/quirks-table.h @@ -1658,7 +1658,7 @@ YAMAHA_DEVICE(0x7010, "UB99"), .driver_info = (unsigned long) & (const struct snd_usb_audio_quirk) { /* .vendor_name = "Roland", */ /* .product_name = "A-PRO", */ - .ifnum = 1, + .ifnum = 0, .type = QUIRK_MIDI_FIXED_ENDPOINT, .data = & (const struct snd_usb_midi_endpoint_info) { .out_cables = 0x0003, @@ -2751,6 +2751,59 @@ YAMAHA_DEVICE(0x7010, "UB99"), } }, +/* Microsoft XboxLive Headset/Xbox Communicator */ +{ + USB_DEVICE(0x045e, 0x0283), + .bInterfaceClass = USB_CLASS_PER_INTERFACE, + .driver_info = (unsigned long) &(const struct snd_usb_audio_quirk) { + .vendor_name = "Microsoft", + .product_name = "XboxLive Headset/Xbox Communicator", + .ifnum = QUIRK_ANY_INTERFACE, + .type = QUIRK_COMPOSITE, + .data = &(const struct snd_usb_audio_quirk[]) { + { + /* playback */ + .ifnum = 0, + .type = QUIRK_AUDIO_FIXED_ENDPOINT, + .data = &(const struct audioformat) { + .formats = SNDRV_PCM_FMTBIT_S16_LE, + .channels = 1, + .iface = 0, + .altsetting = 0, + .altset_idx = 0, + .attributes = 0, + .endpoint = 0x04, + .ep_attr = 0x05, + .rates = SNDRV_PCM_RATE_CONTINUOUS, + .rate_min = 22050, + .rate_max = 22050 + } + }, + { + /* capture */ + .ifnum = 1, + .type = QUIRK_AUDIO_FIXED_ENDPOINT, + .data = &(const struct audioformat) { + .formats = SNDRV_PCM_FMTBIT_S16_LE, + .channels = 1, + .iface = 1, + .altsetting = 0, + .altset_idx = 0, + .attributes = 0, + .endpoint = 0x85, + .ep_attr = 0x05, + .rates = SNDRV_PCM_RATE_CONTINUOUS, + .rate_min = 16000, + .rate_max = 16000 + } + }, + { + .ifnum = -1 + } + } + } +}, + { /* * Some USB MIDI devices don't have an audio control interface, diff --git a/sound/usb/quirks.c b/sound/usb/quirks.c index 27817266867a..c46171a2bf2f 100644 --- a/sound/usb/quirks.c +++ b/sound/usb/quirks.c @@ -387,11 +387,13 @@ static int snd_usb_fasttrackpro_boot_quirk(struct usb_device *dev) * rules */ err = usb_driver_set_configuration(dev, 2); - if (err < 0) { + if (err < 0) snd_printdd("error usb_driver_set_configuration: %d\n", err); - return -ENODEV; - } + /* Always return an error, so that we stop creating a device + that will just be destroyed and recreated with a new + configuration */ + return -ENODEV; } else snd_printk(KERN_INFO "usb-audio: Fast Track Pro config OK\n"); diff --git a/sound/usb/usbaudio.h b/sound/usb/usbaudio.h index 3e2b03577936..6c805a51d7d7 100644 --- a/sound/usb/usbaudio.h +++ b/sound/usb/usbaudio.h @@ -36,7 +36,7 @@ struct snd_usb_audio { struct snd_card *card; struct usb_interface *pm_intf; u32 usb_id; - struct mutex shutdown_mutex; + struct rw_semaphore shutdown_rwsem; unsigned int shutdown:1; unsigned int probing:1; unsigned int autosuspended:1; diff --git a/tools/hv/hv_kvp_daemon.c b/tools/hv/hv_kvp_daemon.c index d9834b362943..2984ffb7bac7 100644 --- a/tools/hv/hv_kvp_daemon.c +++ b/tools/hv/hv_kvp_daemon.c @@ -106,7 +106,7 @@ static void kvp_acquire_lock(int pool) if (fcntl(kvp_file_info[pool].fd, F_SETLKW, &fl) == -1) { syslog(LOG_ERR, "Failed to acquire the lock pool: %d", pool); - exit(-1); + exit(EXIT_FAILURE); } } @@ -118,7 +118,7 @@ static void kvp_release_lock(int pool) if (fcntl(kvp_file_info[pool].fd, F_SETLK, &fl) == -1) { perror("fcntl"); syslog(LOG_ERR, "Failed to release the lock pool: %d", pool); - exit(-1); + exit(EXIT_FAILURE); } } @@ -137,14 
+137,19 @@ static void kvp_update_file(int pool) if (!filep) { kvp_release_lock(pool); syslog(LOG_ERR, "Failed to open file, pool: %d", pool); - exit(-1); + exit(EXIT_FAILURE); } bytes_written = fwrite(kvp_file_info[pool].records, sizeof(struct kvp_record), kvp_file_info[pool].num_records, filep); - fflush(filep); + if (ferror(filep) || fclose(filep)) { + kvp_release_lock(pool); + syslog(LOG_ERR, "Failed to write file, pool: %d", pool); + exit(EXIT_FAILURE); + } + kvp_release_lock(pool); } @@ -163,14 +168,19 @@ static void kvp_update_mem_state(int pool) if (!filep) { kvp_release_lock(pool); syslog(LOG_ERR, "Failed to open file, pool: %d", pool); - exit(-1); + exit(EXIT_FAILURE); } - while (!feof(filep)) { + for (;;) { readp = &record[records_read]; records_read += fread(readp, sizeof(struct kvp_record), ENTRIES_PER_BLOCK * num_blocks, filep); + if (ferror(filep)) { + syslog(LOG_ERR, "Failed to read file, pool: %d", pool); + exit(EXIT_FAILURE); + } + if (!feof(filep)) { /* * We have more data to read. @@ -180,7 +190,7 @@ static void kvp_update_mem_state(int pool) if (record == NULL) { syslog(LOG_ERR, "malloc failed"); - exit(-1); + exit(EXIT_FAILURE); } continue; } @@ -191,6 +201,7 @@ static void kvp_update_mem_state(int pool) kvp_file_info[pool].records = record; kvp_file_info[pool].num_records = records_read; + fclose(filep); kvp_release_lock(pool); } static int kvp_file_init(void) @@ -208,7 +219,7 @@ static int kvp_file_init(void) if (access("/var/opt/hyperv", F_OK)) { if (mkdir("/var/opt/hyperv", S_IRUSR | S_IWUSR | S_IROTH)) { syslog(LOG_ERR, " Failed to create /var/opt/hyperv"); - exit(-1); + exit(EXIT_FAILURE); } } @@ -232,12 +243,18 @@ static int kvp_file_init(void) fclose(filep); return 1; } - while (!feof(filep)) { + for (;;) { readp = &record[records_read]; records_read += fread(readp, sizeof(struct kvp_record), ENTRIES_PER_BLOCK, filep); + if (ferror(filep)) { + syslog(LOG_ERR, "Failed to read file, pool: %d", + i); + exit(EXIT_FAILURE); + } + if (!feof(filep)) { /* * We have more data to read. @@ -657,13 +674,13 @@ int main(void) if (kvp_file_init()) { syslog(LOG_ERR, "Failed to initialize the pools"); - exit(-1); + exit(EXIT_FAILURE); } fd = socket(AF_NETLINK, SOCK_DGRAM, NETLINK_CONNECTOR); if (fd < 0) { syslog(LOG_ERR, "netlink socket creation failed; error:%d", fd); - exit(-1); + exit(EXIT_FAILURE); } addr.nl_family = AF_NETLINK; addr.nl_pad = 0; @@ -675,7 +692,7 @@ int main(void) if (error < 0) { syslog(LOG_ERR, "bind failed; error:%d", error); close(fd); - exit(-1); + exit(EXIT_FAILURE); } sock_opt = addr.nl_groups; setsockopt(fd, 270, 1, &sock_opt, sizeof(sock_opt)); @@ -695,7 +712,7 @@ int main(void) if (len < 0) { syslog(LOG_ERR, "netlink_send failed; error:%d", len); close(fd); - exit(-1); + exit(EXIT_FAILURE); } pfd.fd = fd; @@ -863,7 +880,7 @@ kvp_done: len = netlink_send(fd, incoming_cn_msg); if (len < 0) { syslog(LOG_ERR, "net_link send failed; error:%d", len); - exit(-1); + exit(EXIT_FAILURE); } } diff --git a/tools/lguest/lguest.c b/tools/lguest/lguest.c index f759f4f097c7..fd2f9221b241 100644 --- a/tools/lguest/lguest.c +++ b/tools/lguest/lguest.c @@ -1299,6 +1299,7 @@ static struct device *new_device(const char *name, u16 type) dev->feature_len = 0; dev->num_vq = 0; dev->running = false; + dev->next = NULL; /* * Append to device list. 
Prepending to a single-linked list is diff --git a/tools/perf/Makefile b/tools/perf/Makefile index 92271d32bc30..2db7ba001c63 100644 --- a/tools/perf/Makefile +++ b/tools/perf/Makefile @@ -70,7 +70,7 @@ ifeq ($(ARCH),x86_64) ARCH := x86 IS_X86_64 := 0 ifeq (, $(findstring m32,$(EXTRA_CFLAGS))) - IS_X86_64 := $(shell echo __x86_64__ | ${CC} -E -xc - | tail -n 1) + IS_X86_64 := $(shell echo __x86_64__ | ${CC} -E -x c - | tail -n 1) endif ifeq (${IS_X86_64}, 1) RAW_ARCH := x86_64 @@ -241,13 +241,13 @@ $(OUTPUT)util/parse-events-flex.c: util/parse-events.l $(QUIET_FLEX)$(FLEX) --header-file=$(OUTPUT)util/parse-events-flex.h -t util/parse-events.l > $(OUTPUT)util/parse-events-flex.c $(OUTPUT)util/parse-events-bison.c: util/parse-events.y - $(QUIET_BISON)$(BISON) -v util/parse-events.y -d -o $(OUTPUT)util/parse-events-bison.c + $(QUIET_BISON)$(BISON) -v util/parse-events.y -d -o $(OUTPUT)util/parse-events-bison.c -p parse_events_ $(OUTPUT)util/pmu-flex.c: util/pmu.l $(QUIET_FLEX)$(FLEX) --header-file=$(OUTPUT)util/pmu-flex.h -t util/pmu.l > $(OUTPUT)util/pmu-flex.c $(OUTPUT)util/pmu-bison.c: util/pmu.y - $(QUIET_BISON)$(BISON) -v util/pmu.y -d -o $(OUTPUT)util/pmu-bison.c + $(QUIET_BISON)$(BISON) -v util/pmu.y -d -o $(OUTPUT)util/pmu-bison.c -p perf_pmu_ $(OUTPUT)util/parse-events.o: $(OUTPUT)util/parse-events-flex.c $(OUTPUT)util/parse-events-bison.c $(OUTPUT)util/pmu.o: $(OUTPUT)util/pmu-flex.c $(OUTPUT)util/pmu-bison.c diff --git a/tools/perf/builtin-test.c b/tools/perf/builtin-test.c index 223ffdcc0fd8..3487b4b42114 100644 --- a/tools/perf/builtin-test.c +++ b/tools/perf/builtin-test.c @@ -1154,19 +1154,13 @@ static int test__parse_events(void) return ret; } -static int sched__get_first_possible_cpu(pid_t pid, cpu_set_t **maskp, - size_t *sizep) +static int sched__get_first_possible_cpu(pid_t pid, cpu_set_t *maskp) { - cpu_set_t *mask; - size_t size; int i, cpu = -1, nrcpus = 1024; realloc: - mask = CPU_ALLOC(nrcpus); - size = CPU_ALLOC_SIZE(nrcpus); - CPU_ZERO_S(size, mask); + CPU_ZERO(maskp); - if (sched_getaffinity(pid, size, mask) == -1) { - CPU_FREE(mask); + if (sched_getaffinity(pid, sizeof(*maskp), maskp) == -1) { if (errno == EINVAL && nrcpus < (1024 << 8)) { nrcpus = nrcpus << 2; goto realloc; @@ -1176,19 +1170,14 @@ realloc: } for (i = 0; i < nrcpus; i++) { - if (CPU_ISSET_S(i, size, mask)) { - if (cpu == -1) { + if (CPU_ISSET(i, maskp)) { + if (cpu == -1) cpu = i; - *maskp = mask; - *sizep = size; - } else - CPU_CLR_S(i, size, mask); + else + CPU_CLR(i, maskp); } } - if (cpu == -1) - CPU_FREE(mask); - return cpu; } @@ -1199,8 +1188,8 @@ static int test__PERF_RECORD(void) .freq = 10, .mmap_pages = 256, }; - cpu_set_t *cpu_mask = NULL; - size_t cpu_mask_size = 0; + cpu_set_t cpu_mask; + size_t cpu_mask_size = sizeof(cpu_mask); struct perf_evlist *evlist = perf_evlist__new(NULL, NULL); struct perf_evsel *evsel; struct perf_sample sample; @@ -1265,8 +1254,7 @@ static int test__PERF_RECORD(void) evsel->attr.sample_type |= PERF_SAMPLE_TIME; perf_evlist__config_attrs(evlist, &opts); - err = sched__get_first_possible_cpu(evlist->workload.pid, &cpu_mask, - &cpu_mask_size); + err = sched__get_first_possible_cpu(evlist->workload.pid, &cpu_mask); if (err < 0) { pr_debug("sched__get_first_possible_cpu: %s\n", strerror(errno)); goto out_delete_evlist; @@ -1277,9 +1265,9 @@ static int test__PERF_RECORD(void) /* * So that we can check perf_sample.cpu on all the samples. 
*/ - if (sched_setaffinity(evlist->workload.pid, cpu_mask_size, cpu_mask) < 0) { + if (sched_setaffinity(evlist->workload.pid, cpu_mask_size, &cpu_mask) < 0) { pr_debug("sched_setaffinity: %s\n", strerror(errno)); - goto out_free_cpu_mask; + goto out_delete_evlist; } /* @@ -1472,8 +1460,6 @@ found_exit: } out_err: perf_evlist__munmap(evlist); -out_free_cpu_mask: - CPU_FREE(cpu_mask); out_delete_evlist: perf_evlist__delete(evlist); out: diff --git a/tools/perf/util/parse-events.y b/tools/perf/util/parse-events.y index d9637da7333c..3f35ea3f5047 100644 --- a/tools/perf/util/parse-events.y +++ b/tools/perf/util/parse-events.y @@ -1,5 +1,4 @@ -%name-prefix "parse_events_" %parse-param {struct list_head *list_all} %parse-param {struct list_head *list_event} %parse-param {int *idx} diff --git a/tools/perf/util/pmu.y b/tools/perf/util/pmu.y index 20ea77e93169..522943f7bfdf 100644 --- a/tools/perf/util/pmu.y +++ b/tools/perf/util/pmu.y @@ -1,5 +1,4 @@ -%name-prefix "perf_pmu_" %parse-param {struct list_head *format} %parse-param {char *name} diff --git a/tools/power/cpupower/Makefile b/tools/power/cpupower/Makefile index a93e06cfcc2a..cf397bd26d0c 100644 --- a/tools/power/cpupower/Makefile +++ b/tools/power/cpupower/Makefile @@ -111,7 +111,7 @@ GMO_FILES = ${shell for HLANG in ${LANGUAGES}; do echo $(OUTPUT)po/$$HLANG.gmo; export CROSS CC AR STRIP RANLIB CFLAGS LDFLAGS LIB_OBJS # check if compiler option is supported -cc-supports = ${shell if $(CC) ${1} -S -o /dev/null -xc /dev/null > /dev/null 2>&1; then echo "$(1)"; fi;} +cc-supports = ${shell if $(CC) ${1} -S -o /dev/null -x c /dev/null > /dev/null 2>&1; then echo "$(1)"; fi;} # use '-Os' optimization if available, else use -O2 OPTIMIZATION := $(call cc-supports,-Os,-O2) diff --git a/usr/gen_init_cpio.c b/usr/gen_init_cpio.c index af0f22fb1ef7..aca6edcbbc6f 100644 --- a/usr/gen_init_cpio.c +++ b/usr/gen_init_cpio.c @@ -303,7 +303,7 @@ static int cpio_mkfile(const char *name, const char *location, int retval; int rc = -1; int namesize; - int i; + unsigned int i; mode |= S_IFREG; @@ -381,25 +381,28 @@ error: static char *cpio_replace_env(char *new_location) { - char expanded[PATH_MAX + 1]; - char env_var[PATH_MAX + 1]; - char *start; - char *end; - - for (start = NULL; (start = strstr(new_location, "${")); ) { - end = strchr(start, '}'); - if (start < end) { - *env_var = *expanded = '\0'; - strncat(env_var, start + 2, end - start - 2); - strncat(expanded, new_location, start - new_location); - strncat(expanded, getenv(env_var), PATH_MAX); - strncat(expanded, end + 1, PATH_MAX); - strncpy(new_location, expanded, PATH_MAX); - } else - break; - } - - return new_location; + char expanded[PATH_MAX + 1]; + char env_var[PATH_MAX + 1]; + char *start; + char *end; + + for (start = NULL; (start = strstr(new_location, "${")); ) { + end = strchr(start, '}'); + if (start < end) { + *env_var = *expanded = '\0'; + strncat(env_var, start + 2, end - start - 2); + strncat(expanded, new_location, start - new_location); + strncat(expanded, getenv(env_var), + PATH_MAX - strlen(expanded)); + strncat(expanded, end + 1, + PATH_MAX - strlen(expanded)); + strncpy(new_location, expanded, PATH_MAX); + new_location[PATH_MAX] = 0; + } else + break; + } + + return new_location; } diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c index 9739b533ca2e..71b9036644a1 100644 --- a/virt/kvm/kvm_main.c +++ b/virt/kvm/kvm_main.c @@ -693,8 +693,7 @@ int __kvm_set_memory_region(struct kvm *kvm, int r; gfn_t base_gfn; unsigned long npages; - unsigned long i; - struct kvm_memory_slot 
*memslot; + struct kvm_memory_slot *memslot, *slot; struct kvm_memory_slot old, new; struct kvm_memslots *slots, *old_memslots; @@ -741,13 +740,11 @@ int __kvm_set_memory_region(struct kvm *kvm, /* Check for overlaps */ r = -EEXIST; - for (i = 0; i < KVM_MEMORY_SLOTS; ++i) { - struct kvm_memory_slot *s = &kvm->memslots->memslots[i]; - - if (s == memslot || !s->npages) + kvm_for_each_memslot(slot, kvm->memslots) { + if (slot->id >= KVM_MEMORY_SLOTS || slot == memslot) continue; - if (!((base_gfn + npages <= s->base_gfn) || - (base_gfn >= s->base_gfn + s->npages))) + if (!((base_gfn + npages <= slot->base_gfn) || + (base_gfn >= slot->base_gfn + slot->npages))) goto out_free; } |
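
The sound/usb hunks above replace the chip's shutdown_mutex with an rw_semaphore: every control-message and streaming path now takes the semaphore for reading and checks the shutdown flag first, so many such paths can run concurrently, while the disconnect path, holding it for writing, excludes them all. A minimal sketch of that locking pattern, assuming hypothetical names (struct usb_chip, do_control_transfer, handle_disconnect) rather than the driver's own:

#include <linux/rwsem.h>
#include <linux/errno.h>

/* Cut-down stand-in for struct snd_usb_audio; init_rwsem() is assumed
 * to have been called on shutdown_rwsem at probe time. */
struct usb_chip {
        struct rw_semaphore shutdown_rwsem;
        unsigned int shutdown:1;
};

/* Transfer paths take the semaphore shared, bail out if the device is
 * already gone, and release it before returning. */
static int do_control_transfer(struct usb_chip *chip)
{
        int err;

        down_read(&chip->shutdown_rwsem);
        if (chip->shutdown)
                err = -ENODEV;  /* device unplugged, do not touch it */
        else
                err = 0;        /* ... issue usb_control_msg() here ... */
        up_read(&chip->shutdown_rwsem);
        return err;
}

/* Disconnect takes it exclusive: it waits for in-flight readers to
 * drain, marks the device dead, then tears the resources down. */
static void handle_disconnect(struct usb_chip *chip)
{
        down_write(&chip->shutdown_rwsem);
        chip->shutdown = 1;
        up_write(&chip->shutdown_rwsem);
        /* ... release URBs, mixer and PCM objects ... */
}

Compared with the old mutex, readers no longer serialize against each other, which matters because hw_params, prepare and the mixer quirk controls can all issue transfers at the same time.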
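
In hv_kvp_daemon.c, kvp_update_file() stops relying on fflush() and instead checks ferror() and the fclose() return value, exiting with EXIT_FAILURE (as the other error paths now do) when the pool file could not be written. The same write-and-verify idiom, reduced to a sketch with a hypothetical path and record layout:

#include <stdio.h>
#include <stdlib.h>
#include <syslog.h>

struct kvp_record { char key[512]; char value[2048]; }; /* illustrative sizes */

static void write_pool(const char *path, const struct kvp_record *recs, size_t n)
{
        FILE *filep = fopen(path, "w");

        if (!filep) {
                syslog(LOG_ERR, "Failed to open %s", path);
                exit(EXIT_FAILURE);
        }
        fwrite(recs, sizeof(*recs), n, filep);
        /* ferror() catches short or failed writes, fclose() catches errors
         * that only surface at flush time; either way the on-disk pool is
         * suspect, so give up loudly. */
        if (ferror(filep) || fclose(filep)) {
                syslog(LOG_ERR, "Failed to write %s", path);
                exit(EXIT_FAILURE);
        }
}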
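
kvp_update_mem_state() and kvp_file_init() likewise drop the while (!feof()) pattern for an explicit loop that checks ferror() after each fread() and only trusts feof() once a read has come back short, growing the buffer whenever end-of-file has not been reached. A sketch of that loop shape, with a hypothetical record type and block size:

#include <stdio.h>
#include <stdlib.h>

#define BLOCK 64                        /* hypothetical records per read */
struct rec { char data[64]; };          /* hypothetical record layout */

static struct rec *read_all(FILE *filep, size_t *count)
{
        struct rec *buf = malloc(BLOCK * sizeof(*buf));
        size_t total = 0, blocks = 1;

        if (!buf)
                exit(EXIT_FAILURE);
        for (;;) {
                total += fread(buf + total, sizeof(*buf), BLOCK * blocks, filep);
                if (ferror(filep))      /* read error: fail loudly, never spin */
                        exit(EXIT_FAILURE);
                if (feof(filep))        /* short read plus EOF: really done */
                        break;
                /* More data pending: grow the buffer and read another block. */
                blocks++;
                buf = realloc(buf, (total + BLOCK * blocks) * sizeof(*buf));
                if (!buf)
                        exit(EXIT_FAILURE);
        }
        *count = total;
        return buf;
}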
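
The builtin-test.c change swaps the CPU_ALLOC()/CPU_FREE() dance for a plain cpu_set_t on the stack, removing the free-on-every-error-path bookkeeping at the cost of being limited to CPU_SETSIZE (1024) CPUs. A minimal userspace sketch of the underlying operation, finding the lowest CPU a task is allowed to run on:

#define _GNU_SOURCE
#include <sched.h>
#include <sys/types.h>

/* Return the lowest CPU in pid's affinity mask, or -1 on error. */
static int first_possible_cpu(pid_t pid)
{
        cpu_set_t mask;
        int i;

        CPU_ZERO(&mask);
        if (sched_getaffinity(pid, sizeof(mask), &mask) == -1)
                return -1;
        for (i = 0; i < CPU_SETSIZE; i++)
                if (CPU_ISSET(i, &mask))
                        return i;
        return -1;
}

The test itself goes further and clears every other bit so the workload can then be pinned with sched_setaffinity(); the sketch only locates the CPU.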
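
The gen_init_cpio.c rewrite of cpio_replace_env() bounds each strncat() by the space actually remaining in the destination and NUL-terminates after the final strncpy(), so a long environment value can no longer run past PATH_MAX. The bounded-append part of that fix, as a sketch that expands a single ${VAR} occurrence (the NULL check on getenv() is an addition here, not part of the patch):

#include <limits.h>
#include <stdlib.h>
#include <string.h>

/* Expand one ${VAR}: 'start' points at "${", 'end' at the matching '}'.
 * Every append is limited to the space left in a PATH_MAX-sized buffer. */
static void expand_one(char out[PATH_MAX + 1], const char *loc,
                       const char *start, const char *end)
{
        char env_var[PATH_MAX + 1] = "";
        const char *val;

        strncat(env_var, start + 2, end - start - 2);   /* name between ${ and } */
        val = getenv(env_var);
        if (!val)
                val = "";                               /* assumed: unset expands to empty */

        out[0] = '\0';
        strncat(out, loc, start - loc);                 /* text before ${ */
        strncat(out, val, PATH_MAX - strlen(out));      /* bounded by remaining room */
        strncat(out, end + 1, PATH_MAX - strlen(out));  /* text after } */
}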
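
Finally, the kvm_main.c hunk walks the slots with kvm_for_each_memslot() instead of an index loop, but the overlap test it applies is the standard interval check: two GFN ranges collide unless one ends at or before the start of the other. The predicate on its own, with plain integer parameters standing in for the memslot fields:

#include <stdbool.h>

typedef unsigned long long gfn_t;       /* stand-in for the kernel's gfn_t */

/* [base_a, base_a + npages_a) and [base_b, base_b + npages_b) overlap
 * unless one range finishes before the other one starts. */
static bool gfn_ranges_overlap(gfn_t base_a, unsigned long npages_a,
                               gfn_t base_b, unsigned long npages_b)
{
        return !(base_a + npages_a <= base_b || base_a >= base_b + npages_b);
}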