| author | Ingo Molnar <mingo@kernel.org> | 2013-11-06 06:50:21 +0100 |
|---|---|---|
| committer | Ingo Molnar <mingo@kernel.org> | 2013-11-06 06:50:21 +0100 |
| commit | 164777530b8853d92b52956312078190e2c5eeb1 (patch) | |
| tree | 88fa0de7577b106837f041d00dce12a1fa6d2ac6 | |
| parent | 09dc68d958c67c76cf672ec78b7391af453010f8 (diff) | |
| parent | 5e01dc7b26d9f24f39abace5da98ccbd6a5ceb52 (diff) | |
Merge tag 'v3.12' into x86/cpu, to refresh the branch before queueing up more changes
Signed-off-by: Ingo Molnar <mingo@kernel.org>
120 files changed, 1142 insertions, 864 deletions
diff --git a/MAINTAINERS b/MAINTAINERS index ebaf8bd7a570..ffcaf975bed7 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -1009,6 +1009,7 @@ ARM/Marvell Armada 370 and Armada XP SOC support M: Jason Cooper <jason@lakedaemon.net> M: Andrew Lunn <andrew@lunn.ch> M: Gregory Clement <gregory.clement@free-electrons.com> +M: Sebastian Hesselbarth <sebastian.hesselbarth@gmail.com> L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) S: Maintained F: arch/arm/mach-mvebu/ @@ -1016,6 +1017,7 @@ F: arch/arm/mach-mvebu/ ARM/Marvell Dove/Kirkwood/MV78xx0/Orion SOC support M: Jason Cooper <jason@lakedaemon.net> M: Andrew Lunn <andrew@lunn.ch> +M: Sebastian Hesselbarth <sebastian.hesselbarth@gmail.com> L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) S: Maintained F: arch/arm/mach-dove/ @@ -1148,6 +1150,13 @@ F: drivers/net/ethernet/i825xx/ether1* F: drivers/net/ethernet/seeq/ether3* F: drivers/scsi/arm/ +ARM/Rockchip SoC support +M: Heiko Stuebner <heiko@sntech.de> +L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) +S: Maintained +F: arch/arm/mach-rockchip/ +F: drivers/*/*rockchip* + ARM/SHARK MACHINE SUPPORT M: Alexander Schulz <alex@shark-linux.de> W: http://www.shark-linux.de/shark.html @@ -2719,6 +2728,8 @@ T: git git://git.linaro.org/people/sumitsemwal/linux-dma-buf.git DMA GENERIC OFFLOAD ENGINE SUBSYSTEM M: Vinod Koul <vinod.koul@intel.com> M: Dan Williams <dan.j.williams@intel.com> +L: dmaengine@vger.kernel.org +Q: https://patchwork.kernel.org/project/linux-dmaengine/list/ S: Supported F: drivers/dma/ F: include/linux/dma* @@ -2822,7 +2833,7 @@ M: Terje Bergström <tbergstrom@nvidia.com> L: dri-devel@lists.freedesktop.org L: linux-tegra@vger.kernel.org T: git git://anongit.freedesktop.org/tegra/linux.git -S: Maintained +S: Supported F: drivers/gpu/host1x/ F: include/uapi/drm/tegra_drm.h F: Documentation/devicetree/bindings/gpu/nvidia,tegra20-host1x.txt @@ -4358,7 +4369,10 @@ F: arch/x86/kernel/microcode_intel.c INTEL I/OAT DMA DRIVER M: Dan Williams <dan.j.williams@intel.com> -S: Maintained +M: Dave Jiang <dave.jiang@intel.com> +L: dmaengine@vger.kernel.org +Q: https://patchwork.kernel.org/project/linux-dmaengine/list/ +S: Supported F: drivers/dma/ioat* INTEL IOMMU (VT-d) @@ -8302,14 +8316,72 @@ L: linux-media@vger.kernel.org S: Maintained F: drivers/media/rc/ttusbir.c -TEGRA SUPPORT +TEGRA ARCHITECTURE SUPPORT M: Stephen Warren <swarren@wwwdotorg.org> +M: Thierry Reding <thierry.reding@gmail.com> L: linux-tegra@vger.kernel.org Q: http://patchwork.ozlabs.org/project/linux-tegra/list/ T: git git://git.kernel.org/pub/scm/linux/kernel/git/swarren/linux-tegra.git S: Supported N: [^a-z]tegra +TEGRA ASOC DRIVER +M: Stephen Warren <swarren@wwwdotorg.org> +S: Supported +F: sound/soc/tegra/ + +TEGRA CLOCK DRIVER +M: Peter De Schrijver <pdeschrijver@nvidia.com> +M: Prashant Gaikwad <pgaikwad@nvidia.com> +S: Supported +F: drivers/clk/tegra/ + +TEGRA DMA DRIVER +M: Laxman Dewangan <ldewangan@nvidia.com> +S: Supported +F: drivers/dma/tegra20-apb-dma.c + +TEGRA GPIO DRIVER +M: Stephen Warren <swarren@wwwdotorg.org> +S: Supported +F: drivers/gpio/gpio-tegra.c + +TEGRA I2C DRIVER +M: Laxman Dewangan <ldewangan@nvidia.com> +S: Supported +F: drivers/i2c/busses/i2c-tegra.c + +TEGRA IOMMU DRIVERS +M: Hiroshi Doyu <hdoyu@nvidia.com> +S: Supported +F: drivers/iommu/tegra* + +TEGRA KBC DRIVER +M: Rakesh Iyer <riyer@nvidia.com> +M: Laxman Dewangan <ldewangan@nvidia.com> +S: Supported +F: drivers/input/keyboard/tegra-kbc.c + +TEGRA PINCTRL DRIVER +M: Stephen 
Warren <swarren@wwwdotorg.org> +S: Supported +F: drivers/pinctrl/pinctrl-tegra* + +TEGRA PWM DRIVER +M: Thierry Reding <thierry.reding@gmail.com> +S: Supported +F: drivers/pwm/pwm-tegra.c + +TEGRA SERIAL DRIVER +M: Laxman Dewangan <ldewangan@nvidia.com> +S: Supported +F: drivers/tty/serial/serial-tegra.c + +TEGRA SPI DRIVER +M: Laxman Dewangan <ldewangan@nvidia.com> +S: Supported +F: drivers/spi/spi-tegra* + TEHUTI ETHERNET DRIVER M: Andy Gospodarek <andy@greyhouse.net> L: netdev@vger.kernel.org @@ -8845,61 +8917,14 @@ W: http://pegasus2.sourceforge.net/ S: Maintained F: drivers/net/usb/rtl8150.c -USB SERIAL BELKIN F5U103 DRIVER -M: William Greathouse <wgreathouse@smva.com> -L: linux-usb@vger.kernel.org -S: Maintained -F: drivers/usb/serial/belkin_sa.* - -USB SERIAL CYPRESS M8 DRIVER -M: Lonnie Mendez <dignome@gmail.com> -L: linux-usb@vger.kernel.org -S: Maintained -W: http://geocities.com/i0xox0i -W: http://firstlight.net/cvs -F: drivers/usb/serial/cypress_m8.* - -USB SERIAL CYBERJACK DRIVER -M: Matthias Bruestle and Harald Welte <support@reiner-sct.com> -W: http://www.reiner-sct.de/support/treiber_cyberjack.php -S: Maintained -F: drivers/usb/serial/cyberjack.c - -USB SERIAL DIGI ACCELEPORT DRIVER -M: Peter Berger <pberger@brimson.com> -M: Al Borchers <alborchers@steinerpoint.com> +USB SERIAL SUBSYSTEM +M: Johan Hovold <jhovold@gmail.com> L: linux-usb@vger.kernel.org S: Maintained -F: drivers/usb/serial/digi_acceleport.c - -USB SERIAL DRIVER -M: Greg Kroah-Hartman <gregkh@linuxfoundation.org> -L: linux-usb@vger.kernel.org -S: Supported F: Documentation/usb/usb-serial.txt -F: drivers/usb/serial/generic.c -F: drivers/usb/serial/usb-serial.c +F: drivers/usb/serial/ F: include/linux/usb/serial.h -USB SERIAL EMPEG EMPEG-CAR MARK I/II DRIVER -M: Gary Brubaker <xavyer@ix.netcom.com> -L: linux-usb@vger.kernel.org -S: Maintained -F: drivers/usb/serial/empeg.c - -USB SERIAL KEYSPAN DRIVER -M: Greg Kroah-Hartman <gregkh@linuxfoundation.org> -L: linux-usb@vger.kernel.org -S: Maintained -F: drivers/usb/serial/*keyspan* - -USB SERIAL WHITEHEAT DRIVER -M: Support Department <support@connecttech.com> -L: linux-usb@vger.kernel.org -W: http://www.connecttech.com -S: Supported -F: drivers/usb/serial/whiteheat* - USB SMSC75XX ETHERNET DRIVER M: Steve Glendinning <steve.glendinning@shawell.net> L: netdev@vger.kernel.org @@ -1,7 +1,7 @@ VERSION = 3 PATCHLEVEL = 12 SUBLEVEL = 0 -EXTRAVERSION = -rc6 +EXTRAVERSION = NAME = One Giant Leap for Frogkind # *DOCUMENTATION* diff --git a/arch/arc/mm/fault.c b/arch/arc/mm/fault.c index d63f3de0cd5b..0c14d8a52683 100644 --- a/arch/arc/mm/fault.c +++ b/arch/arc/mm/fault.c @@ -17,7 +17,7 @@ #include <asm/pgalloc.h> #include <asm/mmu.h> -static int handle_vmalloc_fault(struct mm_struct *mm, unsigned long address) +static int handle_vmalloc_fault(unsigned long address) { /* * Synchronize this task's top level page-table @@ -27,7 +27,7 @@ static int handle_vmalloc_fault(struct mm_struct *mm, unsigned long address) pud_t *pud, *pud_k; pmd_t *pmd, *pmd_k; - pgd = pgd_offset_fast(mm, address); + pgd = pgd_offset_fast(current->active_mm, address); pgd_k = pgd_offset_k(address); if (!pgd_present(*pgd_k)) @@ -72,7 +72,7 @@ void do_page_fault(struct pt_regs *regs, unsigned long address) * nothing more. 
*/ if (address >= VMALLOC_START && address <= VMALLOC_END) { - ret = handle_vmalloc_fault(mm, address); + ret = handle_vmalloc_fault(address); if (unlikely(ret)) goto bad_area_nosemaphore; else diff --git a/arch/arm/boot/dts/integratorcp.dts b/arch/arm/boot/dts/integratorcp.dts index ff1aea0ee043..72693a69f830 100644 --- a/arch/arm/boot/dts/integratorcp.dts +++ b/arch/arm/boot/dts/integratorcp.dts @@ -9,11 +9,6 @@ model = "ARM Integrator/CP"; compatible = "arm,integrator-cp"; - aliases { - arm,timer-primary = &timer2; - arm,timer-secondary = &timer1; - }; - chosen { bootargs = "root=/dev/ram0 console=ttyAMA0,38400n8 earlyprintk"; }; @@ -24,14 +19,18 @@ }; timer0: timer@13000000 { + /* TIMER0 runs @ 25MHz */ compatible = "arm,integrator-cp-timer"; + status = "disabled"; }; timer1: timer@13000100 { + /* TIMER1 runs @ 1MHz */ compatible = "arm,integrator-cp-timer"; }; timer2: timer@13000200 { + /* TIMER2 runs @ 1MHz */ compatible = "arm,integrator-cp-timer"; }; diff --git a/arch/mips/kernel/perf_event_mipsxx.c b/arch/mips/kernel/perf_event_mipsxx.c index 45f1ffcf1a4b..24cdf64789c3 100644 --- a/arch/mips/kernel/perf_event_mipsxx.c +++ b/arch/mips/kernel/perf_event_mipsxx.c @@ -971,11 +971,11 @@ static const struct mips_perf_event mipsxx74Kcore_cache_map [C(LL)] = { [C(OP_READ)] = { [C(RESULT_ACCESS)] = { 0x1c, CNTR_ODD, P }, - [C(RESULT_MISS)] = { 0x1d, CNTR_EVEN | CNTR_ODD, P }, + [C(RESULT_MISS)] = { 0x1d, CNTR_EVEN, P }, }, [C(OP_WRITE)] = { [C(RESULT_ACCESS)] = { 0x1c, CNTR_ODD, P }, - [C(RESULT_MISS)] = { 0x1d, CNTR_EVEN | CNTR_ODD, P }, + [C(RESULT_MISS)] = { 0x1d, CNTR_EVEN, P }, }, }, [C(ITLB)] = { diff --git a/arch/mips/mti-malta/malta-int.c b/arch/mips/mti-malta/malta-int.c index c69da3734699..5b28e81d94a0 100644 --- a/arch/mips/mti-malta/malta-int.c +++ b/arch/mips/mti-malta/malta-int.c @@ -473,7 +473,7 @@ static void __init fill_ipi_map(void) { int cpu; - for (cpu = 0; cpu < NR_CPUS; cpu++) { + for (cpu = 0; cpu < nr_cpu_ids; cpu++) { fill_ipi_map1(gic_resched_int_base, cpu, GIC_CPU_INT1); fill_ipi_map1(gic_call_int_base, cpu, GIC_CPU_INT2); } @@ -574,8 +574,9 @@ void __init arch_init_irq(void) /* FIXME */ int i; #if defined(CONFIG_MIPS_MT_SMP) - gic_call_int_base = GIC_NUM_INTRS - NR_CPUS; - gic_resched_int_base = gic_call_int_base - NR_CPUS; + gic_call_int_base = GIC_NUM_INTRS - + (NR_CPUS - nr_cpu_ids) * 2 - nr_cpu_ids; + gic_resched_int_base = gic_call_int_base - nr_cpu_ids; fill_ipi_map(); #endif gic_init(GIC_BASE_ADDR, GIC_ADDRSPACE_SZ, gic_intr_map, @@ -599,7 +600,7 @@ void __init arch_init_irq(void) printk("CPU%d: status register now %08x\n", smp_processor_id(), read_c0_status()); write_c0_status(0x1100dc00); printk("CPU%d: status register frc %08x\n", smp_processor_id(), read_c0_status()); - for (i = 0; i < NR_CPUS; i++) { + for (i = 0; i < nr_cpu_ids; i++) { arch_init_ipiirq(MIPS_GIC_IRQ_BASE + GIC_RESCHED_INT(i), &irq_resched); arch_init_ipiirq(MIPS_GIC_IRQ_BASE + diff --git a/arch/mips/ralink/timer.c b/arch/mips/ralink/timer.c index e49241a2c39a..202785709441 100644 --- a/arch/mips/ralink/timer.c +++ b/arch/mips/ralink/timer.c @@ -126,7 +126,7 @@ static int rt_timer_probe(struct platform_device *pdev) return -ENOENT; } - rt->membase = devm_request_and_ioremap(&pdev->dev, res); + rt->membase = devm_ioremap_resource(&pdev->dev, res); if (IS_ERR(rt->membase)) return PTR_ERR(rt->membase); diff --git a/arch/parisc/kernel/head.S b/arch/parisc/kernel/head.S index 37aabd772fbb..d2d58258aea6 100644 --- a/arch/parisc/kernel/head.S +++ b/arch/parisc/kernel/head.S @@ -195,6 +195,8 
@@ common_stext: ldw MEM_PDC_HI(%r0),%r6 depd %r6, 31, 32, %r3 /* move to upper word */ + mfctl %cr30,%r6 /* PCX-W2 firmware bug */ + ldo PDC_PSW(%r0),%arg0 /* 21 */ ldo PDC_PSW_SET_DEFAULTS(%r0),%arg1 /* 2 */ ldo PDC_PSW_WIDE_BIT(%r0),%arg2 /* 2 */ @@ -203,6 +205,8 @@ common_stext: copy %r0,%arg3 stext_pdc_ret: + mtctl %r6,%cr30 /* restore task thread info */ + /* restore rfi target address*/ ldd TI_TASK-THREAD_SZ_ALGN(%sp), %r10 tophys_r1 %r10 diff --git a/arch/um/kernel/exitcode.c b/arch/um/kernel/exitcode.c index 829df49dee99..41ebbfebb333 100644 --- a/arch/um/kernel/exitcode.c +++ b/arch/um/kernel/exitcode.c @@ -40,9 +40,11 @@ static ssize_t exitcode_proc_write(struct file *file, const char __user *buffer, size_t count, loff_t *pos) { char *end, buf[sizeof("nnnnn\0")]; + size_t size; int tmp; - if (copy_from_user(buf, buffer, count)) + size = min(count, sizeof(buf)); + if (copy_from_user(buf, buffer, size)) return -EFAULT; tmp = simple_strtol(buf, &end, 0); diff --git a/arch/x86/include/asm/percpu.h b/arch/x86/include/asm/percpu.h index 0da5200ee79d..b3e18f800302 100644 --- a/arch/x86/include/asm/percpu.h +++ b/arch/x86/include/asm/percpu.h @@ -128,7 +128,8 @@ do { \ do { \ typedef typeof(var) pao_T__; \ const int pao_ID__ = (__builtin_constant_p(val) && \ - ((val) == 1 || (val) == -1)) ? (val) : 0; \ + ((val) == 1 || (val) == -1)) ? \ + (int)(val) : 0; \ if (0) { \ pao_T__ pao_tmp__; \ pao_tmp__ = (val); \ diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c index 9d8449158cf9..8a87a3224121 100644 --- a/arch/x86/kernel/cpu/perf_event.c +++ b/arch/x86/kernel/cpu/perf_event.c @@ -1276,16 +1276,16 @@ void perf_events_lapic_init(void) static int __kprobes perf_event_nmi_handler(unsigned int cmd, struct pt_regs *regs) { - int ret; u64 start_clock; u64 finish_clock; + int ret; if (!atomic_read(&active_events)) return NMI_DONE; - start_clock = local_clock(); + start_clock = sched_clock(); ret = x86_pmu.handle_irq(regs); - finish_clock = local_clock(); + finish_clock = sched_clock(); perf_sample_event_took(finish_clock - start_clock); diff --git a/arch/x86/kernel/kvm.c b/arch/x86/kernel/kvm.c index a0e2a8a80c94..b2046e4d0b59 100644 --- a/arch/x86/kernel/kvm.c +++ b/arch/x86/kernel/kvm.c @@ -609,7 +609,7 @@ static struct dentry *d_kvm_debug; struct dentry *kvm_init_debugfs(void) { - d_kvm_debug = debugfs_create_dir("kvm", NULL); + d_kvm_debug = debugfs_create_dir("kvm-guest", NULL); if (!d_kvm_debug) printk(KERN_WARNING "Could not create 'kvm' debugfs directory\n"); diff --git a/arch/x86/kernel/nmi.c b/arch/x86/kernel/nmi.c index ba77ebc2c353..6fcb49ce50a1 100644 --- a/arch/x86/kernel/nmi.c +++ b/arch/x86/kernel/nmi.c @@ -113,10 +113,10 @@ static int __kprobes nmi_handle(unsigned int type, struct pt_regs *regs, bool b2 u64 before, delta, whole_msecs; int remainder_ns, decimal_msecs, thishandled; - before = local_clock(); + before = sched_clock(); thishandled = a->handler(type, regs); handled += thishandled; - delta = local_clock() - before; + delta = sched_clock() - before; trace_nmi_handler(a->handler, (int)delta, thishandled); if (delta < nmi_longest_ns) diff --git a/arch/xtensa/kernel/entry.S b/arch/xtensa/kernel/entry.S index de1dfa18d0a1..21dbe6bdb8ed 100644 --- a/arch/xtensa/kernel/entry.S +++ b/arch/xtensa/kernel/entry.S @@ -1122,7 +1122,7 @@ ENDPROC(fast_syscall_spill_registers) * a3: exctable, original value in excsave1 */ -fast_syscall_spill_registers_fixup: +ENTRY(fast_syscall_spill_registers_fixup) rsr a2, windowbase # get current windowbase (a2 is 
saved) xsr a0, depc # restore depc and a0 @@ -1134,22 +1134,26 @@ fast_syscall_spill_registers_fixup: */ xsr a3, excsave1 # get spill-mask - slli a2, a3, 1 # shift left by one + slli a3, a3, 1 # shift left by one - slli a3, a2, 32-WSBITS - src a2, a2, a3 # a1 = xxwww1yyxxxwww1yy...... + slli a2, a3, 32-WSBITS + src a2, a3, a2 # a2 = xxwww1yyxxxwww1yy...... wsr a2, windowstart # set corrected windowstart - rsr a3, excsave1 - l32i a2, a3, EXC_TABLE_DOUBLE_SAVE # restore a2 - l32i a3, a3, EXC_TABLE_PARAM # original WB (in user task) + srli a3, a3, 1 + rsr a2, excsave1 + l32i a2, a2, EXC_TABLE_DOUBLE_SAVE # restore a2 + xsr a2, excsave1 + s32i a3, a2, EXC_TABLE_DOUBLE_SAVE # save a3 + l32i a3, a2, EXC_TABLE_PARAM # original WB (in user task) + xsr a2, excsave1 /* Return to the original (user task) WINDOWBASE. * We leave the following frame behind: * a0, a1, a2 same - * a3: trashed (saved in excsave_1) + * a3: trashed (saved in EXC_TABLE_DOUBLE_SAVE) * depc: depc (we have to return to that address) - * excsave_1: a3 + * excsave_1: exctable */ wsr a3, windowbase @@ -1159,9 +1163,9 @@ fast_syscall_spill_registers_fixup: * a0: return address * a1: used, stack pointer * a2: kernel stack pointer - * a3: available, saved in EXCSAVE_1 + * a3: available * depc: exception address - * excsave: a3 + * excsave: exctable * Note: This frame might be the same as above. */ @@ -1181,9 +1185,12 @@ fast_syscall_spill_registers_fixup: rsr a0, exccause addx4 a0, a0, a3 # find entry in table l32i a0, a0, EXC_TABLE_FAST_USER # load handler + l32i a3, a3, EXC_TABLE_DOUBLE_SAVE jx a0 -fast_syscall_spill_registers_fixup_return: +ENDPROC(fast_syscall_spill_registers_fixup) + +ENTRY(fast_syscall_spill_registers_fixup_return) /* When we return here, all registers have been restored (a2: DEPC) */ @@ -1191,13 +1198,13 @@ fast_syscall_spill_registers_fixup_return: /* Restore fixup handler. */ - xsr a3, excsave1 - movi a2, fast_syscall_spill_registers_fixup - s32i a2, a3, EXC_TABLE_FIXUP - s32i a0, a3, EXC_TABLE_DOUBLE_SAVE - rsr a2, windowbase - s32i a2, a3, EXC_TABLE_PARAM - l32i a2, a3, EXC_TABLE_KSTK + rsr a2, excsave1 + s32i a3, a2, EXC_TABLE_DOUBLE_SAVE + movi a3, fast_syscall_spill_registers_fixup + s32i a3, a2, EXC_TABLE_FIXUP + rsr a3, windowbase + s32i a3, a2, EXC_TABLE_PARAM + l32i a2, a2, EXC_TABLE_KSTK /* Load WB at the time the exception occurred. */ @@ -1206,8 +1213,12 @@ fast_syscall_spill_registers_fixup_return: wsr a3, windowbase rsync + rsr a3, excsave1 + l32i a3, a3, EXC_TABLE_DOUBLE_SAVE + rfde +ENDPROC(fast_syscall_spill_registers_fixup_return) /* * spill all registers. diff --git a/arch/xtensa/kernel/signal.c b/arch/xtensa/kernel/signal.c index 718eca1850bd..98b67d5f1514 100644 --- a/arch/xtensa/kernel/signal.c +++ b/arch/xtensa/kernel/signal.c @@ -341,7 +341,7 @@ static int setup_frame(int sig, struct k_sigaction *ka, siginfo_t *info, sp = regs->areg[1]; - if ((ka->sa.sa_flags & SA_ONSTACK) != 0 && ! 
on_sig_stack(sp)) { + if ((ka->sa.sa_flags & SA_ONSTACK) != 0 && sas_ss_flags(sp) == 0) { sp = current->sas_ss_sp + current->sas_ss_size; } diff --git a/arch/xtensa/platforms/iss/network.c b/arch/xtensa/platforms/iss/network.c index 56f88b7afe2f..e9e1aad8c271 100644 --- a/arch/xtensa/platforms/iss/network.c +++ b/arch/xtensa/platforms/iss/network.c @@ -737,7 +737,8 @@ static int __init iss_net_setup(char *str) return 1; } - if ((new = alloc_bootmem(sizeof new)) == NULL) { + new = alloc_bootmem(sizeof(*new)); + if (new == NULL) { printk("Alloc_bootmem failed\n"); return 1; } diff --git a/drivers/clk/clk-nomadik.c b/drivers/clk/clk-nomadik.c index 51410c2ac2cb..4d978a3c88f7 100644 --- a/drivers/clk/clk-nomadik.c +++ b/drivers/clk/clk-nomadik.c @@ -27,6 +27,14 @@ */ #define SRC_CR 0x00U +#define SRC_CR_T0_ENSEL BIT(15) +#define SRC_CR_T1_ENSEL BIT(17) +#define SRC_CR_T2_ENSEL BIT(19) +#define SRC_CR_T3_ENSEL BIT(21) +#define SRC_CR_T4_ENSEL BIT(23) +#define SRC_CR_T5_ENSEL BIT(25) +#define SRC_CR_T6_ENSEL BIT(27) +#define SRC_CR_T7_ENSEL BIT(29) #define SRC_XTALCR 0x0CU #define SRC_XTALCR_XTALTIMEN BIT(20) #define SRC_XTALCR_SXTALDIS BIT(19) @@ -543,6 +551,19 @@ void __init nomadik_clk_init(void) __func__, np->name); return; } + + /* Set all timers to use the 2.4 MHz TIMCLK */ + val = readl(src_base + SRC_CR); + val |= SRC_CR_T0_ENSEL; + val |= SRC_CR_T1_ENSEL; + val |= SRC_CR_T2_ENSEL; + val |= SRC_CR_T3_ENSEL; + val |= SRC_CR_T4_ENSEL; + val |= SRC_CR_T5_ENSEL; + val |= SRC_CR_T6_ENSEL; + val |= SRC_CR_T7_ENSEL; + writel(val, src_base + SRC_CR); + val = readl(src_base + SRC_XTALCR); pr_info("SXTALO is %s\n", (val & SRC_XTALCR_SXTALDIS) ? "disabled" : "enabled"); diff --git a/drivers/clk/mvebu/armada-370.c b/drivers/clk/mvebu/armada-370.c index fc777bdc1886..81a202d12a7a 100644 --- a/drivers/clk/mvebu/armada-370.c +++ b/drivers/clk/mvebu/armada-370.c @@ -39,8 +39,8 @@ static const struct coreclk_ratio a370_coreclk_ratios[] __initconst = { }; static const u32 a370_tclk_freqs[] __initconst = { - 16600000, - 20000000, + 166000000, + 200000000, }; static u32 __init a370_get_tclk_freq(void __iomem *sar) diff --git a/drivers/clk/socfpga/clk.c b/drivers/clk/socfpga/clk.c index 5bb848cac6ec..81dd31a686df 100644 --- a/drivers/clk/socfpga/clk.c +++ b/drivers/clk/socfpga/clk.c @@ -49,7 +49,7 @@ #define SOCFPGA_L4_SP_CLK "l4_sp_clk" #define SOCFPGA_NAND_CLK "nand_clk" #define SOCFPGA_NAND_X_CLK "nand_x_clk" -#define SOCFPGA_MMC_CLK "mmc_clk" +#define SOCFPGA_MMC_CLK "sdmmc_clk" #define SOCFPGA_DB_CLK "gpio_db_clk" #define div_mask(width) ((1 << (width)) - 1) diff --git a/drivers/clk/versatile/clk-icst.c b/drivers/clk/versatile/clk-icst.c index 67ccf4aa7277..f5e4c21b301f 100644 --- a/drivers/clk/versatile/clk-icst.c +++ b/drivers/clk/versatile/clk-icst.c @@ -107,7 +107,7 @@ static int icst_set_rate(struct clk_hw *hw, unsigned long rate, vco = icst_hz_to_vco(icst->params, rate); icst->rate = icst_hz(icst->params, vco); - vco_set(icst->vcoreg, icst->lockreg, vco); + vco_set(icst->lockreg, icst->vcoreg, vco); return 0; } diff --git a/drivers/cpufreq/acpi-cpufreq.c b/drivers/cpufreq/acpi-cpufreq.c index d2c3253e015e..506fd23c7550 100644 --- a/drivers/cpufreq/acpi-cpufreq.c +++ b/drivers/cpufreq/acpi-cpufreq.c @@ -986,12 +986,12 @@ static int __init acpi_cpufreq_init(void) { int ret; + if (acpi_disabled) + return -ENODEV; + /* don't keep reloading if cpufreq_driver exists */ if (cpufreq_get_current_driver()) - return 0; - - if (acpi_disabled) - return 0; + return -EEXIST; pr_debug("acpi_cpufreq_init\n"); diff 
--git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c index badf6206b2b2..eb3fdc755000 100644 --- a/drivers/cpufreq/intel_pstate.c +++ b/drivers/cpufreq/intel_pstate.c @@ -48,7 +48,7 @@ static inline int32_t div_fp(int32_t x, int32_t y) } struct sample { - int core_pct_busy; + int32_t core_pct_busy; u64 aperf; u64 mperf; int freq; @@ -68,7 +68,7 @@ struct _pid { int32_t i_gain; int32_t d_gain; int deadband; - int last_err; + int32_t last_err; }; struct cpudata { @@ -153,16 +153,15 @@ static inline void pid_d_gain_set(struct _pid *pid, int percent) pid->d_gain = div_fp(int_tofp(percent), int_tofp(100)); } -static signed int pid_calc(struct _pid *pid, int busy) +static signed int pid_calc(struct _pid *pid, int32_t busy) { - signed int err, result; + signed int result; int32_t pterm, dterm, fp_error; int32_t integral_limit; - err = pid->setpoint - busy; - fp_error = int_tofp(err); + fp_error = int_tofp(pid->setpoint) - busy; - if (abs(err) <= pid->deadband) + if (abs(fp_error) <= int_tofp(pid->deadband)) return 0; pterm = mul_fp(pid->p_gain, fp_error); @@ -176,8 +175,8 @@ static signed int pid_calc(struct _pid *pid, int busy) if (pid->integral < -integral_limit) pid->integral = -integral_limit; - dterm = mul_fp(pid->d_gain, (err - pid->last_err)); - pid->last_err = err; + dterm = mul_fp(pid->d_gain, fp_error - pid->last_err); + pid->last_err = fp_error; result = pterm + mul_fp(pid->integral, pid->i_gain) + dterm; @@ -367,12 +366,13 @@ static int intel_pstate_turbo_pstate(void) static void intel_pstate_get_min_max(struct cpudata *cpu, int *min, int *max) { int max_perf = cpu->pstate.turbo_pstate; + int max_perf_adj; int min_perf; if (limits.no_turbo) max_perf = cpu->pstate.max_pstate; - max_perf = fp_toint(mul_fp(int_tofp(max_perf), limits.max_perf)); - *max = clamp_t(int, max_perf, + max_perf_adj = fp_toint(mul_fp(int_tofp(max_perf), limits.max_perf)); + *max = clamp_t(int, max_perf_adj, cpu->pstate.min_pstate, cpu->pstate.turbo_pstate); min_perf = fp_toint(mul_fp(int_tofp(max_perf), limits.min_perf)); @@ -436,8 +436,9 @@ static inline void intel_pstate_calc_busy(struct cpudata *cpu, struct sample *sample) { u64 core_pct; - core_pct = div64_u64(sample->aperf * 100, sample->mperf); - sample->freq = cpu->pstate.max_pstate * core_pct * 1000; + core_pct = div64_u64(int_tofp(sample->aperf * 100), + sample->mperf); + sample->freq = fp_toint(cpu->pstate.max_pstate * core_pct * 1000); sample->core_pct_busy = core_pct; } @@ -469,22 +470,19 @@ static inline void intel_pstate_set_sample_time(struct cpudata *cpu) mod_timer_pinned(&cpu->timer, jiffies + delay); } -static inline int intel_pstate_get_scaled_busy(struct cpudata *cpu) +static inline int32_t intel_pstate_get_scaled_busy(struct cpudata *cpu) { - int32_t busy_scaled; int32_t core_busy, max_pstate, current_pstate; - core_busy = int_tofp(cpu->samples[cpu->sample_ptr].core_pct_busy); + core_busy = cpu->samples[cpu->sample_ptr].core_pct_busy; max_pstate = int_tofp(cpu->pstate.max_pstate); current_pstate = int_tofp(cpu->pstate.current_pstate); - busy_scaled = mul_fp(core_busy, div_fp(max_pstate, current_pstate)); - - return fp_toint(busy_scaled); + return mul_fp(core_busy, div_fp(max_pstate, current_pstate)); } static inline void intel_pstate_adjust_busy_pstate(struct cpudata *cpu) { - int busy_scaled; + int32_t busy_scaled; struct _pid *pid; signed int ctl = 0; int steps; diff --git a/drivers/dma/edma.c b/drivers/dma/edma.c index 3519111c566b..10b577fcf48d 100644 --- a/drivers/dma/edma.c +++ b/drivers/dma/edma.c @@ -305,6 +305,7 @@ 
static struct dma_async_tx_descriptor *edma_prep_slave_sg( edma_alloc_slot(EDMA_CTLR(echan->ch_num), EDMA_SLOT_ANY); if (echan->slot[i] < 0) { + kfree(edesc); dev_err(dev, "Failed to allocate slot\n"); kfree(edesc); return NULL; @@ -346,6 +347,7 @@ static struct dma_async_tx_descriptor *edma_prep_slave_sg( ccnt = sg_dma_len(sg) / (acnt * bcnt); if (ccnt > (SZ_64K - 1)) { dev_err(dev, "Exceeded max SG segment size\n"); + kfree(edesc); return NULL; } cidx = acnt * bcnt; diff --git a/drivers/gpu/drm/drm_drv.c b/drivers/gpu/drm/drm_drv.c index 05ad9ba0a67e..fe58d0833a11 100644 --- a/drivers/gpu/drm/drm_drv.c +++ b/drivers/gpu/drm/drm_drv.c @@ -61,7 +61,7 @@ static int drm_version(struct drm_device *dev, void *data, /** Ioctl table */ static const struct drm_ioctl_desc drm_ioctls[] = { - DRM_IOCTL_DEF(DRM_IOCTL_VERSION, drm_version, DRM_UNLOCKED), + DRM_IOCTL_DEF(DRM_IOCTL_VERSION, drm_version, DRM_UNLOCKED|DRM_RENDER_ALLOW), DRM_IOCTL_DEF(DRM_IOCTL_GET_UNIQUE, drm_getunique, 0), DRM_IOCTL_DEF(DRM_IOCTL_GET_MAGIC, drm_getmagic, 0), DRM_IOCTL_DEF(DRM_IOCTL_IRQ_BUSID, drm_irq_by_busid, DRM_MASTER|DRM_ROOT_ONLY), diff --git a/drivers/gpu/drm/i915/intel_crt.c b/drivers/gpu/drm/i915/intel_crt.c index ea9022ef15d5..10d1de5bce6f 100644 --- a/drivers/gpu/drm/i915/intel_crt.c +++ b/drivers/gpu/drm/i915/intel_crt.c @@ -83,8 +83,7 @@ static bool intel_crt_get_hw_state(struct intel_encoder *encoder, return true; } -static void intel_crt_get_config(struct intel_encoder *encoder, - struct intel_crtc_config *pipe_config) +static unsigned int intel_crt_get_flags(struct intel_encoder *encoder) { struct drm_i915_private *dev_priv = encoder->base.dev->dev_private; struct intel_crt *crt = intel_encoder_to_crt(encoder); @@ -102,7 +101,25 @@ static void intel_crt_get_config(struct intel_encoder *encoder, else flags |= DRM_MODE_FLAG_NVSYNC; - pipe_config->adjusted_mode.flags |= flags; + return flags; +} + +static void intel_crt_get_config(struct intel_encoder *encoder, + struct intel_crtc_config *pipe_config) +{ + pipe_config->adjusted_mode.flags |= intel_crt_get_flags(encoder); +} + +static void hsw_crt_get_config(struct intel_encoder *encoder, + struct intel_crtc_config *pipe_config) +{ + intel_ddi_get_config(encoder, pipe_config); + + pipe_config->adjusted_mode.flags &= ~(DRM_MODE_FLAG_PHSYNC | + DRM_MODE_FLAG_NHSYNC | + DRM_MODE_FLAG_PVSYNC | + DRM_MODE_FLAG_NVSYNC); + pipe_config->adjusted_mode.flags |= intel_crt_get_flags(encoder); } /* Note: The caller is required to filter out dpms modes not supported by the @@ -799,7 +816,10 @@ void intel_crt_init(struct drm_device *dev) crt->base.mode_set = intel_crt_mode_set; crt->base.disable = intel_disable_crt; crt->base.enable = intel_enable_crt; - crt->base.get_config = intel_crt_get_config; + if (IS_HASWELL(dev)) + crt->base.get_config = hsw_crt_get_config; + else + crt->base.get_config = intel_crt_get_config; if (I915_HAS_HOTPLUG(dev)) crt->base.hpd_pin = HPD_CRT; if (HAS_DDI(dev)) diff --git a/drivers/gpu/drm/i915/intel_ddi.c b/drivers/gpu/drm/i915/intel_ddi.c index 63de2701b974..b53fff84a7d5 100644 --- a/drivers/gpu/drm/i915/intel_ddi.c +++ b/drivers/gpu/drm/i915/intel_ddi.c @@ -1249,8 +1249,8 @@ static void intel_ddi_hot_plug(struct intel_encoder *intel_encoder) intel_dp_check_link_status(intel_dp); } -static void intel_ddi_get_config(struct intel_encoder *encoder, - struct intel_crtc_config *pipe_config) +void intel_ddi_get_config(struct intel_encoder *encoder, + struct intel_crtc_config *pipe_config) { struct drm_i915_private *dev_priv = 
encoder->base.dev->dev_private; struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc); @@ -1268,6 +1268,23 @@ static void intel_ddi_get_config(struct intel_encoder *encoder, flags |= DRM_MODE_FLAG_NVSYNC; pipe_config->adjusted_mode.flags |= flags; + + switch (temp & TRANS_DDI_BPC_MASK) { + case TRANS_DDI_BPC_6: + pipe_config->pipe_bpp = 18; + break; + case TRANS_DDI_BPC_8: + pipe_config->pipe_bpp = 24; + break; + case TRANS_DDI_BPC_10: + pipe_config->pipe_bpp = 30; + break; + case TRANS_DDI_BPC_12: + pipe_config->pipe_bpp = 36; + break; + default: + break; + } } static void intel_ddi_destroy(struct drm_encoder *encoder) diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index 581fb4b2f766..d78d33f9337d 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c @@ -2327,9 +2327,10 @@ static void intel_fdi_normal_train(struct drm_crtc *crtc) FDI_FE_ERRC_ENABLE); } -static bool pipe_has_enabled_pch(struct intel_crtc *intel_crtc) +static bool pipe_has_enabled_pch(struct intel_crtc *crtc) { - return intel_crtc->base.enabled && intel_crtc->config.has_pch_encoder; + return crtc->base.enabled && crtc->active && + crtc->config.has_pch_encoder; } static void ivb_modeset_global_resources(struct drm_device *dev) @@ -2979,6 +2980,48 @@ static void ironlake_pch_transcoder_set_timings(struct intel_crtc *crtc, I915_READ(VSYNCSHIFT(cpu_transcoder))); } +static void cpt_enable_fdi_bc_bifurcation(struct drm_device *dev) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + uint32_t temp; + + temp = I915_READ(SOUTH_CHICKEN1); + if (temp & FDI_BC_BIFURCATION_SELECT) + return; + + WARN_ON(I915_READ(FDI_RX_CTL(PIPE_B)) & FDI_RX_ENABLE); + WARN_ON(I915_READ(FDI_RX_CTL(PIPE_C)) & FDI_RX_ENABLE); + + temp |= FDI_BC_BIFURCATION_SELECT; + DRM_DEBUG_KMS("enabling fdi C rx\n"); + I915_WRITE(SOUTH_CHICKEN1, temp); + POSTING_READ(SOUTH_CHICKEN1); +} + +static void ivybridge_update_fdi_bc_bifurcation(struct intel_crtc *intel_crtc) +{ + struct drm_device *dev = intel_crtc->base.dev; + struct drm_i915_private *dev_priv = dev->dev_private; + + switch (intel_crtc->pipe) { + case PIPE_A: + break; + case PIPE_B: + if (intel_crtc->config.fdi_lanes > 2) + WARN_ON(I915_READ(SOUTH_CHICKEN1) & FDI_BC_BIFURCATION_SELECT); + else + cpt_enable_fdi_bc_bifurcation(dev); + + break; + case PIPE_C: + cpt_enable_fdi_bc_bifurcation(dev); + + break; + default: + BUG(); + } +} + /* * Enable PCH resources required for PCH ports: * - PCH PLLs @@ -2997,6 +3040,9 @@ static void ironlake_pch_enable(struct drm_crtc *crtc) assert_pch_transcoder_disabled(dev_priv, pipe); + if (IS_IVYBRIDGE(dev)) + ivybridge_update_fdi_bc_bifurcation(intel_crtc); + /* Write the TU size bits before fdi link training, so that error * detection works. 
*/ I915_WRITE(FDI_RX_TUSIZE1(pipe), @@ -4983,6 +5029,22 @@ static bool i9xx_get_pipe_config(struct intel_crtc *crtc, if (!(tmp & PIPECONF_ENABLE)) return false; + if (IS_G4X(dev) || IS_VALLEYVIEW(dev)) { + switch (tmp & PIPECONF_BPC_MASK) { + case PIPECONF_6BPC: + pipe_config->pipe_bpp = 18; + break; + case PIPECONF_8BPC: + pipe_config->pipe_bpp = 24; + break; + case PIPECONF_10BPC: + pipe_config->pipe_bpp = 30; + break; + default: + break; + } + } + intel_get_pipe_timings(crtc, pipe_config); i9xx_get_pfit_config(crtc, pipe_config); @@ -5576,48 +5638,6 @@ static bool ironlake_compute_clocks(struct drm_crtc *crtc, return true; } -static void cpt_enable_fdi_bc_bifurcation(struct drm_device *dev) -{ - struct drm_i915_private *dev_priv = dev->dev_private; - uint32_t temp; - - temp = I915_READ(SOUTH_CHICKEN1); - if (temp & FDI_BC_BIFURCATION_SELECT) - return; - - WARN_ON(I915_READ(FDI_RX_CTL(PIPE_B)) & FDI_RX_ENABLE); - WARN_ON(I915_READ(FDI_RX_CTL(PIPE_C)) & FDI_RX_ENABLE); - - temp |= FDI_BC_BIFURCATION_SELECT; - DRM_DEBUG_KMS("enabling fdi C rx\n"); - I915_WRITE(SOUTH_CHICKEN1, temp); - POSTING_READ(SOUTH_CHICKEN1); -} - -static void ivybridge_update_fdi_bc_bifurcation(struct intel_crtc *intel_crtc) -{ - struct drm_device *dev = intel_crtc->base.dev; - struct drm_i915_private *dev_priv = dev->dev_private; - - switch (intel_crtc->pipe) { - case PIPE_A: - break; - case PIPE_B: - if (intel_crtc->config.fdi_lanes > 2) - WARN_ON(I915_READ(SOUTH_CHICKEN1) & FDI_BC_BIFURCATION_SELECT); - else - cpt_enable_fdi_bc_bifurcation(dev); - - break; - case PIPE_C: - cpt_enable_fdi_bc_bifurcation(dev); - - break; - default: - BUG(); - } -} - int ironlake_get_lanes_required(int target_clock, int link_bw, int bpp) { /* @@ -5811,9 +5831,6 @@ static int ironlake_crtc_mode_set(struct drm_crtc *crtc, &intel_crtc->config.fdi_m_n); } - if (IS_IVYBRIDGE(dev)) - ivybridge_update_fdi_bc_bifurcation(intel_crtc); - ironlake_set_pipeconf(crtc); /* Set up the display plane register */ @@ -5881,6 +5898,23 @@ static bool ironlake_get_pipe_config(struct intel_crtc *crtc, if (!(tmp & PIPECONF_ENABLE)) return false; + switch (tmp & PIPECONF_BPC_MASK) { + case PIPECONF_6BPC: + pipe_config->pipe_bpp = 18; + break; + case PIPECONF_8BPC: + pipe_config->pipe_bpp = 24; + break; + case PIPECONF_10BPC: + pipe_config->pipe_bpp = 30; + break; + case PIPECONF_12BPC: + pipe_config->pipe_bpp = 36; + break; + default: + break; + } + if (I915_READ(PCH_TRANSCONF(crtc->pipe)) & TRANS_ENABLE) { struct intel_shared_dpll *pll; @@ -8612,6 +8646,9 @@ intel_pipe_config_compare(struct drm_device *dev, PIPE_CONF_CHECK_X(dpll_hw_state.fp0); PIPE_CONF_CHECK_X(dpll_hw_state.fp1); + if (IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5) + PIPE_CONF_CHECK_I(pipe_bpp); + #undef PIPE_CONF_CHECK_X #undef PIPE_CONF_CHECK_I #undef PIPE_CONF_CHECK_FLAGS diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c index 2c555f91bfae..1a431377d83b 100644 --- a/drivers/gpu/drm/i915/intel_dp.c +++ b/drivers/gpu/drm/i915/intel_dp.c @@ -1401,6 +1401,26 @@ static void intel_dp_get_config(struct intel_encoder *encoder, else pipe_config->port_clock = 270000; } + + if (is_edp(intel_dp) && dev_priv->vbt.edp_bpp && + pipe_config->pipe_bpp > dev_priv->vbt.edp_bpp) { + /* + * This is a big fat ugly hack. + * + * Some machines in UEFI boot mode provide us a VBT that has 18 + * bpp and 1.62 GHz link bandwidth for eDP, which for reasons + * unknown we fail to light up. Yet the same BIOS boots up with + * 24 bpp and 2.7 GHz link. 
Use the same bpp as the BIOS uses as + * max, not what it tells us to use. + * + * Note: This will still be broken if the eDP panel is not lit + * up by the BIOS, and thus we can't get the mode at module + * load. + */ + DRM_DEBUG_KMS("pipe has %d bpp for eDP panel, overriding BIOS-provided max %d bpp\n", + pipe_config->pipe_bpp, dev_priv->vbt.edp_bpp); + dev_priv->vbt.edp_bpp = pipe_config->pipe_bpp; + } } static bool is_edp_psr(struct intel_dp *intel_dp) diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h index 9b7b68fd5d47..7f2b384ac939 100644 --- a/drivers/gpu/drm/i915/intel_drv.h +++ b/drivers/gpu/drm/i915/intel_drv.h @@ -765,6 +765,8 @@ extern void intel_ddi_prepare_link_retrain(struct drm_encoder *encoder); extern bool intel_ddi_connector_get_hw_state(struct intel_connector *intel_connector); extern void intel_ddi_fdi_disable(struct drm_crtc *crtc); +extern void intel_ddi_get_config(struct intel_encoder *encoder, + struct intel_crtc_config *pipe_config); extern void intel_display_handle_reset(struct drm_device *dev); extern bool intel_set_cpu_fifo_underrun_reporting(struct drm_device *dev, diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c index 831a5c021c4b..b8af94a5be39 100644 --- a/drivers/gpu/drm/i915/intel_lvds.c +++ b/drivers/gpu/drm/i915/intel_lvds.c @@ -700,6 +700,22 @@ static const struct dmi_system_id intel_no_lvds[] = { }, { .callback = intel_no_lvds_dmi_callback, + .ident = "Intel D410PT", + .matches = { + DMI_MATCH(DMI_BOARD_VENDOR, "Intel"), + DMI_MATCH(DMI_BOARD_NAME, "D410PT"), + }, + }, + { + .callback = intel_no_lvds_dmi_callback, + .ident = "Intel D425KT", + .matches = { + DMI_MATCH(DMI_BOARD_VENDOR, "Intel"), + DMI_EXACT_MATCH(DMI_BOARD_NAME, "D425KT"), + }, + }, + { + .callback = intel_no_lvds_dmi_callback, .ident = "Intel D510MO", .matches = { DMI_MATCH(DMI_BOARD_VENDOR, "Intel"), diff --git a/drivers/gpu/drm/radeon/evergreen_hdmi.c b/drivers/gpu/drm/radeon/evergreen_hdmi.c index fe1de855775e..57fcc4b16a52 100644 --- a/drivers/gpu/drm/radeon/evergreen_hdmi.c +++ b/drivers/gpu/drm/radeon/evergreen_hdmi.c @@ -291,6 +291,7 @@ void evergreen_hdmi_setmode(struct drm_encoder *encoder, struct drm_display_mode /* fglrx clears sth in AFMT_AUDIO_PACKET_CONTROL2 here */ WREG32(HDMI_ACR_PACKET_CONTROL + offset, + HDMI_ACR_SOURCE | /* select SW CTS value */ HDMI_ACR_AUTO_SEND); /* allow hw to sent ACR packets when required */ evergreen_hdmi_update_ACR(encoder, mode->clock); diff --git a/drivers/gpu/drm/radeon/kv_dpm.c b/drivers/gpu/drm/radeon/kv_dpm.c index 71399065db04..b41905573cd2 100644 --- a/drivers/gpu/drm/radeon/kv_dpm.c +++ b/drivers/gpu/drm/radeon/kv_dpm.c @@ -2635,7 +2635,7 @@ int kv_dpm_init(struct radeon_device *rdev) pi->caps_sclk_ds = true; pi->enable_auto_thermal_throttling = true; pi->disable_nb_ps3_in_battery = false; - pi->bapm_enable = true; + pi->bapm_enable = false; pi->voltage_drop_t = 0; pi->caps_sclk_throttle_low_notification = false; pi->caps_fps = false; /* true? 
*/ diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h index a400ac1c4147..24f4960f59ee 100644 --- a/drivers/gpu/drm/radeon/radeon.h +++ b/drivers/gpu/drm/radeon/radeon.h @@ -1272,8 +1272,8 @@ struct radeon_blacklist_clocks struct radeon_clock_and_voltage_limits { u32 sclk; u32 mclk; - u32 vddc; - u32 vddci; + u16 vddc; + u16 vddci; }; struct radeon_clock_array { diff --git a/drivers/infiniband/ulp/isert/ib_isert.c b/drivers/infiniband/ulp/isert/ib_isert.c index 3591855cc5b5..6df23502059a 100644 --- a/drivers/infiniband/ulp/isert/ib_isert.c +++ b/drivers/infiniband/ulp/isert/ib_isert.c @@ -594,7 +594,7 @@ isert_connect_release(struct isert_conn *isert_conn) pr_debug("Entering isert_connect_release(): >>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>\n"); - if (device->use_frwr) + if (device && device->use_frwr) isert_conn_free_frwr_pool(isert_conn); if (isert_conn->conn_qp) { diff --git a/drivers/input/input.c b/drivers/input/input.c index c04469928925..e75d015024a1 100644 --- a/drivers/input/input.c +++ b/drivers/input/input.c @@ -1734,6 +1734,7 @@ EXPORT_SYMBOL_GPL(input_class); */ struct input_dev *input_allocate_device(void) { + static atomic_t input_no = ATOMIC_INIT(0); struct input_dev *dev; dev = kzalloc(sizeof(struct input_dev), GFP_KERNEL); @@ -1743,9 +1744,13 @@ struct input_dev *input_allocate_device(void) device_initialize(&dev->dev); mutex_init(&dev->mutex); spin_lock_init(&dev->event_lock); + init_timer(&dev->timer); INIT_LIST_HEAD(&dev->h_list); INIT_LIST_HEAD(&dev->node); + dev_set_name(&dev->dev, "input%ld", + (unsigned long) atomic_inc_return(&input_no) - 1); + __module_get(THIS_MODULE); } @@ -2019,7 +2024,6 @@ static void devm_input_device_unregister(struct device *dev, void *res) */ int input_register_device(struct input_dev *dev) { - static atomic_t input_no = ATOMIC_INIT(0); struct input_devres *devres = NULL; struct input_handler *handler; unsigned int packet_size; @@ -2059,7 +2063,6 @@ int input_register_device(struct input_dev *dev) * If delay and period are pre-set by the driver, then autorepeating * is handled by the driver itself and we don't do it in input.c. */ - init_timer(&dev->timer); if (!dev->rep[REP_DELAY] && !dev->rep[REP_PERIOD]) { dev->timer.data = (long) dev; dev->timer.function = input_repeat_key; @@ -2073,9 +2076,6 @@ int input_register_device(struct input_dev *dev) if (!dev->setkeycode) dev->setkeycode = input_default_setkeycode; - dev_set_name(&dev->dev, "input%ld", - (unsigned long) atomic_inc_return(&input_no) - 1); - error = device_add(&dev->dev); if (error) goto err_free_vals; diff --git a/drivers/input/keyboard/pxa27x_keypad.c b/drivers/input/keyboard/pxa27x_keypad.c index 134c3b404a54..a2e758d27584 100644 --- a/drivers/input/keyboard/pxa27x_keypad.c +++ b/drivers/input/keyboard/pxa27x_keypad.c @@ -786,10 +786,17 @@ static int pxa27x_keypad_probe(struct platform_device *pdev) input_dev->evbit[0] = BIT_MASK(EV_KEY) | BIT_MASK(EV_REP); input_set_capability(input_dev, EV_MSC, MSC_SCAN); - if (pdata) + if (pdata) { error = pxa27x_keypad_build_keycode(keypad); - else + } else { error = pxa27x_keypad_build_keycode_from_dt(keypad); + /* + * Data that we get from DT resides in dynamically + * allocated memory so we need to update our pdata + * pointer. 
+ */ + pdata = keypad->pdata; + } if (error) { dev_err(&pdev->dev, "failed to build keycode\n"); goto failed_put_clk; diff --git a/drivers/input/misc/cm109.c b/drivers/input/misc/cm109.c index 082684e7f390..9365535ba7f1 100644 --- a/drivers/input/misc/cm109.c +++ b/drivers/input/misc/cm109.c @@ -351,7 +351,9 @@ static void cm109_urb_irq_callback(struct urb *urb) if (status) { if (status == -ESHUTDOWN) return; - dev_err(&dev->intf->dev, "%s: urb status %d\n", __func__, status); + dev_err_ratelimited(&dev->intf->dev, "%s: urb status %d\n", + __func__, status); + goto out; } /* Special keys */ @@ -418,8 +420,12 @@ static void cm109_urb_ctl_callback(struct urb *urb) dev->ctl_data->byte[2], dev->ctl_data->byte[3]); - if (status) - dev_err(&dev->intf->dev, "%s: urb status %d\n", __func__, status); + if (status) { + if (status == -ESHUTDOWN) + return; + dev_err_ratelimited(&dev->intf->dev, "%s: urb status %d\n", + __func__, status); + } spin_lock(&dev->ctl_submit_lock); @@ -427,7 +433,7 @@ static void cm109_urb_ctl_callback(struct urb *urb) if (likely(!dev->shutdown)) { - if (dev->buzzer_pending) { + if (dev->buzzer_pending || status) { dev->buzzer_pending = 0; dev->ctl_urb_pending = 1; cm109_submit_buzz_toggle(dev); diff --git a/drivers/input/mouse/alps.c b/drivers/input/mouse/alps.c index 7c5d72a6a26a..83658472ad25 100644 --- a/drivers/input/mouse/alps.c +++ b/drivers/input/mouse/alps.c @@ -103,6 +103,7 @@ static const struct alps_model_info alps_model_data[] = { /* Dell Latitude E5500, E6400, E6500, Precision M4400 */ { { 0x62, 0x02, 0x14 }, 0x00, ALPS_PROTO_V2, 0xcf, 0xcf, ALPS_PASS | ALPS_DUALPOINT | ALPS_PS2_INTERLEAVED }, + { { 0x73, 0x00, 0x14 }, 0x00, ALPS_PROTO_V2, 0xcf, 0xcf, ALPS_DUALPOINT }, /* Dell XT2 */ { { 0x73, 0x02, 0x50 }, 0x00, ALPS_PROTO_V2, 0xcf, 0xcf, ALPS_FOUR_BUTTONS }, /* Dell Vostro 1400 */ { { 0x52, 0x01, 0x14 }, 0x00, ALPS_PROTO_V2, 0xff, 0xff, ALPS_PASS | ALPS_DUALPOINT | ALPS_PS2_INTERLEAVED }, /* Toshiba Tecra A11-11L */ diff --git a/drivers/input/serio/i8042.c b/drivers/input/serio/i8042.c index 78e4de42efaa..52c9ebf94729 100644 --- a/drivers/input/serio/i8042.c +++ b/drivers/input/serio/i8042.c @@ -223,21 +223,26 @@ static int i8042_flush(void) { unsigned long flags; unsigned char data, str; - int i = 0; + int count = 0; + int retval = 0; spin_lock_irqsave(&i8042_lock, flags); - while (((str = i8042_read_status()) & I8042_STR_OBF) && (i < I8042_BUFFER_SIZE)) { - udelay(50); - data = i8042_read_data(); - i++; - dbg("%02x <- i8042 (flush, %s)\n", - data, str & I8042_STR_AUXDATA ? "aux" : "kbd"); + while ((str = i8042_read_status()) & I8042_STR_OBF) { + if (count++ < I8042_BUFFER_SIZE) { + udelay(50); + data = i8042_read_data(); + dbg("%02x <- i8042 (flush, %s)\n", + data, str & I8042_STR_AUXDATA ? 
"aux" : "kbd"); + } else { + retval = -EIO; + break; + } } spin_unlock_irqrestore(&i8042_lock, flags); - return i; + return retval; } /* @@ -849,7 +854,7 @@ static int __init i8042_check_aux(void) static int i8042_controller_check(void) { - if (i8042_flush() == I8042_BUFFER_SIZE) { + if (i8042_flush()) { pr_err("No controller found\n"); return -ENODEV; } diff --git a/drivers/input/tablet/wacom_sys.c b/drivers/input/tablet/wacom_sys.c index 79b69ea47f74..e53416a4d7f3 100644 --- a/drivers/input/tablet/wacom_sys.c +++ b/drivers/input/tablet/wacom_sys.c @@ -1031,6 +1031,7 @@ static void wacom_destroy_leds(struct wacom *wacom) } static enum power_supply_property wacom_battery_props[] = { + POWER_SUPPLY_PROP_SCOPE, POWER_SUPPLY_PROP_CAPACITY }; @@ -1042,6 +1043,9 @@ static int wacom_battery_get_property(struct power_supply *psy, int ret = 0; switch (psp) { + case POWER_SUPPLY_PROP_SCOPE: + val->intval = POWER_SUPPLY_SCOPE_DEVICE; + break; case POWER_SUPPLY_PROP_CAPACITY: val->intval = wacom->wacom_wac.battery_capacity * 100 / 31; diff --git a/drivers/input/tablet/wacom_wac.c b/drivers/input/tablet/wacom_wac.c index b2aa503c16b1..c59b797eeafa 100644 --- a/drivers/input/tablet/wacom_wac.c +++ b/drivers/input/tablet/wacom_wac.c @@ -2054,6 +2054,12 @@ static const struct wacom_features wacom_features_0x101 = static const struct wacom_features wacom_features_0x10D = { "Wacom ISDv4 10D", WACOM_PKGLEN_MTTPC, 26202, 16325, 255, 0, MTTPC, WACOM_INTUOS_RES, WACOM_INTUOS_RES }; +static const struct wacom_features wacom_features_0x10E = + { "Wacom ISDv4 10E", WACOM_PKGLEN_MTTPC, 27760, 15694, 255, + 0, MTTPC, WACOM_INTUOS_RES, WACOM_INTUOS_RES }; +static const struct wacom_features wacom_features_0x10F = + { "Wacom ISDv4 10F", WACOM_PKGLEN_MTTPC, 27760, 15694, 255, + 0, MTTPC, WACOM_INTUOS_RES, WACOM_INTUOS_RES }; static const struct wacom_features wacom_features_0x4001 = { "Wacom ISDv4 4001", WACOM_PKGLEN_MTTPC, 26202, 16325, 255, 0, MTTPC, WACOM_INTUOS_RES, WACOM_INTUOS_RES }; @@ -2248,6 +2254,8 @@ const struct usb_device_id wacom_ids[] = { { USB_DEVICE_WACOM(0x100) }, { USB_DEVICE_WACOM(0x101) }, { USB_DEVICE_WACOM(0x10D) }, + { USB_DEVICE_WACOM(0x10E) }, + { USB_DEVICE_WACOM(0x10F) }, { USB_DEVICE_WACOM(0x300) }, { USB_DEVICE_WACOM(0x301) }, { USB_DEVICE_WACOM(0x304) }, diff --git a/drivers/mtd/nand/gpmi-nand/gpmi-nand.c b/drivers/mtd/nand/gpmi-nand/gpmi-nand.c index 59ab0692f0b9..a9830ff8e3f3 100644 --- a/drivers/mtd/nand/gpmi-nand/gpmi-nand.c +++ b/drivers/mtd/nand/gpmi-nand/gpmi-nand.c @@ -349,7 +349,7 @@ static int legacy_set_geometry(struct gpmi_nand_data *this) int common_nfc_set_geometry(struct gpmi_nand_data *this) { - return set_geometry_by_ecc_info(this) ? 0 : legacy_set_geometry(this); + return legacy_set_geometry(this); } struct dma_chan *get_dma_chan(struct gpmi_nand_data *this) diff --git a/drivers/mtd/nand/pxa3xx_nand.c b/drivers/mtd/nand/pxa3xx_nand.c index dd03dfdfb0d6..c28d4e29af1a 100644 --- a/drivers/mtd/nand/pxa3xx_nand.c +++ b/drivers/mtd/nand/pxa3xx_nand.c @@ -1320,7 +1320,12 @@ static int pxa3xx_nand_probe(struct platform_device *pdev) for (cs = 0; cs < pdata->num_cs; cs++) { struct mtd_info *mtd = info->host[cs]->mtd; - mtd->name = pdev->name; + /* + * The mtd name matches the one used in 'mtdparts' kernel + * parameter. This name cannot be changed or otherwise + * user's mtd partitions configuration would get broken. 
+ */ + mtd->name = "pxa3xx_nand-0"; info->cs = cs; ret = pxa3xx_nand_scan(mtd); if (ret) { diff --git a/drivers/pci/hotplug/acpiphp_glue.c b/drivers/pci/hotplug/acpiphp_glue.c index be12fbfcae10..1ea75236a15f 100644 --- a/drivers/pci/hotplug/acpiphp_glue.c +++ b/drivers/pci/hotplug/acpiphp_glue.c @@ -552,9 +552,8 @@ static void __ref enable_slot(struct acpiphp_slot *slot) struct acpiphp_func *func; int max, pass; LIST_HEAD(add_list); - int nr_found; - nr_found = acpiphp_rescan_slot(slot); + acpiphp_rescan_slot(slot); max = acpiphp_max_busnr(bus); for (pass = 0; pass < 2; pass++) { list_for_each_entry(dev, &bus->devices, bus_list) { @@ -574,9 +573,6 @@ static void __ref enable_slot(struct acpiphp_slot *slot) } } __pci_bus_assign_resources(bus, &add_list, NULL); - /* Nothing more to do here if there are no new devices on this bus. */ - if (!nr_found && (slot->flags & SLOT_ENABLED)) - return; acpiphp_sanitize_bus(bus); acpiphp_set_hpp_values(bus); diff --git a/drivers/scsi/aacraid/linit.c b/drivers/scsi/aacraid/linit.c index 408a42ef787a..f0d432c139d0 100644 --- a/drivers/scsi/aacraid/linit.c +++ b/drivers/scsi/aacraid/linit.c @@ -771,6 +771,8 @@ static long aac_compat_do_ioctl(struct aac_dev *dev, unsigned cmd, unsigned long static int aac_compat_ioctl(struct scsi_device *sdev, int cmd, void __user *arg) { struct aac_dev *dev = (struct aac_dev *)sdev->host->hostdata; + if (!capable(CAP_SYS_RAWIO)) + return -EPERM; return aac_compat_do_ioctl(dev, cmd, (unsigned long)arg); } diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c index 5cbc4bb1b395..df5e961484e1 100644 --- a/drivers/scsi/sg.c +++ b/drivers/scsi/sg.c @@ -105,8 +105,11 @@ static int scatter_elem_sz_prev = SG_SCATTER_SZ; static int sg_add(struct device *, struct class_interface *); static void sg_remove(struct device *, struct class_interface *); +static DEFINE_SPINLOCK(sg_open_exclusive_lock); + static DEFINE_IDR(sg_index_idr); -static DEFINE_RWLOCK(sg_index_lock); +static DEFINE_RWLOCK(sg_index_lock); /* Also used to lock + file descriptor list for device */ static struct class_interface sg_interface = { .add_dev = sg_add, @@ -143,7 +146,8 @@ typedef struct sg_request { /* SG_MAX_QUEUE requests outstanding per file */ } Sg_request; typedef struct sg_fd { /* holds the state of a file descriptor */ - struct list_head sfd_siblings; /* protected by sfd_lock of device */ + /* sfd_siblings is protected by sg_index_lock */ + struct list_head sfd_siblings; struct sg_device *parentdp; /* owning device */ wait_queue_head_t read_wait; /* queue read until command done */ rwlock_t rq_list_lock; /* protect access to list in req_arr */ @@ -166,12 +170,13 @@ typedef struct sg_fd { /* holds the state of a file descriptor */ typedef struct sg_device { /* holds the state of each scsi generic device */ struct scsi_device *device; + wait_queue_head_t o_excl_wait; /* queue open() when O_EXCL in use */ int sg_tablesize; /* adapter's max scatter-gather table size */ u32 index; /* device index number */ - spinlock_t sfd_lock; /* protect file descriptor list for device */ + /* sfds is protected by sg_index_lock */ struct list_head sfds; - struct rw_semaphore o_sem; /* exclude open should hold this rwsem */ volatile char detached; /* 0->attached, 1->detached pending removal */ + /* exclude protected by sg_open_exclusive_lock */ char exclude; /* opened for exclusive access */ char sgdebug; /* 0->off, 1->sense, 9->dump dev, 10-> all devs */ struct gendisk *disk; @@ -220,14 +225,35 @@ static int sg_allow_access(struct file *filp, unsigned char *cmd) return 
blk_verify_command(cmd, filp->f_mode & FMODE_WRITE); } +static int get_exclude(Sg_device *sdp) +{ + unsigned long flags; + int ret; + + spin_lock_irqsave(&sg_open_exclusive_lock, flags); + ret = sdp->exclude; + spin_unlock_irqrestore(&sg_open_exclusive_lock, flags); + return ret; +} + +static int set_exclude(Sg_device *sdp, char val) +{ + unsigned long flags; + + spin_lock_irqsave(&sg_open_exclusive_lock, flags); + sdp->exclude = val; + spin_unlock_irqrestore(&sg_open_exclusive_lock, flags); + return val; +} + static int sfds_list_empty(Sg_device *sdp) { unsigned long flags; int ret; - spin_lock_irqsave(&sdp->sfd_lock, flags); + read_lock_irqsave(&sg_index_lock, flags); ret = list_empty(&sdp->sfds); - spin_unlock_irqrestore(&sdp->sfd_lock, flags); + read_unlock_irqrestore(&sg_index_lock, flags); return ret; } @@ -239,6 +265,7 @@ sg_open(struct inode *inode, struct file *filp) struct request_queue *q; Sg_device *sdp; Sg_fd *sfp; + int res; int retval; nonseekable_open(inode, filp); @@ -267,52 +294,54 @@ sg_open(struct inode *inode, struct file *filp) goto error_out; } - if ((flags & O_EXCL) && (O_RDONLY == (flags & O_ACCMODE))) { - retval = -EPERM; /* Can't lock it with read only access */ - goto error_out; - } - if (flags & O_NONBLOCK) { - if (flags & O_EXCL) { - if (!down_write_trylock(&sdp->o_sem)) { - retval = -EBUSY; - goto error_out; - } - } else { - if (!down_read_trylock(&sdp->o_sem)) { - retval = -EBUSY; - goto error_out; - } + if (flags & O_EXCL) { + if (O_RDONLY == (flags & O_ACCMODE)) { + retval = -EPERM; /* Can't lock it with read only access */ + goto error_out; + } + if (!sfds_list_empty(sdp) && (flags & O_NONBLOCK)) { + retval = -EBUSY; + goto error_out; + } + res = wait_event_interruptible(sdp->o_excl_wait, + ((!sfds_list_empty(sdp) || get_exclude(sdp)) ? 
0 : set_exclude(sdp, 1))); + if (res) { + retval = res; /* -ERESTARTSYS because signal hit process */ + goto error_out; + } + } else if (get_exclude(sdp)) { /* some other fd has an exclusive lock on dev */ + if (flags & O_NONBLOCK) { + retval = -EBUSY; + goto error_out; + } + res = wait_event_interruptible(sdp->o_excl_wait, !get_exclude(sdp)); + if (res) { + retval = res; /* -ERESTARTSYS because signal hit process */ + goto error_out; } - } else { - if (flags & O_EXCL) - down_write(&sdp->o_sem); - else - down_read(&sdp->o_sem); } - /* Since write lock is held, no need to check sfd_list */ - if (flags & O_EXCL) - sdp->exclude = 1; /* used by release lock */ - + if (sdp->detached) { + retval = -ENODEV; + goto error_out; + } if (sfds_list_empty(sdp)) { /* no existing opens on this device */ sdp->sgdebug = 0; q = sdp->device->request_queue; sdp->sg_tablesize = queue_max_segments(q); } - sfp = sg_add_sfp(sdp, dev); - if (!IS_ERR(sfp)) + if ((sfp = sg_add_sfp(sdp, dev))) filp->private_data = sfp; - /* retval is already provably zero at this point because of the - * check after retval = scsi_autopm_get_device(sdp->device)) - */ else { - retval = PTR_ERR(sfp); - if (flags & O_EXCL) { - sdp->exclude = 0; /* undo if error */ - up_write(&sdp->o_sem); - } else - up_read(&sdp->o_sem); + set_exclude(sdp, 0); /* undo if error */ + wake_up_interruptible(&sdp->o_excl_wait); + } + retval = -ENOMEM; + goto error_out; + } + retval = 0; error_out: + if (retval) { scsi_autopm_put_device(sdp->device); sdp_put: scsi_device_put(sdp->device); @@ -329,18 +358,13 @@ sg_release(struct inode *inode, struct file *filp) { Sg_device *sdp; Sg_fd *sfp; - int excl; if ((!(sfp = (Sg_fd *) filp->private_data)) || (!(sdp = sfp->parentdp))) return -ENXIO; SCSI_LOG_TIMEOUT(3, printk("sg_release: %s\n", sdp->disk->disk_name)); - excl = sdp->exclude; - sdp->exclude = 0; - if (excl) - up_write(&sdp->o_sem); - else - up_read(&sdp->o_sem); + set_exclude(sdp, 0); + wake_up_interruptible(&sdp->o_excl_wait); scsi_autopm_put_device(sdp->device); kref_put(&sfp->f_ref, sg_remove_sfp); @@ -1391,9 +1415,8 @@ static Sg_device *sg_alloc(struct gendisk *disk, struct scsi_device *scsidp) disk->first_minor = k; sdp->disk = disk; sdp->device = scsidp; - spin_lock_init(&sdp->sfd_lock); INIT_LIST_HEAD(&sdp->sfds); - init_rwsem(&sdp->o_sem); + init_waitqueue_head(&sdp->o_excl_wait); sdp->sg_tablesize = queue_max_segments(q); sdp->index = k; kref_init(&sdp->d_ref); @@ -1526,13 +1549,11 @@ static void sg_remove(struct device *cl_dev, struct class_interface *cl_intf) /* Need a write lock to set sdp->detached. 
*/ write_lock_irqsave(&sg_index_lock, iflags); - spin_lock(&sdp->sfd_lock); sdp->detached = 1; list_for_each_entry(sfp, &sdp->sfds, sfd_siblings) { wake_up_interruptible(&sfp->read_wait); kill_fasync(&sfp->async_qp, SIGPOLL, POLL_HUP); } - spin_unlock(&sdp->sfd_lock); write_unlock_irqrestore(&sg_index_lock, iflags); sysfs_remove_link(&scsidp->sdev_gendev.kobj, "generic"); @@ -2043,7 +2064,7 @@ sg_add_sfp(Sg_device * sdp, int dev) sfp = kzalloc(sizeof(*sfp), GFP_ATOMIC | __GFP_NOWARN); if (!sfp) - return ERR_PTR(-ENOMEM); + return NULL; init_waitqueue_head(&sfp->read_wait); rwlock_init(&sfp->rq_list_lock); @@ -2057,13 +2078,9 @@ sg_add_sfp(Sg_device * sdp, int dev) sfp->cmd_q = SG_DEF_COMMAND_Q; sfp->keep_orphan = SG_DEF_KEEP_ORPHAN; sfp->parentdp = sdp; - spin_lock_irqsave(&sdp->sfd_lock, iflags); - if (sdp->detached) { - spin_unlock_irqrestore(&sdp->sfd_lock, iflags); - return ERR_PTR(-ENODEV); - } + write_lock_irqsave(&sg_index_lock, iflags); list_add_tail(&sfp->sfd_siblings, &sdp->sfds); - spin_unlock_irqrestore(&sdp->sfd_lock, iflags); + write_unlock_irqrestore(&sg_index_lock, iflags); SCSI_LOG_TIMEOUT(3, printk("sg_add_sfp: sfp=0x%p\n", sfp)); if (unlikely(sg_big_buff != def_reserved_size)) sg_big_buff = def_reserved_size; @@ -2113,9 +2130,10 @@ static void sg_remove_sfp(struct kref *kref) struct sg_device *sdp = sfp->parentdp; unsigned long iflags; - spin_lock_irqsave(&sdp->sfd_lock, iflags); + write_lock_irqsave(&sg_index_lock, iflags); list_del(&sfp->sfd_siblings); - spin_unlock_irqrestore(&sdp->sfd_lock, iflags); + write_unlock_irqrestore(&sg_index_lock, iflags); + wake_up_interruptible(&sdp->o_excl_wait); INIT_WORK(&sfp->ew.work, sg_remove_sfp_usercontext); schedule_work(&sfp->ew.work); @@ -2502,7 +2520,7 @@ static int sg_proc_seq_show_devstrs(struct seq_file *s, void *v) return 0; } -/* must be called while holding sg_index_lock and sfd_lock */ +/* must be called while holding sg_index_lock */ static void sg_proc_debug_helper(struct seq_file *s, Sg_device * sdp) { int k, m, new_interface, blen, usg; @@ -2587,26 +2605,22 @@ static int sg_proc_seq_show_debug(struct seq_file *s, void *v) read_lock_irqsave(&sg_index_lock, iflags); sdp = it ? 
sg_lookup_dev(it->index) : NULL; - if (sdp) { - spin_lock(&sdp->sfd_lock); - if (!list_empty(&sdp->sfds)) { - struct scsi_device *scsidp = sdp->device; + if (sdp && !list_empty(&sdp->sfds)) { + struct scsi_device *scsidp = sdp->device; - seq_printf(s, " >>> device=%s ", sdp->disk->disk_name); - if (sdp->detached) - seq_printf(s, "detached pending close "); - else - seq_printf - (s, "scsi%d chan=%d id=%d lun=%d em=%d", - scsidp->host->host_no, - scsidp->channel, scsidp->id, - scsidp->lun, - scsidp->host->hostt->emulated); - seq_printf(s, " sg_tablesize=%d excl=%d\n", - sdp->sg_tablesize, sdp->exclude); - sg_proc_debug_helper(s, sdp); - } - spin_unlock(&sdp->sfd_lock); + seq_printf(s, " >>> device=%s ", sdp->disk->disk_name); + if (sdp->detached) + seq_printf(s, "detached pending close "); + else + seq_printf + (s, "scsi%d chan=%d id=%d lun=%d em=%d", + scsidp->host->host_no, + scsidp->channel, scsidp->id, + scsidp->lun, + scsidp->host->hostt->emulated); + seq_printf(s, " sg_tablesize=%d excl=%d\n", + sdp->sg_tablesize, get_exclude(sdp)); + sg_proc_debug_helper(s, sdp); } read_unlock_irqrestore(&sg_index_lock, iflags); return 0; diff --git a/drivers/staging/bcm/Bcmchar.c b/drivers/staging/bcm/Bcmchar.c index f91bc1fdd895..639ba96adb36 100644 --- a/drivers/staging/bcm/Bcmchar.c +++ b/drivers/staging/bcm/Bcmchar.c @@ -1960,6 +1960,7 @@ cntrlEnd: BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL, "Called IOCTL_BCM_GET_DEVICE_DRIVER_INFO\n"); + memset(&DevInfo, 0, sizeof(DevInfo)); DevInfo.MaxRDMBufferSize = BUFFER_4K; DevInfo.u32DSDStartOffset = EEPROM_CALPARAM_START; DevInfo.u32RxAlignmentCorrection = 0; diff --git a/drivers/staging/ozwpan/ozcdev.c b/drivers/staging/ozwpan/ozcdev.c index 6ccb64fb0786..6ce0af9977d8 100644 --- a/drivers/staging/ozwpan/ozcdev.c +++ b/drivers/staging/ozwpan/ozcdev.c @@ -155,6 +155,9 @@ static ssize_t oz_cdev_write(struct file *filp, const char __user *buf, struct oz_app_hdr *app_hdr; struct oz_serial_ctx *ctx; + if (count > sizeof(ei->data) - sizeof(*elt) - sizeof(*app_hdr)) + return -EINVAL; + spin_lock_bh(&g_cdev.lock); pd = g_cdev.active_pd; if (pd) diff --git a/drivers/staging/sb105x/sb_pci_mp.c b/drivers/staging/sb105x/sb_pci_mp.c index 23db32f07fd5..a10cdb17038b 100644 --- a/drivers/staging/sb105x/sb_pci_mp.c +++ b/drivers/staging/sb105x/sb_pci_mp.c @@ -1063,7 +1063,7 @@ static int mp_wait_modem_status(struct sb_uart_state *state, unsigned long arg) static int mp_get_count(struct sb_uart_state *state, struct serial_icounter_struct *icnt) { - struct serial_icounter_struct icount; + struct serial_icounter_struct icount = {}; struct sb_uart_icount cnow; struct sb_uart_port *port = state->port; diff --git a/drivers/staging/wlags49_h2/wl_priv.c b/drivers/staging/wlags49_h2/wl_priv.c index c97e0e154d28..7e10dcdc3090 100644 --- a/drivers/staging/wlags49_h2/wl_priv.c +++ b/drivers/staging/wlags49_h2/wl_priv.c @@ -570,6 +570,7 @@ int wvlan_uil_put_info(struct uilreq *urq, struct wl_private *lp) ltv_t *pLtv; bool_t ltvAllocated = FALSE; ENCSTRCT sEncryption; + size_t len; #ifdef USE_WDS hcf_16 hcfPort = HCF_PORT_0; @@ -686,7 +687,8 @@ int wvlan_uil_put_info(struct uilreq *urq, struct wl_private *lp) break; case CFG_CNF_OWN_NAME: memset(lp->StationName, 0, sizeof(lp->StationName)); - memcpy((void *)lp->StationName, (void *)&pLtv->u.u8[2], (size_t)pLtv->u.u16[0]); + len = min_t(size_t, pLtv->u.u16[0], sizeof(lp->StationName)); + strlcpy(lp->StationName, &pLtv->u.u8[2], len); pLtv->u.u16[0] = CNV_INT_TO_LITTLE(pLtv->u.u16[0]); break; case 
CFG_CNF_LOAD_BALANCING: @@ -1783,6 +1785,7 @@ int wvlan_set_station_nickname(struct net_device *dev, { struct wl_private *lp = wl_priv(dev); unsigned long flags; + size_t len; int ret = 0; /*------------------------------------------------------------------------*/ @@ -1793,8 +1796,8 @@ int wvlan_set_station_nickname(struct net_device *dev, wl_lock(lp, &flags); memset(lp->StationName, 0, sizeof(lp->StationName)); - - memcpy(lp->StationName, extra, wrqu->data.length); + len = min_t(size_t, wrqu->data.length, sizeof(lp->StationName)); + strlcpy(lp->StationName, extra, len); /* Commit the adapter parameters */ wl_apply(lp); diff --git a/drivers/target/target_core_pscsi.c b/drivers/target/target_core_pscsi.c index 551c96ca60ac..0f199f6a0738 100644 --- a/drivers/target/target_core_pscsi.c +++ b/drivers/target/target_core_pscsi.c @@ -134,10 +134,10 @@ static int pscsi_pmode_enable_hba(struct se_hba *hba, unsigned long mode_flag) * pSCSI Host ID and enable for phba mode */ sh = scsi_host_lookup(phv->phv_host_id); - if (IS_ERR(sh)) { + if (!sh) { pr_err("pSCSI: Unable to locate SCSI Host for" " phv_host_id: %d\n", phv->phv_host_id); - return PTR_ERR(sh); + return -EINVAL; } phv->phv_lld_host = sh; @@ -515,10 +515,10 @@ static int pscsi_configure_device(struct se_device *dev) sh = phv->phv_lld_host; } else { sh = scsi_host_lookup(pdv->pdv_host_id); - if (IS_ERR(sh)) { + if (!sh) { pr_err("pSCSI: Unable to locate" " pdv_host_id: %d\n", pdv->pdv_host_id); - return PTR_ERR(sh); + return -EINVAL; } } } else { diff --git a/drivers/target/target_core_sbc.c b/drivers/target/target_core_sbc.c index 4714c6f8da4b..d9b92b2c524d 100644 --- a/drivers/target/target_core_sbc.c +++ b/drivers/target/target_core_sbc.c @@ -263,6 +263,11 @@ sbc_setup_write_same(struct se_cmd *cmd, unsigned char *flags, struct sbc_ops *o sectors, cmd->se_dev->dev_attrib.max_write_same_len); return TCM_INVALID_CDB_FIELD; } + /* We always have ANC_SUP == 0 so setting ANCHOR is always an error */ + if (flags[0] & 0x10) { + pr_warn("WRITE SAME with ANCHOR not supported\n"); + return TCM_INVALID_CDB_FIELD; + } /* * Special case for WRITE_SAME w/ UNMAP=1 that ends up getting * translated into block discard requests within backend code. diff --git a/drivers/target/target_core_xcopy.c b/drivers/target/target_core_xcopy.c index 3da4fd10b9f8..474cd44fac14 100644 --- a/drivers/target/target_core_xcopy.c +++ b/drivers/target/target_core_xcopy.c @@ -82,6 +82,9 @@ static int target_xcopy_locate_se_dev_e4(struct se_cmd *se_cmd, struct xcopy_op mutex_lock(&g_device_mutex); list_for_each_entry(se_dev, &g_device_list, g_dev_node) { + if (!se_dev->dev_attrib.emulate_3pc) + continue; + memset(&tmp_dev_wwn[0], 0, XCOPY_NAA_IEEE_REGEX_LEN); target_xcopy_gen_naa_ieee(se_dev, &tmp_dev_wwn[0]); @@ -357,6 +360,7 @@ struct xcopy_pt_cmd { struct se_cmd se_cmd; struct xcopy_op *xcopy_op; struct completion xpt_passthrough_sem; + unsigned char sense_buffer[TRANSPORT_SENSE_BUFFER]; }; static struct se_port xcopy_pt_port; @@ -675,7 +679,8 @@ static int target_xcopy_issue_pt_cmd(struct xcopy_pt_cmd *xpt_cmd) pr_debug("target_xcopy_issue_pt_cmd(): SCSI status: 0x%02x\n", se_cmd->scsi_status); - return 0; + + return (se_cmd->scsi_status) ? 
-EINVAL : 0; } static int target_xcopy_read_source( @@ -708,7 +713,7 @@ static int target_xcopy_read_source( (unsigned long long)src_lba, src_sectors, length); transport_init_se_cmd(se_cmd, &xcopy_pt_tfo, NULL, length, - DMA_FROM_DEVICE, 0, NULL); + DMA_FROM_DEVICE, 0, &xpt_cmd->sense_buffer[0]); xop->src_pt_cmd = xpt_cmd; rc = target_xcopy_setup_pt_cmd(xpt_cmd, xop, src_dev, &cdb[0], @@ -768,7 +773,7 @@ static int target_xcopy_write_destination( (unsigned long long)dst_lba, dst_sectors, length); transport_init_se_cmd(se_cmd, &xcopy_pt_tfo, NULL, length, - DMA_TO_DEVICE, 0, NULL); + DMA_TO_DEVICE, 0, &xpt_cmd->sense_buffer[0]); xop->dst_pt_cmd = xpt_cmd; rc = target_xcopy_setup_pt_cmd(xpt_cmd, xop, dst_dev, &cdb[0], @@ -884,30 +889,42 @@ out: sense_reason_t target_do_xcopy(struct se_cmd *se_cmd) { + struct se_device *dev = se_cmd->se_dev; struct xcopy_op *xop = NULL; unsigned char *p = NULL, *seg_desc; unsigned int list_id, list_id_usage, sdll, inline_dl, sa; + sense_reason_t ret = TCM_INVALID_PARAMETER_LIST; int rc; unsigned short tdll; + if (!dev->dev_attrib.emulate_3pc) { + pr_err("EXTENDED_COPY operation explicitly disabled\n"); + return TCM_UNSUPPORTED_SCSI_OPCODE; + } + sa = se_cmd->t_task_cdb[1] & 0x1f; if (sa != 0x00) { pr_err("EXTENDED_COPY(LID4) not supported\n"); return TCM_UNSUPPORTED_SCSI_OPCODE; } + xop = kzalloc(sizeof(struct xcopy_op), GFP_KERNEL); + if (!xop) { + pr_err("Unable to allocate xcopy_op\n"); + return TCM_OUT_OF_RESOURCES; + } + xop->xop_se_cmd = se_cmd; + p = transport_kmap_data_sg(se_cmd); if (!p) { pr_err("transport_kmap_data_sg() failed in target_do_xcopy\n"); + kfree(xop); return TCM_OUT_OF_RESOURCES; } list_id = p[0]; - if (list_id != 0x00) { - pr_err("XCOPY with non zero list_id: 0x%02x\n", list_id); - goto out; - } - list_id_usage = (p[1] & 0x18); + list_id_usage = (p[1] & 0x18) >> 3; + /* * Determine TARGET DESCRIPTOR LIST LENGTH + SEGMENT DESCRIPTOR LIST LENGTH */ @@ -920,13 +937,6 @@ sense_reason_t target_do_xcopy(struct se_cmd *se_cmd) goto out; } - xop = kzalloc(sizeof(struct xcopy_op), GFP_KERNEL); - if (!xop) { - pr_err("Unable to allocate xcopy_op\n"); - goto out; - } - xop->xop_se_cmd = se_cmd; - pr_debug("Processing XCOPY with list_id: 0x%02x list_id_usage: 0x%02x" " tdll: %hu sdll: %u inline_dl: %u\n", list_id, list_id_usage, tdll, sdll, inline_dl); @@ -935,6 +945,17 @@ sense_reason_t target_do_xcopy(struct se_cmd *se_cmd) if (rc <= 0) goto out; + if (xop->src_dev->dev_attrib.block_size != + xop->dst_dev->dev_attrib.block_size) { + pr_err("XCOPY: Non matching src_dev block_size: %u + dst_dev" + " block_size: %u currently unsupported\n", + xop->src_dev->dev_attrib.block_size, + xop->dst_dev->dev_attrib.block_size); + xcopy_pt_undepend_remotedev(xop); + ret = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; + goto out; + } + pr_debug("XCOPY: Processed %d target descriptors, length: %u\n", rc, rc * XCOPY_TARGET_DESC_LEN); seg_desc = &p[16]; @@ -957,7 +978,7 @@ out: if (p) transport_kunmap_data_sg(se_cmd); kfree(xop); - return TCM_INVALID_CDB_FIELD; + return ret; } static sense_reason_t target_rcr_operating_parameters(struct se_cmd *se_cmd) diff --git a/drivers/tty/serial/atmel_serial.c b/drivers/tty/serial/atmel_serial.c index d067285a2d20..6b0f75eac8a2 100644 --- a/drivers/tty/serial/atmel_serial.c +++ b/drivers/tty/serial/atmel_serial.c @@ -1499,7 +1499,7 @@ static void atmel_set_ops(struct uart_port *port) /* * Get ip name usart or uart */ -static int atmel_get_ip_name(struct uart_port *port) +static void atmel_get_ip_name(struct uart_port *port) { 
struct atmel_uart_port *atmel_port = to_atmel_uart_port(port); int name = UART_GET_IP_NAME(port); @@ -1518,10 +1518,7 @@ static int atmel_get_ip_name(struct uart_port *port) atmel_port->is_usart = false; } else { dev_err(port->dev, "Not supported ip name, set to uart\n"); - return -EINVAL; } - - return 0; } /* @@ -2405,9 +2402,7 @@ static int atmel_serial_probe(struct platform_device *pdev) /* * Get port name of usart or uart */ - ret = atmel_get_ip_name(&port->uart); - if (ret < 0) - goto err_add_port; + atmel_get_ip_name(&port->uart); return 0; diff --git a/drivers/uio/uio.c b/drivers/uio/uio.c index ba475632c5fa..0e808cf91d97 100644 --- a/drivers/uio/uio.c +++ b/drivers/uio/uio.c @@ -642,16 +642,29 @@ static int uio_mmap_physical(struct vm_area_struct *vma) { struct uio_device *idev = vma->vm_private_data; int mi = uio_find_mem_index(vma); + struct uio_mem *mem; if (mi < 0) return -EINVAL; + mem = idev->info->mem + mi; - vma->vm_ops = &uio_physical_vm_ops; + if (vma->vm_end - vma->vm_start > mem->size) + return -EINVAL; + vma->vm_ops = &uio_physical_vm_ops; vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot); + /* + * We cannot use the vm_iomap_memory() helper here, + * because vma->vm_pgoff is the map index we looked + * up above in uio_find_mem_index(), rather than an + * actual page offset into the mmap. + * + * So we just do the physical mmap without a page + * offset. + */ return remap_pfn_range(vma, vma->vm_start, - idev->info->mem[mi].addr >> PAGE_SHIFT, + mem->addr >> PAGE_SHIFT, vma->vm_end - vma->vm_start, vma->vm_page_prot); } diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c index c45f9c0a1b34..b21d553c245b 100644 --- a/drivers/usb/serial/ftdi_sio.c +++ b/drivers/usb/serial/ftdi_sio.c @@ -904,6 +904,7 @@ static struct usb_device_id id_table_combined [] = { { USB_DEVICE(FTDI_VID, FTDI_LUMEL_PD12_PID) }, /* Crucible Devices */ { USB_DEVICE(FTDI_VID, FTDI_CT_COMET_PID) }, + { USB_DEVICE(FTDI_VID, FTDI_Z3X_PID) }, { } /* Terminating entry */ }; diff --git a/drivers/usb/serial/ftdi_sio_ids.h b/drivers/usb/serial/ftdi_sio_ids.h index 1b8af461b522..a7019d1e3058 100644 --- a/drivers/usb/serial/ftdi_sio_ids.h +++ b/drivers/usb/serial/ftdi_sio_ids.h @@ -1307,3 +1307,9 @@ * Manufacturer: Crucible Technologies */ #define FTDI_CT_COMET_PID 0x8e08 + +/* + * Product: Z3X Box + * Manufacturer: Smart GSM Team + */ +#define FTDI_Z3X_PID 0x0011 diff --git a/drivers/usb/serial/pl2303.c b/drivers/usb/serial/pl2303.c index bedf8e47713b..1e6de4cd079d 100644 --- a/drivers/usb/serial/pl2303.c +++ b/drivers/usb/serial/pl2303.c @@ -4,11 +4,6 @@ * Copyright (C) 2001-2007 Greg Kroah-Hartman (greg@kroah.com) * Copyright (C) 2003 IBM Corp. * - * Copyright (C) 2009, 2013 Frank Schäfer <fschaefer.oss@googlemail.com> - * - fixes, improvements and documentation for the baud rate encoding methods - * Copyright (C) 2013 Reinhard Max <max@suse.de> - * - fixes and improvements for the divisor based baud rate encoding method - * * Original driver for 2.2.x by anonymous * * This program is free software; you can redistribute it and/or @@ -134,18 +129,10 @@ MODULE_DEVICE_TABLE(usb, id_table); enum pl2303_type { - type_0, /* H version ? */ - type_1, /* H version ? 
*/ - HX_TA, /* HX(A) / X(A) / TA version */ /* TODO: improve */ - HXD_EA_RA_SA, /* HXD / EA / RA / SA version */ /* TODO: improve */ - TB, /* TB version */ - HX_CLONE, /* Cheap and less functional clone of the HX chip */ + type_0, /* don't know the difference between type 0 and */ + type_1, /* type 1, until someone from prolific tells us... */ + HX, /* HX version of the pl2303 chip */ }; -/* - * NOTE: don't know the difference between type 0 and type 1, - * until someone from Prolific tells us... - * TODO: distinguish between X/HX, TA and HXD, EA, RA, SA variants - */ struct pl2303_serial_private { enum pl2303_type type; @@ -185,7 +172,6 @@ static int pl2303_startup(struct usb_serial *serial) { struct pl2303_serial_private *spriv; enum pl2303_type type = type_0; - char *type_str = "unknown (treating as type_0)"; unsigned char *buf; spriv = kzalloc(sizeof(*spriv), GFP_KERNEL); @@ -198,53 +184,15 @@ static int pl2303_startup(struct usb_serial *serial) return -ENOMEM; } - if (serial->dev->descriptor.bDeviceClass == 0x02) { + if (serial->dev->descriptor.bDeviceClass == 0x02) type = type_0; - type_str = "type_0"; - } else if (serial->dev->descriptor.bMaxPacketSize0 == 0x40) { - /* - * NOTE: The bcdDevice version is the only difference between - * the device descriptors of the X/HX, HXD, EA, RA, SA, TA, TB - */ - if (le16_to_cpu(serial->dev->descriptor.bcdDevice) == 0x300) { - /* Check if the device is a clone */ - pl2303_vendor_read(0x9494, 0, serial, buf); - /* - * NOTE: Not sure if this read is really needed. - * The HX returns 0x00, the clone 0x02, but the Windows - * driver seems to ignore the value and continues. - */ - pl2303_vendor_write(0x0606, 0xaa, serial); - pl2303_vendor_read(0x8686, 0, serial, buf); - if (buf[0] != 0xaa) { - type = HX_CLONE; - type_str = "X/HX clone (limited functionality)"; - } else { - type = HX_TA; - type_str = "X/HX/TA"; - } - pl2303_vendor_write(0x0606, 0x00, serial); - } else if (le16_to_cpu(serial->dev->descriptor.bcdDevice) - == 0x400) { - type = HXD_EA_RA_SA; - type_str = "HXD/EA/RA/SA"; - } else if (le16_to_cpu(serial->dev->descriptor.bcdDevice) - == 0x500) { - type = TB; - type_str = "TB"; - } else { - dev_info(&serial->interface->dev, - "unknown/unsupported device type\n"); - kfree(spriv); - kfree(buf); - return -ENODEV; - } - } else if (serial->dev->descriptor.bDeviceClass == 0x00 - || serial->dev->descriptor.bDeviceClass == 0xFF) { + else if (serial->dev->descriptor.bMaxPacketSize0 == 0x40) + type = HX; + else if (serial->dev->descriptor.bDeviceClass == 0x00) type = type_1; - type_str = "type_1"; - } - dev_dbg(&serial->interface->dev, "device type: %s\n", type_str); + else if (serial->dev->descriptor.bDeviceClass == 0xFF) + type = type_1; + dev_dbg(&serial->interface->dev, "device type: %d\n", type); spriv->type = type; usb_set_serial_data(serial, spriv); @@ -259,10 +207,10 @@ static int pl2303_startup(struct usb_serial *serial) pl2303_vendor_read(0x8383, 0, serial, buf); pl2303_vendor_write(0, 1, serial); pl2303_vendor_write(1, 0, serial); - if (type == type_0 || type == type_1) - pl2303_vendor_write(2, 0x24, serial); - else + if (type == HX) pl2303_vendor_write(2, 0x44, serial); + else + pl2303_vendor_write(2, 0x24, serial); kfree(buf); return 0; @@ -316,174 +264,65 @@ static int pl2303_set_control_lines(struct usb_serial_port *port, u8 value) return retval; } -static int pl2303_baudrate_encode_direct(int baud, enum pl2303_type type, - u8 buf[4]) +static void pl2303_encode_baudrate(struct tty_struct *tty, + struct usb_serial_port *port, + u8 buf[4]) 
{ - /* - * NOTE: Only the values defined in baud_sup are supported ! - * => if unsupported values are set, the PL2303 uses 9600 baud instead - * => HX clones just don't work at unsupported baud rates < 115200 baud, - * for baud rates > 115200 they run at 115200 baud - */ const int baud_sup[] = { 75, 150, 300, 600, 1200, 1800, 2400, 3600, - 4800, 7200, 9600, 14400, 19200, 28800, 38400, - 57600, 115200, 230400, 460800, 614400, 921600, - 1228800, 2457600, 3000000, 6000000, 12000000 }; + 4800, 7200, 9600, 14400, 19200, 28800, 38400, + 57600, 115200, 230400, 460800, 500000, 614400, + 921600, 1228800, 2457600, 3000000, 6000000 }; + + struct usb_serial *serial = port->serial; + struct pl2303_serial_private *spriv = usb_get_serial_data(serial); + int baud; + int i; + /* - * NOTE: With the exception of type_0/1 devices, the following - * additional baud rates are supported (tested with HX rev. 3A only): - * 110*, 56000*, 128000, 134400, 161280, 201600, 256000*, 268800, - * 403200, 806400. (*: not HX and HX clones) - * - * Maximum values: HXD, TB: 12000000; HX, TA: 6000000; - * type_0+1: 1228800; RA: 921600; HX clones, SA: 115200 - * - * As long as we are not using this encoding method for anything else - * than the type_0+1, HX and HX clone chips, there is no point in - * complicating the code to support them. + * NOTE: Only the values defined in baud_sup are supported! + * => if unsupported values are set, the PL2303 seems to use + * 9600 baud (at least my PL2303X always does) */ - int i; + baud = tty_get_baud_rate(tty); + dev_dbg(&port->dev, "baud requested = %d\n", baud); + if (!baud) + return; /* Set baudrate to nearest supported value */ for (i = 0; i < ARRAY_SIZE(baud_sup); ++i) { if (baud_sup[i] > baud) break; } + if (i == ARRAY_SIZE(baud_sup)) baud = baud_sup[i - 1]; else if (i > 0 && (baud_sup[i] - baud) > (baud - baud_sup[i - 1])) baud = baud_sup[i - 1]; else baud = baud_sup[i]; - /* Respect the chip type specific baud rate limits */ - /* - * FIXME: as long as we don't know how to distinguish between the - * HXD, EA, RA, and SA chip variants, allow the max. value of 12M. - */ - if (type == HX_TA) - baud = min_t(int, baud, 6000000); - else if (type == type_0 || type == type_1) - baud = min_t(int, baud, 1228800); - else if (type == HX_CLONE) - baud = min_t(int, baud, 115200); - /* Direct (standard) baud rate encoding method */ - put_unaligned_le32(baud, buf); - - return baud; -} -static int pl2303_baudrate_encode_divisor(int baud, enum pl2303_type type, - u8 buf[4]) -{ - /* - * Divisor based baud rate encoding method - * - * NOTE: HX clones do NOT support this method. - * It's not clear if the type_0/1 chips support it. - * - * divisor = 12MHz * 32 / baudrate = 2^A * B - * - * with - * - * A = buf[1] & 0x0e - * B = buf[0] + (buf[1] & 0x01) << 8 - * - * Special cases: - * => 8 < B < 16: device seems to work not properly - * => B <= 8: device uses the max. value B = 512 instead - */ - unsigned int A, B; + /* type_0, type_1 only support up to 1228800 baud */ + if (spriv->type != HX) + baud = min_t(int, baud, 1228800); - /* - * NOTE: The Windows driver allows maximum baud rates of 110% of the - * specified maximium value. - * Quick tests with early (2004) HX (rev. A) chips suggest, that even - * higher baud rates (up to the maximum of 24M baud !) are working fine, - * but that should really be tested carefully in "real life" scenarios - * before removing the upper limit completely. - * Baud rates smaller than the specified 75 baud are definitely working - * fine. 
- */ - if (type == type_0 || type == type_1) - baud = min_t(int, baud, 1228800 * 1.1); - else if (type == HX_TA) - baud = min_t(int, baud, 6000000 * 1.1); - else if (type == HXD_EA_RA_SA) - /* HXD, EA: 12Mbps; RA: 1Mbps; SA: 115200 bps */ - /* - * FIXME: as long as we don't know how to distinguish between - * these chip variants, allow the max. of these values - */ - baud = min_t(int, baud, 12000000 * 1.1); - else if (type == TB) - baud = min_t(int, baud, 12000000 * 1.1); - /* Determine factors A and B */ - A = 0; - B = 12000000 * 32 / baud; /* 12MHz */ - B <<= 1; /* Add one bit for rounding */ - while (B > (512 << 1) && A <= 14) { - A += 2; - B >>= 2; - } - if (A > 14) { /* max. divisor = min. baudrate reached */ - A = 14; - B = 512; - /* => ~45.78 baud */ + if (baud <= 115200) { + put_unaligned_le32(baud, buf); } else { - B = (B + 1) >> 1; /* Round the last bit */ - } - /* Handle special cases */ - if (B == 512) - B = 0; /* also: 1 to 8 */ - else if (B < 16) /* - * NOTE: With the current algorithm this happens - * only for A=0 and means that the min. divisor - * (respectively: the max. baudrate) is reached. + * Apparently the formula for higher speeds is: + * baudrate = 12M * 32 / (2^buf[1]) / buf[0] */ - B = 16; /* => 24 MBaud */ - /* Encode the baud rate */ - buf[3] = 0x80; /* Select divisor encoding method */ - buf[2] = 0; - buf[1] = (A & 0x0e); /* A */ - buf[1] |= ((B & 0x100) >> 8); /* MSB of B */ - buf[0] = B & 0xff; /* 8 LSBs of B */ - /* Calculate the actual/resulting baud rate */ - if (B <= 8) - B = 512; - baud = 12000000 * 32 / ((1 << A) * B); - - return baud; -} - -static void pl2303_encode_baudrate(struct tty_struct *tty, - struct usb_serial_port *port, - enum pl2303_type type, - u8 buf[4]) -{ - int baud; + unsigned tmp = 12000000 * 32 / baud; + buf[3] = 0x80; + buf[2] = 0; + buf[1] = (tmp >= 256); + while (tmp >= 256) { + tmp >>= 2; + buf[1] <<= 1; + } + buf[0] = tmp; + } - baud = tty_get_baud_rate(tty); - dev_dbg(&port->dev, "baud requested = %d\n", baud); - if (!baud) - return; - /* - * There are two methods for setting/encoding the baud rate - * 1) Direct method: encodes the baud rate value directly - * => supported by all chip types - * 2) Divisor based method: encodes a divisor to a base value (12MHz*32) - * => not supported by HX clones (and likely type_0/1 chips) - * - * NOTE: Although the divisor based baud rate encoding method is much - * more flexible, some of the standard baud rate values can not be - * realized exactly. But the difference is very small (max. 0.2%) and - * the device likely uses the same baud rate generator for both methods - * so that there is likley no difference. 
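/*
 * Worked example for the restored encoding above (values traced from the
 * code as shown, not measured on hardware): a requested rate of 230400
 * baud gives tmp = 12000000 * 32 / 230400 = 1666, so the scaling loop
 * runs twice (1666 -> 416 -> 104) while buf[1] is doubled (1 -> 2 -> 4),
 * and the four setup bytes end up as
 *
 *	buf[0] = 104, buf[1] = 4, buf[2] = 0, buf[3] = 0x80
 *
 * Rates of 115200 baud and below are simply stored little-endian in
 * buf[0..3] by put_unaligned_le32().
 */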
- */ - if (type == type_0 || type == type_1 || type == HX_CLONE) - baud = pl2303_baudrate_encode_direct(baud, type, buf); - else - baud = pl2303_baudrate_encode_divisor(baud, type, buf); /* Save resulting baud rate */ tty_encode_baud_rate(tty, baud, baud); dev_dbg(&port->dev, "baud set = %d\n", baud); @@ -540,8 +379,8 @@ static void pl2303_set_termios(struct tty_struct *tty, dev_dbg(&port->dev, "data bits = %d\n", buf[6]); } - /* For reference: buf[0]:buf[3] baud rate value */ - pl2303_encode_baudrate(tty, port, spriv->type, buf); + /* For reference buf[0]:buf[3] baud rate value */ + pl2303_encode_baudrate(tty, port, &buf[0]); /* For reference buf[4]=0 is 1 stop bits */ /* For reference buf[4]=1 is 1.5 stop bits */ @@ -618,10 +457,10 @@ static void pl2303_set_termios(struct tty_struct *tty, dev_dbg(&port->dev, "0xa1:0x21:0:0 %d - %7ph\n", i, buf); if (C_CRTSCTS(tty)) { - if (spriv->type == type_0 || spriv->type == type_1) - pl2303_vendor_write(0x0, 0x41, serial); - else + if (spriv->type == HX) pl2303_vendor_write(0x0, 0x61, serial); + else + pl2303_vendor_write(0x0, 0x41, serial); } else { pl2303_vendor_write(0x0, 0x0, serial); } @@ -658,7 +497,7 @@ static int pl2303_open(struct tty_struct *tty, struct usb_serial_port *port) struct pl2303_serial_private *spriv = usb_get_serial_data(serial); int result; - if (spriv->type == type_0 || spriv->type == type_1) { + if (spriv->type != HX) { usb_clear_halt(serial->dev, port->write_urb->pipe); usb_clear_halt(serial->dev, port->read_urb->pipe); } else { @@ -833,7 +672,6 @@ static void pl2303_break_ctl(struct tty_struct *tty, int break_state) result = usb_control_msg(serial->dev, usb_sndctrlpipe(serial->dev, 0), BREAK_REQUEST, BREAK_REQUEST_TYPE, state, 0, NULL, 0, 100); - /* NOTE: HX clones don't support sending breaks, -EPIPE is returned */ if (result) dev_err(&port->dev, "error sending break = %d\n", result); } diff --git a/drivers/vhost/scsi.c b/drivers/vhost/scsi.c index ce5221fa393a..e663921eebb6 100644 --- a/drivers/vhost/scsi.c +++ b/drivers/vhost/scsi.c @@ -1056,7 +1056,7 @@ vhost_scsi_handle_vq(struct vhost_scsi *vs, struct vhost_virtqueue *vq) if (data_direction != DMA_NONE) { ret = vhost_scsi_map_iov_to_sgl(cmd, &vq->iov[data_first], data_num, - data_direction == DMA_TO_DEVICE); + data_direction == DMA_FROM_DEVICE); if (unlikely(ret)) { vq_err(vq, "Failed to map iov to sgl\n"); goto err_free; diff --git a/drivers/video/au1100fb.c b/drivers/video/au1100fb.c index a54ccdc4d661..22ad85242e5b 100644 --- a/drivers/video/au1100fb.c +++ b/drivers/video/au1100fb.c @@ -361,37 +361,13 @@ void au1100fb_fb_rotate(struct fb_info *fbi, int angle) int au1100fb_fb_mmap(struct fb_info *fbi, struct vm_area_struct *vma) { struct au1100fb_device *fbdev; - unsigned int len; - unsigned long start=0, off; fbdev = to_au1100fb_device(fbi); - if (vma->vm_pgoff > (~0UL >> PAGE_SHIFT)) { - return -EINVAL; - } - - start = fbdev->fb_phys & PAGE_MASK; - len = PAGE_ALIGN((start & ~PAGE_MASK) + fbdev->fb_len); - - off = vma->vm_pgoff << PAGE_SHIFT; - - if ((vma->vm_end - vma->vm_start + off) > len) { - return -EINVAL; - } - - off += start; - vma->vm_pgoff = off >> PAGE_SHIFT; - vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot); pgprot_val(vma->vm_page_prot) |= (6 << 9); //CCA=6 - if (io_remap_pfn_range(vma, vma->vm_start, off >> PAGE_SHIFT, - vma->vm_end - vma->vm_start, - vma->vm_page_prot)) { - return -EAGAIN; - } - - return 0; + return vm_iomap_memory(vma, fbdev->fb_phys, fbdev->fb_len); } static struct fb_ops au1100fb_ops = diff --git 
a/drivers/video/au1200fb.c b/drivers/video/au1200fb.c index 301224ecc950..1d02897d17f2 100644 --- a/drivers/video/au1200fb.c +++ b/drivers/video/au1200fb.c @@ -1233,34 +1233,13 @@ static int au1200fb_fb_blank(int blank_mode, struct fb_info *fbi) * method mainly to allow the use of the TLB streaming flag (CCA=6) */ static int au1200fb_fb_mmap(struct fb_info *info, struct vm_area_struct *vma) - { - unsigned int len; - unsigned long start=0, off; struct au1200fb_device *fbdev = info->par; - if (vma->vm_pgoff > (~0UL >> PAGE_SHIFT)) { - return -EINVAL; - } - - start = fbdev->fb_phys & PAGE_MASK; - len = PAGE_ALIGN((start & ~PAGE_MASK) + fbdev->fb_len); - - off = vma->vm_pgoff << PAGE_SHIFT; - - if ((vma->vm_end - vma->vm_start + off) > len) { - return -EINVAL; - } - - off += start; - vma->vm_pgoff = off >> PAGE_SHIFT; - vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot); pgprot_val(vma->vm_page_prot) |= _CACHE_MASK; /* CCA=7 */ - return io_remap_pfn_range(vma, vma->vm_start, off >> PAGE_SHIFT, - vma->vm_end - vma->vm_start, - vma->vm_page_prot); + return vm_iomap_memory(vma, fbdev->fb_phys, fbdev->fb_len); } static void set_global(u_int cmd, struct au1200_lcd_global_regs_t *pdata) diff --git a/fs/dcache.c b/fs/dcache.c index 20532cb0b06e..ae6ebb88ceff 100644 --- a/fs/dcache.c +++ b/fs/dcache.c @@ -542,7 +542,7 @@ EXPORT_SYMBOL(d_drop); * If ref is non-zero, then decrement the refcount too. * Returns dentry requiring refcount drop, or NULL if we're done. */ -static inline struct dentry * +static struct dentry * dentry_kill(struct dentry *dentry, int unlock_on_failure) __releases(dentry->d_lock) { @@ -630,7 +630,8 @@ repeat: goto kill_it; } - dentry->d_flags |= DCACHE_REFERENCED; + if (!(dentry->d_flags & DCACHE_REFERENCED)) + dentry->d_flags |= DCACHE_REFERENCED; dentry_lru_add(dentry); dentry->d_lockref.count--; diff --git a/fs/eventpoll.c b/fs/eventpoll.c index 473e09da7d02..810c28fb8c3c 100644 --- a/fs/eventpoll.c +++ b/fs/eventpoll.c @@ -34,7 +34,6 @@ #include <linux/mutex.h> #include <linux/anon_inodes.h> #include <linux/device.h> -#include <linux/freezer.h> #include <asm/uaccess.h> #include <asm/io.h> #include <asm/mman.h> @@ -1605,8 +1604,7 @@ fetch_events: } spin_unlock_irqrestore(&ep->lock, flags); - if (!freezable_schedule_hrtimeout_range(to, slack, - HRTIMER_MODE_ABS)) + if (!schedule_hrtimeout_range(to, slack, HRTIMER_MODE_ABS)) timed_out = 1; spin_lock_irqsave(&ep->lock, flags); diff --git a/fs/file_table.c b/fs/file_table.c index abdd15ad13c9..e900ca518635 100644 --- a/fs/file_table.c +++ b/fs/file_table.c @@ -297,7 +297,7 @@ void flush_delayed_fput(void) delayed_fput(NULL); } -static DECLARE_WORK(delayed_fput_work, delayed_fput); +static DECLARE_DELAYED_WORK(delayed_fput_work, delayed_fput); void fput(struct file *file) { @@ -317,7 +317,7 @@ void fput(struct file *file) } if (llist_add(&file->f_u.fu_llist, &delayed_fput_list)) - schedule_work(&delayed_fput_work); + schedule_delayed_work(&delayed_fput_work, 1); } } diff --git a/fs/select.c b/fs/select.c index 35d4adc749d9..dfd5cb18c012 100644 --- a/fs/select.c +++ b/fs/select.c @@ -238,8 +238,7 @@ int poll_schedule_timeout(struct poll_wqueues *pwq, int state, set_current_state(state); if (!pwq->triggered) - rc = freezable_schedule_hrtimeout_range(expires, slack, - HRTIMER_MODE_ABS); + rc = schedule_hrtimeout_range(expires, slack, HRTIMER_MODE_ABS); __set_current_state(TASK_RUNNING); /* diff --git a/fs/seq_file.c b/fs/seq_file.c index 3135c2525c76..a290157265ef 100644 --- a/fs/seq_file.c +++ b/fs/seq_file.c @@ -328,6 +328,8 @@ 
loff_t seq_lseek(struct file *file, loff_t offset, int whence) m->read_pos = offset; retval = file->f_pos = offset; } + } else { + file->f_pos = offset; } } file->f_version = m->version; diff --git a/include/linux/ipc_namespace.h b/include/linux/ipc_namespace.h index 19c19a5eee29..f6c82de12541 100644 --- a/include/linux/ipc_namespace.h +++ b/include/linux/ipc_namespace.h @@ -34,9 +34,9 @@ struct ipc_namespace { int sem_ctls[4]; int used_sems; - int msg_ctlmax; - int msg_ctlmnb; - int msg_ctlmni; + unsigned int msg_ctlmax; + unsigned int msg_ctlmnb; + unsigned int msg_ctlmni; atomic_t msg_bytes; atomic_t msg_hdrs; int auto_msgmni; diff --git a/include/linux/percpu.h b/include/linux/percpu.h index cc88172c7d9a..c74088ab103b 100644 --- a/include/linux/percpu.h +++ b/include/linux/percpu.h @@ -332,7 +332,7 @@ do { \ #endif #ifndef this_cpu_sub -# define this_cpu_sub(pcp, val) this_cpu_add((pcp), -(val)) +# define this_cpu_sub(pcp, val) this_cpu_add((pcp), -(typeof(pcp))(val)) #endif #ifndef this_cpu_inc @@ -418,7 +418,7 @@ do { \ # define this_cpu_add_return(pcp, val) __pcpu_size_call_return2(this_cpu_add_return_, pcp, val) #endif -#define this_cpu_sub_return(pcp, val) this_cpu_add_return(pcp, -(val)) +#define this_cpu_sub_return(pcp, val) this_cpu_add_return(pcp, -(typeof(pcp))(val)) #define this_cpu_inc_return(pcp) this_cpu_add_return(pcp, 1) #define this_cpu_dec_return(pcp) this_cpu_add_return(pcp, -1) @@ -586,7 +586,7 @@ do { \ #endif #ifndef __this_cpu_sub -# define __this_cpu_sub(pcp, val) __this_cpu_add((pcp), -(val)) +# define __this_cpu_sub(pcp, val) __this_cpu_add((pcp), -(typeof(pcp))(val)) #endif #ifndef __this_cpu_inc @@ -668,7 +668,7 @@ do { \ __pcpu_size_call_return2(__this_cpu_add_return_, pcp, val) #endif -#define __this_cpu_sub_return(pcp, val) __this_cpu_add_return(pcp, -(val)) +#define __this_cpu_sub_return(pcp, val) __this_cpu_add_return(pcp, -(typeof(pcp))(val)) #define __this_cpu_inc_return(pcp) __this_cpu_add_return(pcp, 1) #define __this_cpu_dec_return(pcp) __this_cpu_add_return(pcp, -1) diff --git a/include/trace/events/target.h b/include/trace/events/target.h index aef8fc354025..da9cc0f05c93 100644 --- a/include/trace/events/target.h +++ b/include/trace/events/target.h @@ -144,7 +144,7 @@ TRACE_EVENT(target_sequencer_start, ), TP_fast_assign( - __entry->unpacked_lun = cmd->se_lun->unpacked_lun; + __entry->unpacked_lun = cmd->orig_fe_lun; __entry->opcode = cmd->t_task_cdb[0]; __entry->data_length = cmd->data_length; __entry->task_attribute = cmd->sam_task_attr; @@ -182,7 +182,7 @@ TRACE_EVENT(target_cmd_complete, ), TP_fast_assign( - __entry->unpacked_lun = cmd->se_lun->unpacked_lun; + __entry->unpacked_lun = cmd->orig_fe_lun; __entry->opcode = cmd->t_task_cdb[0]; __entry->data_length = cmd->data_length; __entry->task_attribute = cmd->sam_task_attr; diff --git a/include/uapi/linux/perf_event.h b/include/uapi/linux/perf_event.h index 009a655a5d35..2fc1602e23bb 100644 --- a/include/uapi/linux/perf_event.h +++ b/include/uapi/linux/perf_event.h @@ -456,13 +456,15 @@ struct perf_event_mmap_page { /* * Control data for the mmap() data buffer. * - * User-space reading the @data_head value should issue an rmb(), on - * SMP capable platforms, after reading this value -- see - * perf_event_wakeup(). + * User-space reading the @data_head value should issue an smp_rmb(), + * after reading this value. * * When the mapping is PROT_WRITE the @data_tail value should be - * written by userspace to reflect the last read data. In this case - * the kernel will not over-write unread data. 
+ * written by userspace to reflect the last read data, after issueing + * an smp_mb() to separate the data read from the ->data_tail store. + * In this case the kernel will not over-write unread data. + * + * See perf_output_put_handle() for the data ordering. */ __u64 data_head; /* head in the data section */ __u64 data_tail; /* user-space written tail */ diff --git a/ipc/ipc_sysctl.c b/ipc/ipc_sysctl.c index 130dfece27ac..b0e99deb6d05 100644 --- a/ipc/ipc_sysctl.c +++ b/ipc/ipc_sysctl.c @@ -62,7 +62,7 @@ static int proc_ipc_dointvec_minmax_orphans(ctl_table *table, int write, return err; } -static int proc_ipc_callback_dointvec(ctl_table *table, int write, +static int proc_ipc_callback_dointvec_minmax(ctl_table *table, int write, void __user *buffer, size_t *lenp, loff_t *ppos) { struct ctl_table ipc_table; @@ -72,7 +72,7 @@ static int proc_ipc_callback_dointvec(ctl_table *table, int write, memcpy(&ipc_table, table, sizeof(ipc_table)); ipc_table.data = get_ipc(table); - rc = proc_dointvec(&ipc_table, write, buffer, lenp, ppos); + rc = proc_dointvec_minmax(&ipc_table, write, buffer, lenp, ppos); if (write && !rc && lenp_bef == *lenp) /* @@ -152,15 +152,13 @@ static int proc_ipcauto_dointvec_minmax(ctl_table *table, int write, #define proc_ipc_dointvec NULL #define proc_ipc_dointvec_minmax NULL #define proc_ipc_dointvec_minmax_orphans NULL -#define proc_ipc_callback_dointvec NULL +#define proc_ipc_callback_dointvec_minmax NULL #define proc_ipcauto_dointvec_minmax NULL #endif static int zero; static int one = 1; -#ifdef CONFIG_CHECKPOINT_RESTORE static int int_max = INT_MAX; -#endif static struct ctl_table ipc_kern_table[] = { { @@ -198,21 +196,27 @@ static struct ctl_table ipc_kern_table[] = { .data = &init_ipc_ns.msg_ctlmax, .maxlen = sizeof (init_ipc_ns.msg_ctlmax), .mode = 0644, - .proc_handler = proc_ipc_dointvec, + .proc_handler = proc_ipc_dointvec_minmax, + .extra1 = &zero, + .extra2 = &int_max, }, { .procname = "msgmni", .data = &init_ipc_ns.msg_ctlmni, .maxlen = sizeof (init_ipc_ns.msg_ctlmni), .mode = 0644, - .proc_handler = proc_ipc_callback_dointvec, + .proc_handler = proc_ipc_callback_dointvec_minmax, + .extra1 = &zero, + .extra2 = &int_max, }, { .procname = "msgmnb", .data = &init_ipc_ns.msg_ctlmnb, .maxlen = sizeof (init_ipc_ns.msg_ctlmnb), .mode = 0644, - .proc_handler = proc_ipc_dointvec, + .proc_handler = proc_ipc_dointvec_minmax, + .extra1 = &zero, + .extra2 = &int_max, }, { .procname = "sem", diff --git a/kernel/events/core.c b/kernel/events/core.c index d49a9d29334c..953c14348375 100644 --- a/kernel/events/core.c +++ b/kernel/events/core.c @@ -6767,6 +6767,10 @@ static int perf_copy_attr(struct perf_event_attr __user *uattr, if (ret) return -EFAULT; + /* disabled for now */ + if (attr->mmap2) + return -EINVAL; + if (attr->__reserved_1) return -EINVAL; diff --git a/kernel/events/ring_buffer.c b/kernel/events/ring_buffer.c index cd55144270b5..9c2ddfbf4525 100644 --- a/kernel/events/ring_buffer.c +++ b/kernel/events/ring_buffer.c @@ -87,10 +87,31 @@ again: goto out; /* - * Publish the known good head. Rely on the full barrier implied - * by atomic_dec_and_test() order the rb->head read and this - * write. + * Since the mmap() consumer (userspace) can run on a different CPU: + * + * kernel user + * + * READ ->data_tail READ ->data_head + * smp_mb() (A) smp_rmb() (C) + * WRITE $data READ $data + * smp_wmb() (B) smp_mb() (D) + * STORE ->data_head WRITE ->data_tail + * + * Where A pairs with D, and B pairs with C. 
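/*
 * For illustration, the user-space (C/D) half of this pairing could look
 * like the sketch below; "pg" is assumed to point at the mmap()ed
 * struct perf_event_mmap_page, "base" and consume_records() are
 * placeholders, and GCC atomic fences stand in for smp_rmb()/smp_mb():
 *
 *	__u64 head = *(volatile __u64 *)&pg->data_head;	// READ ->data_head
 *	__atomic_thread_fence(__ATOMIC_ACQUIRE);	// (C) smp_rmb()
 *	consume_records(base, pg->data_tail, head);	// READ $data
 *	__atomic_thread_fence(__ATOMIC_SEQ_CST);	// (D) smp_mb()
 *	pg->data_tail = head;				// WRITE ->data_tail
 */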
+ * + * I don't think A needs to be a full barrier because we won't in fact + * write data until we see the store from userspace. So we simply don't + * issue the data WRITE until we observe it. Be conservative for now. + * + * OTOH, D needs to be a full barrier since it separates the data READ + * from the tail WRITE. + * + * For B a WMB is sufficient since it separates two WRITEs, and for C + * an RMB is sufficient since it separates two READs. + * + * See perf_output_begin(). */ + smp_wmb(); rb->user_page->data_head = head; /* @@ -154,9 +175,11 @@ int perf_output_begin(struct perf_output_handle *handle, * Userspace could choose to issue a mb() before updating the * tail pointer. So that all reads will be completed before the * write is issued. + * + * See perf_output_put_handle(). */ tail = ACCESS_ONCE(rb->user_page->data_tail); - smp_rmb(); + smp_mb(); offset = head = local_read(&rb->head); head += size; if (unlikely(!perf_output_space(rb, tail, offset, head))) diff --git a/kernel/mutex.c b/kernel/mutex.c index 6d647aedffea..d24105b1b794 100644 --- a/kernel/mutex.c +++ b/kernel/mutex.c @@ -410,7 +410,7 @@ ww_mutex_set_context_fastpath(struct ww_mutex *lock, static __always_inline int __sched __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass, struct lockdep_map *nest_lock, unsigned long ip, - struct ww_acquire_ctx *ww_ctx) + struct ww_acquire_ctx *ww_ctx, const bool use_ww_ctx) { struct task_struct *task = current; struct mutex_waiter waiter; @@ -450,7 +450,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass, struct task_struct *owner; struct mspin_node node; - if (!__builtin_constant_p(ww_ctx == NULL) && ww_ctx->acquired > 0) { + if (use_ww_ctx && ww_ctx->acquired > 0) { struct ww_mutex *ww; ww = container_of(lock, struct ww_mutex, base); @@ -480,7 +480,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass, if ((atomic_read(&lock->count) == 1) && (atomic_cmpxchg(&lock->count, 1, 0) == 1)) { lock_acquired(&lock->dep_map, ip); - if (!__builtin_constant_p(ww_ctx == NULL)) { + if (use_ww_ctx) { struct ww_mutex *ww; ww = container_of(lock, struct ww_mutex, base); @@ -551,7 +551,7 @@ slowpath: goto err; } - if (!__builtin_constant_p(ww_ctx == NULL) && ww_ctx->acquired > 0) { + if (use_ww_ctx && ww_ctx->acquired > 0) { ret = __mutex_lock_check_stamp(lock, ww_ctx); if (ret) goto err; @@ -575,7 +575,7 @@ skip_wait: lock_acquired(&lock->dep_map, ip); mutex_set_owner(lock); - if (!__builtin_constant_p(ww_ctx == NULL)) { + if (use_ww_ctx) { struct ww_mutex *ww = container_of(lock, struct ww_mutex, base); struct mutex_waiter *cur; @@ -615,7 +615,7 @@ mutex_lock_nested(struct mutex *lock, unsigned int subclass) { might_sleep(); __mutex_lock_common(lock, TASK_UNINTERRUPTIBLE, - subclass, NULL, _RET_IP_, NULL); + subclass, NULL, _RET_IP_, NULL, 0); } EXPORT_SYMBOL_GPL(mutex_lock_nested); @@ -625,7 +625,7 @@ _mutex_lock_nest_lock(struct mutex *lock, struct lockdep_map *nest) { might_sleep(); __mutex_lock_common(lock, TASK_UNINTERRUPTIBLE, - 0, nest, _RET_IP_, NULL); + 0, nest, _RET_IP_, NULL, 0); } EXPORT_SYMBOL_GPL(_mutex_lock_nest_lock); @@ -635,7 +635,7 @@ mutex_lock_killable_nested(struct mutex *lock, unsigned int subclass) { might_sleep(); return __mutex_lock_common(lock, TASK_KILLABLE, - subclass, NULL, _RET_IP_, NULL); + subclass, NULL, _RET_IP_, NULL, 0); } EXPORT_SYMBOL_GPL(mutex_lock_killable_nested); @@ -644,7 +644,7 @@ mutex_lock_interruptible_nested(struct mutex *lock, unsigned int subclass) { might_sleep(); return 
__mutex_lock_common(lock, TASK_INTERRUPTIBLE, - subclass, NULL, _RET_IP_, NULL); + subclass, NULL, _RET_IP_, NULL, 0); } EXPORT_SYMBOL_GPL(mutex_lock_interruptible_nested); @@ -682,7 +682,7 @@ __ww_mutex_lock(struct ww_mutex *lock, struct ww_acquire_ctx *ctx) might_sleep(); ret = __mutex_lock_common(&lock->base, TASK_UNINTERRUPTIBLE, - 0, &ctx->dep_map, _RET_IP_, ctx); + 0, &ctx->dep_map, _RET_IP_, ctx, 1); if (!ret && ctx->acquired > 1) return ww_mutex_deadlock_injection(lock, ctx); @@ -697,7 +697,7 @@ __ww_mutex_lock_interruptible(struct ww_mutex *lock, struct ww_acquire_ctx *ctx) might_sleep(); ret = __mutex_lock_common(&lock->base, TASK_INTERRUPTIBLE, - 0, &ctx->dep_map, _RET_IP_, ctx); + 0, &ctx->dep_map, _RET_IP_, ctx, 1); if (!ret && ctx->acquired > 1) return ww_mutex_deadlock_injection(lock, ctx); @@ -809,28 +809,28 @@ __mutex_lock_slowpath(atomic_t *lock_count) struct mutex *lock = container_of(lock_count, struct mutex, count); __mutex_lock_common(lock, TASK_UNINTERRUPTIBLE, 0, - NULL, _RET_IP_, NULL); + NULL, _RET_IP_, NULL, 0); } static noinline int __sched __mutex_lock_killable_slowpath(struct mutex *lock) { return __mutex_lock_common(lock, TASK_KILLABLE, 0, - NULL, _RET_IP_, NULL); + NULL, _RET_IP_, NULL, 0); } static noinline int __sched __mutex_lock_interruptible_slowpath(struct mutex *lock) { return __mutex_lock_common(lock, TASK_INTERRUPTIBLE, 0, - NULL, _RET_IP_, NULL); + NULL, _RET_IP_, NULL, 0); } static noinline int __sched __ww_mutex_lock_slowpath(struct ww_mutex *lock, struct ww_acquire_ctx *ctx) { return __mutex_lock_common(&lock->base, TASK_UNINTERRUPTIBLE, 0, - NULL, _RET_IP_, ctx); + NULL, _RET_IP_, ctx, 1); } static noinline int __sched @@ -838,7 +838,7 @@ __ww_mutex_lock_interruptible_slowpath(struct ww_mutex *lock, struct ww_acquire_ctx *ctx) { return __mutex_lock_common(&lock->base, TASK_INTERRUPTIBLE, 0, - NULL, _RET_IP_, ctx); + NULL, _RET_IP_, ctx, 1); } #endif diff --git a/kernel/power/hibernate.c b/kernel/power/hibernate.c index c9c759d5a15c..0121dab83f43 100644 --- a/kernel/power/hibernate.c +++ b/kernel/power/hibernate.c @@ -846,7 +846,7 @@ static int software_resume(void) goto Finish; } -late_initcall(software_resume); +late_initcall_sync(software_resume); static const char * const hibernation_modes[] = { diff --git a/kernel/time/clockevents.c b/kernel/time/clockevents.c index 38959c866789..662c5798a685 100644 --- a/kernel/time/clockevents.c +++ b/kernel/time/clockevents.c @@ -33,29 +33,64 @@ struct ce_unbind { int res; }; -/** - * clockevents_delta2ns - Convert a latch value (device ticks) to nanoseconds - * @latch: value to convert - * @evt: pointer to clock event device descriptor - * - * Math helper, returns latch value converted to nanoseconds (bound checked) - */ -u64 clockevent_delta2ns(unsigned long latch, struct clock_event_device *evt) +static u64 cev_delta2ns(unsigned long latch, struct clock_event_device *evt, + bool ismax) { u64 clc = (u64) latch << evt->shift; + u64 rnd; if (unlikely(!evt->mult)) { evt->mult = 1; WARN_ON(1); } + rnd = (u64) evt->mult - 1; + + /* + * Upper bound sanity check. If the backwards conversion is + * not equal latch, we know that the above shift overflowed. + */ + if ((clc >> evt->shift) != (u64)latch) + clc = ~0ULL; + + /* + * Scaled math oddities: + * + * For mult <= (1 << shift) we can safely add mult - 1 to + * prevent integer rounding loss. So the backwards conversion + * from nsec to device ticks will be correct. + * + * For mult > (1 << shift), i.e. device frequency is > 1GHz we + * need to be careful. 
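/*
 * (Illustrative, deliberately small numbers -- not from a real device:
 *  a 2.5GHz clockevent with mult = 5, shift = 1 and a 13003 tick limit.)
 *
 *	min_delta_ns = ((13003 << 1) + (5 - 1)) / 5 = 26010 / 5 = 5202
 *
 * and converting those 5202ns back with (ns * mult) >> shift gives
 *
 *	(5202 * 5) >> 1 = 13005
 *
 * i.e. (mult - 1) >> shift = 2 ticks above the original latch, which is
 * the overshoot described next.
 */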
Adding mult - 1 will result in a value + * which when converted back to device ticks can be larger + * than latch by up to (mult - 1) >> shift. For the min_delta + * calculation we still want to apply this in order to stay + * above the minimum device ticks limit. For the upper limit + * we would end up with a latch value larger than the upper + * limit of the device, so we omit the add to stay below the + * device upper boundary. + * + * Also omit the add if it would overflow the u64 boundary. + */ + if ((~0ULL - clc > rnd) && + (!ismax || evt->mult <= (1U << evt->shift))) + clc += rnd; do_div(clc, evt->mult); - if (clc < 1000) - clc = 1000; - if (clc > KTIME_MAX) - clc = KTIME_MAX; - return clc; + /* Deltas less than 1usec are pointless noise */ + return clc > 1000 ? clc : 1000; +} + +/** + * clockevents_delta2ns - Convert a latch value (device ticks) to nanoseconds + * @latch: value to convert + * @evt: pointer to clock event device descriptor + * + * Math helper, returns latch value converted to nanoseconds (bound checked) + */ +u64 clockevent_delta2ns(unsigned long latch, struct clock_event_device *evt) +{ + return cev_delta2ns(latch, evt, false); } EXPORT_SYMBOL_GPL(clockevent_delta2ns); @@ -380,8 +415,8 @@ void clockevents_config(struct clock_event_device *dev, u32 freq) sec = 600; clockevents_calc_mult_shift(dev, freq, sec); - dev->min_delta_ns = clockevent_delta2ns(dev->min_delta_ticks, dev); - dev->max_delta_ns = clockevent_delta2ns(dev->max_delta_ticks, dev); + dev->min_delta_ns = cev_delta2ns(dev->min_delta_ticks, dev, false); + dev->max_delta_ns = cev_delta2ns(dev->max_delta_ticks, dev, true); } /** diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug index 06344d986eb9..094f3152ec2b 100644 --- a/lib/Kconfig.debug +++ b/lib/Kconfig.debug @@ -983,7 +983,7 @@ config DEBUG_KOBJECT config DEBUG_KOBJECT_RELEASE bool "kobject release debugging" - depends on DEBUG_KERNEL + depends on DEBUG_OBJECTS_TIMERS help kobjects are reference counted objects. 
This means that their last reference count put is not predictable, and the kobject can diff --git a/lib/scatterlist.c b/lib/scatterlist.c index a685c8a79578..d16fa295ae1d 100644 --- a/lib/scatterlist.c +++ b/lib/scatterlist.c @@ -577,7 +577,8 @@ void sg_miter_stop(struct sg_mapping_iter *miter) miter->__offset += miter->consumed; miter->__remaining -= miter->consumed; - if (miter->__flags & SG_MITER_TO_SG) + if ((miter->__flags & SG_MITER_TO_SG) && + !PageSlab(miter->page)) flush_kernel_dcache_page(miter->page); if (miter->__flags & SG_MITER_ATOMIC) { diff --git a/mm/huge_memory.c b/mm/huge_memory.c index 610e3df2768a..cca80d96e509 100644 --- a/mm/huge_memory.c +++ b/mm/huge_memory.c @@ -1278,64 +1278,90 @@ out: int do_huge_pmd_numa_page(struct mm_struct *mm, struct vm_area_struct *vma, unsigned long addr, pmd_t pmd, pmd_t *pmdp) { + struct anon_vma *anon_vma = NULL; struct page *page; unsigned long haddr = addr & HPAGE_PMD_MASK; + int page_nid = -1, this_nid = numa_node_id(); int target_nid; - int current_nid = -1; - bool migrated; + bool page_locked; + bool migrated = false; spin_lock(&mm->page_table_lock); if (unlikely(!pmd_same(pmd, *pmdp))) goto out_unlock; page = pmd_page(pmd); - get_page(page); - current_nid = page_to_nid(page); + page_nid = page_to_nid(page); count_vm_numa_event(NUMA_HINT_FAULTS); - if (current_nid == numa_node_id()) + if (page_nid == this_nid) count_vm_numa_event(NUMA_HINT_FAULTS_LOCAL); + /* + * Acquire the page lock to serialise THP migrations but avoid dropping + * page_table_lock if at all possible + */ + page_locked = trylock_page(page); target_nid = mpol_misplaced(page, vma, haddr); if (target_nid == -1) { - put_page(page); - goto clear_pmdnuma; + /* If the page was locked, there are no parallel migrations */ + if (page_locked) + goto clear_pmdnuma; + + /* + * Otherwise wait for potential migrations and retry. We do + * relock and check_same as the page may no longer be mapped. + * As the fault is being retried, do not account for it. + */ + spin_unlock(&mm->page_table_lock); + wait_on_page_locked(page); + page_nid = -1; + goto out; } - /* Acquire the page lock to serialise THP migrations */ + /* Page is misplaced, serialise migrations and parallel THP splits */ + get_page(page); spin_unlock(&mm->page_table_lock); - lock_page(page); + if (!page_locked) + lock_page(page); + anon_vma = page_lock_anon_vma_read(page); /* Confirm the PTE did not while locked */ spin_lock(&mm->page_table_lock); if (unlikely(!pmd_same(pmd, *pmdp))) { unlock_page(page); put_page(page); + page_nid = -1; goto out_unlock; } - spin_unlock(&mm->page_table_lock); - /* Migrate the THP to the requested node */ + /* + * Migrate the THP to the requested node, returns with page unlocked + * and pmd_numa cleared. 
+ */ + spin_unlock(&mm->page_table_lock); migrated = migrate_misplaced_transhuge_page(mm, vma, pmdp, pmd, addr, page, target_nid); - if (!migrated) - goto check_same; - - task_numa_fault(target_nid, HPAGE_PMD_NR, true); - return 0; + if (migrated) + page_nid = target_nid; -check_same: - spin_lock(&mm->page_table_lock); - if (unlikely(!pmd_same(pmd, *pmdp))) - goto out_unlock; + goto out; clear_pmdnuma: + BUG_ON(!PageLocked(page)); pmd = pmd_mknonnuma(pmd); set_pmd_at(mm, haddr, pmdp, pmd); VM_BUG_ON(pmd_numa(*pmdp)); update_mmu_cache_pmd(vma, addr, pmdp); + unlock_page(page); out_unlock: spin_unlock(&mm->page_table_lock); - if (current_nid != -1) - task_numa_fault(current_nid, HPAGE_PMD_NR, false); + +out: + if (anon_vma) + page_unlock_anon_vma_read(anon_vma); + + if (page_nid != -1) + task_numa_fault(page_nid, HPAGE_PMD_NR, migrated); + return 0; } diff --git a/mm/list_lru.c b/mm/list_lru.c index 72467914b856..72f9decb0104 100644 --- a/mm/list_lru.c +++ b/mm/list_lru.c @@ -81,8 +81,9 @@ restart: * decrement nr_to_walk first so that we don't livelock if we * get stuck on large numbesr of LRU_RETRY items */ - if (--(*nr_to_walk) == 0) + if (!*nr_to_walk) break; + --*nr_to_walk; ret = isolate(item, &nlru->lock, cb_arg); switch (ret) { diff --git a/mm/memcontrol.c b/mm/memcontrol.c index 34d3ca9572d6..13b9d0f221b8 100644 --- a/mm/memcontrol.c +++ b/mm/memcontrol.c @@ -54,6 +54,7 @@ #include <linux/page_cgroup.h> #include <linux/cpu.h> #include <linux/oom.h> +#include <linux/lockdep.h> #include "internal.h" #include <net/sock.h> #include <net/ip.h> @@ -2046,6 +2047,12 @@ static int mem_cgroup_soft_reclaim(struct mem_cgroup *root_memcg, return total; } +#ifdef CONFIG_LOCKDEP +static struct lockdep_map memcg_oom_lock_dep_map = { + .name = "memcg_oom_lock", +}; +#endif + static DEFINE_SPINLOCK(memcg_oom_lock); /* @@ -2083,7 +2090,8 @@ static bool mem_cgroup_oom_trylock(struct mem_cgroup *memcg) } iter->oom_lock = false; } - } + } else + mutex_acquire(&memcg_oom_lock_dep_map, 0, 1, _RET_IP_); spin_unlock(&memcg_oom_lock); @@ -2095,6 +2103,7 @@ static void mem_cgroup_oom_unlock(struct mem_cgroup *memcg) struct mem_cgroup *iter; spin_lock(&memcg_oom_lock); + mutex_release(&memcg_oom_lock_dep_map, 1, _RET_IP_); for_each_mem_cgroup_tree(iter, memcg) iter->oom_lock = false; spin_unlock(&memcg_oom_lock); @@ -2765,10 +2774,10 @@ done: *ptr = memcg; return 0; nomem: - *ptr = NULL; - if (gfp_mask & __GFP_NOFAIL) - return 0; - return -ENOMEM; + if (!(gfp_mask & __GFP_NOFAIL)) { + *ptr = NULL; + return -ENOMEM; + } bypass: *ptr = root_mem_cgroup; return -EINTR; @@ -3773,8 +3782,7 @@ void mem_cgroup_move_account_page_stat(struct mem_cgroup *from, { /* Update stat data for mem_cgroup */ preempt_disable(); - WARN_ON_ONCE(from->stat->count[idx] < nr_pages); - __this_cpu_add(from->stat->count[idx], -nr_pages); + __this_cpu_sub(from->stat->count[idx], nr_pages); __this_cpu_add(to->stat->count[idx], nr_pages); preempt_enable(); } @@ -4950,31 +4958,18 @@ static void mem_cgroup_reparent_charges(struct mem_cgroup *memcg) } while (usage > 0); } -/* - * This mainly exists for tests during the setting of set of use_hierarchy. 
- * Since this is the very setting we are changing, the current hierarchy value - * is meaningless - */ -static inline bool __memcg_has_children(struct mem_cgroup *memcg) -{ - struct cgroup_subsys_state *pos; - - /* bounce at first found */ - css_for_each_child(pos, &memcg->css) - return true; - return false; -} - -/* - * Must be called with memcg_create_mutex held, unless the cgroup is guaranteed - * to be already dead (as in mem_cgroup_force_empty, for instance). This is - * from mem_cgroup_count_children(), in the sense that we don't really care how - * many children we have; we only need to know if we have any. It also counts - * any memcg without hierarchy as infertile. - */ static inline bool memcg_has_children(struct mem_cgroup *memcg) { - return memcg->use_hierarchy && __memcg_has_children(memcg); + lockdep_assert_held(&memcg_create_mutex); + /* + * The lock does not prevent addition or deletion to the list + * of children, but it prevents a new child from being + * initialized based on this parent in css_online(), so it's + * enough to decide whether hierarchically inherited + * attributes can still be changed or not. + */ + return memcg->use_hierarchy && + !list_empty(&memcg->css.cgroup->children); } /* @@ -5054,7 +5049,7 @@ static int mem_cgroup_hierarchy_write(struct cgroup_subsys_state *css, */ if ((!parent_memcg || !parent_memcg->use_hierarchy) && (val == 1 || val == 0)) { - if (!__memcg_has_children(memcg)) + if (list_empty(&memcg->css.cgroup->children)) memcg->use_hierarchy = val; else retval = -EBUSY; diff --git a/mm/memory.c b/mm/memory.c index 1311f26497e6..d176154c243f 100644 --- a/mm/memory.c +++ b/mm/memory.c @@ -3521,12 +3521,12 @@ static int do_nonlinear_fault(struct mm_struct *mm, struct vm_area_struct *vma, } int numa_migrate_prep(struct page *page, struct vm_area_struct *vma, - unsigned long addr, int current_nid) + unsigned long addr, int page_nid) { get_page(page); count_vm_numa_event(NUMA_HINT_FAULTS); - if (current_nid == numa_node_id()) + if (page_nid == numa_node_id()) count_vm_numa_event(NUMA_HINT_FAULTS_LOCAL); return mpol_misplaced(page, vma, addr); @@ -3537,7 +3537,7 @@ int do_numa_page(struct mm_struct *mm, struct vm_area_struct *vma, { struct page *page = NULL; spinlock_t *ptl; - int current_nid = -1; + int page_nid = -1; int target_nid; bool migrated = false; @@ -3567,15 +3567,10 @@ int do_numa_page(struct mm_struct *mm, struct vm_area_struct *vma, return 0; } - current_nid = page_to_nid(page); - target_nid = numa_migrate_prep(page, vma, addr, current_nid); + page_nid = page_to_nid(page); + target_nid = numa_migrate_prep(page, vma, addr, page_nid); pte_unmap_unlock(ptep, ptl); if (target_nid == -1) { - /* - * Account for the fault against the current node if it not - * being replaced regardless of where the page is located. 
- */ - current_nid = numa_node_id(); put_page(page); goto out; } @@ -3583,11 +3578,11 @@ int do_numa_page(struct mm_struct *mm, struct vm_area_struct *vma, /* Migrate to the requested node */ migrated = migrate_misplaced_page(page, target_nid); if (migrated) - current_nid = target_nid; + page_nid = target_nid; out: - if (current_nid != -1) - task_numa_fault(current_nid, 1, migrated); + if (page_nid != -1) + task_numa_fault(page_nid, 1, migrated); return 0; } @@ -3602,7 +3597,6 @@ static int do_pmd_numa_page(struct mm_struct *mm, struct vm_area_struct *vma, unsigned long offset; spinlock_t *ptl; bool numa = false; - int local_nid = numa_node_id(); spin_lock(&mm->page_table_lock); pmd = *pmdp; @@ -3625,9 +3619,10 @@ static int do_pmd_numa_page(struct mm_struct *mm, struct vm_area_struct *vma, for (addr = _addr + offset; addr < _addr + PMD_SIZE; pte++, addr += PAGE_SIZE) { pte_t pteval = *pte; struct page *page; - int curr_nid = local_nid; + int page_nid = -1; int target_nid; - bool migrated; + bool migrated = false; + if (!pte_present(pteval)) continue; if (!pte_numa(pteval)) @@ -3649,25 +3644,19 @@ static int do_pmd_numa_page(struct mm_struct *mm, struct vm_area_struct *vma, if (unlikely(page_mapcount(page) != 1)) continue; - /* - * Note that the NUMA fault is later accounted to either - * the node that is currently running or where the page is - * migrated to. - */ - curr_nid = local_nid; - target_nid = numa_migrate_prep(page, vma, addr, - page_to_nid(page)); - if (target_nid == -1) { + page_nid = page_to_nid(page); + target_nid = numa_migrate_prep(page, vma, addr, page_nid); + pte_unmap_unlock(pte, ptl); + if (target_nid != -1) { + migrated = migrate_misplaced_page(page, target_nid); + if (migrated) + page_nid = target_nid; + } else { put_page(page); - continue; } - /* Migrate to the requested node */ - pte_unmap_unlock(pte, ptl); - migrated = migrate_misplaced_page(page, target_nid); - if (migrated) - curr_nid = target_nid; - task_numa_fault(curr_nid, 1, migrated); + if (page_nid != -1) + task_numa_fault(page_nid, 1, migrated); pte = pte_offset_map_lock(mm, pmdp, addr, &ptl); } diff --git a/mm/migrate.c b/mm/migrate.c index 7a7325ee1d08..c04692774e88 100644 --- a/mm/migrate.c +++ b/mm/migrate.c @@ -1715,12 +1715,12 @@ int migrate_misplaced_transhuge_page(struct mm_struct *mm, unlock_page(new_page); put_page(new_page); /* Free it */ - unlock_page(page); + /* Retake the callers reference and putback on LRU */ + get_page(page); putback_lru_page(page); - - count_vm_events(PGMIGRATE_FAIL, HPAGE_PMD_NR); - isolated = 0; - goto out; + mod_zone_page_state(page_zone(page), + NR_ISOLATED_ANON + page_lru, -HPAGE_PMD_NR); + goto out_fail; } /* @@ -1737,9 +1737,9 @@ int migrate_misplaced_transhuge_page(struct mm_struct *mm, entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma); entry = pmd_mkhuge(entry); - page_add_new_anon_rmap(new_page, vma, haddr); - + pmdp_clear_flush(vma, haddr, pmd); set_pmd_at(mm, haddr, pmd, entry); + page_add_new_anon_rmap(new_page, vma, haddr); update_mmu_cache_pmd(vma, address, &entry); page_remove_rmap(page); /* @@ -1758,7 +1758,6 @@ int migrate_misplaced_transhuge_page(struct mm_struct *mm, count_vm_events(PGMIGRATE_SUCCESS, HPAGE_PMD_NR); count_vm_numa_events(NUMA_PAGE_MIGRATE, HPAGE_PMD_NR); -out: mod_zone_page_state(page_zone(page), NR_ISOLATED_ANON + page_lru, -HPAGE_PMD_NR); @@ -1767,6 +1766,10 @@ out: out_fail: count_vm_events(PGMIGRATE_FAIL, HPAGE_PMD_NR); out_dropref: + entry = pmd_mknonnuma(entry); + set_pmd_at(mm, haddr, pmd, entry); + update_mmu_cache_pmd(vma, 
address, &entry); + unlock_page(page); put_page(page); return 0; diff --git a/mm/mprotect.c b/mm/mprotect.c index a3af058f68e4..412ba2b7326a 100644 --- a/mm/mprotect.c +++ b/mm/mprotect.c @@ -148,7 +148,7 @@ static inline unsigned long change_pmd_range(struct vm_area_struct *vma, split_huge_page_pmd(vma, addr, pmd); else if (change_huge_pmd(vma, pmd, addr, newprot, prot_numa)) { - pages += HPAGE_PMD_NR; + pages++; continue; } /* fall through */ diff --git a/mm/pagewalk.c b/mm/pagewalk.c index 5da2cbcfdbb5..2beeabf502c5 100644 --- a/mm/pagewalk.c +++ b/mm/pagewalk.c @@ -242,7 +242,7 @@ int walk_page_range(unsigned long addr, unsigned long end, if (err) break; pgd++; - } while (addr = next, addr != end); + } while (addr = next, addr < end); return err; } diff --git a/scripts/kallsyms.c b/scripts/kallsyms.c index 487ac6f37ca2..9a11f9f799f4 100644 --- a/scripts/kallsyms.c +++ b/scripts/kallsyms.c @@ -55,6 +55,7 @@ static struct sym_entry *table; static unsigned int table_size, table_cnt; static int all_symbols = 0; static char symbol_prefix_char = '\0'; +static unsigned long long kernel_start_addr = 0; int token_profit[0x10000]; @@ -65,7 +66,10 @@ unsigned char best_table_len[256]; static void usage(void) { - fprintf(stderr, "Usage: kallsyms [--all-symbols] [--symbol-prefix=<prefix char>] < in.map > out.S\n"); + fprintf(stderr, "Usage: kallsyms [--all-symbols] " + "[--symbol-prefix=<prefix char>] " + "[--page-offset=<CONFIG_PAGE_OFFSET>] " + "< in.map > out.S\n"); exit(1); } @@ -194,6 +198,9 @@ static int symbol_valid(struct sym_entry *s) int i; int offset = 1; + if (s->addr < kernel_start_addr) + return 0; + /* skip prefix char */ if (symbol_prefix_char && *(s->sym + 1) == symbol_prefix_char) offset++; @@ -646,6 +653,9 @@ int main(int argc, char **argv) if ((*p == '"' && *(p+2) == '"') || (*p == '\'' && *(p+2) == '\'')) p++; symbol_prefix_char = *p; + } else if (strncmp(argv[i], "--page-offset=", 14) == 0) { + const char *p = &argv[i][14]; + kernel_start_addr = strtoull(p, NULL, 16); } else usage(); } diff --git a/scripts/link-vmlinux.sh b/scripts/link-vmlinux.sh index 014994936b1c..32b10f53d0b4 100644 --- a/scripts/link-vmlinux.sh +++ b/scripts/link-vmlinux.sh @@ -82,6 +82,8 @@ kallsyms() kallsymopt="${kallsymopt} --all-symbols" fi + kallsymopt="${kallsymopt} --page-offset=$CONFIG_PAGE_OFFSET" + local aflags="${KBUILD_AFLAGS} ${KBUILD_AFLAGS_KERNEL} \ ${NOSTDINC_FLAGS} ${LINUXINCLUDE} ${KBUILD_CPPFLAGS}" diff --git a/sound/core/pcm.c b/sound/core/pcm.c index 17f45e8aa89c..e1e9e0c999fe 100644 --- a/sound/core/pcm.c +++ b/sound/core/pcm.c @@ -49,6 +49,8 @@ static struct snd_pcm *snd_pcm_get(struct snd_card *card, int device) struct snd_pcm *pcm; list_for_each_entry(pcm, &snd_pcm_devices, list) { + if (pcm->internal) + continue; if (pcm->card == card && pcm->device == device) return pcm; } @@ -60,6 +62,8 @@ static int snd_pcm_next(struct snd_card *card, int device) struct snd_pcm *pcm; list_for_each_entry(pcm, &snd_pcm_devices, list) { + if (pcm->internal) + continue; if (pcm->card == card && pcm->device > device) return pcm->device; else if (pcm->card->number > card->number) diff --git a/sound/pci/hda/hda_codec.c b/sound/pci/hda/hda_codec.c index 5b6c4e3c92ca..748c6a941963 100644 --- a/sound/pci/hda/hda_codec.c +++ b/sound/pci/hda/hda_codec.c @@ -4864,8 +4864,8 @@ static void hda_power_work(struct work_struct *work) spin_unlock(&codec->power_lock); state = hda_call_codec_suspend(codec, true); - codec->pm_down_notified = 0; - if (!bus->power_keep_link_on && (state & AC_PWRST_CLK_STOP_OK)) { + 
if (!codec->pm_down_notified && + !bus->power_keep_link_on && (state & AC_PWRST_CLK_STOP_OK)) { codec->pm_down_notified = 1; hda_call_pm_notify(bus, false); } diff --git a/sound/pci/hda/hda_generic.c b/sound/pci/hda/hda_generic.c index 26ad4f0aade3..b7c89dff7066 100644 --- a/sound/pci/hda/hda_generic.c +++ b/sound/pci/hda/hda_generic.c @@ -4475,9 +4475,11 @@ int snd_hda_gen_build_controls(struct hda_codec *codec) true, &spec->vmaster_mute.sw_kctl); if (err < 0) return err; - if (spec->vmaster_mute.hook) + if (spec->vmaster_mute.hook) { snd_hda_add_vmaster_hook(codec, &spec->vmaster_mute, spec->vmaster_mute_enum); + snd_hda_sync_vmaster_hook(&spec->vmaster_mute); + } } free_kctls(spec); /* no longer needed */ diff --git a/sound/pci/hda/patch_analog.c b/sound/pci/hda/patch_analog.c index 0cbdd87dde6d..2aa2f579b4d6 100644 --- a/sound/pci/hda/patch_analog.c +++ b/sound/pci/hda/patch_analog.c @@ -968,6 +968,15 @@ static void ad1884_fixup_hp_eapd(struct hda_codec *codec, } } +static void ad1884_fixup_thinkpad(struct hda_codec *codec, + const struct hda_fixup *fix, int action) +{ + struct ad198x_spec *spec = codec->spec; + + if (action == HDA_FIXUP_ACT_PRE_PROBE) + spec->gen.keep_eapd_on = 1; +} + /* set magic COEFs for dmic */ static const struct hda_verb ad1884_dmic_init_verbs[] = { {0x01, AC_VERB_SET_COEF_INDEX, 0x13f7}, @@ -979,6 +988,7 @@ enum { AD1884_FIXUP_AMP_OVERRIDE, AD1884_FIXUP_HP_EAPD, AD1884_FIXUP_DMIC_COEF, + AD1884_FIXUP_THINKPAD, AD1884_FIXUP_HP_TOUCHSMART, }; @@ -997,6 +1007,12 @@ static const struct hda_fixup ad1884_fixups[] = { .type = HDA_FIXUP_VERBS, .v.verbs = ad1884_dmic_init_verbs, }, + [AD1884_FIXUP_THINKPAD] = { + .type = HDA_FIXUP_FUNC, + .v.func = ad1884_fixup_thinkpad, + .chained = true, + .chain_id = AD1884_FIXUP_DMIC_COEF, + }, [AD1884_FIXUP_HP_TOUCHSMART] = { .type = HDA_FIXUP_VERBS, .v.verbs = ad1884_dmic_init_verbs, @@ -1008,7 +1024,7 @@ static const struct hda_fixup ad1884_fixups[] = { static const struct snd_pci_quirk ad1884_fixup_tbl[] = { SND_PCI_QUIRK(0x103c, 0x2a82, "HP Touchsmart", AD1884_FIXUP_HP_TOUCHSMART), SND_PCI_QUIRK_VENDOR(0x103c, "HP", AD1884_FIXUP_HP_EAPD), - SND_PCI_QUIRK_VENDOR(0x17aa, "Lenovo Thinkpad", AD1884_FIXUP_DMIC_COEF), + SND_PCI_QUIRK_VENDOR(0x17aa, "Lenovo Thinkpad", AD1884_FIXUP_THINKPAD), {} }; diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c index bf313bea7085..8ad554312b69 100644 --- a/sound/pci/hda/patch_realtek.c +++ b/sound/pci/hda/patch_realtek.c @@ -4623,6 +4623,7 @@ static const struct snd_pci_quirk alc662_fixup_tbl[] = { SND_PCI_QUIRK(0x1028, 0x05db, "Dell", ALC668_FIXUP_DELL_MIC_NO_PRESENCE), SND_PCI_QUIRK(0x103c, 0x1632, "HP RP5800", ALC662_FIXUP_HP_RP5800), SND_PCI_QUIRK(0x1043, 0x1477, "ASUS N56VZ", ALC662_FIXUP_ASUS_MODE4), + SND_PCI_QUIRK(0x1043, 0x1bf3, "ASUS N76VZ", ALC662_FIXUP_ASUS_MODE4), SND_PCI_QUIRK(0x1043, 0x8469, "ASUS mobo", ALC662_FIXUP_NO_JACK_DETECT), SND_PCI_QUIRK(0x105b, 0x0cd6, "Foxconn", ALC662_FIXUP_ASUS_MODE2), SND_PCI_QUIRK(0x144d, 0xc051, "Samsung R720", ALC662_FIXUP_IDEAPAD), diff --git a/sound/soc/codecs/wm_hubs.c b/sound/soc/codecs/wm_hubs.c index 8b50e5958de5..01daf655e20b 100644 --- a/sound/soc/codecs/wm_hubs.c +++ b/sound/soc/codecs/wm_hubs.c @@ -530,6 +530,7 @@ static int hp_supply_event(struct snd_soc_dapm_widget *w, hubs->hp_startup_mode); break; } + break; case SND_SOC_DAPM_PRE_PMD: snd_soc_update_bits(codec, WM8993_CHARGE_PUMP_1, diff --git a/sound/soc/soc-dapm.c b/sound/soc/soc-dapm.c index c17c14c394df..b2949aed1ac2 100644 --- a/sound/soc/soc-dapm.c +++ 
b/sound/soc/soc-dapm.c @@ -1949,7 +1949,7 @@ static ssize_t dapm_widget_power_read_file(struct file *file, w->active ? "active" : "inactive"); list_for_each_entry(p, &w->sources, list_sink) { - if (p->connected && !p->connected(w, p->sink)) + if (p->connected && !p->connected(w, p->source)) continue; if (p->connect) @@ -3495,6 +3495,7 @@ int snd_soc_dapm_new_dai_widgets(struct snd_soc_dapm_context *dapm, if (!w) { dev_err(dapm->dev, "ASoC: Failed to create %s widget\n", dai->driver->playback.stream_name); + return -ENOMEM; } w->priv = dai; @@ -3513,6 +3514,7 @@ int snd_soc_dapm_new_dai_widgets(struct snd_soc_dapm_context *dapm, if (!w) { dev_err(dapm->dev, "ASoC: Failed to create %s widget\n", dai->driver->capture.stream_name); + return -ENOMEM; } w->priv = dai; diff --git a/tools/perf/Documentation/perf-record.txt b/tools/perf/Documentation/perf-record.txt index e297b74471b8..ca0d3d9f4bac 100644 --- a/tools/perf/Documentation/perf-record.txt +++ b/tools/perf/Documentation/perf-record.txt @@ -90,8 +90,20 @@ OPTIONS Number of mmap data pages. Must be a power of two. -g:: + Enables call-graph (stack chain/backtrace) recording. + --call-graph:: - Do call-graph (stack chain/backtrace) recording. + Setup and enable call-graph (stack chain/backtrace) recording, + implies -g. + + Allows specifying "fp" (frame pointer) or "dwarf" + (DWARF's CFI - Call Frame Information) as the method to collect + the information used to show the call graphs. + + In some systems, where binaries are build with gcc + --fomit-frame-pointer, using the "fp" method will produce bogus + call graphs, using "dwarf", if available (perf tools linked to + the libunwind library) should be used instead. -q:: --quiet:: diff --git a/tools/perf/Documentation/perf-top.txt b/tools/perf/Documentation/perf-top.txt index 58d6598a9686..6a118e71d003 100644 --- a/tools/perf/Documentation/perf-top.txt +++ b/tools/perf/Documentation/perf-top.txt @@ -140,20 +140,12 @@ Default is to monitor all CPUS. --asm-raw:: Show raw instruction encoding of assembly instructions. --G [type,min,order]:: +-G:: + Enables call-graph (stack chain/backtrace) recording. + --call-graph:: - Display call chains using type, min percent threshold and order. - type can be either: - - flat: single column, linear exposure of call chains. - - graph: use a graph tree, displaying absolute overhead rates. - - fractal: like graph, but displays relative rates. Each branch of - the tree is considered as a new profiled object. - - order can be either: - - callee: callee based call graph. - - caller: inverted caller based call graph. - - Default: fractal,0.5,callee. + Setup and enable call-graph (stack chain/backtrace) recording, + implies -G. --ignore-callees=<regex>:: Ignore callees of the function(s) matching the given regex. diff --git a/tools/perf/builtin-kvm.c b/tools/perf/builtin-kvm.c index 935d52216c89..fbc2888d6495 100644 --- a/tools/perf/builtin-kvm.c +++ b/tools/perf/builtin-kvm.c @@ -888,11 +888,18 @@ static s64 perf_kvm__mmap_read_idx(struct perf_kvm_stat *kvm, int idx, while ((event = perf_evlist__mmap_read(kvm->evlist, idx)) != NULL) { err = perf_evlist__parse_sample(kvm->evlist, event, &sample); if (err) { + perf_evlist__mmap_consume(kvm->evlist, idx); pr_err("Failed to parse sample\n"); return -1; } err = perf_session_queue_event(kvm->session, event, &sample, 0); + /* + * FIXME: Here we can't consume the event, as perf_session_queue_event will + * point to it, and it'll get possibly overwritten by the kernel. 
+ */ + perf_evlist__mmap_consume(kvm->evlist, idx); + if (err) { pr_err("Failed to enqueue sample: %d\n", err); return -1; diff --git a/tools/perf/builtin-record.c b/tools/perf/builtin-record.c index a41ac41546c9..d04651484640 100644 --- a/tools/perf/builtin-record.c +++ b/tools/perf/builtin-record.c @@ -712,21 +712,12 @@ static int get_stack_size(char *str, unsigned long *_size) } #endif /* LIBUNWIND_SUPPORT */ -int record_parse_callchain_opt(const struct option *opt, - const char *arg, int unset) +int record_parse_callchain(const char *arg, struct perf_record_opts *opts) { - struct perf_record_opts *opts = opt->value; char *tok, *name, *saveptr = NULL; char *buf; int ret = -1; - /* --no-call-graph */ - if (unset) - return 0; - - /* We specified default option if none is provided. */ - BUG_ON(!arg); - /* We need buffer that we know we can write to. */ buf = malloc(strlen(arg) + 1); if (!buf) @@ -764,13 +755,9 @@ int record_parse_callchain_opt(const struct option *opt, ret = get_stack_size(tok, &size); opts->stack_dump_size = size; } - - if (!ret) - pr_debug("callchain: stack dump size %d\n", - opts->stack_dump_size); #endif /* LIBUNWIND_SUPPORT */ } else { - pr_err("callchain: Unknown -g option " + pr_err("callchain: Unknown --call-graph option " "value: %s\n", arg); break; } @@ -778,13 +765,52 @@ int record_parse_callchain_opt(const struct option *opt, } while (0); free(buf); + return ret; +} + +static void callchain_debug(struct perf_record_opts *opts) +{ + pr_debug("callchain: type %d\n", opts->call_graph); + if (opts->call_graph == CALLCHAIN_DWARF) + pr_debug("callchain: stack dump size %d\n", + opts->stack_dump_size); +} + +int record_parse_callchain_opt(const struct option *opt, + const char *arg, + int unset) +{ + struct perf_record_opts *opts = opt->value; + int ret; + + /* --no-call-graph */ + if (unset) { + opts->call_graph = CALLCHAIN_NONE; + pr_debug("callchain: disabled\n"); + return 0; + } + + ret = record_parse_callchain(arg, opts); if (!ret) - pr_debug("callchain: type %d\n", opts->call_graph); + callchain_debug(opts); return ret; } +int record_callchain_opt(const struct option *opt, + const char *arg __maybe_unused, + int unset __maybe_unused) +{ + struct perf_record_opts *opts = opt->value; + + if (opts->call_graph == CALLCHAIN_NONE) + opts->call_graph = CALLCHAIN_FP; + + callchain_debug(opts); + return 0; +} + static const char * const record_usage[] = { "perf record [<options>] [<command>]", "perf record [<options>] -- <command> [<options>]", @@ -813,12 +839,12 @@ static struct perf_record record = { }, }; -#define CALLCHAIN_HELP "do call-graph (stack chain/backtrace) recording: " +#define CALLCHAIN_HELP "setup and enables call-graph (stack chain/backtrace) recording: " #ifdef LIBUNWIND_SUPPORT -const char record_callchain_help[] = CALLCHAIN_HELP "[fp] dwarf"; +const char record_callchain_help[] = CALLCHAIN_HELP "fp dwarf"; #else -const char record_callchain_help[] = CALLCHAIN_HELP "[fp]"; +const char record_callchain_help[] = CALLCHAIN_HELP "fp"; #endif /* @@ -858,9 +884,12 @@ const struct option record_options[] = { "number of mmap data pages"), OPT_BOOLEAN(0, "group", &record.opts.group, "put the counters into a counter group"), - OPT_CALLBACK_DEFAULT('g', "call-graph", &record.opts, - "mode[,dump_size]", record_callchain_help, - &record_parse_callchain_opt, "fp"), + OPT_CALLBACK_NOOPT('g', NULL, &record.opts, + NULL, "enables call-graph recording" , + &record_callchain_opt), + OPT_CALLBACK(0, "call-graph", &record.opts, + "mode[,dump_size]", record_callchain_help, 
+ &record_parse_callchain_opt), OPT_INCR('v', "verbose", &verbose, "be more verbose (show counter open errors, etc)"), OPT_BOOLEAN('q', "quiet", &quiet, "don't print any message"), diff --git a/tools/perf/builtin-top.c b/tools/perf/builtin-top.c index 212214162bb2..5a11f13e56f9 100644 --- a/tools/perf/builtin-top.c +++ b/tools/perf/builtin-top.c @@ -810,7 +810,7 @@ static void perf_top__mmap_read_idx(struct perf_top *top, int idx) ret = perf_evlist__parse_sample(top->evlist, event, &sample); if (ret) { pr_err("Can't parse sample, err = %d\n", ret); - continue; + goto next_event; } evsel = perf_evlist__id2evsel(session->evlist, sample.id); @@ -825,13 +825,13 @@ static void perf_top__mmap_read_idx(struct perf_top *top, int idx) case PERF_RECORD_MISC_USER: ++top->us_samples; if (top->hide_user_symbols) - continue; + goto next_event; machine = &session->machines.host; break; case PERF_RECORD_MISC_KERNEL: ++top->kernel_samples; if (top->hide_kernel_symbols) - continue; + goto next_event; machine = &session->machines.host; break; case PERF_RECORD_MISC_GUEST_KERNEL: @@ -847,7 +847,7 @@ static void perf_top__mmap_read_idx(struct perf_top *top, int idx) */ /* Fall thru */ default: - continue; + goto next_event; } @@ -859,6 +859,8 @@ static void perf_top__mmap_read_idx(struct perf_top *top, int idx) machine__process_event(machine, event); } else ++session->stats.nr_unknown_events; +next_event: + perf_evlist__mmap_consume(top->evlist, idx); } } @@ -1016,16 +1018,16 @@ out_delete: } static int -parse_callchain_opt(const struct option *opt, const char *arg, int unset) +callchain_opt(const struct option *opt, const char *arg, int unset) { - /* - * --no-call-graph - */ - if (unset) - return 0; - symbol_conf.use_callchain = true; + return record_callchain_opt(opt, arg, unset); +} +static int +parse_callchain_opt(const struct option *opt, const char *arg, int unset) +{ + symbol_conf.use_callchain = true; return record_parse_callchain_opt(opt, arg, unset); } @@ -1106,9 +1108,12 @@ int cmd_top(int argc, const char **argv, const char *prefix __maybe_unused) "sort by key(s): pid, comm, dso, symbol, parent, weight, local_weight"), OPT_BOOLEAN('n', "show-nr-samples", &symbol_conf.show_nr_samples, "Show a column with the number of samples"), - OPT_CALLBACK_DEFAULT('G', "call-graph", &top.record_opts, - "mode[,dump_size]", record_callchain_help, - &parse_callchain_opt, "fp"), + OPT_CALLBACK_NOOPT('G', NULL, &top.record_opts, + NULL, "enables call-graph recording", + &callchain_opt), + OPT_CALLBACK(0, "call-graph", &top.record_opts, + "mode[,dump_size]", record_callchain_help, + &parse_callchain_opt), OPT_CALLBACK(0, "ignore-callees", NULL, "regex", "ignore callees of these functions in call graphs", report_parse_ignore_callees_opt), diff --git a/tools/perf/builtin-trace.c b/tools/perf/builtin-trace.c index 71aa3e35406b..99c8d9ad6729 100644 --- a/tools/perf/builtin-trace.c +++ b/tools/perf/builtin-trace.c @@ -987,7 +987,7 @@ again: err = perf_evlist__parse_sample(evlist, event, &sample); if (err) { fprintf(trace->output, "Can't parse sample, err = %d, skipping...\n", err); - continue; + goto next_event; } if (trace->base_time == 0) @@ -1001,18 +1001,20 @@ again: evsel = perf_evlist__id2evsel(evlist, sample.id); if (evsel == NULL) { fprintf(trace->output, "Unknown tp ID %" PRIu64 ", skipping...\n", sample.id); - continue; + goto next_event; } if (sample.raw_data == NULL) { fprintf(trace->output, "%s sample with no payload for tid: %d, cpu %d, raw_size=%d, skipping...\n", perf_evsel__name(evsel), sample.tid, 
sample.cpu, sample.raw_size); - continue; + goto next_event; } handler = evsel->handler.func; handler(trace, evsel, &sample); +next_event: + perf_evlist__mmap_consume(evlist, i); if (done) goto out_unmap_evlist; diff --git a/tools/perf/tests/code-reading.c b/tools/perf/tests/code-reading.c index 6fb781d5586c..e3fedfa2906e 100644 --- a/tools/perf/tests/code-reading.c +++ b/tools/perf/tests/code-reading.c @@ -290,6 +290,7 @@ static int process_events(struct machine *machine, struct perf_evlist *evlist, for (i = 0; i < evlist->nr_mmaps; i++) { while ((event = perf_evlist__mmap_read(evlist, i)) != NULL) { ret = process_event(machine, evlist, event, state); + perf_evlist__mmap_consume(evlist, i); if (ret < 0) return ret; } diff --git a/tools/perf/tests/keep-tracking.c b/tools/perf/tests/keep-tracking.c index d444ea2c47d9..376c35608534 100644 --- a/tools/perf/tests/keep-tracking.c +++ b/tools/perf/tests/keep-tracking.c @@ -36,6 +36,7 @@ static int find_comm(struct perf_evlist *evlist, const char *comm) (pid_t)event->comm.tid == getpid() && strcmp(event->comm.comm, comm) == 0) found += 1; + perf_evlist__mmap_consume(evlist, i); } } return found; diff --git a/tools/perf/tests/mmap-basic.c b/tools/perf/tests/mmap-basic.c index c4185b9aeb80..a7232c204eb9 100644 --- a/tools/perf/tests/mmap-basic.c +++ b/tools/perf/tests/mmap-basic.c @@ -122,6 +122,7 @@ int test__basic_mmap(void) goto out_munmap; } nr_events[evsel->idx]++; + perf_evlist__mmap_consume(evlist, 0); } err = 0; diff --git a/tools/perf/tests/open-syscall-tp-fields.c b/tools/perf/tests/open-syscall-tp-fields.c index fc5b9fca8b47..524b221b829b 100644 --- a/tools/perf/tests/open-syscall-tp-fields.c +++ b/tools/perf/tests/open-syscall-tp-fields.c @@ -77,8 +77,10 @@ int test__syscall_open_tp_fields(void) ++nr_events; - if (type != PERF_RECORD_SAMPLE) + if (type != PERF_RECORD_SAMPLE) { + perf_evlist__mmap_consume(evlist, i); continue; + } err = perf_evsel__parse_sample(evsel, event, &sample); if (err) { diff --git a/tools/perf/tests/perf-record.c b/tools/perf/tests/perf-record.c index b8a7056519ac..7923b06ffc91 100644 --- a/tools/perf/tests/perf-record.c +++ b/tools/perf/tests/perf-record.c @@ -263,6 +263,8 @@ int test__PERF_RECORD(void) type); ++errs; } + + perf_evlist__mmap_consume(evlist, i); } } diff --git a/tools/perf/tests/perf-time-to-tsc.c b/tools/perf/tests/perf-time-to-tsc.c index 0ab61b1f408e..4ca1b938f6a6 100644 --- a/tools/perf/tests/perf-time-to-tsc.c +++ b/tools/perf/tests/perf-time-to-tsc.c @@ -122,7 +122,7 @@ int test__perf_time_to_tsc(void) if (event->header.type != PERF_RECORD_COMM || (pid_t)event->comm.pid != getpid() || (pid_t)event->comm.tid != getpid()) - continue; + goto next_event; if (strcmp(event->comm.comm, comm1) == 0) { CHECK__(perf_evsel__parse_sample(evsel, event, @@ -134,6 +134,8 @@ int test__perf_time_to_tsc(void) &sample)); comm2_time = sample.time; } +next_event: + perf_evlist__mmap_consume(evlist, i); } } diff --git a/tools/perf/tests/sw-clock.c b/tools/perf/tests/sw-clock.c index 2e41e2d32ccc..6e2b44ec0749 100644 --- a/tools/perf/tests/sw-clock.c +++ b/tools/perf/tests/sw-clock.c @@ -78,7 +78,7 @@ static int __test__sw_clock_freq(enum perf_sw_ids clock_id) struct perf_sample sample; if (event->header.type != PERF_RECORD_SAMPLE) - continue; + goto next_event; err = perf_evlist__parse_sample(evlist, event, &sample); if (err < 0) { @@ -88,6 +88,8 @@ static int __test__sw_clock_freq(enum perf_sw_ids clock_id) total_periods += sample.period; nr_samples++; +next_event: + perf_evlist__mmap_consume(evlist, 0); } if 
((u64) nr_samples == total_periods) { diff --git a/tools/perf/tests/task-exit.c b/tools/perf/tests/task-exit.c index 28fe5894b061..a3e64876e940 100644 --- a/tools/perf/tests/task-exit.c +++ b/tools/perf/tests/task-exit.c @@ -96,10 +96,10 @@ int test__task_exit(void) retry: while ((event = perf_evlist__mmap_read(evlist, 0)) != NULL) { - if (event->header.type != PERF_RECORD_EXIT) - continue; + if (event->header.type == PERF_RECORD_EXIT) + nr_exit++; - nr_exit++; + perf_evlist__mmap_consume(evlist, 0); } if (!exited || !nr_exit) { diff --git a/tools/perf/ui/stdio/hist.c b/tools/perf/ui/stdio/hist.c index 194e2f42ff5d..6c152686e837 100644 --- a/tools/perf/ui/stdio/hist.c +++ b/tools/perf/ui/stdio/hist.c @@ -315,8 +315,7 @@ static inline void advance_hpp(struct perf_hpp *hpp, int inc) } static int hist_entry__period_snprintf(struct perf_hpp *hpp, - struct hist_entry *he, - bool color) + struct hist_entry *he) { const char *sep = symbol_conf.field_sep; struct perf_hpp_fmt *fmt; @@ -338,7 +337,7 @@ static int hist_entry__period_snprintf(struct perf_hpp *hpp, } else first = false; - if (color && fmt->color) + if (perf_hpp__use_color() && fmt->color) ret = fmt->color(fmt, hpp, he); else ret = fmt->entry(fmt, hpp, he); @@ -358,12 +357,11 @@ static int hist_entry__fprintf(struct hist_entry *he, size_t size, .buf = bf, .size = size, }; - bool color = !symbol_conf.field_sep; if (size == 0 || size > bfsz) size = hpp.size = bfsz; - ret = hist_entry__period_snprintf(&hpp, he, color); + ret = hist_entry__period_snprintf(&hpp, he); hist_entry__sort_snprintf(he, bf + ret, size - ret, hists); ret = fprintf(fp, "%s\n", bf); @@ -482,6 +480,7 @@ size_t hists__fprintf(struct hists *hists, bool show_header, int max_rows, print_entries: linesz = hists__sort_list_width(hists) + 3 + 1; + linesz += perf_hpp__color_overhead(); line = malloc(linesz); if (line == NULL) { ret = -1; diff --git a/tools/perf/util/callchain.h b/tools/perf/util/callchain.h index 2b585bc308cf..9e99060408ae 100644 --- a/tools/perf/util/callchain.h +++ b/tools/perf/util/callchain.h @@ -147,6 +147,9 @@ static inline void callchain_cursor_advance(struct callchain_cursor *cursor) struct option; +int record_parse_callchain(const char *arg, struct perf_record_opts *opts); int record_parse_callchain_opt(const struct option *opt, const char *arg, int unset); +int record_callchain_opt(const struct option *opt, const char *arg, int unset); + extern const char record_callchain_help[]; #endif /* __PERF_CALLCHAIN_H */ diff --git a/tools/perf/util/event.c b/tools/perf/util/event.c index 9b393e7dca6f..49096ea58a15 100644 --- a/tools/perf/util/event.c +++ b/tools/perf/util/event.c @@ -187,7 +187,7 @@ static int perf_event__synthesize_mmap_events(struct perf_tool *tool, return -1; } - event->header.type = PERF_RECORD_MMAP2; + event->header.type = PERF_RECORD_MMAP; /* * Just like the kernel, see __perf_event_mmap in kernel/perf_event.c */ @@ -198,7 +198,6 @@ static int perf_event__synthesize_mmap_events(struct perf_tool *tool, char prot[5]; char execname[PATH_MAX]; char anonstr[] = "//anon"; - unsigned int ino; size_t size; ssize_t n; @@ -209,15 +208,12 @@ static int perf_event__synthesize_mmap_events(struct perf_tool *tool, strcpy(execname, ""); /* 00400000-0040c000 r-xp 00000000 fd:01 41038 /bin/cat */ - n = sscanf(bf, "%"PRIx64"-%"PRIx64" %s %"PRIx64" %x:%x %u %s\n", - &event->mmap2.start, &event->mmap2.len, prot, - &event->mmap2.pgoff, &event->mmap2.maj, - &event->mmap2.min, - &ino, execname); - - event->mmap2.ino = (u64)ino; + n = sscanf(bf, 
"%"PRIx64"-%"PRIx64" %s %"PRIx64" %*x:%*x %*u %s\n", + &event->mmap.start, &event->mmap.len, prot, + &event->mmap.pgoff, + execname); - if (n != 8) + if (n != 5) continue; if (prot[2] != 'x') @@ -227,15 +223,15 @@ static int perf_event__synthesize_mmap_events(struct perf_tool *tool, strcpy(execname, anonstr); size = strlen(execname) + 1; - memcpy(event->mmap2.filename, execname, size); + memcpy(event->mmap.filename, execname, size); size = PERF_ALIGN(size, sizeof(u64)); - event->mmap2.len -= event->mmap.start; - event->mmap2.header.size = (sizeof(event->mmap2) - - (sizeof(event->mmap2.filename) - size)); - memset(event->mmap2.filename + size, 0, machine->id_hdr_size); - event->mmap2.header.size += machine->id_hdr_size; - event->mmap2.pid = tgid; - event->mmap2.tid = pid; + event->mmap.len -= event->mmap.start; + event->mmap.header.size = (sizeof(event->mmap) - + (sizeof(event->mmap.filename) - size)); + memset(event->mmap.filename + size, 0, machine->id_hdr_size); + event->mmap.header.size += machine->id_hdr_size; + event->mmap.pid = tgid; + event->mmap.tid = pid; if (process(tool, event, &synth_sample, machine) != 0) { rc = -1; diff --git a/tools/perf/util/evlist.c b/tools/perf/util/evlist.c index f9f77bee0b1b..e584cd30b0f2 100644 --- a/tools/perf/util/evlist.c +++ b/tools/perf/util/evlist.c @@ -545,12 +545,19 @@ union perf_event *perf_evlist__mmap_read(struct perf_evlist *evlist, int idx) md->prev = old; - if (!evlist->overwrite) - perf_mmap__write_tail(md, old); - return event; } +void perf_evlist__mmap_consume(struct perf_evlist *evlist, int idx) +{ + if (!evlist->overwrite) { + struct perf_mmap *md = &evlist->mmap[idx]; + unsigned int old = md->prev; + + perf_mmap__write_tail(md, old); + } +} + static void __perf_evlist__munmap(struct perf_evlist *evlist, int idx) { if (evlist->mmap[idx].base != NULL) { diff --git a/tools/perf/util/evlist.h b/tools/perf/util/evlist.h index 880d7139d2fb..206d09339306 100644 --- a/tools/perf/util/evlist.h +++ b/tools/perf/util/evlist.h @@ -89,6 +89,8 @@ struct perf_sample_id *perf_evlist__id2sid(struct perf_evlist *evlist, u64 id); union perf_event *perf_evlist__mmap_read(struct perf_evlist *self, int idx); +void perf_evlist__mmap_consume(struct perf_evlist *evlist, int idx); + int perf_evlist__open(struct perf_evlist *evlist); void perf_evlist__close(struct perf_evlist *evlist); diff --git a/tools/perf/util/evsel.c b/tools/perf/util/evsel.c index 0ce9febf1ba0..9f1ef9bee2d0 100644 --- a/tools/perf/util/evsel.c +++ b/tools/perf/util/evsel.c @@ -678,7 +678,6 @@ void perf_evsel__config(struct perf_evsel *evsel, attr->sample_type |= PERF_SAMPLE_WEIGHT; attr->mmap = track; - attr->mmap2 = track && !perf_missing_features.mmap2; attr->comm = track; /* diff --git a/tools/perf/util/hist.h b/tools/perf/util/hist.h index 1329b6b6ffe6..ce8dc61ce2c3 100644 --- a/tools/perf/util/hist.h +++ b/tools/perf/util/hist.h @@ -5,6 +5,7 @@ #include <pthread.h> #include "callchain.h" #include "header.h" +#include "color.h" extern struct callchain_param callchain_param; @@ -175,6 +176,18 @@ void perf_hpp__init(void); void perf_hpp__column_register(struct perf_hpp_fmt *format); void perf_hpp__column_enable(unsigned col); +static inline size_t perf_hpp__use_color(void) +{ + return !symbol_conf.field_sep; +} + +static inline size_t perf_hpp__color_overhead(void) +{ + return perf_hpp__use_color() ? 
+ (COLOR_MAXLEN + sizeof(PERF_COLOR_RESET)) * PERF_HPP__MAX_INDEX + : 0; +} + struct perf_evlist; struct hist_browser_timer { diff --git a/tools/perf/util/probe-finder.c b/tools/perf/util/probe-finder.c index c09e0a9fdf4c..f0692737ebf1 100644 --- a/tools/perf/util/probe-finder.c +++ b/tools/perf/util/probe-finder.c @@ -1357,10 +1357,10 @@ int debuginfo__find_probe_point(struct debuginfo *self, unsigned long addr, goto post; } + fname = dwarf_decl_file(&spdie); if (addr == (unsigned long)baseaddr) { /* Function entry - Relative line number is 0 */ lineno = baseline; - fname = dwarf_decl_file(&spdie); goto post; } diff --git a/tools/perf/util/python.c b/tools/perf/util/python.c index 71b5412bbbb9..2ac4bc92bb1f 100644 --- a/tools/perf/util/python.c +++ b/tools/perf/util/python.c @@ -822,6 +822,8 @@ static PyObject *pyrf_evlist__read_on_cpu(struct pyrf_evlist *pevlist, PyObject *pyevent = pyrf_event__new(event); struct pyrf_event *pevent = (struct pyrf_event *)pyevent; + perf_evlist__mmap_consume(evlist, cpu); + if (pyevent == NULL) return PyErr_NoMemory(); diff --git a/tools/perf/util/scripting-engines/trace-event-perl.c b/tools/perf/util/scripting-engines/trace-event-perl.c index a85e4ae5f3ac..c0c9795c4f02 100644 --- a/tools/perf/util/scripting-engines/trace-event-perl.c +++ b/tools/perf/util/scripting-engines/trace-event-perl.c @@ -282,7 +282,7 @@ static void perl_process_tracepoint(union perf_event *perf_event __maybe_unused, event = find_cache_event(evsel); if (!event) - die("ug! no event found for type %" PRIu64, evsel->attr.config); + die("ug! no event found for type %" PRIu64, (u64)evsel->attr.config); pid = raw_field_value(event, "common_pid", data); diff --git a/tools/perf/util/scripting-engines/trace-event-python.c b/tools/perf/util/scripting-engines/trace-event-python.c index cc75a3cef388..95d91a0b23af 100644 --- a/tools/perf/util/scripting-engines/trace-event-python.c +++ b/tools/perf/util/scripting-engines/trace-event-python.c @@ -56,6 +56,17 @@ static void handler_call_die(const char *handler_name) Py_FatalError("problem in Python trace event handler"); } +/* + * Insert val into into the dictionary and decrement the reference counter. + * This is necessary for dictionaries since PyDict_SetItemString() does not + * steal a reference, as opposed to PyTuple_SetItem(). 
+ */ +static void pydict_set_item_string_decref(PyObject *dict, const char *key, PyObject *val) +{ + PyDict_SetItemString(dict, key, val); + Py_DECREF(val); +} + static void define_value(enum print_arg_type field_type, const char *ev_name, const char *field_name, @@ -279,11 +290,11 @@ static void python_process_tracepoint(union perf_event *perf_event PyTuple_SetItem(t, n++, PyInt_FromLong(pid)); PyTuple_SetItem(t, n++, PyString_FromString(comm)); } else { - PyDict_SetItemString(dict, "common_cpu", PyInt_FromLong(cpu)); - PyDict_SetItemString(dict, "common_s", PyInt_FromLong(s)); - PyDict_SetItemString(dict, "common_ns", PyInt_FromLong(ns)); - PyDict_SetItemString(dict, "common_pid", PyInt_FromLong(pid)); - PyDict_SetItemString(dict, "common_comm", PyString_FromString(comm)); + pydict_set_item_string_decref(dict, "common_cpu", PyInt_FromLong(cpu)); + pydict_set_item_string_decref(dict, "common_s", PyInt_FromLong(s)); + pydict_set_item_string_decref(dict, "common_ns", PyInt_FromLong(ns)); + pydict_set_item_string_decref(dict, "common_pid", PyInt_FromLong(pid)); + pydict_set_item_string_decref(dict, "common_comm", PyString_FromString(comm)); } for (field = event->format.fields; field; field = field->next) { if (field->flags & FIELD_IS_STRING) { @@ -313,7 +324,7 @@ static void python_process_tracepoint(union perf_event *perf_event if (handler) PyTuple_SetItem(t, n++, obj); else - PyDict_SetItemString(dict, field->name, obj); + pydict_set_item_string_decref(dict, field->name, obj); } if (!handler) @@ -370,21 +381,21 @@ static void python_process_general_event(union perf_event *perf_event if (!handler || !PyCallable_Check(handler)) goto exit; - PyDict_SetItemString(dict, "ev_name", PyString_FromString(perf_evsel__name(evsel))); - PyDict_SetItemString(dict, "attr", PyString_FromStringAndSize( + pydict_set_item_string_decref(dict, "ev_name", PyString_FromString(perf_evsel__name(evsel))); + pydict_set_item_string_decref(dict, "attr", PyString_FromStringAndSize( (const char *)&evsel->attr, sizeof(evsel->attr))); - PyDict_SetItemString(dict, "sample", PyString_FromStringAndSize( + pydict_set_item_string_decref(dict, "sample", PyString_FromStringAndSize( (const char *)sample, sizeof(*sample))); - PyDict_SetItemString(dict, "raw_buf", PyString_FromStringAndSize( + pydict_set_item_string_decref(dict, "raw_buf", PyString_FromStringAndSize( (const char *)sample->raw_data, sample->raw_size)); - PyDict_SetItemString(dict, "comm", + pydict_set_item_string_decref(dict, "comm", PyString_FromString(thread->comm)); if (al->map) { - PyDict_SetItemString(dict, "dso", + pydict_set_item_string_decref(dict, "dso", PyString_FromString(al->map->dso->name)); } if (al->sym) { - PyDict_SetItemString(dict, "symbol", + pydict_set_item_string_decref(dict, "symbol", PyString_FromString(al->sym->name)); } diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c index a9dd682cf5e3..1cf9ccb01013 100644 --- a/virt/kvm/kvm_main.c +++ b/virt/kvm/kvm_main.c @@ -3091,7 +3091,7 @@ static const struct file_operations *stat_fops[] = { static int kvm_init_debug(void) { - int r = -EFAULT; + int r = -EEXIST; struct kvm_stats_debugfs_item *p; kvm_debugfs_dir = debugfs_create_dir("kvm", NULL); |
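The mm/memcontrol.c hunk above drops the __memcg_has_children() walk and instead checks list_empty() while documenting the locking requirement with lockdep_assert_held(). A rough sketch of that pattern with hypothetical names (an item cache whose list is protected by a caller-held lock); this is not code from the patch, only the idiom it uses:

        /* Caller must hold c->lock; with CONFIG_PROVE_LOCKING enabled,
         * lockdep warns at runtime if it does not. */
        static struct item *cache_first_item(struct item_cache *c)
        {
                lockdep_assert_held(&c->lock);
                return list_first_entry_or_null(&c->items, struct item, node);
        }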
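scripts/kallsyms grows a --page-offset option and link-vmlinux.sh now passes $CONFIG_PAGE_OFFSET, so symbols below the kernel start address are filtered out of the generated table. An illustrative invocation along the lines of the updated usage string; the 0xc0000000 value is only a typical 32-bit PAGE_OFFSET, not something taken from this patch:

        scripts/kallsyms --all-symbols --page-offset=0xc0000000 < in.map > out.S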
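The perf-record.txt and perf-top.txt changes above split plain -g/-G (enable call-graph recording with the default method) from --call-graph (select "fp" or "dwarf", optionally with a stack dump size). Hypothetical command lines showing the resulting usage (the workload name is made up):

        perf record -g -- ./workload                        # frame-pointer call graphs (default method)
        perf record --call-graph dwarf,8192 -- ./workload   # DWARF CFI unwinding with 8192-byte stack dumps (needs a libunwind-enabled perf)
        perf top -G                                         # perf top gets the same split: -G enables, --call-graph picks the method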
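Several of the tools/perf hunks convert their mmap-read loops to the new perf_evlist__mmap_consume() helper, which moves the ring-buffer tail update out of perf_evlist__mmap_read() so an event stays valid until the caller is done with it. A condensed sketch of the calling pattern those hunks establish (perf-internal identifiers, shown only to illustrate the read-then-consume contract, not a standalone program):

        while ((event = perf_evlist__mmap_read(evlist, idx)) != NULL) {
                err = perf_evlist__parse_sample(evlist, event, &sample);
                if (err) {
                        /* consume even on error so the ring-buffer tail still advances */
                        perf_evlist__mmap_consume(evlist, idx);
                        return -1;
                }

                /* ... use 'event' and 'sample' here, before the slot can be reused ... */

                perf_evlist__mmap_consume(evlist, idx);
        }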
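The util/event.c hunk reverts synthesized mmap events from PERF_RECORD_MMAP2 back to PERF_RECORD_MMAP and parses /proc/<pid>/maps with sscanf assignment suppression (%*x, %*u), skipping the device and inode columns without extra variables. A small standalone illustration of that format string; the sample line is the one quoted in the source comment:

        #include <stdio.h>
        #include <inttypes.h>

        int main(void)
        {
                const char *bf = "00400000-0040c000 r-xp 00000000 fd:01 41038 /bin/cat\n";
                uint64_t start, end, pgoff;
                char prot[5], execname[4096];

                /* %*x:%*x and %*u consume the dev and inode fields without storing them */
                int n = sscanf(bf, "%" SCNx64 "-%" SCNx64 " %4s %" SCNx64 " %*x:%*x %*u %4095s",
                               &start, &end, prot, &pgoff, execname);

                if (n == 5)     /* suppressed conversions do not count toward the return value */
                        printf("%#" PRIx64 "-%#" PRIx64 " %s %s\n", start, end, prot, execname);
                return 0;
        }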
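The trace-event-python.c changes work around a CPython reference-counting asymmetry: PyTuple_SetItem() steals the reference it is given, while PyDict_SetItemString() takes its own, so objects created inline for a dictionary leak one reference per call. A minimal before/after illustration using the same Python 2 C API the script engine uses:

        /* Leaks: the dict adds its own reference, and the temporary
         * returned by PyInt_FromLong() is never released by the caller. */
        PyDict_SetItemString(dict, "common_cpu", PyInt_FromLong(cpu));

        /* Fixed (what pydict_set_item_string_decref() wraps up): keep the
         * temporary, hand it to the dict, then drop the caller's reference. */
        PyObject *val = PyInt_FromLong(cpu);
        PyDict_SetItemString(dict, "common_cpu", val);
        Py_DECREF(val);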