317 files changed, 3460 insertions, 3073 deletions
diff --git a/Documentation/Changes b/Documentation/Changes index 6d8863004858..f447f0516f07 100644 --- a/Documentation/Changes +++ b/Documentation/Changes @@ -43,7 +43,7 @@ o udev 081 # udevd --version o grub 0.93 # grub --version || grub-install --version o mcelog 0.6 # mcelog --version o iptables 1.4.2 # iptables -V -o openssl & libcrypto 1.0.1k # openssl version +o openssl & libcrypto 1.0.0 # openssl version Kernel compilation diff --git a/Documentation/devicetree/bindings/arm/pmu.txt b/Documentation/devicetree/bindings/arm/pmu.txt index 435251fa9ce0..4b7c3d9b29bb 100644 --- a/Documentation/devicetree/bindings/arm/pmu.txt +++ b/Documentation/devicetree/bindings/arm/pmu.txt @@ -8,6 +8,8 @@ Required properties: - compatible : should be one of "arm,armv8-pmuv3" + "arm,cortex-a57-pmu" + "arm,cortex-a53-pmu" "arm,cortex-a17-pmu" "arm,cortex-a15-pmu" "arm,cortex-a12-pmu" diff --git a/Documentation/devicetree/bindings/input/cypress,cyapa.txt b/Documentation/devicetree/bindings/input/cypress,cyapa.txt index 635a3b036630..8d91ba9ff2fd 100644 --- a/Documentation/devicetree/bindings/input/cypress,cyapa.txt +++ b/Documentation/devicetree/bindings/input/cypress,cyapa.txt @@ -25,7 +25,7 @@ Example: /* Cypress Gen3 touchpad */ touchpad@67 { compatible = "cypress,cyapa"; - reg = <0x24>; + reg = <0x67>; interrupt-parent = <&gpio>; interrupts = <2 IRQ_TYPE_EDGE_FALLING>; /* GPIO 2 */ wakeup-source; diff --git a/Documentation/devicetree/bindings/interrupt-controller/qca,ath79-misc-intc.txt b/Documentation/devicetree/bindings/interrupt-controller/qca,ath79-misc-intc.txt index 391717a68f3b..ec96b1f01478 100644 --- a/Documentation/devicetree/bindings/interrupt-controller/qca,ath79-misc-intc.txt +++ b/Documentation/devicetree/bindings/interrupt-controller/qca,ath79-misc-intc.txt @@ -4,8 +4,8 @@ The MISC interrupt controller is a secondary controller for lower priority interrupt. Required Properties: -- compatible: has to be "qca,<soctype>-cpu-intc", "qca,ar7100-misc-intc" - as fallback +- compatible: has to be "qca,<soctype>-cpu-intc", "qca,ar7100-misc-intc" or + "qca,<soctype>-cpu-intc", "qca,ar7240-misc-intc" - reg: Base address and size of the controllers memory area - interrupt-parent: phandle of the parent interrupt controller. - interrupts: Interrupt specifier for the controllers interrupt. @@ -13,6 +13,9 @@ Required Properties: - #interrupt-cells : Specifies the number of cells needed to encode interrupt source, should be 1 +Compatible fallback depends on the SoC. Use ar7100 for ar71xx and ar913x, +use ar7240 for all other SoCs. + Please refer to interrupts.txt in this directory for details of the common Interrupt Controllers bindings used by client devices. 
@@ -28,3 +31,16 @@ Example: interrupt-controller; #interrupt-cells = <1>; }; + +Another example: + + interrupt-controller@18060010 { + compatible = "qca,ar9331-misc-intc", "qca,ar7240-misc-intc"; + reg = <0x18060010 0x4>; + + interrupt-parent = <&cpuintc>; + interrupts = <6>; + + interrupt-controller; + #interrupt-cells = <1>; + }; diff --git a/Documentation/input/multi-touch-protocol.txt b/Documentation/input/multi-touch-protocol.txt index b85d000faeb4..c51f1146f3bd 100644 --- a/Documentation/input/multi-touch-protocol.txt +++ b/Documentation/input/multi-touch-protocol.txt @@ -361,7 +361,7 @@ For win8 devices with both T and C coordinates, the position mapping is ABS_MT_POSITION_X := T_X ABS_MT_POSITION_Y := T_Y ABS_MT_TOOL_X := C_X - ABS_MT_TOOL_X := C_Y + ABS_MT_TOOL_Y := C_Y Unfortunately, there is not enough information to specify both the touching ellipse and the tool ellipse, so one has to resort to approximations. One diff --git a/Documentation/power/pci.txt b/Documentation/power/pci.txt index 62328d76b55b..b0e911e0e8f5 100644 --- a/Documentation/power/pci.txt +++ b/Documentation/power/pci.txt @@ -979,20 +979,45 @@ every time right after the runtime_resume() callback has returned (alternatively, the runtime_suspend() callback will have to check if the device should really be suspended and return -EAGAIN if that is not the case). -The runtime PM of PCI devices is disabled by default. It is also blocked by -pci_pm_init() that runs the pm_runtime_forbid() helper function. If a PCI -driver implements the runtime PM callbacks and intends to use the runtime PM -framework provided by the PM core and the PCI subsystem, it should enable this -feature by executing the pm_runtime_enable() helper function. However, the -driver should not call the pm_runtime_allow() helper function unblocking -the runtime PM of the device. Instead, it should allow user space or some -platform-specific code to do that (user space can do it via sysfs), although -once it has called pm_runtime_enable(), it must be prepared to handle the +The runtime PM of PCI devices is enabled by default by the PCI core. PCI +device drivers do not need to enable it and should not attempt to do so. +However, it is blocked by pci_pm_init() that runs the pm_runtime_forbid() +helper function. In addition to that, the runtime PM usage counter of +each PCI device is incremented by local_pci_probe() before executing the +probe callback provided by the device's driver. + +If a PCI driver implements the runtime PM callbacks and intends to use the +runtime PM framework provided by the PM core and the PCI subsystem, it needs +to decrement the device's runtime PM usage counter in its probe callback +function. If it doesn't do that, the counter will always be different from +zero for the device and it will never be runtime-suspended. The simplest +way to do that is by calling pm_runtime_put_noidle(), but if the driver +wants to schedule an autosuspend right away, for example, it may call +pm_runtime_put_autosuspend() instead for this purpose. Generally, it +just needs to call a function that decrements the device's usage counter +from its probe routine to make runtime PM work for the device. + +It is important to remember that the driver's runtime_suspend() callback +may be executed right after the usage counter has been decremented, because +user space may already have caused the pm_runtime_allow() helper function, +which unblocks the runtime PM of the device, to run via sysfs, so the driver +must be prepared to cope with that. 
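To make the counter handling described above concrete (including the remove-side balancing discussed a little further below), here is a minimal sketch of a hypothetical PCI driver. The foo_* names and PCI IDs are invented for illustration, all device setup beyond runtime PM is elided, and only the pm_runtime_* calls follow the rules in this document:

	#include <linux/module.h>
	#include <linux/pci.h>
	#include <linux/pm_runtime.h>

	static int foo_probe(struct pci_dev *pdev, const struct pci_device_id *id)
	{
		int err;

		err = pcim_enable_device(pdev);	/* all other setup elided */
		if (err)
			return err;

		/*
		 * Balance the usage counter increment done by local_pci_probe()
		 * so that the device can be runtime-suspended once user space
		 * unblocks runtime PM by running pm_runtime_allow() via sysfs.
		 */
		pm_runtime_put_noidle(&pdev->dev);
		return 0;
	}

	static void foo_remove(struct pci_dev *pdev)
	{
		/*
		 * Balance the decrement made in foo_probe().  The PCI core has
		 * already runtime-resumed the device and bumped its usage
		 * counter before calling this, so runtime PM is effectively
		 * disabled for the duration of the remove execution.
		 */
		pm_runtime_get_noresume(&pdev->dev);
	}

	static const struct pci_device_id foo_ids[] = {
		{ PCI_DEVICE(0x1234, 0x5678) },	/* hypothetical vendor/device */
		{ }
	};
	MODULE_DEVICE_TABLE(pci, foo_ids);

	static struct pci_driver foo_driver = {
		.name		= "foo",
		.id_table	= foo_ids,
		.probe		= foo_probe,
		.remove		= foo_remove,
	};
	module_pci_driver(foo_driver);
	MODULE_LICENSE("GPL");

Note that foo_probe() deliberately does not call pm_runtime_allow(); as the text below explains, that decision is left to user space or platform-specific code.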
+ +The driver itself should not call pm_runtime_allow(), though. Instead, it +should let user space or some platform-specific code do that (user space can +do it via sysfs as stated above), but it must be prepared to handle the runtime PM of the device correctly as soon as pm_runtime_allow() is called -(which may happen at any time). [It also is possible that user space causes -pm_runtime_allow() to be called via sysfs before the driver is loaded, so in -fact the driver has to be prepared to handle the runtime PM of the device as -soon as it calls pm_runtime_enable().] +(which may happen at any time, even before the driver is loaded). + +When the driver's remove callback runs, it has to balance the decrement of +the device's runtime PM usage counter made at probe time. For this reason, +if it has decremented the counter in its probe callback, it must run +pm_runtime_get_noresume() in its remove callback. [Since the core carries +out a runtime resume of the device and bumps up the device's usage counter +before running the driver's remove callback, the runtime PM of the device +is effectively disabled for the duration of the remove execution and all +runtime PM helper functions incrementing the device's usage counter are +then effectively equivalent to pm_runtime_get_noresume().] The runtime PM framework works by processing requests to suspend or resume devices, or to check if they are idle (in which cases it is reasonable to diff --git a/Documentation/ptp/testptp.c b/Documentation/ptp/testptp.c index 2bc8abc57fa0..6c6247aaa7b9 100644 --- a/Documentation/ptp/testptp.c +++ b/Documentation/ptp/testptp.c @@ -18,6 +18,7 @@ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */ #define _GNU_SOURCE +#define __SANE_USERSPACE_TYPES__ /* For PPC64, to get LL64 types */ #include <errno.h> #include <fcntl.h> #include <inttypes.h> diff --git a/MAINTAINERS b/MAINTAINERS index 9f6685f6c5a9..8addb19c299b 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -822,12 +822,13 @@ F: arch/arm/include/asm/floppy.h ARM PMU PROFILING AND DEBUGGING M: Will Deacon <will.deacon@arm.com> +R: Mark Rutland <mark.rutland@arm.com> S: Maintained -F: arch/arm/kernel/perf_* +F: arch/arm*/kernel/perf_* F: arch/arm/oprofile/common.c -F: arch/arm/kernel/hw_breakpoint.c -F: arch/arm/include/asm/hw_breakpoint.h -F: arch/arm/include/asm/perf_event.h +F: arch/arm*/kernel/hw_breakpoint.c +F: arch/arm*/include/asm/hw_breakpoint.h +F: arch/arm*/include/asm/perf_event.h F: drivers/perf/arm_pmu.c F: include/linux/perf/arm_pmu.h @@ -5957,7 +5958,7 @@ F: virt/kvm/ KERNEL VIRTUAL MACHINE (KVM) FOR AMD-V M: Joerg Roedel <joro@8bytes.org> L: kvm@vger.kernel.org -W: http://kvm.qumranet.com +W: http://www.linux-kvm.org/ S: Maintained F: arch/x86/include/asm/svm.h F: arch/x86/kvm/svm.c @@ -5965,7 +5966,7 @@ F: arch/x86/kvm/svm.c KERNEL VIRTUAL MACHINE (KVM) FOR POWERPC M: Alexander Graf <agraf@suse.com> L: kvm-ppc@vger.kernel.org -W: http://kvm.qumranet.com +W: http://www.linux-kvm.org/ T: git git://github.com/agraf/linux-2.6.git S: Supported F: arch/powerpc/include/asm/kvm* diff --git a/Makefile b/Makefile @@ -1,7 +1,7 @@ VERSION = 4 PATCHLEVEL = 3 SUBLEVEL = 0 -EXTRAVERSION = -rc3 +EXTRAVERSION = -rc4 NAME = Hurr durr I'ma sheep # *DOCUMENTATION* diff --git a/arch/arc/include/asm/Kbuild b/arch/arc/include/asm/Kbuild index 7611b10a2d23..0b10ef2a4372 100644 --- a/arch/arc/include/asm/Kbuild +++ b/arch/arc/include/asm/Kbuild @@ -48,4 +48,5 @@ generic-y += types.h generic-y += ucontext.h generic-y += user.h generic-y += vga.h +generic-y += word-at-a-time.h generic-y += 
xor.h diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig index 07d1811aa03f..98b4f98a13de 100644 --- a/arch/arm64/Kconfig +++ b/arch/arm64/Kconfig @@ -454,12 +454,8 @@ config HAVE_ARCH_PFN_VALID def_bool ARCH_HAS_HOLES_MEMORYMODEL || !SPARSEMEM config HW_PERF_EVENTS - bool "Enable hardware performance counter support for perf events" - depends on PERF_EVENTS - default y - help - Enable hardware performance counter support for perf events. If - disabled, perf events will use software events only. + def_bool y + depends on ARM_PMU config SYS_SUPPORTS_HUGETLBFS def_bool y diff --git a/arch/arm64/boot/dts/arm/juno-r1.dts b/arch/arm64/boot/dts/arm/juno-r1.dts index c62751153a4f..734e1272b19f 100644 --- a/arch/arm64/boot/dts/arm/juno-r1.dts +++ b/arch/arm64/boot/dts/arm/juno-r1.dts @@ -91,17 +91,21 @@ }; }; - pmu { - compatible = "arm,armv8-pmuv3"; + pmu_a57 { + compatible = "arm,cortex-a57-pmu"; interrupts = <GIC_SPI 02 IRQ_TYPE_LEVEL_HIGH>, - <GIC_SPI 06 IRQ_TYPE_LEVEL_HIGH>, - <GIC_SPI 18 IRQ_TYPE_LEVEL_HIGH>, + <GIC_SPI 06 IRQ_TYPE_LEVEL_HIGH>; + interrupt-affinity = <&A57_0>, + <&A57_1>; + }; + + pmu_a53 { + compatible = "arm,cortex-a53-pmu"; + interrupts = <GIC_SPI 18 IRQ_TYPE_LEVEL_HIGH>, <GIC_SPI 22 IRQ_TYPE_LEVEL_HIGH>, <GIC_SPI 26 IRQ_TYPE_LEVEL_HIGH>, <GIC_SPI 30 IRQ_TYPE_LEVEL_HIGH>; - interrupt-affinity = <&A57_0>, - <&A57_1>, - <&A53_0>, + interrupt-affinity = <&A53_0>, <&A53_1>, <&A53_2>, <&A53_3>; diff --git a/arch/arm64/boot/dts/arm/juno.dts b/arch/arm64/boot/dts/arm/juno.dts index d7cbdd482a61..ffa05aeab3c7 100644 --- a/arch/arm64/boot/dts/arm/juno.dts +++ b/arch/arm64/boot/dts/arm/juno.dts @@ -91,17 +91,21 @@ }; }; - pmu { - compatible = "arm,armv8-pmuv3"; + pmu_a57 { + compatible = "arm,cortex-a57-pmu"; interrupts = <GIC_SPI 02 IRQ_TYPE_LEVEL_HIGH>, - <GIC_SPI 06 IRQ_TYPE_LEVEL_HIGH>, - <GIC_SPI 18 IRQ_TYPE_LEVEL_HIGH>, + <GIC_SPI 06 IRQ_TYPE_LEVEL_HIGH>; + interrupt-affinity = <&A57_0>, + <&A57_1>; + }; + + pmu_a53 { + compatible = "arm,cortex-a53-pmu"; + interrupts = <GIC_SPI 18 IRQ_TYPE_LEVEL_HIGH>, <GIC_SPI 22 IRQ_TYPE_LEVEL_HIGH>, <GIC_SPI 26 IRQ_TYPE_LEVEL_HIGH>, <GIC_SPI 30 IRQ_TYPE_LEVEL_HIGH>; - interrupt-affinity = <&A57_0>, - <&A57_1>, - <&A53_0>, + interrupt-affinity = <&A53_0>, <&A53_1>, <&A53_2>, <&A53_3>; diff --git a/arch/arm64/configs/defconfig b/arch/arm64/configs/defconfig index 34d71dd86781..c8ccda16e079 100644 --- a/arch/arm64/configs/defconfig +++ b/arch/arm64/configs/defconfig @@ -109,6 +109,10 @@ CONFIG_SERIAL_8250_DW=y CONFIG_SERIAL_8250_MT6577=y CONFIG_SERIAL_AMBA_PL011=y CONFIG_SERIAL_AMBA_PL011_CONSOLE=y +CONFIG_SERIAL_SAMSUNG=y +CONFIG_SERIAL_SAMSUNG_UARTS_4=y +CONFIG_SERIAL_SAMSUNG_UARTS=4 +CONFIG_SERIAL_SAMSUNG_CONSOLE=y CONFIG_SERIAL_MSM=y CONFIG_SERIAL_MSM_CONSOLE=y CONFIG_SERIAL_OF_PLATFORM=y @@ -145,6 +149,10 @@ CONFIG_MMC_ARMMMCI=y CONFIG_MMC_SDHCI=y CONFIG_MMC_SDHCI_PLTFM=y CONFIG_MMC_SPI=y +CONFIG_MMC_DW=y +CONFIG_MMC_DW_IDMAC=y +CONFIG_MMC_DW_PLTFM=y +CONFIG_MMC_DW_EXYNOS=y CONFIG_NEW_LEDS=y CONFIG_LEDS_CLASS=y CONFIG_LEDS_SYSCON=y diff --git a/arch/arm64/include/asm/cacheflush.h b/arch/arm64/include/asm/cacheflush.h index c75b8d027eb1..54efedaf331f 100644 --- a/arch/arm64/include/asm/cacheflush.h +++ b/arch/arm64/include/asm/cacheflush.h @@ -115,6 +115,13 @@ extern void copy_to_user_page(struct vm_area_struct *, struct page *, #define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 1 extern void flush_dcache_page(struct page *); +static inline void __local_flush_icache_all(void) +{ + asm("ic iallu"); + dsb(nsh); + isb(); +} + static inline void 
__flush_icache_all(void) { asm("ic ialluis"); diff --git a/arch/arm64/include/asm/memory.h b/arch/arm64/include/asm/memory.h index 6b4c3ad75a2a..11ccf6c09533 100644 --- a/arch/arm64/include/asm/memory.h +++ b/arch/arm64/include/asm/memory.h @@ -42,12 +42,14 @@ * PAGE_OFFSET - the virtual address of the start of the kernel image (top * (VA_BITS - 1)) * VA_BITS - the maximum number of bits for virtual addresses. + * VA_START - the first kernel virtual address. * TASK_SIZE - the maximum size of a user space task. * TASK_UNMAPPED_BASE - the lower boundary of the mmap VM area. * The module space lives between the addresses given by TASK_SIZE * and PAGE_OFFSET - it must be within 128MB of the kernel text. */ #define VA_BITS (CONFIG_ARM64_VA_BITS) +#define VA_START (UL(0xffffffffffffffff) << VA_BITS) #define PAGE_OFFSET (UL(0xffffffffffffffff) << (VA_BITS - 1)) #define MODULES_END (PAGE_OFFSET) #define MODULES_VADDR (MODULES_END - SZ_64M) diff --git a/arch/arm64/include/asm/mmu.h b/arch/arm64/include/asm/mmu.h index 030208767185..990124a67eeb 100644 --- a/arch/arm64/include/asm/mmu.h +++ b/arch/arm64/include/asm/mmu.h @@ -17,15 +17,16 @@ #define __ASM_MMU_H typedef struct { - unsigned int id; - raw_spinlock_t id_lock; - void *vdso; + atomic64_t id; + void *vdso; } mm_context_t; -#define INIT_MM_CONTEXT(name) \ - .context.id_lock = __RAW_SPIN_LOCK_UNLOCKED(name.context.id_lock), - -#define ASID(mm) ((mm)->context.id & 0xffff) +/* + * This macro is only used by the TLBI code, which cannot race with an + * ASID change and therefore doesn't need to reload the counter using + * atomic64_read. + */ +#define ASID(mm) ((mm)->context.id.counter & 0xffff) extern void paging_init(void); extern void __iomem *early_io_map(phys_addr_t phys, unsigned long virt); diff --git a/arch/arm64/include/asm/mmu_context.h b/arch/arm64/include/asm/mmu_context.h index 8ec41e5f56f0..c0e87898ba96 100644 --- a/arch/arm64/include/asm/mmu_context.h +++ b/arch/arm64/include/asm/mmu_context.h @@ -28,13 +28,6 @@ #include <asm/cputype.h> #include <asm/pgtable.h> -#define MAX_ASID_BITS 16 - -extern unsigned int cpu_last_asid; - -void __init_new_context(struct task_struct *tsk, struct mm_struct *mm); -void __new_context(struct mm_struct *mm); - #ifdef CONFIG_PID_IN_CONTEXTIDR static inline void contextidr_thread_switch(struct task_struct *next) { @@ -77,96 +70,38 @@ static inline bool __cpu_uses_extended_idmap(void) unlikely(idmap_t0sz != TCR_T0SZ(VA_BITS))); } -static inline void __cpu_set_tcr_t0sz(u64 t0sz) -{ - unsigned long tcr; - - if (__cpu_uses_extended_idmap()) - asm volatile ( - " mrs %0, tcr_el1 ;" - " bfi %0, %1, %2, %3 ;" - " msr tcr_el1, %0 ;" - " isb" - : "=&r" (tcr) - : "r"(t0sz), "I"(TCR_T0SZ_OFFSET), "I"(TCR_TxSZ_WIDTH)); -} - -/* - * Set TCR.T0SZ to the value appropriate for activating the identity map. - */ -static inline void cpu_set_idmap_tcr_t0sz(void) -{ - __cpu_set_tcr_t0sz(idmap_t0sz); -} - /* * Set TCR.T0SZ to its default value (based on VA_BITS) */ static inline void cpu_set_default_tcr_t0sz(void) { - __cpu_set_tcr_t0sz(TCR_T0SZ(VA_BITS)); -} - -static inline void switch_new_context(struct mm_struct *mm) -{ - unsigned long flags; - - __new_context(mm); + unsigned long tcr; - local_irq_save(flags); - cpu_switch_mm(mm->pgd, mm); - local_irq_restore(flags); -} + if (!__cpu_uses_extended_idmap()) + return; -static inline void check_and_switch_context(struct mm_struct *mm, - struct task_struct *tsk) -{ - /* - * Required during context switch to avoid speculative page table - * walking with the wrong TTBR. 
- */ - cpu_set_reserved_ttbr0(); - - if (!((mm->context.id ^ cpu_last_asid) >> MAX_ASID_BITS)) - /* - * The ASID is from the current generation, just switch to the - * new pgd. This condition is only true for calls from - * context_switch() and interrupts are already disabled. - */ - cpu_switch_mm(mm->pgd, mm); - else if (irqs_disabled()) - /* - * Defer the new ASID allocation until after the context - * switch critical region since __new_context() cannot be - * called with interrupts disabled. - */ - set_ti_thread_flag(task_thread_info(tsk), TIF_SWITCH_MM); - else - /* - * That is a direct call to switch_mm() or activate_mm() with - * interrupts enabled and a new context. - */ - switch_new_context(mm); + asm volatile ( + " mrs %0, tcr_el1 ;" + " bfi %0, %1, %2, %3 ;" + " msr tcr_el1, %0 ;" + " isb" + : "=&r" (tcr) + : "r"(TCR_T0SZ(VA_BITS)), "I"(TCR_T0SZ_OFFSET), "I"(TCR_TxSZ_WIDTH)); } -#define init_new_context(tsk,mm) (__init_new_context(tsk,mm),0) +/* + * It would be nice to return ASIDs back to the allocator, but unfortunately + * that introduces a race with a generation rollover where we could erroneously + * free an ASID allocated in a future generation. We could workaround this by + * freeing the ASID from the context of the dying mm (e.g. in arch_exit_mmap), + * but we'd then need to make sure that we didn't dirty any TLBs afterwards. + * Setting a reserved TTBR0 or EPD0 would work, but it all gets ugly when you + * take CPU migration into account. + */ #define destroy_context(mm) do { } while(0) +void check_and_switch_context(struct mm_struct *mm, unsigned int cpu); -#define finish_arch_post_lock_switch \ - finish_arch_post_lock_switch -static inline void finish_arch_post_lock_switch(void) -{ - if (test_and_clear_thread_flag(TIF_SWITCH_MM)) { - struct mm_struct *mm = current->mm; - unsigned long flags; - - __new_context(mm); - - local_irq_save(flags); - cpu_switch_mm(mm->pgd, mm); - local_irq_restore(flags); - } -} +#define init_new_context(tsk,mm) ({ atomic64_set(&mm->context.id, 0); 0; }) /* * This is called when "tsk" is about to enter lazy TLB mode. @@ -194,6 +129,9 @@ switch_mm(struct mm_struct *prev, struct mm_struct *next, { unsigned int cpu = smp_processor_id(); + if (prev == next) + return; + /* * init_mm.pgd does not contain any user mappings and it is always * active for kernel addresses in TTBR1. Just set the reserved TTBR0. @@ -203,8 +141,7 @@ switch_mm(struct mm_struct *prev, struct mm_struct *next, return; } - if (!cpumask_test_and_set_cpu(cpu, mm_cpumask(next)) || prev != next) - check_and_switch_context(next, tsk); + check_and_switch_context(next, cpu); } #define deactivate_mm(tsk,mm) do { } while (0) diff --git a/arch/arm64/include/asm/page.h b/arch/arm64/include/asm/page.h index 7d9c7e4a424b..33892ef0172d 100644 --- a/arch/arm64/include/asm/page.h +++ b/arch/arm64/include/asm/page.h @@ -20,14 +20,20 @@ #define __ASM_PAGE_H /* PAGE_SHIFT determines the page size */ +/* CONT_SHIFT determines the number of pages which can be tracked together */ #ifdef CONFIG_ARM64_64K_PAGES #define PAGE_SHIFT 16 +#define CONT_SHIFT 5 #else #define PAGE_SHIFT 12 +#define CONT_SHIFT 4 #endif -#define PAGE_SIZE (_AC(1,UL) << PAGE_SHIFT) +#define PAGE_SIZE (_AC(1, UL) << PAGE_SHIFT) #define PAGE_MASK (~(PAGE_SIZE-1)) +#define CONT_SIZE (_AC(1, UL) << (CONT_SHIFT + PAGE_SHIFT)) +#define CONT_MASK (~(CONT_SIZE-1)) + /* * The idmap and swapper page tables need some space reserved in the kernel * image. 
Both require pgd, pud (4 levels only) and pmd tables to (section) diff --git a/arch/arm64/include/asm/pgtable-hwdef.h b/arch/arm64/include/asm/pgtable-hwdef.h index 24154b055835..95c1ec04a9ed 100644 --- a/arch/arm64/include/asm/pgtable-hwdef.h +++ b/arch/arm64/include/asm/pgtable-hwdef.h @@ -55,6 +55,13 @@ #define SECTION_MASK (~(SECTION_SIZE-1)) /* + * Contiguous page definitions. + */ +#define CONT_PTES (_AC(1, UL) << CONT_SHIFT) +/* the numerical offset of the PTE within a range of CONT_PTES */ +#define CONT_RANGE_OFFSET(addr) (((addr)>>PAGE_SHIFT)&(CONT_PTES-1)) + +/* * Hardware page table definitions. * * Level 1 descriptor (PUD). @@ -83,6 +90,7 @@ #define PMD_SECT_S (_AT(pmdval_t, 3) << 8) #define PMD_SECT_AF (_AT(pmdval_t, 1) << 10) #define PMD_SECT_NG (_AT(pmdval_t, 1) << 11) +#define PMD_SECT_CONT (_AT(pmdval_t, 1) << 52) #define PMD_SECT_PXN (_AT(pmdval_t, 1) << 53) #define PMD_SECT_UXN (_AT(pmdval_t, 1) << 54) @@ -105,6 +113,7 @@ #define PTE_AF (_AT(pteval_t, 1) << 10) /* Access Flag */ #define PTE_NG (_AT(pteval_t, 1) << 11) /* nG */ #define PTE_DBM (_AT(pteval_t, 1) << 51) /* Dirty Bit Management */ +#define PTE_CONT (_AT(pteval_t, 1) << 52) /* Contiguous range */ #define PTE_PXN (_AT(pteval_t, 1) << 53) /* Privileged XN */ #define PTE_UXN (_AT(pteval_t, 1) << 54) /* User XN */ diff --git a/arch/arm64/include/asm/pgtable.h b/arch/arm64/include/asm/pgtable.h index b0329be95cb1..3f481ef42c07 100644 --- a/arch/arm64/include/asm/pgtable.h +++ b/arch/arm64/include/asm/pgtable.h @@ -41,7 +41,7 @@ * fixed mappings and modules */ #define VMEMMAP_SIZE ALIGN((1UL << (VA_BITS - PAGE_SHIFT)) * sizeof(struct page), PUD_SIZE) -#define VMALLOC_START (UL(0xffffffffffffffff) << VA_BITS) +#define VMALLOC_START (VA_START) #define VMALLOC_END (PAGE_OFFSET - PUD_SIZE - VMEMMAP_SIZE - SZ_64K) #define vmemmap ((struct page *)(VMALLOC_END + SZ_64K)) @@ -72,6 +72,7 @@ extern void __pgd_error(const char *file, int line, unsigned long val); #define PAGE_KERNEL __pgprot(_PAGE_DEFAULT | PTE_PXN | PTE_UXN | PTE_DIRTY | PTE_WRITE) #define PAGE_KERNEL_EXEC __pgprot(_PAGE_DEFAULT | PTE_UXN | PTE_DIRTY | PTE_WRITE) +#define PAGE_KERNEL_EXEC_CONT __pgprot(_PAGE_DEFAULT | PTE_UXN | PTE_DIRTY | PTE_WRITE | PTE_CONT) #define PAGE_HYP __pgprot(_PAGE_DEFAULT | PTE_HYP) #define PAGE_HYP_DEVICE __pgprot(PROT_DEVICE_nGnRE | PTE_HYP) @@ -79,7 +80,7 @@ extern void __pgd_error(const char *file, int line, unsigned long val); #define PAGE_S2 __pgprot(PROT_DEFAULT | PTE_S2_MEMATTR(MT_S2_NORMAL) | PTE_S2_RDONLY) #define PAGE_S2_DEVICE __pgprot(PROT_DEFAULT | PTE_S2_MEMATTR(MT_S2_DEVICE_nGnRE) | PTE_S2_RDONLY | PTE_UXN) -#define PAGE_NONE __pgprot(((_PAGE_DEFAULT) & ~PTE_TYPE_MASK) | PTE_PROT_NONE | PTE_PXN | PTE_UXN) +#define PAGE_NONE __pgprot(((_PAGE_DEFAULT) & ~PTE_VALID) | PTE_PROT_NONE | PTE_PXN | PTE_UXN) #define PAGE_SHARED __pgprot(_PAGE_DEFAULT | PTE_USER | PTE_NG | PTE_PXN | PTE_UXN | PTE_WRITE) #define PAGE_SHARED_EXEC __pgprot(_PAGE_DEFAULT | PTE_USER | PTE_NG | PTE_PXN | PTE_WRITE) #define PAGE_COPY __pgprot(_PAGE_DEFAULT | PTE_USER | PTE_NG | PTE_PXN | PTE_UXN) @@ -140,6 +141,7 @@ extern struct page *empty_zero_page; #define pte_special(pte) (!!(pte_val(pte) & PTE_SPECIAL)) #define pte_write(pte) (!!(pte_val(pte) & PTE_WRITE)) #define pte_exec(pte) (!(pte_val(pte) & PTE_UXN)) +#define pte_cont(pte) (!!(pte_val(pte) & PTE_CONT)) #ifdef CONFIG_ARM64_HW_AFDBM #define pte_hw_dirty(pte) (pte_write(pte) && !(pte_val(pte) & PTE_RDONLY)) @@ -202,6 +204,16 @@ static inline pte_t pte_mkspecial(pte_t pte) return 
set_pte_bit(pte, __pgprot(PTE_SPECIAL)); } +static inline pte_t pte_mkcont(pte_t pte) +{ + return set_pte_bit(pte, __pgprot(PTE_CONT)); +} + +static inline pte_t pte_mknoncont(pte_t pte) +{ + return clear_pte_bit(pte, __pgprot(PTE_CONT)); +} + static inline void set_pte(pte_t *ptep, pte_t pte) { *ptep = pte; @@ -496,7 +508,7 @@ static inline pud_t *pud_offset(pgd_t *pgd, unsigned long addr) static inline pte_t pte_modify(pte_t pte, pgprot_t newprot) { const pteval_t mask = PTE_USER | PTE_PXN | PTE_UXN | PTE_RDONLY | - PTE_PROT_NONE | PTE_WRITE | PTE_TYPE_MASK; + PTE_PROT_NONE | PTE_VALID | PTE_WRITE; /* preserve the hardware dirty information */ if (pte_hw_dirty(pte)) pte = pte_mkdirty(pte); @@ -646,10 +658,10 @@ static inline void update_mmu_cache(struct vm_area_struct *vma, unsigned long addr, pte_t *ptep) { /* - * set_pte() does not have a DSB for user mappings, so make sure that - * the page table write is visible. + * We don't do anything here, so there's a very small chance of + * us retaking a user fault which we just fixed up. The alternative + * is doing a dsb(ishst), but that penalises the fastpath. */ - dsb(ishst); } #define update_mmu_cache_pmd(vma, address, pmd) do { } while (0) diff --git a/arch/arm64/include/asm/pmu.h b/arch/arm64/include/asm/pmu.h deleted file mode 100644 index b7710a59672c..000000000000 --- a/arch/arm64/include/asm/pmu.h +++ /dev/null @@ -1,83 +0,0 @@ -/* - * Based on arch/arm/include/asm/pmu.h - * - * Copyright (C) 2009 picoChip Designs Ltd, Jamie Iles - * Copyright (C) 2012 ARM Ltd. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program. If not, see <http://www.gnu.org/licenses/>. - */ -#ifndef __ASM_PMU_H -#define __ASM_PMU_H - -#ifdef CONFIG_HW_PERF_EVENTS - -/* The events for a given PMU register set. */ -struct pmu_hw_events { - /* - * The events that are active on the PMU for the given index. - */ - struct perf_event **events; - - /* - * A 1 bit for an index indicates that the counter is being used for - * an event. A 0 means that the counter can be used. - */ - unsigned long *used_mask; - - /* - * Hardware lock to serialize accesses to PMU registers. Needed for the - * read/modify/write sequences. 
- */ - raw_spinlock_t pmu_lock; -}; - -struct arm_pmu { - struct pmu pmu; - cpumask_t active_irqs; - int *irq_affinity; - const char *name; - irqreturn_t (*handle_irq)(int irq_num, void *dev); - void (*enable)(struct hw_perf_event *evt, int idx); - void (*disable)(struct hw_perf_event *evt, int idx); - int (*get_event_idx)(struct pmu_hw_events *hw_events, - struct hw_perf_event *hwc); - int (*set_event_filter)(struct hw_perf_event *evt, - struct perf_event_attr *attr); - u32 (*read_counter)(int idx); - void (*write_counter)(int idx, u32 val); - void (*start)(void); - void (*stop)(void); - void (*reset)(void *); - int (*map_event)(struct perf_event *event); - int num_events; - atomic_t active_events; - struct mutex reserve_mutex; - u64 max_period; - struct platform_device *plat_device; - struct pmu_hw_events *(*get_hw_events)(void); -}; - -#define to_arm_pmu(p) (container_of(p, struct arm_pmu, pmu)) - -int __init armpmu_register(struct arm_pmu *armpmu, char *name, int type); - -u64 armpmu_event_update(struct perf_event *event, - struct hw_perf_event *hwc, - int idx); - -int armpmu_event_set_period(struct perf_event *event, - struct hw_perf_event *hwc, - int idx); - -#endif /* CONFIG_HW_PERF_EVENTS */ -#endif /* __ASM_PMU_H */ diff --git a/arch/arm64/include/asm/thread_info.h b/arch/arm64/include/asm/thread_info.h index dcd06d18a42a..555c6dec5ef2 100644 --- a/arch/arm64/include/asm/thread_info.h +++ b/arch/arm64/include/asm/thread_info.h @@ -111,7 +111,6 @@ static inline struct thread_info *current_thread_info(void) #define TIF_RESTORE_SIGMASK 20 #define TIF_SINGLESTEP 21 #define TIF_32BIT 22 /* 32bit process */ -#define TIF_SWITCH_MM 23 /* deferred switch_mm */ #define _TIF_SIGPENDING (1 << TIF_SIGPENDING) #define _TIF_NEED_RESCHED (1 << TIF_NEED_RESCHED) diff --git a/arch/arm64/include/asm/tlb.h b/arch/arm64/include/asm/tlb.h index d6e6b6660380..ffdaea7954bb 100644 --- a/arch/arm64/include/asm/tlb.h +++ b/arch/arm64/include/asm/tlb.h @@ -37,17 +37,21 @@ static inline void __tlb_remove_table(void *_table) static inline void tlb_flush(struct mmu_gather *tlb) { - if (tlb->fullmm) { - flush_tlb_mm(tlb->mm); - } else { - struct vm_area_struct vma = { .vm_mm = tlb->mm, }; - /* - * The intermediate page table levels are already handled by - * the __(pte|pmd|pud)_free_tlb() functions, so last level - * TLBI is sufficient here. - */ - __flush_tlb_range(&vma, tlb->start, tlb->end, true); - } + struct vm_area_struct vma = { .vm_mm = tlb->mm, }; + + /* + * The ASID allocator will either invalidate the ASID or mark + * it as used. + */ + if (tlb->fullmm) + return; + + /* + * The intermediate page table levels are already handled by + * the __(pte|pmd|pud)_free_tlb() functions, so last level + * TLBI is sufficient here. + */ + __flush_tlb_range(&vma, tlb->start, tlb->end, true); } static inline void __pte_free_tlb(struct mmu_gather *tlb, pgtable_t pte, diff --git a/arch/arm64/include/asm/tlbflush.h b/arch/arm64/include/asm/tlbflush.h index 7bd2da021658..b460ae28e346 100644 --- a/arch/arm64/include/asm/tlbflush.h +++ b/arch/arm64/include/asm/tlbflush.h @@ -63,6 +63,14 @@ * only require the D-TLB to be invalidated. 
* - kaddr - Kernel virtual memory address */ +static inline void local_flush_tlb_all(void) +{ + dsb(nshst); + asm("tlbi vmalle1"); + dsb(nsh); + isb(); +} + static inline void flush_tlb_all(void) { dsb(ishst); @@ -73,7 +81,7 @@ static inline void flush_tlb_all(void) static inline void flush_tlb_mm(struct mm_struct *mm) { - unsigned long asid = (unsigned long)ASID(mm) << 48; + unsigned long asid = ASID(mm) << 48; dsb(ishst); asm("tlbi aside1is, %0" : : "r" (asid)); @@ -83,8 +91,7 @@ static inline void flush_tlb_mm(struct mm_struct *mm) static inline void flush_tlb_page(struct vm_area_struct *vma, unsigned long uaddr) { - unsigned long addr = uaddr >> 12 | - ((unsigned long)ASID(vma->vm_mm) << 48); + unsigned long addr = uaddr >> 12 | (ASID(vma->vm_mm) << 48); dsb(ishst); asm("tlbi vale1is, %0" : : "r" (addr)); @@ -101,7 +108,7 @@ static inline void __flush_tlb_range(struct vm_area_struct *vma, unsigned long start, unsigned long end, bool last_level) { - unsigned long asid = (unsigned long)ASID(vma->vm_mm) << 48; + unsigned long asid = ASID(vma->vm_mm) << 48; unsigned long addr; if ((end - start) > MAX_TLB_RANGE) { @@ -154,9 +161,8 @@ static inline void flush_tlb_kernel_range(unsigned long start, unsigned long end static inline void __flush_tlb_pgtable(struct mm_struct *mm, unsigned long uaddr) { - unsigned long addr = uaddr >> 12 | ((unsigned long)ASID(mm) << 48); + unsigned long addr = uaddr >> 12 | (ASID(mm) << 48); - dsb(ishst); asm("tlbi vae1is, %0" : : "r" (addr)); dsb(ish); } diff --git a/arch/arm64/kernel/asm-offsets.c b/arch/arm64/kernel/asm-offsets.c index 8d89cf8dae55..25de8b244961 100644 --- a/arch/arm64/kernel/asm-offsets.c +++ b/arch/arm64/kernel/asm-offsets.c @@ -60,7 +60,7 @@ int main(void) DEFINE(S_SYSCALLNO, offsetof(struct pt_regs, syscallno)); DEFINE(S_FRAME_SIZE, sizeof(struct pt_regs)); BLANK(); - DEFINE(MM_CONTEXT_ID, offsetof(struct mm_struct, context.id)); + DEFINE(MM_CONTEXT_ID, offsetof(struct mm_struct, context.id.counter)); BLANK(); DEFINE(VMA_VM_MM, offsetof(struct vm_area_struct, vm_mm)); DEFINE(VMA_VM_FLAGS, offsetof(struct vm_area_struct, vm_flags)); diff --git a/arch/arm64/kernel/efi.c b/arch/arm64/kernel/efi.c index e8ca6eaedd02..a48d1f477b2e 100644 --- a/arch/arm64/kernel/efi.c +++ b/arch/arm64/kernel/efi.c @@ -48,7 +48,6 @@ static struct mm_struct efi_mm = { .mmap_sem = __RWSEM_INITIALIZER(efi_mm.mmap_sem), .page_table_lock = __SPIN_LOCK_UNLOCKED(efi_mm.page_table_lock), .mmlist = LIST_HEAD_INIT(efi_mm.mmlist), - INIT_MM_CONTEXT(efi_mm) }; static int uefi_debug __initdata; @@ -258,7 +257,8 @@ static bool __init efi_virtmap_init(void) */ if (!is_normal_ram(md)) prot = __pgprot(PROT_DEVICE_nGnRE); - else if (md->type == EFI_RUNTIME_SERVICES_CODE) + else if (md->type == EFI_RUNTIME_SERVICES_CODE || + !PAGE_ALIGNED(md->phys_addr)) prot = PAGE_KERNEL_EXEC; else prot = PAGE_KERNEL; @@ -343,9 +343,9 @@ static void efi_set_pgd(struct mm_struct *mm) else cpu_switch_mm(mm->pgd, mm); - flush_tlb_all(); + local_flush_tlb_all(); if (icache_is_aivivt()) - __flush_icache_all(); + __local_flush_icache_all(); } void efi_virtmap_load(void) diff --git a/arch/arm64/kernel/entry-ftrace.S b/arch/arm64/kernel/entry-ftrace.S index 08cafc518b9a..0f03a8fe2314 100644 --- a/arch/arm64/kernel/entry-ftrace.S +++ b/arch/arm64/kernel/entry-ftrace.S @@ -178,6 +178,24 @@ ENTRY(ftrace_stub) ENDPROC(ftrace_stub) #ifdef CONFIG_FUNCTION_GRAPH_TRACER + /* save return value regs*/ + .macro save_return_regs + sub sp, sp, #64 + stp x0, x1, [sp] + stp x2, x3, [sp, #16] + stp x4, x5, [sp, #32] + 
stp x6, x7, [sp, #48] + .endm + + /* restore return value regs*/ + .macro restore_return_regs + ldp x0, x1, [sp] + ldp x2, x3, [sp, #16] + ldp x4, x5, [sp, #32] + ldp x6, x7, [sp, #48] + add sp, sp, #64 + .endm + /* * void ftrace_graph_caller(void) * @@ -204,11 +222,11 @@ ENDPROC(ftrace_graph_caller) * only when CONFIG_HAVE_FUNCTION_GRAPH_FP_TEST is enabled. */ ENTRY(return_to_handler) - str x0, [sp, #-16]! + save_return_regs mov x0, x29 // parent's fp bl ftrace_return_to_handler // addr = ftrace_return_to_handler(fp); mov x30, x0 // restore the original return address - ldr x0, [sp], #16 + restore_return_regs ret END(return_to_handler) #endif /* CONFIG_FUNCTION_GRAPH_TRACER */ diff --git a/arch/arm64/kernel/hw_breakpoint.c b/arch/arm64/kernel/hw_breakpoint.c index bba85c8f8037..46465d9fbc4d 100644 --- a/arch/arm64/kernel/hw_breakpoint.c +++ b/arch/arm64/kernel/hw_breakpoint.c @@ -163,6 +163,20 @@ enum hw_breakpoint_ops { HW_BREAKPOINT_RESTORE }; +static int is_compat_bp(struct perf_event *bp) +{ + struct task_struct *tsk = bp->hw.target; + + /* + * tsk can be NULL for per-cpu (non-ptrace) breakpoints. + * In this case, use the native interface, since we don't have + * the notion of a "compat CPU" and could end up relying on + * deprecated behaviour if we use unaligned watchpoints in + * AArch64 state. + */ + return tsk && is_compat_thread(task_thread_info(tsk)); +} + /** * hw_breakpoint_slot_setup - Find and setup a perf slot according to * operations @@ -420,7 +434,7 @@ static int arch_build_bp_info(struct perf_event *bp) * Watchpoints can be of length 1, 2, 4 or 8 bytes. */ if (info->ctrl.type == ARM_BREAKPOINT_EXECUTE) { - if (is_compat_task()) { + if (is_compat_bp(bp)) { if (info->ctrl.len != ARM_BREAKPOINT_LEN_2 && info->ctrl.len != ARM_BREAKPOINT_LEN_4) return -EINVAL; @@ -477,7 +491,7 @@ int arch_validate_hwbkpt_settings(struct perf_event *bp) * AArch32 tasks expect some simple alignment fixups, so emulate * that here. */ - if (is_compat_task()) { + if (is_compat_bp(bp)) { if (info->ctrl.len == ARM_BREAKPOINT_LEN_8) alignment_mask = 0x7; else diff --git a/arch/arm64/kernel/perf_event.c b/arch/arm64/kernel/perf_event.c index f9a74d4fff3b..5b1897e8ca24 100644 --- a/arch/arm64/kernel/perf_event.c +++ b/arch/arm64/kernel/perf_event.c @@ -18,651 +18,12 @@ * You should have received a copy of the GNU General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. */ -#define pr_fmt(fmt) "hw perfevents: " fmt - -#include <linux/bitmap.h> -#include <linux/interrupt.h> -#include <linux/irq.h> -#include <linux/kernel.h> -#include <linux/export.h> -#include <linux/of_device.h> -#include <linux/perf_event.h> -#include <linux/platform_device.h> -#include <linux/slab.h> -#include <linux/spinlock.h> -#include <linux/uaccess.h> -#include <asm/cputype.h> -#include <asm/irq.h> #include <asm/irq_regs.h> -#include <asm/pmu.h> - -/* - * ARMv8 supports a maximum of 32 events. - * The cycle counter is included in this total. - */ -#define ARMPMU_MAX_HWEVENTS 32 - -static DEFINE_PER_CPU(struct perf_event * [ARMPMU_MAX_HWEVENTS], hw_events); -static DEFINE_PER_CPU(unsigned long [BITS_TO_LONGS(ARMPMU_MAX_HWEVENTS)], used_mask); -static DEFINE_PER_CPU(struct pmu_hw_events, cpu_hw_events); - -#define to_arm_pmu(p) (container_of(p, struct arm_pmu, pmu)) - -/* Set at runtime when we know what CPU type we are. 
*/ -static struct arm_pmu *cpu_pmu; - -int -armpmu_get_max_events(void) -{ - int max_events = 0; - - if (cpu_pmu != NULL) - max_events = cpu_pmu->num_events; - - return max_events; -} -EXPORT_SYMBOL_GPL(armpmu_get_max_events); - -int perf_num_counters(void) -{ - return armpmu_get_max_events(); -} -EXPORT_SYMBOL_GPL(perf_num_counters); - -#define HW_OP_UNSUPPORTED 0xFFFF - -#define C(_x) \ - PERF_COUNT_HW_CACHE_##_x - -#define CACHE_OP_UNSUPPORTED 0xFFFF - -#define PERF_MAP_ALL_UNSUPPORTED \ - [0 ... PERF_COUNT_HW_MAX - 1] = HW_OP_UNSUPPORTED - -#define PERF_CACHE_MAP_ALL_UNSUPPORTED \ -[0 ... C(MAX) - 1] = { \ - [0 ... C(OP_MAX) - 1] = { \ - [0 ... C(RESULT_MAX) - 1] = CACHE_OP_UNSUPPORTED, \ - }, \ -} - -static int -armpmu_map_cache_event(const unsigned (*cache_map) - [PERF_COUNT_HW_CACHE_MAX] - [PERF_COUNT_HW_CACHE_OP_MAX] - [PERF_COUNT_HW_CACHE_RESULT_MAX], - u64 config) -{ - unsigned int cache_type, cache_op, cache_result, ret; - - cache_type = (config >> 0) & 0xff; - if (cache_type >= PERF_COUNT_HW_CACHE_MAX) - return -EINVAL; - - cache_op = (config >> 8) & 0xff; - if (cache_op >= PERF_COUNT_HW_CACHE_OP_MAX) - return -EINVAL; - - cache_result = (config >> 16) & 0xff; - if (cache_result >= PERF_COUNT_HW_CACHE_RESULT_MAX) - return -EINVAL; - - ret = (int)(*cache_map)[cache_type][cache_op][cache_result]; - - if (ret == CACHE_OP_UNSUPPORTED) - return -ENOENT; - - return ret; -} - -static int -armpmu_map_event(const unsigned (*event_map)[PERF_COUNT_HW_MAX], u64 config) -{ - int mapping; - - if (config >= PERF_COUNT_HW_MAX) - return -EINVAL; - - mapping = (*event_map)[config]; - return mapping == HW_OP_UNSUPPORTED ? -ENOENT : mapping; -} - -static int -armpmu_map_raw_event(u32 raw_event_mask, u64 config) -{ - return (int)(config & raw_event_mask); -} - -static int map_cpu_event(struct perf_event *event, - const unsigned (*event_map)[PERF_COUNT_HW_MAX], - const unsigned (*cache_map) - [PERF_COUNT_HW_CACHE_MAX] - [PERF_COUNT_HW_CACHE_OP_MAX] - [PERF_COUNT_HW_CACHE_RESULT_MAX], - u32 raw_event_mask) -{ - u64 config = event->attr.config; - - switch (event->attr.type) { - case PERF_TYPE_HARDWARE: - return armpmu_map_event(event_map, config); - case PERF_TYPE_HW_CACHE: - return armpmu_map_cache_event(cache_map, config); - case PERF_TYPE_RAW: - return armpmu_map_raw_event(raw_event_mask, config); - } - - return -ENOENT; -} - -int -armpmu_event_set_period(struct perf_event *event, - struct hw_perf_event *hwc, - int idx) -{ - struct arm_pmu *armpmu = to_arm_pmu(event->pmu); - s64 left = local64_read(&hwc->period_left); - s64 period = hwc->sample_period; - int ret = 0; - - if (unlikely(left <= -period)) { - left = period; - local64_set(&hwc->period_left, left); - hwc->last_period = period; - ret = 1; - } - - if (unlikely(left <= 0)) { - left += period; - local64_set(&hwc->period_left, left); - hwc->last_period = period; - ret = 1; - } - - /* - * Limit the maximum period to prevent the counter value - * from overtaking the one we are about to program. In - * effect we are reducing max_period to account for - * interrupt latency (and we are being very conservative). 
- */ - if (left > (armpmu->max_period >> 1)) - left = armpmu->max_period >> 1; - - local64_set(&hwc->prev_count, (u64)-left); - - armpmu->write_counter(idx, (u64)(-left) & 0xffffffff); - - perf_event_update_userpage(event); - - return ret; -} - -u64 -armpmu_event_update(struct perf_event *event, - struct hw_perf_event *hwc, - int idx) -{ - struct arm_pmu *armpmu = to_arm_pmu(event->pmu); - u64 delta, prev_raw_count, new_raw_count; - -again: - prev_raw_count = local64_read(&hwc->prev_count); - new_raw_count = armpmu->read_counter(idx); - - if (local64_cmpxchg(&hwc->prev_count, prev_raw_count, - new_raw_count) != prev_raw_count) - goto again; - - delta = (new_raw_count - prev_raw_count) & armpmu->max_period; - - local64_add(delta, &event->count); - local64_sub(delta, &hwc->period_left); - - return new_raw_count; -} - -static void -armpmu_read(struct perf_event *event) -{ - struct hw_perf_event *hwc = &event->hw; - - /* Don't read disabled counters! */ - if (hwc->idx < 0) - return; - - armpmu_event_update(event, hwc, hwc->idx); -} - -static void -armpmu_stop(struct perf_event *event, int flags) -{ - struct arm_pmu *armpmu = to_arm_pmu(event->pmu); - struct hw_perf_event *hwc = &event->hw; - - /* - * ARM pmu always has to update the counter, so ignore - * PERF_EF_UPDATE, see comments in armpmu_start(). - */ - if (!(hwc->state & PERF_HES_STOPPED)) { - armpmu->disable(hwc, hwc->idx); - barrier(); /* why? */ - armpmu_event_update(event, hwc, hwc->idx); - hwc->state |= PERF_HES_STOPPED | PERF_HES_UPTODATE; - } -} - -static void -armpmu_start(struct perf_event *event, int flags) -{ - struct arm_pmu *armpmu = to_arm_pmu(event->pmu); - struct hw_perf_event *hwc = &event->hw; - - /* - * ARM pmu always has to reprogram the period, so ignore - * PERF_EF_RELOAD, see the comment below. - */ - if (flags & PERF_EF_RELOAD) - WARN_ON_ONCE(!(hwc->state & PERF_HES_UPTODATE)); - - hwc->state = 0; - /* - * Set the period again. Some counters can't be stopped, so when we - * were stopped we simply disabled the IRQ source and the counter - * may have been left counting. If we don't do this step then we may - * get an interrupt too soon or *way* too late if the overflow has - * happened since disabling. - */ - armpmu_event_set_period(event, hwc, hwc->idx); - armpmu->enable(hwc, hwc->idx); -} - -static void -armpmu_del(struct perf_event *event, int flags) -{ - struct arm_pmu *armpmu = to_arm_pmu(event->pmu); - struct pmu_hw_events *hw_events = armpmu->get_hw_events(); - struct hw_perf_event *hwc = &event->hw; - int idx = hwc->idx; - - WARN_ON(idx < 0); - - armpmu_stop(event, PERF_EF_UPDATE); - hw_events->events[idx] = NULL; - clear_bit(idx, hw_events->used_mask); - - perf_event_update_userpage(event); -} - -static int -armpmu_add(struct perf_event *event, int flags) -{ - struct arm_pmu *armpmu = to_arm_pmu(event->pmu); - struct pmu_hw_events *hw_events = armpmu->get_hw_events(); - struct hw_perf_event *hwc = &event->hw; - int idx; - int err = 0; - - perf_pmu_disable(event->pmu); - - /* If we don't have a space for the counter then finish early. */ - idx = armpmu->get_event_idx(hw_events, hwc); - if (idx < 0) { - err = idx; - goto out; - } - - /* - * If there is an event in the counter we are going to use then make - * sure it is disabled. - */ - event->hw.idx = idx; - armpmu->disable(hwc, idx); - hw_events->events[idx] = event; - - hwc->state = PERF_HES_STOPPED | PERF_HES_UPTODATE; - if (flags & PERF_EF_START) - armpmu_start(event, PERF_EF_RELOAD); - - /* Propagate our changes to the userspace mapping. 
*/ - perf_event_update_userpage(event); - -out: - perf_pmu_enable(event->pmu); - return err; -} - -static int -validate_event(struct pmu *pmu, struct pmu_hw_events *hw_events, - struct perf_event *event) -{ - struct arm_pmu *armpmu; - struct hw_perf_event fake_event = event->hw; - struct pmu *leader_pmu = event->group_leader->pmu; - - if (is_software_event(event)) - return 1; - - /* - * Reject groups spanning multiple HW PMUs (e.g. CPU + CCI). The - * core perf code won't check that the pmu->ctx == leader->ctx - * until after pmu->event_init(event). - */ - if (event->pmu != pmu) - return 0; - - if (event->pmu != leader_pmu || event->state < PERF_EVENT_STATE_OFF) - return 1; - - if (event->state == PERF_EVENT_STATE_OFF && !event->attr.enable_on_exec) - return 1; - - armpmu = to_arm_pmu(event->pmu); - return armpmu->get_event_idx(hw_events, &fake_event) >= 0; -} - -static int -validate_group(struct perf_event *event) -{ - struct perf_event *sibling, *leader = event->group_leader; - struct pmu_hw_events fake_pmu; - DECLARE_BITMAP(fake_used_mask, ARMPMU_MAX_HWEVENTS); - - /* - * Initialise the fake PMU. We only need to populate the - * used_mask for the purposes of validation. - */ - memset(fake_used_mask, 0, sizeof(fake_used_mask)); - fake_pmu.used_mask = fake_used_mask; - - if (!validate_event(event->pmu, &fake_pmu, leader)) - return -EINVAL; - - list_for_each_entry(sibling, &leader->sibling_list, group_entry) { - if (!validate_event(event->pmu, &fake_pmu, sibling)) - return -EINVAL; - } - - if (!validate_event(event->pmu, &fake_pmu, event)) - return -EINVAL; - - return 0; -} - -static void -armpmu_disable_percpu_irq(void *data) -{ - unsigned int irq = *(unsigned int *)data; - disable_percpu_irq(irq); -} - -static void -armpmu_release_hardware(struct arm_pmu *armpmu) -{ - int irq; - unsigned int i, irqs; - struct platform_device *pmu_device = armpmu->plat_device; - - irqs = min(pmu_device->num_resources, num_possible_cpus()); - if (!irqs) - return; - - irq = platform_get_irq(pmu_device, 0); - if (irq <= 0) - return; - - if (irq_is_percpu(irq)) { - on_each_cpu(armpmu_disable_percpu_irq, &irq, 1); - free_percpu_irq(irq, &cpu_hw_events); - } else { - for (i = 0; i < irqs; ++i) { - int cpu = i; - - if (armpmu->irq_affinity) - cpu = armpmu->irq_affinity[i]; - - if (!cpumask_test_and_clear_cpu(cpu, &armpmu->active_irqs)) - continue; - irq = platform_get_irq(pmu_device, i); - if (irq > 0) - free_irq(irq, armpmu); - } - } -} - -static void -armpmu_enable_percpu_irq(void *data) -{ - unsigned int irq = *(unsigned int *)data; - enable_percpu_irq(irq, IRQ_TYPE_NONE); -} - -static int -armpmu_reserve_hardware(struct arm_pmu *armpmu) -{ - int err, irq; - unsigned int i, irqs; - struct platform_device *pmu_device = armpmu->plat_device; - - if (!pmu_device) - return -ENODEV; - - irqs = min(pmu_device->num_resources, num_possible_cpus()); - if (!irqs) { - pr_err("no irqs for PMUs defined\n"); - return -ENODEV; - } - - irq = platform_get_irq(pmu_device, 0); - if (irq <= 0) { - pr_err("failed to get valid irq for PMU device\n"); - return -ENODEV; - } - - if (irq_is_percpu(irq)) { - err = request_percpu_irq(irq, armpmu->handle_irq, - "arm-pmu", &cpu_hw_events); - - if (err) { - pr_err("unable to request percpu IRQ%d for ARM PMU counters\n", - irq); - armpmu_release_hardware(armpmu); - return err; - } - - on_each_cpu(armpmu_enable_percpu_irq, &irq, 1); - } else { - for (i = 0; i < irqs; ++i) { - int cpu = i; - - err = 0; - irq = platform_get_irq(pmu_device, i); - if (irq <= 0) - continue; - - if 
(armpmu->irq_affinity) - cpu = armpmu->irq_affinity[i]; - - /* - * If we have a single PMU interrupt that we can't shift, - * assume that we're running on a uniprocessor machine and - * continue. Otherwise, continue without this interrupt. - */ - if (irq_set_affinity(irq, cpumask_of(cpu)) && irqs > 1) { - pr_warning("unable to set irq affinity (irq=%d, cpu=%u)\n", - irq, cpu); - continue; - } - - err = request_irq(irq, armpmu->handle_irq, - IRQF_NOBALANCING | IRQF_NO_THREAD, - "arm-pmu", armpmu); - if (err) { - pr_err("unable to request IRQ%d for ARM PMU counters\n", - irq); - armpmu_release_hardware(armpmu); - return err; - } - - cpumask_set_cpu(cpu, &armpmu->active_irqs); - } - } - - return 0; -} - -static void -hw_perf_event_destroy(struct perf_event *event) -{ - struct arm_pmu *armpmu = to_arm_pmu(event->pmu); - atomic_t *active_events = &armpmu->active_events; - struct mutex *pmu_reserve_mutex = &armpmu->reserve_mutex; - - if (atomic_dec_and_mutex_lock(active_events, pmu_reserve_mutex)) { - armpmu_release_hardware(armpmu); - mutex_unlock(pmu_reserve_mutex); - } -} - -static int -event_requires_mode_exclusion(struct perf_event_attr *attr) -{ - return attr->exclude_idle || attr->exclude_user || - attr->exclude_kernel || attr->exclude_hv; -} - -static int -__hw_perf_event_init(struct perf_event *event) -{ - struct arm_pmu *armpmu = to_arm_pmu(event->pmu); - struct hw_perf_event *hwc = &event->hw; - int mapping, err; - - mapping = armpmu->map_event(event); - - if (mapping < 0) { - pr_debug("event %x:%llx not supported\n", event->attr.type, - event->attr.config); - return mapping; - } - - /* - * We don't assign an index until we actually place the event onto - * hardware. Use -1 to signify that we haven't decided where to put it - * yet. For SMP systems, each core has it's own PMU so we can't do any - * clever allocation or constraints checking at this point. - */ - hwc->idx = -1; - hwc->config_base = 0; - hwc->config = 0; - hwc->event_base = 0; - - /* - * Check whether we need to exclude the counter from certain modes. - */ - if ((!armpmu->set_event_filter || - armpmu->set_event_filter(hwc, &event->attr)) && - event_requires_mode_exclusion(&event->attr)) { - pr_debug("ARM performance counters do not support mode exclusion\n"); - return -EPERM; - } - - /* - * Store the event encoding into the config_base field. - */ - hwc->config_base |= (unsigned long)mapping; - - if (!hwc->sample_period) { - /* - * For non-sampling runs, limit the sample_period to half - * of the counter width. That way, the new counter value - * is far less likely to overtake the previous one unless - * you have some serious IRQ latency issues. 
- */ - hwc->sample_period = armpmu->max_period >> 1; - hwc->last_period = hwc->sample_period; - local64_set(&hwc->period_left, hwc->sample_period); - } - - err = 0; - if (event->group_leader != event) { - err = validate_group(event); - if (err) - return -EINVAL; - } - - return err; -} - -static int armpmu_event_init(struct perf_event *event) -{ - struct arm_pmu *armpmu = to_arm_pmu(event->pmu); - int err = 0; - atomic_t *active_events = &armpmu->active_events; - - if (armpmu->map_event(event) == -ENOENT) - return -ENOENT; - - event->destroy = hw_perf_event_destroy; - - if (!atomic_inc_not_zero(active_events)) { - mutex_lock(&armpmu->reserve_mutex); - if (atomic_read(active_events) == 0) - err = armpmu_reserve_hardware(armpmu); - - if (!err) - atomic_inc(active_events); - mutex_unlock(&armpmu->reserve_mutex); - } - if (err) - return err; - - err = __hw_perf_event_init(event); - if (err) - hw_perf_event_destroy(event); - - return err; -} - -static void armpmu_enable(struct pmu *pmu) -{ - struct arm_pmu *armpmu = to_arm_pmu(pmu); - struct pmu_hw_events *hw_events = armpmu->get_hw_events(); - int enabled = bitmap_weight(hw_events->used_mask, armpmu->num_events); - - if (enabled) - armpmu->start(); -} - -static void armpmu_disable(struct pmu *pmu) -{ - struct arm_pmu *armpmu = to_arm_pmu(pmu); - armpmu->stop(); -} - -static void __init armpmu_init(struct arm_pmu *armpmu) -{ - atomic_set(&armpmu->active_events, 0); - mutex_init(&armpmu->reserve_mutex); - - armpmu->pmu = (struct pmu) { - .pmu_enable = armpmu_enable, - .pmu_disable = armpmu_disable, - .event_init = armpmu_event_init, - .add = armpmu_add, - .del = armpmu_del, - .start = armpmu_start, - .stop = armpmu_stop, - .read = armpmu_read, - }; -} - -int __init armpmu_register(struct arm_pmu *armpmu, char *name, int type) -{ - armpmu_init(armpmu); - return perf_pmu_register(&armpmu->pmu, name, type); -} +#include <linux/of.h> +#include <linux/perf/arm_pmu.h> +#include <linux/platform_device.h> /* * ARMv8 PMUv3 Performance Events handling code. @@ -708,6 +69,21 @@ enum armv8_pmuv3_perf_types { ARMV8_PMUV3_PERFCTR_BUS_CYCLES = 0x1D, }; +/* ARMv8 Cortex-A53 specific event types. */ +enum armv8_a53_pmu_perf_types { + ARMV8_A53_PERFCTR_PREFETCH_LINEFILL = 0xC2, +}; + +/* ARMv8 Cortex-A57 specific event types. */ +enum armv8_a57_perf_types { + ARMV8_A57_PERFCTR_L1_DCACHE_ACCESS_LD = 0x40, + ARMV8_A57_PERFCTR_L1_DCACHE_ACCESS_ST = 0x41, + ARMV8_A57_PERFCTR_L1_DCACHE_REFILL_LD = 0x42, + ARMV8_A57_PERFCTR_L1_DCACHE_REFILL_ST = 0x43, + ARMV8_A57_PERFCTR_DTLB_REFILL_LD = 0x4c, + ARMV8_A57_PERFCTR_DTLB_REFILL_ST = 0x4d, +}; + /* PMUv3 HW events mapping. */ static const unsigned armv8_pmuv3_perf_map[PERF_COUNT_HW_MAX] = { PERF_MAP_ALL_UNSUPPORTED, @@ -718,6 +94,28 @@ static const unsigned armv8_pmuv3_perf_map[PERF_COUNT_HW_MAX] = { [PERF_COUNT_HW_BRANCH_MISSES] = ARMV8_PMUV3_PERFCTR_PC_BRANCH_MIS_PRED, }; +/* ARM Cortex-A53 HW events mapping. 
*/ +static const unsigned armv8_a53_perf_map[PERF_COUNT_HW_MAX] = { + PERF_MAP_ALL_UNSUPPORTED, + [PERF_COUNT_HW_CPU_CYCLES] = ARMV8_PMUV3_PERFCTR_CLOCK_CYCLES, + [PERF_COUNT_HW_INSTRUCTIONS] = ARMV8_PMUV3_PERFCTR_INSTR_EXECUTED, + [PERF_COUNT_HW_CACHE_REFERENCES] = ARMV8_PMUV3_PERFCTR_L1_DCACHE_ACCESS, + [PERF_COUNT_HW_CACHE_MISSES] = ARMV8_PMUV3_PERFCTR_L1_DCACHE_REFILL, + [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = ARMV8_PMUV3_PERFCTR_PC_WRITE, + [PERF_COUNT_HW_BRANCH_MISSES] = ARMV8_PMUV3_PERFCTR_PC_BRANCH_MIS_PRED, + [PERF_COUNT_HW_BUS_CYCLES] = ARMV8_PMUV3_PERFCTR_BUS_CYCLES, +}; + +static const unsigned armv8_a57_perf_map[PERF_COUNT_HW_MAX] = { + PERF_MAP_ALL_UNSUPPORTED, + [PERF_COUNT_HW_CPU_CYCLES] = ARMV8_PMUV3_PERFCTR_CLOCK_CYCLES, + [PERF_COUNT_HW_INSTRUCTIONS] = ARMV8_PMUV3_PERFCTR_INSTR_EXECUTED, + [PERF_COUNT_HW_CACHE_REFERENCES] = ARMV8_PMUV3_PERFCTR_L1_DCACHE_ACCESS, + [PERF_COUNT_HW_CACHE_MISSES] = ARMV8_PMUV3_PERFCTR_L1_DCACHE_REFILL, + [PERF_COUNT_HW_BRANCH_MISSES] = ARMV8_PMUV3_PERFCTR_PC_BRANCH_MIS_PRED, + [PERF_COUNT_HW_BUS_CYCLES] = ARMV8_PMUV3_PERFCTR_BUS_CYCLES, +}; + static const unsigned armv8_pmuv3_perf_cache_map[PERF_COUNT_HW_CACHE_MAX] [PERF_COUNT_HW_CACHE_OP_MAX] [PERF_COUNT_HW_CACHE_RESULT_MAX] = { @@ -734,12 +132,60 @@ static const unsigned armv8_pmuv3_perf_cache_map[PERF_COUNT_HW_CACHE_MAX] [C(BPU)][C(OP_WRITE)][C(RESULT_MISS)] = ARMV8_PMUV3_PERFCTR_PC_BRANCH_MIS_PRED, }; +static const unsigned armv8_a53_perf_cache_map[PERF_COUNT_HW_CACHE_MAX] + [PERF_COUNT_HW_CACHE_OP_MAX] + [PERF_COUNT_HW_CACHE_RESULT_MAX] = { + PERF_CACHE_MAP_ALL_UNSUPPORTED, + + [C(L1D)][C(OP_READ)][C(RESULT_ACCESS)] = ARMV8_PMUV3_PERFCTR_L1_DCACHE_ACCESS, + [C(L1D)][C(OP_READ)][C(RESULT_MISS)] = ARMV8_PMUV3_PERFCTR_L1_DCACHE_REFILL, + [C(L1D)][C(OP_WRITE)][C(RESULT_ACCESS)] = ARMV8_PMUV3_PERFCTR_L1_DCACHE_ACCESS, + [C(L1D)][C(OP_WRITE)][C(RESULT_MISS)] = ARMV8_PMUV3_PERFCTR_L1_DCACHE_REFILL, + [C(L1D)][C(OP_PREFETCH)][C(RESULT_MISS)] = ARMV8_A53_PERFCTR_PREFETCH_LINEFILL, + + [C(L1I)][C(OP_READ)][C(RESULT_ACCESS)] = ARMV8_PMUV3_PERFCTR_L1_ICACHE_ACCESS, + [C(L1I)][C(OP_READ)][C(RESULT_MISS)] = ARMV8_PMUV3_PERFCTR_L1_ICACHE_REFILL, + + [C(ITLB)][C(OP_READ)][C(RESULT_MISS)] = ARMV8_PMUV3_PERFCTR_ITLB_REFILL, + + [C(BPU)][C(OP_READ)][C(RESULT_ACCESS)] = ARMV8_PMUV3_PERFCTR_PC_BRANCH_PRED, + [C(BPU)][C(OP_READ)][C(RESULT_MISS)] = ARMV8_PMUV3_PERFCTR_PC_BRANCH_MIS_PRED, + [C(BPU)][C(OP_WRITE)][C(RESULT_ACCESS)] = ARMV8_PMUV3_PERFCTR_PC_BRANCH_PRED, + [C(BPU)][C(OP_WRITE)][C(RESULT_MISS)] = ARMV8_PMUV3_PERFCTR_PC_BRANCH_MIS_PRED, +}; + +static const unsigned armv8_a57_perf_cache_map[PERF_COUNT_HW_CACHE_MAX] + [PERF_COUNT_HW_CACHE_OP_MAX] + [PERF_COUNT_HW_CACHE_RESULT_MAX] = { + PERF_CACHE_MAP_ALL_UNSUPPORTED, + + [C(L1D)][C(OP_READ)][C(RESULT_ACCESS)] = ARMV8_A57_PERFCTR_L1_DCACHE_ACCESS_LD, + [C(L1D)][C(OP_READ)][C(RESULT_MISS)] = ARMV8_A57_PERFCTR_L1_DCACHE_REFILL_LD, + [C(L1D)][C(OP_WRITE)][C(RESULT_ACCESS)] = ARMV8_A57_PERFCTR_L1_DCACHE_ACCESS_ST, + [C(L1D)][C(OP_WRITE)][C(RESULT_MISS)] = ARMV8_A57_PERFCTR_L1_DCACHE_REFILL_ST, + + [C(L1I)][C(OP_READ)][C(RESULT_ACCESS)] = ARMV8_PMUV3_PERFCTR_L1_ICACHE_ACCESS, + [C(L1I)][C(OP_READ)][C(RESULT_MISS)] = ARMV8_PMUV3_PERFCTR_L1_ICACHE_REFILL, + + [C(DTLB)][C(OP_READ)][C(RESULT_MISS)] = ARMV8_A57_PERFCTR_DTLB_REFILL_LD, + [C(DTLB)][C(OP_WRITE)][C(RESULT_MISS)] = ARMV8_A57_PERFCTR_DTLB_REFILL_ST, + + [C(ITLB)][C(OP_READ)][C(RESULT_MISS)] = ARMV8_PMUV3_PERFCTR_ITLB_REFILL, + + [C(BPU)][C(OP_READ)][C(RESULT_ACCESS)] = 
ARMV8_PMUV3_PERFCTR_PC_BRANCH_PRED, + [C(BPU)][C(OP_READ)][C(RESULT_MISS)] = ARMV8_PMUV3_PERFCTR_PC_BRANCH_MIS_PRED, + [C(BPU)][C(OP_WRITE)][C(RESULT_ACCESS)] = ARMV8_PMUV3_PERFCTR_PC_BRANCH_PRED, + [C(BPU)][C(OP_WRITE)][C(RESULT_MISS)] = ARMV8_PMUV3_PERFCTR_PC_BRANCH_MIS_PRED, +}; + + /* * Perf Events' indices */ #define ARMV8_IDX_CYCLE_COUNTER 0 #define ARMV8_IDX_COUNTER0 1 -#define ARMV8_IDX_COUNTER_LAST (ARMV8_IDX_CYCLE_COUNTER + cpu_pmu->num_events - 1) +#define ARMV8_IDX_COUNTER_LAST(cpu_pmu) \ + (ARMV8_IDX_CYCLE_COUNTER + cpu_pmu->num_events - 1) #define ARMV8_MAX_COUNTERS 32 #define ARMV8_COUNTER_MASK (ARMV8_MAX_COUNTERS - 1) @@ -805,49 +251,34 @@ static inline int armv8pmu_has_overflowed(u32 pmovsr) return pmovsr & ARMV8_OVERFLOWED_MASK; } -static inline int armv8pmu_counter_valid(int idx) +static inline int armv8pmu_counter_valid(struct arm_pmu *cpu_pmu, int idx) { - return idx >= ARMV8_IDX_CYCLE_COUNTER && idx <= ARMV8_IDX_COUNTER_LAST; + return idx >= ARMV8_IDX_CYCLE_COUNTER && + idx <= ARMV8_IDX_COUNTER_LAST(cpu_pmu); } static inline int armv8pmu_counter_has_overflowed(u32 pmnc, int idx) { - int ret = 0; - u32 counter; - - if (!armv8pmu_counter_valid(idx)) { - pr_err("CPU%u checking wrong counter %d overflow status\n", - smp_processor_id(), idx); - } else { - counter = ARMV8_IDX_TO_COUNTER(idx); - ret = pmnc & BIT(counter); - } - - return ret; + return pmnc & BIT(ARMV8_IDX_TO_COUNTER(idx)); } static inline int armv8pmu_select_counter(int idx) { - u32 counter; - - if (!armv8pmu_counter_valid(idx)) { - pr_err("CPU%u selecting wrong PMNC counter %d\n", - smp_processor_id(), idx); - return -EINVAL; - } - - counter = ARMV8_IDX_TO_COUNTER(idx); + u32 counter = ARMV8_IDX_TO_COUNTER(idx); asm volatile("msr pmselr_el0, %0" :: "r" (counter)); isb(); return idx; } -static inline u32 armv8pmu_read_counter(int idx) +static inline u32 armv8pmu_read_counter(struct perf_event *event) { + struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu); + struct hw_perf_event *hwc = &event->hw; + int idx = hwc->idx; u32 value = 0; - if (!armv8pmu_counter_valid(idx)) + if (!armv8pmu_counter_valid(cpu_pmu, idx)) pr_err("CPU%u reading wrong counter %d\n", smp_processor_id(), idx); else if (idx == ARMV8_IDX_CYCLE_COUNTER) @@ -858,9 +289,13 @@ static inline u32 armv8pmu_read_counter(int idx) return value; } -static inline void armv8pmu_write_counter(int idx, u32 value) +static inline void armv8pmu_write_counter(struct perf_event *event, u32 value) { - if (!armv8pmu_counter_valid(idx)) + struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu); + struct hw_perf_event *hwc = &event->hw; + int idx = hwc->idx; + + if (!armv8pmu_counter_valid(cpu_pmu, idx)) pr_err("CPU%u writing wrong counter %d\n", smp_processor_id(), idx); else if (idx == ARMV8_IDX_CYCLE_COUNTER) @@ -879,65 +314,34 @@ static inline void armv8pmu_write_evtype(int idx, u32 val) static inline int armv8pmu_enable_counter(int idx) { - u32 counter; - - if (!armv8pmu_counter_valid(idx)) { - pr_err("CPU%u enabling wrong PMNC counter %d\n", - smp_processor_id(), idx); - return -EINVAL; - } - - counter = ARMV8_IDX_TO_COUNTER(idx); + u32 counter = ARMV8_IDX_TO_COUNTER(idx); asm volatile("msr pmcntenset_el0, %0" :: "r" (BIT(counter))); return idx; } static inline int armv8pmu_disable_counter(int idx) { - u32 counter; - - if (!armv8pmu_counter_valid(idx)) { - pr_err("CPU%u disabling wrong PMNC counter %d\n", - smp_processor_id(), idx); - return -EINVAL; - } - - counter = ARMV8_IDX_TO_COUNTER(idx); + u32 counter = ARMV8_IDX_TO_COUNTER(idx); asm volatile("msr 
pmcntenclr_el0, %0" :: "r" (BIT(counter))); return idx; } static inline int armv8pmu_enable_intens(int idx) { - u32 counter; - - if (!armv8pmu_counter_valid(idx)) { - pr_err("CPU%u enabling wrong PMNC counter IRQ enable %d\n", - smp_processor_id(), idx); - return -EINVAL; - } - - counter = ARMV8_IDX_TO_COUNTER(idx); + u32 counter = ARMV8_IDX_TO_COUNTER(idx); asm volatile("msr pmintenset_el1, %0" :: "r" (BIT(counter))); return idx; } static inline int armv8pmu_disable_intens(int idx) { - u32 counter; - - if (!armv8pmu_counter_valid(idx)) { - pr_err("CPU%u disabling wrong PMNC counter IRQ enable %d\n", - smp_processor_id(), idx); - return -EINVAL; - } - - counter = ARMV8_IDX_TO_COUNTER(idx); + u32 counter = ARMV8_IDX_TO_COUNTER(idx); asm volatile("msr pmintenclr_el1, %0" :: "r" (BIT(counter))); isb(); /* Clear the overflow flag in case an interrupt is pending. */ asm volatile("msr pmovsclr_el0, %0" :: "r" (BIT(counter))); isb(); + return idx; } @@ -955,10 +359,13 @@ static inline u32 armv8pmu_getreset_flags(void) return value; } -static void armv8pmu_enable_event(struct hw_perf_event *hwc, int idx) +static void armv8pmu_enable_event(struct perf_event *event) { unsigned long flags; - struct pmu_hw_events *events = cpu_pmu->get_hw_events(); + struct hw_perf_event *hwc = &event->hw; + struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu); + struct pmu_hw_events *events = this_cpu_ptr(cpu_pmu->hw_events); + int idx = hwc->idx; /* * Enable counter and interrupt, and set the counter to count @@ -989,10 +396,13 @@ static void armv8pmu_enable_event(struct hw_perf_event *hwc, int idx) raw_spin_unlock_irqrestore(&events->pmu_lock, flags); } -static void armv8pmu_disable_event(struct hw_perf_event *hwc, int idx) +static void armv8pmu_disable_event(struct perf_event *event) { unsigned long flags; - struct pmu_hw_events *events = cpu_pmu->get_hw_events(); + struct hw_perf_event *hwc = &event->hw; + struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu); + struct pmu_hw_events *events = this_cpu_ptr(cpu_pmu->hw_events); + int idx = hwc->idx; /* * Disable counter and interrupt @@ -1016,7 +426,8 @@ static irqreturn_t armv8pmu_handle_irq(int irq_num, void *dev) { u32 pmovsr; struct perf_sample_data data; - struct pmu_hw_events *cpuc; + struct arm_pmu *cpu_pmu = (struct arm_pmu *)dev; + struct pmu_hw_events *cpuc = this_cpu_ptr(cpu_pmu->hw_events); struct pt_regs *regs; int idx; @@ -1036,7 +447,6 @@ static irqreturn_t armv8pmu_handle_irq(int irq_num, void *dev) */ regs = get_irq_regs(); - cpuc = this_cpu_ptr(&cpu_hw_events); for (idx = 0; idx < cpu_pmu->num_events; ++idx) { struct perf_event *event = cpuc->events[idx]; struct hw_perf_event *hwc; @@ -1053,13 +463,13 @@ static irqreturn_t armv8pmu_handle_irq(int irq_num, void *dev) continue; hwc = &event->hw; - armpmu_event_update(event, hwc, idx); + armpmu_event_update(event); perf_sample_data_init(&data, 0, hwc->last_period); - if (!armpmu_event_set_period(event, hwc, idx)) + if (!armpmu_event_set_period(event)) continue; if (perf_event_overflow(event, &data, regs)) - cpu_pmu->disable(hwc, idx); + cpu_pmu->disable(event); } /* @@ -1074,10 +484,10 @@ static irqreturn_t armv8pmu_handle_irq(int irq_num, void *dev) return IRQ_HANDLED; } -static void armv8pmu_start(void) +static void armv8pmu_start(struct arm_pmu *cpu_pmu) { unsigned long flags; - struct pmu_hw_events *events = cpu_pmu->get_hw_events(); + struct pmu_hw_events *events = this_cpu_ptr(cpu_pmu->hw_events); raw_spin_lock_irqsave(&events->pmu_lock, flags); /* Enable all counters */ @@ -1085,10 +495,10 @@ static 
void armv8pmu_start(void) raw_spin_unlock_irqrestore(&events->pmu_lock, flags); } -static void armv8pmu_stop(void) +static void armv8pmu_stop(struct arm_pmu *cpu_pmu) { unsigned long flags; - struct pmu_hw_events *events = cpu_pmu->get_hw_events(); + struct pmu_hw_events *events = this_cpu_ptr(cpu_pmu->hw_events); raw_spin_lock_irqsave(&events->pmu_lock, flags); /* Disable all counters */ @@ -1097,10 +507,12 @@ static void armv8pmu_stop(void) } static int armv8pmu_get_event_idx(struct pmu_hw_events *cpuc, - struct hw_perf_event *event) + struct perf_event *event) { int idx; - unsigned long evtype = event->config_base & ARMV8_EVTYPE_EVENT; + struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu); + struct hw_perf_event *hwc = &event->hw; + unsigned long evtype = hwc->config_base & ARMV8_EVTYPE_EVENT; /* Always place a cycle counter into the cycle counter. */ if (evtype == ARMV8_PMUV3_PERFCTR_CLOCK_CYCLES) { @@ -1151,11 +563,14 @@ static int armv8pmu_set_event_filter(struct hw_perf_event *event, static void armv8pmu_reset(void *info) { + struct arm_pmu *cpu_pmu = (struct arm_pmu *)info; u32 idx, nb_cnt = cpu_pmu->num_events; /* The counter and interrupt enable registers are unknown at reset. */ - for (idx = ARMV8_IDX_CYCLE_COUNTER; idx < nb_cnt; ++idx) - armv8pmu_disable_event(NULL, idx); + for (idx = ARMV8_IDX_CYCLE_COUNTER; idx < nb_cnt; ++idx) { + armv8pmu_disable_counter(idx); + armv8pmu_disable_intens(idx); + } /* Initialize & Reset PMNC: C and P bits. */ armv8pmu_pmcr_write(ARMV8_PMCR_P | ARMV8_PMCR_C); @@ -1166,169 +581,104 @@ static void armv8pmu_reset(void *info) static int armv8_pmuv3_map_event(struct perf_event *event) { - return map_cpu_event(event, &armv8_pmuv3_perf_map, + return armpmu_map_event(event, &armv8_pmuv3_perf_map, &armv8_pmuv3_perf_cache_map, ARMV8_EVTYPE_EVENT); } -static struct arm_pmu armv8pmu = { - .handle_irq = armv8pmu_handle_irq, - .enable = armv8pmu_enable_event, - .disable = armv8pmu_disable_event, - .read_counter = armv8pmu_read_counter, - .write_counter = armv8pmu_write_counter, - .get_event_idx = armv8pmu_get_event_idx, - .start = armv8pmu_start, - .stop = armv8pmu_stop, - .reset = armv8pmu_reset, - .max_period = (1LLU << 32) - 1, -}; +static int armv8_a53_map_event(struct perf_event *event) +{ + return armpmu_map_event(event, &armv8_a53_perf_map, + &armv8_a53_perf_cache_map, + ARMV8_EVTYPE_EVENT); +} -static u32 __init armv8pmu_read_num_pmnc_events(void) +static int armv8_a57_map_event(struct perf_event *event) { - u32 nb_cnt; + return armpmu_map_event(event, &armv8_a57_perf_map, + &armv8_a57_perf_cache_map, + ARMV8_EVTYPE_EVENT); +} + +static void armv8pmu_read_num_pmnc_events(void *info) +{ + int *nb_cnt = info; /* Read the nb of CNTx counters supported from PMNC */ - nb_cnt = (armv8pmu_pmcr_read() >> ARMV8_PMCR_N_SHIFT) & ARMV8_PMCR_N_MASK; + *nb_cnt = (armv8pmu_pmcr_read() >> ARMV8_PMCR_N_SHIFT) & ARMV8_PMCR_N_MASK; - /* Add the CPU cycles counter and return */ - return nb_cnt + 1; + /* Add the CPU cycles counter */ + *nb_cnt += 1; } -static struct arm_pmu *__init armv8_pmuv3_pmu_init(void) +static int armv8pmu_probe_num_events(struct arm_pmu *arm_pmu) { - armv8pmu.name = "arm/armv8-pmuv3"; - armv8pmu.map_event = armv8_pmuv3_map_event; - armv8pmu.num_events = armv8pmu_read_num_pmnc_events(); - armv8pmu.set_event_filter = armv8pmu_set_event_filter; - return &armv8pmu; + return smp_call_function_any(&arm_pmu->supported_cpus, + armv8pmu_read_num_pmnc_events, + &arm_pmu->num_events, 1); } -/* - * Ensure the PMU has sane values out of reset. 
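
The event maps above are consulted by the shared arm_pmu framework when a generic perf event (PERF_COUNT_HW_*) is opened; slots pre-filled by PERF_MAP_ALL_UNSUPPORTED make the lookup fail cleanly. A minimal sketch of that resolution step, assuming the framework's HW_OP_UNSUPPORTED sentinel (the real code is armpmu_map_event() in the shared arm_pmu layer, not this file):

	/* Illustration only: resolve a generic perf config via a map table. */
	static int sketch_map_hw_event(const unsigned *event_map, u64 config)
	{
		unsigned int mapping;

		if (config >= PERF_COUNT_HW_MAX)
			return -EINVAL;

		mapping = event_map[config];
		return mapping == HW_OP_UNSUPPORTED ? -ENOENT : (int)mapping;
	}

This is also why the Cortex-A57 map can simply omit PERF_COUNT_HW_BRANCH_INSTRUCTIONS: the unsupported slot rejects the generic event rather than counting something misleading.
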
- * This requires SMP to be available, so exists as a separate initcall. - */ -static int __init -cpu_pmu_reset(void) +static void armv8_pmu_init(struct arm_pmu *cpu_pmu) { - if (cpu_pmu && cpu_pmu->reset) - return on_each_cpu(cpu_pmu->reset, NULL, 1); - return 0; + cpu_pmu->handle_irq = armv8pmu_handle_irq, + cpu_pmu->enable = armv8pmu_enable_event, + cpu_pmu->disable = armv8pmu_disable_event, + cpu_pmu->read_counter = armv8pmu_read_counter, + cpu_pmu->write_counter = armv8pmu_write_counter, + cpu_pmu->get_event_idx = armv8pmu_get_event_idx, + cpu_pmu->start = armv8pmu_start, + cpu_pmu->stop = armv8pmu_stop, + cpu_pmu->reset = armv8pmu_reset, + cpu_pmu->max_period = (1LLU << 32) - 1, + cpu_pmu->set_event_filter = armv8pmu_set_event_filter; } -arch_initcall(cpu_pmu_reset); - -/* - * PMU platform driver and devicetree bindings. - */ -static const struct of_device_id armpmu_of_device_ids[] = { - {.compatible = "arm,armv8-pmuv3"}, - {}, -}; -static int armpmu_device_probe(struct platform_device *pdev) +static int armv8_pmuv3_init(struct arm_pmu *cpu_pmu) { - int i, irq, *irqs; - - if (!cpu_pmu) - return -ENODEV; - - /* Don't bother with PPIs; they're already affine */ - irq = platform_get_irq(pdev, 0); - if (irq >= 0 && irq_is_percpu(irq)) - goto out; - - irqs = kcalloc(pdev->num_resources, sizeof(*irqs), GFP_KERNEL); - if (!irqs) - return -ENOMEM; - - for (i = 0; i < pdev->num_resources; ++i) { - struct device_node *dn; - int cpu; - - dn = of_parse_phandle(pdev->dev.of_node, "interrupt-affinity", - i); - if (!dn) { - pr_warn("Failed to parse %s/interrupt-affinity[%d]\n", - of_node_full_name(pdev->dev.of_node), i); - break; - } - - for_each_possible_cpu(cpu) - if (dn == of_cpu_device_node_get(cpu)) - break; - - if (cpu >= nr_cpu_ids) { - pr_warn("Failed to find logical CPU for %s\n", - dn->name); - of_node_put(dn); - break; - } - of_node_put(dn); - - irqs[i] = cpu; - } - - if (i == pdev->num_resources) - cpu_pmu->irq_affinity = irqs; - else - kfree(irqs); - -out: - cpu_pmu->plat_device = pdev; - return 0; + armv8_pmu_init(cpu_pmu); + cpu_pmu->name = "armv8_pmuv3"; + cpu_pmu->map_event = armv8_pmuv3_map_event; + return armv8pmu_probe_num_events(cpu_pmu); } -static struct platform_driver armpmu_driver = { - .driver = { - .name = "arm-pmu", - .of_match_table = armpmu_of_device_ids, - }, - .probe = armpmu_device_probe, -}; - -static int __init register_pmu_driver(void) +static int armv8_a53_pmu_init(struct arm_pmu *cpu_pmu) { - return platform_driver_register(&armpmu_driver); + armv8_pmu_init(cpu_pmu); + cpu_pmu->name = "armv8_cortex_a53"; + cpu_pmu->map_event = armv8_a53_map_event; + return armv8pmu_probe_num_events(cpu_pmu); } -device_initcall(register_pmu_driver); -static struct pmu_hw_events *armpmu_get_cpu_events(void) +static int armv8_a57_pmu_init(struct arm_pmu *cpu_pmu) { - return this_cpu_ptr(&cpu_hw_events); + armv8_pmu_init(cpu_pmu); + cpu_pmu->name = "armv8_cortex_a57"; + cpu_pmu->map_event = armv8_a57_map_event; + return armv8pmu_probe_num_events(cpu_pmu); } -static void __init cpu_pmu_init(struct arm_pmu *armpmu) -{ - int cpu; - for_each_possible_cpu(cpu) { - struct pmu_hw_events *events = &per_cpu(cpu_hw_events, cpu); - events->events = per_cpu(hw_events, cpu); - events->used_mask = per_cpu(used_mask, cpu); - raw_spin_lock_init(&events->pmu_lock); - } - armpmu->get_hw_events = armpmu_get_cpu_events; -} +static const struct of_device_id armv8_pmu_of_device_ids[] = { + {.compatible = "arm,armv8-pmuv3", .data = armv8_pmuv3_init}, + {.compatible = "arm,cortex-a53-pmu", .data = 
armv8_a53_pmu_init}, + {.compatible = "arm,cortex-a57-pmu", .data = armv8_a57_pmu_init}, + {}, +}; -static int __init init_hw_perf_events(void) +static int armv8_pmu_device_probe(struct platform_device *pdev) { - u64 dfr = read_cpuid(ID_AA64DFR0_EL1); - - switch ((dfr >> 8) & 0xf) { - case 0x1: /* PMUv3 */ - cpu_pmu = armv8_pmuv3_pmu_init(); - break; - } + return arm_pmu_device_probe(pdev, armv8_pmu_of_device_ids, NULL); +} - if (cpu_pmu) { - pr_info("enabled with %s PMU driver, %d counters available\n", - cpu_pmu->name, cpu_pmu->num_events); - cpu_pmu_init(cpu_pmu); - armpmu_register(cpu_pmu, "cpu", PERF_TYPE_RAW); - } else { - pr_info("no hardware support available\n"); - } +static struct platform_driver armv8_pmu_driver = { + .driver = { + .name = "armv8-pmu", + .of_match_table = armv8_pmu_of_device_ids, + }, + .probe = armv8_pmu_device_probe, +}; - return 0; +static int __init register_armv8_pmu_driver(void) +{ + return platform_driver_register(&armv8_pmu_driver); } -early_initcall(init_hw_perf_events); - +device_initcall(register_armv8_pmu_driver); diff --git a/arch/arm64/kernel/smp.c b/arch/arm64/kernel/smp.c index dbdaacddd9a5..03b0aa28ea61 100644 --- a/arch/arm64/kernel/smp.c +++ b/arch/arm64/kernel/smp.c @@ -142,7 +142,6 @@ asmlinkage void secondary_start_kernel(void) */ atomic_inc(&mm->mm_count); current->active_mm = mm; - cpumask_set_cpu(cpu, mm_cpumask(mm)); set_my_cpu_offset(per_cpu_offset(smp_processor_id())); printk("CPU%u: Booted secondary processor\n", cpu); @@ -152,7 +151,7 @@ asmlinkage void secondary_start_kernel(void) * point to zero page to avoid speculatively fetching new entries. */ cpu_set_reserved_ttbr0(); - flush_tlb_all(); + local_flush_tlb_all(); cpu_set_default_tcr_t0sz(); preempt_disable(); @@ -233,12 +232,6 @@ int __cpu_disable(void) * OK - migrate IRQs away from this CPU */ migrate_irqs(); - - /* - * Remove this CPU from the vm mask set of all processes. 
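
The smp.c and suspend.c hunks replace broadcast TLB invalidation with a CPU-local one: a core that has just booted or resumed only needs to clean its own TLB, and the mm_cpumask bookkeeping goes away because the new ASID allocator in arch/arm64/mm/context.c (further below) no longer depends on it. A sketch of the two flavours, assuming the usual arm64 barrier discipline of asm/tlbflush.h:

	static inline void sketch_local_flush_tlb_all(void)
	{
		asm volatile(
		"	dsb	nshst\n"
		"	tlbi	vmalle1\n"	/* this CPU only */
		"	dsb	nsh\n"
		"	isb"
		: : : "memory");
	}

	static inline void sketch_flush_tlb_all(void)
	{
		asm volatile(
		"	dsb	ishst\n"
		"	tlbi	vmalle1is\n"	/* broadcast, inner shareable */
		"	dsb	ish\n"
		"	isb"
		: : : "memory");
	}

The non-shareable barriers (nshst/nsh) suffice in the local case because no other observer needs to see the invalidation complete.
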
- */ - clear_tasks_mm_cpumask(cpu); - return 0; } diff --git a/arch/arm64/kernel/suspend.c b/arch/arm64/kernel/suspend.c index 8297d502217e..3c5e4e6dcf68 100644 --- a/arch/arm64/kernel/suspend.c +++ b/arch/arm64/kernel/suspend.c @@ -90,7 +90,7 @@ int cpu_suspend(unsigned long arg, int (*fn)(unsigned long)) else cpu_switch_mm(mm->pgd, mm); - flush_tlb_all(); + local_flush_tlb_all(); /* * Restore per-cpu offset before any kernel diff --git a/arch/arm64/lib/copy_from_user.S b/arch/arm64/lib/copy_from_user.S index 1be9ef27be97..4699cd74f87e 100644 --- a/arch/arm64/lib/copy_from_user.S +++ b/arch/arm64/lib/copy_from_user.S @@ -18,6 +18,7 @@ #include <asm/alternative.h> #include <asm/assembler.h> +#include <asm/cache.h> #include <asm/cpufeature.h> #include <asm/sysreg.h> @@ -31,49 +32,58 @@ * Returns: * x0 - bytes not copied */ + + .macro ldrb1 ptr, regB, val + USER(9998f, ldrb \ptr, [\regB], \val) + .endm + + .macro strb1 ptr, regB, val + strb \ptr, [\regB], \val + .endm + + .macro ldrh1 ptr, regB, val + USER(9998f, ldrh \ptr, [\regB], \val) + .endm + + .macro strh1 ptr, regB, val + strh \ptr, [\regB], \val + .endm + + .macro ldr1 ptr, regB, val + USER(9998f, ldr \ptr, [\regB], \val) + .endm + + .macro str1 ptr, regB, val + str \ptr, [\regB], \val + .endm + + .macro ldp1 ptr, regB, regC, val + USER(9998f, ldp \ptr, \regB, [\regC], \val) + .endm + + .macro stp1 ptr, regB, regC, val + stp \ptr, \regB, [\regC], \val + .endm + +end .req x5 ENTRY(__copy_from_user) ALTERNATIVE("nop", __stringify(SET_PSTATE_PAN(0)), ARM64_HAS_PAN, \ CONFIG_ARM64_PAN) - add x5, x1, x2 // upper user buffer boundary - subs x2, x2, #16 - b.mi 1f -0: -USER(9f, ldp x3, x4, [x1], #16) - subs x2, x2, #16 - stp x3, x4, [x0], #16 - b.pl 0b -1: adds x2, x2, #8 - b.mi 2f -USER(9f, ldr x3, [x1], #8 ) - sub x2, x2, #8 - str x3, [x0], #8 -2: adds x2, x2, #4 - b.mi 3f -USER(9f, ldr w3, [x1], #4 ) - sub x2, x2, #4 - str w3, [x0], #4 -3: adds x2, x2, #2 - b.mi 4f -USER(9f, ldrh w3, [x1], #2 ) - sub x2, x2, #2 - strh w3, [x0], #2 -4: adds x2, x2, #1 - b.mi 5f -USER(9f, ldrb w3, [x1] ) - strb w3, [x0] -5: mov x0, #0 + add end, x0, x2 +#include "copy_template.S" ALTERNATIVE("nop", __stringify(SET_PSTATE_PAN(1)), ARM64_HAS_PAN, \ CONFIG_ARM64_PAN) + mov x0, #0 // Nothing to copy ret ENDPROC(__copy_from_user) .section .fixup,"ax" .align 2 -9: sub x2, x5, x1 - mov x3, x2 -10: strb wzr, [x0], #1 // zero remaining buffer space - subs x3, x3, #1 - b.ne 10b - mov x0, x2 // bytes not copied +9998: + sub x0, end, dst +9999: + strb wzr, [dst], #1 // zero remaining buffer space + cmp dst, end + b.lo 9999b ret .previous diff --git a/arch/arm64/lib/copy_in_user.S b/arch/arm64/lib/copy_in_user.S index 1b94661e22b3..81c8fc93c100 100644 --- a/arch/arm64/lib/copy_in_user.S +++ b/arch/arm64/lib/copy_in_user.S @@ -20,6 +20,7 @@ #include <asm/alternative.h> #include <asm/assembler.h> +#include <asm/cache.h> #include <asm/cpufeature.h> #include <asm/sysreg.h> @@ -33,44 +34,52 @@ * Returns: * x0 - bytes not copied */ + .macro ldrb1 ptr, regB, val + USER(9998f, ldrb \ptr, [\regB], \val) + .endm + + .macro strb1 ptr, regB, val + USER(9998f, strb \ptr, [\regB], \val) + .endm + + .macro ldrh1 ptr, regB, val + USER(9998f, ldrh \ptr, [\regB], \val) + .endm + + .macro strh1 ptr, regB, val + USER(9998f, strh \ptr, [\regB], \val) + .endm + + .macro ldr1 ptr, regB, val + USER(9998f, ldr \ptr, [\regB], \val) + .endm + + .macro str1 ptr, regB, val + USER(9998f, str \ptr, [\regB], \val) + .endm + + .macro ldp1 ptr, regB, regC, val + USER(9998f, ldp \ptr, \regB, [\regC], 
\val) + .endm + + .macro stp1 ptr, regB, regC, val + USER(9998f, stp \ptr, \regB, [\regC], \val) + .endm + +end .req x5 ENTRY(__copy_in_user) ALTERNATIVE("nop", __stringify(SET_PSTATE_PAN(0)), ARM64_HAS_PAN, \ CONFIG_ARM64_PAN) - add x5, x0, x2 // upper user buffer boundary - subs x2, x2, #16 - b.mi 1f -0: -USER(9f, ldp x3, x4, [x1], #16) - subs x2, x2, #16 -USER(9f, stp x3, x4, [x0], #16) - b.pl 0b -1: adds x2, x2, #8 - b.mi 2f -USER(9f, ldr x3, [x1], #8 ) - sub x2, x2, #8 -USER(9f, str x3, [x0], #8 ) -2: adds x2, x2, #4 - b.mi 3f -USER(9f, ldr w3, [x1], #4 ) - sub x2, x2, #4 -USER(9f, str w3, [x0], #4 ) -3: adds x2, x2, #2 - b.mi 4f -USER(9f, ldrh w3, [x1], #2 ) - sub x2, x2, #2 -USER(9f, strh w3, [x0], #2 ) -4: adds x2, x2, #1 - b.mi 5f -USER(9f, ldrb w3, [x1] ) -USER(9f, strb w3, [x0] ) -5: mov x0, #0 + add end, x0, x2 +#include "copy_template.S" ALTERNATIVE("nop", __stringify(SET_PSTATE_PAN(1)), ARM64_HAS_PAN, \ CONFIG_ARM64_PAN) + mov x0, #0 ret ENDPROC(__copy_in_user) .section .fixup,"ax" .align 2 -9: sub x0, x5, x0 // bytes not copied +9998: sub x0, end, dst // bytes not copied ret .previous diff --git a/arch/arm64/lib/copy_template.S b/arch/arm64/lib/copy_template.S new file mode 100644 index 000000000000..410fbdb8163f --- /dev/null +++ b/arch/arm64/lib/copy_template.S @@ -0,0 +1,193 @@ +/* + * Copyright (C) 2013 ARM Ltd. + * Copyright (C) 2013 Linaro. + * + * This code is based on glibc cortex strings work originally authored by Linaro + * and re-licensed under GPLv2 for the Linux kernel. The original code can + * be found @ + * + * http://bazaar.launchpad.net/~linaro-toolchain-dev/cortex-strings/trunk/ + * files/head:/src/aarch64/ + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see <http://www.gnu.org/licenses/>. + */ + + +/* + * Copy a buffer from src to dest (alignment handled by the hardware) + * + * Parameters: + * x0 - dest + * x1 - src + * x2 - n + * Returns: + * x0 - dest + */ +dstin .req x0 +src .req x1 +count .req x2 +tmp1 .req x3 +tmp1w .req w3 +tmp2 .req x4 +tmp2w .req w4 +dst .req x6 + +A_l .req x7 +A_h .req x8 +B_l .req x9 +B_h .req x10 +C_l .req x11 +C_h .req x12 +D_l .req x13 +D_h .req x14 + + mov dst, dstin + cmp count, #16 + /* When the length is less than 16, the accesses may not be aligned. */ + b.lo .Ltiny15 + + neg tmp2, src + ands tmp2, tmp2, #15 /* Bytes to reach alignment. */ + b.eq .LSrcAligned + sub count, count, tmp2 + /* + * Copy the leading bytes from src to dst in increasing address + * order. This removes the risk of overwriting source data when the + * distance between src and dst is less than 16. The memory accesses + * here are aligned. 
+ */ + tbz tmp2, #0, 1f + ldrb1 tmp1w, src, #1 + strb1 tmp1w, dst, #1 +1: + tbz tmp2, #1, 2f + ldrh1 tmp1w, src, #2 + strh1 tmp1w, dst, #2 +2: + tbz tmp2, #2, 3f + ldr1 tmp1w, src, #4 + str1 tmp1w, dst, #4 +3: + tbz tmp2, #3, .LSrcAligned + ldr1 tmp1, src, #8 + str1 tmp1, dst, #8 + +.LSrcAligned: + cmp count, #64 + b.ge .Lcpy_over64 + /* + * Deal with small copies quickly by dropping straight into the + * exit block. + */ +.Ltail63: + /* + * Copy up to 48 bytes of data. At this point we only need the + * bottom 6 bits of count to be accurate. + */ + ands tmp1, count, #0x30 + b.eq .Ltiny15 + cmp tmp1w, #0x20 + b.eq 1f + b.lt 2f + ldp1 A_l, A_h, src, #16 + stp1 A_l, A_h, dst, #16 +1: + ldp1 A_l, A_h, src, #16 + stp1 A_l, A_h, dst, #16 +2: + ldp1 A_l, A_h, src, #16 + stp1 A_l, A_h, dst, #16 +.Ltiny15: + /* + * Prefer to break one ldp/stp into several loads/stores that access + * memory in increasing address order, rather than loading/storing 16 + * bytes from (src-16) to (dst-16) and winding src back to an aligned + * address, as the original cortex-strings memcpy did. Keeping that + * scheme would require memmove to guarantee that src is at least 16 + * bytes above dst, otherwise some source data would be overwritten + * when memmove calls memcpy directly. Dropping it keeps memmove + * simple and decouples memcpy from memmove. + */ + tbz count, #3, 1f + ldr1 tmp1, src, #8 + str1 tmp1, dst, #8 +1: + tbz count, #2, 2f + ldr1 tmp1w, src, #4 + str1 tmp1w, dst, #4 +2: + tbz count, #1, 3f + ldrh1 tmp1w, src, #2 + strh1 tmp1w, dst, #2 +3: + tbz count, #0, .Lexitfunc + ldrb1 tmp1w, src, #1 + strb1 tmp1w, dst, #1 + + b .Lexitfunc + +.Lcpy_over64: + subs count, count, #128 + b.ge .Lcpy_body_large + /* + * Less than 128 bytes to copy, so handle 64 here and then jump + * to the tail. + */ + ldp1 A_l, A_h, src, #16 + stp1 A_l, A_h, dst, #16 + ldp1 B_l, B_h, src, #16 + ldp1 C_l, C_h, src, #16 + stp1 B_l, B_h, dst, #16 + stp1 C_l, C_h, dst, #16 + ldp1 D_l, D_h, src, #16 + stp1 D_l, D_h, dst, #16 + + tst count, #0x3f + b.ne .Ltail63 + b .Lexitfunc + + /* + * Critical loop. Start at a new cache line boundary. Assuming + * 64 bytes per line this ensures the entire loop is in one line. + */ + .p2align L1_CACHE_SHIFT +.Lcpy_body_large: + /* Pre-load 64 bytes of data. */ + ldp1 A_l, A_h, src, #16 + ldp1 B_l, B_h, src, #16 + ldp1 C_l, C_h, src, #16 + ldp1 D_l, D_h, src, #16 +1: + /* + * Interleave the load of the next 64-byte block with the store of + * the previously loaded 64 bytes. 
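
This critical loop is software-pipelined: each iteration stores the 64 bytes loaded by the previous iteration while issuing the loads for the next 64, hiding load latency behind the stores. A loose C analogue of the same structure, for illustration only:

	static void sketch_pipelined_copy(long *dst, const long *src, size_t n)
	{
		long cur = *src++;		/* prime the pipeline */

		while (--n) {
			long next = *src++;	/* load ahead */
			*dst++ = cur;		/* store the previous load */
			cur = next;
		}
		*dst = cur;			/* drain */
	}

(n must be at least 1 here; the assembly handles the short cases separately via .Ltail63 and .Ltiny15.)
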
+ */ + stp1 A_l, A_h, dst, #16 + ldp1 A_l, A_h, src, #16 + stp1 B_l, B_h, dst, #16 + ldp1 B_l, B_h, src, #16 + stp1 C_l, C_h, dst, #16 + ldp1 C_l, C_h, src, #16 + stp1 D_l, D_h, dst, #16 + ldp1 D_l, D_h, src, #16 + subs count, count, #64 + b.ge 1b + stp1 A_l, A_h, dst, #16 + stp1 B_l, B_h, dst, #16 + stp1 C_l, C_h, dst, #16 + stp1 D_l, D_h, dst, #16 + + tst count, #0x3f + b.ne .Ltail63 +.Lexitfunc: diff --git a/arch/arm64/lib/copy_to_user.S b/arch/arm64/lib/copy_to_user.S index a257b47e2dc4..7512bbbc07ac 100644 --- a/arch/arm64/lib/copy_to_user.S +++ b/arch/arm64/lib/copy_to_user.S @@ -18,6 +18,7 @@ #include <asm/alternative.h> #include <asm/assembler.h> +#include <asm/cache.h> #include <asm/cpufeature.h> #include <asm/sysreg.h> @@ -31,44 +32,52 @@ * Returns: * x0 - bytes not copied */ + .macro ldrb1 ptr, regB, val + ldrb \ptr, [\regB], \val + .endm + + .macro strb1 ptr, regB, val + USER(9998f, strb \ptr, [\regB], \val) + .endm + + .macro ldrh1 ptr, regB, val + ldrh \ptr, [\regB], \val + .endm + + .macro strh1 ptr, regB, val + USER(9998f, strh \ptr, [\regB], \val) + .endm + + .macro ldr1 ptr, regB, val + ldr \ptr, [\regB], \val + .endm + + .macro str1 ptr, regB, val + USER(9998f, str \ptr, [\regB], \val) + .endm + + .macro ldp1 ptr, regB, regC, val + ldp \ptr, \regB, [\regC], \val + .endm + + .macro stp1 ptr, regB, regC, val + USER(9998f, stp \ptr, \regB, [\regC], \val) + .endm + +end .req x5 ENTRY(__copy_to_user) ALTERNATIVE("nop", __stringify(SET_PSTATE_PAN(0)), ARM64_HAS_PAN, \ CONFIG_ARM64_PAN) - add x5, x0, x2 // upper user buffer boundary - subs x2, x2, #16 - b.mi 1f -0: - ldp x3, x4, [x1], #16 - subs x2, x2, #16 -USER(9f, stp x3, x4, [x0], #16) - b.pl 0b -1: adds x2, x2, #8 - b.mi 2f - ldr x3, [x1], #8 - sub x2, x2, #8 -USER(9f, str x3, [x0], #8 ) -2: adds x2, x2, #4 - b.mi 3f - ldr w3, [x1], #4 - sub x2, x2, #4 -USER(9f, str w3, [x0], #4 ) -3: adds x2, x2, #2 - b.mi 4f - ldrh w3, [x1], #2 - sub x2, x2, #2 -USER(9f, strh w3, [x0], #2 ) -4: adds x2, x2, #1 - b.mi 5f - ldrb w3, [x1] -USER(9f, strb w3, [x0] ) -5: mov x0, #0 + add end, x0, x2 +#include "copy_template.S" ALTERNATIVE("nop", __stringify(SET_PSTATE_PAN(1)), ARM64_HAS_PAN, \ CONFIG_ARM64_PAN) + mov x0, #0 ret ENDPROC(__copy_to_user) .section .fixup,"ax" .align 2 -9: sub x0, x5, x0 // bytes not copied +9998: sub x0, end, dst // bytes not copied ret .previous diff --git a/arch/arm64/lib/memcpy.S b/arch/arm64/lib/memcpy.S index 8a9a96d3ddae..173a1aace9bb 100644 --- a/arch/arm64/lib/memcpy.S +++ b/arch/arm64/lib/memcpy.S @@ -36,166 +36,39 @@ * Returns: * x0 - dest */ -dstin .req x0 -src .req x1 -count .req x2 -tmp1 .req x3 -tmp1w .req w3 -tmp2 .req x4 -tmp2w .req w4 -tmp3 .req x5 -tmp3w .req w5 -dst .req x6 + .macro ldrb1 ptr, regB, val + ldrb \ptr, [\regB], \val + .endm -A_l .req x7 -A_h .req x8 -B_l .req x9 -B_h .req x10 -C_l .req x11 -C_h .req x12 -D_l .req x13 -D_h .req x14 + .macro strb1 ptr, regB, val + strb \ptr, [\regB], \val + .endm -ENTRY(memcpy) - mov dst, dstin - cmp count, #16 - /*When memory length is less than 16, the accessed are not aligned.*/ - b.lo .Ltiny15 + .macro ldrh1 ptr, regB, val + ldrh \ptr, [\regB], \val + .endm - neg tmp2, src - ands tmp2, tmp2, #15/* Bytes to reach alignment. */ - b.eq .LSrcAligned - sub count, count, tmp2 - /* - * Copy the leading memory data from src to dst in an increasing - * address order.By this way,the risk of overwritting the source - * memory data is eliminated when the distance between src and - * dst is less than 16. The memory accesses here are alignment. 
- */ - tbz tmp2, #0, 1f - ldrb tmp1w, [src], #1 - strb tmp1w, [dst], #1 -1: - tbz tmp2, #1, 2f - ldrh tmp1w, [src], #2 - strh tmp1w, [dst], #2 -2: - tbz tmp2, #2, 3f - ldr tmp1w, [src], #4 - str tmp1w, [dst], #4 -3: - tbz tmp2, #3, .LSrcAligned - ldr tmp1, [src],#8 - str tmp1, [dst],#8 + .macro strh1 ptr, regB, val + strh \ptr, [\regB], \val + .endm -.LSrcAligned: - cmp count, #64 - b.ge .Lcpy_over64 - /* - * Deal with small copies quickly by dropping straight into the - * exit block. - */ -.Ltail63: - /* - * Copy up to 48 bytes of data. At this point we only need the - * bottom 6 bits of count to be accurate. - */ - ands tmp1, count, #0x30 - b.eq .Ltiny15 - cmp tmp1w, #0x20 - b.eq 1f - b.lt 2f - ldp A_l, A_h, [src], #16 - stp A_l, A_h, [dst], #16 -1: - ldp A_l, A_h, [src], #16 - stp A_l, A_h, [dst], #16 -2: - ldp A_l, A_h, [src], #16 - stp A_l, A_h, [dst], #16 -.Ltiny15: - /* - * Prefer to break one ldp/stp into several load/store to access - * memory in an increasing address order,rather than to load/store 16 - * bytes from (src-16) to (dst-16) and to backward the src to aligned - * address,which way is used in original cortex memcpy. If keeping - * the original memcpy process here, memmove need to satisfy the - * precondition that src address is at least 16 bytes bigger than dst - * address,otherwise some source data will be overwritten when memove - * call memcpy directly. To make memmove simpler and decouple the - * memcpy's dependency on memmove, withdrew the original process. - */ - tbz count, #3, 1f - ldr tmp1, [src], #8 - str tmp1, [dst], #8 -1: - tbz count, #2, 2f - ldr tmp1w, [src], #4 - str tmp1w, [dst], #4 -2: - tbz count, #1, 3f - ldrh tmp1w, [src], #2 - strh tmp1w, [dst], #2 -3: - tbz count, #0, .Lexitfunc - ldrb tmp1w, [src] - strb tmp1w, [dst] + .macro ldr1 ptr, regB, val + ldr \ptr, [\regB], \val + .endm -.Lexitfunc: - ret + .macro str1 ptr, regB, val + str \ptr, [\regB], \val + .endm -.Lcpy_over64: - subs count, count, #128 - b.ge .Lcpy_body_large - /* - * Less than 128 bytes to copy, so handle 64 here and then jump - * to the tail. - */ - ldp A_l, A_h, [src],#16 - stp A_l, A_h, [dst],#16 - ldp B_l, B_h, [src],#16 - ldp C_l, C_h, [src],#16 - stp B_l, B_h, [dst],#16 - stp C_l, C_h, [dst],#16 - ldp D_l, D_h, [src],#16 - stp D_l, D_h, [dst],#16 + .macro ldp1 ptr, regB, regC, val + ldp \ptr, \regB, [\regC], \val + .endm - tst count, #0x3f - b.ne .Ltail63 - ret + .macro stp1 ptr, regB, regC, val + stp \ptr, \regB, [\regC], \val + .endm - /* - * Critical loop. Start at a new cache line boundary. Assuming - * 64 bytes per line this ensures the entire loop is in one line. - */ - .p2align L1_CACHE_SHIFT -.Lcpy_body_large: - /* pre-get 64 bytes data. */ - ldp A_l, A_h, [src],#16 - ldp B_l, B_h, [src],#16 - ldp C_l, C_h, [src],#16 - ldp D_l, D_h, [src],#16 -1: - /* - * interlace the load of next 64 bytes data block with store of the last - * loaded 64 bytes data. 
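
With this rewrite, memcpy, __copy_to_user, __copy_from_user and __copy_in_user all #include the same copy_template.S, differing only in which accessor macros carry a USER() fixup annotation: __copy_to_user wraps the stores, __copy_from_user wraps the loads (and zero-fills the destination tail in its fixup), __copy_in_user wraps both, and memcpy wraps neither. The same include-a-template idiom can be rendered in C; a small analogue with invented names, for illustration:

	#include <stddef.h>

	/* One body, specialised twice through a macro parameter. */
	#define DEFINE_COPY(name, LOAD)					\
	static void name(char *dst, const char *src, size_t n)		\
	{								\
		while (n--)						\
			*dst++ = LOAD(src++);				\
	}

	#define LOAD_PLAIN(p)	(*(p))
	#define LOAD_CHECKED(p)	(checked_load(p))	/* imagine a fault-handling wrapper */

	static char checked_load(const char *p) { return *p; }

	DEFINE_COPY(copy_plain, LOAD_PLAIN)
	DEFINE_COPY(copy_checked, LOAD_CHECKED)

The payoff is a single optimised copy routine to maintain instead of four diverging ones.
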
- */ - stp A_l, A_h, [dst],#16 - ldp A_l, A_h, [src],#16 - stp B_l, B_h, [dst],#16 - ldp B_l, B_h, [src],#16 - stp C_l, C_h, [dst],#16 - ldp C_l, C_h, [src],#16 - stp D_l, D_h, [dst],#16 - ldp D_l, D_h, [src],#16 - subs count, count, #64 - b.ge 1b - stp A_l, A_h, [dst],#16 - stp B_l, B_h, [dst],#16 - stp C_l, C_h, [dst],#16 - stp D_l, D_h, [dst],#16 - - tst count, #0x3f - b.ne .Ltail63 +ENTRY(memcpy) +#include "copy_template.S" ret ENDPROC(memcpy) diff --git a/arch/arm64/mm/context.c b/arch/arm64/mm/context.c index d70ff14dbdbd..f636a2639f03 100644 --- a/arch/arm64/mm/context.c +++ b/arch/arm64/mm/context.c @@ -17,135 +17,185 @@ * along with this program. If not, see <http://www.gnu.org/licenses/>. */ -#include <linux/init.h> +#include <linux/bitops.h> #include <linux/sched.h> +#include <linux/slab.h> #include <linux/mm.h> -#include <linux/smp.h> -#include <linux/percpu.h> +#include <asm/cpufeature.h> #include <asm/mmu_context.h> #include <asm/tlbflush.h> -#include <asm/cachetype.h> -#define asid_bits(reg) \ - (((read_cpuid(ID_AA64MMFR0_EL1) & 0xf0) >> 2) + 8) +static u32 asid_bits; +static DEFINE_RAW_SPINLOCK(cpu_asid_lock); -#define ASID_FIRST_VERSION (1 << MAX_ASID_BITS) +static atomic64_t asid_generation; +static unsigned long *asid_map; -static DEFINE_RAW_SPINLOCK(cpu_asid_lock); -unsigned int cpu_last_asid = ASID_FIRST_VERSION; +static DEFINE_PER_CPU(atomic64_t, active_asids); +static DEFINE_PER_CPU(u64, reserved_asids); +static cpumask_t tlb_flush_pending; -/* - * We fork()ed a process, and we need a new context for the child to run in. - */ -void __init_new_context(struct task_struct *tsk, struct mm_struct *mm) -{ - mm->context.id = 0; - raw_spin_lock_init(&mm->context.id_lock); -} +#define ASID_MASK (~GENMASK(asid_bits - 1, 0)) +#define ASID_FIRST_VERSION (1UL << asid_bits) +#define NUM_USER_ASIDS ASID_FIRST_VERSION -static void flush_context(void) +static void flush_context(unsigned int cpu) { - /* set the reserved TTBR0 before flushing the TLB */ - cpu_set_reserved_ttbr0(); - flush_tlb_all(); - if (icache_is_aivivt()) - __flush_icache_all(); -} + int i; + u64 asid; -static void set_mm_context(struct mm_struct *mm, unsigned int asid) -{ - unsigned long flags; + /* Update the list of reserved ASIDs and the ASID bitmap. */ + bitmap_clear(asid_map, 0, NUM_USER_ASIDS); /* - * Locking needed for multi-threaded applications where the same - * mm->context.id could be set from different CPUs during the - * broadcast. This function is also called via IPI so the - * mm->context.id_lock has to be IRQ-safe. + * Ensure the generation bump is observed before we xchg the + * active_asids. */ - raw_spin_lock_irqsave(&mm->context.id_lock, flags); - if (likely((mm->context.id ^ cpu_last_asid) >> MAX_ASID_BITS)) { + smp_wmb(); + + for_each_possible_cpu(i) { + asid = atomic64_xchg_relaxed(&per_cpu(active_asids, i), 0); /* - * Old version of ASID found. Set the new one and reset - * mm_cpumask(mm). + * If this CPU has already been through a + * rollover, but hasn't run another task in + * the meantime, we must preserve its reserved + * ASID, as this is the only trace we have of + * the process it is still running. */ - mm->context.id = asid; - cpumask_clear(mm_cpumask(mm)); + if (asid == 0) + asid = per_cpu(reserved_asids, i); + __set_bit(asid & ~ASID_MASK, asid_map); + per_cpu(reserved_asids, i) = asid; } - raw_spin_unlock_irqrestore(&mm->context.id_lock, flags); - /* - * Set the mm_cpumask(mm) bit for the current CPU. 
- */ - cpumask_set_cpu(smp_processor_id(), mm_cpumask(mm)); + /* Queue a TLB invalidate and flush the I-cache if necessary. */ + cpumask_setall(&tlb_flush_pending); + + if (icache_is_aivivt()) + __flush_icache_all(); } -/* - * Reset the ASID on the current CPU. This function call is broadcast from the - * CPU handling the ASID rollover and holding cpu_asid_lock. - */ -static void reset_context(void *info) +static int is_reserved_asid(u64 asid) +{ + int cpu; + for_each_possible_cpu(cpu) + if (per_cpu(reserved_asids, cpu) == asid) + return 1; + return 0; +} + +static u64 new_context(struct mm_struct *mm, unsigned int cpu) { - unsigned int asid; - unsigned int cpu = smp_processor_id(); - struct mm_struct *mm = current->active_mm; + static u32 cur_idx = 1; + u64 asid = atomic64_read(&mm->context.id); + u64 generation = atomic64_read(&asid_generation); + + if (asid != 0) { + /* + * If our current ASID was active during a rollover, we + * can continue to use it and this was just a false alarm. + */ + if (is_reserved_asid(asid)) + return generation | (asid & ~ASID_MASK); + + /* + * We had a valid ASID in a previous life, so try to re-use + * it if possible. + */ + asid &= ~ASID_MASK; + if (!__test_and_set_bit(asid, asid_map)) + goto bump_gen; + } /* - * current->active_mm could be init_mm for the idle thread immediately - * after secondary CPU boot or hotplug. TTBR0_EL1 is already set to - * the reserved value, so no need to reset any context. + * Allocate a free ASID. If we can't find one, take a note of the + * currently active ASIDs and mark the TLBs as requiring flushes. + * We always count from ASID #1, as we use ASID #0 when setting a + * reserved TTBR0 for the init_mm. */ - if (mm == &init_mm) - return; + asid = find_next_zero_bit(asid_map, NUM_USER_ASIDS, cur_idx); + if (asid != NUM_USER_ASIDS) + goto set_asid; - smp_rmb(); - asid = cpu_last_asid + cpu; + /* We're out of ASIDs, so increment the global generation count */ + generation = atomic64_add_return_relaxed(ASID_FIRST_VERSION, + &asid_generation); + flush_context(cpu); - flush_context(); - set_mm_context(mm, asid); + /* We have at least 1 ASID per CPU, so this will always succeed */ + asid = find_next_zero_bit(asid_map, NUM_USER_ASIDS, 1); - /* set the new ASID */ - cpu_switch_mm(mm->pgd, mm); +set_asid: + __set_bit(asid, asid_map); + cur_idx = asid; + +bump_gen: + asid |= generation; + return asid; } -void __new_context(struct mm_struct *mm) +void check_and_switch_context(struct mm_struct *mm, unsigned int cpu) { - unsigned int asid; - unsigned int bits = asid_bits(); + unsigned long flags; + u64 asid; + + asid = atomic64_read(&mm->context.id); - raw_spin_lock(&cpu_asid_lock); /* - * Check the ASID again, in case the change was broadcast from another - * CPU before we acquired the lock. + * The memory ordering here is subtle. We rely on the control + * dependency between the generation read and the update of + * active_asids to ensure that we are synchronised with a + * parallel rollover (i.e. this pairs with the smp_wmb() in + * flush_context). */ - if (!unlikely((mm->context.id ^ cpu_last_asid) >> MAX_ASID_BITS)) { - cpumask_set_cpu(smp_processor_id(), mm_cpumask(mm)); - raw_spin_unlock(&cpu_asid_lock); - return; + if (!((asid ^ atomic64_read(&asid_generation)) >> asid_bits) + && atomic64_xchg_relaxed(&per_cpu(active_asids, cpu), asid)) + goto switch_mm_fastpath; + + raw_spin_lock_irqsave(&cpu_asid_lock, flags); + /* Check that our ASID belongs to the current generation. 
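
The new allocator packs a rollover generation above the hardware ASID inside the 64-bit mm->context.id, so a single comparison establishes whether an mm's ASID is still current. A sketch of the layout and the check, assuming asid_bits of 8 or 16 as probed by asids_init() below:

	/*
	 *   63                  asid_bits                  0
	 *   +------------------------+---------------------+
	 *   |       generation       |        ASID         |
	 *   +------------------------+---------------------+
	 */
	static bool sketch_asid_is_current(u64 id, u64 generation, u32 asid_bits)
	{
		/* Zero iff the generation fields match, i.e. no rollover
		 * has happened since this mm last took an ASID. */
		return !((id ^ generation) >> asid_bits);
	}

Only on a stale generation does the slow path under cpu_asid_lock run; otherwise the xchg on active_asids is enough to synchronise against a concurrent rollover.
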
*/ + asid = atomic64_read(&mm->context.id); + if ((asid ^ atomic64_read(&asid_generation)) >> asid_bits) { + asid = new_context(mm, cpu); + atomic64_set(&mm->context.id, asid); } - /* - * At this point, it is guaranteed that the current mm (with an old - * ASID) isn't active on any other CPU since the ASIDs are changed - * simultaneously via IPI. - */ - asid = ++cpu_last_asid; - /* - * If we've used up all our ASIDs, we need to start a new version and - * flush the TLB. - */ - if (unlikely((asid & ((1 << bits) - 1)) == 0)) { - /* increment the ASID version */ - cpu_last_asid += (1 << MAX_ASID_BITS) - (1 << bits); - if (cpu_last_asid == 0) - cpu_last_asid = ASID_FIRST_VERSION; - asid = cpu_last_asid + smp_processor_id(); - flush_context(); - smp_wmb(); - smp_call_function(reset_context, NULL, 1); - cpu_last_asid += NR_CPUS - 1; + if (cpumask_test_and_clear_cpu(cpu, &tlb_flush_pending)) + local_flush_tlb_all(); + + atomic64_set(&per_cpu(active_asids, cpu), asid); + raw_spin_unlock_irqrestore(&cpu_asid_lock, flags); + +switch_mm_fastpath: + cpu_switch_mm(mm->pgd, mm); +} + +static int asids_init(void) +{ + int fld = cpuid_feature_extract_field(read_cpuid(ID_AA64MMFR0_EL1), 4); + + switch (fld) { + default: + pr_warn("Unknown ASID size (%d); assuming 8-bit\n", fld); + /* Fallthrough */ + case 0: + asid_bits = 8; + break; + case 2: + asid_bits = 16; } - set_mm_context(mm, asid); - raw_spin_unlock(&cpu_asid_lock); + /* If we end up with more CPUs than ASIDs, expect things to crash */ + WARN_ON(NUM_USER_ASIDS < num_possible_cpus()); + atomic64_set(&asid_generation, ASID_FIRST_VERSION); + asid_map = kzalloc(BITS_TO_LONGS(NUM_USER_ASIDS) * sizeof(*asid_map), + GFP_KERNEL); + if (!asid_map) + panic("Failed to allocate bitmap for %lu ASIDs\n", + NUM_USER_ASIDS); + + pr_info("ASID allocator initialised with %lu entries\n", NUM_USER_ASIDS); + return 0; } +early_initcall(asids_init); diff --git a/arch/arm64/mm/dump.c b/arch/arm64/mm/dump.c index f3d6221cd5bd..5a22a119a74c 100644 --- a/arch/arm64/mm/dump.c +++ b/arch/arm64/mm/dump.c @@ -67,6 +67,12 @@ static struct addr_marker address_markers[] = { { -1, NULL }, }; +/* + * The page dumper groups page table entries of the same type into a single + * description. It uses pg_state to track the range information while + * iterating over the pte entries. When the continuity is broken it then + * dumps out a description of the range. + */ struct pg_state { struct seq_file *seq; const struct addr_marker *marker; @@ -114,6 +120,16 @@ static const struct prot_bits pte_bits[] = { .set = "NG", .clear = " ", }, { + .mask = PTE_CONT, + .val = PTE_CONT, + .set = "CON", + .clear = " ", + }, { + .mask = PTE_TABLE_BIT, + .val = PTE_TABLE_BIT, + .set = " ", + .clear = "BLK", + }, { .mask = PTE_UXN, .val = PTE_UXN, .set = "UXN", @@ -198,7 +214,7 @@ static void note_page(struct pg_state *st, unsigned long addr, unsigned level, unsigned long delta; if (st->current_prot) { - seq_printf(st->seq, "0x%16lx-0x%16lx ", + seq_printf(st->seq, "0x%016lx-0x%016lx ", st->start_address, addr); delta = (addr - st->start_address) >> 10; diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c index 9211b8527f25..eed6d52f5e54 100644 --- a/arch/arm64/mm/mmu.c +++ b/arch/arm64/mm/mmu.c @@ -80,19 +80,55 @@ static void split_pmd(pmd_t *pmd, pte_t *pte) do { /* * Need to have the least restrictive permissions available - * permissions will be fixed up later + * permissions will be fixed up later. Default the new page + * range as contiguous ptes. 
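
split_pmd() now marks kernel mappings as contiguous by default, and alloc_init_pte() below decides chunk by chunk whether the hint is usable. With a 4K granule the hardware may coalesce 16 adjacent, 64K-aligned PTEs into a single TLB entry, so eligibility reduces to an alignment test on the virtual range and the physical address; a sketch, assuming CONT_MASK covers that 64K window:

	static bool sketch_can_use_cont(unsigned long addr, unsigned long next,
					phys_addr_t phys)
	{
		/* start, end and phys must all sit on CONT_SIZE boundaries */
		return ((addr | next | phys) & ~CONT_MASK) == 0;
	}

When the test fails inside an already-contiguous range, clear_cont_pte_range() first strips the CONT bit from the whole aligned block, since the architecture does not allow mixing contiguous and non-contiguous entries within one such block.
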
*/ - set_pte(pte, pfn_pte(pfn, PAGE_KERNEL_EXEC)); + set_pte(pte, pfn_pte(pfn, PAGE_KERNEL_EXEC_CONT)); pfn++; } while (pte++, i++, i < PTRS_PER_PTE); } +/* + * Given a PTE with the CONT bit set, determine where the CONT range + * starts, and clear the entire range of PTE CONT bits. + */ +static void clear_cont_pte_range(pte_t *pte, unsigned long addr) +{ + int i; + + pte -= CONT_RANGE_OFFSET(addr); + for (i = 0; i < CONT_PTES; i++) { + set_pte(pte, pte_mknoncont(*pte)); + pte++; + } + flush_tlb_all(); +} + +/* + * Given a range of PTEs, set the pfn and apply the provided page + * protection flags. + */ +static void __populate_init_pte(pte_t *pte, unsigned long addr, + unsigned long end, phys_addr_t phys, + pgprot_t prot) +{ + unsigned long pfn = __phys_to_pfn(phys); + + do { + /* clear all the bits except the pfn, then apply the prot */ + set_pte(pte, pfn_pte(pfn, prot)); + pte++; + pfn++; + addr += PAGE_SIZE; + } while (addr != end); +} + static void alloc_init_pte(pmd_t *pmd, unsigned long addr, - unsigned long end, unsigned long pfn, + unsigned long end, phys_addr_t phys, pgprot_t prot, void *(*alloc)(unsigned long size)) { pte_t *pte; + unsigned long next; if (pmd_none(*pmd) || pmd_sect(*pmd)) { pte = alloc(PTRS_PER_PTE * sizeof(pte_t)); @@ -105,9 +141,27 @@ static void alloc_init_pte(pmd_t *pmd, unsigned long addr, pte = pte_offset_kernel(pmd, addr); do { - set_pte(pte, pfn_pte(pfn, prot)); - pfn++; - } while (pte++, addr += PAGE_SIZE, addr != end); + next = min(end, (addr + CONT_SIZE) & CONT_MASK); + if (((addr | next | phys) & ~CONT_MASK) == 0) { + /* a block of CONT_PTES */ + __populate_init_pte(pte, addr, next, phys, + prot | __pgprot(PTE_CONT)); + } else { + /* + * If the range being split is already inside a + * contiguous range but this PTE isn't going to be + * contiguous, unmark the adjacent ranges, then update + * the portion of the range we are interested in. + */ + clear_cont_pte_range(pte, addr); + __populate_init_pte(pte, addr, next, phys, prot); + } + + pte += (next - addr) >> PAGE_SHIFT; + phys += next - addr; + addr = next; + } while (addr != end); } void split_pud(pud_t *old_pud, pmd_t *pmd) @@ -168,8 +222,7 @@ static void alloc_init_pmd(struct mm_struct *mm, pud_t *pud, } } } else { - alloc_init_pte(pmd, addr, next, __phys_to_pfn(phys), - prot, alloc); + alloc_init_pte(pmd, addr, next, phys, prot, alloc); } phys += next - addr; } while (pmd++, addr = next, addr != end); @@ -456,7 +509,7 @@ void __init paging_init(void) * point to zero page to avoid speculatively fetching new entries. */ cpu_set_reserved_ttbr0(); - flush_tlb_all(); + local_flush_tlb_all(); cpu_set_default_tcr_t0sz(); } diff --git a/arch/arm64/mm/proc.S b/arch/arm64/mm/proc.S index e4ee7bd8830a..91cb2eaac256 100644 --- a/arch/arm64/mm/proc.S +++ b/arch/arm64/mm/proc.S @@ -130,7 +130,7 @@ ENDPROC(cpu_do_resume) * - pgd_phys - physical address of new TTB */ ENTRY(cpu_do_switch_mm) - mmid w1, x1 // get mm->context.id + mmid x1, x1 // get mm->context.id bfi x0, x1, #48, #16 // set the ASID msr ttbr0_el1, x0 // set TTBR0 isb @@ -146,8 +146,8 @@ ENDPROC(cpu_do_switch_mm) * value of the SCTLR_EL1 register. 
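
The cpu_do_switch_mm() fix matters because mm->context.id is now a 64-bit generation|ASID value: mmid must load the full x register, after which bfi inserts only bits [15:0] as the ASID into TTBR0_EL1 bits [63:48]. A sketch of the value being composed, with an invented helper name and assuming 16-bit ASIDs:

	static inline u64 sketch_make_ttbr0(u64 pgd_phys, u64 context_id)
	{
		/* page-table base in the low bits, ASID in [63:48] */
		return (pgd_phys & GENMASK(47, 0)) | ((context_id & 0xffff) << 48);
	}

The 64-bit mmid load matches the new type of context.id; the bfi then discards the generation bits.
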
*/ ENTRY(__cpu_setup) - tlbi vmalle1is // invalidate I + D TLBs - dsb ish + tlbi vmalle1 // Invalidate local TLB + dsb nsh mov x0, #3 << 20 msr cpacr_el1, x0 // Enable FP/ASIMD diff --git a/arch/avr32/include/asm/Kbuild b/arch/avr32/include/asm/Kbuild index f61f2dd67464..241b9b9729d8 100644 --- a/arch/avr32/include/asm/Kbuild +++ b/arch/avr32/include/asm/Kbuild @@ -20,4 +20,5 @@ generic-y += sections.h generic-y += topology.h generic-y += trace_clock.h generic-y += vga.h +generic-y += word-at-a-time.h generic-y += xor.h diff --git a/arch/blackfin/include/asm/Kbuild b/arch/blackfin/include/asm/Kbuild index 61cd1e786a14..91d49c0a3118 100644 --- a/arch/blackfin/include/asm/Kbuild +++ b/arch/blackfin/include/asm/Kbuild @@ -46,4 +46,5 @@ generic-y += types.h generic-y += ucontext.h generic-y += unaligned.h generic-y += user.h +generic-y += word-at-a-time.h generic-y += xor.h diff --git a/arch/c6x/include/asm/Kbuild b/arch/c6x/include/asm/Kbuild index f17c4dc6050c..945544ec603e 100644 --- a/arch/c6x/include/asm/Kbuild +++ b/arch/c6x/include/asm/Kbuild @@ -59,4 +59,5 @@ generic-y += types.h generic-y += ucontext.h generic-y += user.h generic-y += vga.h +generic-y += word-at-a-time.h generic-y += xor.h diff --git a/arch/cris/include/asm/Kbuild b/arch/cris/include/asm/Kbuild index b7f68192d15b..1778805f6380 100644 --- a/arch/cris/include/asm/Kbuild +++ b/arch/cris/include/asm/Kbuild @@ -43,4 +43,5 @@ generic-y += topology.h generic-y += trace_clock.h generic-y += types.h generic-y += vga.h +generic-y += word-at-a-time.h generic-y += xor.h diff --git a/arch/frv/include/asm/Kbuild b/arch/frv/include/asm/Kbuild index 8e47b832cc76..1fa084cf1a43 100644 --- a/arch/frv/include/asm/Kbuild +++ b/arch/frv/include/asm/Kbuild @@ -7,3 +7,4 @@ generic-y += mcs_spinlock.h generic-y += mm-arch-hooks.h generic-y += preempt.h generic-y += trace_clock.h +generic-y += word-at-a-time.h diff --git a/arch/hexagon/include/asm/Kbuild b/arch/hexagon/include/asm/Kbuild index daee37bd0999..db8ddabc6bd2 100644 --- a/arch/hexagon/include/asm/Kbuild +++ b/arch/hexagon/include/asm/Kbuild @@ -58,4 +58,5 @@ generic-y += types.h generic-y += ucontext.h generic-y += unaligned.h generic-y += vga.h +generic-y += word-at-a-time.h generic-y += xor.h diff --git a/arch/ia64/include/asm/Kbuild b/arch/ia64/include/asm/Kbuild index 9de3ba12f6b9..502a91d8dbbd 100644 --- a/arch/ia64/include/asm/Kbuild +++ b/arch/ia64/include/asm/Kbuild @@ -8,3 +8,4 @@ generic-y += mm-arch-hooks.h generic-y += preempt.h generic-y += trace_clock.h generic-y += vtime.h +generic-y += word-at-a-time.h diff --git a/arch/m32r/include/asm/Kbuild b/arch/m32r/include/asm/Kbuild index e0eb704ca1fa..fd104bd221ce 100644 --- a/arch/m32r/include/asm/Kbuild +++ b/arch/m32r/include/asm/Kbuild @@ -9,3 +9,4 @@ generic-y += module.h generic-y += preempt.h generic-y += sections.h generic-y += trace_clock.h +generic-y += word-at-a-time.h diff --git a/arch/m68k/configs/amiga_defconfig b/arch/m68k/configs/amiga_defconfig index 0b6b40d37b95..5b4ec541ba7c 100644 --- a/arch/m68k/configs/amiga_defconfig +++ b/arch/m68k/configs/amiga_defconfig @@ -10,6 +10,7 @@ CONFIG_LOG_BUF_SHIFT=16 # CONFIG_PID_NS is not set # CONFIG_NET_NS is not set CONFIG_BLK_DEV_INITRD=y +CONFIG_USERFAULTFD=y CONFIG_SLAB=y CONFIG_MODULES=y CONFIG_MODULE_UNLOAD=y @@ -57,7 +58,6 @@ CONFIG_NET_IPGRE_DEMUX=m CONFIG_NET_IPGRE=m CONFIG_NET_IPVTI=m CONFIG_NET_FOU_IP_TUNNELS=y -CONFIG_GENEVE_CORE=m CONFIG_INET_AH=m CONFIG_INET_ESP=m CONFIG_INET_IPCOMP=m @@ -67,10 +67,12 @@ CONFIG_INET_XFRM_MODE_BEET=m # CONFIG_INET_LRO is 
not set CONFIG_INET_DIAG=m CONFIG_INET_UDP_DIAG=m +CONFIG_IPV6=m CONFIG_IPV6_ROUTER_PREF=y CONFIG_INET6_AH=m CONFIG_INET6_ESP=m CONFIG_INET6_IPCOMP=m +CONFIG_IPV6_ILA=m CONFIG_IPV6_VTI=m CONFIG_IPV6_GRE=m CONFIG_NETFILTER=y @@ -179,6 +181,7 @@ CONFIG_IP_SET_HASH_NETIFACE=m CONFIG_IP_SET_LIST_SET=m CONFIG_NF_CONNTRACK_IPV4=m CONFIG_NFT_CHAIN_ROUTE_IPV4=m +CONFIG_NFT_DUP_IPV4=m CONFIG_NF_TABLES_ARP=m CONFIG_NF_LOG_ARP=m CONFIG_NFT_CHAIN_NAT_IPV4=m @@ -206,6 +209,7 @@ CONFIG_IP_NF_ARPFILTER=m CONFIG_IP_NF_ARP_MANGLE=m CONFIG_NF_CONNTRACK_IPV6=m CONFIG_NFT_CHAIN_ROUTE_IPV6=m +CONFIG_NFT_DUP_IPV6=m CONFIG_NFT_CHAIN_NAT_IPV6=m CONFIG_NFT_MASQ_IPV6=m CONFIG_NFT_REDIR_IPV6=m @@ -271,6 +275,7 @@ CONFIG_NETLINK_DIAG=m CONFIG_MPLS=y CONFIG_NET_MPLS_GSO=m CONFIG_MPLS_ROUTING=m +CONFIG_MPLS_IPTUNNEL=m # CONFIG_WIRELESS is not set # CONFIG_UEVENT_HELPER is not set CONFIG_DEVTMPFS=y @@ -370,6 +375,7 @@ CONFIG_ZORRO8390=y # CONFIG_NET_VENDOR_SEEQ is not set # CONFIG_NET_VENDOR_SMSC is not set # CONFIG_NET_VENDOR_STMICRO is not set +# CONFIG_NET_VENDOR_SYNOPSYS is not set # CONFIG_NET_VENDOR_VIA is not set # CONFIG_NET_VENDOR_WIZNET is not set CONFIG_PPP=m @@ -537,6 +543,7 @@ CONFIG_TEST_USER_COPY=m CONFIG_TEST_BPF=m CONFIG_TEST_FIRMWARE=m CONFIG_TEST_UDELAY=m +CONFIG_TEST_STATIC_KEYS=m CONFIG_EARLY_PRINTK=y CONFIG_ENCRYPTED_KEYS=m CONFIG_CRYPTO_RSA=m diff --git a/arch/m68k/configs/apollo_defconfig b/arch/m68k/configs/apollo_defconfig index eeb3a8991fc4..6e5198e2c124 100644 --- a/arch/m68k/configs/apollo_defconfig +++ b/arch/m68k/configs/apollo_defconfig @@ -10,6 +10,7 @@ CONFIG_LOG_BUF_SHIFT=16 # CONFIG_PID_NS is not set # CONFIG_NET_NS is not set CONFIG_BLK_DEV_INITRD=y +CONFIG_USERFAULTFD=y CONFIG_SLAB=y CONFIG_MODULES=y CONFIG_MODULE_UNLOAD=y @@ -55,7 +56,6 @@ CONFIG_NET_IPGRE_DEMUX=m CONFIG_NET_IPGRE=m CONFIG_NET_IPVTI=m CONFIG_NET_FOU_IP_TUNNELS=y -CONFIG_GENEVE_CORE=m CONFIG_INET_AH=m CONFIG_INET_ESP=m CONFIG_INET_IPCOMP=m @@ -65,10 +65,12 @@ CONFIG_INET_XFRM_MODE_BEET=m # CONFIG_INET_LRO is not set CONFIG_INET_DIAG=m CONFIG_INET_UDP_DIAG=m +CONFIG_IPV6=m CONFIG_IPV6_ROUTER_PREF=y CONFIG_INET6_AH=m CONFIG_INET6_ESP=m CONFIG_INET6_IPCOMP=m +CONFIG_IPV6_ILA=m CONFIG_IPV6_VTI=m CONFIG_IPV6_GRE=m CONFIG_NETFILTER=y @@ -177,6 +179,7 @@ CONFIG_IP_SET_HASH_NETIFACE=m CONFIG_IP_SET_LIST_SET=m CONFIG_NF_CONNTRACK_IPV4=m CONFIG_NFT_CHAIN_ROUTE_IPV4=m +CONFIG_NFT_DUP_IPV4=m CONFIG_NF_TABLES_ARP=m CONFIG_NF_LOG_ARP=m CONFIG_NFT_CHAIN_NAT_IPV4=m @@ -204,6 +207,7 @@ CONFIG_IP_NF_ARPFILTER=m CONFIG_IP_NF_ARP_MANGLE=m CONFIG_NF_CONNTRACK_IPV6=m CONFIG_NFT_CHAIN_ROUTE_IPV6=m +CONFIG_NFT_DUP_IPV6=m CONFIG_NFT_CHAIN_NAT_IPV6=m CONFIG_NFT_MASQ_IPV6=m CONFIG_NFT_REDIR_IPV6=m @@ -269,6 +273,7 @@ CONFIG_NETLINK_DIAG=m CONFIG_MPLS=y CONFIG_NET_MPLS_GSO=m CONFIG_MPLS_ROUTING=m +CONFIG_MPLS_IPTUNNEL=m # CONFIG_WIRELESS is not set # CONFIG_UEVENT_HELPER is not set CONFIG_DEVTMPFS=y @@ -344,6 +349,7 @@ CONFIG_VETH=m # CONFIG_NET_VENDOR_SAMSUNG is not set # CONFIG_NET_VENDOR_SEEQ is not set # CONFIG_NET_VENDOR_STMICRO is not set +# CONFIG_NET_VENDOR_SYNOPSYS is not set # CONFIG_NET_VENDOR_VIA is not set # CONFIG_NET_VENDOR_WIZNET is not set CONFIG_PPP=m @@ -495,6 +501,7 @@ CONFIG_TEST_USER_COPY=m CONFIG_TEST_BPF=m CONFIG_TEST_FIRMWARE=m CONFIG_TEST_UDELAY=m +CONFIG_TEST_STATIC_KEYS=m CONFIG_EARLY_PRINTK=y CONFIG_ENCRYPTED_KEYS=m CONFIG_CRYPTO_RSA=m diff --git a/arch/m68k/configs/atari_defconfig b/arch/m68k/configs/atari_defconfig index 3a7006654ce9..f75600b0ca23 100644 --- a/arch/m68k/configs/atari_defconfig +++ 
b/arch/m68k/configs/atari_defconfig @@ -10,6 +10,7 @@ CONFIG_LOG_BUF_SHIFT=16 # CONFIG_PID_NS is not set # CONFIG_NET_NS is not set CONFIG_BLK_DEV_INITRD=y +CONFIG_USERFAULTFD=y CONFIG_SLAB=y CONFIG_MODULES=y CONFIG_MODULE_UNLOAD=y @@ -55,7 +56,6 @@ CONFIG_NET_IPGRE_DEMUX=m CONFIG_NET_IPGRE=m CONFIG_NET_IPVTI=m CONFIG_NET_FOU_IP_TUNNELS=y -CONFIG_GENEVE_CORE=m CONFIG_INET_AH=m CONFIG_INET_ESP=m CONFIG_INET_IPCOMP=m @@ -65,10 +65,12 @@ CONFIG_INET_XFRM_MODE_BEET=m # CONFIG_INET_LRO is not set CONFIG_INET_DIAG=m CONFIG_INET_UDP_DIAG=m +CONFIG_IPV6=m CONFIG_IPV6_ROUTER_PREF=y CONFIG_INET6_AH=m CONFIG_INET6_ESP=m CONFIG_INET6_IPCOMP=m +CONFIG_IPV6_ILA=m CONFIG_IPV6_VTI=m CONFIG_IPV6_GRE=m CONFIG_NETFILTER=y @@ -177,6 +179,7 @@ CONFIG_IP_SET_HASH_NETIFACE=m CONFIG_IP_SET_LIST_SET=m CONFIG_NF_CONNTRACK_IPV4=m CONFIG_NFT_CHAIN_ROUTE_IPV4=m +CONFIG_NFT_DUP_IPV4=m CONFIG_NF_TABLES_ARP=m CONFIG_NF_LOG_ARP=m CONFIG_NFT_CHAIN_NAT_IPV4=m @@ -204,6 +207,7 @@ CONFIG_IP_NF_ARPFILTER=m CONFIG_IP_NF_ARP_MANGLE=m CONFIG_NF_CONNTRACK_IPV6=m CONFIG_NFT_CHAIN_ROUTE_IPV6=m +CONFIG_NFT_DUP_IPV6=m CONFIG_NFT_CHAIN_NAT_IPV6=m CONFIG_NFT_MASQ_IPV6=m CONFIG_NFT_REDIR_IPV6=m @@ -269,6 +273,7 @@ CONFIG_NETLINK_DIAG=m CONFIG_MPLS=y CONFIG_NET_MPLS_GSO=m CONFIG_MPLS_ROUTING=m +CONFIG_MPLS_IPTUNNEL=m # CONFIG_WIRELESS is not set # CONFIG_UEVENT_HELPER is not set CONFIG_DEVTMPFS=y @@ -355,6 +360,7 @@ CONFIG_NE2000=y # CONFIG_NET_VENDOR_SEEQ is not set CONFIG_SMC91X=y # CONFIG_NET_VENDOR_STMICRO is not set +# CONFIG_NET_VENDOR_SYNOPSYS is not set # CONFIG_NET_VENDOR_VIA is not set # CONFIG_NET_VENDOR_WIZNET is not set CONFIG_PPP=m @@ -517,6 +523,7 @@ CONFIG_TEST_USER_COPY=m CONFIG_TEST_BPF=m CONFIG_TEST_FIRMWARE=m CONFIG_TEST_UDELAY=m +CONFIG_TEST_STATIC_KEYS=m CONFIG_EARLY_PRINTK=y CONFIG_ENCRYPTED_KEYS=m CONFIG_CRYPTO_RSA=m diff --git a/arch/m68k/configs/bvme6000_defconfig b/arch/m68k/configs/bvme6000_defconfig index 0586b323a673..a42d91c389a6 100644 --- a/arch/m68k/configs/bvme6000_defconfig +++ b/arch/m68k/configs/bvme6000_defconfig @@ -10,6 +10,7 @@ CONFIG_LOG_BUF_SHIFT=16 # CONFIG_PID_NS is not set # CONFIG_NET_NS is not set CONFIG_BLK_DEV_INITRD=y +CONFIG_USERFAULTFD=y CONFIG_SLAB=y CONFIG_MODULES=y CONFIG_MODULE_UNLOAD=y @@ -53,7 +54,6 @@ CONFIG_NET_IPGRE_DEMUX=m CONFIG_NET_IPGRE=m CONFIG_NET_IPVTI=m CONFIG_NET_FOU_IP_TUNNELS=y -CONFIG_GENEVE_CORE=m CONFIG_INET_AH=m CONFIG_INET_ESP=m CONFIG_INET_IPCOMP=m @@ -63,10 +63,12 @@ CONFIG_INET_XFRM_MODE_BEET=m # CONFIG_INET_LRO is not set CONFIG_INET_DIAG=m CONFIG_INET_UDP_DIAG=m +CONFIG_IPV6=m CONFIG_IPV6_ROUTER_PREF=y CONFIG_INET6_AH=m CONFIG_INET6_ESP=m CONFIG_INET6_IPCOMP=m +CONFIG_IPV6_ILA=m CONFIG_IPV6_VTI=m CONFIG_IPV6_GRE=m CONFIG_NETFILTER=y @@ -175,6 +177,7 @@ CONFIG_IP_SET_HASH_NETIFACE=m CONFIG_IP_SET_LIST_SET=m CONFIG_NF_CONNTRACK_IPV4=m CONFIG_NFT_CHAIN_ROUTE_IPV4=m +CONFIG_NFT_DUP_IPV4=m CONFIG_NF_TABLES_ARP=m CONFIG_NF_LOG_ARP=m CONFIG_NFT_CHAIN_NAT_IPV4=m @@ -202,6 +205,7 @@ CONFIG_IP_NF_ARPFILTER=m CONFIG_IP_NF_ARP_MANGLE=m CONFIG_NF_CONNTRACK_IPV6=m CONFIG_NFT_CHAIN_ROUTE_IPV6=m +CONFIG_NFT_DUP_IPV6=m CONFIG_NFT_CHAIN_NAT_IPV6=m CONFIG_NFT_MASQ_IPV6=m CONFIG_NFT_REDIR_IPV6=m @@ -267,6 +271,7 @@ CONFIG_NETLINK_DIAG=m CONFIG_MPLS=y CONFIG_NET_MPLS_GSO=m CONFIG_MPLS_ROUTING=m +CONFIG_MPLS_IPTUNNEL=m # CONFIG_WIRELESS is not set # CONFIG_UEVENT_HELPER is not set CONFIG_DEVTMPFS=y @@ -343,6 +348,7 @@ CONFIG_BVME6000_NET=y # CONFIG_NET_VENDOR_SAMSUNG is not set # CONFIG_NET_VENDOR_SEEQ is not set # CONFIG_NET_VENDOR_STMICRO is not set +# 
CONFIG_NET_VENDOR_SYNOPSYS is not set # CONFIG_NET_VENDOR_VIA is not set # CONFIG_NET_VENDOR_WIZNET is not set CONFIG_PPP=m @@ -488,6 +494,7 @@ CONFIG_TEST_USER_COPY=m CONFIG_TEST_BPF=m CONFIG_TEST_FIRMWARE=m CONFIG_TEST_UDELAY=m +CONFIG_TEST_STATIC_KEYS=m CONFIG_EARLY_PRINTK=y CONFIG_ENCRYPTED_KEYS=m CONFIG_CRYPTO_RSA=m diff --git a/arch/m68k/configs/hp300_defconfig b/arch/m68k/configs/hp300_defconfig index ad1dbce07aa4..77f4a11083e9 100644 --- a/arch/m68k/configs/hp300_defconfig +++ b/arch/m68k/configs/hp300_defconfig @@ -10,6 +10,7 @@ CONFIG_LOG_BUF_SHIFT=16 # CONFIG_PID_NS is not set # CONFIG_NET_NS is not set CONFIG_BLK_DEV_INITRD=y +CONFIG_USERFAULTFD=y CONFIG_SLAB=y CONFIG_MODULES=y CONFIG_MODULE_UNLOAD=y @@ -55,7 +56,6 @@ CONFIG_NET_IPGRE_DEMUX=m CONFIG_NET_IPGRE=m CONFIG_NET_IPVTI=m CONFIG_NET_FOU_IP_TUNNELS=y -CONFIG_GENEVE_CORE=m CONFIG_INET_AH=m CONFIG_INET_ESP=m CONFIG_INET_IPCOMP=m @@ -65,10 +65,12 @@ CONFIG_INET_XFRM_MODE_BEET=m # CONFIG_INET_LRO is not set CONFIG_INET_DIAG=m CONFIG_INET_UDP_DIAG=m +CONFIG_IPV6=m CONFIG_IPV6_ROUTER_PREF=y CONFIG_INET6_AH=m CONFIG_INET6_ESP=m CONFIG_INET6_IPCOMP=m +CONFIG_IPV6_ILA=m CONFIG_IPV6_VTI=m CONFIG_IPV6_GRE=m CONFIG_NETFILTER=y @@ -177,6 +179,7 @@ CONFIG_IP_SET_HASH_NETIFACE=m CONFIG_IP_SET_LIST_SET=m CONFIG_NF_CONNTRACK_IPV4=m CONFIG_NFT_CHAIN_ROUTE_IPV4=m +CONFIG_NFT_DUP_IPV4=m CONFIG_NF_TABLES_ARP=m CONFIG_NF_LOG_ARP=m CONFIG_NFT_CHAIN_NAT_IPV4=m @@ -204,6 +207,7 @@ CONFIG_IP_NF_ARPFILTER=m CONFIG_IP_NF_ARP_MANGLE=m CONFIG_NF_CONNTRACK_IPV6=m CONFIG_NFT_CHAIN_ROUTE_IPV6=m +CONFIG_NFT_DUP_IPV6=m CONFIG_NFT_CHAIN_NAT_IPV6=m CONFIG_NFT_MASQ_IPV6=m CONFIG_NFT_REDIR_IPV6=m @@ -269,6 +273,7 @@ CONFIG_NETLINK_DIAG=m CONFIG_MPLS=y CONFIG_NET_MPLS_GSO=m CONFIG_MPLS_ROUTING=m +CONFIG_MPLS_IPTUNNEL=m # CONFIG_WIRELESS is not set # CONFIG_UEVENT_HELPER is not set CONFIG_DEVTMPFS=y @@ -345,6 +350,7 @@ CONFIG_HPLANCE=y # CONFIG_NET_VENDOR_SAMSUNG is not set # CONFIG_NET_VENDOR_SEEQ is not set # CONFIG_NET_VENDOR_STMICRO is not set +# CONFIG_NET_VENDOR_SYNOPSYS is not set # CONFIG_NET_VENDOR_VIA is not set # CONFIG_NET_VENDOR_WIZNET is not set CONFIG_PPP=m @@ -497,6 +503,7 @@ CONFIG_TEST_USER_COPY=m CONFIG_TEST_BPF=m CONFIG_TEST_FIRMWARE=m CONFIG_TEST_UDELAY=m +CONFIG_TEST_STATIC_KEYS=m CONFIG_EARLY_PRINTK=y CONFIG_ENCRYPTED_KEYS=m CONFIG_CRYPTO_RSA=m diff --git a/arch/m68k/configs/mac_defconfig b/arch/m68k/configs/mac_defconfig index b44acacaecf4..5a329f77329b 100644 --- a/arch/m68k/configs/mac_defconfig +++ b/arch/m68k/configs/mac_defconfig @@ -10,6 +10,7 @@ CONFIG_LOG_BUF_SHIFT=16 # CONFIG_PID_NS is not set # CONFIG_NET_NS is not set CONFIG_BLK_DEV_INITRD=y +CONFIG_USERFAULTFD=y CONFIG_SLAB=y CONFIG_MODULES=y CONFIG_MODULE_UNLOAD=y @@ -54,7 +55,6 @@ CONFIG_NET_IPGRE_DEMUX=m CONFIG_NET_IPGRE=m CONFIG_NET_IPVTI=m CONFIG_NET_FOU_IP_TUNNELS=y -CONFIG_GENEVE_CORE=m CONFIG_INET_AH=m CONFIG_INET_ESP=m CONFIG_INET_IPCOMP=m @@ -64,10 +64,12 @@ CONFIG_INET_XFRM_MODE_BEET=m # CONFIG_INET_LRO is not set CONFIG_INET_DIAG=m CONFIG_INET_UDP_DIAG=m +CONFIG_IPV6=m CONFIG_IPV6_ROUTER_PREF=y CONFIG_INET6_AH=m CONFIG_INET6_ESP=m CONFIG_INET6_IPCOMP=m +CONFIG_IPV6_ILA=m CONFIG_IPV6_VTI=m CONFIG_IPV6_GRE=m CONFIG_NETFILTER=y @@ -176,6 +178,7 @@ CONFIG_IP_SET_HASH_NETIFACE=m CONFIG_IP_SET_LIST_SET=m CONFIG_NF_CONNTRACK_IPV4=m CONFIG_NFT_CHAIN_ROUTE_IPV4=m +CONFIG_NFT_DUP_IPV4=m CONFIG_NF_TABLES_ARP=m CONFIG_NF_LOG_ARP=m CONFIG_NFT_CHAIN_NAT_IPV4=m @@ -203,6 +206,7 @@ CONFIG_IP_NF_ARPFILTER=m CONFIG_IP_NF_ARP_MANGLE=m CONFIG_NF_CONNTRACK_IPV6=m 
CONFIG_NFT_CHAIN_ROUTE_IPV6=m +CONFIG_NFT_DUP_IPV6=m CONFIG_NFT_CHAIN_NAT_IPV6=m CONFIG_NFT_MASQ_IPV6=m CONFIG_NFT_REDIR_IPV6=m @@ -271,6 +275,7 @@ CONFIG_NETLINK_DIAG=m CONFIG_MPLS=y CONFIG_NET_MPLS_GSO=m CONFIG_MPLS_ROUTING=m +CONFIG_MPLS_IPTUNNEL=m # CONFIG_WIRELESS is not set # CONFIG_UEVENT_HELPER is not set CONFIG_DEVTMPFS=y @@ -364,6 +369,7 @@ CONFIG_MAC8390=y # CONFIG_NET_VENDOR_SEEQ is not set # CONFIG_NET_VENDOR_SMSC is not set # CONFIG_NET_VENDOR_STMICRO is not set +# CONFIG_NET_VENDOR_SYNOPSYS is not set # CONFIG_NET_VENDOR_VIA is not set # CONFIG_NET_VENDOR_WIZNET is not set CONFIG_PPP=m @@ -519,6 +525,7 @@ CONFIG_TEST_USER_COPY=m CONFIG_TEST_BPF=m CONFIG_TEST_FIRMWARE=m CONFIG_TEST_UDELAY=m +CONFIG_TEST_STATIC_KEYS=m CONFIG_EARLY_PRINTK=y CONFIG_ENCRYPTED_KEYS=m CONFIG_CRYPTO_RSA=m diff --git a/arch/m68k/configs/multi_defconfig b/arch/m68k/configs/multi_defconfig index 8afca3753db1..83c80d2030ec 100644 --- a/arch/m68k/configs/multi_defconfig +++ b/arch/m68k/configs/multi_defconfig @@ -10,6 +10,7 @@ CONFIG_LOG_BUF_SHIFT=16 # CONFIG_PID_NS is not set # CONFIG_NET_NS is not set CONFIG_BLK_DEV_INITRD=y +CONFIG_USERFAULTFD=y CONFIG_SLAB=y CONFIG_MODULES=y CONFIG_MODULE_UNLOAD=y @@ -64,7 +65,6 @@ CONFIG_NET_IPGRE_DEMUX=m CONFIG_NET_IPGRE=m CONFIG_NET_IPVTI=m CONFIG_NET_FOU_IP_TUNNELS=y -CONFIG_GENEVE_CORE=m CONFIG_INET_AH=m CONFIG_INET_ESP=m CONFIG_INET_IPCOMP=m @@ -74,10 +74,12 @@ CONFIG_INET_XFRM_MODE_BEET=m # CONFIG_INET_LRO is not set CONFIG_INET_DIAG=m CONFIG_INET_UDP_DIAG=m +CONFIG_IPV6=m CONFIG_IPV6_ROUTER_PREF=y CONFIG_INET6_AH=m CONFIG_INET6_ESP=m CONFIG_INET6_IPCOMP=m +CONFIG_IPV6_ILA=m CONFIG_IPV6_VTI=m CONFIG_IPV6_GRE=m CONFIG_NETFILTER=y @@ -186,6 +188,7 @@ CONFIG_IP_SET_HASH_NETIFACE=m CONFIG_IP_SET_LIST_SET=m CONFIG_NF_CONNTRACK_IPV4=m CONFIG_NFT_CHAIN_ROUTE_IPV4=m +CONFIG_NFT_DUP_IPV4=m CONFIG_NF_TABLES_ARP=m CONFIG_NF_LOG_ARP=m CONFIG_NFT_CHAIN_NAT_IPV4=m @@ -213,6 +216,7 @@ CONFIG_IP_NF_ARPFILTER=m CONFIG_IP_NF_ARP_MANGLE=m CONFIG_NF_CONNTRACK_IPV6=m CONFIG_NFT_CHAIN_ROUTE_IPV6=m +CONFIG_NFT_DUP_IPV6=m CONFIG_NFT_CHAIN_NAT_IPV6=m CONFIG_NFT_MASQ_IPV6=m CONFIG_NFT_REDIR_IPV6=m @@ -281,6 +285,7 @@ CONFIG_NETLINK_DIAG=m CONFIG_MPLS=y CONFIG_NET_MPLS_GSO=m CONFIG_MPLS_ROUTING=m +CONFIG_MPLS_IPTUNNEL=m # CONFIG_WIRELESS is not set # CONFIG_UEVENT_HELPER is not set CONFIG_DEVTMPFS=y @@ -410,6 +415,7 @@ CONFIG_ZORRO8390=y # CONFIG_NET_VENDOR_SEEQ is not set CONFIG_SMC91X=y # CONFIG_NET_VENDOR_STMICRO is not set +# CONFIG_NET_VENDOR_SYNOPSYS is not set # CONFIG_NET_VENDOR_VIA is not set # CONFIG_NET_VENDOR_WIZNET is not set CONFIG_PLIP=m @@ -599,6 +605,7 @@ CONFIG_TEST_USER_COPY=m CONFIG_TEST_BPF=m CONFIG_TEST_FIRMWARE=m CONFIG_TEST_UDELAY=m +CONFIG_TEST_STATIC_KEYS=m CONFIG_EARLY_PRINTK=y CONFIG_ENCRYPTED_KEYS=m CONFIG_CRYPTO_RSA=m diff --git a/arch/m68k/configs/mvme147_defconfig b/arch/m68k/configs/mvme147_defconfig index ef00875994d9..6cb42c3bf5a2 100644 --- a/arch/m68k/configs/mvme147_defconfig +++ b/arch/m68k/configs/mvme147_defconfig @@ -10,6 +10,7 @@ CONFIG_LOG_BUF_SHIFT=16 # CONFIG_PID_NS is not set # CONFIG_NET_NS is not set CONFIG_BLK_DEV_INITRD=y +CONFIG_USERFAULTFD=y CONFIG_SLAB=y CONFIG_MODULES=y CONFIG_MODULE_UNLOAD=y @@ -52,7 +53,6 @@ CONFIG_NET_IPGRE_DEMUX=m CONFIG_NET_IPGRE=m CONFIG_NET_IPVTI=m CONFIG_NET_FOU_IP_TUNNELS=y -CONFIG_GENEVE_CORE=m CONFIG_INET_AH=m CONFIG_INET_ESP=m CONFIG_INET_IPCOMP=m @@ -62,10 +62,12 @@ CONFIG_INET_XFRM_MODE_BEET=m # CONFIG_INET_LRO is not set CONFIG_INET_DIAG=m CONFIG_INET_UDP_DIAG=m +CONFIG_IPV6=m 
CONFIG_IPV6_ROUTER_PREF=y CONFIG_INET6_AH=m CONFIG_INET6_ESP=m CONFIG_INET6_IPCOMP=m +CONFIG_IPV6_ILA=m CONFIG_IPV6_VTI=m CONFIG_IPV6_GRE=m CONFIG_NETFILTER=y @@ -174,6 +176,7 @@ CONFIG_IP_SET_HASH_NETIFACE=m CONFIG_IP_SET_LIST_SET=m CONFIG_NF_CONNTRACK_IPV4=m CONFIG_NFT_CHAIN_ROUTE_IPV4=m +CONFIG_NFT_DUP_IPV4=m CONFIG_NF_TABLES_ARP=m CONFIG_NF_LOG_ARP=m CONFIG_NFT_CHAIN_NAT_IPV4=m @@ -201,6 +204,7 @@ CONFIG_IP_NF_ARPFILTER=m CONFIG_IP_NF_ARP_MANGLE=m CONFIG_NF_CONNTRACK_IPV6=m CONFIG_NFT_CHAIN_ROUTE_IPV6=m +CONFIG_NFT_DUP_IPV6=m CONFIG_NFT_CHAIN_NAT_IPV6=m CONFIG_NFT_MASQ_IPV6=m CONFIG_NFT_REDIR_IPV6=m @@ -266,6 +270,7 @@ CONFIG_NETLINK_DIAG=m CONFIG_MPLS=y CONFIG_NET_MPLS_GSO=m CONFIG_MPLS_ROUTING=m +CONFIG_MPLS_IPTUNNEL=m # CONFIG_WIRELESS is not set # CONFIG_UEVENT_HELPER is not set CONFIG_DEVTMPFS=y @@ -343,6 +348,7 @@ CONFIG_MVME147_NET=y # CONFIG_NET_VENDOR_SAMSUNG is not set # CONFIG_NET_VENDOR_SEEQ is not set # CONFIG_NET_VENDOR_STMICRO is not set +# CONFIG_NET_VENDOR_SYNOPSYS is not set # CONFIG_NET_VENDOR_VIA is not set # CONFIG_NET_VENDOR_WIZNET is not set CONFIG_PPP=m @@ -488,6 +494,7 @@ CONFIG_TEST_USER_COPY=m CONFIG_TEST_BPF=m CONFIG_TEST_FIRMWARE=m CONFIG_TEST_UDELAY=m +CONFIG_TEST_STATIC_KEYS=m CONFIG_EARLY_PRINTK=y CONFIG_ENCRYPTED_KEYS=m CONFIG_CRYPTO_RSA=m diff --git a/arch/m68k/configs/mvme16x_defconfig b/arch/m68k/configs/mvme16x_defconfig index 387c2bd90ff1..c7508c30330c 100644 --- a/arch/m68k/configs/mvme16x_defconfig +++ b/arch/m68k/configs/mvme16x_defconfig @@ -10,6 +10,7 @@ CONFIG_LOG_BUF_SHIFT=16 # CONFIG_PID_NS is not set # CONFIG_NET_NS is not set CONFIG_BLK_DEV_INITRD=y +CONFIG_USERFAULTFD=y CONFIG_SLAB=y CONFIG_MODULES=y CONFIG_MODULE_UNLOAD=y @@ -53,7 +54,6 @@ CONFIG_NET_IPGRE_DEMUX=m CONFIG_NET_IPGRE=m CONFIG_NET_IPVTI=m CONFIG_NET_FOU_IP_TUNNELS=y -CONFIG_GENEVE_CORE=m CONFIG_INET_AH=m CONFIG_INET_ESP=m CONFIG_INET_IPCOMP=m @@ -63,10 +63,12 @@ CONFIG_INET_XFRM_MODE_BEET=m # CONFIG_INET_LRO is not set CONFIG_INET_DIAG=m CONFIG_INET_UDP_DIAG=m +CONFIG_IPV6=m CONFIG_IPV6_ROUTER_PREF=y CONFIG_INET6_AH=m CONFIG_INET6_ESP=m CONFIG_INET6_IPCOMP=m +CONFIG_IPV6_ILA=m CONFIG_IPV6_VTI=m CONFIG_IPV6_GRE=m CONFIG_NETFILTER=y @@ -175,6 +177,7 @@ CONFIG_IP_SET_HASH_NETIFACE=m CONFIG_IP_SET_LIST_SET=m CONFIG_NF_CONNTRACK_IPV4=m CONFIG_NFT_CHAIN_ROUTE_IPV4=m +CONFIG_NFT_DUP_IPV4=m CONFIG_NF_TABLES_ARP=m CONFIG_NF_LOG_ARP=m CONFIG_NFT_CHAIN_NAT_IPV4=m @@ -202,6 +205,7 @@ CONFIG_IP_NF_ARPFILTER=m CONFIG_IP_NF_ARP_MANGLE=m CONFIG_NF_CONNTRACK_IPV6=m CONFIG_NFT_CHAIN_ROUTE_IPV6=m +CONFIG_NFT_DUP_IPV6=m CONFIG_NFT_CHAIN_NAT_IPV6=m CONFIG_NFT_MASQ_IPV6=m CONFIG_NFT_REDIR_IPV6=m @@ -267,6 +271,7 @@ CONFIG_NETLINK_DIAG=m CONFIG_MPLS=y CONFIG_NET_MPLS_GSO=m CONFIG_MPLS_ROUTING=m +CONFIG_MPLS_IPTUNNEL=m # CONFIG_WIRELESS is not set # CONFIG_UEVENT_HELPER is not set CONFIG_DEVTMPFS=y @@ -343,6 +348,7 @@ CONFIG_MVME16x_NET=y # CONFIG_NET_VENDOR_SAMSUNG is not set # CONFIG_NET_VENDOR_SEEQ is not set # CONFIG_NET_VENDOR_STMICRO is not set +# CONFIG_NET_VENDOR_SYNOPSYS is not set # CONFIG_NET_VENDOR_VIA is not set # CONFIG_NET_VENDOR_WIZNET is not set CONFIG_PPP=m @@ -488,6 +494,7 @@ CONFIG_TEST_USER_COPY=m CONFIG_TEST_BPF=m CONFIG_TEST_FIRMWARE=m CONFIG_TEST_UDELAY=m +CONFIG_TEST_STATIC_KEYS=m CONFIG_EARLY_PRINTK=y CONFIG_ENCRYPTED_KEYS=m CONFIG_CRYPTO_RSA=m diff --git a/arch/m68k/configs/q40_defconfig b/arch/m68k/configs/q40_defconfig index 35355c1bc714..64b71664a303 100644 --- a/arch/m68k/configs/q40_defconfig +++ b/arch/m68k/configs/q40_defconfig @@ -10,6 +10,7 @@ 
CONFIG_LOG_BUF_SHIFT=16 # CONFIG_PID_NS is not set # CONFIG_NET_NS is not set CONFIG_BLK_DEV_INITRD=y +CONFIG_USERFAULTFD=y CONFIG_SLAB=y CONFIG_MODULES=y CONFIG_MODULE_UNLOAD=y @@ -53,7 +54,6 @@ CONFIG_NET_IPGRE_DEMUX=m CONFIG_NET_IPGRE=m CONFIG_NET_IPVTI=m CONFIG_NET_FOU_IP_TUNNELS=y -CONFIG_GENEVE_CORE=m CONFIG_INET_AH=m CONFIG_INET_ESP=m CONFIG_INET_IPCOMP=m @@ -63,10 +63,12 @@ CONFIG_INET_XFRM_MODE_BEET=m # CONFIG_INET_LRO is not set CONFIG_INET_DIAG=m CONFIG_INET_UDP_DIAG=m +CONFIG_IPV6=m CONFIG_IPV6_ROUTER_PREF=y CONFIG_INET6_AH=m CONFIG_INET6_ESP=m CONFIG_INET6_IPCOMP=m +CONFIG_IPV6_ILA=m CONFIG_IPV6_VTI=m CONFIG_IPV6_GRE=m CONFIG_NETFILTER=y @@ -175,6 +177,7 @@ CONFIG_IP_SET_HASH_NETIFACE=m CONFIG_IP_SET_LIST_SET=m CONFIG_NF_CONNTRACK_IPV4=m CONFIG_NFT_CHAIN_ROUTE_IPV4=m +CONFIG_NFT_DUP_IPV4=m CONFIG_NF_TABLES_ARP=m CONFIG_NF_LOG_ARP=m CONFIG_NFT_CHAIN_NAT_IPV4=m @@ -202,6 +205,7 @@ CONFIG_IP_NF_ARPFILTER=m CONFIG_IP_NF_ARP_MANGLE=m CONFIG_NF_CONNTRACK_IPV6=m CONFIG_NFT_CHAIN_ROUTE_IPV6=m +CONFIG_NFT_DUP_IPV6=m CONFIG_NFT_CHAIN_NAT_IPV6=m CONFIG_NFT_MASQ_IPV6=m CONFIG_NFT_REDIR_IPV6=m @@ -267,6 +271,7 @@ CONFIG_NETLINK_DIAG=m CONFIG_MPLS=y CONFIG_NET_MPLS_GSO=m CONFIG_MPLS_ROUTING=m +CONFIG_MPLS_IPTUNNEL=m # CONFIG_WIRELESS is not set # CONFIG_UEVENT_HELPER is not set CONFIG_DEVTMPFS=y @@ -354,6 +359,7 @@ CONFIG_NE2000=y # CONFIG_NET_VENDOR_SEEQ is not set # CONFIG_NET_VENDOR_SMSC is not set # CONFIG_NET_VENDOR_STMICRO is not set +# CONFIG_NET_VENDOR_SYNOPSYS is not set # CONFIG_NET_VENDOR_VIA is not set # CONFIG_NET_VENDOR_WIZNET is not set CONFIG_PLIP=m @@ -510,6 +516,7 @@ CONFIG_TEST_USER_COPY=m CONFIG_TEST_BPF=m CONFIG_TEST_FIRMWARE=m CONFIG_TEST_UDELAY=m +CONFIG_TEST_STATIC_KEYS=m CONFIG_EARLY_PRINTK=y CONFIG_ENCRYPTED_KEYS=m CONFIG_CRYPTO_RSA=m diff --git a/arch/m68k/configs/sun3_defconfig b/arch/m68k/configs/sun3_defconfig index 8442d267b877..9a4cab78a2ea 100644 --- a/arch/m68k/configs/sun3_defconfig +++ b/arch/m68k/configs/sun3_defconfig @@ -10,6 +10,7 @@ CONFIG_LOG_BUF_SHIFT=16 # CONFIG_PID_NS is not set # CONFIG_NET_NS is not set CONFIG_BLK_DEV_INITRD=y +CONFIG_USERFAULTFD=y CONFIG_SLAB=y CONFIG_MODULES=y CONFIG_MODULE_UNLOAD=y @@ -50,7 +51,6 @@ CONFIG_NET_IPGRE_DEMUX=m CONFIG_NET_IPGRE=m CONFIG_NET_IPVTI=m CONFIG_NET_FOU_IP_TUNNELS=y -CONFIG_GENEVE_CORE=m CONFIG_INET_AH=m CONFIG_INET_ESP=m CONFIG_INET_IPCOMP=m @@ -60,10 +60,12 @@ CONFIG_INET_XFRM_MODE_BEET=m # CONFIG_INET_LRO is not set CONFIG_INET_DIAG=m CONFIG_INET_UDP_DIAG=m +CONFIG_IPV6=m CONFIG_IPV6_ROUTER_PREF=y CONFIG_INET6_AH=m CONFIG_INET6_ESP=m CONFIG_INET6_IPCOMP=m +CONFIG_IPV6_ILA=m CONFIG_IPV6_VTI=m CONFIG_IPV6_GRE=m CONFIG_NETFILTER=y @@ -172,6 +174,7 @@ CONFIG_IP_SET_HASH_NETIFACE=m CONFIG_IP_SET_LIST_SET=m CONFIG_NF_CONNTRACK_IPV4=m CONFIG_NFT_CHAIN_ROUTE_IPV4=m +CONFIG_NFT_DUP_IPV4=m CONFIG_NF_TABLES_ARP=m CONFIG_NF_LOG_ARP=m CONFIG_NFT_CHAIN_NAT_IPV4=m @@ -199,6 +202,7 @@ CONFIG_IP_NF_ARPFILTER=m CONFIG_IP_NF_ARP_MANGLE=m CONFIG_NF_CONNTRACK_IPV6=m CONFIG_NFT_CHAIN_ROUTE_IPV6=m +CONFIG_NFT_DUP_IPV6=m CONFIG_NFT_CHAIN_NAT_IPV6=m CONFIG_NFT_MASQ_IPV6=m CONFIG_NFT_REDIR_IPV6=m @@ -264,6 +268,7 @@ CONFIG_NETLINK_DIAG=m CONFIG_MPLS=y CONFIG_NET_MPLS_GSO=m CONFIG_MPLS_ROUTING=m +CONFIG_MPLS_IPTUNNEL=m # CONFIG_WIRELESS is not set # CONFIG_UEVENT_HELPER is not set CONFIG_DEVTMPFS=y @@ -341,6 +346,7 @@ CONFIG_SUN3_82586=y # CONFIG_NET_VENDOR_SEEQ is not set # CONFIG_NET_VENDOR_STMICRO is not set # CONFIG_NET_VENDOR_SUN is not set +# CONFIG_NET_VENDOR_SYNOPSYS is not set # CONFIG_NET_VENDOR_VIA is not set # 
CONFIG_NET_VENDOR_WIZNET is not set CONFIG_PPP=m @@ -489,6 +495,7 @@ CONFIG_TEST_USER_COPY=m CONFIG_TEST_BPF=m CONFIG_TEST_FIRMWARE=m CONFIG_TEST_UDELAY=m +CONFIG_TEST_STATIC_KEYS=m CONFIG_ENCRYPTED_KEYS=m CONFIG_CRYPTO_RSA=m CONFIG_CRYPTO_MANAGER=y diff --git a/arch/m68k/configs/sun3x_defconfig b/arch/m68k/configs/sun3x_defconfig index 0e1b542e1555..1a2eaac13dbd 100644 --- a/arch/m68k/configs/sun3x_defconfig +++ b/arch/m68k/configs/sun3x_defconfig @@ -10,6 +10,7 @@ CONFIG_LOG_BUF_SHIFT=16 # CONFIG_PID_NS is not set # CONFIG_NET_NS is not set CONFIG_BLK_DEV_INITRD=y +CONFIG_USERFAULTFD=y CONFIG_SLAB=y CONFIG_MODULES=y CONFIG_MODULE_UNLOAD=y @@ -50,7 +51,6 @@ CONFIG_NET_IPGRE_DEMUX=m CONFIG_NET_IPGRE=m CONFIG_NET_IPVTI=m CONFIG_NET_FOU_IP_TUNNELS=y -CONFIG_GENEVE_CORE=m CONFIG_INET_AH=m CONFIG_INET_ESP=m CONFIG_INET_IPCOMP=m @@ -60,10 +60,12 @@ CONFIG_INET_XFRM_MODE_BEET=m # CONFIG_INET_LRO is not set CONFIG_INET_DIAG=m CONFIG_INET_UDP_DIAG=m +CONFIG_IPV6=m CONFIG_IPV6_ROUTER_PREF=y CONFIG_INET6_AH=m CONFIG_INET6_ESP=m CONFIG_INET6_IPCOMP=m +CONFIG_IPV6_ILA=m CONFIG_IPV6_VTI=m CONFIG_IPV6_GRE=m CONFIG_NETFILTER=y @@ -172,6 +174,7 @@ CONFIG_IP_SET_HASH_NETIFACE=m CONFIG_IP_SET_LIST_SET=m CONFIG_NF_CONNTRACK_IPV4=m CONFIG_NFT_CHAIN_ROUTE_IPV4=m +CONFIG_NFT_DUP_IPV4=m CONFIG_NF_TABLES_ARP=m CONFIG_NF_LOG_ARP=m CONFIG_NFT_CHAIN_NAT_IPV4=m @@ -199,6 +202,7 @@ CONFIG_IP_NF_ARPFILTER=m CONFIG_IP_NF_ARP_MANGLE=m CONFIG_NF_CONNTRACK_IPV6=m CONFIG_NFT_CHAIN_ROUTE_IPV6=m +CONFIG_NFT_DUP_IPV6=m CONFIG_NFT_CHAIN_NAT_IPV6=m CONFIG_NFT_MASQ_IPV6=m CONFIG_NFT_REDIR_IPV6=m @@ -264,6 +268,7 @@ CONFIG_NETLINK_DIAG=m CONFIG_MPLS=y CONFIG_NET_MPLS_GSO=m CONFIG_MPLS_ROUTING=m +CONFIG_MPLS_IPTUNNEL=m # CONFIG_WIRELESS is not set # CONFIG_UEVENT_HELPER is not set CONFIG_DEVTMPFS=y @@ -341,6 +346,7 @@ CONFIG_SUN3LANCE=y # CONFIG_NET_VENDOR_SAMSUNG is not set # CONFIG_NET_VENDOR_SEEQ is not set # CONFIG_NET_VENDOR_STMICRO is not set +# CONFIG_NET_VENDOR_SYNOPSYS is not set # CONFIG_NET_VENDOR_VIA is not set # CONFIG_NET_VENDOR_WIZNET is not set CONFIG_PPP=m @@ -489,6 +495,7 @@ CONFIG_TEST_USER_COPY=m CONFIG_TEST_BPF=m CONFIG_TEST_FIRMWARE=m CONFIG_TEST_UDELAY=m +CONFIG_TEST_STATIC_KEYS=m CONFIG_EARLY_PRINTK=y CONFIG_ENCRYPTED_KEYS=m CONFIG_CRYPTO_RSA=m diff --git a/arch/m68k/include/asm/linkage.h b/arch/m68k/include/asm/linkage.h index 5a822bb790f7..066e74f666ae 100644 --- a/arch/m68k/include/asm/linkage.h +++ b/arch/m68k/include/asm/linkage.h @@ -4,4 +4,34 @@ #define __ALIGN .align 4 #define __ALIGN_STR ".align 4" +/* + * Make sure the compiler doesn't do anything stupid with the + * arguments on the stack - they are owned by the *caller*, not + * the callee. This just fools gcc into not spilling into them, + * and keeps it from doing tailcall recursion and/or using the + * stack slots for temporaries, since they are live and "used" + * all the way to the end of the function. + */ +#define asmlinkage_protect(n, ret, args...) \ + __asmlinkage_protect##n(ret, ##args) +#define __asmlinkage_protect_n(ret, args...) 
\ + __asm__ __volatile__ ("" : "=r" (ret) : "0" (ret), ##args) +#define __asmlinkage_protect0(ret) \ + __asmlinkage_protect_n(ret) +#define __asmlinkage_protect1(ret, arg1) \ + __asmlinkage_protect_n(ret, "m" (arg1)) +#define __asmlinkage_protect2(ret, arg1, arg2) \ + __asmlinkage_protect_n(ret, "m" (arg1), "m" (arg2)) +#define __asmlinkage_protect3(ret, arg1, arg2, arg3) \ + __asmlinkage_protect_n(ret, "m" (arg1), "m" (arg2), "m" (arg3)) +#define __asmlinkage_protect4(ret, arg1, arg2, arg3, arg4) \ + __asmlinkage_protect_n(ret, "m" (arg1), "m" (arg2), "m" (arg3), \ + "m" (arg4)) +#define __asmlinkage_protect5(ret, arg1, arg2, arg3, arg4, arg5) \ + __asmlinkage_protect_n(ret, "m" (arg1), "m" (arg2), "m" (arg3), \ + "m" (arg4), "m" (arg5)) +#define __asmlinkage_protect6(ret, arg1, arg2, arg3, arg4, arg5, arg6) \ + __asmlinkage_protect_n(ret, "m" (arg1), "m" (arg2), "m" (arg3), \ + "m" (arg4), "m" (arg5), "m" (arg6)) + #endif diff --git a/arch/m68k/include/asm/unistd.h b/arch/m68k/include/asm/unistd.h index 244e0dbe45db..0793a7f17417 100644 --- a/arch/m68k/include/asm/unistd.h +++ b/arch/m68k/include/asm/unistd.h @@ -4,7 +4,7 @@ #include <uapi/asm/unistd.h> -#define NR_syscalls 356 +#define NR_syscalls 375 #define __ARCH_WANT_OLD_READDIR #define __ARCH_WANT_OLD_STAT diff --git a/arch/m68k/include/uapi/asm/unistd.h b/arch/m68k/include/uapi/asm/unistd.h index 61fb6cb9d2ae..5e6fae6c275f 100644 --- a/arch/m68k/include/uapi/asm/unistd.h +++ b/arch/m68k/include/uapi/asm/unistd.h @@ -361,5 +361,24 @@ #define __NR_memfd_create 353 #define __NR_bpf 354 #define __NR_execveat 355 +#define __NR_socket 356 +#define __NR_socketpair 357 +#define __NR_bind 358 +#define __NR_connect 359 +#define __NR_listen 360 +#define __NR_accept4 361 +#define __NR_getsockopt 362 +#define __NR_setsockopt 363 +#define __NR_getsockname 364 +#define __NR_getpeername 365 +#define __NR_sendto 366 +#define __NR_sendmsg 367 +#define __NR_recvfrom 368 +#define __NR_recvmsg 369 +#define __NR_shutdown 370 +#define __NR_recvmmsg 371 +#define __NR_sendmmsg 372 +#define __NR_userfaultfd 373 +#define __NR_membarrier 374 #endif /* _UAPI_ASM_M68K_UNISTD_H_ */ diff --git a/arch/m68k/kernel/syscalltable.S b/arch/m68k/kernel/syscalltable.S index a0ec4303f2c8..5dd0e80042f5 100644 --- a/arch/m68k/kernel/syscalltable.S +++ b/arch/m68k/kernel/syscalltable.S @@ -376,4 +376,22 @@ ENTRY(sys_call_table) .long sys_memfd_create .long sys_bpf .long sys_execveat /* 355 */ - + .long sys_socket + .long sys_socketpair + .long sys_bind + .long sys_connect + .long sys_listen /* 360 */ + .long sys_accept4 + .long sys_getsockopt + .long sys_setsockopt + .long sys_getsockname + .long sys_getpeername /* 365 */ + .long sys_sendto + .long sys_sendmsg + .long sys_recvfrom + .long sys_recvmsg + .long sys_shutdown /* 370 */ + .long sys_recvmmsg + .long sys_sendmmsg + .long sys_userfaultfd + .long sys_membarrier diff --git a/arch/metag/include/asm/Kbuild b/arch/metag/include/asm/Kbuild index df31353fd200..29acb89daaaa 100644 --- a/arch/metag/include/asm/Kbuild +++ b/arch/metag/include/asm/Kbuild @@ -54,4 +54,5 @@ generic-y += ucontext.h generic-y += unaligned.h generic-y += user.h generic-y += vga.h +generic-y += word-at-a-time.h generic-y += xor.h diff --git a/arch/microblaze/include/asm/Kbuild b/arch/microblaze/include/asm/Kbuild index 2f222f355c4b..b0ae88c9fed9 100644 --- a/arch/microblaze/include/asm/Kbuild +++ b/arch/microblaze/include/asm/Kbuild @@ -10,3 +10,4 @@ generic-y += mm-arch-hooks.h generic-y += preempt.h generic-y += syscalls.h generic-y += 
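The m68k asmlinkage_protect macros added in the linkage.h hunk above work by feeding the return value and every stack-passed argument into an empty asm as inputs, so GCC keeps the caller-owned argument slots live instead of reusing them as scratch space or emitting a tail call. A minimal user-space sketch of the same trick, assuming a plain GCC target (the names here are illustrative, not kernel API):

/*
 * Standalone sketch of the asmlinkage_protect idea: the empty asm ties
 * "ret" to the memory operands, so the compiler treats the argument
 * slots as used until the very end of the function.
 */
#define protect2(ret, arg1, arg2) \
	__asm__ __volatile__ ("" : "=r" (ret) : "0" (ret), "m" (arg1), "m" (arg2))

long example_syscall(long arg1, long arg2)
{
	long ret = arg1 + arg2;		/* stand-in for the real body */

	protect2(ret, arg1, arg2);	/* argument slots stay untouched */
	return ret;
}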
trace_clock.h +generic-y += word-at-a-time.h diff --git a/arch/mips/ath79/irq.c b/arch/mips/ath79/irq.c index 15ecb4831e12..eeb3953ed8ac 100644 --- a/arch/mips/ath79/irq.c +++ b/arch/mips/ath79/irq.c @@ -293,8 +293,26 @@ static int __init ath79_misc_intc_of_init( return 0; } -IRQCHIP_DECLARE(ath79_misc_intc, "qca,ar7100-misc-intc", - ath79_misc_intc_of_init); + +static int __init ar7100_misc_intc_of_init( + struct device_node *node, struct device_node *parent) +{ + ath79_misc_irq_chip.irq_mask_ack = ar71xx_misc_irq_mask; + return ath79_misc_intc_of_init(node, parent); +} + +IRQCHIP_DECLARE(ar7100_misc_intc, "qca,ar7100-misc-intc", + ar7100_misc_intc_of_init); + +static int __init ar7240_misc_intc_of_init( + struct device_node *node, struct device_node *parent) +{ + ath79_misc_irq_chip.irq_ack = ar724x_misc_irq_ack; + return ath79_misc_intc_of_init(node, parent); +} + +IRQCHIP_DECLARE(ar7240_misc_intc, "qca,ar7240-misc-intc", + ar7240_misc_intc_of_init); static int __init ar79_cpu_intc_of_init( struct device_node *node, struct device_node *parent) diff --git a/arch/mips/cavium-octeon/setup.c b/arch/mips/cavium-octeon/setup.c index 89a628455bc2..bd634259eab9 100644 --- a/arch/mips/cavium-octeon/setup.c +++ b/arch/mips/cavium-octeon/setup.c @@ -933,7 +933,7 @@ void __init plat_mem_setup(void) while ((boot_mem_map.nr_map < BOOT_MEM_MAP_MAX) && (total < MAX_MEMORY)) { memory = cvmx_bootmem_phy_alloc(mem_alloc_size, - __pa_symbol(&__init_end), -1, + __pa_symbol(&_end), -1, 0x100000, CVMX_BOOTMEM_FLAG_NO_LOCKING); if (memory >= 0) { diff --git a/arch/mips/include/asm/Kbuild b/arch/mips/include/asm/Kbuild index 40ec4ca3f946..c7fe4d01e79c 100644 --- a/arch/mips/include/asm/Kbuild +++ b/arch/mips/include/asm/Kbuild @@ -17,4 +17,5 @@ generic-y += segment.h generic-y += serial.h generic-y += trace_clock.h generic-y += user.h +generic-y += word-at-a-time.h generic-y += xor.h diff --git a/arch/mips/include/asm/cpu-features.h b/arch/mips/include/asm/cpu-features.h index 9801ac982655..fe67f12ac239 100644 --- a/arch/mips/include/asm/cpu-features.h +++ b/arch/mips/include/asm/cpu-features.h @@ -20,6 +20,9 @@ #ifndef cpu_has_tlb #define cpu_has_tlb (cpu_data[0].options & MIPS_CPU_TLB) #endif +#ifndef cpu_has_ftlb +#define cpu_has_ftlb (cpu_data[0].options & MIPS_CPU_FTLB) +#endif #ifndef cpu_has_tlbinv #define cpu_has_tlbinv (cpu_data[0].options & MIPS_CPU_TLBINV) #endif diff --git a/arch/mips/include/asm/cpu.h b/arch/mips/include/asm/cpu.h index cd89e9855775..82ad15f11049 100644 --- a/arch/mips/include/asm/cpu.h +++ b/arch/mips/include/asm/cpu.h @@ -385,6 +385,7 @@ enum cpu_type_enum { #define MIPS_CPU_CDMM 0x4000000000ull /* CPU has Common Device Memory Map */ #define MIPS_CPU_BP_GHIST 0x8000000000ull /* R12K+ Branch Prediction Global History */ #define MIPS_CPU_SP 0x10000000000ull /* Small (1KB) page support */ +#define MIPS_CPU_FTLB 0x20000000000ull /* CPU has Fixed-page-size TLB */ /* * CPU ASE encodings diff --git a/arch/mips/include/asm/maar.h b/arch/mips/include/asm/maar.h index b02891f9caaf..21d9607c80d7 100644 --- a/arch/mips/include/asm/maar.h +++ b/arch/mips/include/asm/maar.h @@ -66,6 +66,15 @@ static inline void write_maar_pair(unsigned idx, phys_addr_t lower, } /** + * maar_init() - initialise MAARs + * + * Performs initialisation of MAARs for the current CPU, making use of the + * platform's implementation of platform_maar_init where necessary and + * duplicating the setup it provides on secondary CPUs.
+ */ +extern void maar_init(void); + +/** * struct maar_config - MAAR configuration data * @lower: The lowest address that the MAAR pair will affect. Must be * aligned to a 2^16 byte boundary. diff --git a/arch/mips/include/asm/mips-cm.h b/arch/mips/include/asm/mips-cm.h index d75b75e78ebb..1f1927ab4269 100644 --- a/arch/mips/include/asm/mips-cm.h +++ b/arch/mips/include/asm/mips-cm.h @@ -194,6 +194,7 @@ BUILD_CM_RW(reg3_mask, MIPS_CM_GCB_OFS + 0xc8) BUILD_CM_R_(gic_status, MIPS_CM_GCB_OFS + 0xd0) BUILD_CM_R_(cpc_status, MIPS_CM_GCB_OFS + 0xf0) BUILD_CM_RW(l2_config, MIPS_CM_GCB_OFS + 0x130) +BUILD_CM_RW(sys_config2, MIPS_CM_GCB_OFS + 0x150) /* Core Local & Core Other register accessor functions */ BUILD_CM_Cx_RW(reset_release, 0x00) @@ -316,6 +317,10 @@ BUILD_CM_Cx_R_(tcid_8_priority, 0x80) #define CM_GCR_L2_CONFIG_ASSOC_SHF 0 #define CM_GCR_L2_CONFIG_ASSOC_MSK (_ULCAST_(0xff) << 0) +/* GCR_SYS_CONFIG2 register fields */ +#define CM_GCR_SYS_CONFIG2_MAXVPW_SHF 0 +#define CM_GCR_SYS_CONFIG2_MAXVPW_MSK (_ULCAST_(0xf) << 0) + /* GCR_Cx_COHERENCE register fields */ #define CM_GCR_Cx_COHERENCE_COHDOMAINEN_SHF 0 #define CM_GCR_Cx_COHERENCE_COHDOMAINEN_MSK (_ULCAST_(0xff) << 0) @@ -405,4 +410,38 @@ static inline int mips_cm_revision(void) return read_gcr_rev(); } +/** + * mips_cm_max_vp_width() - return the width in bits of VP indices + * + * Return: the width, in bits, of VP indices in fields that combine core & VP + * indices. + */ +static inline unsigned int mips_cm_max_vp_width(void) +{ + extern int smp_num_siblings; + + if (mips_cm_revision() >= CM_REV_CM3) + return read_gcr_sys_config2() & CM_GCR_SYS_CONFIG2_MAXVPW_MSK; + + return smp_num_siblings; +} + +/** + * mips_cm_vp_id() - calculate the hardware VP ID for a CPU + * @cpu: the CPU whose VP ID to calculate + * + * Hardware such as the GIC uses identifiers for VPs which may not match the + * CPU numbers used by Linux. This function calculates the hardware VP + * identifier corresponding to a given CPU. + * + * Return: the VP ID for the CPU. + */ +static inline unsigned int mips_cm_vp_id(unsigned int cpu) +{ + unsigned int core = cpu_data[cpu].core; + unsigned int vp = cpu_vpe_id(&cpu_data[cpu]); + + return (core * mips_cm_max_vp_width()) + vp; +} + #endif /* __MIPS_ASM_MIPS_CM_H__ */ diff --git a/arch/mips/include/asm/mipsregs.h b/arch/mips/include/asm/mipsregs.h index d3cd8eac81e3..c64781cf649f 100644 --- a/arch/mips/include/asm/mipsregs.h +++ b/arch/mips/include/asm/mipsregs.h @@ -487,6 +487,8 @@ /* Bits specific to the MIPS32/64 PRA. 
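The mips_cm_vp_id() helper added in the mips-cm.h hunk above linearises a (core, VP) pair into a single hardware identifier, using the VP-index width reported by CM3's GCR_SYS_CONFIG2 or falling back to smp_num_siblings on older systems. A quick standalone sketch of the arithmetic, with made-up numbers:

#include <stdio.h>

/* Mirrors mips_cm_vp_id(): hardware VP ID = core * max_vp_width + vp. */
static unsigned int vp_id(unsigned int core, unsigned int vp,
			  unsigned int max_vp_width)
{
	return (core * max_vp_width) + vp;
}

int main(void)
{
	/* With 2 VPs per core, core 1 / VP 1 is hardware VP ID 3. */
	printf("%u\n", vp_id(1, 1, 2));
	return 0;
}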
*/ #define MIPS_CONF_MT (_ULCAST_(7) << 7) +#define MIPS_CONF_MT_TLB (_ULCAST_(1) << 7) +#define MIPS_CONF_MT_FTLB (_ULCAST_(4) << 7) #define MIPS_CONF_AR (_ULCAST_(7) << 10) #define MIPS_CONF_AT (_ULCAST_(3) << 13) #define MIPS_CONF_M (_ULCAST_(1) << 31) diff --git a/arch/mips/include/uapi/asm/unistd.h b/arch/mips/include/uapi/asm/unistd.h index c03088f9f514..cfabadb135d9 100644 --- a/arch/mips/include/uapi/asm/unistd.h +++ b/arch/mips/include/uapi/asm/unistd.h @@ -377,16 +377,18 @@ #define __NR_memfd_create (__NR_Linux + 354) #define __NR_bpf (__NR_Linux + 355) #define __NR_execveat (__NR_Linux + 356) +#define __NR_userfaultfd (__NR_Linux + 357) +#define __NR_membarrier (__NR_Linux + 358) /* * Offset of the last Linux o32 flavoured syscall */ -#define __NR_Linux_syscalls 356 +#define __NR_Linux_syscalls 358 #endif /* _MIPS_SIM == _MIPS_SIM_ABI32 */ #define __NR_O32_Linux 4000 -#define __NR_O32_Linux_syscalls 356 +#define __NR_O32_Linux_syscalls 358 #if _MIPS_SIM == _MIPS_SIM_ABI64 @@ -711,16 +713,18 @@ #define __NR_memfd_create (__NR_Linux + 314) #define __NR_bpf (__NR_Linux + 315) #define __NR_execveat (__NR_Linux + 316) +#define __NR_userfaultfd (__NR_Linux + 317) +#define __NR_membarrier (__NR_Linux + 318) /* * Offset of the last Linux 64-bit flavoured syscall */ -#define __NR_Linux_syscalls 316 +#define __NR_Linux_syscalls 318 #endif /* _MIPS_SIM == _MIPS_SIM_ABI64 */ #define __NR_64_Linux 5000 -#define __NR_64_Linux_syscalls 316 +#define __NR_64_Linux_syscalls 318 #if _MIPS_SIM == _MIPS_SIM_NABI32 @@ -1049,15 +1053,17 @@ #define __NR_memfd_create (__NR_Linux + 318) #define __NR_bpf (__NR_Linux + 319) #define __NR_execveat (__NR_Linux + 320) +#define __NR_userfaultfd (__NR_Linux + 321) +#define __NR_membarrier (__NR_Linux + 322) /* * Offset of the last N32 flavoured syscall */ -#define __NR_Linux_syscalls 320 +#define __NR_Linux_syscalls 322 #endif /* _MIPS_SIM == _MIPS_SIM_NABI32 */ #define __NR_N32_Linux 6000 -#define __NR_N32_Linux_syscalls 320 +#define __NR_N32_Linux_syscalls 322 #endif /* _UAPI_ASM_UNISTD_H */ diff --git a/arch/mips/jz4740/board-qi_lb60.c b/arch/mips/jz4740/board-qi_lb60.c index 4e62bf85d0b0..459cb017306c 100644 --- a/arch/mips/jz4740/board-qi_lb60.c +++ b/arch/mips/jz4740/board-qi_lb60.c @@ -26,6 +26,7 @@ #include <linux/power/jz4740-battery.h> #include <linux/power/gpio-charger.h> +#include <asm/mach-jz4740/gpio.h> #include <asm/mach-jz4740/jz4740_fb.h> #include <asm/mach-jz4740/jz4740_mmc.h> #include <asm/mach-jz4740/jz4740_nand.h> diff --git a/arch/mips/jz4740/gpio.c b/arch/mips/jz4740/gpio.c index a74e181058b0..8c6d76c9b2d6 100644 --- a/arch/mips/jz4740/gpio.c +++ b/arch/mips/jz4740/gpio.c @@ -28,6 +28,7 @@ #include <linux/seq_file.h> #include <asm/mach-jz4740/base.h> +#include <asm/mach-jz4740/gpio.h> #define JZ4740_GPIO_BASE_A (32*0) #define JZ4740_GPIO_BASE_B (32*1) diff --git a/arch/mips/kernel/cps-vec.S b/arch/mips/kernel/cps-vec.S index 9f71c06aebf6..209ded16806b 100644 --- a/arch/mips/kernel/cps-vec.S +++ b/arch/mips/kernel/cps-vec.S @@ -39,6 +39,7 @@ mfc0 \dest, CP0_CONFIG, 3 andi \dest, \dest, MIPS_CONF3_MT beqz \dest, \nomt + nop .endm .section .text.cps-vec @@ -223,10 +224,9 @@ LEAF(excep_ejtag) END(excep_ejtag) LEAF(mips_cps_core_init) -#ifdef CONFIG_MIPS_MT +#ifdef CONFIG_MIPS_MT_SMP /* Check that the core implements the MT ASE */ has_mt t0, 3f - nop .set push .set mips64r2 @@ -310,8 +310,9 @@ LEAF(mips_cps_boot_vpes) PTR_ADDU t0, t0, t1 /* Calculate this VPEs ID. 
If the core doesn't support MT use 0 */ + li t9, 0 +#ifdef CONFIG_MIPS_MT_SMP has_mt ta2, 1f - li t9, 0 /* Find the number of VPEs present in the core */ mfc0 t1, CP0_MVPCONF0 @@ -330,6 +331,7 @@ LEAF(mips_cps_boot_vpes) /* Retrieve the VPE ID from EBase.CPUNum */ mfc0 t9, $15, 1 and t9, t9, t1 +#endif 1: /* Calculate a pointer to this VPEs struct vpe_boot_config */ li t1, VPEBOOTCFG_SIZE @@ -337,7 +339,7 @@ LEAF(mips_cps_boot_vpes) PTR_L ta3, COREBOOTCFG_VPECONFIG(t0) PTR_ADDU v0, v0, ta3 -#ifdef CONFIG_MIPS_MT +#ifdef CONFIG_MIPS_MT_SMP /* If the core doesn't support MT then return */ bnez ta2, 1f @@ -451,7 +453,7 @@ LEAF(mips_cps_boot_vpes) 2: .set pop -#endif /* CONFIG_MIPS_MT */ +#endif /* CONFIG_MIPS_MT_SMP */ /* Return */ jr ra diff --git a/arch/mips/kernel/cpu-probe.c b/arch/mips/kernel/cpu-probe.c index 571a8e6ea5bd..09a51d091941 100644 --- a/arch/mips/kernel/cpu-probe.c +++ b/arch/mips/kernel/cpu-probe.c @@ -410,16 +410,18 @@ static int set_ftlb_enable(struct cpuinfo_mips *c, int enable) static inline unsigned int decode_config0(struct cpuinfo_mips *c) { unsigned int config0; - int isa; + int isa, mt; config0 = read_c0_config(); /* * Look for Standard TLB or Dual VTLB and FTLB */ - if ((((config0 & MIPS_CONF_MT) >> 7) == 1) || - (((config0 & MIPS_CONF_MT) >> 7) == 4)) + mt = config0 & MIPS_CONF_MT; + if (mt == MIPS_CONF_MT_TLB) c->options |= MIPS_CPU_TLB; + else if (mt == MIPS_CONF_MT_FTLB) + c->options |= MIPS_CPU_TLB | MIPS_CPU_FTLB; isa = (config0 & MIPS_CONF_AT) >> 13; switch (isa) { @@ -559,15 +561,18 @@ static inline unsigned int decode_config4(struct cpuinfo_mips *c) if (cpu_has_tlb) { if (((config4 & MIPS_CONF4_IE) >> 29) == 2) c->options |= MIPS_CPU_TLBINV; + /* - * This is a bit ugly. R6 has dropped that field from - * config4 and the only valid configuration is VTLB+FTLB so - * set a good value for mmuextdef for that case. + * R6 has dropped the MMUExtDef field from config4. + * On R6 the fields always describe the FTLB, and only if it is + * present according to Config.MT. 
*/ - if (cpu_has_mips_r6) + if (!cpu_has_mips_r6) + mmuextdef = config4 & MIPS_CONF4_MMUEXTDEF; + else if (cpu_has_ftlb) mmuextdef = MIPS_CONF4_MMUEXTDEF_VTLBSIZEEXT; else - mmuextdef = config4 & MIPS_CONF4_MMUEXTDEF; + mmuextdef = 0; switch (mmuextdef) { case MIPS_CONF4_MMUEXTDEF_MMUSIZEEXT: diff --git a/arch/mips/kernel/octeon_switch.S b/arch/mips/kernel/octeon_switch.S index 423ae83af1fb..3375745b9198 100644 --- a/arch/mips/kernel/octeon_switch.S +++ b/arch/mips/kernel/octeon_switch.S @@ -18,7 +18,7 @@ .set pop /* * task_struct *resume(task_struct *prev, task_struct *next, - * struct thread_info *next_ti, int usedfpu) + * struct thread_info *next_ti) */ .align 7 LEAF(resume) @@ -28,30 +28,6 @@ cpu_save_nonscratch a0 LONG_S ra, THREAD_REG31(a0) - /* - * check if we need to save FPU registers - */ - .set push - .set noreorder - beqz a3, 1f - PTR_L t3, TASK_THREAD_INFO(a0) - .set pop - - /* - * clear saved user stack CU1 bit - */ - LONG_L t0, ST_OFF(t3) - li t1, ~ST0_CU1 - and t0, t0, t1 - LONG_S t0, ST_OFF(t3) - - .set push - .set arch=mips64r2 - fpu_save_double a0 t0 t1 # c0_status passed in t0 - # clobbers t1 - .set pop -1: - #if CONFIG_CAVIUM_OCTEON_CVMSEG_SIZE > 0 /* Check if we need to store CVMSEG state */ dmfc0 t0, $11,7 /* CvmMemCtl */ diff --git a/arch/mips/kernel/r2300_switch.S b/arch/mips/kernel/r2300_switch.S index 5087a4b72e6b..ac27ef7d4d0e 100644 --- a/arch/mips/kernel/r2300_switch.S +++ b/arch/mips/kernel/r2300_switch.S @@ -31,18 +31,8 @@ #define ST_OFF (_THREAD_SIZE - 32 - PT_SIZE + PT_STATUS) /* - * FPU context is saved iff the process has used it's FPU in the current - * time slice as indicated by TIF_USEDFPU. In any case, the CU1 bit for user - * space STATUS register should be 0, so that a process *always* starts its - * userland with FPU disabled after each context switch. - * - * FPU will be enabled as soon as the process accesses FPU again, through - * do_cpu() trap. 
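The two cpu-probe.c hunks above replace magic shifts with named Config.MT values and make the R6 MMUExtDef fallback depend on an FTLB actually being present. A rough standalone restatement of the combined logic; the CONF4_* field encodings here are illustrative assumptions, not copied from mipsregs.h:

#include <stdint.h>

#define MIPS_CONF_MT       (7u << 7)
#define MIPS_CONF_MT_TLB   (1u << 7)	/* standard VTLB */
#define MIPS_CONF_MT_FTLB  (4u << 7)	/* dual VTLB and FTLB */

/* Assumed field encodings, for illustration only. */
#define CONF4_MMUEXTDEF              (3u << 14)
#define CONF4_MMUEXTDEF_VTLBSIZEEXT  (3u << 14)

struct probe { int has_tlb, has_ftlb; };

static void decode_mt(uint32_t config0, struct probe *c)
{
	uint32_t mt = config0 & MIPS_CONF_MT;

	if (mt == MIPS_CONF_MT_TLB)
		c->has_tlb = 1;
	else if (mt == MIPS_CONF_MT_FTLB)
		c->has_tlb = c->has_ftlb = 1;
}

static uint32_t pick_mmuextdef(uint32_t config4, int is_r6,
			       const struct probe *c)
{
	if (!is_r6)
		return config4 & CONF4_MMUEXTDEF;
	/* R6 dropped the field: only assume VTLBSIZEEXT if an FTLB exists. */
	return c->has_ftlb ? CONF4_MMUEXTDEF_VTLBSIZEEXT : 0;
}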
- */ - -/* * task_struct *resume(task_struct *prev, task_struct *next, - * struct thread_info *next_ti, int usedfpu) + * struct thread_info *next_ti) */ LEAF(resume) mfc0 t1, CP0_STATUS @@ -50,22 +40,6 @@ LEAF(resume) cpu_save_nonscratch a0 sw ra, THREAD_REG31(a0) - beqz a3, 1f - - PTR_L t3, TASK_THREAD_INFO(a0) - - /* - * clear saved user stack CU1 bit - */ - lw t0, ST_OFF(t3) - li t1, ~ST0_CU1 - and t0, t0, t1 - sw t0, ST_OFF(t3) - - fpu_save_single a0, t0 # clobbers t0 - -1: - #if defined(CONFIG_CC_STACKPROTECTOR) && !defined(CONFIG_SMP) PTR_LA t8, __stack_chk_guard LONG_L t9, TASK_STACK_CANARY(a1) diff --git a/arch/mips/kernel/scall32-o32.S b/arch/mips/kernel/scall32-o32.S index 4cc13508d967..65a74e4f0f45 100644 --- a/arch/mips/kernel/scall32-o32.S +++ b/arch/mips/kernel/scall32-o32.S @@ -36,16 +36,8 @@ NESTED(handle_sys, PT_SIZE, sp) lw t1, PT_EPC(sp) # skip syscall on return subu v0, v0, __NR_O32_Linux # check syscall number - sltiu t0, v0, __NR_O32_Linux_syscalls + 1 addiu t1, 4 # skip to next instruction sw t1, PT_EPC(sp) - beqz t0, illegal_syscall - - sll t0, v0, 2 - la t1, sys_call_table - addu t1, t0 - lw t2, (t1) # syscall routine - beqz t2, illegal_syscall sw a3, PT_R26(sp) # save a3 for syscall restarting @@ -96,6 +88,16 @@ loads_done: li t1, _TIF_WORK_SYSCALL_ENTRY and t0, t1 bnez t0, syscall_trace_entry # -> yes +syscall_common: + sltiu t0, v0, __NR_O32_Linux_syscalls + 1 + beqz t0, illegal_syscall + + sll t0, v0, 2 + la t1, sys_call_table + addu t1, t0 + lw t2, (t1) # syscall routine + + beqz t2, illegal_syscall jalr t2 # Do The Real Thing (TM) @@ -116,7 +118,7 @@ o32_syscall_exit: syscall_trace_entry: SAVE_STATIC - move s0, t2 + move s0, v0 move a0, sp /* @@ -129,27 +131,18 @@ syscall_trace_entry: 1: jal syscall_trace_enter - bltz v0, 2f # seccomp failed? Skip syscall + bltz v0, 1f # seccomp failed? Skip syscall + + move v0, s0 # restore syscall - move t0, s0 RESTORE_STATIC lw a0, PT_R4(sp) # Restore argument registers lw a1, PT_R5(sp) lw a2, PT_R6(sp) lw a3, PT_R7(sp) - jalr t0 - - li t0, -EMAXERRNO - 1 # error? 
- sltu t0, t0, v0 - sw t0, PT_R7(sp) # set error flag - beqz t0, 1f - - lw t1, PT_R2(sp) # syscall number - negu v0 # error - sw t1, PT_R0(sp) # save it for syscall restarting -1: sw v0, PT_R2(sp) # result + j syscall_common -2: j syscall_exit +1: j syscall_exit /* ------------------------------------------------------------------------ */ @@ -599,3 +592,5 @@ EXPORT(sys_call_table) PTR sys_memfd_create PTR sys_bpf /* 4355 */ PTR sys_execveat + PTR sys_userfaultfd + PTR sys_membarrier diff --git a/arch/mips/kernel/scall64-64.S b/arch/mips/kernel/scall64-64.S index a6f6b762c47a..e732981cf99f 100644 --- a/arch/mips/kernel/scall64-64.S +++ b/arch/mips/kernel/scall64-64.S @@ -39,18 +39,11 @@ NESTED(handle_sys64, PT_SIZE, sp) .set at #endif - dsubu t0, v0, __NR_64_Linux # check syscall number - sltiu t0, t0, __NR_64_Linux_syscalls + 1 #if !defined(CONFIG_MIPS32_O32) && !defined(CONFIG_MIPS32_N32) ld t1, PT_EPC(sp) # skip syscall on return daddiu t1, 4 # skip to next instruction sd t1, PT_EPC(sp) #endif - beqz t0, illegal_syscall - - dsll t0, v0, 3 # offset into table - ld t2, (sys_call_table - (__NR_64_Linux * 8))(t0) - # syscall routine sd a3, PT_R26(sp) # save a3 for syscall restarting @@ -59,6 +52,17 @@ NESTED(handle_sys64, PT_SIZE, sp) and t0, t1, t0 bnez t0, syscall_trace_entry +syscall_common: + dsubu t2, v0, __NR_64_Linux + sltiu t0, t2, __NR_64_Linux_syscalls + 1 + beqz t0, illegal_syscall + + dsll t0, t2, 3 # offset into table + dla t2, sys_call_table + daddu t0, t2, t0 + ld t2, (t0) # syscall routine + beqz t2, illegal_syscall + jalr t2 # Do The Real Thing (TM) li t0, -EMAXERRNO - 1 # error? @@ -78,14 +82,14 @@ n64_syscall_exit: syscall_trace_entry: SAVE_STATIC - move s0, t2 + move s0, v0 move a0, sp move a1, v0 jal syscall_trace_enter - bltz v0, 2f # seccomp failed? Skip syscall + bltz v0, 1f # seccomp failed? Skip syscall - move t0, s0 + move v0, s0 RESTORE_STATIC ld a0, PT_R4(sp) # Restore argument registers ld a1, PT_R5(sp) @@ -93,19 +97,9 @@ syscall_trace_entry: ld a3, PT_R7(sp) ld a4, PT_R8(sp) ld a5, PT_R9(sp) - jalr t0 - - li t0, -EMAXERRNO - 1 # error? - sltu t0, t0, v0 - sd t0, PT_R7(sp) # set error flag - beqz t0, 1f - - ld t1, PT_R2(sp) # syscall number - dnegu v0 # error - sd t1, PT_R0(sp) # save it for syscall restarting -1: sd v0, PT_R2(sp) # result + j syscall_common -2: j syscall_exit +1: j syscall_exit illegal_syscall: /* This also isn't a 64-bit syscall, throw an error. */ @@ -436,4 +430,6 @@ EXPORT(sys_call_table) PTR sys_memfd_create PTR sys_bpf /* 5315 */ PTR sys_execveat + PTR sys_userfaultfd + PTR sys_membarrier .size sys_call_table,.-sys_call_table diff --git a/arch/mips/kernel/scall64-n32.S b/arch/mips/kernel/scall64-n32.S index 4b2010654c46..c79484397584 100644 --- a/arch/mips/kernel/scall64-n32.S +++ b/arch/mips/kernel/scall64-n32.S @@ -52,6 +52,7 @@ NESTED(handle_sysn32, PT_SIZE, sp) and t0, t1, t0 bnez t0, n32_syscall_trace_entry +syscall_common: jalr t2 # Do The Real Thing (TM) li t0, -EMAXERRNO - 1 # error? @@ -75,9 +76,9 @@ n32_syscall_trace_entry: move a1, v0 jal syscall_trace_enter - bltz v0, 2f # seccomp failed? Skip syscall + bltz v0, 1f # seccomp failed? Skip syscall - move t0, s0 + move t2, s0 RESTORE_STATIC ld a0, PT_R4(sp) # Restore argument registers ld a1, PT_R5(sp) @@ -85,19 +86,9 @@ n32_syscall_trace_entry: ld a3, PT_R7(sp) ld a4, PT_R8(sp) ld a5, PT_R9(sp) - jalr t0 + j syscall_common - li t0, -EMAXERRNO - 1 # error? 
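The syscall cleanups here move the duplicated result epilogue behind a shared syscall_common path. The convention those removed lines implement looks roughly like this in C; the struct fields and the EMAXERRNO value are sketched assumptions, not the kernel's pt_regs:

#include <stdint.h>

#define EMAXERRNO 1133	/* assumed MIPS upper bound on errno values */

struct mips_regs_sketch {
	unsigned long r0;	/* saved syscall number, for restart */
	unsigned long r2;	/* v0: syscall number on entry, result on exit */
	unsigned long r7;	/* a3: error flag */
};

static void store_result(struct mips_regs_sketch *regs, long v0)
{
	if ((unsigned long)v0 >= (unsigned long)-EMAXERRNO) {
		regs->r7 = 1;		/* tell user space this is an error */
		regs->r0 = regs->r2;	/* keep the number for restarting */
		v0 = -v0;		/* positive errno in v0 */
	} else {
		regs->r7 = 0;
	}
	regs->r2 = (unsigned long)v0;
}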
- sltu t0, t0, v0 - sd t0, PT_R7(sp) # set error flag - beqz t0, 1f - - ld t1, PT_R2(sp) # syscall number - dnegu v0 # error - sd t1, PT_R0(sp) # save it for syscall restarting -1: sd v0, PT_R2(sp) # result - -2: j syscall_exit +1: j syscall_exit not_n32_scall: /* This is not an n32 compatibility syscall, pass it on to @@ -429,4 +420,6 @@ EXPORT(sysn32_call_table) PTR sys_memfd_create PTR sys_bpf PTR compat_sys_execveat /* 6320 */ + PTR sys_userfaultfd + PTR sys_membarrier .size sysn32_call_table,.-sysn32_call_table diff --git a/arch/mips/kernel/scall64-o32.S b/arch/mips/kernel/scall64-o32.S index f543ff4feef9..6369cfd390c6 100644 --- a/arch/mips/kernel/scall64-o32.S +++ b/arch/mips/kernel/scall64-o32.S @@ -87,6 +87,7 @@ loads_done: and t0, t1, t0 bnez t0, trace_a_syscall +syscall_common: jalr t2 # Do The Real Thing (TM) li t0, -EMAXERRNO - 1 # error? @@ -130,9 +131,9 @@ trace_a_syscall: 1: jal syscall_trace_enter - bltz v0, 2f # seccomp failed? Skip syscall + bltz v0, 1f # seccomp failed? Skip syscall - move t0, s0 + move t2, s0 RESTORE_STATIC ld a0, PT_R4(sp) # Restore argument registers ld a1, PT_R5(sp) @@ -142,19 +143,9 @@ trace_a_syscall: ld a5, PT_R9(sp) ld a6, PT_R10(sp) ld a7, PT_R11(sp) # For indirect syscalls - jalr t0 + j syscall_common - li t0, -EMAXERRNO - 1 # error? - sltu t0, t0, v0 - sd t0, PT_R7(sp) # set error flag - beqz t0, 1f - - ld t1, PT_R2(sp) # syscall number - dnegu v0 # error - sd t1, PT_R0(sp) # save it for syscall restarting -1: sd v0, PT_R2(sp) # result - -2: j syscall_exit +1: j syscall_exit /* ------------------------------------------------------------------------ */ @@ -584,4 +575,6 @@ EXPORT(sys32_call_table) PTR sys_memfd_create PTR sys_bpf /* 4355 */ PTR compat_sys_execveat + PTR sys_userfaultfd + PTR sys_membarrier .size sys32_call_table,.-sys32_call_table diff --git a/arch/mips/kernel/setup.c b/arch/mips/kernel/setup.c index 35b8316002f8..479515109e5b 100644 --- a/arch/mips/kernel/setup.c +++ b/arch/mips/kernel/setup.c @@ -338,7 +338,7 @@ static void __init bootmem_init(void) if (end <= reserved_end) continue; #ifdef CONFIG_BLK_DEV_INITRD - /* mapstart should be after initrd_end */ + /* Skip zones before initrd and initrd itself */ if (initrd_end && end <= (unsigned long)PFN_UP(__pa(initrd_end))) continue; #endif @@ -371,6 +371,14 @@ static void __init bootmem_init(void) max_low_pfn = PFN_DOWN(HIGHMEM_START); } +#ifdef CONFIG_BLK_DEV_INITRD + /* + * mapstart should be after initrd_end + */ + if (initrd_end) + mapstart = max(mapstart, (unsigned long)PFN_UP(__pa(initrd_end))); +#endif + /* * Initialize the boot-time allocator with low memory only. 
*/ diff --git a/arch/mips/kernel/smp.c b/arch/mips/kernel/smp.c index a31896c33716..bd4385a8e6e8 100644 --- a/arch/mips/kernel/smp.c +++ b/arch/mips/kernel/smp.c @@ -42,6 +42,7 @@ #include <asm/mmu_context.h> #include <asm/time.h> #include <asm/setup.h> +#include <asm/maar.h> cpumask_t cpu_callin_map; /* Bitmask of started secondaries */ @@ -157,6 +158,7 @@ asmlinkage void start_secondary(void) mips_clockevent_init(); mp_ops->init_secondary(); cpu_report(); + maar_init(); /* * XXX parity protection should be folded in here when it's converted diff --git a/arch/mips/loongson64/common/env.c b/arch/mips/loongson64/common/env.c index f6c44dd332e2..d6d07ad56180 100644 --- a/arch/mips/loongson64/common/env.c +++ b/arch/mips/loongson64/common/env.c @@ -64,6 +64,9 @@ void __init prom_init_env(void) } if (memsize == 0) memsize = 256; + + loongson_sysconf.nr_uarts = 1; + pr_info("memsize=%u, highmemsize=%u\n", memsize, highmemsize); #else struct boot_params *boot_p; diff --git a/arch/mips/mm/dma-default.c b/arch/mips/mm/dma-default.c index a914dc1cb6d1..d8117be729a2 100644 --- a/arch/mips/mm/dma-default.c +++ b/arch/mips/mm/dma-default.c @@ -100,7 +100,7 @@ static gfp_t massage_gfp_flags(const struct device *dev, gfp_t gfp) else #endif #if defined(CONFIG_ZONE_DMA) && !defined(CONFIG_ZONE_DMA32) - if (dev->coherent_dma_mask < DMA_BIT_MASK(64)) + if (dev->coherent_dma_mask < DMA_BIT_MASK(sizeof(phys_addr_t) * 8)) dma_flag = __GFP_DMA; else #endif diff --git a/arch/mips/mm/init.c b/arch/mips/mm/init.c index 66d0f49c5bec..8770e619185e 100644 --- a/arch/mips/mm/init.c +++ b/arch/mips/mm/init.c @@ -44,6 +44,7 @@ #include <asm/pgalloc.h> #include <asm/tlb.h> #include <asm/fixmap.h> +#include <asm/maar.h> /* * We have up to 8 empty zeroed pages so we can map one of the right colour @@ -252,6 +253,119 @@ void __init fixrange_init(unsigned long start, unsigned long end, #endif } +unsigned __weak platform_maar_init(unsigned num_pairs) +{ + struct maar_config cfg[BOOT_MEM_MAP_MAX]; + unsigned i, num_configured, num_cfg = 0; + phys_addr_t skip; + + for (i = 0; i < boot_mem_map.nr_map; i++) { + switch (boot_mem_map.map[i].type) { + case BOOT_MEM_RAM: + case BOOT_MEM_INIT_RAM: + break; + default: + continue; + } + + skip = 0x10000 - (boot_mem_map.map[i].addr & 0xffff); + + cfg[num_cfg].lower = boot_mem_map.map[i].addr; + cfg[num_cfg].lower += skip; + + cfg[num_cfg].upper = cfg[num_cfg].lower; + cfg[num_cfg].upper += boot_mem_map.map[i].size - 1; + cfg[num_cfg].upper -= skip; + + cfg[num_cfg].attrs = MIPS_MAAR_S; + num_cfg++; + } + + num_configured = maar_config(cfg, num_cfg, num_pairs); + if (num_configured < num_cfg) + pr_warn("Not enough MAAR pairs (%u) for all bootmem regions (%u)\n", + num_pairs, num_cfg); + + return num_configured; +} + +void maar_init(void) +{ + unsigned num_maars, used, i; + phys_addr_t lower, upper, attr; + static struct { + struct maar_config cfgs[3]; + unsigned used; + } recorded = { { { 0 } }, 0 }; + + if (!cpu_has_maar) + return; + + /* Detect the number of MAARs */ + write_c0_maari(~0); + back_to_back_c0_hazard(); + num_maars = read_c0_maari() + 1; + + /* MAARs should be in pairs */ + WARN_ON(num_maars % 2); + + /* Set MAARs using values we recorded already */ + if (recorded.used) { + used = maar_config(recorded.cfgs, recorded.used, num_maars / 2); + BUG_ON(used != recorded.used); + } else { + /* Configure the required MAARs */ + used = platform_maar_init(num_maars / 2); + } + + /* Disable any further MAARs */ + for (i = (used * 2); i < num_maars; i++) { + write_c0_maari(i); + 
back_to_back_c0_hazard(); + write_c0_maar(0); + back_to_back_c0_hazard(); + } + + if (recorded.used) + return; + + pr_info("MAAR configuration:\n"); + for (i = 0; i < num_maars; i += 2) { + write_c0_maari(i); + back_to_back_c0_hazard(); + upper = read_c0_maar(); + + write_c0_maari(i + 1); + back_to_back_c0_hazard(); + lower = read_c0_maar(); + + attr = lower & upper; + lower = (lower & MIPS_MAAR_ADDR) << 4; + upper = ((upper & MIPS_MAAR_ADDR) << 4) | 0xffff; + + pr_info(" [%d]: ", i / 2); + if (!(attr & MIPS_MAAR_V)) { + pr_cont("disabled\n"); + continue; + } + + pr_cont("%pa-%pa", &lower, &upper); + + if (attr & MIPS_MAAR_S) + pr_cont(" speculate"); + + pr_cont("\n"); + + /* Record the setup for use on secondary CPUs */ + if (used <= ARRAY_SIZE(recorded.cfgs)) { + recorded.cfgs[recorded.used].lower = lower; + recorded.cfgs[recorded.used].upper = upper; + recorded.cfgs[recorded.used].attrs = attr; + recorded.used++; + } + } +} + #ifndef CONFIG_NEED_MULTIPLE_NODES int page_is_ram(unsigned long pagenr) { @@ -334,69 +448,6 @@ static inline void mem_init_free_highmem(void) #endif } -unsigned __weak platform_maar_init(unsigned num_pairs) -{ - struct maar_config cfg[BOOT_MEM_MAP_MAX]; - unsigned i, num_configured, num_cfg = 0; - phys_addr_t skip; - - for (i = 0; i < boot_mem_map.nr_map; i++) { - switch (boot_mem_map.map[i].type) { - case BOOT_MEM_RAM: - case BOOT_MEM_INIT_RAM: - break; - default: - continue; - } - - skip = 0x10000 - (boot_mem_map.map[i].addr & 0xffff); - - cfg[num_cfg].lower = boot_mem_map.map[i].addr; - cfg[num_cfg].lower += skip; - - cfg[num_cfg].upper = cfg[num_cfg].lower; - cfg[num_cfg].upper += boot_mem_map.map[i].size - 1; - cfg[num_cfg].upper -= skip; - - cfg[num_cfg].attrs = MIPS_MAAR_S; - num_cfg++; - } - - num_configured = maar_config(cfg, num_cfg, num_pairs); - if (num_configured < num_cfg) - pr_warn("Not enough MAAR pairs (%u) for all bootmem regions (%u)\n", - num_pairs, num_cfg); - - return num_configured; -} - -static void maar_init(void) -{ - unsigned num_maars, used, i; - - if (!cpu_has_maar) - return; - - /* Detect the number of MAARs */ - write_c0_maari(~0); - back_to_back_c0_hazard(); - num_maars = read_c0_maari() + 1; - - /* MAARs should be in pairs */ - WARN_ON(num_maars % 2); - - /* Configure the required MAARs */ - used = platform_maar_init(num_maars / 2); - - /* Disable any further MAARs */ - for (i = (used * 2); i < num_maars; i++) { - write_c0_maari(i); - back_to_back_c0_hazard(); - write_c0_maar(0); - back_to_back_c0_hazard(); - } -} - void __init mem_init(void) { #ifdef CONFIG_HIGHMEM diff --git a/arch/mips/net/bpf_jit_asm.S b/arch/mips/net/bpf_jit_asm.S index e92726099be0..5d2e0c8d29c0 100644 --- a/arch/mips/net/bpf_jit_asm.S +++ b/arch/mips/net/bpf_jit_asm.S @@ -57,15 +57,28 @@ LEAF(sk_load_word) is_offset_negative(word) - .globl sk_load_word_positive -sk_load_word_positive: +FEXPORT(sk_load_word_positive) is_offset_in_header(4, word) /* Offset within header boundaries */ PTR_ADDU t1, $r_skb_data, offset + .set reorder lw $r_A, 0(t1) + .set noreorder #ifdef CONFIG_CPU_LITTLE_ENDIAN +# if defined(__mips_isa_rev) && (__mips_isa_rev >= 2) wsbh t0, $r_A rotr $r_A, t0, 16 +# else + sll t0, $r_A, 24 + srl t1, $r_A, 24 + srl t2, $r_A, 8 + or t0, t0, t1 + andi t2, t2, 0xff00 + andi t1, $r_A, 0xff00 + or t0, t0, t2 + sll t1, t1, 8 + or $r_A, t0, t1 +# endif #endif jr $r_ra move $r_ret, zero @@ -73,15 +86,24 @@ sk_load_word_positive: LEAF(sk_load_half) is_offset_negative(half) - .globl sk_load_half_positive -sk_load_half_positive: 
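The bpf_jit_asm.S changes above add a pre-MIPSr2 fallback for targets without wsbh/rotr; the same pattern repeats for the half-word loads below. The shift-and-mask sequence is an open-coded 32-bit byte swap, which in C reads roughly:

#include <stdint.h>

/* Same operations as the "# else" branch above, expressed in C. */
static uint32_t swab32_sketch(uint32_t x)
{
	uint32_t t0 = x << 24;			/* byte 0 -> byte 3 */
	uint32_t t1 = x >> 24;			/* byte 3 -> byte 0 */
	uint32_t t2 = (x >> 8) & 0xff00;	/* byte 2 -> byte 1 */

	t0 |= t1 | t2;
	t1 = (x & 0xff00) << 8;			/* byte 1 -> byte 2 */
	return t0 | t1;				/* 0x12345678 -> 0x78563412 */
}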
+FEXPORT(sk_load_half_positive) is_offset_in_header(2, half) /* Offset within header boundaries */ PTR_ADDU t1, $r_skb_data, offset + .set reorder lh $r_A, 0(t1) + .set noreorder #ifdef CONFIG_CPU_LITTLE_ENDIAN +# if defined(__mips_isa_rev) && (__mips_isa_rev >= 2) wsbh t0, $r_A seh $r_A, t0 +# else + sll t0, $r_A, 24 + andi t1, $r_A, 0xff00 + sra t0, t0, 16 + srl t1, t1, 8 + or $r_A, t0, t1 +# endif #endif jr $r_ra move $r_ret, zero @@ -89,8 +111,7 @@ sk_load_half_positive: LEAF(sk_load_byte) is_offset_negative(byte) - .globl sk_load_byte_positive -sk_load_byte_positive: +FEXPORT(sk_load_byte_positive) is_offset_in_header(1, byte) /* Offset within header boundaries */ PTR_ADDU t1, $r_skb_data, offset @@ -148,23 +169,47 @@ sk_load_byte_positive: NESTED(bpf_slow_path_word, (6 * SZREG), $r_sp) bpf_slow_path_common(4) #ifdef CONFIG_CPU_LITTLE_ENDIAN +# if defined(__mips_isa_rev) && (__mips_isa_rev >= 2) wsbh t0, $r_s0 jr $r_ra rotr $r_A, t0, 16 -#endif +# else + sll t0, $r_s0, 24 + srl t1, $r_s0, 24 + srl t2, $r_s0, 8 + or t0, t0, t1 + andi t2, t2, 0xff00 + andi t1, $r_s0, 0xff00 + or t0, t0, t2 + sll t1, t1, 8 + jr $r_ra + or $r_A, t0, t1 +# endif +#else jr $r_ra - move $r_A, $r_s0 + move $r_A, $r_s0 +#endif END(bpf_slow_path_word) NESTED(bpf_slow_path_half, (6 * SZREG), $r_sp) bpf_slow_path_common(2) #ifdef CONFIG_CPU_LITTLE_ENDIAN +# if defined(__mips_isa_rev) && (__mips_isa_rev >= 2) jr $r_ra wsbh $r_A, $r_s0 -#endif +# else + sll t0, $r_s0, 8 + andi t1, $r_s0, 0xff00 + andi t0, t0, 0xff00 + srl t1, t1, 8 + jr $r_ra + or $r_A, t0, t1 +# endif +#else jr $r_ra move $r_A, $r_s0 +#endif END(bpf_slow_path_half) diff --git a/arch/mn10300/include/asm/Kbuild b/arch/mn10300/include/asm/Kbuild index 6edb9ee6128e..1c8dd0f5cd5d 100644 --- a/arch/mn10300/include/asm/Kbuild +++ b/arch/mn10300/include/asm/Kbuild @@ -9,3 +9,4 @@ generic-y += mm-arch-hooks.h generic-y += preempt.h generic-y += sections.h generic-y += trace_clock.h +generic-y += word-at-a-time.h diff --git a/arch/nios2/include/asm/Kbuild b/arch/nios2/include/asm/Kbuild index 914864eb5a25..d63330e88379 100644 --- a/arch/nios2/include/asm/Kbuild +++ b/arch/nios2/include/asm/Kbuild @@ -61,4 +61,5 @@ generic-y += types.h generic-y += unaligned.h generic-y += user.h generic-y += vga.h +generic-y += word-at-a-time.h generic-y += xor.h diff --git a/arch/powerpc/include/asm/Kbuild b/arch/powerpc/include/asm/Kbuild index ab9f4e0ed4cf..ac1662956e0c 100644 --- a/arch/powerpc/include/asm/Kbuild +++ b/arch/powerpc/include/asm/Kbuild @@ -7,3 +7,4 @@ generic-y += mcs_spinlock.h generic-y += preempt.h generic-y += rwsem.h generic-y += vtime.h +generic-y += word-at-a-time.h diff --git a/arch/s390/include/asm/Kbuild b/arch/s390/include/asm/Kbuild index 5ad26dd94d77..9043d2e1e2ae 100644 --- a/arch/s390/include/asm/Kbuild +++ b/arch/s390/include/asm/Kbuild @@ -6,3 +6,4 @@ generic-y += mcs_spinlock.h generic-y += mm-arch-hooks.h generic-y += preempt.h generic-y += trace_clock.h +generic-y += word-at-a-time.h diff --git a/arch/score/include/asm/Kbuild b/arch/score/include/asm/Kbuild index 92ffe397b893..a05218ff3fe4 100644 --- a/arch/score/include/asm/Kbuild +++ b/arch/score/include/asm/Kbuild @@ -13,3 +13,4 @@ generic-y += sections.h generic-y += trace_clock.h generic-y += xor.h generic-y += serial.h +generic-y += word-at-a-time.h diff --git a/arch/tile/gxio/mpipe.c b/arch/tile/gxio/mpipe.c index ee186e13dfe6..f102048d9c0e 100644 --- a/arch/tile/gxio/mpipe.c +++ b/arch/tile/gxio/mpipe.c @@ -19,6 +19,7 @@ #include <linux/errno.h> #include <linux/io.h> #include 
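The tile gxio hunk below drops the driver-local strscpy in favour of the kernel-wide one, which is why the truncation checks flip from "== 0" to "< 0": the old helper returned 0 on overflow, while the common strscpy returns -E2BIG and still NUL-terminates. A user-space approximation of the new semantics (the errno value is a stand-in):

#include <string.h>
#include <stddef.h>

#define E2BIG_SKETCH 7	/* stand-in for the kernel's -E2BIG */

static long strscpy_sketch(char *dest, const char *src, size_t size)
{
	size_t len = strnlen(src, size);

	if (len == size) {			/* src does not fit */
		if (size) {
			memcpy(dest, src, size - 1);
			dest[size - 1] = '\0';	/* still NUL-terminated */
		}
		return -E2BIG_SKETCH;
	}
	memcpy(dest, src, len + 1);		/* includes the NUL */
	return (long)len;
}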
<linux/module.h> +#include <linux/string.h> #include <gxio/iorpc_globals.h> #include <gxio/iorpc_mpipe.h> @@ -29,32 +30,6 @@ /* HACK: Avoid pointless "shadow" warnings. */ #define link link_shadow -/** - * strscpy - Copy a C-string into a sized buffer, but only if it fits - * @dest: Where to copy the string to - * @src: Where to copy the string from - * @size: size of destination buffer - * - * Use this routine to avoid copying too-long strings. - * The routine returns the total number of bytes copied - * (including the trailing NUL) or zero if the buffer wasn't - * big enough. To ensure that programmers pay attention - * to the return code, the destination has a single NUL - * written at the front (if size is non-zero) when the - * buffer is not big enough. - */ -static size_t strscpy(char *dest, const char *src, size_t size) -{ - size_t len = strnlen(src, size) + 1; - if (len > size) { - if (size) - dest[0] = '\0'; - return 0; - } - memcpy(dest, src, len); - return len; -} - int gxio_mpipe_init(gxio_mpipe_context_t *context, unsigned int mpipe_index) { char file[32]; @@ -540,7 +515,7 @@ int gxio_mpipe_link_instance(const char *link_name) if (!context) return GXIO_ERR_NO_DEVICE; - if (strscpy(name.name, link_name, sizeof(name.name)) == 0) + if (strscpy(name.name, link_name, sizeof(name.name)) < 0) return GXIO_ERR_NO_DEVICE; return gxio_mpipe_info_instance_aux(context, name); @@ -559,7 +534,7 @@ int gxio_mpipe_link_enumerate_mac(int idx, char *link_name, uint8_t *link_mac) rv = gxio_mpipe_info_enumerate_aux(context, idx, &name, &mac); if (rv >= 0) { - if (strscpy(link_name, name.name, sizeof(name.name)) == 0) + if (strscpy(link_name, name.name, sizeof(name.name)) < 0) return GXIO_ERR_INVAL_MEMORY_SIZE; memcpy(link_mac, mac.mac, sizeof(mac.mac)); } @@ -576,7 +551,7 @@ int gxio_mpipe_link_open(gxio_mpipe_link_t *link, _gxio_mpipe_link_name_t name; int rv; - if (strscpy(name.name, link_name, sizeof(name.name)) == 0) + if (strscpy(name.name, link_name, sizeof(name.name)) < 0) return GXIO_ERR_NO_DEVICE; rv = gxio_mpipe_link_open_aux(context, name, flags); diff --git a/arch/tile/include/asm/Kbuild b/arch/tile/include/asm/Kbuild index ba35c41c71ff..0b6cacaad933 100644 --- a/arch/tile/include/asm/Kbuild +++ b/arch/tile/include/asm/Kbuild @@ -40,4 +40,5 @@ generic-y += termbits.h generic-y += termios.h generic-y += trace_clock.h generic-y += types.h +generic-y += word-at-a-time.h generic-y += xor.h diff --git a/arch/tile/kernel/usb.c b/arch/tile/kernel/usb.c index f0da5a237e94..9f1e05e12255 100644 --- a/arch/tile/kernel/usb.c +++ b/arch/tile/kernel/usb.c @@ -22,6 +22,7 @@ #include <linux/platform_device.h> #include <linux/usb/tilegx.h> #include <linux/init.h> +#include <linux/module.h> #include <linux/types.h> static u64 ehci_dmamask = DMA_BIT_MASK(32); diff --git a/arch/um/include/asm/Kbuild b/arch/um/include/asm/Kbuild index 149ec55f9c46..904f3ebf4220 100644 --- a/arch/um/include/asm/Kbuild +++ b/arch/um/include/asm/Kbuild @@ -25,4 +25,5 @@ generic-y += preempt.h generic-y += switch_to.h generic-y += topology.h generic-y += trace_clock.h +generic-y += word-at-a-time.h generic-y += xor.h diff --git a/arch/unicore32/include/asm/Kbuild b/arch/unicore32/include/asm/Kbuild index 1fc7a286dc6f..256c45b3ae34 100644 --- a/arch/unicore32/include/asm/Kbuild +++ b/arch/unicore32/include/asm/Kbuild @@ -62,4 +62,5 @@ generic-y += ucontext.h generic-y += unaligned.h generic-y += user.h generic-y += vga.h +generic-y += word-at-a-time.h generic-y += xor.h diff --git a/arch/x86/include/asm/cpufeature.h 
b/arch/x86/include/asm/cpufeature.h index e6cf2ad350d1..9727b3b48bd1 100644 --- a/arch/x86/include/asm/cpufeature.h +++ b/arch/x86/include/asm/cpufeature.h @@ -193,7 +193,7 @@ #define X86_FEATURE_HW_PSTATE ( 7*32+ 8) /* AMD HW-PState */ #define X86_FEATURE_PROC_FEEDBACK ( 7*32+ 9) /* AMD ProcFeedbackInterface */ #define X86_FEATURE_HWP ( 7*32+ 10) /* "hwp" Intel HWP */ -#define X86_FEATURE_HWP_NOITFY ( 7*32+ 11) /* Intel HWP_NOTIFY */ +#define X86_FEATURE_HWP_NOTIFY ( 7*32+ 11) /* Intel HWP_NOTIFY */ #define X86_FEATURE_HWP_ACT_WINDOW ( 7*32+ 12) /* Intel HWP_ACT_WINDOW */ #define X86_FEATURE_HWP_EPP ( 7*32+13) /* Intel HWP_EPP */ #define X86_FEATURE_HWP_PKG_REQ ( 7*32+14) /* Intel HWP_PKG_REQ */ diff --git a/arch/x86/include/asm/efi.h b/arch/x86/include/asm/efi.h index ab5f1d447ef9..ae68be92f755 100644 --- a/arch/x86/include/asm/efi.h +++ b/arch/x86/include/asm/efi.h @@ -86,6 +86,7 @@ extern u64 asmlinkage efi_call(void *fp, ...); extern void __iomem *__init efi_ioremap(unsigned long addr, unsigned long size, u32 type, u64 attribute); +#ifdef CONFIG_KASAN /* * CONFIG_KASAN may redefine memset to __memset. __memset function is present * only in kernel binary. Since the EFI stub linked into a separate binary it @@ -95,6 +96,7 @@ extern void __iomem *__init efi_ioremap(unsigned long addr, unsigned long size, #undef memcpy #undef memset #undef memmove +#endif #endif /* CONFIG_X86_32 */ diff --git a/arch/x86/include/asm/msr-index.h b/arch/x86/include/asm/msr-index.h index b98b471a3b7e..b8c14bb7fc8f 100644 --- a/arch/x86/include/asm/msr-index.h +++ b/arch/x86/include/asm/msr-index.h @@ -141,6 +141,8 @@ #define DEBUGCTLMSR_BTS_OFF_USR (1UL << 10) #define DEBUGCTLMSR_FREEZE_LBRS_ON_PMI (1UL << 11) +#define MSR_PEBS_FRONTEND 0x000003f7 + #define MSR_IA32_POWER_CTL 0x000001fc #define MSR_IA32_MC0_CTL 0x00000400 diff --git a/arch/x86/include/asm/pvclock-abi.h b/arch/x86/include/asm/pvclock-abi.h index 655e07a48f6c..67f08230103a 100644 --- a/arch/x86/include/asm/pvclock-abi.h +++ b/arch/x86/include/asm/pvclock-abi.h @@ -41,6 +41,7 @@ struct pvclock_wall_clock { #define PVCLOCK_TSC_STABLE_BIT (1 << 0) #define PVCLOCK_GUEST_STOPPED (1 << 1) +/* PVCLOCK_COUNTS_FROM_ZERO broke ABI and can't be used anymore. 
*/ #define PVCLOCK_COUNTS_FROM_ZERO (1 << 2) #endif /* __ASSEMBLY__ */ #endif /* _ASM_X86_PVCLOCK_ABI_H */ diff --git a/arch/x86/include/uapi/asm/bitsperlong.h b/arch/x86/include/uapi/asm/bitsperlong.h index b0ae1c4dc791..217909b4d6f5 100644 --- a/arch/x86/include/uapi/asm/bitsperlong.h +++ b/arch/x86/include/uapi/asm/bitsperlong.h @@ -1,7 +1,7 @@ #ifndef __ASM_X86_BITSPERLONG_H #define __ASM_X86_BITSPERLONG_H -#ifdef __x86_64__ +#if defined(__x86_64__) && !defined(__ILP32__) # define __BITS_PER_LONG 64 #else # define __BITS_PER_LONG 32 diff --git a/arch/x86/kernel/cpu/mshyperv.c b/arch/x86/kernel/cpu/mshyperv.c index 381c8b9b3a33..20e242ea1bc4 100644 --- a/arch/x86/kernel/cpu/mshyperv.c +++ b/arch/x86/kernel/cpu/mshyperv.c @@ -34,11 +34,10 @@ struct ms_hyperv_info ms_hyperv; EXPORT_SYMBOL_GPL(ms_hyperv); -static void (*hv_kexec_handler)(void); -static void (*hv_crash_handler)(struct pt_regs *regs); - #if IS_ENABLED(CONFIG_HYPERV) static void (*vmbus_handler)(void); +static void (*hv_kexec_handler)(void); +static void (*hv_crash_handler)(struct pt_regs *regs); void hyperv_vector_handler(struct pt_regs *regs) { @@ -96,8 +95,8 @@ void hv_remove_crash_handler(void) hv_crash_handler = NULL; } EXPORT_SYMBOL_GPL(hv_remove_crash_handler); -#endif +#ifdef CONFIG_KEXEC_CORE static void hv_machine_shutdown(void) { if (kexec_in_progress && hv_kexec_handler) @@ -111,7 +110,8 @@ static void hv_machine_crash_shutdown(struct pt_regs *regs) hv_crash_handler(regs); native_machine_crash_shutdown(regs); } - +#endif /* CONFIG_KEXEC_CORE */ +#endif /* CONFIG_HYPERV */ static uint32_t __init ms_hyperv_platform(void) { @@ -186,8 +186,10 @@ static void __init ms_hyperv_init_platform(void) no_timer_check = 1; #endif +#if IS_ENABLED(CONFIG_HYPERV) && defined(CONFIG_KEXEC_CORE) machine_ops.shutdown = hv_machine_shutdown; machine_ops.crash_shutdown = hv_machine_crash_shutdown; +#endif mark_tsc_unstable("running on Hyper-V"); } diff --git a/arch/x86/kernel/cpu/perf_event.h b/arch/x86/kernel/cpu/perf_event.h index 5edf6d868fc1..165be83a7fa4 100644 --- a/arch/x86/kernel/cpu/perf_event.h +++ b/arch/x86/kernel/cpu/perf_event.h @@ -47,6 +47,7 @@ enum extra_reg_type { EXTRA_REG_RSP_1 = 1, /* offcore_response_1 */ EXTRA_REG_LBR = 2, /* lbr_select */ EXTRA_REG_LDLAT = 3, /* ld_lat_threshold */ + EXTRA_REG_FE = 4, /* fe_* */ EXTRA_REG_MAX /* number of entries needed */ }; diff --git a/arch/x86/kernel/cpu/perf_event_intel.c b/arch/x86/kernel/cpu/perf_event_intel.c index 3fefebfbdf4b..f63360be2238 100644 --- a/arch/x86/kernel/cpu/perf_event_intel.c +++ b/arch/x86/kernel/cpu/perf_event_intel.c @@ -205,6 +205,11 @@ static struct extra_reg intel_skl_extra_regs[] __read_mostly = { INTEL_UEVENT_EXTRA_REG(0x01b7, MSR_OFFCORE_RSP_0, 0x3fffff8fffull, RSP_0), INTEL_UEVENT_EXTRA_REG(0x01bb, MSR_OFFCORE_RSP_1, 0x3fffff8fffull, RSP_1), INTEL_UEVENT_PEBS_LDLAT_EXTRA_REG(0x01cd), + /* + * Note the low 8 bits eventsel code is not a continuous field, containing + * some #GPing bits. These are masked out. 
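The bitsperlong.h change above matters for the x32 ABI, where __x86_64__ is defined but long is still 32 bits wide; testing __ILP32__ keeps __BITS_PER_LONG honest. A compile-time check of the same condition, as an illustration for x86 targets only:

#include <limits.h>

#if defined(__i386__) || defined(__x86_64__)
# if defined(__x86_64__) && !defined(__ILP32__)
#  define EXPECTED_BITS_PER_LONG 64
# else
#  define EXPECTED_BITS_PER_LONG 32	/* i386 and x32 alike */
# endif

_Static_assert(EXPECTED_BITS_PER_LONG == CHAR_BIT * sizeof(long),
	       "__BITS_PER_LONG must match the real width of long");
#endif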
+ */ + INTEL_UEVENT_EXTRA_REG(0x01c6, MSR_PEBS_FRONTEND, 0x7fff17, FE), EVENT_EXTRA_END }; @@ -250,7 +255,7 @@ struct event_constraint intel_bdw_event_constraints[] = { FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */ FIXED_EVENT_CONSTRAINT(0x0300, 2), /* CPU_CLK_UNHALTED.REF */ INTEL_UEVENT_CONSTRAINT(0x148, 0x4), /* L1D_PEND_MISS.PENDING */ - INTEL_EVENT_CONSTRAINT(0xa3, 0x4), /* CYCLE_ACTIVITY.* */ + INTEL_UEVENT_CONSTRAINT(0x8a3, 0x4), /* CYCLE_ACTIVITY.CYCLES_L1D_MISS */ EVENT_CONSTRAINT_END }; @@ -2891,6 +2896,8 @@ PMU_FORMAT_ATTR(offcore_rsp, "config1:0-63"); PMU_FORMAT_ATTR(ldlat, "config1:0-15"); +PMU_FORMAT_ATTR(frontend, "config1:0-23"); + static struct attribute *intel_arch3_formats_attr[] = { &format_attr_event.attr, &format_attr_umask.attr, @@ -2907,6 +2914,11 @@ static struct attribute *intel_arch3_formats_attr[] = { NULL, }; +static struct attribute *skl_format_attr[] = { + &format_attr_frontend.attr, + NULL, +}; + static __initconst const struct x86_pmu core_pmu = { .name = "core", .handle_irq = x86_pmu_handle_irq, @@ -3516,7 +3528,8 @@ __init int intel_pmu_init(void) x86_pmu.hw_config = hsw_hw_config; x86_pmu.get_event_constraints = hsw_get_event_constraints; - x86_pmu.cpu_events = hsw_events_attrs; + x86_pmu.format_attrs = merge_attr(intel_arch3_formats_attr, + skl_format_attr); WARN_ON(!x86_pmu.format_attrs); x86_pmu.cpu_events = hsw_events_attrs; pr_cont("Skylake events, "); diff --git a/arch/x86/kernel/cpu/perf_event_msr.c b/arch/x86/kernel/cpu/perf_event_msr.c index 086b12eae794..f32ac13934f2 100644 --- a/arch/x86/kernel/cpu/perf_event_msr.c +++ b/arch/x86/kernel/cpu/perf_event_msr.c @@ -10,12 +10,12 @@ enum perf_msr_id { PERF_MSR_EVENT_MAX, }; -bool test_aperfmperf(int idx) +static bool test_aperfmperf(int idx) { return boot_cpu_has(X86_FEATURE_APERFMPERF); } -bool test_intel(int idx) +static bool test_intel(int idx) { if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL || boot_cpu_data.x86 != 6) diff --git a/arch/x86/kernel/cpu/scattered.c b/arch/x86/kernel/cpu/scattered.c index 3d423a101fae..608fb26c7254 100644 --- a/arch/x86/kernel/cpu/scattered.c +++ b/arch/x86/kernel/cpu/scattered.c @@ -37,7 +37,7 @@ void init_scattered_cpuid_features(struct cpuinfo_x86 *c) { X86_FEATURE_PLN, CR_EAX, 4, 0x00000006, 0 }, { X86_FEATURE_PTS, CR_EAX, 6, 0x00000006, 0 }, { X86_FEATURE_HWP, CR_EAX, 7, 0x00000006, 0 }, - { X86_FEATURE_HWP_NOITFY, CR_EAX, 8, 0x00000006, 0 }, + { X86_FEATURE_HWP_NOTIFY, CR_EAX, 8, 0x00000006, 0 }, { X86_FEATURE_HWP_ACT_WINDOW, CR_EAX, 9, 0x00000006, 0 }, { X86_FEATURE_HWP_EPP, CR_EAX,10, 0x00000006, 0 }, { X86_FEATURE_HWP_PKG_REQ, CR_EAX,11, 0x00000006, 0 }, diff --git a/arch/x86/kernel/crash.c b/arch/x86/kernel/crash.c index e068d6683dba..74ca2fe7a0b3 100644 --- a/arch/x86/kernel/crash.c +++ b/arch/x86/kernel/crash.c @@ -185,10 +185,9 @@ void native_machine_crash_shutdown(struct pt_regs *regs) } #ifdef CONFIG_KEXEC_FILE -static int get_nr_ram_ranges_callback(unsigned long start_pfn, - unsigned long nr_pfn, void *arg) +static int get_nr_ram_ranges_callback(u64 start, u64 end, void *arg) { - int *nr_ranges = arg; + unsigned int *nr_ranges = arg; (*nr_ranges)++; return 0; @@ -214,7 +213,7 @@ static void fill_up_crash_elf_data(struct crash_elf_data *ced, ced->image = image; - walk_system_ram_range(0, -1, &nr_ranges, + walk_system_ram_res(0, -1, &nr_ranges, get_nr_ram_ranges_callback); ced->max_nr_ranges = nr_ranges; diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c index 6d0e62ae8516..39e585a554b7 100644 --- 
a/arch/x86/kernel/process.c +++ b/arch/x86/kernel/process.c @@ -506,3 +506,58 @@ unsigned long arch_randomize_brk(struct mm_struct *mm) return randomize_range(mm->brk, range_end, 0) ? : mm->brk; } +/* + * Called from fs/proc with a reference on @p to find the function + * which called into schedule(). This needs to be done carefully + * because the task might wake up and we might look at a stack + * changing under us. + */ +unsigned long get_wchan(struct task_struct *p) +{ + unsigned long start, bottom, top, sp, fp, ip; + int count = 0; + + if (!p || p == current || p->state == TASK_RUNNING) + return 0; + + start = (unsigned long)task_stack_page(p); + if (!start) + return 0; + + /* + * Layout of the stack page: + * + * ----------- topmax = start + THREAD_SIZE - sizeof(unsigned long) + * PADDING + * ----------- top = topmax - TOP_OF_KERNEL_STACK_PADDING + * stack + * ----------- bottom = start + sizeof(thread_info) + * thread_info + * ----------- start + * + * The tasks stack pointer points at the location where the + * framepointer is stored. The data on the stack is: + * ... IP FP ... IP FP + * + * We need to read FP and IP, so we need to adjust the upper + * bound by another unsigned long. + */ + top = start + THREAD_SIZE - TOP_OF_KERNEL_STACK_PADDING; + top -= 2 * sizeof(unsigned long); + bottom = start + sizeof(struct thread_info); + + sp = READ_ONCE(p->thread.sp); + if (sp < bottom || sp > top) + return 0; + + fp = READ_ONCE(*(unsigned long *)sp); + do { + if (fp < bottom || fp > top) + return 0; + ip = READ_ONCE(*(unsigned long *)(fp + sizeof(unsigned long))); + if (!in_sched_functions(ip)) + return ip; + fp = READ_ONCE(*(unsigned long *)fp); + } while (count++ < 16 && p->state != TASK_RUNNING); + return 0; +} diff --git a/arch/x86/kernel/process_32.c b/arch/x86/kernel/process_32.c index c13df2c735f8..737527b40e5b 100644 --- a/arch/x86/kernel/process_32.c +++ b/arch/x86/kernel/process_32.c @@ -324,31 +324,3 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p) return prev_p; } - -#define top_esp (THREAD_SIZE - sizeof(unsigned long)) -#define top_ebp (THREAD_SIZE - 2*sizeof(unsigned long)) - -unsigned long get_wchan(struct task_struct *p) -{ - unsigned long bp, sp, ip; - unsigned long stack_page; - int count = 0; - if (!p || p == current || p->state == TASK_RUNNING) - return 0; - stack_page = (unsigned long)task_stack_page(p); - sp = p->thread.sp; - if (!stack_page || sp < stack_page || sp > top_esp+stack_page) - return 0; - /* include/asm-i386/system.h:switch_to() pushes bp last. 
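For context, the consolidated get_wchan() above feeds /proc/<pid>/wchan; a tiny userspace probe (a sketch, not part of this series) shows which symbol a sleeping task is blocked in:

	#include <stdio.h>

	int main(int argc, char **argv)
	{
		char path[64], sym[128] = "";
		FILE *f;

		snprintf(path, sizeof(path), "/proc/%s/wchan",
			 argc > 1 ? argv[1] : "self");
		f = fopen(path, "r");
		if (!f)
			return 1;
		if (fgets(sym, sizeof(sym), f))
			printf("blocked in: %s\n", sym);	/* e.g. poll_schedule_timeout */
		fclose(f);
		return 0;
	}
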
*/ - bp = *(unsigned long *) sp; - do { - if (bp < stack_page || bp > top_ebp+stack_page) - return 0; - ip = *(unsigned long *) (bp+4); - if (!in_sched_functions(ip)) - return ip; - bp = *(unsigned long *) bp; - } while (count++ < 16); - return 0; -} - diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c index 3c1bbcf12924..b35921a670b2 100644 --- a/arch/x86/kernel/process_64.c +++ b/arch/x86/kernel/process_64.c @@ -499,30 +499,6 @@ void set_personality_ia32(bool x32) } EXPORT_SYMBOL_GPL(set_personality_ia32); -unsigned long get_wchan(struct task_struct *p) -{ - unsigned long stack; - u64 fp, ip; - int count = 0; - - if (!p || p == current || p->state == TASK_RUNNING) - return 0; - stack = (unsigned long)task_stack_page(p); - if (p->thread.sp < stack || p->thread.sp >= stack+THREAD_SIZE) - return 0; - fp = *(u64 *)(p->thread.sp); - do { - if (fp < (unsigned long)stack || - fp >= (unsigned long)stack+THREAD_SIZE) - return 0; - ip = *(u64 *)(fp+8); - if (!in_sched_functions(ip)) - return ip; - fp = *(u64 *)fp; - } while (count++ < 16); - return 0; -} - long do_arch_prctl(struct task_struct *task, int code, unsigned long addr) { int ret = 0; diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c index 94b7d15db3fc..2f9ed1ff0632 100644 --- a/arch/x86/kvm/svm.c +++ b/arch/x86/kvm/svm.c @@ -514,7 +514,7 @@ static void skip_emulated_instruction(struct kvm_vcpu *vcpu) struct vcpu_svm *svm = to_svm(vcpu); if (svm->vmcb->control.next_rip != 0) { - WARN_ON(!static_cpu_has(X86_FEATURE_NRIPS)); + WARN_ON_ONCE(!static_cpu_has(X86_FEATURE_NRIPS)); svm->next_rip = svm->vmcb->control.next_rip; } @@ -866,64 +866,6 @@ static void svm_disable_lbrv(struct vcpu_svm *svm) set_msr_interception(msrpm, MSR_IA32_LASTINTTOIP, 0, 0); } -#define MTRR_TYPE_UC_MINUS 7 -#define MTRR2PROTVAL_INVALID 0xff - -static u8 mtrr2protval[8]; - -static u8 fallback_mtrr_type(int mtrr) -{ - /* - * WT and WP aren't always available in the host PAT. Treat - * them as UC and UC- respectively. Everything else should be - * there. - */ - switch (mtrr) - { - case MTRR_TYPE_WRTHROUGH: - return MTRR_TYPE_UNCACHABLE; - case MTRR_TYPE_WRPROT: - return MTRR_TYPE_UC_MINUS; - default: - BUG(); - } -} - -static void build_mtrr2protval(void) -{ - int i; - u64 pat; - - for (i = 0; i < 8; i++) - mtrr2protval[i] = MTRR2PROTVAL_INVALID; - - /* Ignore the invalid MTRR types. */ - mtrr2protval[2] = 0; - mtrr2protval[3] = 0; - - /* - * Use host PAT value to figure out the mapping from guest MTRR - * values to nested page table PAT/PCD/PWT values. We do not - * want to change the host PAT value every time we enter the - * guest. - */ - rdmsrl(MSR_IA32_CR_PAT, pat); - for (i = 0; i < 8; i++) { - u8 mtrr = pat >> (8 * i); - - if (mtrr2protval[mtrr] == MTRR2PROTVAL_INVALID) - mtrr2protval[mtrr] = __cm_idx2pte(i); - } - - for (i = 0; i < 8; i++) { - if (mtrr2protval[i] == MTRR2PROTVAL_INVALID) { - u8 fallback = fallback_mtrr_type(i); - mtrr2protval[i] = mtrr2protval[fallback]; - BUG_ON(mtrr2protval[i] == MTRR2PROTVAL_INVALID); - } - } -} - static __init int svm_hardware_setup(void) { int cpu; @@ -990,7 +932,6 @@ static __init int svm_hardware_setup(void) } else kvm_disable_tdp(); - build_mtrr2protval(); return 0; err: @@ -1145,43 +1086,6 @@ static u64 svm_compute_tsc_offset(struct kvm_vcpu *vcpu, u64 target_tsc) return target_tsc - tsc; } -static void svm_set_guest_pat(struct vcpu_svm *svm, u64 *g_pat) -{ - struct kvm_vcpu *vcpu = &svm->vcpu; - - /* Unlike Intel, AMD takes the guest's CR0.CD into account. - * - * AMD does not have IPAT. 
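Further down in this file, the unconditional CD/NW stripping in svm_set_cr0() becomes quirk-gated. The shape of that change, as a self-contained sketch (constants stubbed for illustration, not the kernel's helper):

	#include <stdbool.h>

	#define X86_CR0_CD (1UL << 30)
	#define X86_CR0_NW (1UL << 29)

	/* Re-enable caching only when the legacy QEMU-BIOS quirk is active. */
	static unsigned long sanitize_cr0(unsigned long cr0, bool cd_nw_quirk)
	{
		if (cd_nw_quirk)
			cr0 &= ~(X86_CR0_CD | X86_CR0_NW);
		return cr0;
	}
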
To emulate it for the case of guests - * with no assigned devices, just set everything to WB. If guests - * have assigned devices, however, we cannot force WB for RAM - * pages only, so use the guest PAT directly. - */ - if (!kvm_arch_has_assigned_device(vcpu->kvm)) - *g_pat = 0x0606060606060606; - else - *g_pat = vcpu->arch.pat; -} - -static u64 svm_get_mt_mask(struct kvm_vcpu *vcpu, gfn_t gfn, bool is_mmio) -{ - u8 mtrr; - - /* - * 1. MMIO: trust guest MTRR, so same as item 3. - * 2. No passthrough: always map as WB, and force guest PAT to WB as well - * 3. Passthrough: can't guarantee the result, try to trust guest. - */ - if (!is_mmio && !kvm_arch_has_assigned_device(vcpu->kvm)) - return 0; - - if (!kvm_check_has_quirk(vcpu->kvm, KVM_X86_QUIRK_CD_NW_CLEARED) && - kvm_read_cr0(vcpu) & X86_CR0_CD) - return _PAGE_NOCACHE; - - mtrr = kvm_mtrr_get_guest_memory_type(vcpu, gfn); - return mtrr2protval[mtrr]; -} - static void init_vmcb(struct vcpu_svm *svm, bool init_event) { struct vmcb_control_area *control = &svm->vmcb->control; @@ -1278,7 +1182,6 @@ static void init_vmcb(struct vcpu_svm *svm, bool init_event) clr_cr_intercept(svm, INTERCEPT_CR3_READ); clr_cr_intercept(svm, INTERCEPT_CR3_WRITE); save->g_pat = svm->vcpu.arch.pat; - svm_set_guest_pat(svm, &save->g_pat); save->cr3 = 0; save->cr4 = 0; } @@ -1673,10 +1576,13 @@ static void svm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0) if (!vcpu->fpu_active) cr0 |= X86_CR0_TS; - - /* These are emulated via page tables. */ - cr0 &= ~(X86_CR0_CD | X86_CR0_NW); - + /* + * re-enable caching here because the QEMU bios + * does not do it - this results in some delay at + * reboot + */ + if (kvm_check_has_quirk(vcpu->kvm, KVM_X86_QUIRK_CD_NW_CLEARED)) + cr0 &= ~(X86_CR0_CD | X86_CR0_NW); svm->vmcb->save.cr0 = cr0; mark_dirty(svm->vmcb, VMCB_CR); update_cr0_intercept(svm); @@ -3351,16 +3257,6 @@ static int svm_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr) case MSR_VM_IGNNE: vcpu_unimpl(vcpu, "unimplemented wrmsr: 0x%x data 0x%llx\n", ecx, data); break; - case MSR_IA32_CR_PAT: - if (npt_enabled) { - if (!kvm_mtrr_valid(vcpu, MSR_IA32_CR_PAT, data)) - return 1; - vcpu->arch.pat = data; - svm_set_guest_pat(svm, &svm->vmcb->save.g_pat); - mark_dirty(svm->vmcb, VMCB_NPT); - break; - } - /* fall through */ default: return kvm_set_msr_common(vcpu, msr); } @@ -4195,6 +4091,11 @@ static bool svm_has_high_real_mode_segbase(void) return true; } +static u64 svm_get_mt_mask(struct kvm_vcpu *vcpu, gfn_t gfn, bool is_mmio) +{ + return 0; +} + static void svm_cpuid_update(struct kvm_vcpu *vcpu) { } diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c index 64076740251e..06ef4908ba61 100644 --- a/arch/x86/kvm/vmx.c +++ b/arch/x86/kvm/vmx.c @@ -8617,17 +8617,22 @@ static u64 vmx_get_mt_mask(struct kvm_vcpu *vcpu, gfn_t gfn, bool is_mmio) u64 ipat = 0; /* For VT-d and EPT combination - * 1. MMIO: guest may want to apply WC, trust it. + * 1. MMIO: always map as UC * 2. EPT with VT-d: * a. VT-d without snooping control feature: can't guarantee the - * result, try to trust guest. So the same as item 1. + * result, try to trust guest. * b. VT-d with snooping control feature: snooping control feature of * VT-d engine can guarantee the cache correctness. Just set it * to WB to keep consistent with host. So the same as item 3. * 3. 
EPT without VT-d: always map as WB and set IPAT=1 to keep * consistent with host MTRR */ - if (!is_mmio && !kvm_arch_has_noncoherent_dma(vcpu->kvm)) { + if (is_mmio) { + cache = MTRR_TYPE_UNCACHABLE; + goto exit; + } + + if (!kvm_arch_has_noncoherent_dma(vcpu->kvm)) { ipat = VMX_EPT_IPAT_BIT; cache = MTRR_TYPE_WRBACK; goto exit; diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index 991466bf8dee..92511d4b7236 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -1708,8 +1708,6 @@ static int kvm_guest_time_update(struct kvm_vcpu *v) vcpu->pvclock_set_guest_stopped_request = false; } - pvclock_flags |= PVCLOCK_COUNTS_FROM_ZERO; - /* If the host uses TSC clocksource, then it is stable */ if (use_master_clock) pvclock_flags |= PVCLOCK_TSC_STABLE_BIT; @@ -2007,8 +2005,6 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info) &vcpu->requests); ka->boot_vcpu_runs_old_kvmclock = tmp; - - ka->kvmclock_offset = -get_kernel_ns(); } vcpu->arch.time = data; diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c index 30564e2752d3..df48430c279b 100644 --- a/arch/x86/mm/init_64.c +++ b/arch/x86/mm/init_64.c @@ -1132,7 +1132,7 @@ void mark_rodata_ro(void) * has been zapped already via cleanup_highmem(). */ all_end = roundup((unsigned long)_brk_end, PMD_SIZE); - set_memory_nx(rodata_start, (all_end - rodata_start) >> PAGE_SHIFT); + set_memory_nx(text_end, (all_end - text_end) >> PAGE_SHIFT); rodata_test(); diff --git a/arch/x86/platform/efi/efi.c b/arch/x86/platform/efi/efi.c index 1db84c0758b7..6a28ded74211 100644 --- a/arch/x86/platform/efi/efi.c +++ b/arch/x86/platform/efi/efi.c @@ -705,6 +705,70 @@ out: } /* + * Iterate the EFI memory map in reverse order because the regions + * will be mapped top-down. The end result is the same as if we had + * mapped things forward, but doesn't require us to change the + * existing implementation of efi_map_region(). + */ +static inline void *efi_map_next_entry_reverse(void *entry) +{ + /* Initial call */ + if (!entry) + return memmap.map_end - memmap.desc_size; + + entry -= memmap.desc_size; + if (entry < memmap.map) + return NULL; + + return entry; +} + +/* + * efi_map_next_entry - Return the next EFI memory map descriptor + * @entry: Previous EFI memory map descriptor + * + * This is a helper function to iterate over the EFI memory map, which + * we do in different orders depending on the current configuration. + * + * To begin traversing the memory map @entry must be %NULL. + * + * Returns %NULL when we reach the end of the memory map. + */ +static void *efi_map_next_entry(void *entry) +{ + if (!efi_enabled(EFI_OLD_MEMMAP) && efi_enabled(EFI_64BIT)) { + /* + * Starting in UEFI v2.5 the EFI_PROPERTIES_TABLE + * config table feature requires us to map all entries + * in the same order as they appear in the EFI memory + * map. That is to say, entry N must have a lower + * virtual address than entry N+1. This is because the + * firmware toolchain leaves relative references in + * the code/data sections, which are split and become + * separate EFI memory regions. Mapping things + * out-of-order leads to the firmware accessing + * unmapped addresses. + * + * Since we need to map things this way whether or not + * the kernel actually makes use of + * EFI_PROPERTIES_TABLE, let's just switch to this + * scheme by default for 64-bit. 
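The reverse-iteration helper introduced below is an instance of a general pattern: walking a flat buffer of fixed-size records backwards. A self-contained sketch (standalone C, names invented):

	#include <stddef.h>
	#include <stdio.h>

	/* Sketch: reverse walk over a flat buffer of fixed-size records. */
	static void *next_entry_reverse(char *map, char *map_end,
					size_t desc_size, char *entry)
	{
		if (!entry)			/* initial call: start at last record */
			return map_end - desc_size;
		entry -= desc_size;
		return entry < map ? NULL : entry;
	}

	int main(void)
	{
		int recs[4] = { 1, 2, 3, 4 };
		char *p = NULL;

		while ((p = next_entry_reverse((char *)recs, (char *)(recs + 4),
					       sizeof(int), p)))
			printf("%d\n", *(int *)p);	/* prints 4 3 2 1 */
		return 0;
	}
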
+ */ + return efi_map_next_entry_reverse(entry); + } + + /* Initial call */ + if (!entry) + return memmap.map; + + entry += memmap.desc_size; + if (entry >= memmap.map_end) + return NULL; + + return entry; +} + +/* * Map the efi memory ranges of the runtime services and update new_mmap with * virtual addresses. */ @@ -714,7 +778,8 @@ static void * __init efi_map_regions(int *count, int *pg_shift) unsigned long left = 0; efi_memory_desc_t *md; - for (p = memmap.map; p < memmap.map_end; p += memmap.desc_size) { + p = NULL; + while ((p = efi_map_next_entry(p))) { md = p; if (!(md->attribute & EFI_MEMORY_RUNTIME)) { #ifdef CONFIG_X86_64 diff --git a/arch/xtensa/include/asm/Kbuild b/arch/xtensa/include/asm/Kbuild index 63c223dff5f1..b56855a1382a 100644 --- a/arch/xtensa/include/asm/Kbuild +++ b/arch/xtensa/include/asm/Kbuild @@ -28,4 +28,5 @@ generic-y += statfs.h generic-y += termios.h generic-y += topology.h generic-y += trace_clock.h +generic-y += word-at-a-time.h generic-y += xor.h diff --git a/block/blk-mq-cpumap.c b/block/blk-mq-cpumap.c index 1e28ddb656b8..8764c241e5bb 100644 --- a/block/blk-mq-cpumap.c +++ b/block/blk-mq-cpumap.c @@ -31,7 +31,8 @@ static int get_first_sibling(unsigned int cpu) return cpu; } -int blk_mq_update_queue_map(unsigned int *map, unsigned int nr_queues) +int blk_mq_update_queue_map(unsigned int *map, unsigned int nr_queues, + const struct cpumask *online_mask) { unsigned int i, nr_cpus, nr_uniq_cpus, queue, first_sibling; cpumask_var_t cpus; @@ -41,7 +42,7 @@ int blk_mq_update_queue_map(unsigned int *map, unsigned int nr_queues) cpumask_clear(cpus); nr_cpus = nr_uniq_cpus = 0; - for_each_online_cpu(i) { + for_each_cpu(i, online_mask) { nr_cpus++; first_sibling = get_first_sibling(i); if (!cpumask_test_cpu(first_sibling, cpus)) @@ -51,7 +52,7 @@ int blk_mq_update_queue_map(unsigned int *map, unsigned int nr_queues) queue = 0; for_each_possible_cpu(i) { - if (!cpu_online(i)) { + if (!cpumask_test_cpu(i, online_mask)) { map[i] = 0; continue; } @@ -95,7 +96,7 @@ unsigned int *blk_mq_make_queue_map(struct blk_mq_tag_set *set) if (!map) return NULL; - if (!blk_mq_update_queue_map(map, set->nr_hw_queues)) + if (!blk_mq_update_queue_map(map, set->nr_hw_queues, cpu_online_mask)) return map; kfree(map); diff --git a/block/blk-mq-sysfs.c b/block/blk-mq-sysfs.c index 279c5d674edf..788fffd9b409 100644 --- a/block/blk-mq-sysfs.c +++ b/block/blk-mq-sysfs.c @@ -229,8 +229,6 @@ static ssize_t blk_mq_hw_sysfs_cpus_show(struct blk_mq_hw_ctx *hctx, char *page) unsigned int i, first = 1; ssize_t ret = 0; - blk_mq_disable_hotplug(); - for_each_cpu(i, hctx->cpumask) { if (first) ret += sprintf(ret + page, "%u", i); @@ -240,8 +238,6 @@ static ssize_t blk_mq_hw_sysfs_cpus_show(struct blk_mq_hw_ctx *hctx, char *page) first = 0; } - blk_mq_enable_hotplug(); - ret += sprintf(ret + page, "\n"); return ret; } @@ -343,7 +339,7 @@ static void blk_mq_unregister_hctx(struct blk_mq_hw_ctx *hctx) struct blk_mq_ctx *ctx; int i; - if (!hctx->nr_ctx || !(hctx->flags & BLK_MQ_F_SYSFS_UP)) + if (!hctx->nr_ctx) return; hctx_for_each_ctx(hctx, ctx, i) @@ -358,7 +354,7 @@ static int blk_mq_register_hctx(struct blk_mq_hw_ctx *hctx) struct blk_mq_ctx *ctx; int i, ret; - if (!hctx->nr_ctx || !(hctx->flags & BLK_MQ_F_SYSFS_UP)) + if (!hctx->nr_ctx) return 0; ret = kobject_add(&hctx->kobj, &q->mq_kobj, "%u", hctx->queue_num); @@ -381,6 +377,8 @@ void blk_mq_unregister_disk(struct gendisk *disk) struct blk_mq_ctx *ctx; int i, j; + blk_mq_disable_hotplug(); + queue_for_each_hw_ctx(q, hctx, i) { 
blk_mq_unregister_hctx(hctx); @@ -395,6 +393,9 @@ void blk_mq_unregister_disk(struct gendisk *disk) kobject_put(&q->mq_kobj); kobject_put(&disk_to_dev(disk)->kobj); + + q->mq_sysfs_init_done = false; + blk_mq_enable_hotplug(); } static void blk_mq_sysfs_init(struct request_queue *q) @@ -425,27 +426,30 @@ int blk_mq_register_disk(struct gendisk *disk) struct blk_mq_hw_ctx *hctx; int ret, i; + blk_mq_disable_hotplug(); + blk_mq_sysfs_init(q); ret = kobject_add(&q->mq_kobj, kobject_get(&dev->kobj), "%s", "mq"); if (ret < 0) - return ret; + goto out; kobject_uevent(&q->mq_kobj, KOBJ_ADD); queue_for_each_hw_ctx(q, hctx, i) { - hctx->flags |= BLK_MQ_F_SYSFS_UP; ret = blk_mq_register_hctx(hctx); if (ret) break; } - if (ret) { + if (ret) blk_mq_unregister_disk(disk); - return ret; - } + else + q->mq_sysfs_init_done = true; +out: + blk_mq_enable_hotplug(); - return 0; + return ret; } EXPORT_SYMBOL_GPL(blk_mq_register_disk); @@ -454,6 +458,9 @@ void blk_mq_sysfs_unregister(struct request_queue *q) struct blk_mq_hw_ctx *hctx; int i; + if (!q->mq_sysfs_init_done) + return; + queue_for_each_hw_ctx(q, hctx, i) blk_mq_unregister_hctx(hctx); } @@ -463,6 +470,9 @@ int blk_mq_sysfs_register(struct request_queue *q) struct blk_mq_hw_ctx *hctx; int i, ret = 0; + if (!q->mq_sysfs_init_done) + return ret; + queue_for_each_hw_ctx(q, hctx, i) { ret = blk_mq_register_hctx(hctx); if (ret) diff --git a/block/blk-mq-tag.c b/block/blk-mq-tag.c index 9115c6d59948..ed96474d75cb 100644 --- a/block/blk-mq-tag.c +++ b/block/blk-mq-tag.c @@ -471,17 +471,30 @@ void blk_mq_all_tag_busy_iter(struct blk_mq_tags *tags, busy_tag_iter_fn *fn, } EXPORT_SYMBOL(blk_mq_all_tag_busy_iter); -void blk_mq_tag_busy_iter(struct blk_mq_hw_ctx *hctx, busy_iter_fn *fn, +void blk_mq_queue_tag_busy_iter(struct request_queue *q, busy_iter_fn *fn, void *priv) { - struct blk_mq_tags *tags = hctx->tags; + struct blk_mq_hw_ctx *hctx; + int i; + + + queue_for_each_hw_ctx(q, hctx, i) { + struct blk_mq_tags *tags = hctx->tags; + + /* + * If not software queues are currently mapped to this + * hardware queue, there's nothing to check + */ + if (!blk_mq_hw_queue_mapped(hctx)) + continue; + + if (tags->nr_reserved_tags) + bt_for_each(hctx, &tags->breserved_tags, 0, fn, priv, true); + bt_for_each(hctx, &tags->bitmap_tags, tags->nr_reserved_tags, fn, priv, + false); + } - if (tags->nr_reserved_tags) - bt_for_each(hctx, &tags->breserved_tags, 0, fn, priv, true); - bt_for_each(hctx, &tags->bitmap_tags, tags->nr_reserved_tags, fn, priv, - false); } -EXPORT_SYMBOL(blk_mq_tag_busy_iter); static unsigned int bt_unused_tags(struct blk_mq_bitmap_tags *bt) { diff --git a/block/blk-mq-tag.h b/block/blk-mq-tag.h index 9eb2cf4f01cb..d468a79f2c4a 100644 --- a/block/blk-mq-tag.h +++ b/block/blk-mq-tag.h @@ -58,6 +58,8 @@ extern ssize_t blk_mq_tag_sysfs_show(struct blk_mq_tags *tags, char *page); extern void blk_mq_tag_init_last_tag(struct blk_mq_tags *tags, unsigned int *last_tag); extern int blk_mq_tag_update_depth(struct blk_mq_tags *tags, unsigned int depth); extern void blk_mq_tag_wakeup_all(struct blk_mq_tags *tags, bool); +void blk_mq_queue_tag_busy_iter(struct request_queue *q, busy_iter_fn *fn, + void *priv); enum { BLK_MQ_TAG_CACHE_MIN = 1, diff --git a/block/blk-mq.c b/block/blk-mq.c index f2d67b4047a0..7785ae96267a 100644 --- a/block/blk-mq.c +++ b/block/blk-mq.c @@ -393,14 +393,16 @@ void __blk_mq_complete_request(struct request *rq) * Ends all I/O on a request. It does not handle partial completions. 
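The signature change that follows threads the error value through the completion call itself, so converting a driver collapses the old two-step pattern. Roughly (hypothetical driver function, kernel-style sketch):

	#include <linux/blk-mq.h>

	/* Sketch: old two-step completion vs. the new single call. */
	static void mydrv_complete(struct request *rq, int err)
	{
		/* before: rq->errors = err; blk_mq_complete_request(rq); */
		blk_mq_complete_request(rq, err);	/* after this patch */
	}
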
* The actual completion happens out-of-order, through a IPI handler. **/ -void blk_mq_complete_request(struct request *rq) +void blk_mq_complete_request(struct request *rq, int error) { struct request_queue *q = rq->q; if (unlikely(blk_should_fake_timeout(q))) return; - if (!blk_mark_rq_complete(rq)) + if (!blk_mark_rq_complete(rq)) { + rq->errors = error; __blk_mq_complete_request(rq); + } } EXPORT_SYMBOL(blk_mq_complete_request); @@ -616,10 +618,8 @@ static void blk_mq_check_expired(struct blk_mq_hw_ctx *hctx, * If a request wasn't started before the queue was * marked dying, kill it here or it'll go unnoticed. */ - if (unlikely(blk_queue_dying(rq->q))) { - rq->errors = -EIO; - blk_mq_complete_request(rq); - } + if (unlikely(blk_queue_dying(rq->q))) + blk_mq_complete_request(rq, -EIO); return; } if (rq->cmd_flags & REQ_NO_TIMEOUT) @@ -641,24 +641,16 @@ static void blk_mq_rq_timer(unsigned long priv) .next = 0, .next_set = 0, }; - struct blk_mq_hw_ctx *hctx; int i; - queue_for_each_hw_ctx(q, hctx, i) { - /* - * If not software queues are currently mapped to this - * hardware queue, there's nothing to check - */ - if (!blk_mq_hw_queue_mapped(hctx)) - continue; - - blk_mq_tag_busy_iter(hctx, blk_mq_check_expired, &data); - } + blk_mq_queue_tag_busy_iter(q, blk_mq_check_expired, &data); if (data.next_set) { data.next = blk_rq_timeout(round_jiffies_up(data.next)); mod_timer(&q->timeout, data.next); } else { + struct blk_mq_hw_ctx *hctx; + queue_for_each_hw_ctx(q, hctx, i) { /* the hctx may be unmapped, so check it here */ if (blk_mq_hw_queue_mapped(hctx)) @@ -1789,13 +1781,19 @@ static void blk_mq_init_cpu_queues(struct request_queue *q, } } -static void blk_mq_map_swqueue(struct request_queue *q) +static void blk_mq_map_swqueue(struct request_queue *q, + const struct cpumask *online_mask) { unsigned int i; struct blk_mq_hw_ctx *hctx; struct blk_mq_ctx *ctx; struct blk_mq_tag_set *set = q->tag_set; + /* + * Avoid others reading imcomplete hctx->cpumask through sysfs + */ + mutex_lock(&q->sysfs_lock); + queue_for_each_hw_ctx(q, hctx, i) { cpumask_clear(hctx->cpumask); hctx->nr_ctx = 0; @@ -1806,16 +1804,17 @@ static void blk_mq_map_swqueue(struct request_queue *q) */ queue_for_each_ctx(q, ctx, i) { /* If the cpu isn't online, the cpu is mapped to first hctx */ - if (!cpu_online(i)) + if (!cpumask_test_cpu(i, online_mask)) continue; hctx = q->mq_ops->map_queue(q, i); cpumask_set_cpu(i, hctx->cpumask); - cpumask_set_cpu(i, hctx->tags->cpumask); ctx->index_hw = hctx->nr_ctx; hctx->ctxs[hctx->nr_ctx++] = ctx; } + mutex_unlock(&q->sysfs_lock); + queue_for_each_hw_ctx(q, hctx, i) { struct blk_mq_ctxmap *map = &hctx->ctx_map; @@ -1851,6 +1850,14 @@ static void blk_mq_map_swqueue(struct request_queue *q) hctx->next_cpu = cpumask_first(hctx->cpumask); hctx->next_cpu_batch = BLK_MQ_CPU_WORK_BATCH; } + + queue_for_each_ctx(q, ctx, i) { + if (!cpumask_test_cpu(i, online_mask)) + continue; + + hctx = q->mq_ops->map_queue(q, i); + cpumask_set_cpu(i, hctx->tags->cpumask); + } } static void blk_mq_update_tag_set_depth(struct blk_mq_tag_set *set) @@ -1918,6 +1925,9 @@ void blk_mq_release(struct request_queue *q) kfree(hctx); } + kfree(q->mq_map); + q->mq_map = NULL; + kfree(q->queue_hw_ctx); /* ctx kobj stays in queue_ctx */ @@ -2027,13 +2037,15 @@ struct request_queue *blk_mq_init_allocated_queue(struct blk_mq_tag_set *set, if (blk_mq_init_hw_queues(q, set)) goto err_hctxs; + get_online_cpus(); mutex_lock(&all_q_mutex); - list_add_tail(&q->all_q_node, &all_q_list); - mutex_unlock(&all_q_mutex); + 
list_add_tail(&q->all_q_node, &all_q_list); blk_mq_add_queue_tag_set(set, q); + blk_mq_map_swqueue(q, cpu_online_mask); - blk_mq_map_swqueue(q); + mutex_unlock(&all_q_mutex); + put_online_cpus(); return q; @@ -2057,30 +2069,27 @@ void blk_mq_free_queue(struct request_queue *q) { struct blk_mq_tag_set *set = q->tag_set; + mutex_lock(&all_q_mutex); + list_del_init(&q->all_q_node); + mutex_unlock(&all_q_mutex); + blk_mq_del_queue_tag_set(q); blk_mq_exit_hw_queues(q, set, set->nr_hw_queues); blk_mq_free_hw_queues(q, set); percpu_ref_exit(&q->mq_usage_counter); - - kfree(q->mq_map); - - q->mq_map = NULL; - - mutex_lock(&all_q_mutex); - list_del_init(&q->all_q_node); - mutex_unlock(&all_q_mutex); } /* Basically redo blk_mq_init_queue with queue frozen */ -static void blk_mq_queue_reinit(struct request_queue *q) +static void blk_mq_queue_reinit(struct request_queue *q, + const struct cpumask *online_mask) { WARN_ON_ONCE(!atomic_read(&q->mq_freeze_depth)); blk_mq_sysfs_unregister(q); - blk_mq_update_queue_map(q->mq_map, q->nr_hw_queues); + blk_mq_update_queue_map(q->mq_map, q->nr_hw_queues, online_mask); /* * redo blk_mq_init_cpu_queues and blk_mq_init_hw_queues. FIXME: maybe @@ -2088,7 +2097,7 @@ static void blk_mq_queue_reinit(struct request_queue *q) * involves free and re-allocate memory, worthy doing?) */ - blk_mq_map_swqueue(q); + blk_mq_map_swqueue(q, online_mask); blk_mq_sysfs_register(q); } @@ -2097,16 +2106,43 @@ static int blk_mq_queue_reinit_notify(struct notifier_block *nb, unsigned long action, void *hcpu) { struct request_queue *q; + int cpu = (unsigned long)hcpu; + /* + * New online cpumask which is going to be set in this hotplug event. + * Declare this cpumasks as global as cpu-hotplug operation is invoked + * one-by-one and dynamically allocating this could result in a failure. + */ + static struct cpumask online_new; /* - * Before new mappings are established, hotadded cpu might already - * start handling requests. This doesn't break anything as we map - * offline CPUs to first hardware queue. We will re-init the queue - * below to get optimal settings. + * Before hotadded cpu starts handling requests, new mappings must + * be established. Otherwise, these requests in hw queue might + * never be dispatched. + * + * For example, there is a single hw queue (hctx) and two CPU queues + * (ctx0 for CPU0, and ctx1 for CPU1). + * + * Now CPU1 is just onlined and a request is inserted into + * ctx1->rq_list and set bit0 in pending bitmap as ctx1->index_hw is + * still zero. + * + * And then while running hw queue, flush_busy_ctxs() finds bit0 is + * set in pending bitmap and tries to retrieve requests in + * hctx->ctxs[0]->rq_list. But htx->ctxs[0] is a pointer to ctx0, + * so the request in ctx1->rq_list is ignored. 
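Hence the notifier below computes the post-event online mask up front, before remapping; for a CPU coming online the derivation is simply (sketch against the kernel cpumask API):

	#include <linux/cpumask.h>

	/* Sketch: online mask as it will look once @cpu finishes coming up. */
	static void compute_online_new(struct cpumask *online_new, unsigned int cpu)
	{
		cpumask_copy(online_new, cpu_online_mask);
		cpumask_set_cpu(cpu, online_new);
	}
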
*/ - if (action != CPU_DEAD && action != CPU_DEAD_FROZEN && - action != CPU_ONLINE && action != CPU_ONLINE_FROZEN) + switch (action & ~CPU_TASKS_FROZEN) { + case CPU_DEAD: + case CPU_UP_CANCELED: + cpumask_copy(&online_new, cpu_online_mask); + break; + case CPU_UP_PREPARE: + cpumask_copy(&online_new, cpu_online_mask); + cpumask_set_cpu(cpu, &online_new); + break; + default: return NOTIFY_OK; + } mutex_lock(&all_q_mutex); @@ -2130,7 +2166,7 @@ static int blk_mq_queue_reinit_notify(struct notifier_block *nb, } list_for_each_entry(q, &all_q_list, all_q_node) - blk_mq_queue_reinit(q); + blk_mq_queue_reinit(q, &online_new); list_for_each_entry(q, &all_q_list, all_q_node) blk_mq_unfreeze_queue(q); diff --git a/block/blk-mq.h b/block/blk-mq.h index 6a48c4c0d8a2..f4fea7964910 100644 --- a/block/blk-mq.h +++ b/block/blk-mq.h @@ -51,7 +51,8 @@ void blk_mq_disable_hotplug(void); * CPU -> queue mappings */ extern unsigned int *blk_mq_make_queue_map(struct blk_mq_tag_set *set); -extern int blk_mq_update_queue_map(unsigned int *map, unsigned int nr_queues); +extern int blk_mq_update_queue_map(unsigned int *map, unsigned int nr_queues, + const struct cpumask *online_mask); extern int blk_mq_hw_queue_to_node(unsigned int *map, unsigned int); /* diff --git a/crypto/asymmetric_keys/x509_public_key.c b/crypto/asymmetric_keys/x509_public_key.c index 6d88dd15c98d..197096632412 100644 --- a/crypto/asymmetric_keys/x509_public_key.c +++ b/crypto/asymmetric_keys/x509_public_key.c @@ -332,10 +332,6 @@ static int x509_key_preparse(struct key_preparsed_payload *prep) srlen = cert->raw_serial_size; q = cert->raw_serial; } - if (srlen > 1 && *q == 0) { - srlen--; - q++; - } ret = -ENOMEM; desc = kmalloc(sulen + 2 + srlen * 2 + 1, GFP_KERNEL); diff --git a/drivers/acpi/ec.c b/drivers/acpi/ec.c index 2614a839c60d..42c66b64c12c 100644 --- a/drivers/acpi/ec.c +++ b/drivers/acpi/ec.c @@ -1044,8 +1044,10 @@ static int acpi_ec_query(struct acpi_ec *ec, u8 *data) goto err_exit; mutex_lock(&ec->mutex); + result = -ENODATA; list_for_each_entry(handler, &ec->list, node) { if (value == handler->query_bit) { + result = 0; q->handler = acpi_ec_get_query_handler(handler); ec_dbg_evt("Query(0x%02x) scheduled", q->handler->query_bit); diff --git a/drivers/acpi/pci_irq.c b/drivers/acpi/pci_irq.c index 6da0f9beab19..c9336751e5e3 100644 --- a/drivers/acpi/pci_irq.c +++ b/drivers/acpi/pci_irq.c @@ -372,6 +372,7 @@ static int acpi_isa_register_gsi(struct pci_dev *dev) /* Interrupt Line values above 0xF are forbidden */ if (dev->irq > 0 && (dev->irq <= 0xF) && + acpi_isa_irq_available(dev->irq) && (acpi_isa_irq_to_gsi(dev->irq, &dev_gsi) == 0)) { dev_warn(&dev->dev, "PCI INT %c: no GSI - using ISA IRQ %d\n", pin_name(dev->pin), dev->irq); diff --git a/drivers/acpi/pci_link.c b/drivers/acpi/pci_link.c index 3b4ea98e3ea0..7c8408b946ca 100644 --- a/drivers/acpi/pci_link.c +++ b/drivers/acpi/pci_link.c @@ -498,8 +498,7 @@ int __init acpi_irq_penalty_init(void) PIRQ_PENALTY_PCI_POSSIBLE; } } - /* Add a penalty for the SCI */ - acpi_irq_penalty[acpi_gbl_FADT.sci_interrupt] += PIRQ_PENALTY_PCI_USING; + return 0; } @@ -553,6 +552,13 @@ static int acpi_pci_link_allocate(struct acpi_pci_link *link) irq = link->irq.possible[i]; } } + if (acpi_irq_penalty[irq] >= PIRQ_PENALTY_ISA_ALWAYS) { + printk(KERN_ERR PREFIX "No IRQ available for %s [%s]. " + "Try pci=noacpi or acpi=off\n", + acpi_device_name(link->device), + acpi_device_bid(link->device)); + return -ENODEV; + } /* Attempt to enable the link device at this IRQ. 
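The companion helper acpi_isa_irq_available() added further down reduces to a bounds-and-penalty test; standalone (array size parameterized and the penalty constant stubbed with a stand-in value for the sketch):

	#include <stdbool.h>

	#define PIRQ_PENALTY_ISA_ALWAYS 0x10000	/* stand-in value for the sketch */

	/* Sketch: an ISA IRQ is usable if untracked or below the ISA_ALWAYS penalty. */
	static bool isa_irq_available(int irq, const int *penalty, int nr_tracked)
	{
		return irq >= 0 && (irq >= nr_tracked ||
				    penalty[irq] < PIRQ_PENALTY_ISA_ALWAYS);
	}
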
*/ if (acpi_pci_link_set(link, irq)) { @@ -821,6 +827,12 @@ void acpi_penalize_isa_irq(int irq, int active) } } +bool acpi_isa_irq_available(int irq) +{ + return irq >= 0 && (irq >= ARRAY_SIZE(acpi_irq_penalty) || + acpi_irq_penalty[irq] < PIRQ_PENALTY_ISA_ALWAYS); +} + /* * Penalize IRQ used by ACPI SCI. If ACPI SCI pin attributes conflict with * PCI IRQ attributes, mark ACPI SCI as ISA_ALWAYS so it won't be use for diff --git a/drivers/base/power/opp.c b/drivers/base/power/opp.c index 28cd75c535b0..7ae7cd990fbf 100644 --- a/drivers/base/power/opp.c +++ b/drivers/base/power/opp.c @@ -892,10 +892,17 @@ static int opp_get_microvolt(struct dev_pm_opp *opp, struct device *dev) u32 microvolt[3] = {0}; int count, ret; - count = of_property_count_u32_elems(opp->np, "opp-microvolt"); - if (!count) + /* Missing property isn't a problem, but an invalid entry is */ + if (!of_find_property(opp->np, "opp-microvolt", NULL)) return 0; + count = of_property_count_u32_elems(opp->np, "opp-microvolt"); + if (count < 0) { + dev_err(dev, "%s: Invalid opp-microvolt property (%d)\n", + __func__, count); + return count; + } + /* There can be one or three elements here */ if (count != 1 && count != 3) { dev_err(dev, "%s: Invalid number of elements in opp-microvolt property (%d)\n", @@ -1063,7 +1070,7 @@ EXPORT_SYMBOL_GPL(dev_pm_opp_add); * share a common logic which is isolated here. * * Return: -EINVAL for bad pointers, -ENOMEM if no memory available for the - * copy operation, returns 0 if no modifcation was done OR modification was + * copy operation, returns 0 if no modification was done OR modification was * successful. * * Locking: The internal device_opp and opp structures are RCU protected. @@ -1151,7 +1158,7 @@ unlock: * mutex locking or synchronize_rcu() blocking calls cannot be used. * * Return: -EINVAL for bad pointers, -ENOMEM if no memory available for the - * copy operation, returns 0 if no modifcation was done OR modification was + * copy operation, returns 0 if no modification was done OR modification was * successful. */ int dev_pm_opp_enable(struct device *dev, unsigned long freq) @@ -1177,7 +1184,7 @@ EXPORT_SYMBOL_GPL(dev_pm_opp_enable); * mutex locking or synchronize_rcu() blocking calls cannot be used. * * Return: -EINVAL for bad pointers, -ENOMEM if no memory available for the - * copy operation, returns 0 if no modifcation was done OR modification was + * copy operation, returns 0 if no modification was done OR modification was * successful. */ int dev_pm_opp_disable(struct device *dev, unsigned long freq) diff --git a/drivers/block/loop.c b/drivers/block/loop.c index f9889b6bc02c..674f800a3b57 100644 --- a/drivers/block/loop.c +++ b/drivers/block/loop.c @@ -1486,17 +1486,16 @@ static void loop_handle_cmd(struct loop_cmd *cmd) { const bool write = cmd->rq->cmd_flags & REQ_WRITE; struct loop_device *lo = cmd->rq->q->queuedata; - int ret = -EIO; + int ret = 0; - if (write && (lo->lo_flags & LO_FLAGS_READ_ONLY)) + if (write && (lo->lo_flags & LO_FLAGS_READ_ONLY)) { + ret = -EIO; goto failed; + } ret = do_req_filebacked(lo, cmd->rq); - failed: - if (ret) - cmd->rq->errors = -EIO; - blk_mq_complete_request(cmd->rq); + blk_mq_complete_request(cmd->rq, ret ? 
-EIO : 0); } static void loop_queue_write_work(struct work_struct *work) diff --git a/drivers/block/null_blk.c b/drivers/block/null_blk.c index a295b98c6bae..1c9e4fe5aa44 100644 --- a/drivers/block/null_blk.c +++ b/drivers/block/null_blk.c @@ -289,7 +289,7 @@ static inline void null_handle_cmd(struct nullb_cmd *cmd) case NULL_IRQ_SOFTIRQ: switch (queue_mode) { case NULL_Q_MQ: - blk_mq_complete_request(cmd->rq); + blk_mq_complete_request(cmd->rq, cmd->rq->errors); break; case NULL_Q_RQ: blk_complete_request(cmd->rq); diff --git a/drivers/block/nvme-core.c b/drivers/block/nvme-core.c index b97fc3fe0916..6f04771f1019 100644 --- a/drivers/block/nvme-core.c +++ b/drivers/block/nvme-core.c @@ -618,16 +618,15 @@ static void req_completion(struct nvme_queue *nvmeq, void *ctx, spin_unlock_irqrestore(req->q->queue_lock, flags); return; } + if (req->cmd_type == REQ_TYPE_DRV_PRIV) { if (cmd_rq->ctx == CMD_CTX_CANCELLED) - req->errors = -EINTR; - else - req->errors = status; + status = -EINTR; } else { - req->errors = nvme_error_status(status); + status = nvme_error_status(status); } - } else - req->errors = 0; + } + if (req->cmd_type == REQ_TYPE_DRV_PRIV) { u32 result = le32_to_cpup(&cqe->result); req->special = (void *)(uintptr_t)result; @@ -650,7 +649,7 @@ static void req_completion(struct nvme_queue *nvmeq, void *ctx, } nvme_free_iod(nvmeq->dev, iod); - blk_mq_complete_request(req); + blk_mq_complete_request(req, status); } /* length is in bytes. gfp flags indicates whether we may sleep. */ @@ -863,8 +862,7 @@ static int nvme_queue_rq(struct blk_mq_hw_ctx *hctx, if (ns && ns->ms && !blk_integrity_rq(req)) { if (!(ns->pi_type && ns->ms == 8) && req->cmd_type != REQ_TYPE_DRV_PRIV) { - req->errors = -EFAULT; - blk_mq_complete_request(req); + blk_mq_complete_request(req, -EFAULT); return BLK_MQ_RQ_QUEUE_OK; } } @@ -2439,6 +2437,22 @@ static void nvme_scan_namespaces(struct nvme_dev *dev, unsigned nn) list_sort(NULL, &dev->namespaces, ns_cmp); } +static void nvme_set_irq_hints(struct nvme_dev *dev) +{ + struct nvme_queue *nvmeq; + int i; + + for (i = 0; i < dev->online_queues; i++) { + nvmeq = dev->queues[i]; + + if (!nvmeq->tags || !(*nvmeq->tags)) + continue; + + irq_set_affinity_hint(dev->entry[nvmeq->cq_vector].vector, + blk_mq_tags_cpumask(*nvmeq->tags)); + } +} + static void nvme_dev_scan(struct work_struct *work) { struct nvme_dev *dev = container_of(work, struct nvme_dev, scan_work); @@ -2450,6 +2464,7 @@ static void nvme_dev_scan(struct work_struct *work) return; nvme_scan_namespaces(dev, le32_to_cpup(&ctrl->nn)); kfree(ctrl); + nvme_set_irq_hints(dev); } /* @@ -2953,22 +2968,6 @@ static const struct file_operations nvme_dev_fops = { .compat_ioctl = nvme_dev_ioctl, }; -static void nvme_set_irq_hints(struct nvme_dev *dev) -{ - struct nvme_queue *nvmeq; - int i; - - for (i = 0; i < dev->online_queues; i++) { - nvmeq = dev->queues[i]; - - if (!nvmeq->tags || !(*nvmeq->tags)) - continue; - - irq_set_affinity_hint(dev->entry[nvmeq->cq_vector].vector, - blk_mq_tags_cpumask(*nvmeq->tags)); - } -} - static int nvme_dev_start(struct nvme_dev *dev) { int result; @@ -3010,8 +3009,6 @@ static int nvme_dev_start(struct nvme_dev *dev) if (result) goto free_tags; - nvme_set_irq_hints(dev); - dev->event_limit = 1; return result; @@ -3062,7 +3059,6 @@ static int nvme_dev_resume(struct nvme_dev *dev) } else { nvme_unfreeze_queues(dev); nvme_dev_add(dev); - nvme_set_irq_hints(dev); } return 0; } diff --git a/drivers/block/virtio_blk.c b/drivers/block/virtio_blk.c index e93899cc6f60..6ca35495a5be 100644 --- 
a/drivers/block/virtio_blk.c +++ b/drivers/block/virtio_blk.c @@ -144,7 +144,7 @@ static void virtblk_done(struct virtqueue *vq) do { virtqueue_disable_cb(vq); while ((vbr = virtqueue_get_buf(vblk->vqs[qid].vq, &len)) != NULL) { - blk_mq_complete_request(vbr->req); + blk_mq_complete_request(vbr->req, vbr->req->errors); req_done = true; } if (unlikely(virtqueue_is_broken(vq))) diff --git a/drivers/block/xen-blkback/xenbus.c b/drivers/block/xen-blkback/xenbus.c index deb3f001791f..767657565de6 100644 --- a/drivers/block/xen-blkback/xenbus.c +++ b/drivers/block/xen-blkback/xenbus.c @@ -212,6 +212,9 @@ static int xen_blkif_map(struct xen_blkif *blkif, grant_ref_t *gref, static int xen_blkif_disconnect(struct xen_blkif *blkif) { + struct pending_req *req, *n; + int i = 0, j; + if (blkif->xenblkd) { kthread_stop(blkif->xenblkd); wake_up(&blkif->shutdown_wq); @@ -238,13 +241,28 @@ static int xen_blkif_disconnect(struct xen_blkif *blkif) /* Remove all persistent grants and the cache of ballooned pages. */ xen_blkbk_free_caches(blkif); + /* Check that there is no request in use */ + list_for_each_entry_safe(req, n, &blkif->pending_free, free_list) { + list_del(&req->free_list); + + for (j = 0; j < MAX_INDIRECT_SEGMENTS; j++) + kfree(req->segments[j]); + + for (j = 0; j < MAX_INDIRECT_PAGES; j++) + kfree(req->indirect_pages[j]); + + kfree(req); + i++; + } + + WARN_ON(i != (XEN_BLKIF_REQS_PER_PAGE * blkif->nr_ring_pages)); + blkif->nr_ring_pages = 0; + return 0; } static void xen_blkif_free(struct xen_blkif *blkif) { - struct pending_req *req, *n; - int i = 0, j; xen_blkif_disconnect(blkif); xen_vbd_free(&blkif->vbd); @@ -257,22 +275,6 @@ static void xen_blkif_free(struct xen_blkif *blkif) BUG_ON(!list_empty(&blkif->free_pages)); BUG_ON(!RB_EMPTY_ROOT(&blkif->persistent_gnts)); - /* Check that there is no request in use */ - list_for_each_entry_safe(req, n, &blkif->pending_free, free_list) { - list_del(&req->free_list); - - for (j = 0; j < MAX_INDIRECT_SEGMENTS; j++) - kfree(req->segments[j]); - - for (j = 0; j < MAX_INDIRECT_PAGES; j++) - kfree(req->indirect_pages[j]); - - kfree(req); - i++; - } - - WARN_ON(i != (XEN_BLKIF_REQS_PER_PAGE * blkif->nr_ring_pages)); - kmem_cache_free(xen_blkif_cachep, blkif); } diff --git a/drivers/block/xen-blkfront.c b/drivers/block/xen-blkfront.c index 0823a96902f8..611170896b8c 100644 --- a/drivers/block/xen-blkfront.c +++ b/drivers/block/xen-blkfront.c @@ -1142,6 +1142,7 @@ static irqreturn_t blkif_interrupt(int irq, void *dev_id) RING_IDX i, rp; unsigned long flags; struct blkfront_info *info = (struct blkfront_info *)dev_id; + int error; spin_lock_irqsave(&info->io_lock, flags); @@ -1182,37 +1183,37 @@ static irqreturn_t blkif_interrupt(int irq, void *dev_id) continue; } - req->errors = (bret->status == BLKIF_RSP_OKAY) ? 0 : -EIO; + error = (bret->status == BLKIF_RSP_OKAY) ? 
0 : -EIO; switch (bret->operation) { case BLKIF_OP_DISCARD: if (unlikely(bret->status == BLKIF_RSP_EOPNOTSUPP)) { struct request_queue *rq = info->rq; printk(KERN_WARNING "blkfront: %s: %s op failed\n", info->gd->disk_name, op_name(bret->operation)); - req->errors = -EOPNOTSUPP; + error = -EOPNOTSUPP; info->feature_discard = 0; info->feature_secdiscard = 0; queue_flag_clear(QUEUE_FLAG_DISCARD, rq); queue_flag_clear(QUEUE_FLAG_SECDISCARD, rq); } - blk_mq_complete_request(req); + blk_mq_complete_request(req, error); break; case BLKIF_OP_FLUSH_DISKCACHE: case BLKIF_OP_WRITE_BARRIER: if (unlikely(bret->status == BLKIF_RSP_EOPNOTSUPP)) { printk(KERN_WARNING "blkfront: %s: %s op failed\n", info->gd->disk_name, op_name(bret->operation)); - req->errors = -EOPNOTSUPP; + error = -EOPNOTSUPP; } if (unlikely(bret->status == BLKIF_RSP_ERROR && info->shadow[id].req.u.rw.nr_segments == 0)) { printk(KERN_WARNING "blkfront: %s: empty %s op failed\n", info->gd->disk_name, op_name(bret->operation)); - req->errors = -EOPNOTSUPP; + error = -EOPNOTSUPP; } - if (unlikely(req->errors)) { - if (req->errors == -EOPNOTSUPP) - req->errors = 0; + if (unlikely(error)) { + if (error == -EOPNOTSUPP) + error = 0; info->feature_flush = 0; xlvbd_flush(info); } @@ -1223,7 +1224,7 @@ static irqreturn_t blkif_interrupt(int irq, void *dev_id) dev_dbg(&info->xbdev->dev, "Bad return from blkdev data " "request: %x\n", bret->status); - blk_mq_complete_request(req); + blk_mq_complete_request(req, error); break; default: BUG(); diff --git a/drivers/clocksource/rockchip_timer.c b/drivers/clocksource/rockchip_timer.c index bb2c2b050964..d3c1742ded1a 100644 --- a/drivers/clocksource/rockchip_timer.c +++ b/drivers/clocksource/rockchip_timer.c @@ -148,7 +148,7 @@ static void __init rk_timer_init(struct device_node *np) bc_timer.freq = clk_get_rate(timer_clk); irq = irq_of_parse_and_map(np, 0); - if (irq == NO_IRQ) { + if (!irq) { pr_err("Failed to map interrupts for '%s'\n", TIMER_NAME); return; } diff --git a/drivers/clocksource/timer-keystone.c b/drivers/clocksource/timer-keystone.c index edacf3902e10..1cea08cf603e 100644 --- a/drivers/clocksource/timer-keystone.c +++ b/drivers/clocksource/timer-keystone.c @@ -152,7 +152,7 @@ static void __init keystone_timer_init(struct device_node *np) int irq, error; irq = irq_of_parse_and_map(np, 0); - if (irq == NO_IRQ) { + if (!irq) { pr_err("%s: failed to map interrupts\n", __func__); return; } diff --git a/drivers/dma/at_xdmac.c b/drivers/dma/at_xdmac.c index a165b4bfd330..dd24375b76dd 100644 --- a/drivers/dma/at_xdmac.c +++ b/drivers/dma/at_xdmac.c @@ -455,6 +455,15 @@ static struct at_xdmac_desc *at_xdmac_alloc_desc(struct dma_chan *chan, return desc; } +void at_xdmac_init_used_desc(struct at_xdmac_desc *desc) +{ + memset(&desc->lld, 0, sizeof(desc->lld)); + INIT_LIST_HEAD(&desc->descs_list); + desc->direction = DMA_TRANS_NONE; + desc->xfer_size = 0; + desc->active_xfer = false; +} + /* Call must be protected by lock. 
*/ static struct at_xdmac_desc *at_xdmac_get_desc(struct at_xdmac_chan *atchan) { @@ -466,7 +475,7 @@ static struct at_xdmac_desc *at_xdmac_get_desc(struct at_xdmac_chan *atchan) desc = list_first_entry(&atchan->free_descs_list, struct at_xdmac_desc, desc_node); list_del(&desc->desc_node); - desc->active_xfer = false; + at_xdmac_init_used_desc(desc); } return desc; @@ -875,14 +884,14 @@ at_xdmac_interleaved_queue_desc(struct dma_chan *chan, if (xt->src_inc) { if (xt->src_sgl) - chan_cc |= AT_XDMAC_CC_SAM_UBS_DS_AM; + chan_cc |= AT_XDMAC_CC_SAM_UBS_AM; else chan_cc |= AT_XDMAC_CC_SAM_INCREMENTED_AM; } if (xt->dst_inc) { if (xt->dst_sgl) - chan_cc |= AT_XDMAC_CC_DAM_UBS_DS_AM; + chan_cc |= AT_XDMAC_CC_DAM_UBS_AM; else chan_cc |= AT_XDMAC_CC_DAM_INCREMENTED_AM; } diff --git a/drivers/dma/dmaengine.c b/drivers/dma/dmaengine.c index 3ff284c8e3d5..09479d4be4db 100644 --- a/drivers/dma/dmaengine.c +++ b/drivers/dma/dmaengine.c @@ -554,10 +554,18 @@ struct dma_chan *dma_get_slave_channel(struct dma_chan *chan) mutex_lock(&dma_list_mutex); if (chan->client_count == 0) { + struct dma_device *device = chan->device; + + dma_cap_set(DMA_PRIVATE, device->cap_mask); + device->privatecnt++; err = dma_chan_get(chan); - if (err) + if (err) { pr_debug("%s: failed to get %s: (%d)\n", __func__, dma_chan_name(chan), err); + chan = NULL; + if (--device->privatecnt == 0) + dma_cap_clear(DMA_PRIVATE, device->cap_mask); + } } else chan = NULL; diff --git a/drivers/dma/dw/core.c b/drivers/dma/dw/core.c index cf1c87fa1edd..bedce038c6e2 100644 --- a/drivers/dma/dw/core.c +++ b/drivers/dma/dw/core.c @@ -1591,7 +1591,6 @@ int dw_dma_probe(struct dw_dma_chip *chip, struct dw_dma_platform_data *pdata) INIT_LIST_HEAD(&dw->dma.channels); for (i = 0; i < nr_channels; i++) { struct dw_dma_chan *dwc = &dw->chan[i]; - int r = nr_channels - i - 1; dwc->chan.device = &dw->dma; dma_cookie_init(&dwc->chan); @@ -1603,7 +1602,7 @@ int dw_dma_probe(struct dw_dma_chip *chip, struct dw_dma_platform_data *pdata) /* 7 is highest priority & 0 is lowest. 
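With ascending channel priority, the hunk below assigns channel i the value nr_channels - i - 1, so channel 0 ends up highest. A worked sketch of that mapping:

	#include <stdio.h>

	/* Sketch: descending priority assignment for 8 channels. */
	int main(void)
	{
		unsigned int nr_channels = 8, i;

		for (i = 0; i < nr_channels; i++)
			printf("chan %u -> priority %u\n", i, nr_channels - i - 1);
		return 0;	/* chan 0 -> 7 ... chan 7 -> 0 */
	}
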
*/ if (pdata->chan_priority == CHAN_PRIORITY_ASCENDING) - dwc->priority = r; + dwc->priority = nr_channels - i - 1; else dwc->priority = i; @@ -1622,6 +1621,7 @@ int dw_dma_probe(struct dw_dma_chip *chip, struct dw_dma_platform_data *pdata) /* Hardware configuration */ if (autocfg) { unsigned int dwc_params; + unsigned int r = DW_DMA_MAX_NR_CHANNELS - i - 1; void __iomem *addr = chip->regs + r * sizeof(u32); dwc_params = dma_read_byaddr(addr, DWC_PARAMS); diff --git a/drivers/dma/idma64.c b/drivers/dma/idma64.c index 18c14e1f1414..48d6d9e94f67 100644 --- a/drivers/dma/idma64.c +++ b/drivers/dma/idma64.c @@ -355,23 +355,23 @@ static size_t idma64_active_desc_size(struct idma64_chan *idma64c) struct idma64_desc *desc = idma64c->desc; struct idma64_hw_desc *hw; size_t bytes = desc->length; - u64 llp; - u32 ctlhi; + u64 llp = channel_readq(idma64c, LLP); + u32 ctlhi = channel_readl(idma64c, CTL_HI); unsigned int i = 0; - llp = channel_readq(idma64c, LLP); do { hw = &desc->hw[i]; - } while ((hw->llp != llp) && (++i < desc->ndesc)); + if (hw->llp == llp) + break; + bytes -= hw->len; + } while (++i < desc->ndesc); if (!i) return bytes; - do { - bytes -= desc->hw[--i].len; - } while (i); + /* The current chunk is not fully transfered yet */ + bytes += desc->hw[--i].len; - ctlhi = channel_readl(idma64c, CTL_HI); return bytes - IDMA64C_CTLH_BLOCK_TS(ctlhi); } diff --git a/drivers/dma/pxa_dma.c b/drivers/dma/pxa_dma.c index 5cb61ce01036..fc4156afa070 100644 --- a/drivers/dma/pxa_dma.c +++ b/drivers/dma/pxa_dma.c @@ -473,8 +473,10 @@ static void pxad_free_phy(struct pxad_chan *chan) return; /* clear the channel mapping in DRCMR */ - reg = pxad_drcmr(chan->drcmr); - writel_relaxed(0, chan->phy->base + reg); + if (chan->drcmr <= DRCMR_CHLNUM) { + reg = pxad_drcmr(chan->drcmr); + writel_relaxed(0, chan->phy->base + reg); + } spin_lock_irqsave(&pdev->phy_lock, flags); for (i = 0; i < 32; i++) @@ -516,8 +518,10 @@ static void phy_enable(struct pxad_phy *phy, bool misaligned) "%s(); phy=%p(%d) misaligned=%d\n", __func__, phy, phy->idx, misaligned); - reg = pxad_drcmr(phy->vchan->drcmr); - writel_relaxed(DRCMR_MAPVLD | phy->idx, phy->base + reg); + if (phy->vchan->drcmr <= DRCMR_CHLNUM) { + reg = pxad_drcmr(phy->vchan->drcmr); + writel_relaxed(DRCMR_MAPVLD | phy->idx, phy->base + reg); + } dalgn = phy_readl_relaxed(phy, DALGN); if (misaligned) @@ -887,6 +891,7 @@ pxad_tx_prep(struct virt_dma_chan *vc, struct virt_dma_desc *vd, struct dma_async_tx_descriptor *tx; struct pxad_chan *chan = container_of(vc, struct pxad_chan, vc); + INIT_LIST_HEAD(&vd->node); tx = vchan_tx_prep(vc, vd, tx_flags); tx->tx_submit = pxad_tx_submit; dev_dbg(&chan->vc.chan.dev->device, @@ -910,14 +915,18 @@ static void pxad_get_config(struct pxad_chan *chan, width = chan->cfg.src_addr_width; dev_addr = chan->cfg.src_addr; *dev_src = dev_addr; - *dcmd |= PXA_DCMD_INCTRGADDR | PXA_DCMD_FLOWSRC; + *dcmd |= PXA_DCMD_INCTRGADDR; + if (chan->drcmr <= DRCMR_CHLNUM) + *dcmd |= PXA_DCMD_FLOWSRC; } if (dir == DMA_MEM_TO_DEV) { maxburst = chan->cfg.dst_maxburst; width = chan->cfg.dst_addr_width; dev_addr = chan->cfg.dst_addr; *dev_dst = dev_addr; - *dcmd |= PXA_DCMD_INCSRCADDR | PXA_DCMD_FLOWTRG; + *dcmd |= PXA_DCMD_INCSRCADDR; + if (chan->drcmr <= DRCMR_CHLNUM) + *dcmd |= PXA_DCMD_FLOWTRG; } if (dir == DMA_MEM_TO_MEM) *dcmd |= PXA_DCMD_BURST32 | PXA_DCMD_INCTRGADDR | @@ -1177,6 +1186,16 @@ static unsigned int pxad_residue(struct pxad_chan *chan, else curr = phy_readl_relaxed(chan->phy, DTADR); + /* + * curr has to be actually read before 
checking descriptor + * completion, so that a curr inside a status updater + * descriptor implies the following test returns true, and + * preventing reordering of curr load and the test. + */ + rmb(); + if (is_desc_completed(vd)) + goto out; + for (i = 0; i < sw_desc->nb_desc - 1; i++) { hw_desc = sw_desc->hw_desc[i]; if (sw_desc->hw_desc[0]->dcmd & PXA_DCMD_INCSRCADDR) diff --git a/drivers/dma/sun4i-dma.c b/drivers/dma/sun4i-dma.c index a1a500d96ff2..1661d518224a 100644 --- a/drivers/dma/sun4i-dma.c +++ b/drivers/dma/sun4i-dma.c @@ -599,13 +599,13 @@ get_next_cyclic_promise(struct sun4i_dma_contract *contract) static void sun4i_dma_free_contract(struct virt_dma_desc *vd) { struct sun4i_dma_contract *contract = to_sun4i_dma_contract(vd); - struct sun4i_dma_promise *promise; + struct sun4i_dma_promise *promise, *tmp; /* Free all the demands and completed demands */ - list_for_each_entry(promise, &contract->demands, list) + list_for_each_entry_safe(promise, tmp, &contract->demands, list) kfree(promise); - list_for_each_entry(promise, &contract->completed_demands, list) + list_for_each_entry_safe(promise, tmp, &contract->completed_demands, list) kfree(promise); kfree(contract); diff --git a/drivers/dma/xgene-dma.c b/drivers/dma/xgene-dma.c index b23e8d52d126..8d57b1b12e41 100644 --- a/drivers/dma/xgene-dma.c +++ b/drivers/dma/xgene-dma.c @@ -59,7 +59,6 @@ #define XGENE_DMA_RING_MEM_RAM_SHUTDOWN 0xD070 #define XGENE_DMA_RING_BLK_MEM_RDY 0xD074 #define XGENE_DMA_RING_BLK_MEM_RDY_VAL 0xFFFFFFFF -#define XGENE_DMA_RING_DESC_CNT(v) (((v) & 0x0001FFFE) >> 1) #define XGENE_DMA_RING_ID_GET(owner, num) (((owner) << 6) | (num)) #define XGENE_DMA_RING_DST_ID(v) ((1 << 10) | (v)) #define XGENE_DMA_RING_CMD_OFFSET 0x2C @@ -379,14 +378,6 @@ static u8 xgene_dma_encode_xor_flyby(u32 src_cnt) return flyby_type[src_cnt]; } -static u32 xgene_dma_ring_desc_cnt(struct xgene_dma_ring *ring) -{ - u32 __iomem *cmd_base = ring->cmd_base; - u32 ring_state = ioread32(&cmd_base[1]); - - return XGENE_DMA_RING_DESC_CNT(ring_state); -} - static void xgene_dma_set_src_buffer(__le64 *ext8, size_t *len, dma_addr_t *paddr) { @@ -659,15 +650,12 @@ static void xgene_dma_clean_running_descriptor(struct xgene_dma_chan *chan, dma_pool_free(chan->desc_pool, desc, desc->tx.phys); } -static int xgene_chan_xfer_request(struct xgene_dma_ring *ring, - struct xgene_dma_desc_sw *desc_sw) +static void xgene_chan_xfer_request(struct xgene_dma_chan *chan, + struct xgene_dma_desc_sw *desc_sw) { + struct xgene_dma_ring *ring = &chan->tx_ring; struct xgene_dma_desc_hw *desc_hw; - /* Check if can push more descriptor to hw for execution */ - if (xgene_dma_ring_desc_cnt(ring) > (ring->slots - 2)) - return -EBUSY; - /* Get hw descriptor from DMA tx ring */ desc_hw = &ring->desc_hw[ring->head]; @@ -694,11 +682,13 @@ static int xgene_chan_xfer_request(struct xgene_dma_ring *ring, memcpy(desc_hw, &desc_sw->desc2, sizeof(*desc_hw)); } + /* Increment the pending transaction count */ + chan->pending += ((desc_sw->flags & + XGENE_DMA_FLAG_64B_DESC) ? 2 : 1); + /* Notify the hw that we have descriptor ready for execution */ iowrite32((desc_sw->flags & XGENE_DMA_FLAG_64B_DESC) ? 
2 : 1, ring->cmd); - - return 0; } /** @@ -710,7 +700,6 @@ static int xgene_chan_xfer_request(struct xgene_dma_ring *ring, static void xgene_chan_xfer_ld_pending(struct xgene_dma_chan *chan) { struct xgene_dma_desc_sw *desc_sw, *_desc_sw; - int ret; /* * If the list of pending descriptors is empty, then we @@ -735,18 +724,13 @@ static void xgene_chan_xfer_ld_pending(struct xgene_dma_chan *chan) if (chan->pending >= chan->max_outstanding) return; - ret = xgene_chan_xfer_request(&chan->tx_ring, desc_sw); - if (ret) - return; + xgene_chan_xfer_request(chan, desc_sw); /* * Delete this element from ld pending queue and append it to * ld running queue */ list_move_tail(&desc_sw->node, &chan->ld_running); - - /* Increment the pending transaction count */ - chan->pending++; } } @@ -821,7 +805,8 @@ static void xgene_dma_cleanup_descriptors(struct xgene_dma_chan *chan) * Decrement the pending transaction count * as we have processed one */ - chan->pending--; + chan->pending -= ((desc_sw->flags & + XGENE_DMA_FLAG_64B_DESC) ? 2 : 1); /* * Delete this node from ld running queue and append it to @@ -1421,15 +1406,18 @@ static int xgene_dma_create_ring_one(struct xgene_dma_chan *chan, struct xgene_dma_ring *ring, enum xgene_dma_ring_cfgsize cfgsize) { + int ret; + /* Setup DMA ring descriptor variables */ ring->pdma = chan->pdma; ring->cfgsize = cfgsize; ring->num = chan->pdma->ring_num++; ring->id = XGENE_DMA_RING_ID_GET(ring->owner, ring->buf_num); - ring->size = xgene_dma_get_ring_size(chan, cfgsize); - if (ring->size <= 0) - return ring->size; + ret = xgene_dma_get_ring_size(chan, cfgsize); + if (ret <= 0) + return ret; + ring->size = ret; /* Allocate memory for DMA ring descriptor */ ring->desc_vaddr = dma_zalloc_coherent(chan->dev, ring->size, @@ -1482,7 +1470,7 @@ static int xgene_dma_create_chan_rings(struct xgene_dma_chan *chan) tx_ring->id, tx_ring->num, tx_ring->desc_vaddr); /* Set the max outstanding request possible to this channel */ - chan->max_outstanding = rx_ring->slots; + chan->max_outstanding = tx_ring->slots; return ret; } diff --git a/drivers/dma/zx296702_dma.c b/drivers/dma/zx296702_dma.c index 39915a6b7986..c017fcd8e07c 100644 --- a/drivers/dma/zx296702_dma.c +++ b/drivers/dma/zx296702_dma.c @@ -739,7 +739,7 @@ static struct dma_chan *zx_of_dma_simple_xlate(struct of_phandle_args *dma_spec, struct dma_chan *chan; struct zx_dma_chan *c; - if (request > d->dma_requests) + if (request >= d->dma_requests) return NULL; chan = dma_get_any_slave_channel(&d->slave); diff --git a/drivers/firmware/efi/libstub/arm-stub.c b/drivers/firmware/efi/libstub/arm-stub.c index e29560e6b40b..950c87f5d279 100644 --- a/drivers/firmware/efi/libstub/arm-stub.c +++ b/drivers/firmware/efi/libstub/arm-stub.c @@ -13,6 +13,7 @@ */ #include <linux/efi.h> +#include <linux/sort.h> #include <asm/efi.h> #include "efistub.h" @@ -305,6 +306,44 @@ fail: */ #define EFI_RT_VIRTUAL_BASE 0x40000000 +static int cmp_mem_desc(const void *l, const void *r) +{ + const efi_memory_desc_t *left = l, *right = r; + + return (left->phys_addr > right->phys_addr) ? 1 : -1; +} + +/* + * Returns whether region @left ends exactly where region @right starts, + * or false if either argument is NULL. 
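The adjacency test that follows is pure address arithmetic; a standalone rendering (EFI page size assumed to be 4 KiB, simplified struct in place of efi_memory_desc_t):

	#include <stdbool.h>
	#include <stdint.h>

	#define EFI_PAGE_SIZE 4096ULL

	struct mem_desc { uint64_t phys_addr, num_pages; };

	/* Sketch: @l is adjacent to @r when it ends exactly where @r starts. */
	static bool regions_adjacent(const struct mem_desc *l,
				     const struct mem_desc *r)
	{
		if (!l || !r)
			return false;
		return l->phys_addr + l->num_pages * EFI_PAGE_SIZE == r->phys_addr;
	}
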
+ */ +static bool regions_are_adjacent(efi_memory_desc_t *left, + efi_memory_desc_t *right) +{ + u64 left_end; + + if (left == NULL || right == NULL) + return false; + + left_end = left->phys_addr + left->num_pages * EFI_PAGE_SIZE; + + return left_end == right->phys_addr; +} + +/* + * Returns whether region @left and region @right have compatible memory type + * mapping attributes, and are both EFI_MEMORY_RUNTIME regions. + */ +static bool regions_have_compatible_memory_type_attrs(efi_memory_desc_t *left, + efi_memory_desc_t *right) +{ + static const u64 mem_type_mask = EFI_MEMORY_WB | EFI_MEMORY_WT | + EFI_MEMORY_WC | EFI_MEMORY_UC | + EFI_MEMORY_RUNTIME; + + return ((left->attribute ^ right->attribute) & mem_type_mask) == 0; +} + /* * efi_get_virtmap() - create a virtual mapping for the EFI memory map * @@ -317,33 +356,52 @@ void efi_get_virtmap(efi_memory_desc_t *memory_map, unsigned long map_size, int *count) { u64 efi_virt_base = EFI_RT_VIRTUAL_BASE; - efi_memory_desc_t *out = runtime_map; + efi_memory_desc_t *in, *prev = NULL, *out = runtime_map; int l; - for (l = 0; l < map_size; l += desc_size) { - efi_memory_desc_t *in = (void *)memory_map + l; + /* + * To work around potential issues with the Properties Table feature + * introduced in UEFI 2.5, which may split PE/COFF executable images + * in memory into several RuntimeServicesCode and RuntimeServicesData + * regions, we need to preserve the relative offsets between adjacent + * EFI_MEMORY_RUNTIME regions with the same memory type attributes. + * The easiest way to find adjacent regions is to sort the memory map + * before traversing it. + */ + sort(memory_map, map_size / desc_size, desc_size, cmp_mem_desc, NULL); + + for (l = 0; l < map_size; l += desc_size, prev = in) { u64 paddr, size; + in = (void *)memory_map + l; if (!(in->attribute & EFI_MEMORY_RUNTIME)) continue; + paddr = in->phys_addr; + size = in->num_pages * EFI_PAGE_SIZE; + /* * Make the mapping compatible with 64k pages: this allows * a 4k page size kernel to kexec a 64k page size kernel and * vice versa. */ - paddr = round_down(in->phys_addr, SZ_64K); - size = round_up(in->num_pages * EFI_PAGE_SIZE + - in->phys_addr - paddr, SZ_64K); - - /* - * Avoid wasting memory on PTEs by choosing a virtual base that - * is compatible with section mappings if this region has the - * appropriate size and physical alignment. (Sections are 2 MB - * on 4k granule kernels) - */ - if (IS_ALIGNED(in->phys_addr, SZ_2M) && size >= SZ_2M) - efi_virt_base = round_up(efi_virt_base, SZ_2M); + if (!regions_are_adjacent(prev, in) || + !regions_have_compatible_memory_type_attrs(prev, in)) { + + paddr = round_down(in->phys_addr, SZ_64K); + size += in->phys_addr - paddr; + + /* + * Avoid wasting memory on PTEs by choosing a virtual + * base that is compatible with section mappings if this + * region has the appropriate size and physical + * alignment. 
(Sections are 2 MB on 4k granule kernels) + */ + if (IS_ALIGNED(in->phys_addr, SZ_2M) && size >= SZ_2M) + efi_virt_base = round_up(efi_virt_base, SZ_2M); + else + efi_virt_base = round_up(efi_virt_base, SZ_64K); + } in->virt_addr = efi_virt_base + in->phys_addr - paddr; efi_virt_base += size; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c index 1c3fc99c5465..8e995148f56e 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c @@ -208,44 +208,6 @@ static int amdgpu_cgs_alloc_gpu_mem(void *cgs_device, return ret; } -static int amdgpu_cgs_import_gpu_mem(void *cgs_device, int dmabuf_fd, - cgs_handle_t *handle) -{ - CGS_FUNC_ADEV; - int r; - uint32_t dma_handle; - struct drm_gem_object *obj; - struct amdgpu_bo *bo; - struct drm_device *dev = adev->ddev; - struct drm_file *file_priv = NULL, *priv; - - mutex_lock(&dev->struct_mutex); - list_for_each_entry(priv, &dev->filelist, lhead) { - rcu_read_lock(); - if (priv->pid == get_pid(task_pid(current))) - file_priv = priv; - rcu_read_unlock(); - if (file_priv) - break; - } - mutex_unlock(&dev->struct_mutex); - r = dev->driver->prime_fd_to_handle(dev, - file_priv, dmabuf_fd, - &dma_handle); - spin_lock(&file_priv->table_lock); - - /* Check if we currently have a reference on the object */ - obj = idr_find(&file_priv->object_idr, dma_handle); - if (obj == NULL) { - spin_unlock(&file_priv->table_lock); - return -EINVAL; - } - spin_unlock(&file_priv->table_lock); - bo = gem_to_amdgpu_bo(obj); - *handle = (cgs_handle_t)bo; - return 0; -} - static int amdgpu_cgs_free_gpu_mem(void *cgs_device, cgs_handle_t handle) { struct amdgpu_bo *obj = (struct amdgpu_bo *)handle; @@ -810,7 +772,6 @@ static const struct cgs_ops amdgpu_cgs_ops = { }; static const struct cgs_os_ops amdgpu_cgs_os_ops = { - amdgpu_cgs_import_gpu_mem, amdgpu_cgs_add_irq_source, amdgpu_cgs_irq_get, amdgpu_cgs_irq_put diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c index 749420f1ea6f..cb3c274edb0a 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c @@ -156,7 +156,8 @@ int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p, void *data) uint64_t *chunk_array_user; uint64_t *chunk_array; struct amdgpu_fpriv *fpriv = p->filp->driver_priv; - unsigned size, i; + unsigned size; + int i; int ret; if (cs->in.num_chunks == 0) diff --git a/drivers/gpu/drm/amd/amdgpu/atombios_encoders.c b/drivers/gpu/drm/amd/amdgpu/atombios_encoders.c index cd6edc40c9cd..1e0bba29e167 100644 --- a/drivers/gpu/drm/amd/amdgpu/atombios_encoders.c +++ b/drivers/gpu/drm/amd/amdgpu/atombios_encoders.c @@ -1279,8 +1279,7 @@ amdgpu_atombios_encoder_setup_dig(struct drm_encoder *encoder, int action) amdgpu_atombios_encoder_setup_dig_encoder(encoder, ATOM_ENCODER_CMD_DP_VIDEO_ON, 0); } if (amdgpu_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) - amdgpu_atombios_encoder_setup_dig_transmitter(encoder, - ATOM_TRANSMITTER_ACTION_LCD_BLON, 0, 0); + amdgpu_atombios_encoder_set_backlight_level(amdgpu_encoder, dig->backlight_level); if (ext_encoder) amdgpu_atombios_encoder_setup_external_encoder(encoder, ext_encoder, ATOM_ENABLE); } else { diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c index 774528ab8704..fab5471d25d7 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c @@ -1262,6 +1262,12 @@ static int gmc_v7_0_process_interrupt(struct amdgpu_device *adev, addr = 
RREG32(mmVM_CONTEXT1_PROTECTION_FAULT_ADDR); status = RREG32(mmVM_CONTEXT1_PROTECTION_FAULT_STATUS); mc_client = RREG32(mmVM_CONTEXT1_PROTECTION_FAULT_MCCLIENT); + /* reset addr and status */ + WREG32_P(mmVM_CONTEXT1_CNTL2, 1, ~1); + + if (!addr && !status) + return 0; + dev_err(adev->dev, "GPU fault detected: %d 0x%08x\n", entry->src_id, entry->src_data); dev_err(adev->dev, " VM_CONTEXT1_PROTECTION_FAULT_ADDR 0x%08X\n", @@ -1269,8 +1275,6 @@ static int gmc_v7_0_process_interrupt(struct amdgpu_device *adev, dev_err(adev->dev, " VM_CONTEXT1_PROTECTION_FAULT_STATUS 0x%08X\n", status); gmc_v7_0_vm_decode_fault(adev, status, addr, mc_client); - /* reset addr and status */ - WREG32_P(mmVM_CONTEXT1_CNTL2, 1, ~1); return 0; } diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c index 9a07742620d0..7bc9e9fcf3d2 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c @@ -1262,6 +1262,12 @@ static int gmc_v8_0_process_interrupt(struct amdgpu_device *adev, addr = RREG32(mmVM_CONTEXT1_PROTECTION_FAULT_ADDR); status = RREG32(mmVM_CONTEXT1_PROTECTION_FAULT_STATUS); mc_client = RREG32(mmVM_CONTEXT1_PROTECTION_FAULT_MCCLIENT); + /* reset addr and status */ + WREG32_P(mmVM_CONTEXT1_CNTL2, 1, ~1); + + if (!addr && !status) + return 0; + dev_err(adev->dev, "GPU fault detected: %d 0x%08x\n", entry->src_id, entry->src_data); dev_err(adev->dev, " VM_CONTEXT1_PROTECTION_FAULT_ADDR 0x%08X\n", @@ -1269,8 +1275,6 @@ static int gmc_v8_0_process_interrupt(struct amdgpu_device *adev, dev_err(adev->dev, " VM_CONTEXT1_PROTECTION_FAULT_STATUS 0x%08X\n", status); gmc_v8_0_vm_decode_fault(adev, status, addr, mc_client); - /* reset addr and status */ - WREG32_P(mmVM_CONTEXT1_CNTL2, 1, ~1); return 0; } diff --git a/drivers/gpu/drm/amd/include/cgs_linux.h b/drivers/gpu/drm/amd/include/cgs_linux.h index 488642f08267..3b47ae313e36 100644 --- a/drivers/gpu/drm/amd/include/cgs_linux.h +++ b/drivers/gpu/drm/amd/include/cgs_linux.h @@ -27,19 +27,6 @@ #include "cgs_common.h" /** - * cgs_import_gpu_mem() - Import dmabuf handle - * @cgs_device: opaque device handle - * @dmabuf_fd: DMABuf file descriptor - * @handle: memory handle (output) - * - * Must be called in the process context that dmabuf_fd belongs to. 
- * - * Return: 0 on success, -errno otherwise - */ -typedef int (*cgs_import_gpu_mem_t)(void *cgs_device, int dmabuf_fd, - cgs_handle_t *handle); - -/** * cgs_irq_source_set_func() - Callback for enabling/disabling interrupt sources * @private_data: private data provided to cgs_add_irq_source * @src_id: interrupt source ID @@ -114,16 +101,12 @@ typedef int (*cgs_irq_get_t)(void *cgs_device, unsigned src_id, unsigned type); typedef int (*cgs_irq_put_t)(void *cgs_device, unsigned src_id, unsigned type); struct cgs_os_ops { - cgs_import_gpu_mem_t import_gpu_mem; - /* IRQ handling */ cgs_add_irq_source_t add_irq_source; cgs_irq_get_t irq_get; cgs_irq_put_t irq_put; }; -#define cgs_import_gpu_mem(dev,dmabuf_fd,handle) \ - CGS_OS_CALL(import_gpu_mem,dev,dmabuf_fd,handle) #define cgs_add_irq_source(dev,src_id,num_types,set,handler,private_data) \ CGS_OS_CALL(add_irq_source,dev,src_id,num_types,set,handler, \ private_data) diff --git a/drivers/gpu/drm/drm_dp_mst_topology.c b/drivers/gpu/drm/drm_dp_mst_topology.c index e23df5fd3836..bf27a07dbce3 100644 --- a/drivers/gpu/drm/drm_dp_mst_topology.c +++ b/drivers/gpu/drm/drm_dp_mst_topology.c @@ -53,8 +53,8 @@ static int drm_dp_send_dpcd_write(struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port, int offset, int size, u8 *bytes); -static int drm_dp_send_link_address(struct drm_dp_mst_topology_mgr *mgr, - struct drm_dp_mst_branch *mstb); +static void drm_dp_send_link_address(struct drm_dp_mst_topology_mgr *mgr, + struct drm_dp_mst_branch *mstb); static int drm_dp_send_enum_path_resources(struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_branch *mstb, struct drm_dp_mst_port *port); @@ -804,8 +804,6 @@ static void drm_dp_destroy_mst_branch_device(struct kref *kref) struct drm_dp_mst_port *port, *tmp; bool wake_tx = false; - cancel_work_sync(&mstb->mgr->work); - /* * destroy all ports - don't need lock * as there are no more references to the mst branch @@ -863,29 +861,33 @@ static void drm_dp_destroy_port(struct kref *kref) { struct drm_dp_mst_port *port = container_of(kref, struct drm_dp_mst_port, kref); struct drm_dp_mst_topology_mgr *mgr = port->mgr; + if (!port->input) { port->vcpi.num_slots = 0; kfree(port->cached_edid); - /* we can't destroy the connector here, as - we might be holding the mode_config.mutex - from an EDID retrieval */ + /* + * The only time we don't have a connector + * on an output port is if the connector init + * fails. 
+ */ if (port->connector) { + /* we can't destroy the connector here, as + * we might be holding the mode_config.mutex + * from an EDID retrieval */ + mutex_lock(&mgr->destroy_connector_lock); list_add(&port->next, &mgr->destroy_connector_list); mutex_unlock(&mgr->destroy_connector_lock); schedule_work(&mgr->destroy_connector_work); return; } + /* no need to clean up vcpi + * as if we have no connector we never setup a vcpi */ drm_dp_port_teardown_pdt(port, port->pdt); - - if (!port->input && port->vcpi.vcpi > 0) - drm_dp_mst_put_payload_id(mgr, port->vcpi.vcpi); } kfree(port); - - (*mgr->cbs->hotplug)(mgr); } static void drm_dp_put_port(struct drm_dp_mst_port *port) @@ -1027,8 +1029,8 @@ static void drm_dp_check_port_guid(struct drm_dp_mst_branch *mstb, } } -static void build_mst_prop_path(struct drm_dp_mst_port *port, - struct drm_dp_mst_branch *mstb, +static void build_mst_prop_path(const struct drm_dp_mst_branch *mstb, + int pnum, char *proppath, size_t proppath_size) { @@ -1041,7 +1043,7 @@ static void build_mst_prop_path(struct drm_dp_mst_port *port, snprintf(temp, sizeof(temp), "-%d", port_num); strlcat(proppath, temp, proppath_size); } - snprintf(temp, sizeof(temp), "-%d", port->port_num); + snprintf(temp, sizeof(temp), "-%d", pnum); strlcat(proppath, temp, proppath_size); } @@ -1105,22 +1107,32 @@ static void drm_dp_add_port(struct drm_dp_mst_branch *mstb, drm_dp_port_teardown_pdt(port, old_pdt); ret = drm_dp_port_setup_pdt(port); - if (ret == true) { + if (ret == true) drm_dp_send_link_address(mstb->mgr, port->mstb); - port->mstb->link_address_sent = true; - } } if (created && !port->input) { char proppath[255]; - build_mst_prop_path(port, mstb, proppath, sizeof(proppath)); - port->connector = (*mstb->mgr->cbs->add_connector)(mstb->mgr, port, proppath); - if (port->port_num >= 8) { + build_mst_prop_path(mstb, port->port_num, proppath, sizeof(proppath)); + port->connector = (*mstb->mgr->cbs->add_connector)(mstb->mgr, port, proppath); + if (!port->connector) { + /* remove it from the port list */ + mutex_lock(&mstb->mgr->lock); + list_del(&port->next); + mutex_unlock(&mstb->mgr->lock); + /* drop port list reference */ + drm_dp_put_port(port); + goto out; + } + if (port->port_num >= DP_MST_LOGICAL_PORT_0) { port->cached_edid = drm_get_edid(port->connector, &port->aux.ddc); + drm_mode_connector_set_tile_property(port->connector); } + (*mstb->mgr->cbs->register_connector)(port->connector); } +out: /* put reference to this port */ drm_dp_put_port(port); } @@ -1202,10 +1214,9 @@ static void drm_dp_check_and_send_link_address(struct drm_dp_mst_topology_mgr *m { struct drm_dp_mst_port *port; struct drm_dp_mst_branch *mstb_child; - if (!mstb->link_address_sent) { + if (!mstb->link_address_sent) drm_dp_send_link_address(mgr, mstb); - mstb->link_address_sent = true; - } + list_for_each_entry(port, &mstb->ports, next) { if (port->input) continue; @@ -1458,8 +1469,8 @@ static void drm_dp_queue_down_tx(struct drm_dp_mst_topology_mgr *mgr, mutex_unlock(&mgr->qlock); } -static int drm_dp_send_link_address(struct drm_dp_mst_topology_mgr *mgr, - struct drm_dp_mst_branch *mstb) +static void drm_dp_send_link_address(struct drm_dp_mst_topology_mgr *mgr, + struct drm_dp_mst_branch *mstb) { int len; struct drm_dp_sideband_msg_tx *txmsg; @@ -1467,11 +1478,12 @@ static int drm_dp_send_link_address(struct drm_dp_mst_topology_mgr *mgr, txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL); if (!txmsg) - return -ENOMEM; + return; txmsg->dst = mstb; len = build_link_address(txmsg); + mstb->link_address_sent = true; 
drm_dp_queue_down_tx(mgr, txmsg); ret = drm_dp_mst_wait_tx_reply(mstb, txmsg); @@ -1499,11 +1511,12 @@ static int drm_dp_send_link_address(struct drm_dp_mst_topology_mgr *mgr, } (*mgr->cbs->hotplug)(mgr); } - } else + } else { + mstb->link_address_sent = false; DRM_DEBUG_KMS("link address failed %d\n", ret); + } kfree(txmsg); - return 0; } static int drm_dp_send_enum_path_resources(struct drm_dp_mst_topology_mgr *mgr, @@ -1978,6 +1991,8 @@ void drm_dp_mst_topology_mgr_suspend(struct drm_dp_mst_topology_mgr *mgr) drm_dp_dpcd_writeb(mgr->aux, DP_MSTM_CTRL, DP_MST_EN | DP_UPSTREAM_IS_SRC); mutex_unlock(&mgr->lock); + flush_work(&mgr->work); + flush_work(&mgr->destroy_connector_work); } EXPORT_SYMBOL(drm_dp_mst_topology_mgr_suspend); @@ -2263,10 +2278,10 @@ struct edid *drm_dp_mst_get_edid(struct drm_connector *connector, struct drm_dp_ if (port->cached_edid) edid = drm_edid_duplicate(port->cached_edid); - else + else { edid = drm_get_edid(connector, &port->aux.ddc); - - drm_mode_connector_set_tile_property(connector); + drm_mode_connector_set_tile_property(connector); + } drm_dp_put_port(port); return edid; } @@ -2671,7 +2686,7 @@ static void drm_dp_destroy_connector_work(struct work_struct *work) { struct drm_dp_mst_topology_mgr *mgr = container_of(work, struct drm_dp_mst_topology_mgr, destroy_connector_work); struct drm_dp_mst_port *port; - + bool send_hotplug = false; /* * Not a regular list traverse as we have to drop the destroy * connector lock before destroying the connector, to avoid AB->BA @@ -2694,7 +2709,10 @@ static void drm_dp_destroy_connector_work(struct work_struct *work) if (!port->input && port->vcpi.vcpi > 0) drm_dp_mst_put_payload_id(mgr, port->vcpi.vcpi); kfree(port); + send_hotplug = true; } + if (send_hotplug) + (*mgr->cbs->hotplug)(mgr); } /** @@ -2747,6 +2765,7 @@ EXPORT_SYMBOL(drm_dp_mst_topology_mgr_init); */ void drm_dp_mst_topology_mgr_destroy(struct drm_dp_mst_topology_mgr *mgr) { + flush_work(&mgr->work); flush_work(&mgr->destroy_connector_work); mutex_lock(&mgr->payload_lock); kfree(mgr->payloads); diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c index 418d299f3b12..ca08c472311b 100644 --- a/drivers/gpu/drm/drm_fb_helper.c +++ b/drivers/gpu/drm/drm_fb_helper.c @@ -345,7 +345,11 @@ static bool restore_fbdev_mode(struct drm_fb_helper *fb_helper) struct drm_crtc *crtc = mode_set->crtc; int ret; - if (crtc->funcs->cursor_set) { + if (crtc->funcs->cursor_set2) { + ret = crtc->funcs->cursor_set2(crtc, NULL, 0, 0, 0, 0, 0); + if (ret) + error = true; + } else if (crtc->funcs->cursor_set) { ret = crtc->funcs->cursor_set(crtc, NULL, 0, 0, 0); if (ret) error = true; diff --git a/drivers/gpu/drm/drm_probe_helper.c b/drivers/gpu/drm/drm_probe_helper.c index d734780b31c0..a18164f2f6d2 100644 --- a/drivers/gpu/drm/drm_probe_helper.c +++ b/drivers/gpu/drm/drm_probe_helper.c @@ -94,7 +94,18 @@ static int drm_helper_probe_add_cmdline_mode(struct drm_connector *connector) } #define DRM_OUTPUT_POLL_PERIOD (10*HZ) -static void __drm_kms_helper_poll_enable(struct drm_device *dev) +/** + * drm_kms_helper_poll_enable_locked - re-enable output polling. + * @dev: drm_device + * + * This function re-enables the output polling work without + * locking the mode_config mutex. + * + * This is like drm_kms_helper_poll_enable() however it is to be + * called from a context where the mode_config mutex is locked + * already. 
+ */ +void drm_kms_helper_poll_enable_locked(struct drm_device *dev) { bool poll = false; struct drm_connector *connector; @@ -113,6 +124,8 @@ static void __drm_kms_helper_poll_enable(struct drm_device *dev) if (poll) schedule_delayed_work(&dev->mode_config.output_poll_work, DRM_OUTPUT_POLL_PERIOD); } +EXPORT_SYMBOL(drm_kms_helper_poll_enable_locked); + static int drm_helper_probe_single_connector_modes_merge_bits(struct drm_connector *connector, uint32_t maxX, uint32_t maxY, bool merge_type_bits) @@ -174,7 +187,7 @@ static int drm_helper_probe_single_connector_modes_merge_bits(struct drm_connect /* Re-enable polling in case the global poll config changed. */ if (drm_kms_helper_poll != dev->mode_config.poll_running) - __drm_kms_helper_poll_enable(dev); + drm_kms_helper_poll_enable_locked(dev); dev->mode_config.poll_running = drm_kms_helper_poll; @@ -428,7 +441,7 @@ EXPORT_SYMBOL(drm_kms_helper_poll_disable); void drm_kms_helper_poll_enable(struct drm_device *dev) { mutex_lock(&dev->mode_config.mutex); - __drm_kms_helper_poll_enable(dev); + drm_kms_helper_poll_enable_locked(dev); mutex_unlock(&dev->mode_config.mutex); } EXPORT_SYMBOL(drm_kms_helper_poll_enable); diff --git a/drivers/gpu/drm/exynos/exynos7_drm_decon.c b/drivers/gpu/drm/exynos/exynos7_drm_decon.c index cbdb78ef3bac..e6cbaca821a4 100644 --- a/drivers/gpu/drm/exynos/exynos7_drm_decon.c +++ b/drivers/gpu/drm/exynos/exynos7_drm_decon.c @@ -37,7 +37,6 @@ * DECON stands for Display and Enhancement controller. */ -#define DECON_DEFAULT_FRAMERATE 60 #define MIN_FB_WIDTH_FOR_16WORD_BURST 128 #define WINDOWS_NR 2 @@ -165,16 +164,6 @@ static u32 decon_calc_clkdiv(struct decon_context *ctx, return (clkdiv < 0x100) ? clkdiv : 0xff; } -static bool decon_mode_fixup(struct exynos_drm_crtc *crtc, - const struct drm_display_mode *mode, - struct drm_display_mode *adjusted_mode) -{ - if (adjusted_mode->vrefresh == 0) - adjusted_mode->vrefresh = DECON_DEFAULT_FRAMERATE; - - return true; -} - static void decon_commit(struct exynos_drm_crtc *crtc) { struct decon_context *ctx = crtc->ctx; @@ -637,7 +626,6 @@ static void decon_disable(struct exynos_drm_crtc *crtc) static const struct exynos_drm_crtc_ops decon_crtc_ops = { .enable = decon_enable, .disable = decon_disable, - .mode_fixup = decon_mode_fixup, .commit = decon_commit, .enable_vblank = decon_enable_vblank, .disable_vblank = decon_disable_vblank, diff --git a/drivers/gpu/drm/exynos/exynos_dp_core.c b/drivers/gpu/drm/exynos/exynos_dp_core.c index d66ade0efac8..124fb9a56f02 100644 --- a/drivers/gpu/drm/exynos/exynos_dp_core.c +++ b/drivers/gpu/drm/exynos/exynos_dp_core.c @@ -1383,28 +1383,6 @@ static int exynos_dp_remove(struct platform_device *pdev) return 0; } -#ifdef CONFIG_PM_SLEEP -static int exynos_dp_suspend(struct device *dev) -{ - struct exynos_dp_device *dp = dev_get_drvdata(dev); - - exynos_dp_disable(&dp->encoder); - return 0; -} - -static int exynos_dp_resume(struct device *dev) -{ - struct exynos_dp_device *dp = dev_get_drvdata(dev); - - exynos_dp_enable(&dp->encoder); - return 0; -} -#endif - -static const struct dev_pm_ops exynos_dp_pm_ops = { - SET_SYSTEM_SLEEP_PM_OPS(exynos_dp_suspend, exynos_dp_resume) -}; - static const struct of_device_id exynos_dp_match[] = { { .compatible = "samsung,exynos5-dp" }, {}, @@ -1417,7 +1395,6 @@ struct platform_driver dp_driver = { .driver = { .name = "exynos-dp", .owner = THIS_MODULE, - .pm = &exynos_dp_pm_ops, .of_match_table = exynos_dp_match, }, }; diff --git a/drivers/gpu/drm/exynos/exynos_drm_core.c 
b/drivers/gpu/drm/exynos/exynos_drm_core.c index c68a6a2a9b57..7f55ba6771c6 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_core.c +++ b/drivers/gpu/drm/exynos/exynos_drm_core.c @@ -28,7 +28,6 @@ int exynos_drm_subdrv_register(struct exynos_drm_subdrv *subdrv) return 0; } -EXPORT_SYMBOL_GPL(exynos_drm_subdrv_register); int exynos_drm_subdrv_unregister(struct exynos_drm_subdrv *subdrv) { @@ -39,7 +38,6 @@ int exynos_drm_subdrv_unregister(struct exynos_drm_subdrv *subdrv) return 0; } -EXPORT_SYMBOL_GPL(exynos_drm_subdrv_unregister); int exynos_drm_device_subdrv_probe(struct drm_device *dev) { @@ -69,7 +67,6 @@ int exynos_drm_device_subdrv_probe(struct drm_device *dev) return 0; } -EXPORT_SYMBOL_GPL(exynos_drm_device_subdrv_probe); int exynos_drm_device_subdrv_remove(struct drm_device *dev) { @@ -87,7 +84,6 @@ int exynos_drm_device_subdrv_remove(struct drm_device *dev) return 0; } -EXPORT_SYMBOL_GPL(exynos_drm_device_subdrv_remove); int exynos_drm_subdrv_open(struct drm_device *dev, struct drm_file *file) { @@ -111,7 +107,6 @@ err: } return ret; } -EXPORT_SYMBOL_GPL(exynos_drm_subdrv_open); void exynos_drm_subdrv_close(struct drm_device *dev, struct drm_file *file) { @@ -122,4 +117,3 @@ void exynos_drm_subdrv_close(struct drm_device *dev, struct drm_file *file) subdrv->close(dev, subdrv->dev, file); } } -EXPORT_SYMBOL_GPL(exynos_drm_subdrv_close); diff --git a/drivers/gpu/drm/exynos/exynos_drm_crtc.c b/drivers/gpu/drm/exynos/exynos_drm_crtc.c index 0872aa2f450f..ed28823d3b35 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_crtc.c +++ b/drivers/gpu/drm/exynos/exynos_drm_crtc.c @@ -41,20 +41,6 @@ static void exynos_drm_crtc_disable(struct drm_crtc *crtc) exynos_crtc->ops->disable(exynos_crtc); } -static bool -exynos_drm_crtc_mode_fixup(struct drm_crtc *crtc, - const struct drm_display_mode *mode, - struct drm_display_mode *adjusted_mode) -{ - struct exynos_drm_crtc *exynos_crtc = to_exynos_crtc(crtc); - - if (exynos_crtc->ops->mode_fixup) - return exynos_crtc->ops->mode_fixup(exynos_crtc, mode, - adjusted_mode); - - return true; -} - static void exynos_drm_crtc_mode_set_nofb(struct drm_crtc *crtc) { @@ -99,7 +85,6 @@ static void exynos_crtc_atomic_flush(struct drm_crtc *crtc, static struct drm_crtc_helper_funcs exynos_crtc_helper_funcs = { .enable = exynos_drm_crtc_enable, .disable = exynos_drm_crtc_disable, - .mode_fixup = exynos_drm_crtc_mode_fixup, .mode_set_nofb = exynos_drm_crtc_mode_set_nofb, .atomic_begin = exynos_crtc_atomic_begin, .atomic_flush = exynos_crtc_atomic_flush, diff --git a/drivers/gpu/drm/exynos/exynos_drm_drv.c b/drivers/gpu/drm/exynos/exynos_drm_drv.c index 831d2e4cacf9..ae9e6b2d3758 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_drv.c +++ b/drivers/gpu/drm/exynos/exynos_drm_drv.c @@ -304,6 +304,7 @@ int exynos_atomic_commit(struct drm_device *dev, struct drm_atomic_state *state, return 0; } +#ifdef CONFIG_PM_SLEEP static int exynos_drm_suspend(struct drm_device *dev, pm_message_t state) { struct drm_connector *connector; @@ -340,6 +341,7 @@ static int exynos_drm_resume(struct drm_device *dev) return 0; } +#endif static int exynos_drm_open(struct drm_device *dev, struct drm_file *file) { diff --git a/drivers/gpu/drm/exynos/exynos_drm_drv.h b/drivers/gpu/drm/exynos/exynos_drm_drv.h index b7ba21dfb696..6c717ba672db 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_drv.h +++ b/drivers/gpu/drm/exynos/exynos_drm_drv.h @@ -82,7 +82,6 @@ struct exynos_drm_plane { * * @enable: enable the device * @disable: disable the device - * @mode_fixup: fix mode data before applying it * @commit: 
set current hw specific display mode to hw. * @enable_vblank: specific driver callback for enabling vblank interrupt. * @disable_vblank: specific driver callback for disabling vblank interrupt. @@ -103,9 +102,6 @@ struct exynos_drm_crtc; struct exynos_drm_crtc_ops { void (*enable)(struct exynos_drm_crtc *crtc); void (*disable)(struct exynos_drm_crtc *crtc); - bool (*mode_fixup)(struct exynos_drm_crtc *crtc, - const struct drm_display_mode *mode, - struct drm_display_mode *adjusted_mode); void (*commit)(struct exynos_drm_crtc *crtc); int (*enable_vblank)(struct exynos_drm_crtc *crtc); void (*disable_vblank)(struct exynos_drm_crtc *crtc); diff --git a/drivers/gpu/drm/exynos/exynos_drm_fimc.c b/drivers/gpu/drm/exynos/exynos_drm_fimc.c index 2a652359af64..dd3a5e6d58c8 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_fimc.c +++ b/drivers/gpu/drm/exynos/exynos_drm_fimc.c @@ -1206,23 +1206,6 @@ static struct exynos_drm_ipp_ops fimc_dst_ops = { .set_addr = fimc_dst_set_addr, }; -static int fimc_clk_ctrl(struct fimc_context *ctx, bool enable) -{ - DRM_DEBUG_KMS("enable[%d]\n", enable); - - if (enable) { - clk_prepare_enable(ctx->clocks[FIMC_CLK_GATE]); - clk_prepare_enable(ctx->clocks[FIMC_CLK_WB_A]); - ctx->suspended = false; - } else { - clk_disable_unprepare(ctx->clocks[FIMC_CLK_GATE]); - clk_disable_unprepare(ctx->clocks[FIMC_CLK_WB_A]); - ctx->suspended = true; - } - - return 0; -} - static irqreturn_t fimc_irq_handler(int irq, void *dev_id) { struct fimc_context *ctx = dev_id; @@ -1780,6 +1763,24 @@ static int fimc_remove(struct platform_device *pdev) return 0; } +#ifdef CONFIG_PM +static int fimc_clk_ctrl(struct fimc_context *ctx, bool enable) +{ + DRM_DEBUG_KMS("enable[%d]\n", enable); + + if (enable) { + clk_prepare_enable(ctx->clocks[FIMC_CLK_GATE]); + clk_prepare_enable(ctx->clocks[FIMC_CLK_WB_A]); + ctx->suspended = false; + } else { + clk_disable_unprepare(ctx->clocks[FIMC_CLK_GATE]); + clk_disable_unprepare(ctx->clocks[FIMC_CLK_WB_A]); + ctx->suspended = true; + } + + return 0; +} + #ifdef CONFIG_PM_SLEEP static int fimc_suspend(struct device *dev) { @@ -1806,7 +1807,6 @@ static int fimc_resume(struct device *dev) } #endif -#ifdef CONFIG_PM static int fimc_runtime_suspend(struct device *dev) { struct fimc_context *ctx = get_fimc_context(dev); diff --git a/drivers/gpu/drm/exynos/exynos_drm_fimd.c b/drivers/gpu/drm/exynos/exynos_drm_fimd.c index 750a9e6b9e8d..3d1aba67758b 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_fimd.c +++ b/drivers/gpu/drm/exynos/exynos_drm_fimd.c @@ -41,7 +41,6 @@ * CPU Interface. */ -#define FIMD_DEFAULT_FRAMERATE 60 #define MIN_FB_WIDTH_FOR_16WORD_BURST 128 /* position control register for hardware window 0, 2 ~ 4.*/ @@ -377,16 +376,6 @@ static u32 fimd_calc_clkdiv(struct fimd_context *ctx, return (clkdiv < 0x100) ? clkdiv : 0xff; } -static bool fimd_mode_fixup(struct exynos_drm_crtc *crtc, - const struct drm_display_mode *mode, - struct drm_display_mode *adjusted_mode) -{ - if (adjusted_mode->vrefresh == 0) - adjusted_mode->vrefresh = FIMD_DEFAULT_FRAMERATE; - - return true; -} - static void fimd_commit(struct exynos_drm_crtc *crtc) { struct fimd_context *ctx = crtc->ctx; @@ -882,13 +871,12 @@ static void fimd_dp_clock_enable(struct exynos_drm_crtc *crtc, bool enable) return; val = enable ? 
DP_MIE_CLK_DP_ENABLE : DP_MIE_CLK_DISABLE; - writel(DP_MIE_CLK_DP_ENABLE, ctx->regs + DP_MIE_CLKCON); + writel(val, ctx->regs + DP_MIE_CLKCON); } static const struct exynos_drm_crtc_ops fimd_crtc_ops = { .enable = fimd_enable, .disable = fimd_disable, - .mode_fixup = fimd_mode_fixup, .commit = fimd_commit, .enable_vblank = fimd_enable_vblank, .disable_vblank = fimd_disable_vblank, diff --git a/drivers/gpu/drm/exynos/exynos_drm_g2d.c b/drivers/gpu/drm/exynos/exynos_drm_g2d.c index 3734c34aed16..c17efdb238a6 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_g2d.c +++ b/drivers/gpu/drm/exynos/exynos_drm_g2d.c @@ -1059,7 +1059,6 @@ int exynos_g2d_get_ver_ioctl(struct drm_device *drm_dev, void *data, return 0; } -EXPORT_SYMBOL_GPL(exynos_g2d_get_ver_ioctl); int exynos_g2d_set_cmdlist_ioctl(struct drm_device *drm_dev, void *data, struct drm_file *file) @@ -1230,7 +1229,6 @@ err: g2d_put_cmdlist(g2d, node); return ret; } -EXPORT_SYMBOL_GPL(exynos_g2d_set_cmdlist_ioctl); int exynos_g2d_exec_ioctl(struct drm_device *drm_dev, void *data, struct drm_file *file) @@ -1293,7 +1291,6 @@ int exynos_g2d_exec_ioctl(struct drm_device *drm_dev, void *data, out: return 0; } -EXPORT_SYMBOL_GPL(exynos_g2d_exec_ioctl); static int g2d_subdrv_probe(struct drm_device *drm_dev, struct device *dev) { diff --git a/drivers/gpu/drm/exynos/exynos_drm_gem.c b/drivers/gpu/drm/exynos/exynos_drm_gem.c index f12fbc36b120..407afedb6003 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_gem.c +++ b/drivers/gpu/drm/exynos/exynos_drm_gem.c @@ -56,39 +56,35 @@ static int exynos_drm_alloc_buf(struct exynos_drm_gem_obj *obj) nr_pages = obj->size >> PAGE_SHIFT; if (!is_drm_iommu_supported(dev)) { - dma_addr_t start_addr; - unsigned int i = 0; - obj->pages = drm_calloc_large(nr_pages, sizeof(struct page *)); if (!obj->pages) { DRM_ERROR("failed to allocate pages.\n"); return -ENOMEM; } + } - obj->cookie = dma_alloc_attrs(dev->dev, - obj->size, - &obj->dma_addr, GFP_KERNEL, - &obj->dma_attrs); - if (!obj->cookie) { - DRM_ERROR("failed to allocate buffer.\n"); + obj->cookie = dma_alloc_attrs(dev->dev, obj->size, &obj->dma_addr, + GFP_KERNEL, &obj->dma_attrs); + if (!obj->cookie) { + DRM_ERROR("failed to allocate buffer.\n"); + if (obj->pages) drm_free_large(obj->pages); - return -ENOMEM; - } + return -ENOMEM; + } + + if (obj->pages) { + dma_addr_t start_addr; + unsigned int i = 0; start_addr = obj->dma_addr; while (i < nr_pages) { - obj->pages[i] = phys_to_page(start_addr); + obj->pages[i] = pfn_to_page(dma_to_pfn(dev->dev, + start_addr)); start_addr += PAGE_SIZE; i++; } } else { - obj->pages = dma_alloc_attrs(dev->dev, obj->size, - &obj->dma_addr, GFP_KERNEL, - &obj->dma_attrs); - if (!obj->pages) { - DRM_ERROR("failed to allocate buffer.\n"); - return -ENOMEM; - } + obj->pages = obj->cookie; } DRM_DEBUG_KMS("dma_addr(0x%lx), size(0x%lx)\n", @@ -110,15 +106,11 @@ static void exynos_drm_free_buf(struct exynos_drm_gem_obj *obj) DRM_DEBUG_KMS("dma_addr(0x%lx), size(0x%lx)\n", (unsigned long)obj->dma_addr, obj->size); - if (!is_drm_iommu_supported(dev)) { - dma_free_attrs(dev->dev, obj->size, obj->cookie, - (dma_addr_t)obj->dma_addr, &obj->dma_attrs); - drm_free_large(obj->pages); - } else - dma_free_attrs(dev->dev, obj->size, obj->pages, - (dma_addr_t)obj->dma_addr, &obj->dma_attrs); + dma_free_attrs(dev->dev, obj->size, obj->cookie, + (dma_addr_t)obj->dma_addr, &obj->dma_attrs); - obj->dma_addr = (dma_addr_t)NULL; + if (!is_drm_iommu_supported(dev)) + drm_free_large(obj->pages); } static int exynos_drm_gem_handle_create(struct drm_gem_object 
*obj, @@ -156,18 +148,14 @@ void exynos_drm_gem_destroy(struct exynos_drm_gem_obj *exynos_gem_obj) * once dmabuf's refcount becomes 0. */ if (obj->import_attach) - goto out; - - exynos_drm_free_buf(exynos_gem_obj); - -out: - drm_gem_free_mmap_offset(obj); + drm_prime_gem_destroy(obj, exynos_gem_obj->sgt); + else + exynos_drm_free_buf(exynos_gem_obj); /* release file pointer to gem object. */ drm_gem_object_release(obj); kfree(exynos_gem_obj); - exynos_gem_obj = NULL; } unsigned long exynos_drm_gem_get_size(struct drm_device *dev, @@ -190,8 +178,7 @@ unsigned long exynos_drm_gem_get_size(struct drm_device *dev, return exynos_gem_obj->size; } - -struct exynos_drm_gem_obj *exynos_drm_gem_init(struct drm_device *dev, +static struct exynos_drm_gem_obj *exynos_drm_gem_init(struct drm_device *dev, unsigned long size) { struct exynos_drm_gem_obj *exynos_gem_obj; @@ -212,6 +199,13 @@ struct exynos_drm_gem_obj *exynos_drm_gem_init(struct drm_device *dev, return ERR_PTR(ret); } + ret = drm_gem_create_mmap_offset(obj); + if (ret < 0) { + drm_gem_object_release(obj); + kfree(exynos_gem_obj); + return ERR_PTR(ret); + } + DRM_DEBUG_KMS("created file object = 0x%x\n", (unsigned int)obj->filp); return exynos_gem_obj; @@ -313,7 +307,7 @@ void exynos_drm_gem_put_dma_addr(struct drm_device *dev, drm_gem_object_unreference_unlocked(obj); } -int exynos_drm_gem_mmap_buffer(struct exynos_drm_gem_obj *exynos_gem_obj, +static int exynos_drm_gem_mmap_buffer(struct exynos_drm_gem_obj *exynos_gem_obj, struct vm_area_struct *vma) { struct drm_device *drm_dev = exynos_gem_obj->base.dev; @@ -342,7 +336,8 @@ int exynos_drm_gem_mmap_buffer(struct exynos_drm_gem_obj *exynos_gem_obj, int exynos_drm_gem_get_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv) -{ struct exynos_drm_gem_obj *exynos_gem_obj; +{ + struct exynos_drm_gem_obj *exynos_gem_obj; struct drm_exynos_gem_info *args = data; struct drm_gem_object *obj; @@ -402,6 +397,7 @@ int exynos_drm_gem_dumb_create(struct drm_file *file_priv, struct drm_mode_create_dumb *args) { struct exynos_drm_gem_obj *exynos_gem_obj; + unsigned int flags; int ret; /* @@ -413,16 +409,12 @@ int exynos_drm_gem_dumb_create(struct drm_file *file_priv, args->pitch = args->width * ((args->bpp + 7) / 8); args->size = args->pitch * args->height; - if (is_drm_iommu_supported(dev)) { - exynos_gem_obj = exynos_drm_gem_create(dev, - EXYNOS_BO_NONCONTIG | EXYNOS_BO_WC, - args->size); - } else { - exynos_gem_obj = exynos_drm_gem_create(dev, - EXYNOS_BO_CONTIG | EXYNOS_BO_WC, - args->size); - } + if (is_drm_iommu_supported(dev)) + flags = EXYNOS_BO_NONCONTIG | EXYNOS_BO_WC; + else + flags = EXYNOS_BO_CONTIG | EXYNOS_BO_WC; + exynos_gem_obj = exynos_drm_gem_create(dev, flags, args->size); if (IS_ERR(exynos_gem_obj)) { dev_warn(dev->dev, "FB allocation failed.\n"); return PTR_ERR(exynos_gem_obj); @@ -460,14 +452,9 @@ int exynos_drm_gem_dumb_map_offset(struct drm_file *file_priv, goto unlock; } - ret = drm_gem_create_mmap_offset(obj); - if (ret) - goto out; - *offset = drm_vma_node_offset_addr(&obj->vma_node); DRM_DEBUG_KMS("offset = 0x%lx\n", (unsigned long)*offset); -out: drm_gem_object_unreference(obj); unlock: mutex_unlock(&dev->struct_mutex); @@ -543,7 +530,6 @@ int exynos_drm_gem_mmap(struct file *filp, struct vm_area_struct *vma) err_close_vm: drm_gem_vm_close(vma); - drm_gem_free_mmap_offset(obj); return ret; } @@ -588,6 +574,8 @@ exynos_drm_gem_prime_import_sg_table(struct drm_device *dev, if (ret < 0) goto err_free_large; + exynos_gem_obj->sgt = sgt; + if (sgt->nents == 1) { 
/* always physically continuous memory if sgt->nents is 1. */ exynos_gem_obj->flags |= EXYNOS_BO_CONTIG; diff --git a/drivers/gpu/drm/exynos/exynos_drm_gem.h b/drivers/gpu/drm/exynos/exynos_drm_gem.h index cd62f8410d1e..b62d1007c0e0 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_gem.h +++ b/drivers/gpu/drm/exynos/exynos_drm_gem.h @@ -39,6 +39,7 @@ * - this address could be physical address without IOMMU and * device address with IOMMU. * @pages: Array of backing pages. + * @sgt: Imported sg_table. * * P.S. this object would be transferred to user as kms_bo.handle so * user can access the buffer through kms_bo.handle. @@ -52,6 +53,7 @@ struct exynos_drm_gem_obj { dma_addr_t dma_addr; struct dma_attrs dma_attrs; struct page **pages; + struct sg_table *sgt; }; struct page **exynos_gem_get_pages(struct drm_gem_object *obj, gfp_t gfpmask); @@ -59,10 +61,6 @@ struct page **exynos_gem_get_pages(struct drm_gem_object *obj, gfp_t gfpmask); /* destroy a buffer with gem object */ void exynos_drm_gem_destroy(struct exynos_drm_gem_obj *exynos_gem_obj); -/* create a private gem object and initialize it. */ -struct exynos_drm_gem_obj *exynos_drm_gem_init(struct drm_device *dev, - unsigned long size); - /* create a new buffer with gem object */ struct exynos_drm_gem_obj *exynos_drm_gem_create(struct drm_device *dev, unsigned int flags, diff --git a/drivers/gpu/drm/exynos/exynos_drm_rotator.c b/drivers/gpu/drm/exynos/exynos_drm_rotator.c index 425e70625388..2f5c118f4c8e 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_rotator.c +++ b/drivers/gpu/drm/exynos/exynos_drm_rotator.c @@ -786,6 +786,7 @@ static int rotator_remove(struct platform_device *pdev) return 0; } +#ifdef CONFIG_PM static int rotator_clk_crtl(struct rot_context *rot, bool enable) { if (enable) { @@ -822,7 +823,6 @@ static int rotator_resume(struct device *dev) } #endif -#ifdef CONFIG_PM static int rotator_runtime_suspend(struct device *dev) { struct rot_context *rot = dev_get_drvdata(dev); diff --git a/drivers/gpu/drm/i915/intel_dp_mst.c b/drivers/gpu/drm/i915/intel_dp_mst.c index 3e4be5a3becd..6ade06888432 100644 --- a/drivers/gpu/drm/i915/intel_dp_mst.c +++ b/drivers/gpu/drm/i915/intel_dp_mst.c @@ -462,11 +462,17 @@ static struct drm_connector *intel_dp_add_mst_connector(struct drm_dp_mst_topolo drm_object_attach_property(&connector->base, dev->mode_config.tile_property, 0); drm_mode_connector_set_path_property(connector, pathprop); + return connector; +} + +static void intel_dp_register_mst_connector(struct drm_connector *connector) +{ + struct intel_connector *intel_connector = to_intel_connector(connector); + struct drm_device *dev = connector->dev; drm_modeset_lock_all(dev); intel_connector_add_to_fbdev(intel_connector); drm_modeset_unlock_all(dev); drm_connector_register(&intel_connector->base); - return connector; } static void intel_dp_destroy_mst_connector(struct drm_dp_mst_topology_mgr *mgr, @@ -512,6 +518,7 @@ static void intel_dp_mst_hotplug(struct drm_dp_mst_topology_mgr *mgr) static struct drm_dp_mst_topology_cbs mst_cbs = { .add_connector = intel_dp_add_mst_connector, + .register_connector = intel_dp_register_mst_connector, .destroy_connector = intel_dp_destroy_mst_connector, .hotplug = intel_dp_mst_hotplug, }; diff --git a/drivers/gpu/drm/i915/intel_hotplug.c b/drivers/gpu/drm/i915/intel_hotplug.c index 53c0173a39fe..b17785719598 100644 --- a/drivers/gpu/drm/i915/intel_hotplug.c +++ b/drivers/gpu/drm/i915/intel_hotplug.c @@ -180,7 +180,7 @@ static void intel_hpd_irq_storm_disable(struct drm_i915_private *dev_priv) /* Enable 
polling and queue hotplug re-enabling. */ if (hpd_disabled) { - drm_kms_helper_poll_enable(dev); + drm_kms_helper_poll_enable_locked(dev); mod_delayed_work(system_wq, &dev_priv->hotplug.reenable_work, msecs_to_jiffies(HPD_STORM_REENABLE_DELAY)); } diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c index 72e0edd7bbde..7412caedcf7f 100644 --- a/drivers/gpu/drm/i915/intel_lrc.c +++ b/drivers/gpu/drm/i915/intel_lrc.c @@ -484,18 +484,18 @@ void intel_lrc_irq_handler(struct intel_engine_cs *ring) status_pointer = I915_READ(RING_CONTEXT_STATUS_PTR(ring)); read_pointer = ring->next_context_status_buffer; - write_pointer = status_pointer & 0x07; + write_pointer = status_pointer & GEN8_CSB_PTR_MASK; if (read_pointer > write_pointer) - write_pointer += 6; + write_pointer += GEN8_CSB_ENTRIES; spin_lock(&ring->execlist_lock); while (read_pointer < write_pointer) { read_pointer++; status = I915_READ(RING_CONTEXT_STATUS_BUF(ring) + - (read_pointer % 6) * 8); + (read_pointer % GEN8_CSB_ENTRIES) * 8); status_id = I915_READ(RING_CONTEXT_STATUS_BUF(ring) + - (read_pointer % 6) * 8 + 4); + (read_pointer % GEN8_CSB_ENTRIES) * 8 + 4); if (status & GEN8_CTX_STATUS_IDLE_ACTIVE) continue; @@ -521,10 +521,12 @@ void intel_lrc_irq_handler(struct intel_engine_cs *ring) spin_unlock(&ring->execlist_lock); WARN(submit_contexts > 2, "More than two context complete events?\n"); - ring->next_context_status_buffer = write_pointer % 6; + ring->next_context_status_buffer = write_pointer % GEN8_CSB_ENTRIES; I915_WRITE(RING_CONTEXT_STATUS_PTR(ring), - _MASKED_FIELD(0x07 << 8, ((u32)ring->next_context_status_buffer & 0x07) << 8)); + _MASKED_FIELD(GEN8_CSB_PTR_MASK << 8, + ((u32)ring->next_context_status_buffer & + GEN8_CSB_PTR_MASK) << 8)); } static int execlists_context_queue(struct drm_i915_gem_request *request) @@ -1422,6 +1424,7 @@ static int gen8_init_common_ring(struct intel_engine_cs *ring) { struct drm_device *dev = ring->dev; struct drm_i915_private *dev_priv = dev->dev_private; + u8 next_context_status_buffer_hw; I915_WRITE_IMR(ring, ~(ring->irq_enable_mask | ring->irq_keep_mask)); I915_WRITE(RING_HWSTAM(ring->mmio_base), 0xffffffff); @@ -1436,7 +1439,29 @@ static int gen8_init_common_ring(struct intel_engine_cs *ring) _MASKED_BIT_DISABLE(GFX_REPLAY_MODE) | _MASKED_BIT_ENABLE(GFX_RUN_LIST_ENABLE)); POSTING_READ(RING_MODE_GEN7(ring)); - ring->next_context_status_buffer = 0; + + /* + * Instead of resetting the Context Status Buffer (CSB) read pointer to + * zero, we need to read the write pointer from hardware and use its + * value because "this register is power context save restored". + * Effectively, these states have been observed: + * + * | Suspend-to-idle (freeze) | Suspend-to-RAM (mem) | + * BDW | CSB regs not reset | CSB regs reset | + * CHT | CSB regs not reset | CSB regs not reset | + */ + next_context_status_buffer_hw = (I915_READ(RING_CONTEXT_STATUS_PTR(ring)) + & GEN8_CSB_PTR_MASK); + + /* + * When the CSB registers are reset (also after power-up / gpu reset), + * CSB write pointer is set to all 1's, which is not valid, use '5' in + * this special case, so the first element read is CSB[0]. 
+ */ + if (next_context_status_buffer_hw == GEN8_CSB_PTR_MASK) + next_context_status_buffer_hw = (GEN8_CSB_ENTRIES - 1); + + ring->next_context_status_buffer = next_context_status_buffer_hw; DRM_DEBUG_DRIVER("Execlists enabled for %s\n", ring->name); memset(&ring->hangcheck, 0, sizeof(ring->hangcheck)); diff --git a/drivers/gpu/drm/i915/intel_lrc.h b/drivers/gpu/drm/i915/intel_lrc.h index 64f89f9982a2..3c63bb32ad81 100644 --- a/drivers/gpu/drm/i915/intel_lrc.h +++ b/drivers/gpu/drm/i915/intel_lrc.h @@ -25,6 +25,8 @@ #define _INTEL_LRC_H_ #define GEN8_LR_CONTEXT_ALIGN 4096 +#define GEN8_CSB_ENTRIES 6 +#define GEN8_CSB_PTR_MASK 0x07 /* Execlists regs */ #define RING_ELSP(ring) ((ring)->mmio_base+0x230) diff --git a/drivers/gpu/drm/i915/intel_runtime_pm.c b/drivers/gpu/drm/i915/intel_runtime_pm.c index af7fdb3bd663..7401cf90b0db 100644 --- a/drivers/gpu/drm/i915/intel_runtime_pm.c +++ b/drivers/gpu/drm/i915/intel_runtime_pm.c @@ -246,7 +246,8 @@ static void skl_power_well_post_enable(struct drm_i915_private *dev_priv, } if (power_well->data == SKL_DISP_PW_1) { - intel_prepare_ddi(dev); + if (!dev_priv->power_domains.initializing) + intel_prepare_ddi(dev); gen8_irq_power_well_post_enable(dev_priv, 1 << PIPE_A); } } diff --git a/drivers/gpu/drm/qxl/qxl_display.c b/drivers/gpu/drm/qxl/qxl_display.c index dd845f82cc24..4649bd2ed340 100644 --- a/drivers/gpu/drm/qxl/qxl_display.c +++ b/drivers/gpu/drm/qxl/qxl_display.c @@ -618,7 +618,7 @@ static int qxl_crtc_mode_set(struct drm_crtc *crtc, adjusted_mode->hdisplay, adjusted_mode->vdisplay); - if (qcrtc->index == 0) + if (bo->is_primary == false) recreate_primary = true; if (bo->surf.stride * bo->surf.height > qdev->vram_size) { diff --git a/drivers/gpu/drm/radeon/atombios_encoders.c b/drivers/gpu/drm/radeon/atombios_encoders.c index c3872598b85a..65adb9c72377 100644 --- a/drivers/gpu/drm/radeon/atombios_encoders.c +++ b/drivers/gpu/drm/radeon/atombios_encoders.c @@ -1624,8 +1624,9 @@ radeon_atom_encoder_dpms_avivo(struct drm_encoder *encoder, int mode) } else atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args); if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) { - args.ucAction = ATOM_LCD_BLON; - atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args); + struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv; + + atombios_set_backlight_level(radeon_encoder, dig->backlight_level); } break; case DRM_MODE_DPMS_STANDBY: @@ -1706,8 +1707,7 @@ radeon_atom_encoder_dpms_dig(struct drm_encoder *encoder, int mode) atombios_dig_encoder_setup(encoder, ATOM_ENCODER_CMD_DP_VIDEO_ON, 0); } if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) - atombios_dig_transmitter_setup(encoder, - ATOM_TRANSMITTER_ACTION_LCD_BLON, 0, 0); + atombios_set_backlight_level(radeon_encoder, dig->backlight_level); if (ext_encoder) atombios_external_encoder_setup(encoder, ext_encoder, ATOM_ENABLE); break; diff --git a/drivers/gpu/drm/radeon/radeon_dp_mst.c b/drivers/gpu/drm/radeon/radeon_dp_mst.c index 5e09c061847f..6cddae44fa6e 100644 --- a/drivers/gpu/drm/radeon/radeon_dp_mst.c +++ b/drivers/gpu/drm/radeon/radeon_dp_mst.c @@ -265,7 +265,6 @@ static struct drm_connector *radeon_dp_add_mst_connector(struct drm_dp_mst_topol { struct radeon_connector *master = container_of(mgr, struct radeon_connector, mst_mgr); struct drm_device *dev = master->base.dev; - struct radeon_device *rdev = dev->dev_private; struct radeon_connector *radeon_connector; struct drm_connector *connector; @@ -286,12 +285,19 @@ static struct drm_connector 
*radeon_dp_add_mst_connector(struct drm_dp_mst_topol drm_object_attach_property(&connector->base, dev->mode_config.path_property, 0); drm_mode_connector_set_path_property(connector, pathprop); + return connector; +} + +static void radeon_dp_register_mst_connector(struct drm_connector *connector) +{ + struct drm_device *dev = connector->dev; + struct radeon_device *rdev = dev->dev_private; + drm_modeset_lock_all(dev); radeon_fb_add_connector(rdev, connector); drm_modeset_unlock_all(dev); drm_connector_register(connector); - return connector; } static void radeon_dp_destroy_mst_connector(struct drm_dp_mst_topology_mgr *mgr, @@ -324,6 +330,7 @@ static void radeon_dp_mst_hotplug(struct drm_dp_mst_topology_mgr *mgr) struct drm_dp_mst_topology_cbs mst_cbs = { .add_connector = radeon_dp_add_mst_connector, + .register_connector = radeon_dp_register_mst_connector, .destroy_connector = radeon_dp_destroy_mst_connector, .hotplug = radeon_dp_mst_hotplug, }; diff --git a/drivers/gpu/drm/radeon/radeon_fb.c b/drivers/gpu/drm/radeon/radeon_fb.c index 7214858ffcea..1aa657fe31cb 100644 --- a/drivers/gpu/drm/radeon/radeon_fb.c +++ b/drivers/gpu/drm/radeon/radeon_fb.c @@ -48,40 +48,10 @@ struct radeon_fbdev { struct radeon_device *rdev; }; -/** - * radeon_fb_helper_set_par - Hide cursor on CRTCs used by fbdev. - * - * @info: fbdev info - * - * This function hides the cursor on all CRTCs used by fbdev. - */ -static int radeon_fb_helper_set_par(struct fb_info *info) -{ - int ret; - - ret = drm_fb_helper_set_par(info); - - /* XXX: with universal plane support fbdev will automatically disable - * all non-primary planes (including the cursor) - */ - if (ret == 0) { - struct drm_fb_helper *fb_helper = info->par; - int i; - - for (i = 0; i < fb_helper->crtc_count; i++) { - struct drm_crtc *crtc = fb_helper->crtc_info[i].mode_set.crtc; - - radeon_crtc_cursor_set2(crtc, NULL, 0, 0, 0, 0, 0); - } - } - - return ret; -} - static struct fb_ops radeonfb_ops = { .owner = THIS_MODULE, .fb_check_var = drm_fb_helper_check_var, - .fb_set_par = radeon_fb_helper_set_par, + .fb_set_par = drm_fb_helper_set_par, .fb_fillrect = drm_fb_helper_cfb_fillrect, .fb_copyarea = drm_fb_helper_cfb_copyarea, .fb_imageblit = drm_fb_helper_cfb_imageblit, diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf.c index 5ae8f921da2a..8a76821177a6 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf.c @@ -681,6 +681,14 @@ static bool vmw_cmdbuf_try_alloc(struct vmw_cmdbuf_man *man, 0, 0, DRM_MM_SEARCH_DEFAULT, DRM_MM_CREATE_DEFAULT); + if (ret) { + (void) vmw_cmdbuf_man_process(man); + ret = drm_mm_insert_node_generic(&man->mm, info->node, + info->page_size, 0, 0, + DRM_MM_SEARCH_DEFAULT, + DRM_MM_CREATE_DEFAULT); + } + spin_unlock_bh(&man->lock); info->done = !ret; diff --git a/drivers/hwmon/abx500.c b/drivers/hwmon/abx500.c index 6cb89c0ebab6..1fd46859ed29 100644 --- a/drivers/hwmon/abx500.c +++ b/drivers/hwmon/abx500.c @@ -470,6 +470,7 @@ static const struct of_device_id abx500_temp_match[] = { { .compatible = "stericsson,abx500-temp" }, {}, }; +MODULE_DEVICE_TABLE(of, abx500_temp_match); #endif static struct platform_driver abx500_temp_driver = { diff --git a/drivers/hwmon/gpio-fan.c b/drivers/hwmon/gpio-fan.c index a3dae6d0082a..82de3deeb18a 100644 --- a/drivers/hwmon/gpio-fan.c +++ b/drivers/hwmon/gpio-fan.c @@ -539,6 +539,7 @@ static const struct of_device_id of_gpio_fan_match[] = { { .compatible = "gpio-fan", }, {}, }; +MODULE_DEVICE_TABLE(of, of_gpio_fan_match); 
#endif /* CONFIG_OF_GPIO */ static int gpio_fan_probe(struct platform_device *pdev) diff --git a/drivers/hwmon/pwm-fan.c b/drivers/hwmon/pwm-fan.c index 2d9a712699ff..3e23003f78b0 100644 --- a/drivers/hwmon/pwm-fan.c +++ b/drivers/hwmon/pwm-fan.c @@ -323,6 +323,7 @@ static const struct of_device_id of_pwm_fan_match[] = { { .compatible = "pwm-fan", }, {}, }; +MODULE_DEVICE_TABLE(of, of_pwm_fan_match); static struct platform_driver pwm_fan_driver = { .probe = pwm_fan_probe, diff --git a/drivers/idle/intel_idle.c b/drivers/idle/intel_idle.c index 3a3738fe016b..cd4510a63375 100644 --- a/drivers/idle/intel_idle.c +++ b/drivers/idle/intel_idle.c @@ -620,7 +620,7 @@ static struct cpuidle_state skl_cstates[] = { .name = "C6-SKL", .desc = "MWAIT 0x20", .flags = MWAIT2flg(0x20) | CPUIDLE_FLAG_TLB_FLUSHED, - .exit_latency = 75, + .exit_latency = 85, .target_residency = 200, .enter = &intel_idle, .enter_freeze = intel_idle_freeze, }, @@ -636,11 +636,19 @@ static struct cpuidle_state skl_cstates[] = { .name = "C8-SKL", .desc = "MWAIT 0x40", .flags = MWAIT2flg(0x40) | CPUIDLE_FLAG_TLB_FLUSHED, - .exit_latency = 174, + .exit_latency = 200, .target_residency = 800, .enter = &intel_idle, .enter_freeze = intel_idle_freeze, }, { + .name = "C9-SKL", + .desc = "MWAIT 0x50", + .flags = MWAIT2flg(0x50) | CPUIDLE_FLAG_TLB_FLUSHED, + .exit_latency = 480, + .target_residency = 5000, + .enter = &intel_idle, + .enter_freeze = intel_idle_freeze, }, + { .name = "C10-SKL", .desc = "MWAIT 0x60", .flags = MWAIT2flg(0x60) | CPUIDLE_FLAG_TLB_FLUSHED, diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c index 41d6911e244e..f1ccd40beae9 100644 --- a/drivers/infiniband/hw/mlx5/main.c +++ b/drivers/infiniband/hw/mlx5/main.c @@ -245,7 +245,6 @@ static int mlx5_ib_query_device(struct ib_device *ibdev, props->device_cap_flags |= IB_DEVICE_BAD_QKEY_CNTR; if (MLX5_CAP_GEN(mdev, apm)) props->device_cap_flags |= IB_DEVICE_AUTO_PATH_MIG; - props->device_cap_flags |= IB_DEVICE_LOCAL_DMA_LKEY; if (MLX5_CAP_GEN(mdev, xrc)) props->device_cap_flags |= IB_DEVICE_XRC; props->device_cap_flags |= IB_DEVICE_MEM_MGT_EXTENSIONS; @@ -795,53 +794,6 @@ static int mlx5_ib_mmap(struct ib_ucontext *ibcontext, struct vm_area_struct *vm return 0; } -static int alloc_pa_mkey(struct mlx5_ib_dev *dev, u32 *key, u32 pdn) -{ - struct mlx5_create_mkey_mbox_in *in; - struct mlx5_mkey_seg *seg; - struct mlx5_core_mr mr; - int err; - - in = kzalloc(sizeof(*in), GFP_KERNEL); - if (!in) - return -ENOMEM; - - seg = &in->seg; - seg->flags = MLX5_PERM_LOCAL_READ | MLX5_ACCESS_MODE_PA; - seg->flags_pd = cpu_to_be32(pdn | MLX5_MKEY_LEN64); - seg->qpn_mkey7_0 = cpu_to_be32(0xffffff << 8); - seg->start_addr = 0; - - err = mlx5_core_create_mkey(dev->mdev, &mr, in, sizeof(*in), - NULL, NULL, NULL); - if (err) { - mlx5_ib_warn(dev, "failed to create mkey, %d\n", err); - goto err_in; - } - - kfree(in); - *key = mr.key; - - return 0; - -err_in: - kfree(in); - - return err; -} - -static void free_pa_mkey(struct mlx5_ib_dev *dev, u32 key) -{ - struct mlx5_core_mr mr; - int err; - - memset(&mr, 0, sizeof(mr)); - mr.key = key; - err = mlx5_core_destroy_mkey(dev->mdev, &mr); - if (err) - mlx5_ib_warn(dev, "failed to destroy mkey 0x%x\n", key); -} - static struct ib_pd *mlx5_ib_alloc_pd(struct ib_device *ibdev, struct ib_ucontext *context, struct ib_udata *udata) @@ -867,13 +819,6 @@ static struct ib_pd *mlx5_ib_alloc_pd(struct ib_device *ibdev, kfree(pd); return ERR_PTR(-EFAULT); } - } else { - err = alloc_pa_mkey(to_mdev(ibdev), &pd->pa_lkey, pd->pdn); 
- if (err) { - mlx5_core_dealloc_pd(to_mdev(ibdev)->mdev, pd->pdn); - kfree(pd); - return ERR_PTR(err); - } } return &pd->ibpd; @@ -884,9 +829,6 @@ static int mlx5_ib_dealloc_pd(struct ib_pd *pd) struct mlx5_ib_dev *mdev = to_mdev(pd->device); struct mlx5_ib_pd *mpd = to_mpd(pd); - if (!pd->uobject) - free_pa_mkey(mdev, mpd->pa_lkey); - mlx5_core_dealloc_pd(mdev->mdev, mpd->pdn); kfree(mpd); @@ -1245,18 +1187,10 @@ static int create_dev_resources(struct mlx5_ib_resources *devr) struct ib_srq_init_attr attr; struct mlx5_ib_dev *dev; struct ib_cq_init_attr cq_attr = {.cqe = 1}; - u32 rsvd_lkey; int ret = 0; dev = container_of(devr, struct mlx5_ib_dev, devr); - ret = mlx5_core_query_special_context(dev->mdev, &rsvd_lkey); - if (ret) { - pr_err("Failed to query special context %d\n", ret); - return ret; - } - dev->ib_dev.local_dma_lkey = rsvd_lkey; - devr->p0 = mlx5_ib_alloc_pd(&dev->ib_dev, NULL, NULL); if (IS_ERR(devr->p0)) { ret = PTR_ERR(devr->p0); @@ -1418,6 +1352,7 @@ static void *mlx5_ib_add(struct mlx5_core_dev *mdev) strlcpy(dev->ib_dev.name, "mlx5_%d", IB_DEVICE_NAME_MAX); dev->ib_dev.owner = THIS_MODULE; dev->ib_dev.node_type = RDMA_NODE_IB_CA; + dev->ib_dev.local_dma_lkey = 0 /* not supported for now */; dev->num_ports = MLX5_CAP_GEN(mdev, num_ports); dev->ib_dev.phys_port_cnt = dev->num_ports; dev->ib_dev.num_comp_vectors = diff --git a/drivers/infiniband/hw/mlx5/mlx5_ib.h b/drivers/infiniband/hw/mlx5/mlx5_ib.h index bb8cda79e881..22123b79d550 100644 --- a/drivers/infiniband/hw/mlx5/mlx5_ib.h +++ b/drivers/infiniband/hw/mlx5/mlx5_ib.h @@ -103,7 +103,6 @@ static inline struct mlx5_ib_ucontext *to_mucontext(struct ib_ucontext *ibuconte struct mlx5_ib_pd { struct ib_pd ibpd; u32 pdn; - u32 pa_lkey; }; /* Use macros here so that don't have to duplicate @@ -213,7 +212,6 @@ struct mlx5_ib_qp { int uuarn; int create_type; - u32 pa_lkey; /* Store signature errors */ bool signature_en; diff --git a/drivers/infiniband/hw/mlx5/qp.c b/drivers/infiniband/hw/mlx5/qp.c index c745c6c5e10d..6f521a3418e8 100644 --- a/drivers/infiniband/hw/mlx5/qp.c +++ b/drivers/infiniband/hw/mlx5/qp.c @@ -925,8 +925,6 @@ static int create_qp_common(struct mlx5_ib_dev *dev, struct ib_pd *pd, err = create_kernel_qp(dev, init_attr, qp, &in, &inlen); if (err) mlx5_ib_dbg(dev, "err %d\n", err); - else - qp->pa_lkey = to_mpd(pd)->pa_lkey; } if (err) @@ -2045,7 +2043,7 @@ static void set_frwr_pages(struct mlx5_wqe_data_seg *dseg, mfrpl->mapped_page_list[i] = cpu_to_be64(page_list[i] | perm); dseg->addr = cpu_to_be64(mfrpl->map); dseg->byte_count = cpu_to_be32(ALIGN(sizeof(u64) * wr->wr.fast_reg.page_list_len, 64)); - dseg->lkey = cpu_to_be32(pd->pa_lkey); + dseg->lkey = cpu_to_be32(pd->ibpd.local_dma_lkey); } static __be32 send_ieth(struct ib_send_wr *wr) diff --git a/drivers/infiniband/ulp/ipoib/ipoib.h b/drivers/infiniband/ulp/ipoib/ipoib.h index ca2873698d75..4cd5428a2399 100644 --- a/drivers/infiniband/ulp/ipoib/ipoib.h +++ b/drivers/infiniband/ulp/ipoib/ipoib.h @@ -80,7 +80,7 @@ enum { IPOIB_NUM_WC = 4, IPOIB_MAX_PATH_REC_QUEUE = 3, - IPOIB_MAX_MCAST_QUEUE = 3, + IPOIB_MAX_MCAST_QUEUE = 64, IPOIB_FLAG_OPER_UP = 0, IPOIB_FLAG_INITIALIZED = 1, @@ -548,6 +548,8 @@ void ipoib_path_iter_read(struct ipoib_path_iter *iter, int ipoib_mcast_attach(struct net_device *dev, u16 mlid, union ib_gid *mgid, int set_qkey); +int ipoib_mcast_leave(struct net_device *dev, struct ipoib_mcast *mcast); +struct ipoib_mcast *__ipoib_mcast_find(struct net_device *dev, void *mgid); int ipoib_init_qp(struct net_device *dev); int 
ipoib_transport_dev_init(struct net_device *dev, struct ib_device *ca); diff --git a/drivers/infiniband/ulp/ipoib/ipoib_main.c b/drivers/infiniband/ulp/ipoib/ipoib_main.c index 36536ce5a3e2..f74316e679d2 100644 --- a/drivers/infiniband/ulp/ipoib/ipoib_main.c +++ b/drivers/infiniband/ulp/ipoib/ipoib_main.c @@ -1149,6 +1149,9 @@ static void __ipoib_reap_neigh(struct ipoib_dev_priv *priv) unsigned long dt; unsigned long flags; int i; + LIST_HEAD(remove_list); + struct ipoib_mcast *mcast, *tmcast; + struct net_device *dev = priv->dev; if (test_bit(IPOIB_STOP_NEIGH_GC, &priv->flags)) return; @@ -1176,6 +1179,19 @@ static void __ipoib_reap_neigh(struct ipoib_dev_priv *priv) lockdep_is_held(&priv->lock))) != NULL) { /* was the neigh idle for two GC periods */ if (time_after(neigh_obsolete, neigh->alive)) { + u8 *mgid = neigh->daddr + 4; + + /* Is this multicast ? */ + if (*mgid == 0xff) { + mcast = __ipoib_mcast_find(dev, mgid); + + if (mcast && test_bit(IPOIB_MCAST_FLAG_SENDONLY, &mcast->flags)) { + list_del(&mcast->list); + rb_erase(&mcast->rb_node, &priv->multicast_tree); + list_add_tail(&mcast->list, &remove_list); + } + } + rcu_assign_pointer(*np, rcu_dereference_protected(neigh->hnext, lockdep_is_held(&priv->lock))); @@ -1191,6 +1207,8 @@ static void __ipoib_reap_neigh(struct ipoib_dev_priv *priv) out_unlock: spin_unlock_irqrestore(&priv->lock, flags); + list_for_each_entry_safe(mcast, tmcast, &remove_list, list) + ipoib_mcast_leave(dev, mcast); } static void ipoib_reap_neigh(struct work_struct *work) diff --git a/drivers/infiniband/ulp/ipoib/ipoib_multicast.c b/drivers/infiniband/ulp/ipoib/ipoib_multicast.c index 09a1748f9d13..136cbefe00f8 100644 --- a/drivers/infiniband/ulp/ipoib/ipoib_multicast.c +++ b/drivers/infiniband/ulp/ipoib/ipoib_multicast.c @@ -153,7 +153,7 @@ static struct ipoib_mcast *ipoib_mcast_alloc(struct net_device *dev, return mcast; } -static struct ipoib_mcast *__ipoib_mcast_find(struct net_device *dev, void *mgid) +struct ipoib_mcast *__ipoib_mcast_find(struct net_device *dev, void *mgid) { struct ipoib_dev_priv *priv = netdev_priv(dev); struct rb_node *n = priv->multicast_tree.rb_node; @@ -508,17 +508,19 @@ static void ipoib_mcast_join(struct net_device *dev, struct ipoib_mcast *mcast) rec.hop_limit = priv->broadcast->mcmember.hop_limit; /* - * Historically Linux IPoIB has never properly supported SEND - * ONLY join. It emulated it by not providing all the required - * attributes, which is enough to prevent group creation and - * detect if there are full members or not. A major problem - * with supporting SEND ONLY is detecting when the group is - * auto-destroyed as IPoIB will cache the MLID.. + * Send-only IB Multicast joins do not work at the core + * IB layer yet, so we can't use them here. However, + * we are emulating an Ethernet multicast send, which + * does not require a multicast subscription and will + * still send properly. The most appropriate thing to + * do is to create the group if it doesn't exist as that + * most closely emulates the behavior, from a user space + * application perspective, of Ethernet multicast + * operation. For now, we do a full join, maybe later + * when the core IB layers support send only joins we + * will use them. 
*/ -#if 1 - if (test_bit(IPOIB_MCAST_FLAG_SENDONLY, &mcast->flags)) - comp_mask &= ~IB_SA_MCMEMBER_REC_TRAFFIC_CLASS; -#else +#if 0 if (test_bit(IPOIB_MCAST_FLAG_SENDONLY, &mcast->flags)) rec.join_state = 4; #endif @@ -675,7 +677,7 @@ int ipoib_mcast_stop_thread(struct net_device *dev) return 0; } -static int ipoib_mcast_leave(struct net_device *dev, struct ipoib_mcast *mcast) +int ipoib_mcast_leave(struct net_device *dev, struct ipoib_mcast *mcast) { struct ipoib_dev_priv *priv = netdev_priv(dev); int ret = 0; diff --git a/drivers/infiniband/ulp/iser/iscsi_iser.c b/drivers/infiniband/ulp/iser/iscsi_iser.c index 1ace5d83a4d7..f58ff96b6cbb 100644 --- a/drivers/infiniband/ulp/iser/iscsi_iser.c +++ b/drivers/infiniband/ulp/iser/iscsi_iser.c @@ -97,6 +97,11 @@ unsigned int iser_max_sectors = ISER_DEF_MAX_SECTORS; module_param_named(max_sectors, iser_max_sectors, uint, S_IRUGO | S_IWUSR); MODULE_PARM_DESC(max_sectors, "Max number of sectors in a single scsi command (default:1024"); +bool iser_always_reg = true; +module_param_named(always_register, iser_always_reg, bool, S_IRUGO); +MODULE_PARM_DESC(always_register, + "Always register memory, even for continuous memory regions (default:true)"); + bool iser_pi_enable = false; module_param_named(pi_enable, iser_pi_enable, bool, S_IRUGO); MODULE_PARM_DESC(pi_enable, "Enable T10-PI offload support (default:disabled)"); diff --git a/drivers/infiniband/ulp/iser/iscsi_iser.h b/drivers/infiniband/ulp/iser/iscsi_iser.h index 86f6583485ef..a5edd6ede692 100644 --- a/drivers/infiniband/ulp/iser/iscsi_iser.h +++ b/drivers/infiniband/ulp/iser/iscsi_iser.h @@ -611,6 +611,7 @@ extern int iser_debug_level; extern bool iser_pi_enable; extern int iser_pi_guard; extern unsigned int iser_max_sectors; +extern bool iser_always_reg; int iser_assign_reg_ops(struct iser_device *device); diff --git a/drivers/infiniband/ulp/iser/iser_memory.c b/drivers/infiniband/ulp/iser/iser_memory.c index 2493cc748db8..4c46d67d37a1 100644 --- a/drivers/infiniband/ulp/iser/iser_memory.c +++ b/drivers/infiniband/ulp/iser/iser_memory.c @@ -803,11 +803,12 @@ static int iser_reg_prot_sg(struct iscsi_iser_task *task, struct iser_data_buf *mem, struct iser_fr_desc *desc, + bool use_dma_key, struct iser_mem_reg *reg) { struct iser_device *device = task->iser_conn->ib_conn.device; - if (mem->dma_nents == 1) + if (use_dma_key) return iser_reg_dma(device, mem, reg); return device->reg_ops->reg_mem(task, mem, &desc->pi_ctx->rsc, reg); @@ -817,11 +818,12 @@ static int iser_reg_data_sg(struct iscsi_iser_task *task, struct iser_data_buf *mem, struct iser_fr_desc *desc, + bool use_dma_key, struct iser_mem_reg *reg) { struct iser_device *device = task->iser_conn->ib_conn.device; - if (mem->dma_nents == 1) + if (use_dma_key) return iser_reg_dma(device, mem, reg); return device->reg_ops->reg_mem(task, mem, &desc->rsc, reg); @@ -836,14 +838,17 @@ int iser_reg_rdma_mem(struct iscsi_iser_task *task, struct iser_mem_reg *reg = &task->rdma_reg[dir]; struct iser_mem_reg *data_reg; struct iser_fr_desc *desc = NULL; + bool use_dma_key; int err; err = iser_handle_unaligned_buf(task, mem, dir); if (unlikely(err)) return err; - if (mem->dma_nents != 1 || - scsi_get_prot_op(task->sc) != SCSI_PROT_NORMAL) { + use_dma_key = (mem->dma_nents == 1 && !iser_always_reg && + scsi_get_prot_op(task->sc) == SCSI_PROT_NORMAL); + + if (!use_dma_key) { desc = device->reg_ops->reg_desc_get(ib_conn); reg->mem_h = desc; } @@ -853,7 +858,7 @@ int iser_reg_rdma_mem(struct iscsi_iser_task *task, else data_reg = &task->desc.data_reg; - 
err = iser_reg_data_sg(task, mem, desc, data_reg); + err = iser_reg_data_sg(task, mem, desc, use_dma_key, data_reg); if (unlikely(err)) goto err_reg; @@ -866,7 +871,8 @@ int iser_reg_rdma_mem(struct iscsi_iser_task *task, if (unlikely(err)) goto err_reg; - err = iser_reg_prot_sg(task, mem, desc, prot_reg); + err = iser_reg_prot_sg(task, mem, desc, + use_dma_key, prot_reg); if (unlikely(err)) goto err_reg; } diff --git a/drivers/infiniband/ulp/iser/iser_verbs.c b/drivers/infiniband/ulp/iser/iser_verbs.c index ae70cc1463ac..85132d867bc8 100644 --- a/drivers/infiniband/ulp/iser/iser_verbs.c +++ b/drivers/infiniband/ulp/iser/iser_verbs.c @@ -133,11 +133,15 @@ static int iser_create_device_ib_res(struct iser_device *device) (unsigned long)comp); } - device->mr = ib_get_dma_mr(device->pd, IB_ACCESS_LOCAL_WRITE | - IB_ACCESS_REMOTE_WRITE | - IB_ACCESS_REMOTE_READ); - if (IS_ERR(device->mr)) - goto dma_mr_err; + if (!iser_always_reg) { + int access = IB_ACCESS_LOCAL_WRITE | + IB_ACCESS_REMOTE_WRITE | + IB_ACCESS_REMOTE_READ; + + device->mr = ib_get_dma_mr(device->pd, access); + if (IS_ERR(device->mr)) + goto dma_mr_err; + } INIT_IB_EVENT_HANDLER(&device->event_handler, device->ib_device, iser_event_handler); @@ -147,7 +151,8 @@ static int iser_create_device_ib_res(struct iser_device *device) return 0; handler_err: - ib_dereg_mr(device->mr); + if (device->mr) + ib_dereg_mr(device->mr); dma_mr_err: for (i = 0; i < device->comps_used; i++) tasklet_kill(&device->comps[i].tasklet); @@ -173,7 +178,6 @@ comps_err: static void iser_free_device_ib_res(struct iser_device *device) { int i; - BUG_ON(device->mr == NULL); for (i = 0; i < device->comps_used; i++) { struct iser_comp *comp = &device->comps[i]; @@ -184,7 +188,8 @@ static void iser_free_device_ib_res(struct iser_device *device) } (void)ib_unregister_event_handler(&device->event_handler); - (void)ib_dereg_mr(device->mr); + if (device->mr) + (void)ib_dereg_mr(device->mr); ib_dealloc_pd(device->pd); kfree(device->comps); diff --git a/drivers/input/joystick/Kconfig b/drivers/input/joystick/Kconfig index 56eb471b5576..4215b5382092 100644 --- a/drivers/input/joystick/Kconfig +++ b/drivers/input/joystick/Kconfig @@ -196,6 +196,7 @@ config JOYSTICK_TWIDJOY config JOYSTICK_ZHENHUA tristate "5-byte Zhenhua RC transmitter" select SERIO + select BITREVERSE help Say Y here if you have a Zhen Hua PPM-4CH transmitter which is supplied with ready-to-fly micro electric indoor helicopters diff --git a/drivers/input/joystick/walkera0701.c b/drivers/input/joystick/walkera0701.c index b76ac580703c..a8bc2fe170dd 100644 --- a/drivers/input/joystick/walkera0701.c +++ b/drivers/input/joystick/walkera0701.c @@ -150,7 +150,7 @@ static void walkera0701_irq_handler(void *handler_data) if (w->counter == 24) { /* full frame */ walkera0701_parse_frame(w); w->counter = NO_SYNC; - if (abs(pulse_time - SYNC_PULSE) < RESERVE) /* new frame sync */ + if (abs64(pulse_time - SYNC_PULSE) < RESERVE) /* new frame sync */ w->counter = 0; } else { if ((pulse_time > (ANALOG_MIN_PULSE - RESERVE) @@ -161,7 +161,7 @@ static void walkera0701_irq_handler(void *handler_data) } else w->counter = NO_SYNC; } - } else if (abs(pulse_time - SYNC_PULSE - BIN0_PULSE) < + } else if (abs64(pulse_time - SYNC_PULSE - BIN0_PULSE) < RESERVE + BIN1_PULSE - BIN0_PULSE) /* frame sync .. */ w->counter = 0;
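The walkera0701 hunk above replaces abs() with abs64() because pulse_time is a 64-bit ktime-derived value: the old abs() evaluated through a 32-bit long on 32-bit builds, so a difference larger than 2^32 could truncate to a tiny number and falsely pass the sync-pulse window check. A hedged userspace demonstration of that failure mode (the numbers and names are made up, not the driver's real constants):

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

/* Mimics the old abs() on a 32-bit kernel build, which truncated. */
static int32_t abs_trunc(int32_t v)
{
	return v < 0 ? -v : v;
}

/* Mimics abs64(), which keeps the full 64-bit magnitude. */
static int64_t abs_64(int64_t v)
{
	return v < 0 ? -v : v;
}

int main(void)
{
	int64_t pulse_time = 0x100000100LL;	/* hypothetical delta > 2^32 */
	int64_t sync_pulse = 0x80;		/* stand-in for SYNC_PULSE */
	int64_t d = pulse_time - sync_pulse;	/* 0x100000080 */

	/* Truncation keeps only the low 32 bits: 0x80 == 128, which
	 * would slip under a small RESERVE-style threshold. */
	printf("truncated abs: %" PRId32 "\n", abs_trunc((int32_t)d));
	printf("64-bit abs:    %" PRId64 "\n", abs_64(d));
	return 0;
}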
diff --git a/drivers/input/keyboard/omap4-keypad.c b/drivers/input/keyboard/omap4-keypad.c index b052afec9a11..6639b2b8528a 100644 --- a/drivers/input/keyboard/omap4-keypad.c +++ b/drivers/input/keyboard/omap4-keypad.c @@ -266,7 +266,7 @@ static int omap4_keypad_probe(struct platform_device *pdev) error = omap4_keypad_parse_dt(&pdev->dev, keypad_data); if (error) - return error; + goto err_free_keypad; res = request_mem_region(res->start, resource_size(res), pdev->name); if (!res) { diff --git a/drivers/input/misc/pm8941-pwrkey.c b/drivers/input/misc/pm8941-pwrkey.c index 867db8a91372..e317b75357a0 100644 --- a/drivers/input/misc/pm8941-pwrkey.c +++ b/drivers/input/misc/pm8941-pwrkey.c @@ -93,7 +93,7 @@ static int pm8941_reboot_notify(struct notifier_block *nb, default: reset_type = PON_PS_HOLD_TYPE_HARD_RESET; break; - }; + } error = regmap_update_bits(pwrkey->regmap, pwrkey->baseaddr + PON_PS_HOLD_RST_CTL, diff --git a/drivers/input/misc/uinput.c b/drivers/input/misc/uinput.c index 345df9b03aed..5adbcedcb81c 100644 --- a/drivers/input/misc/uinput.c +++ b/drivers/input/misc/uinput.c @@ -414,7 +414,7 @@ static int uinput_setup_device(struct uinput_device *udev, dev->id.product = user_dev->id.product; dev->id.version = user_dev->id.version; - for_each_set_bit(i, dev->absbit, ABS_CNT) { + for (i = 0; i < ABS_CNT; i++) { input_abs_set_max(dev, i, user_dev->absmax[i]); input_abs_set_min(dev, i, user_dev->absmin[i]); input_abs_set_fuzz(dev, i, user_dev->absfuzz[i]); diff --git a/drivers/input/mouse/elan_i2c.h b/drivers/input/mouse/elan_i2c.h index 73670f2aebfd..c0ec26118732 100644 --- a/drivers/input/mouse/elan_i2c.h +++ b/drivers/input/mouse/elan_i2c.h @@ -60,7 +60,7 @@ struct elan_transport_ops { int (*get_sm_version)(struct i2c_client *client, u8* ic_type, u8 *version); int (*get_checksum)(struct i2c_client *client, bool iap, u16 *csum); - int (*get_product_id)(struct i2c_client *client, u8 *id); + int (*get_product_id)(struct i2c_client *client, u16 *id); int (*get_max)(struct i2c_client *client, unsigned int *max_x, unsigned int *max_y); diff --git a/drivers/input/mouse/elan_i2c_core.c b/drivers/input/mouse/elan_i2c_core.c index fa945304b9a5..5e1665bbaa0b 100644 --- a/drivers/input/mouse/elan_i2c_core.c +++ b/drivers/input/mouse/elan_i2c_core.c @@ -40,7 +40,7 @@ #include "elan_i2c.h" #define DRIVER_NAME "elan_i2c" -#define ELAN_DRIVER_VERSION "1.6.0" +#define ELAN_DRIVER_VERSION "1.6.1" #define ETP_MAX_PRESSURE 255 #define ETP_FWIDTH_REDUCE 90 #define ETP_FINGER_WIDTH 15 @@ -76,7 +76,7 @@ struct elan_tp_data { unsigned int x_res; unsigned int y_res; - u8 product_id; + u16 product_id; u8 fw_version; u8 sm_version; u8 iap_version; @@ -98,15 +98,25 @@ static int elan_get_fwinfo(u8 iap_version, u16 *validpage_count, u16 *signature_address) { switch (iap_version) { + case 0x00: + case 0x06: case 0x08: *validpage_count = 512; break; + case 0x03: + case 0x07: case 0x09: + case 0x0A: + case 0x0B: + case 0x0C: *validpage_count = 768; break; case 0x0D: *validpage_count = 896; break; + case 0x0E: + *validpage_count = 640; + break; default: /* unknown ic type, clear value */ *validpage_count = 0; @@ -266,11 +276,10 @@ static int elan_query_device_info(struct elan_tp_data *data) error = elan_get_fwinfo(data->iap_version, &data->fw_validpage_count, &data->fw_signature_address); - if (error) { - dev_err(&data->client->dev, - "unknown iap version %d\n", data->iap_version); - return error; - } + if (error) + dev_warn(&data->client->dev, + "unexpected iap version %#04x (ic type: %#04x), 
firmware update will not work\n", + data->iap_version, data->ic_type); return 0; } @@ -486,6 +495,9 @@ static ssize_t elan_sysfs_update_fw(struct device *dev, const u8 *fw_signature; static const u8 signature[] = {0xAA, 0x55, 0xCC, 0x33, 0xFF, 0xFF}; + if (data->fw_validpage_count == 0) + return -EINVAL; + /* Look for a firmware with the product id appended. */ fw_name = kasprintf(GFP_KERNEL, ETP_FW_NAME, data->product_id); if (!fw_name) { diff --git a/drivers/input/mouse/elan_i2c_i2c.c b/drivers/input/mouse/elan_i2c_i2c.c index 683c840c9dd7..a679e56c44cd 100644 --- a/drivers/input/mouse/elan_i2c_i2c.c +++ b/drivers/input/mouse/elan_i2c_i2c.c @@ -276,7 +276,7 @@ static int elan_i2c_get_sm_version(struct i2c_client *client, return 0; } -static int elan_i2c_get_product_id(struct i2c_client *client, u8 *id) +static int elan_i2c_get_product_id(struct i2c_client *client, u16 *id) { int error; u8 val[3]; @@ -287,7 +287,7 @@ static int elan_i2c_get_product_id(struct i2c_client *client, u8 *id) return error; } - *id = val[0]; + *id = le16_to_cpup((__le16 *)val); return 0; } diff --git a/drivers/input/mouse/elan_i2c_smbus.c b/drivers/input/mouse/elan_i2c_smbus.c index ff36a366b2aa..cb6aecbc1dc2 100644 --- a/drivers/input/mouse/elan_i2c_smbus.c +++ b/drivers/input/mouse/elan_i2c_smbus.c @@ -183,7 +183,7 @@ static int elan_smbus_get_sm_version(struct i2c_client *client, return 0; } -static int elan_smbus_get_product_id(struct i2c_client *client, u8 *id) +static int elan_smbus_get_product_id(struct i2c_client *client, u16 *id) { int error; u8 val[3]; @@ -195,7 +195,7 @@ static int elan_smbus_get_product_id(struct i2c_client *client, u8 *id) return error; } - *id = val[1]; + *id = be16_to_cpup((__be16 *)val); return 0; } diff --git a/drivers/input/mouse/synaptics.c b/drivers/input/mouse/synaptics.c index 994ae7886156..6025eb430c0a 100644 --- a/drivers/input/mouse/synaptics.c +++ b/drivers/input/mouse/synaptics.c @@ -519,18 +519,14 @@ static int synaptics_set_mode(struct psmouse *psmouse) struct synaptics_data *priv = psmouse->private; priv->mode = 0; - - if (priv->absolute_mode) { + if (priv->absolute_mode) priv->mode |= SYN_BIT_ABSOLUTE_MODE; - if (SYN_CAP_EXTENDED(priv->capabilities)) - priv->mode |= SYN_BIT_W_MODE; - } - - if (!SYN_MODE_WMODE(priv->mode) && priv->disable_gesture) + if (priv->disable_gesture) priv->mode |= SYN_BIT_DISABLE_GESTURE; - if (psmouse->rate >= 80) priv->mode |= SYN_BIT_HIGH_RATE; + if (SYN_CAP_EXTENDED(priv->capabilities)) + priv->mode |= SYN_BIT_W_MODE; if (synaptics_mode_cmd(psmouse, priv->mode)) return -1; diff --git a/drivers/input/serio/libps2.c b/drivers/input/serio/libps2.c index 75516996db20..316f2c897101 100644 --- a/drivers/input/serio/libps2.c +++ b/drivers/input/serio/libps2.c @@ -212,12 +212,17 @@ int __ps2_command(struct ps2dev *ps2dev, unsigned char *param, int command) * time before the ACK arrives. */ if (ps2_sendbyte(ps2dev, command & 0xff, - command == PS2_CMD_RESET_BAT ? 1000 : 200)) - goto out; + command == PS2_CMD_RESET_BAT ? 1000 : 200)) { + serio_pause_rx(ps2dev->serio); + goto out_reset_flags; + } - for (i = 0; i < send; i++) - if (ps2_sendbyte(ps2dev, param[i], 200)) - goto out; + for (i = 0; i < send; i++) { + if (ps2_sendbyte(ps2dev, param[i], 200)) { + serio_pause_rx(ps2dev->serio); + goto out_reset_flags; + } + } /* * The reset command takes a long time to execute. 
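The two __ps2_command hunks on either side of this point reshape the exit paths so that serio RX is always paused before ps2dev->cmdbuf and ps2dev->flags are touched, because the serio interrupt handler writes those fields concurrently; the old code read the response bytes before pausing RX, leaving a window in which the buffer could change underneath the reader. A compilable sketch of just the resulting control-flow shape, with every name invented and the hardware interaction stubbed out:

#include <stdbool.h>

struct dev { unsigned int flags; };

/* Trivial stand-ins so the sketch compiles; in the real driver these
 * talk to the hardware and race against an interrupt handler. */
static bool send_byte(struct dev *d)     { (void)d; return false; }
static void wait_for_done(struct dev *d) { (void)d; }
static void pause_rx(struct dev *d)      { (void)d; }
static void resume_rx(struct dev *d)     { (void)d; }
static bool response_ok(struct dev *d)   { (void)d; return true; }

static int command_shape(struct dev *d)
{
	int rc = -1;

	if (send_byte(d)) {
		pause_rx(d);	/* failed early: pause before cleanup */
		goto out_reset_flags;
	}

	wait_for_done(d);

	pause_rx(d);		/* quiesce RX before reading shared state */
	if (!response_ok(d))
		goto out_reset_flags;

	rc = 0;

out_reset_flags:
	d->flags = 0;		/* every path reaches here with RX paused */
	resume_rx(d);
	return rc;
}

int main(void)
{
	struct dev d = { .flags = 0xff };
	return command_shape(&d);
}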
@@ -234,17 +239,18 @@ int __ps2_command(struct ps2dev *ps2dev, unsigned char *param, int command) !(ps2dev->flags & PS2_FLAG_CMD), timeout); } + serio_pause_rx(ps2dev->serio); + if (param) for (i = 0; i < receive; i++) param[i] = ps2dev->cmdbuf[(receive - 1) - i]; if (ps2dev->cmdcnt && (command != PS2_CMD_RESET_BAT || ps2dev->cmdcnt != 1)) - goto out; + goto out_reset_flags; rc = 0; - out: - serio_pause_rx(ps2dev->serio); + out_reset_flags: ps2dev->flags = 0; serio_continue_rx(ps2dev->serio); diff --git a/drivers/input/serio/parkbd.c b/drivers/input/serio/parkbd.c index 26b45936f9fd..1e8cd6f1fe9e 100644 --- a/drivers/input/serio/parkbd.c +++ b/drivers/input/serio/parkbd.c @@ -194,6 +194,7 @@ static int __init parkbd_init(void) parkbd_port = parkbd_allocate_serio(); if (!parkbd_port) { parport_release(parkbd_dev); + parport_unregister_device(parkbd_dev); return -ENOMEM; } diff --git a/drivers/input/touchscreen/imx6ul_tsc.c b/drivers/input/touchscreen/imx6ul_tsc.c index ff0b75813daa..8275267eac25 100644 --- a/drivers/input/touchscreen/imx6ul_tsc.c +++ b/drivers/input/touchscreen/imx6ul_tsc.c @@ -94,7 +94,7 @@ struct imx6ul_tsc { * The TSC module needs the ADC to get the measured value, so * before configuring the TSC we should initialize the ADC module. */ -static void imx6ul_adc_init(struct imx6ul_tsc *tsc) +static int imx6ul_adc_init(struct imx6ul_tsc *tsc) { int adc_hc = 0; int adc_gc; @@ -122,17 +122,23 @@ static void imx6ul_adc_init(struct imx6ul_tsc *tsc) timeout = wait_for_completion_timeout (&tsc->completion, ADC_TIMEOUT); - if (timeout == 0) + if (timeout == 0) { dev_err(tsc->dev, "Timeout for adc calibration\n"); + return -ETIMEDOUT; + } adc_gs = readl(tsc->adc_regs + REG_ADC_GS); - if (adc_gs & ADC_CALF) + if (adc_gs & ADC_CALF) { dev_err(tsc->dev, "ADC calibration failed\n"); + return -EINVAL; + } /* The TSC needs the ADC to work in hardware trigger mode */ adc_cfg = readl(tsc->adc_regs + REG_ADC_CFG); adc_cfg |= ADC_HARDWARE_TRIGGER; writel(adc_cfg, tsc->adc_regs + REG_ADC_CFG); + + return 0; } /* @@ -188,11 +194,17 @@ static void imx6ul_tsc_set(struct imx6ul_tsc *tsc) writel(start, tsc->tsc_regs + REG_TSC_FLOW_CONTROL); } -static void imx6ul_tsc_init(struct imx6ul_tsc *tsc) +static int imx6ul_tsc_init(struct imx6ul_tsc *tsc) { - imx6ul_adc_init(tsc); + int err; + + err = imx6ul_adc_init(tsc); + if (err) + return err; imx6ul_tsc_channel_config(tsc); imx6ul_tsc_set(tsc); + + return 0; } static void imx6ul_tsc_disable(struct imx6ul_tsc *tsc) @@ -311,9 +323,7 @@ static int imx6ul_tsc_open(struct input_dev *input_dev) return err; } - imx6ul_tsc_init(tsc); - - return 0; + return imx6ul_tsc_init(tsc); } static void imx6ul_tsc_close(struct input_dev *input_dev) @@ -337,7 +347,7 @@ static int imx6ul_tsc_probe(struct platform_device *pdev) int tsc_irq; int adc_irq; - tsc = devm_kzalloc(&pdev->dev, sizeof(struct imx6ul_tsc), GFP_KERNEL); + tsc = devm_kzalloc(&pdev->dev, sizeof(*tsc), GFP_KERNEL); if (!tsc) return -ENOMEM; @@ -345,7 +355,7 @@ static int imx6ul_tsc_probe(struct platform_device *pdev) if (!input_dev) return -ENOMEM; - input_dev->name = "iMX6UL TouchScreen Controller"; + input_dev->name = "iMX6UL Touchscreen Controller"; input_dev->id.bustype = BUS_HOST; input_dev->open = imx6ul_tsc_open; @@ -406,7 +416,7 @@ static int imx6ul_tsc_probe(struct platform_device *pdev) } adc_irq = platform_get_irq(pdev, 1); - if (adc_irq <= 0) { + if (adc_irq < 0) { dev_err(&pdev->dev, "no adc irq resource?\n"); return adc_irq; } @@ -491,7 +501,7 @@ static int __maybe_unused imx6ul_tsc_resume(struct device *dev) goto out; } - 
imx6ul_tsc_init(tsc); + retval = imx6ul_tsc_init(tsc); } out: diff --git a/drivers/input/touchscreen/mms114.c b/drivers/input/touchscreen/mms114.c index 7cce87650fc8..1fafc9f57af6 100644 --- a/drivers/input/touchscreen/mms114.c +++ b/drivers/input/touchscreen/mms114.c @@ -394,12 +394,12 @@ static struct mms114_platform_data *mms114_parse_dt(struct device *dev) if (of_property_read_u32(np, "x-size", &pdata->x_size)) { dev_err(dev, "failed to get x-size property\n"); return NULL; - }; + } if (of_property_read_u32(np, "y-size", &pdata->y_size)) { dev_err(dev, "failed to get y-size property\n"); return NULL; - }; + } of_property_read_u32(np, "contact-threshold", &pdata->contact_threshold); diff --git a/drivers/iommu/Kconfig b/drivers/iommu/Kconfig index 4664c2a96c67..d9da766719c8 100644 --- a/drivers/iommu/Kconfig +++ b/drivers/iommu/Kconfig @@ -43,7 +43,7 @@ config IOMMU_IO_PGTABLE_LPAE_SELFTEST endmenu config IOMMU_IOVA - bool + tristate config OF_IOMMU def_bool y diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c index 2d7349a3ee14..041bc1810a86 100644 --- a/drivers/iommu/intel-iommu.c +++ b/drivers/iommu/intel-iommu.c @@ -3215,6 +3215,8 @@ static struct iova *intel_alloc_iova(struct device *dev, /* Restrict dma_mask to the width that the iommu can handle */ dma_mask = min_t(uint64_t, DOMAIN_MAX_ADDR(domain->gaw), dma_mask); + /* Ensure we reserve the whole size-aligned region */ + nrpages = __roundup_pow_of_two(nrpages); if (!dmar_forcedac && dma_mask > DMA_BIT_MASK(32)) { /* @@ -3711,7 +3713,7 @@ static inline int iommu_devinfo_cache_init(void) static int __init iommu_init_mempool(void) { int ret; - ret = iommu_iova_cache_init(); + ret = iova_cache_get(); if (ret) return ret; @@ -3725,7 +3727,7 @@ static int __init iommu_init_mempool(void) kmem_cache_destroy(iommu_domain_cache); domain_error: - iommu_iova_cache_destroy(); + iova_cache_put(); return -ENOMEM; } @@ -3734,7 +3736,7 @@ static void __init iommu_exit_mempool(void) { kmem_cache_destroy(iommu_devinfo_cache); kmem_cache_destroy(iommu_domain_cache); - iommu_iova_cache_destroy(); + iova_cache_put(); } static void quirk_ioat_snb_local_iommu(struct pci_dev *pdev) diff --git a/drivers/iommu/iova.c b/drivers/iommu/iova.c index b7c3d923f3e1..fa0adef32bd6 100644 --- a/drivers/iommu/iova.c +++ b/drivers/iommu/iova.c @@ -18,42 +18,9 @@ */ #include <linux/iova.h> +#include <linux/module.h> #include <linux/slab.h> -static struct kmem_cache *iommu_iova_cache; - -int iommu_iova_cache_init(void) -{ - int ret = 0; - - iommu_iova_cache = kmem_cache_create("iommu_iova", - sizeof(struct iova), - 0, - SLAB_HWCACHE_ALIGN, - NULL); - if (!iommu_iova_cache) { - pr_err("Couldn't create iova cache\n"); - ret = -ENOMEM; - } - - return ret; -} - -void iommu_iova_cache_destroy(void) -{ - kmem_cache_destroy(iommu_iova_cache); -} - -struct iova *alloc_iova_mem(void) -{ - return kmem_cache_alloc(iommu_iova_cache, GFP_ATOMIC); -} - -void free_iova_mem(struct iova *iova) -{ - kmem_cache_free(iommu_iova_cache, iova); -} - void init_iova_domain(struct iova_domain *iovad, unsigned long granule, unsigned long start_pfn, unsigned long pfn_32bit) @@ -72,6 +39,7 @@ init_iova_domain(struct iova_domain *iovad, unsigned long granule, iovad->start_pfn = start_pfn; iovad->dma_32bit_pfn = pfn_32bit; } +EXPORT_SYMBOL_GPL(init_iova_domain); static struct rb_node * __get_cached_rbnode(struct iova_domain *iovad, unsigned long *limit_pfn) @@ -120,19 +88,14 @@ __cached_rbnode_delete_update(struct iova_domain *iovad, struct iova *free) } } -/* Computes the padding 
size required, to make the - * the start address naturally aligned on its size +/* + * Computes the padding size required, to make the start address + * naturally aligned on the power-of-two order of its size */ -static int -iova_get_pad_size(int size, unsigned int limit_pfn) +static unsigned int +iova_get_pad_size(unsigned int size, unsigned int limit_pfn) { - unsigned int pad_size = 0; - unsigned int order = ilog2(size); - - if (order) - pad_size = (limit_pfn + 1) % (1 << order); - - return pad_size; + return (limit_pfn + 1 - size) & (__roundup_pow_of_two(size) - 1); } static int __alloc_and_insert_iova_range(struct iova_domain *iovad, @@ -242,6 +205,57 @@ iova_insert_rbtree(struct rb_root *root, struct iova *iova) rb_insert_color(&iova->node, root); } +static struct kmem_cache *iova_cache; +static unsigned int iova_cache_users; +static DEFINE_MUTEX(iova_cache_mutex); + +struct iova *alloc_iova_mem(void) +{ + return kmem_cache_alloc(iova_cache, GFP_ATOMIC); +} +EXPORT_SYMBOL(alloc_iova_mem); + +void free_iova_mem(struct iova *iova) +{ + kmem_cache_free(iova_cache, iova); +} +EXPORT_SYMBOL(free_iova_mem); + +int iova_cache_get(void) +{ + mutex_lock(&iova_cache_mutex); + if (!iova_cache_users) { + iova_cache = kmem_cache_create( + "iommu_iova", sizeof(struct iova), 0, + SLAB_HWCACHE_ALIGN, NULL); + if (!iova_cache) { + mutex_unlock(&iova_cache_mutex); + printk(KERN_ERR "Couldn't create iova cache\n"); + return -ENOMEM; + } + } + + iova_cache_users++; + mutex_unlock(&iova_cache_mutex); + + return 0; +} +EXPORT_SYMBOL_GPL(iova_cache_get); + +void iova_cache_put(void) +{ + mutex_lock(&iova_cache_mutex); + if (WARN_ON(!iova_cache_users)) { + mutex_unlock(&iova_cache_mutex); + return; + } + iova_cache_users--; + if (!iova_cache_users) + kmem_cache_destroy(iova_cache); + mutex_unlock(&iova_cache_mutex); +} +EXPORT_SYMBOL_GPL(iova_cache_put); + /** * alloc_iova - allocates an iova * @iovad: - iova domain in question @@ -265,12 +279,6 @@ alloc_iova(struct iova_domain *iovad, unsigned long size, if (!new_iova) return NULL; - /* If size aligned is set then round the size to - * to next power of two. 
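The rewritten iova_get_pad_size() above condenses the old order/modulo logic into one mask expression: for a top-down allocator that wants a run of size pfns to end exactly at limit_pfn, the value (limit_pfn + 1 - size) & (roundup_pow_of_two(size) - 1) is precisely the padding that leaves the start pfn aligned to the power-of-two order of the size. A small userspace check of that identity (a sketch that assumes the allocator's top-down placement; roundup_pow_of_two() is reimplemented here rather than pulled from the kernel):

#include <assert.h>
#include <stdio.h>

/* Next power of two >= v, for v >= 1. */
static unsigned int roundup_pow_of_two(unsigned int v)
{
	unsigned int p = 1;

	while (p < v)
		p <<= 1;
	return p;
}

/* Same expression as the patched iova_get_pad_size(). */
static unsigned int pad_size(unsigned int size, unsigned int limit_pfn)
{
	return (limit_pfn + 1 - size) & (roundup_pow_of_two(size) - 1);
}

int main(void)
{
	unsigned int size, limit_pfn;

	for (size = 1; size <= 64; size++) {
		for (limit_pfn = size; limit_pfn < 4096; limit_pfn++) {
			unsigned int pad = pad_size(size, limit_pfn);
			unsigned int start = limit_pfn + 1 - size - pad;

			/* start stays naturally aligned for this size */
			assert(start % roundup_pow_of_two(size) == 0);
		}
	}
	printf("padding keeps the start pfn naturally aligned\n");
	return 0;
}

This also explains the removal just below: alloc_iova() no longer needs to round the requested size up to a power of two, since the padding computation enforces alignment without over-allocating, and the intel-iommu hunk earlier rounds nrpages itself where the whole size-aligned region really must be reserved.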
- */ - if (size_aligned) - size = __roundup_pow_of_two(size); - ret = __alloc_and_insert_iova_range(iovad, size, limit_pfn, new_iova, size_aligned); @@ -281,6 +289,7 @@ alloc_iova(struct iova_domain *iovad, unsigned long size, return new_iova; } +EXPORT_SYMBOL_GPL(alloc_iova); /** * find_iova - finds an iova for a given pfn @@ -321,6 +330,7 @@ struct iova *find_iova(struct iova_domain *iovad, unsigned long pfn) spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags); return NULL; } +EXPORT_SYMBOL_GPL(find_iova); /** * __free_iova - frees the given iova @@ -339,6 +349,7 @@ __free_iova(struct iova_domain *iovad, struct iova *iova) spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags); free_iova_mem(iova); } +EXPORT_SYMBOL_GPL(__free_iova); /** * free_iova - finds and frees the iova for a given pfn @@ -356,6 +367,7 @@ free_iova(struct iova_domain *iovad, unsigned long pfn) __free_iova(iovad, iova); } +EXPORT_SYMBOL_GPL(free_iova); /** * put_iova_domain - destroys the iova domain @@ -378,6 +390,7 @@ void put_iova_domain(struct iova_domain *iovad) } spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags); } +EXPORT_SYMBOL_GPL(put_iova_domain); static int __is_range_overlap(struct rb_node *node, @@ -467,6 +480,7 @@ finish: spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags); return iova; } +EXPORT_SYMBOL_GPL(reserve_iova); /** * copy_reserved_iova - copies the reserved iovas between domains @@ -493,6 +507,7 @@ copy_reserved_iova(struct iova_domain *from, struct iova_domain *to) } spin_unlock_irqrestore(&from->iova_rbtree_lock, flags); } +EXPORT_SYMBOL_GPL(copy_reserved_iova); struct iova * split_and_remove_iova(struct iova_domain *iovad, struct iova *iova, @@ -534,3 +549,6 @@ error: free_iova_mem(prev); return NULL; } + +MODULE_AUTHOR("Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>"); +MODULE_LICENSE("GPL"); diff --git a/drivers/irqchip/irq-gic-v3-its-pci-msi.c b/drivers/irqchip/irq-gic-v3-its-pci-msi.c index cf351c637464..a7c8c9ffbafd 100644 --- a/drivers/irqchip/irq-gic-v3-its-pci-msi.c +++ b/drivers/irqchip/irq-gic-v3-its-pci-msi.c @@ -62,7 +62,7 @@ static int its_get_pci_alias(struct pci_dev *pdev, u16 alias, void *data) dev_alias->dev_id = alias; if (pdev != dev_alias->pdev) - dev_alias->count += its_pci_msi_vec_count(dev_alias->pdev); + dev_alias->count += its_pci_msi_vec_count(pdev); return 0; } diff --git a/drivers/irqchip/irq-gic-v3-its.c b/drivers/irqchip/irq-gic-v3-its.c index ac7ae2b3cb83..25ceae9f7348 100644 --- a/drivers/irqchip/irq-gic-v3-its.c +++ b/drivers/irqchip/irq-gic-v3-its.c @@ -719,6 +719,9 @@ static unsigned long *its_lpi_alloc_chunks(int nr_irqs, int *base, int *nr_ids) out: spin_unlock(&lpi_lock); + if (!bitmap) + *base = *nr_ids = 0; + return bitmap; } diff --git a/drivers/irqchip/irq-mips-gic.c b/drivers/irqchip/irq-mips-gic.c index af2f16bb8a94..aeaa061f0dbf 100644 --- a/drivers/irqchip/irq-mips-gic.c +++ b/drivers/irqchip/irq-mips-gic.c @@ -320,6 +320,14 @@ static void gic_handle_shared_int(bool chained) intrmask[i] = gic_read(intrmask_reg); pending_reg += gic_reg_step; intrmask_reg += gic_reg_step; + + if (!config_enabled(CONFIG_64BIT) || mips_cm_is64) + continue; + + pending[i] |= (u64)gic_read(pending_reg) << 32; + intrmask[i] |= (u64)gic_read(intrmask_reg) << 32; + pending_reg += gic_reg_step; + intrmask_reg += gic_reg_step; } bitmap_and(pending, pending, intrmask, gic_shared_intrs); @@ -426,7 +434,7 @@ static int gic_set_affinity(struct irq_data *d, const struct cpumask *cpumask, spin_lock_irqsave(&gic_lock, flags); /* Re-route this IRQ */ - 
gic_map_to_vpe(irq, cpumask_first(&tmp)); + gic_map_to_vpe(irq, mips_cm_vp_id(cpumask_first(&tmp))); /* Update the pcpu_masks */ for (i = 0; i < NR_CPUS; i++) @@ -599,7 +607,7 @@ static __init void gic_ipi_init_one(unsigned int intr, int cpu, GIC_SHARED_TO_HWIRQ(intr)); int i; - gic_map_to_vpe(intr, cpu); + gic_map_to_vpe(intr, mips_cm_vp_id(cpu)); for (i = 0; i < NR_CPUS; i++) clear_bit(intr, pcpu_masks[i].pcpu_mask); set_bit(intr, pcpu_masks[cpu].pcpu_mask); diff --git a/drivers/md/bitmap.c b/drivers/md/bitmap.c index e51de52eeb94..48b5890c28e3 100644 --- a/drivers/md/bitmap.c +++ b/drivers/md/bitmap.c @@ -1997,7 +1997,8 @@ int bitmap_resize(struct bitmap *bitmap, sector_t blocks, if (bitmap->mddev->bitmap_info.offset || bitmap->mddev->bitmap_info.file) ret = bitmap_storage_alloc(&store, chunks, !bitmap->mddev->bitmap_info.external, - bitmap->cluster_slot); + mddev_is_clustered(bitmap->mddev) + ? bitmap->cluster_slot : 0); if (ret) goto err; diff --git a/drivers/md/md.c b/drivers/md/md.c index 4f5ecbe94ccb..c702de18207a 100644 --- a/drivers/md/md.c +++ b/drivers/md/md.c @@ -5409,9 +5409,13 @@ static int md_set_readonly(struct mddev *mddev, struct block_device *bdev) * which will now never happen */ wake_up_process(mddev->sync_thread->tsk); + if (mddev->external && test_bit(MD_CHANGE_PENDING, &mddev->flags)) + return -EBUSY; mddev_unlock(mddev); wait_event(resync_wait, !test_bit(MD_RECOVERY_RUNNING, &mddev->recovery)); + wait_event(mddev->sb_wait, + !test_bit(MD_CHANGE_PENDING, &mddev->flags)); mddev_lock_nointr(mddev); mutex_lock(&mddev->open_mutex); @@ -8160,6 +8164,7 @@ void md_check_recovery(struct mddev *mddev) md_reap_sync_thread(mddev); clear_bit(MD_RECOVERY_RECOVER, &mddev->recovery); clear_bit(MD_RECOVERY_NEEDED, &mddev->recovery); + clear_bit(MD_CHANGE_PENDING, &mddev->flags); goto unlock; } diff --git a/drivers/md/multipath.c b/drivers/md/multipath.c index d222522c52e0..d132f06afdd1 100644 --- a/drivers/md/multipath.c +++ b/drivers/md/multipath.c @@ -470,8 +470,7 @@ static int multipath_run (struct mddev *mddev) return 0; out_free_conf: - if (conf->pool) - mempool_destroy(conf->pool); + mempool_destroy(conf->pool); kfree(conf->multipaths); kfree(conf); mddev->private = NULL; diff --git a/drivers/md/raid0.c b/drivers/md/raid0.c index 63e619b2f44e..f8e5db0cb5aa 100644 --- a/drivers/md/raid0.c +++ b/drivers/md/raid0.c @@ -376,12 +376,6 @@ static int raid0_run(struct mddev *mddev) struct md_rdev *rdev; bool discard_supported = false; - rdev_for_each(rdev, mddev) { - disk_stack_limits(mddev->gendisk, rdev->bdev, - rdev->data_offset << 9); - if (blk_queue_discard(bdev_get_queue(rdev->bdev))) - discard_supported = true; - } blk_queue_max_hw_sectors(mddev->queue, mddev->chunk_sectors); blk_queue_max_write_same_sectors(mddev->queue, mddev->chunk_sectors); blk_queue_max_discard_sectors(mddev->queue, mddev->chunk_sectors); @@ -390,6 +384,12 @@ static int raid0_run(struct mddev *mddev) blk_queue_io_opt(mddev->queue, (mddev->chunk_sectors << 9) * mddev->raid_disks); + rdev_for_each(rdev, mddev) { + disk_stack_limits(mddev->gendisk, rdev->bdev, + rdev->data_offset << 9); + if (blk_queue_discard(bdev_get_queue(rdev->bdev))) + discard_supported = true; + } if (!discard_supported) queue_flag_clear_unlocked(QUEUE_FLAG_DISCARD, mddev->queue); else diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c index 4517f06c41ba..049df6c4a8cc 100644 --- a/drivers/md/raid1.c +++ b/drivers/md/raid1.c @@ -881,8 +881,7 @@ static sector_t wait_barrier(struct r1conf *conf, struct bio *bio) } if (bio && 
bio_data_dir(bio) == WRITE) { - if (bio->bi_iter.bi_sector >= - conf->mddev->curr_resync_completed) { + if (bio->bi_iter.bi_sector >= conf->next_resync) { if (conf->start_next_window == MaxSector) conf->start_next_window = conf->next_resync + @@ -1516,7 +1515,7 @@ static void close_sync(struct r1conf *conf) conf->r1buf_pool = NULL; spin_lock_irq(&conf->resync_lock); - conf->next_resync = 0; + conf->next_resync = MaxSector - 2 * NEXT_NORMALIO_DISTANCE; conf->start_next_window = MaxSector; conf->current_window_requests += conf->next_window_requests; @@ -2843,8 +2842,7 @@ static struct r1conf *setup_conf(struct mddev *mddev) abort: if (conf) { - if (conf->r1bio_pool) - mempool_destroy(conf->r1bio_pool); + mempool_destroy(conf->r1bio_pool); kfree(conf->mirrors); safe_put_page(conf->tmppage); kfree(conf->poolinfo); @@ -2946,8 +2944,7 @@ static void raid1_free(struct mddev *mddev, void *priv) { struct r1conf *conf = priv; - if (conf->r1bio_pool) - mempool_destroy(conf->r1bio_pool); + mempool_destroy(conf->r1bio_pool); kfree(conf->mirrors); safe_put_page(conf->tmppage); kfree(conf->poolinfo); diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c index 0fc33eb88855..7c99a4037715 100644 --- a/drivers/md/raid10.c +++ b/drivers/md/raid10.c @@ -3486,8 +3486,7 @@ static struct r10conf *setup_conf(struct mddev *mddev) printk(KERN_ERR "md/raid10:%s: couldn't allocate memory.\n", mdname(mddev)); if (conf) { - if (conf->r10bio_pool) - mempool_destroy(conf->r10bio_pool); + mempool_destroy(conf->r10bio_pool); kfree(conf->mirrors); safe_put_page(conf->tmppage); kfree(conf); @@ -3682,8 +3681,7 @@ static int run(struct mddev *mddev) out_free_conf: md_unregister_thread(&mddev->thread); - if (conf->r10bio_pool) - mempool_destroy(conf->r10bio_pool); + mempool_destroy(conf->r10bio_pool); safe_put_page(conf->tmppage); kfree(conf->mirrors); kfree(conf); @@ -3696,8 +3694,7 @@ static void raid10_free(struct mddev *mddev, void *priv) { struct r10conf *conf = priv; - if (conf->r10bio_pool) - mempool_destroy(conf->r10bio_pool); + mempool_destroy(conf->r10bio_pool); safe_put_page(conf->tmppage); kfree(conf->mirrors); kfree(conf->mirrors_old); diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c index 15ef2c641b2b..49bb8d3ff9be 100644 --- a/drivers/md/raid5.c +++ b/drivers/md/raid5.c @@ -2271,8 +2271,7 @@ static void shrink_stripes(struct r5conf *conf) drop_one_stripe(conf)) ; - if (conf->slab_cache) - kmem_cache_destroy(conf->slab_cache); + kmem_cache_destroy(conf->slab_cache); conf->slab_cache = NULL; } @@ -3150,6 +3149,8 @@ handle_failed_stripe(struct r5conf *conf, struct stripe_head *sh, spin_unlock_irq(&sh->stripe_lock); if (test_and_clear_bit(R5_Overlap, &sh->dev[i].flags)) wake_up(&conf->wait_for_overlap); + if (bi) + s->to_read--; while (bi && bi->bi_iter.bi_sector < sh->dev[i].sector + STRIPE_SECTORS) { struct bio *nextbi = @@ -3169,6 +3170,8 @@ handle_failed_stripe(struct r5conf *conf, struct stripe_head *sh, */ clear_bit(R5_LOCKED, &sh->dev[i].flags); } + s->to_write = 0; + s->written = 0; if (test_and_clear_bit(STRIPE_FULL_WRITE, &sh->state)) if (atomic_dec_and_test(&conf->pending_full_writes)) @@ -3300,7 +3303,7 @@ static int need_this_block(struct stripe_head *sh, struct stripe_head_state *s, */ return 0; - for (i = 0; i < s->failed; i++) { + for (i = 0; i < s->failed && i < 2; i++) { if (fdev[i]->towrite && !test_bit(R5_UPTODATE, &fdev[i]->flags) && !test_bit(R5_OVERWRITE, &fdev[i]->flags)) @@ -3324,7 +3327,7 @@ static int need_this_block(struct stripe_head *sh, struct stripe_head_state *s, sh->sector < 
sh->raid_conf->mddev->recovery_cp) /* reconstruct-write isn't being forced */ return 0; - for (i = 0; i < s->failed; i++) { + for (i = 0; i < s->failed && i < 2; i++) { if (s->failed_num[i] != sh->pd_idx && s->failed_num[i] != sh->qd_idx && !test_bit(R5_UPTODATE, &fdev[i]->flags) && diff --git a/drivers/mmc/core/core.c b/drivers/mmc/core/core.c index 0520064dc33b..a3eb20bdcd97 100644 --- a/drivers/mmc/core/core.c +++ b/drivers/mmc/core/core.c @@ -134,9 +134,11 @@ void mmc_request_done(struct mmc_host *host, struct mmc_request *mrq) int err = cmd->error; /* Flag re-tuning needed on CRC errors */ - if (err == -EILSEQ || (mrq->sbc && mrq->sbc->error == -EILSEQ) || + if ((cmd->opcode != MMC_SEND_TUNING_BLOCK && + cmd->opcode != MMC_SEND_TUNING_BLOCK_HS200) && + (err == -EILSEQ || (mrq->sbc && mrq->sbc->error == -EILSEQ) || (mrq->data && mrq->data->error == -EILSEQ) || - (mrq->stop && mrq->stop->error == -EILSEQ)) + (mrq->stop && mrq->stop->error == -EILSEQ))) mmc_retune_needed(host); if (err && cmd->retries && mmc_host_is_spi(host)) { diff --git a/drivers/mmc/core/host.c b/drivers/mmc/core/host.c index abd933b7029b..5466f25f0281 100644 --- a/drivers/mmc/core/host.c +++ b/drivers/mmc/core/host.c @@ -457,7 +457,7 @@ int mmc_of_parse(struct mmc_host *host) 0, &cd_gpio_invert); if (!ret) dev_info(host->parent, "Got CD GPIO\n"); - else if (ret != -ENOENT) + else if (ret != -ENOENT && ret != -ENOSYS) return ret; /* @@ -481,7 +481,7 @@ int mmc_of_parse(struct mmc_host *host) ret = mmc_gpiod_request_ro(host, "wp", 0, false, 0, &ro_gpio_invert); if (!ret) dev_info(host->parent, "Got WP GPIO\n"); - else if (ret != -ENOENT) + else if (ret != -ENOENT && ret != -ENOSYS) return ret; if (of_property_read_bool(np, "disable-wp")) diff --git a/drivers/mmc/host/pxamci.c b/drivers/mmc/host/pxamci.c index 1420f29628c7..8cadd74e8407 100644 --- a/drivers/mmc/host/pxamci.c +++ b/drivers/mmc/host/pxamci.c @@ -28,6 +28,7 @@ #include <linux/clk.h> #include <linux/err.h> #include <linux/mmc/host.h> +#include <linux/mmc/slot-gpio.h> #include <linux/io.h> #include <linux/regulator/consumer.h> #include <linux/gpio.h> @@ -454,12 +455,8 @@ static int pxamci_get_ro(struct mmc_host *mmc) { struct pxamci_host *host = mmc_priv(mmc); - if (host->pdata && gpio_is_valid(host->pdata->gpio_card_ro)) { - if (host->pdata->gpio_card_ro_invert) - return !gpio_get_value(host->pdata->gpio_card_ro); - else - return gpio_get_value(host->pdata->gpio_card_ro); - } + if (host->pdata && gpio_is_valid(host->pdata->gpio_card_ro)) + return mmc_gpio_get_ro(mmc); if (host->pdata && host->pdata->get_ro) return !!host->pdata->get_ro(mmc_dev(mmc)); /* @@ -551,6 +548,7 @@ static void pxamci_enable_sdio_irq(struct mmc_host *host, int enable) static const struct mmc_host_ops pxamci_ops = { .request = pxamci_request, + .get_cd = mmc_gpio_get_cd, .get_ro = pxamci_get_ro, .set_ios = pxamci_set_ios, .enable_sdio_irq = pxamci_enable_sdio_irq, @@ -790,37 +788,31 @@ static int pxamci_probe(struct platform_device *pdev) gpio_power = host->pdata->gpio_power; } if (gpio_is_valid(gpio_power)) { - ret = gpio_request(gpio_power, "mmc card power"); + ret = devm_gpio_request(&pdev->dev, gpio_power, + "mmc card power"); if (ret) { - dev_err(&pdev->dev, "Failed requesting gpio_power %d\n", gpio_power); + dev_err(&pdev->dev, "Failed requesting gpio_power %d\n", + gpio_power); goto out; } gpio_direction_output(gpio_power, host->pdata->gpio_power_invert); } - if (gpio_is_valid(gpio_ro)) { - ret = gpio_request(gpio_ro, "mmc card read only"); - if (ret) { - dev_err(&pdev->dev, 
"Failed requesting gpio_ro %d\n", gpio_ro); - goto err_gpio_ro; - } - gpio_direction_input(gpio_ro); + if (gpio_is_valid(gpio_ro)) + ret = mmc_gpio_request_ro(mmc, gpio_ro); + if (ret) { + dev_err(&pdev->dev, "Failed requesting gpio_ro %d\n", gpio_ro); + goto out; + } else { + mmc->caps |= host->pdata->gpio_card_ro_invert ? + MMC_CAP2_RO_ACTIVE_HIGH : 0; } - if (gpio_is_valid(gpio_cd)) { - ret = gpio_request(gpio_cd, "mmc card detect"); - if (ret) { - dev_err(&pdev->dev, "Failed requesting gpio_cd %d\n", gpio_cd); - goto err_gpio_cd; - } - gpio_direction_input(gpio_cd); - ret = request_irq(gpio_to_irq(gpio_cd), pxamci_detect_irq, - IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING, - "mmc card detect", mmc); - if (ret) { - dev_err(&pdev->dev, "failed to request card detect IRQ\n"); - goto err_request_irq; - } + if (gpio_is_valid(gpio_cd)) + ret = mmc_gpio_request_cd(mmc, gpio_cd, 0); + if (ret) { + dev_err(&pdev->dev, "Failed requesting gpio_cd %d\n", gpio_cd); + goto out; } if (host->pdata && host->pdata->init) @@ -835,13 +827,7 @@ static int pxamci_probe(struct platform_device *pdev) return 0; -err_request_irq: - gpio_free(gpio_cd); -err_gpio_cd: - gpio_free(gpio_ro); -err_gpio_ro: - gpio_free(gpio_power); - out: +out: if (host) { if (host->dma_chan_rx) dma_release_channel(host->dma_chan_rx); @@ -873,14 +859,6 @@ static int pxamci_remove(struct platform_device *pdev) gpio_ro = host->pdata->gpio_card_ro; gpio_power = host->pdata->gpio_power; } - if (gpio_is_valid(gpio_cd)) { - free_irq(gpio_to_irq(gpio_cd), mmc); - gpio_free(gpio_cd); - } - if (gpio_is_valid(gpio_ro)) - gpio_free(gpio_ro); - if (gpio_is_valid(gpio_power)) - gpio_free(gpio_power); if (host->vcc) regulator_put(host->vcc); diff --git a/drivers/mmc/host/sunxi-mmc.c b/drivers/mmc/host/sunxi-mmc.c index a7b7a6771598..b981b8552e43 100644 --- a/drivers/mmc/host/sunxi-mmc.c +++ b/drivers/mmc/host/sunxi-mmc.c @@ -210,6 +210,16 @@ #define SDXC_IDMAC_DES0_CES BIT(30) /* card error summary */ #define SDXC_IDMAC_DES0_OWN BIT(31) /* 1-idma owns it, 0-host owns it */ +#define SDXC_CLK_400K 0 +#define SDXC_CLK_25M 1 +#define SDXC_CLK_50M 2 +#define SDXC_CLK_50M_DDR 3 + +struct sunxi_mmc_clk_delay { + u32 output; + u32 sample; +}; + struct sunxi_idma_des { u32 config; u32 buf_size; @@ -229,6 +239,7 @@ struct sunxi_mmc_host { struct clk *clk_mmc; struct clk *clk_sample; struct clk *clk_output; + const struct sunxi_mmc_clk_delay *clk_delays; /* irq */ spinlock_t lock; @@ -654,25 +665,19 @@ static int sunxi_mmc_clk_set_rate(struct sunxi_mmc_host *host, /* determine delays */ if (rate <= 400000) { - oclk_dly = 180; - sclk_dly = 42; + oclk_dly = host->clk_delays[SDXC_CLK_400K].output; + sclk_dly = host->clk_delays[SDXC_CLK_400K].sample; } else if (rate <= 25000000) { - oclk_dly = 180; - sclk_dly = 75; + oclk_dly = host->clk_delays[SDXC_CLK_25M].output; + sclk_dly = host->clk_delays[SDXC_CLK_25M].sample; } else if (rate <= 50000000) { if (ios->timing == MMC_TIMING_UHS_DDR50) { - oclk_dly = 60; - sclk_dly = 120; + oclk_dly = host->clk_delays[SDXC_CLK_50M_DDR].output; + sclk_dly = host->clk_delays[SDXC_CLK_50M_DDR].sample; } else { - oclk_dly = 90; - sclk_dly = 150; + oclk_dly = host->clk_delays[SDXC_CLK_50M].output; + sclk_dly = host->clk_delays[SDXC_CLK_50M].sample; } - } else if (rate <= 100000000) { - oclk_dly = 6; - sclk_dly = 24; - } else if (rate <= 200000000) { - oclk_dly = 3; - sclk_dly = 12; } else { return -EINVAL; } @@ -871,6 +876,7 @@ static void sunxi_mmc_request(struct mmc_host *mmc, struct mmc_request *mrq) static const struct 
of_device_id sunxi_mmc_of_match[] = { { .compatible = "allwinner,sun4i-a10-mmc", }, { .compatible = "allwinner,sun5i-a13-mmc", }, + { .compatible = "allwinner,sun9i-a80-mmc", }, { /* sentinel */ } }; MODULE_DEVICE_TABLE(of, sunxi_mmc_of_match); @@ -884,6 +890,20 @@ static struct mmc_host_ops sunxi_mmc_ops = { .hw_reset = sunxi_mmc_hw_reset, }; +static const struct sunxi_mmc_clk_delay sunxi_mmc_clk_delays[] = { + [SDXC_CLK_400K] = { .output = 180, .sample = 180 }, + [SDXC_CLK_25M] = { .output = 180, .sample = 75 }, + [SDXC_CLK_50M] = { .output = 90, .sample = 120 }, + [SDXC_CLK_50M_DDR] = { .output = 60, .sample = 120 }, +}; + +static const struct sunxi_mmc_clk_delay sun9i_mmc_clk_delays[] = { + [SDXC_CLK_400K] = { .output = 180, .sample = 180 }, + [SDXC_CLK_25M] = { .output = 180, .sample = 75 }, + [SDXC_CLK_50M] = { .output = 150, .sample = 120 }, + [SDXC_CLK_50M_DDR] = { .output = 90, .sample = 120 }, +}; + static int sunxi_mmc_resource_request(struct sunxi_mmc_host *host, struct platform_device *pdev) { @@ -895,6 +915,11 @@ static int sunxi_mmc_resource_request(struct sunxi_mmc_host *host, else host->idma_des_size_bits = 16; + if (of_device_is_compatible(np, "allwinner,sun9i-a80-mmc")) + host->clk_delays = sun9i_mmc_clk_delays; + else + host->clk_delays = sunxi_mmc_clk_delays; + ret = mmc_regulator_get_supply(host->mmc); if (ret) { if (ret != -EPROBE_DEFER) diff --git a/drivers/mtd/ubi/io.c b/drivers/mtd/ubi/io.c index 5bbd1f094f4e..1fc23e48fe8e 100644 --- a/drivers/mtd/ubi/io.c +++ b/drivers/mtd/ubi/io.c @@ -926,6 +926,11 @@ static int validate_vid_hdr(const struct ubi_device *ubi, goto bad; } + if (data_size > ubi->leb_size) { + ubi_err(ubi, "bad data_size"); + goto bad; + } + if (vol_type == UBI_VID_STATIC) { /* * Although from high-level point of view static volumes may diff --git a/drivers/mtd/ubi/vtbl.c b/drivers/mtd/ubi/vtbl.c index 80bdd5b88bac..d85c19762160 100644 --- a/drivers/mtd/ubi/vtbl.c +++ b/drivers/mtd/ubi/vtbl.c @@ -649,6 +649,7 @@ static int init_volumes(struct ubi_device *ubi, if (ubi->corr_peb_count) ubi_err(ubi, "%d PEBs are corrupted and not used", ubi->corr_peb_count); + return -ENOSPC; } ubi->rsvd_pebs += reserved_pebs; ubi->avail_pebs -= reserved_pebs; diff --git a/drivers/mtd/ubi/wl.c b/drivers/mtd/ubi/wl.c index 275d9fb6fe5c..eb4489f9082f 100644 --- a/drivers/mtd/ubi/wl.c +++ b/drivers/mtd/ubi/wl.c @@ -1601,6 +1601,7 @@ int ubi_wl_init(struct ubi_device *ubi, struct ubi_attach_info *ai) if (ubi->corr_peb_count) ubi_err(ubi, "%d PEBs are corrupted and not used", ubi->corr_peb_count); + err = -ENOSPC; goto out_free; } ubi->avail_pebs -= reserved_pebs; diff --git a/drivers/net/dsa/mv88e6xxx.c b/drivers/net/dsa/mv88e6xxx.c index f8baa897d1a0..1f7dd927cc5e 100644 --- a/drivers/net/dsa/mv88e6xxx.c +++ b/drivers/net/dsa/mv88e6xxx.c @@ -2051,6 +2051,8 @@ static int mv88e6xxx_setup_port(struct dsa_switch *ds, int port) reg |= PORT_CONTROL_FRAME_ETHER_TYPE_DSA; else reg |= PORT_CONTROL_FRAME_MODE_DSA; + reg |= PORT_CONTROL_FORWARD_UNKNOWN | + PORT_CONTROL_FORWARD_UNKNOWN_MC; } if (mv88e6xxx_6352_family(ds) || mv88e6xxx_6351_family(ds) || diff --git a/drivers/net/ethernet/brocade/bna/bfa_ioc.c b/drivers/net/ethernet/brocade/bna/bfa_ioc.c index b7a0f7879de2..9e59663a6ead 100644 --- a/drivers/net/ethernet/brocade/bna/bfa_ioc.c +++ b/drivers/net/ethernet/brocade/bna/bfa_ioc.c @@ -1543,7 +1543,7 @@ bfa_flash_cmd_act_check(void __iomem *pci_bar) } /* Flush FLI data fifo. 
*/ -static u32 +static int bfa_flash_fifo_flush(void __iomem *pci_bar) { u32 i; @@ -1573,11 +1573,11 @@ bfa_flash_fifo_flush(void __iomem *pci_bar) } /* Read flash status. */ -static u32 +static int bfa_flash_status_read(void __iomem *pci_bar) { union bfa_flash_dev_status_reg dev_status; - u32 status; + int status; u32 ret_status; int i; @@ -1611,11 +1611,11 @@ bfa_flash_status_read(void __iomem *pci_bar) } /* Start flash read operation. */ -static u32 +static int bfa_flash_read_start(void __iomem *pci_bar, u32 offset, u32 len, char *buf) { - u32 status; + int status; /* len must be multiple of 4 and not exceeding fifo size */ if (len == 0 || len > BFA_FLASH_FIFO_SIZE || (len & 0x03) != 0) @@ -1703,7 +1703,8 @@ static enum bfa_status bfa_flash_raw_read(void __iomem *pci_bar, u32 offset, char *buf, u32 len) { - u32 n, status; + u32 n; + int status; u32 off, l, s, residue, fifo_sz; residue = len; diff --git a/drivers/net/ethernet/hisilicon/hip04_eth.c b/drivers/net/ethernet/hisilicon/hip04_eth.c index cc2d8b4b18e3..253f8ed0537a 100644 --- a/drivers/net/ethernet/hisilicon/hip04_eth.c +++ b/drivers/net/ethernet/hisilicon/hip04_eth.c @@ -816,7 +816,7 @@ static int hip04_mac_probe(struct platform_device *pdev) struct net_device *ndev; struct hip04_priv *priv; struct resource *res; - unsigned int irq; + int irq; int ret; ndev = alloc_etherdev(sizeof(struct hip04_priv)); diff --git a/drivers/net/ethernet/ibm/emac/core.h b/drivers/net/ethernet/ibm/emac/core.h index 28df37420da9..ac02c675c59c 100644 --- a/drivers/net/ethernet/ibm/emac/core.h +++ b/drivers/net/ethernet/ibm/emac/core.h @@ -460,8 +460,8 @@ struct emac_ethtool_regs_subhdr { u32 index; }; -#define EMAC_ETHTOOL_REGS_VER 0 -#define EMAC4_ETHTOOL_REGS_VER 1 -#define EMAC4SYNC_ETHTOOL_REGS_VER 2 +#define EMAC_ETHTOOL_REGS_VER 3 +#define EMAC4_ETHTOOL_REGS_VER 4 +#define EMAC4SYNC_ETHTOOL_REGS_VER 5 #endif /* __IBM_NEWEMAC_CORE_H */ diff --git a/drivers/net/ethernet/intel/i40e/i40e_adminq.c b/drivers/net/ethernet/intel/i40e/i40e_adminq.c index 3e0d20037675..62488a67149d 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_adminq.c +++ b/drivers/net/ethernet/intel/i40e/i40e_adminq.c @@ -946,6 +946,13 @@ i40e_status i40e_clean_arq_element(struct i40e_hw *hw, /* take the lock before we start messing with the ring */ mutex_lock(&hw->aq.arq_mutex); + if (hw->aq.arq.count == 0) { + i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE, + "AQRX: Admin queue not initialized.\n"); + ret_code = I40E_ERR_QUEUE_EMPTY; + goto clean_arq_element_err; + } + /* set next_to_use to head */ ntu = (rd32(hw, hw->aq.arq.head) & I40E_PF_ARQH_ARQH_MASK); if (ntu == ntc) { @@ -1007,6 +1014,8 @@ clean_arq_element_out: /* Set pending if needed, unlock and return */ if (pending != NULL) *pending = (ntc > ntu ? 
hw->aq.arq.count : 0) + (ntu - ntc); + +clean_arq_element_err: mutex_unlock(&hw->aq.arq_mutex); if (i40e_is_nvm_update_op(&e->desc)) { diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c index 851c1a159be8..2fdf978ae6a5 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_main.c +++ b/drivers/net/ethernet/intel/i40e/i40e_main.c @@ -2672,7 +2672,8 @@ static int i40e_configure_rx_ring(struct i40e_ring *ring) rx_ctx.lrxqthresh = 2; rx_ctx.crcstrip = 1; rx_ctx.l2tsel = 1; - rx_ctx.showiv = 1; + /* this controls whether VLAN is stripped from inner headers */ + rx_ctx.showiv = 0; #ifdef I40E_FCOE rx_ctx.fc_ena = (vsi->type == I40E_VSI_FCOE); #endif diff --git a/drivers/net/ethernet/intel/i40evf/i40e_adminq.c b/drivers/net/ethernet/intel/i40evf/i40e_adminq.c index f08450b90774..929d47152bf2 100644 --- a/drivers/net/ethernet/intel/i40evf/i40e_adminq.c +++ b/drivers/net/ethernet/intel/i40evf/i40e_adminq.c @@ -887,6 +887,13 @@ i40e_status i40evf_clean_arq_element(struct i40e_hw *hw, /* take the lock before we start messing with the ring */ mutex_lock(&hw->aq.arq_mutex); + if (hw->aq.arq.count == 0) { + i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE, + "AQRX: Admin queue not initialized.\n"); + ret_code = I40E_ERR_QUEUE_EMPTY; + goto clean_arq_element_err; + } + /* set next_to_use to head */ ntu = (rd32(hw, hw->aq.arq.head) & I40E_VF_ARQH1_ARQH_MASK); if (ntu == ntc) { @@ -948,6 +955,8 @@ clean_arq_element_out: /* Set pending if needed, unlock and return */ if (pending != NULL) *pending = (ntc > ntu ? hw->aq.arq.count : 0) + (ntu - ntc); + +clean_arq_element_err: mutex_unlock(&hw->aq.arq_mutex); return ret_code; diff --git a/drivers/net/ethernet/mellanox/mlx4/mcg.c b/drivers/net/ethernet/mellanox/mlx4/mcg.c index bd9ea0d01aae..1d4e2e054647 100644 --- a/drivers/net/ethernet/mellanox/mlx4/mcg.c +++ b/drivers/net/ethernet/mellanox/mlx4/mcg.c @@ -1184,10 +1184,11 @@ out: if (prot == MLX4_PROT_ETH) { /* manage the steering entry for promisc mode */ if (new_entry) - new_steering_entry(dev, port, steer, index, qp->qpn); + err = new_steering_entry(dev, port, steer, + index, qp->qpn); else - existing_steering_entry(dev, port, steer, - index, qp->qpn); + err = existing_steering_entry(dev, port, steer, + index, qp->qpn); } if (err && link && index != -1) { if (index < dev->caps.num_mgms) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fw.c b/drivers/net/ethernet/mellanox/mlx5/core/fw.c index aa0d5ffe92d8..9335e5ae18cc 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/fw.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/fw.c @@ -200,25 +200,3 @@ int mlx5_cmd_teardown_hca(struct mlx5_core_dev *dev) return err; } - -int mlx5_core_query_special_context(struct mlx5_core_dev *dev, u32 *rsvd_lkey) -{ - struct mlx5_cmd_query_special_contexts_mbox_in in; - struct mlx5_cmd_query_special_contexts_mbox_out out; - int err; - - memset(&in, 0, sizeof(in)); - memset(&out, 0, sizeof(out)); - in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_QUERY_SPECIAL_CONTEXTS); - err = mlx5_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out)); - if (err) - return err; - - if (out.hdr.status) - err = mlx5_cmd_status_to_err(&out.hdr); - - *rsvd_lkey = be32_to_cpu(out.resd_lkey); - - return err; -} -EXPORT_SYMBOL(mlx5_core_query_special_context); diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c index 2b32e0c5a0b4..b4f21232019a 100644 --- a/drivers/net/ethernet/realtek/r8169.c +++ b/drivers/net/ethernet/realtek/r8169.c @@ -6081,7 +6081,7 @@ static void 
rtl_hw_start_8168h_1(struct rtl8169_private *tp) { void __iomem *ioaddr = tp->mmio_addr; struct pci_dev *pdev = tp->pci_dev; - u16 rg_saw_cnt; + int rg_saw_cnt; u32 data; static const struct ephy_info e_info_8168h_1[] = { { 0x1e, 0x0800, 0x0001 }, diff --git a/drivers/pci/pci-driver.c b/drivers/pci/pci-driver.c index dd652f2ae03d..108a3118ace7 100644 --- a/drivers/pci/pci-driver.c +++ b/drivers/pci/pci-driver.c @@ -299,9 +299,10 @@ static long local_pci_probe(void *_ddi) * Unbound PCI devices are always put in D0, regardless of * runtime PM status. During probe, the device is set to * active and the usage count is incremented. If the driver - * supports runtime PM, it should call pm_runtime_put_noidle() - * in its probe routine and pm_runtime_get_noresume() in its - * remove routine. + * supports runtime PM, it should call pm_runtime_put_noidle(), + * or any other runtime PM helper function decrementing the usage + * count, in its probe routine and pm_runtime_get_noresume() in + * its remove routine. */ pm_runtime_get_sync(dev); pci_dev->driver = pci_drv; diff --git a/drivers/perf/Kconfig b/drivers/perf/Kconfig index d9de36ee165d..04e2653bb8c0 100644 --- a/drivers/perf/Kconfig +++ b/drivers/perf/Kconfig @@ -5,7 +5,7 @@ menu "Performance monitor support" config ARM_PMU - depends on PERF_EVENTS && ARM + depends on PERF_EVENTS && (ARM || ARM64) bool "ARM PMU framework" default y help diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c index cbfc5990052b..126a48c6431e 100644 --- a/drivers/scsi/scsi_lib.c +++ b/drivers/scsi/scsi_lib.c @@ -1957,7 +1957,7 @@ static int scsi_mq_prep_fn(struct request *req) static void scsi_mq_done(struct scsi_cmnd *cmd) { trace_scsi_dispatch_cmd_done(cmd); - blk_mq_complete_request(cmd->request); + blk_mq_complete_request(cmd->request, cmd->request->errors); } static int scsi_queue_rq(struct blk_mq_hw_ctx *hctx, diff --git a/drivers/thermal/power_allocator.c b/drivers/thermal/power_allocator.c index 7ff96270c933..e570ff084add 100644 --- a/drivers/thermal/power_allocator.c +++ b/drivers/thermal/power_allocator.c @@ -144,6 +144,16 @@ static void estimate_pid_constants(struct thermal_zone_device *tz, switch_on_temp = 0; temperature_threshold = control_temp - switch_on_temp; + /* + * estimate_pid_constants() tries to find appropriate default + * values for thermal zones that don't provide them. If a + * system integrator has configured a thermal zone with two + * passive trip points at the same temperature, that person + * hasn't put any effort into setting up the thermal zone + * properly, so just give up. + */ + if (!temperature_threshold) + return; if (!tz->tzp->k_po || force) tz->tzp->k_po = int_to_frac(sustainable_power) / diff --git a/drivers/watchdog/Kconfig b/drivers/watchdog/Kconfig index c68edc16aa54..79e1aa1b0959 100644 --- a/drivers/watchdog/Kconfig +++ b/drivers/watchdog/Kconfig @@ -817,8 +817,9 @@ config ITCO_WDT tristate "Intel TCO Timer/Watchdog" depends on (X86 || IA64) && PCI select WATCHDOG_CORE + depends on I2C || I2C=n select LPC_ICH if !EXPERT - select I2C_I801 if !EXPERT + select I2C_I801 if !EXPERT && I2C ---help--- Hardware driver for the Intel TCO timer based watchdog devices. These drivers are included in the Intel 82801 I/O Controller
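Looping back to the local_pci_probe() comment above: the PCI core holds a runtime PM reference across probe, so a driver that wants its device ever to runtime-suspend must drop that reference itself and take it back on the way out. A hedged skeleton of a driver honoring that contract (the "foo" driver, its callbacks, and the omitted id_table are invented for illustration):

#include <linux/module.h>
#include <linux/pci.h>
#include <linux/pm_runtime.h>

static int foo_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	int err;

	err = pcim_enable_device(pdev);
	if (err)
		return err;

	/*
	 * local_pci_probe() called pm_runtime_get_sync() before this
	 * callback; drop that reference (without an idle check) so the
	 * usage counter can reach zero once runtime PM is unblocked,
	 * e.g. from user space via sysfs.
	 */
	pm_runtime_put_noidle(&pdev->dev);
	return 0;
}

static void foo_remove(struct pci_dev *pdev)
{
	/* Rebalance the counter for the core's put after unbind. */
	pm_runtime_get_noresume(&pdev->dev);
}

static struct pci_driver foo_driver = {
	.name   = "foo",
	.probe  = foo_probe,
	.remove = foo_remove,
};
module_pci_driver(foo_driver);

MODULE_LICENSE("GPL");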
diff --git a/drivers/watchdog/bcm2835_wdt.c b/drivers/watchdog/bcm2835_wdt.c index 66c3e656a616..8a5ce5b5a0b6 100644 --- a/drivers/watchdog/bcm2835_wdt.c +++ b/drivers/watchdog/bcm2835_wdt.c @@ -36,6 +36,13 @@ #define PM_RSTC_WRCFG_FULL_RESET 0x00000020 #define PM_RSTC_RESET 0x00000102 +/* + * The Raspberry Pi firmware uses the RSTS register to know which partition + * to boot from. The partition value is spread into bits 0, 2, 4, 6, 8, 10. + * Partition 63 is a special partition used by the firmware to indicate halt. + */ +#define PM_RSTS_RASPBERRYPI_HALT 0x555 + #define SECS_TO_WDOG_TICKS(x) ((x) << 16) #define WDOG_TICKS_TO_SECS(x) ((x) >> 16) @@ -151,8 +158,7 @@ static void bcm2835_power_off(void) * hard reset. */ val = readl_relaxed(wdt->base + PM_RSTS); - val &= PM_RSTC_WRCFG_CLR; - val |= PM_PASSWORD | PM_RSTS_HADWRH_SET; + val |= PM_PASSWORD | PM_RSTS_RASPBERRYPI_HALT; writel_relaxed(val, wdt->base + PM_RSTS); /* Continue with normal reset mechanism */ diff --git a/drivers/watchdog/gef_wdt.c b/drivers/watchdog/gef_wdt.c index cc1bdfc2ff71..006e2348022c 100644 --- a/drivers/watchdog/gef_wdt.c +++ b/drivers/watchdog/gef_wdt.c @@ -303,6 +303,7 @@ static const struct of_device_id gef_wdt_ids[] = { }, {}, }; +MODULE_DEVICE_TABLE(of, gef_wdt_ids); static struct platform_driver gef_wdt_driver = { .driver = { diff --git a/drivers/watchdog/mena21_wdt.c b/drivers/watchdog/mena21_wdt.c index 69013007dc47..098fa9c34d6d 100644 --- a/drivers/watchdog/mena21_wdt.c +++ b/drivers/watchdog/mena21_wdt.c @@ -253,6 +253,7 @@ static const struct of_device_id a21_wdt_ids[] = { { .compatible = "men,a021-wdt" }, { }, }; +MODULE_DEVICE_TABLE(of, a21_wdt_ids); static struct platform_driver a21_wdt_driver = { .probe = a21_wdt_probe, diff --git a/drivers/watchdog/moxart_wdt.c b/drivers/watchdog/moxart_wdt.c index 2789da2c0515..60b0605bd7e6 100644 --- a/drivers/watchdog/moxart_wdt.c +++ b/drivers/watchdog/moxart_wdt.c @@ -168,6 +168,7 @@ static const struct of_device_id moxart_watchdog_match[] = { { .compatible = "moxa,moxart-watchdog" }, { }, }; +MODULE_DEVICE_TABLE(of, moxart_watchdog_match); static struct platform_driver moxart_wdt_driver = { .probe = moxart_wdt_probe, @@ -569,8 +569,20 @@ int __dax_pmd_fault(struct vm_area_struct *vma, unsigned long address, if (!buffer_size_valid(&bh) || bh.b_size < PMD_SIZE) goto fallback; + sector = bh.b_blocknr << (blkbits - 9); + if (buffer_unwritten(&bh) || buffer_new(&bh)) { int i; + + length = bdev_direct_access(bh.b_bdev, sector, &kaddr, &pfn, + bh.b_size); + if (length < 0) { + result = VM_FAULT_SIGBUS; + goto out; + } + if ((length < PMD_SIZE) || (pfn & PG_PMD_COLOUR)) + goto fallback; + for (i = 0; i < PTRS_PER_PMD; i++) clear_pmem(kaddr + i * PAGE_SIZE, PAGE_SIZE); wmb_pmem(); @@ -623,7 +635,6 @@ int __dax_pmd_fault(struct vm_area_struct *vma, unsigned long address, result = VM_FAULT_NOPAGE; spin_unlock(ptl); } else { - sector = bh.b_blocknr << (blkbits - 9); length = bdev_direct_access(bh.b_bdev, sector, &kaddr, &pfn, bh.b_size); if (length < 0) { diff --git a/fs/ubifs/xattr.c b/fs/ubifs/xattr.c index 96f3448b6eb4..fd65b3f1923c 100644 --- a/fs/ubifs/xattr.c +++ b/fs/ubifs/xattr.c @@ -652,11 +652,8 @@ int ubifs_init_security(struct inode *dentry, struct inode *inode, { int err; - mutex_lock(&inode->i_mutex); err = security_inode_init_security(inode, dentry, qstr, &init_xattrs, 0); - mutex_unlock(&inode->i_mutex); - - if (err) { struct ubifs_info *c = dentry->i_sb->s_fs_info; ubifs_err(c, "cannot initialize 
security for inode %lu, error %d", diff --git a/include/asm-generic/word-at-a-time.h b/include/asm-generic/word-at-a-time.h index 94f9ea8abcae..011dde083f23 100644 --- a/include/asm-generic/word-at-a-time.h +++ b/include/asm-generic/word-at-a-time.h @@ -1,15 +1,10 @@ #ifndef _ASM_WORD_AT_A_TIME_H #define _ASM_WORD_AT_A_TIME_H -/* - * This says "generic", but it's actually big-endian only. - * Little-endian can use more efficient versions of these - * interfaces, see for example - * arch/x86/include/asm/word-at-a-time.h - * for those. - */ - #include <linux/kernel.h> +#include <asm/byteorder.h> + +#ifdef __BIG_ENDIAN struct word_at_a_time { const unsigned long high_bits, low_bits; @@ -53,4 +48,73 @@ static inline bool has_zero(unsigned long val, unsigned long *data, const struct #define zero_bytemask(mask) (~1ul << __fls(mask)) #endif +#else + +/* + * The optimal byte mask counting is probably going to be something + * that is architecture-specific. If you have a reliably fast + * bit count instruction, that might be better than the multiply + * and shift, for example. + */ +struct word_at_a_time { + const unsigned long one_bits, high_bits; +}; + +#define WORD_AT_A_TIME_CONSTANTS { REPEAT_BYTE(0x01), REPEAT_BYTE(0x80) } + +#ifdef CONFIG_64BIT + +/* + * Jan Achrenius on G+: microoptimized version of + * the simpler "(mask & ONEBYTES) * ONEBYTES >> 56" + * that works for the bytemasks without having to + * mask them first. + */ +static inline long count_masked_bytes(unsigned long mask) +{ + return mask*0x0001020304050608ul >> 56; +} + +#else /* 32-bit case */ + +/* Carl Chatfield / Jan Achrenius G+ version for 32-bit */ +static inline long count_masked_bytes(long mask) +{ + /* (000000 0000ff 00ffff ffffff) -> ( 1 1 2 3 ) */ + long a = (0x0ff0001+mask) >> 23; + /* Fix the 1 for 00 case */ + return a & mask; +} + +#endif + +/* Return nonzero if it has a zero */ +static inline unsigned long has_zero(unsigned long a, unsigned long *bits, const struct word_at_a_time *c) +{ + unsigned long mask = ((a - c->one_bits) & ~a) & c->high_bits; + *bits = mask; + return mask; +} + +static inline unsigned long prep_zero_mask(unsigned long a, unsigned long bits, const struct word_at_a_time *c) +{ + return bits; +} + +static inline unsigned long create_zero_mask(unsigned long bits) +{ + bits = (bits - 1) & ~bits; + return bits >> 7; +} + +/* The mask we created is directly usable as a bytemask */ +#define zero_bytemask(mask) (mask) + +static inline unsigned long find_zero(unsigned long mask) +{ + return count_masked_bytes(mask); +} + +#endif /* __BIG_ENDIAN */ + #endif /* _ASM_WORD_AT_A_TIME_H */ diff --git a/include/drm/drm_crtc_helper.h b/include/drm/drm_crtc_helper.h index 2a747a91fded..3febb4b9fce9 100644 --- a/include/drm/drm_crtc_helper.h +++ b/include/drm/drm_crtc_helper.h @@ -240,5 +240,6 @@ extern void drm_kms_helper_hotplug_event(struct drm_device *dev); extern void drm_kms_helper_poll_disable(struct drm_device *dev); extern void drm_kms_helper_poll_enable(struct drm_device *dev); +extern void drm_kms_helper_poll_enable_locked(struct drm_device *dev); #endif diff --git a/include/drm/drm_dp_helper.h b/include/drm/drm_dp_helper.h index 499e9f625aef..0212d139a480 100644 --- a/include/drm/drm_dp_helper.h +++ b/include/drm/drm_dp_helper.h @@ -568,6 +568,10 @@ #define MODE_I2C_READ 4 #define MODE_I2C_STOP 8 +/* DP 1.2 MST PORTs - Section 2.5.1 v1.2a spec */ +#define DP_MST_PHYSICAL_PORT_0 0 +#define DP_MST_LOGICAL_PORT_0 8 + #define DP_LINK_STATUS_SIZE 6 bool drm_dp_channel_eq_ok(const u8 
link_status[DP_LINK_STATUS_SIZE], int lane_count); diff --git a/include/drm/drm_dp_mst_helper.h b/include/drm/drm_dp_mst_helper.h index 86d0b25ed054..0f408b002d98 100644 --- a/include/drm/drm_dp_mst_helper.h +++ b/include/drm/drm_dp_mst_helper.h @@ -374,6 +374,7 @@ struct drm_dp_mst_topology_mgr; struct drm_dp_mst_topology_cbs { /* create a connector for a port */ struct drm_connector *(*add_connector)(struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port, const char *path); + void (*register_connector)(struct drm_connector *connector); void (*destroy_connector)(struct drm_dp_mst_topology_mgr *mgr, struct drm_connector *connector); void (*hotplug)(struct drm_dp_mst_topology_mgr *mgr); diff --git a/include/linux/acpi.h b/include/linux/acpi.h index 7235c4851460..43856d19cf4d 100644 --- a/include/linux/acpi.h +++ b/include/linux/acpi.h @@ -217,6 +217,7 @@ struct pci_dev; int acpi_pci_irq_enable (struct pci_dev *dev); void acpi_penalize_isa_irq(int irq, int active); +bool acpi_isa_irq_available(int irq); void acpi_penalize_sci_irq(int irq, int trigger, int polarity); void acpi_pci_irq_disable (struct pci_dev *dev); diff --git a/include/linux/blk-mq.h b/include/linux/blk-mq.h index 37d1602c4f7a..5e7d43ab61c0 100644 --- a/include/linux/blk-mq.h +++ b/include/linux/blk-mq.h @@ -145,7 +145,6 @@ enum { BLK_MQ_F_SHOULD_MERGE = 1 << 0, BLK_MQ_F_TAG_SHARED = 1 << 1, BLK_MQ_F_SG_MERGE = 1 << 2, - BLK_MQ_F_SYSFS_UP = 1 << 3, BLK_MQ_F_DEFER_ISSUE = 1 << 4, BLK_MQ_F_ALLOC_POLICY_START_BIT = 8, BLK_MQ_F_ALLOC_POLICY_BITS = 1, @@ -215,7 +214,7 @@ void blk_mq_add_to_requeue_list(struct request *rq, bool at_head); void blk_mq_cancel_requeue_work(struct request_queue *q); void blk_mq_kick_requeue_list(struct request_queue *q); void blk_mq_abort_requeue_list(struct request_queue *q); -void blk_mq_complete_request(struct request *rq); +void blk_mq_complete_request(struct request *rq, int error); void blk_mq_stop_hw_queue(struct blk_mq_hw_ctx *hctx); void blk_mq_start_hw_queue(struct blk_mq_hw_ctx *hctx); @@ -224,8 +223,6 @@ void blk_mq_start_hw_queues(struct request_queue *q); void blk_mq_start_stopped_hw_queues(struct request_queue *q, bool async); void blk_mq_run_hw_queues(struct request_queue *q, bool async); void blk_mq_delay_queue(struct blk_mq_hw_ctx *hctx, unsigned long msecs); -void blk_mq_tag_busy_iter(struct blk_mq_hw_ctx *hctx, busy_iter_fn *fn, - void *priv); void blk_mq_all_tag_busy_iter(struct blk_mq_tags *tags, busy_tag_iter_fn *fn, void *priv); void blk_mq_freeze_queue(struct request_queue *q); diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h index 99da9ebc7377..19c2e947d4d1 100644 --- a/include/linux/blkdev.h +++ b/include/linux/blkdev.h @@ -456,6 +456,8 @@ struct request_queue { struct blk_mq_tag_set *tag_set; struct list_head tag_set_list; struct bio_set *bio_split; + + bool mq_sysfs_init_done; }; #define QUEUE_FLAG_QUEUED 1 /* uses generic tag queueing */ diff --git a/include/linux/iova.h b/include/linux/iova.h index 3920a19d8194..92f7177db2ce 100644 --- a/include/linux/iova.h +++ b/include/linux/iova.h @@ -68,8 +68,8 @@ static inline unsigned long iova_pfn(struct iova_domain *iovad, dma_addr_t iova) return iova >> iova_shift(iovad); } -int iommu_iova_cache_init(void); -void iommu_iova_cache_destroy(void); +int iova_cache_get(void); +void iova_cache_put(void); struct iova *alloc_iova_mem(void); void free_iova_mem(struct iova *iova); diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h index ad800e62cb7a..6452ff4c463f 100644 --- 
a/include/linux/memcontrol.h +++ b/include/linux/memcontrol.h @@ -242,7 +242,6 @@ struct mem_cgroup { * percpu counter. */ struct mem_cgroup_stat_cpu __percpu *stat; - spinlock_t pcp_counter_lock; #if defined(CONFIG_MEMCG_KMEM) && defined(CONFIG_INET) struct cg_proto tcp_mem; diff --git a/include/linux/mlx5/device.h b/include/linux/mlx5/device.h index 8eb3b19af2a4..250b1ff8b48d 100644 --- a/include/linux/mlx5/device.h +++ b/include/linux/mlx5/device.h @@ -402,17 +402,6 @@ struct mlx5_cmd_teardown_hca_mbox_out { u8 rsvd[8]; }; -struct mlx5_cmd_query_special_contexts_mbox_in { - struct mlx5_inbox_hdr hdr; - u8 rsvd[8]; -}; - -struct mlx5_cmd_query_special_contexts_mbox_out { - struct mlx5_outbox_hdr hdr; - __be32 dump_fill_mkey; - __be32 resd_lkey; -}; - struct mlx5_cmd_layout { u8 type; u8 rsvd0[3]; diff --git a/include/linux/mlx5/driver.h b/include/linux/mlx5/driver.h index 27b53f9a24ad..8b6d6f2154a4 100644 --- a/include/linux/mlx5/driver.h +++ b/include/linux/mlx5/driver.h @@ -845,7 +845,6 @@ void *mlx5_get_protocol_dev(struct mlx5_core_dev *mdev, int protocol); int mlx5_register_interface(struct mlx5_interface *intf); void mlx5_unregister_interface(struct mlx5_interface *intf); int mlx5_core_query_vendor_id(struct mlx5_core_dev *mdev, u32 *vendor_id); -int mlx5_core_query_special_context(struct mlx5_core_dev *dev, u32 *rsvd_lkey); struct mlx5_profile { u64 mask; diff --git a/include/linux/mm.h b/include/linux/mm.h index 91c08f6f0dc9..80001de019ba 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h @@ -905,6 +905,27 @@ static inline void set_page_links(struct page *page, enum zone_type zone, #endif } +#ifdef CONFIG_MEMCG +static inline struct mem_cgroup *page_memcg(struct page *page) +{ + return page->mem_cgroup; +} + +static inline void set_page_memcg(struct page *page, struct mem_cgroup *memcg) +{ + page->mem_cgroup = memcg; +} +#else +static inline struct mem_cgroup *page_memcg(struct page *page) +{ + return NULL; +} + +static inline void set_page_memcg(struct page *page, struct mem_cgroup *memcg) +{ +} +#endif + /* * Some inline functions in vmstat.h depend on page_zone() */ diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h index ff476515f716..581abf848566 100644 --- a/include/linux/rcupdate.h +++ b/include/linux/rcupdate.h @@ -230,12 +230,11 @@ void __wait_rcu_gp(bool checktiny, int n, call_rcu_func_t *crcu_array, struct rcu_synchronize *rs_array); #define _wait_rcu_gp(checktiny, ...) \ -do { \ - call_rcu_func_t __crcu_array[] = { __VA_ARGS__ }; \ - const int __n = ARRAY_SIZE(__crcu_array); \ - struct rcu_synchronize __rs_array[__n]; \ - \ - __wait_rcu_gp(checktiny, __n, __crcu_array, __rs_array); \ +do { \ + call_rcu_func_t __crcu_array[] = { __VA_ARGS__ }; \ + struct rcu_synchronize __rs_array[ARRAY_SIZE(__crcu_array)]; \ + __wait_rcu_gp(checktiny, ARRAY_SIZE(__crcu_array), \ + __crcu_array, __rs_array); \ } while (0) #define wait_rcu_gp(...) 
_wait_rcu_gp(false, __VA_ARGS__) diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h index 2b0a30a6e31c..4398411236f1 100644 --- a/include/linux/skbuff.h +++ b/include/linux/skbuff.h @@ -2708,7 +2708,7 @@ static inline void skb_postpull_rcsum(struct sk_buff *skb, if (skb->ip_summed == CHECKSUM_COMPLETE) skb->csum = csum_sub(skb->csum, csum_partial(start, len, 0)); else if (skb->ip_summed == CHECKSUM_PARTIAL && - skb_checksum_start_offset(skb) <= len) + skb_checksum_start_offset(skb) < 0) skb->ip_summed = CHECKSUM_NONE; } diff --git a/include/linux/string.h b/include/linux/string.h index a8d90db9c4b0..9ef7795e65e4 100644 --- a/include/linux/string.h +++ b/include/linux/string.h @@ -25,6 +25,9 @@ extern char * strncpy(char *,const char *, __kernel_size_t); #ifndef __HAVE_ARCH_STRLCPY size_t strlcpy(char *, const char *, size_t); #endif +#ifndef __HAVE_ARCH_STRSCPY +ssize_t __must_check strscpy(char *, const char *, size_t); +#endif #ifndef __HAVE_ARCH_STRCAT extern char * strcat(char *, const char *); #endif diff --git a/include/net/af_unix.h b/include/net/af_unix.h index 4a167b30a12f..cb1b9bbda332 100644 --- a/include/net/af_unix.h +++ b/include/net/af_unix.h @@ -63,7 +63,11 @@ struct unix_sock { #define UNIX_GC_MAYBE_CYCLE 1 struct socket_wq peer_wq; }; -#define unix_sk(__sk) ((struct unix_sock *)__sk) + +static inline struct unix_sock *unix_sk(struct sock *sk) +{ + return (struct unix_sock *)sk; +} #define peer_wait peer_wq.wait diff --git a/include/uapi/linux/userfaultfd.h b/include/uapi/linux/userfaultfd.h index df0e09bb7dd5..9057d7af3ae1 100644 --- a/include/uapi/linux/userfaultfd.h +++ b/include/uapi/linux/userfaultfd.h @@ -11,8 +11,6 @@ #include <linux/types.h> -#include <linux/compiler.h> - #define UFFD_API ((__u64)0xAA) /* * After implementing the respective features it will become: diff --git a/ipc/msg.c b/ipc/msg.c index 66c4f567eb73..1471db9a7e61 100644 --- a/ipc/msg.c +++ b/ipc/msg.c @@ -137,13 +137,6 @@ static int newque(struct ipc_namespace *ns, struct ipc_params *params) return retval; } - /* ipc_addid() locks msq upon success. */ - id = ipc_addid(&msg_ids(ns), &msq->q_perm, ns->msg_ctlmni); - if (id < 0) { - ipc_rcu_putref(msq, msg_rcu_free); - return id; - } - msq->q_stime = msq->q_rtime = 0; msq->q_ctime = get_seconds(); msq->q_cbytes = msq->q_qnum = 0; @@ -153,6 +146,13 @@ static int newque(struct ipc_namespace *ns, struct ipc_params *params) INIT_LIST_HEAD(&msq->q_receivers); INIT_LIST_HEAD(&msq->q_senders); + /* ipc_addid() locks msq upon success. 
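The ipc/msg.c hunk here, and the ipc/shm.c and ipc/util.c hunks that follow, all enforce one ordering rule: finish initializing an object before ipc_addid() hands out its id, because the id is what makes the object reachable by concurrent lookups. A minimal user-space sketch of the same publish-after-init discipline, with a mutex-protected table standing in for the kernel's idr (the struct and table are illustrative, not kernel types):

    #include <pthread.h>
    #include <stdlib.h>
    #include <string.h>

    #define TABLE_SIZE 64

    struct obj {
            int perm;
            char name[16];
    };

    static struct obj *table[TABLE_SIZE];
    static pthread_mutex_t table_lock = PTHREAD_MUTEX_INITIALIZER;

    /* Returns the published slot id, or -1 if the table is full. */
    static int publish_obj(struct obj *o)
    {
            int id;

            /* Initialize every field first: once the pointer is stored,
             * another thread may look the object up immediately. */
            o->perm = 0600;
            strcpy(o->name, "example");

            pthread_mutex_lock(&table_lock);
            for (id = 0; id < TABLE_SIZE; id++) {
                    if (!table[id]) {
                            table[id] = o;  /* the publication point */
                            break;
                    }
            }
            pthread_mutex_unlock(&table_lock);
            return id < TABLE_SIZE ? id : -1;
    }

    int main(void)
    {
            struct obj *o = calloc(1, sizeof(*o));

            return o ? publish_obj(o) < 0 : 1;
    }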
*/ + id = ipc_addid(&msg_ids(ns), &msq->q_perm, ns->msg_ctlmni); + if (id < 0) { + ipc_rcu_putref(msq, msg_rcu_free); + return id; + } + ipc_unlock_object(&msq->q_perm); rcu_read_unlock(); diff --git a/ipc/shm.c b/ipc/shm.c index 222131e8e38f..41787276e141 100644 --- a/ipc/shm.c +++ b/ipc/shm.c @@ -551,12 +551,6 @@ static int newseg(struct ipc_namespace *ns, struct ipc_params *params) if (IS_ERR(file)) goto no_file; - id = ipc_addid(&shm_ids(ns), &shp->shm_perm, ns->shm_ctlmni); - if (id < 0) { - error = id; - goto no_id; - } - shp->shm_cprid = task_tgid_vnr(current); shp->shm_lprid = 0; shp->shm_atim = shp->shm_dtim = 0; @@ -565,6 +559,13 @@ static int newseg(struct ipc_namespace *ns, struct ipc_params *params) shp->shm_nattch = 0; shp->shm_file = file; shp->shm_creator = current; + + id = ipc_addid(&shm_ids(ns), &shp->shm_perm, ns->shm_ctlmni); + if (id < 0) { + error = id; + goto no_id; + } + list_add(&shp->shm_clist, &current->sysvshm.shm_clist); /* diff --git a/ipc/util.c b/ipc/util.c index be4230020a1f..0f401d94b7c6 100644 --- a/ipc/util.c +++ b/ipc/util.c @@ -237,6 +237,10 @@ int ipc_addid(struct ipc_ids *ids, struct kern_ipc_perm *new, int size) rcu_read_lock(); spin_lock(&new->lock); + current_euid_egid(&euid, &egid); + new->cuid = new->uid = euid; + new->gid = new->cgid = egid; + id = idr_alloc(&ids->ipcs_idr, new, (next_id < 0) ? 0 : ipcid_to_idx(next_id), 0, GFP_NOWAIT); @@ -249,10 +253,6 @@ int ipc_addid(struct ipc_ids *ids, struct kern_ipc_perm *new, int size) ids->in_use++; - current_euid_egid(&euid, &egid); - new->cuid = new->uid = euid; - new->gid = new->cgid = egid; - if (next_id < 0) { new->seq = ids->seq++; if (ids->seq > IPCID_SEQ_MAX) diff --git a/kernel/events/core.c b/kernel/events/core.c index f548f69c4299..b11756f9b6dc 100644 --- a/kernel/events/core.c +++ b/kernel/events/core.c @@ -1243,11 +1243,7 @@ static inline void perf_event__state_init(struct perf_event *event) PERF_EVENT_STATE_INACTIVE; } -/* - * Called at perf_event creation and when events are attached/detached from a - * group. - */ -static void perf_event__read_size(struct perf_event *event) +static void __perf_event_read_size(struct perf_event *event, int nr_siblings) { int entry = sizeof(u64); /* value */ int size = 0; @@ -1263,7 +1259,7 @@ static void perf_event__read_size(struct perf_event *event) entry += sizeof(u64); if (event->attr.read_format & PERF_FORMAT_GROUP) { - nr += event->group_leader->nr_siblings; + nr += nr_siblings; size += sizeof(u64); } @@ -1271,14 +1267,11 @@ static void perf_event__read_size(struct perf_event *event) event->read_size = size; } -static void perf_event__header_size(struct perf_event *event) +static void __perf_event_header_size(struct perf_event *event, u64 sample_type) { struct perf_sample_data *data; - u64 sample_type = event->attr.sample_type; u16 size = 0; - perf_event__read_size(event); - if (sample_type & PERF_SAMPLE_IP) size += sizeof(data->ip); @@ -1303,6 +1296,17 @@ static void perf_event__header_size(struct perf_event *event) event->header_size = size; } +/* + * Called at perf_event creation and when events are attached/detached from a
+ */ +static void perf_event__header_size(struct perf_event *event) +{ + __perf_event_read_size(event, + event->group_leader->nr_siblings); + __perf_event_header_size(event, event->attr.sample_type); +} + static void perf_event__id_header_size(struct perf_event *event) { struct perf_sample_data *data; @@ -1330,6 +1334,27 @@ static void perf_event__id_header_size(struct perf_event *event) event->id_header_size = size; } +static bool perf_event_validate_size(struct perf_event *event) +{ + /* + * The values computed here will be over-written when we actually + * attach the event. + */ + __perf_event_read_size(event, event->group_leader->nr_siblings + 1); + __perf_event_header_size(event, event->attr.sample_type & ~PERF_SAMPLE_READ); + perf_event__id_header_size(event); + + /* + * Sum the lot; should not exceed the 64k limit we have on records. + * Conservative limit to allow for callchains and other variable fields. + */ + if (event->read_size + event->header_size + + event->id_header_size + sizeof(struct perf_event_header) >= 16*1024) + return false; + + return true; +} + static void perf_group_attach(struct perf_event *event) { struct perf_event *group_leader = event->group_leader, *pos; @@ -8297,13 +8322,35 @@ SYSCALL_DEFINE5(perf_event_open, if (move_group) { gctx = group_leader->ctx; + mutex_lock_double(&gctx->mutex, &ctx->mutex); + } else { + mutex_lock(&ctx->mutex); + } + if (!perf_event_validate_size(event)) { + err = -E2BIG; + goto err_locked; + } + + /* + * Must be under the same ctx::mutex as perf_install_in_context(), + * because we need to serialize with concurrent event creation. + */ + if (!exclusive_event_installable(event, ctx)) { + /* exclusive and group stuff are assumed mutually exclusive */ + WARN_ON_ONCE(move_group); + + err = -EBUSY; + goto err_locked; + } + + WARN_ON_ONCE(ctx->parent_ctx); + + if (move_group) { /* * See perf_event_ctx_lock() for comments on the details * of swizzling perf_event::ctx. */ - mutex_lock_double(&gctx->mutex, &ctx->mutex); - perf_remove_from_context(group_leader, false); list_for_each_entry(sibling, &group_leader->sibling_list, @@ -8311,13 +8358,7 @@ SYSCALL_DEFINE5(perf_event_open, perf_remove_from_context(sibling, false); put_ctx(gctx); } - } else { - mutex_lock(&ctx->mutex); - } - WARN_ON_ONCE(ctx->parent_ctx); - - if (move_group) { /* * Wait for everybody to stop referencing the events through * the old lists, before installing it on new lists. @@ -8349,22 +8390,29 @@ SYSCALL_DEFINE5(perf_event_open, perf_event__state_init(group_leader); perf_install_in_context(ctx, group_leader, group_leader->cpu); get_ctx(ctx); - } - if (!exclusive_event_installable(event, ctx)) { - err = -EBUSY; - mutex_unlock(&ctx->mutex); - fput(event_file); - goto err_context; + /* + * Now that all events are installed in @ctx, nothing + * references @gctx anymore, so drop the last reference we have + * on it. + */ + put_ctx(gctx); } + /* + * Precalculate sample_data sizes; do while holding ctx::mutex such + * that we're serialized against further additions and before + * perf_install_in_context() which is the point the event is active and + * can use these values. 
+ */ + perf_event__header_size(event); + perf_event__id_header_size(event); + perf_install_in_context(ctx, event, event->cpu); perf_unpin_context(ctx); - if (move_group) { + if (move_group) mutex_unlock(&gctx->mutex); - put_ctx(gctx); - } mutex_unlock(&ctx->mutex); put_online_cpus(); @@ -8376,12 +8424,6 @@ SYSCALL_DEFINE5(perf_event_open, mutex_unlock(&current->perf_event_mutex); /* - * Precalculate sample_data sizes - */ - perf_event__header_size(event); - perf_event__id_header_size(event); - - /* * Drop the reference on the group_event after placing the * new event on the sibling_list. This ensures destruction * of the group leader will find the pointer to itself in @@ -8391,6 +8433,12 @@ SYSCALL_DEFINE5(perf_event_open, fd_install(event_fd, event_file); return event_fd; +err_locked: + if (move_group) + mutex_unlock(&gctx->mutex); + mutex_unlock(&ctx->mutex); +/* err_file: */ + fput(event_file); err_context: perf_unpin_context(ctx); put_ctx(ctx); diff --git a/kernel/irq/proc.c b/kernel/irq/proc.c index e3a8c9577ba6..a50ddc9417ff 100644 --- a/kernel/irq/proc.c +++ b/kernel/irq/proc.c @@ -12,6 +12,7 @@ #include <linux/seq_file.h> #include <linux/interrupt.h> #include <linux/kernel_stat.h> +#include <linux/mutex.h> #include "internals.h" @@ -323,18 +324,29 @@ void register_handler_proc(unsigned int irq, struct irqaction *action) void register_irq_proc(unsigned int irq, struct irq_desc *desc) { + static DEFINE_MUTEX(register_lock); char name [MAX_NAMELEN]; - if (!root_irq_dir || (desc->irq_data.chip == &no_irq_chip) || desc->dir) + if (!root_irq_dir || (desc->irq_data.chip == &no_irq_chip)) return; + /* + * irq directories are registered only when a handler is + * added, not when the descriptor is created, so multiple + * tasks might try to register at the same time.
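The register_irq_proc() change here is the standard cure for racy one-shot registration: serialize on a static mutex, re-check the already-registered marker (desc->dir) while holding it, and route every exit through a single out_unlock label. A user-space sketch of the same double-checked create, with pthreads standing in for the kernel mutex and a flag for desc->dir:

    #include <pthread.h>
    #include <stdbool.h>

    static pthread_mutex_t register_lock = PTHREAD_MUTEX_INITIALIZER;
    static bool dir_created;        /* stands in for desc->dir */

    static bool create_dir(void)
    {
            return true;            /* illustrative: pretend mkdir succeeded */
    }

    static void register_once(void)
    {
            pthread_mutex_lock(&register_lock);
            if (dir_created)
                    goto out_unlock;        /* another task won the race */
            if (!create_dir())
                    goto out_unlock;
            dir_created = true;
            /* ... create the per-directory entries here ... */
    out_unlock:
            pthread_mutex_unlock(&register_lock);
    }

    int main(void)
    {
            register_once();
            register_once();        /* second call is a harmless no-op */
            return 0;
    }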
+ */ + mutex_lock(®ister_lock); + + if (desc->dir) + goto out_unlock; + memset(name, 0, MAX_NAMELEN); sprintf(name, "%d", irq); /* create /proc/irq/1234 */ desc->dir = proc_mkdir(name, root_irq_dir); if (!desc->dir) - return; + goto out_unlock; #ifdef CONFIG_SMP /* create /proc/irq/<irq>/smp_affinity */ @@ -355,6 +367,9 @@ void register_irq_proc(unsigned int irq, struct irq_desc *desc) proc_create_data("spurious", 0444, desc->dir, &irq_spurious_proc_fops, (void *)(long)irq); + +out_unlock: + mutex_unlock(®ister_lock); } void unregister_irq_proc(unsigned int irq, struct irq_desc *desc) diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c index 8acfbf773e06..4e49cc4c9952 100644 --- a/kernel/locking/lockdep.c +++ b/kernel/locking/lockdep.c @@ -3068,7 +3068,7 @@ static int __lock_is_held(struct lockdep_map *lock); static int __lock_acquire(struct lockdep_map *lock, unsigned int subclass, int trylock, int read, int check, int hardirqs_off, struct lockdep_map *nest_lock, unsigned long ip, - int references) + int references, int pin_count) { struct task_struct *curr = current; struct lock_class *class = NULL; @@ -3157,7 +3157,7 @@ static int __lock_acquire(struct lockdep_map *lock, unsigned int subclass, hlock->waittime_stamp = 0; hlock->holdtime_stamp = lockstat_clock(); #endif - hlock->pin_count = 0; + hlock->pin_count = pin_count; if (check && !mark_irqflags(curr, hlock)) return 0; @@ -3343,7 +3343,7 @@ found_it: hlock_class(hlock)->subclass, hlock->trylock, hlock->read, hlock->check, hlock->hardirqs_off, hlock->nest_lock, hlock->acquire_ip, - hlock->references)) + hlock->references, hlock->pin_count)) return 0; } @@ -3433,7 +3433,7 @@ found_it: hlock_class(hlock)->subclass, hlock->trylock, hlock->read, hlock->check, hlock->hardirqs_off, hlock->nest_lock, hlock->acquire_ip, - hlock->references)) + hlock->references, hlock->pin_count)) return 0; } @@ -3583,7 +3583,7 @@ void lock_acquire(struct lockdep_map *lock, unsigned int subclass, current->lockdep_recursion = 1; trace_lock_acquire(lock, subclass, trylock, read, check, nest_lock, ip); __lock_acquire(lock, subclass, trylock, read, check, - irqs_disabled_flags(flags), nest_lock, ip, 0); + irqs_disabled_flags(flags), nest_lock, ip, 0, 0); current->lockdep_recursion = 0; raw_local_irq_restore(flags); } diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c index 9f75f25cc5d9..775d36cc0050 100644 --- a/kernel/rcu/tree.c +++ b/kernel/rcu/tree.c @@ -3868,6 +3868,7 @@ static void rcu_init_new_rnp(struct rcu_node *rnp_leaf) static void __init rcu_boot_init_percpu_data(int cpu, struct rcu_state *rsp) { + static struct lock_class_key rcu_exp_sched_rdp_class; unsigned long flags; struct rcu_data *rdp = per_cpu_ptr(rsp->rda, cpu); struct rcu_node *rnp = rcu_get_root(rsp); @@ -3883,6 +3884,10 @@ rcu_boot_init_percpu_data(int cpu, struct rcu_state *rsp) mutex_init(&rdp->exp_funnel_mutex); rcu_boot_init_nocb_percpu_data(rdp); raw_spin_unlock_irqrestore(&rnp->lock, flags); + if (rsp == &rcu_sched_state) + lockdep_set_class_and_name(&rdp->exp_funnel_mutex, + &rcu_exp_sched_rdp_class, + "rcu_data_exp_sched"); } /* diff --git a/kernel/sched/core.c b/kernel/sched/core.c index 2f9c92884817..615953141951 100644 --- a/kernel/sched/core.c +++ b/kernel/sched/core.c @@ -4934,7 +4934,15 @@ void init_idle(struct task_struct *idle, int cpu) idle->state = TASK_RUNNING; idle->se.exec_start = sched_clock(); - do_set_cpus_allowed(idle, cpumask_of(cpu)); +#ifdef CONFIG_SMP + /* + * Its possible that init_idle() gets called multiple times on a task, + * in that case 
do_set_cpus_allowed() will not do the right thing. + * + * And since this is boot we can forgo the serialization. + */ + set_cpus_allowed_common(idle, cpumask_of(cpu)); +#endif /* * We're having a chicken and egg problem, even though we are * holding rq->lock, the cpu isn't yet set to this cpu so the @@ -4951,7 +4959,7 @@ void init_idle(struct task_struct *idle, int cpu) rq->curr = rq->idle = idle; idle->on_rq = TASK_ON_RQ_QUEUED; -#if defined(CONFIG_SMP) +#ifdef CONFIG_SMP idle->on_cpu = 1; #endif raw_spin_unlock(&rq->lock); @@ -4966,7 +4974,7 @@ void init_idle(struct task_struct *idle, int cpu) idle->sched_class = &idle_sched_class; ftrace_graph_init_idle_task(idle, cpu); vtime_init_idle(idle, cpu); -#if defined(CONFIG_SMP) +#ifdef CONFIG_SMP sprintf(idle->comm, "%s/%d", INIT_TASK_COMM, cpu); #endif } diff --git a/kernel/time/clocksource.c b/kernel/time/clocksource.c index 841b72f720e8..3a38775b50c2 100644 --- a/kernel/time/clocksource.c +++ b/kernel/time/clocksource.c @@ -217,7 +217,7 @@ static void clocksource_watchdog(unsigned long data) continue; /* Check the deviation from the watchdog clocksource. */ - if ((abs(cs_nsec - wd_nsec) > WATCHDOG_THRESHOLD)) { + if (abs64(cs_nsec - wd_nsec) > WATCHDOG_THRESHOLD) { pr_warn("timekeeping watchdog: Marking clocksource '%s' as unstable because the skew is too large:\n", cs->name); pr_warn(" '%s' wd_now: %llx wd_last: %llx mask: %llx\n", diff --git a/lib/string.c b/lib/string.c index 13d1e84ddb80..8dbb7b1eab50 100644 --- a/lib/string.c +++ b/lib/string.c @@ -27,6 +27,10 @@ #include <linux/bug.h> #include <linux/errno.h> +#include <asm/byteorder.h> +#include <asm/word-at-a-time.h> +#include <asm/page.h> + #ifndef __HAVE_ARCH_STRNCASECMP /** * strncasecmp - Case insensitive, length-limited string comparison @@ -146,6 +150,90 @@ size_t strlcpy(char *dest, const char *src, size_t size) EXPORT_SYMBOL(strlcpy); #endif +#ifndef __HAVE_ARCH_STRSCPY +/** + * strscpy - Copy a C-string into a sized buffer + * @dest: Where to copy the string to + * @src: Where to copy the string from + * @count: Size of destination buffer + * + * Copy the string, or as much of it as fits, into the dest buffer. + * The routine returns the number of characters copied (not including + * the trailing NUL) or -E2BIG if the destination buffer wasn't big enough. + * The behavior is undefined if the string buffers overlap. + * The destination buffer is always NUL terminated, unless it's zero-sized. + * + * Preferred to strlcpy() since the API doesn't require reading memory + * from the src string beyond the specified "count" bytes, and since + * the return value is easier to error-check than strlcpy()'s. + * In addition, the implementation is robust to the string changing out + * from underneath it, unlike the current strlcpy() implementation. + * + * Preferred to strncpy() since it always returns a valid string, and + * doesn't unnecessarily force the tail of the destination buffer to be + * zeroed. If the zeroing is desired, it's likely cleaner to use strscpy() + * with an overflow test, then just memset() the tail of the dest buffer. + */ +ssize_t strscpy(char *dest, const char *src, size_t count) +{ + const struct word_at_a_time constants = WORD_AT_A_TIME_CONSTANTS; + size_t max = count; + long res = 0; + + if (count == 0) + return -E2BIG; + +#ifdef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS + /* + * If src is unaligned, don't cross a page boundary, + * since we don't know if the next page is mapped. 
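The contract documented above is the point of the new API: the return value is either the copied length or -E2BIG, and dest is NUL-terminated whenever count is nonzero, so callers get truncation detection and a usable string from a single call. A user-space sketch of checking that contract; the byte-at-a-time stand-in keeps strscpy()'s return convention but none of the word-at-a-time machinery shown here:

    #include <errno.h>
    #include <stdio.h>
    #include <sys/types.h>

    /* Simplified stand-in that follows strscpy()'s return convention. */
    static ssize_t strscpy_demo(char *dest, const char *src, size_t count)
    {
            size_t i;

            if (count == 0)
                    return -E2BIG;
            for (i = 0; i < count; i++) {
                    dest[i] = src[i];
                    if (!src[i])
                            return i;       /* copied length, NUL excluded */
            }
            dest[count - 1] = '\0';         /* truncated: force termination */
            return -E2BIG;
    }

    int main(void)
    {
            char buf[8];
            ssize_t ret = strscpy_demo(buf, "much-too-long-string", sizeof(buf));

            if (ret == -E2BIG)
                    printf("truncated to \"%s\"\n", buf);
            return 0;
    }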
+ */ + if ((long)src & (sizeof(long) - 1)) { + size_t limit = PAGE_SIZE - ((long)src & (PAGE_SIZE - 1)); + if (limit < max) + max = limit; + } +#else + /* If src or dest is unaligned, don't do word-at-a-time. */ + if (((long) dest | (long) src) & (sizeof(long) - 1)) + max = 0; +#endif + + while (max >= sizeof(unsigned long)) { + unsigned long c, data; + + c = *(unsigned long *)(src+res); + *(unsigned long *)(dest+res) = c; + if (has_zero(c, &data, &constants)) { + data = prep_zero_mask(c, data, &constants); + data = create_zero_mask(data); + return res + find_zero(data); + } + res += sizeof(unsigned long); + count -= sizeof(unsigned long); + max -= sizeof(unsigned long); + } + + while (count) { + char c; + + c = src[res]; + dest[res] = c; + if (!c) + return res; + res++; + count--; + } + + /* Hit buffer length without finding a NUL; force NUL-termination. */ + if (res) + dest[res-1] = '\0'; + + return -E2BIG; +} +EXPORT_SYMBOL(strscpy); +#endif + #ifndef __HAVE_ARCH_STRCAT /** * strcat - Append one %NUL-terminated string to another diff --git a/mm/dmapool.c b/mm/dmapool.c index 71a8998cd03a..312a716fa14c 100644 --- a/mm/dmapool.c +++ b/mm/dmapool.c @@ -394,7 +394,7 @@ static struct dma_page *pool_find_page(struct dma_pool *pool, dma_addr_t dma) list_for_each_entry(page, &pool->page_list, page_list) { if (dma < page->dma) continue; - if (dma < (page->dma + pool->allocation)) + if ((dma - page->dma) < pool->allocation) return page; } return NULL; diff --git a/mm/hugetlb.c b/mm/hugetlb.c index 999fb0aef8f1..9cc773483624 100644 --- a/mm/hugetlb.c +++ b/mm/hugetlb.c @@ -3202,6 +3202,14 @@ static void unmap_ref_private(struct mm_struct *mm, struct vm_area_struct *vma, continue; /* + * Shared VMAs have their own reserves and do not affect + * MAP_PRIVATE accounting but it is possible that a shared + * VMA is using the same page so check and skip such VMAs. + */ + if (iter_vma->vm_flags & VM_MAYSHARE) + continue; + + /* * Unmap the page from other VMAs without their own reserves. * They get marked to be SIGKILLed if they fault in these * areas. This is because a future no-page fault on this VMA diff --git a/mm/memcontrol.c b/mm/memcontrol.c index 6ddaeba34e09..1fedbde68f59 100644 --- a/mm/memcontrol.c +++ b/mm/memcontrol.c @@ -644,12 +644,14 @@ mem_cgroup_largest_soft_limit_node(struct mem_cgroup_tree_per_zone *mctz) } /* + * Return page count for single (non recursive) @memcg. + * * Implementation Note: reading percpu statistics for memcg. * * Both of vmstat[] and percpu_counter has threshold and do periodic * synchronization to implement "quick" read. There are trade-off between * reading cost and precision of value. Then, we may have a chance to implement - * a periodic synchronizion of counter in memcg's counter. + * a periodic synchronization of counter in memcg's counter. * * But this _read() function is used for user interface now. The user accounts * memory usage by memory cgroup and he _always_ requires exact value because @@ -659,17 +661,24 @@ mem_cgroup_largest_soft_limit_node(struct mem_cgroup_tree_per_zone *mctz) * * If there are kernel internal actions which can make use of some not-exact * value, and reading all cpu value can be performance bottleneck in some - * common workload, threashold and synchonization as vmstat[] should be + * common workload, threshold and synchronization as vmstat[] should be * implemented. 
*/ -static long mem_cgroup_read_stat(struct mem_cgroup *memcg, - enum mem_cgroup_stat_index idx) +static unsigned long +mem_cgroup_read_stat(struct mem_cgroup *memcg, enum mem_cgroup_stat_index idx) { long val = 0; int cpu; + /* Per-cpu values can be negative, use a signed accumulator */ for_each_possible_cpu(cpu) val += per_cpu(memcg->stat->count[idx], cpu); + /* + * Summing races with updates, so val may be negative. Avoid exposing + * transient negative values. + */ + if (val < 0) + val = 0; return val; } @@ -1254,7 +1263,7 @@ void mem_cgroup_print_oom_info(struct mem_cgroup *memcg, struct task_struct *p) for (i = 0; i < MEM_CGROUP_STAT_NSTATS; i++) { if (i == MEM_CGROUP_STAT_SWAP && !do_swap_account) continue; - pr_cont(" %s:%ldKB", mem_cgroup_stat_names[i], + pr_cont(" %s:%luKB", mem_cgroup_stat_names[i], K(mem_cgroup_read_stat(iter, i))); } @@ -2819,14 +2828,11 @@ static unsigned long tree_stat(struct mem_cgroup *memcg, enum mem_cgroup_stat_index idx) { struct mem_cgroup *iter; - long val = 0; + unsigned long val = 0; - /* Per-cpu values can be negative, use a signed accumulator */ for_each_mem_cgroup_tree(iter, memcg) val += mem_cgroup_read_stat(iter, idx); - if (val < 0) /* race ? */ - val = 0; return val; } @@ -3169,7 +3175,7 @@ static int memcg_stat_show(struct seq_file *m, void *v) for (i = 0; i < MEM_CGROUP_STAT_NSTATS; i++) { if (i == MEM_CGROUP_STAT_SWAP && !do_swap_account) continue; - seq_printf(m, "%s %ld\n", mem_cgroup_stat_names[i], + seq_printf(m, "%s %lu\n", mem_cgroup_stat_names[i], mem_cgroup_read_stat(memcg, i) * PAGE_SIZE); } @@ -3194,13 +3200,13 @@ static int memcg_stat_show(struct seq_file *m, void *v) (u64)memsw * PAGE_SIZE); for (i = 0; i < MEM_CGROUP_STAT_NSTATS; i++) { - long long val = 0; + unsigned long long val = 0; if (i == MEM_CGROUP_STAT_SWAP && !do_swap_account) continue; for_each_mem_cgroup_tree(mi, memcg) val += mem_cgroup_read_stat(mi, i) * PAGE_SIZE; - seq_printf(m, "total_%s %lld\n", mem_cgroup_stat_names[i], val); + seq_printf(m, "total_%s %llu\n", mem_cgroup_stat_names[i], val); } for (i = 0; i < MEM_CGROUP_EVENTS_NSTATS; i++) { @@ -4179,7 +4185,6 @@ static struct mem_cgroup *mem_cgroup_alloc(void) if (memcg_wb_domain_init(memcg, GFP_KERNEL)) goto out_free_stat; - spin_lock_init(&memcg->pcp_counter_lock); return memcg; out_free_stat: diff --git a/mm/migrate.c b/mm/migrate.c index 7452a00bbb50..842ecd7aaf7f 100644 --- a/mm/migrate.c +++ b/mm/migrate.c @@ -740,6 +740,15 @@ static int move_to_new_page(struct page *newpage, struct page *page, if (PageSwapBacked(page)) SetPageSwapBacked(newpage); + /* + * Indirectly called below, migrate_page_copy() copies PG_dirty and thus + * needs newpage's memcg set to transfer memcg dirty page accounting. + * So perform memcg migration in two steps: + * 1. set newpage->mem_cgroup (here) + * 2. 
clear page->mem_cgroup (below) + */ + set_page_memcg(newpage, page_memcg(page)); + mapping = page_mapping(page); if (!mapping) rc = migrate_page(mapping, newpage, page, mode); @@ -756,9 +765,10 @@ static int move_to_new_page(struct page *newpage, struct page *page, rc = fallback_migrate_page(mapping, newpage, page, mode); if (rc != MIGRATEPAGE_SUCCESS) { + set_page_memcg(newpage, NULL); newpage->mapping = NULL; } else { - mem_cgroup_migrate(page, newpage, false); + set_page_memcg(page, NULL); if (page_was_mapped) remove_migration_ptes(page, newpage); page->mapping = NULL; diff --git a/mm/slab.c b/mm/slab.c index c77ebe6cc87c..4fcc5dd8d5a6 100644 --- a/mm/slab.c +++ b/mm/slab.c @@ -2190,9 +2190,16 @@ __kmem_cache_create (struct kmem_cache *cachep, unsigned long flags) size += BYTES_PER_WORD; } #if FORCED_DEBUG && defined(CONFIG_DEBUG_PAGEALLOC) - if (size >= kmalloc_size(INDEX_NODE + 1) - && cachep->object_size > cache_line_size() - && ALIGN(size, cachep->align) < PAGE_SIZE) { + /* + * To activate debug pagealloc, off-slab management is a necessary + * requirement. In the early phase of initialization, small sized slabs + * don't get initialized, so it would not be possible. So, we need + * to check size >= 256. It guarantees that all necessary small + * sized slabs are initialized in the current slab initialization sequence. + */ + if (!slab_early_init && size >= kmalloc_size(INDEX_NODE) && + size >= 256 && cachep->object_size > cache_line_size() && + ALIGN(size, cachep->align) < PAGE_SIZE) { cachep->obj_offset += PAGE_SIZE - ALIGN(size, cachep->align); size = PAGE_SIZE; } diff --git a/net/core/net-sysfs.c b/net/core/net-sysfs.c index 805a95a48107..830f8a7c1cb1 100644 --- a/net/core/net-sysfs.c +++ b/net/core/net-sysfs.c @@ -31,7 +31,6 @@ static const char fmt_hex[] = "%#x\n"; static const char fmt_long_hex[] = "%#lx\n"; static const char fmt_dec[] = "%d\n"; -static const char fmt_udec[] = "%u\n"; static const char fmt_ulong[] = "%lu\n"; static const char fmt_u64[] = "%llu\n"; @@ -202,7 +201,7 @@ static ssize_t speed_show(struct device *dev, if (netif_running(netdev)) { struct ethtool_cmd cmd; if (!__ethtool_get_settings(netdev, &cmd)) - ret = sprintf(buf, fmt_udec, ethtool_cmd_speed(&cmd)); + ret = sprintf(buf, fmt_dec, ethtool_cmd_speed(&cmd)); } rtnl_unlock(); return ret; diff --git a/net/core/skbuff.c b/net/core/skbuff.c index dad4dd37e2aa..fab4599ba8b2 100644 --- a/net/core/skbuff.c +++ b/net/core/skbuff.c @@ -2958,11 +2958,12 @@ EXPORT_SYMBOL_GPL(skb_append_pagefrags); */ unsigned char *skb_pull_rcsum(struct sk_buff *skb, unsigned int len) { + unsigned char *data = skb->data; + BUG_ON(len > skb->len); - skb->len -= len; - BUG_ON(skb->len < skb->data_len); - skb_postpull_rcsum(skb, skb->data, len); - return skb->data += len; + __skb_pull(skb, len); + skb_postpull_rcsum(skb, data, len); + return skb->data; } EXPORT_SYMBOL_GPL(skb_pull_rcsum); diff --git a/net/dsa/slave.c b/net/dsa/slave.c index cce97385f743..7d91f4612ac0 100644 --- a/net/dsa/slave.c +++ b/net/dsa/slave.c @@ -458,12 +458,17 @@ static int dsa_slave_stp_update(struct net_device *dev, u8 state) static int dsa_slave_port_attr_set(struct net_device *dev, struct switchdev_attr *attr) { - int ret = 0; + struct dsa_slave_priv *p = netdev_priv(dev); + struct dsa_switch *ds = p->parent; + int ret; switch (attr->id) { case SWITCHDEV_ATTR_PORT_STP_STATE: - if (attr->trans == SWITCHDEV_TRANS_COMMIT) - ret = dsa_slave_stp_update(dev, attr->u.stp_state); + if (attr->trans == SWITCHDEV_TRANS_PREPARE) + ret = ds->drv->port_stp_update ?
0 : -EOPNOTSUPP; + else + ret = ds->drv->port_stp_update(ds, p->port, + attr->u.stp_state); break; default: ret = -EOPNOTSUPP; diff --git a/net/ipv4/fib_frontend.c b/net/ipv4/fib_frontend.c index 6fcbd215cdbc..690bcbc59f26 100644 --- a/net/ipv4/fib_frontend.c +++ b/net/ipv4/fib_frontend.c @@ -340,6 +340,7 @@ static int __fib_validate_source(struct sk_buff *skb, __be32 src, __be32 dst, fl4.flowi4_tos = tos; fl4.flowi4_scope = RT_SCOPE_UNIVERSE; fl4.flowi4_tun_key.tun_id = 0; + fl4.flowi4_flags = 0; no_addr = idev->ifa_list == NULL; diff --git a/net/ipv4/route.c b/net/ipv4/route.c index c6ad99ad0ffb..c81deb85acb4 100644 --- a/net/ipv4/route.c +++ b/net/ipv4/route.c @@ -1737,6 +1737,7 @@ static int ip_route_input_slow(struct sk_buff *skb, __be32 daddr, __be32 saddr, fl4.flowi4_mark = skb->mark; fl4.flowi4_tos = tos; fl4.flowi4_scope = RT_SCOPE_UNIVERSE; + fl4.flowi4_flags = 0; fl4.daddr = daddr; fl4.saddr = saddr; err = fib_lookup(net, &fl4, &res, 0); diff --git a/net/ipv6/route.c b/net/ipv6/route.c index f204089e854c..cb32ce250db0 100644 --- a/net/ipv6/route.c +++ b/net/ipv6/route.c @@ -1193,7 +1193,8 @@ struct dst_entry *ip6_route_output(struct net *net, const struct sock *sk, fl6->flowi6_iif = LOOPBACK_IFINDEX; - if ((sk && sk->sk_bound_dev_if) || rt6_need_strict(&fl6->daddr)) + if ((sk && sk->sk_bound_dev_if) || rt6_need_strict(&fl6->daddr) || + fl6->flowi6_oif) flags |= RT6_LOOKUP_F_IFACE; if (!ipv6_addr_any(&fl6->saddr)) diff --git a/net/l2tp/l2tp_core.c b/net/l2tp/l2tp_core.c index f6b090df3930..afca2eb4dfa7 100644 --- a/net/l2tp/l2tp_core.c +++ b/net/l2tp/l2tp_core.c @@ -1319,7 +1319,7 @@ static void l2tp_tunnel_del_work(struct work_struct *work) tunnel = container_of(work, struct l2tp_tunnel, del_work); sk = l2tp_tunnel_sock_lookup(tunnel); if (!sk) - return; + goto out; sock = sk->sk_socket; @@ -1341,6 +1341,8 @@ static void l2tp_tunnel_del_work(struct work_struct *work) } l2tp_tunnel_sock_put(sk); +out: + l2tp_tunnel_dec_refcount(tunnel); } /* Create a socket for the tunnel, if one isn't set up by @@ -1636,8 +1638,13 @@ EXPORT_SYMBOL_GPL(l2tp_tunnel_create); */ int l2tp_tunnel_delete(struct l2tp_tunnel *tunnel) { + l2tp_tunnel_inc_refcount(tunnel); l2tp_tunnel_closeall(tunnel); - return (false == queue_work(l2tp_wq, &tunnel->del_work)); + if (false == queue_work(l2tp_wq, &tunnel->del_work)) { + l2tp_tunnel_dec_refcount(tunnel); + return 1; + } + return 0; } EXPORT_SYMBOL_GPL(l2tp_tunnel_delete); diff --git a/net/sctp/associola.c b/net/sctp/associola.c index 197c3f59ecbf..b00f1f9611d6 100644 --- a/net/sctp/associola.c +++ b/net/sctp/associola.c @@ -1208,20 +1208,22 @@ void sctp_assoc_update(struct sctp_association *asoc, * within this document. * * Our basic strategy is to round-robin transports in priorities - * according to sctp_state_prio_map[] e.g., if no such + * according to sctp_trans_score() e.g., if no such * transport with state SCTP_ACTIVE exists, round-robin through * SCTP_UNKNOWN, etc. You get the picture. 
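Replacing the state-indexed sctp_trans_state_to_prio_map[] with the switch below means an unexpected trans->state can no longer index past the end of the array; anything unrecognized simply lands in the worst-case bucket. A small sketch of electing between two transports by such a score (the enum and names are illustrative stand-ins, not the SCTP definitions):

    #include <stdio.h>

    enum trans_state { T_INACTIVE, T_PF, T_UNKNOWN, T_ACTIVE };

    struct transport { enum trans_state state; const char *name; };

    static unsigned char trans_score(const struct transport *t)
    {
            switch (t->state) {
            case T_ACTIVE:  return 3;       /* best case */
            case T_UNKNOWN: return 2;
            case T_PF:      return 1;
            default:        return 0;       /* worst case, incl. T_INACTIVE */
            }
    }

    static const struct transport *elect(const struct transport *a,
                                         const struct transport *b)
    {
            return trans_score(a) >= trans_score(b) ? a : b;
    }

    int main(void)
    {
            struct transport t1 = { T_PF, "t1" }, t2 = { T_ACTIVE, "t2" };

            printf("elected %s\n", elect(&t1, &t2)->name);
            return 0;
    }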
*/ -static const u8 sctp_trans_state_to_prio_map[] = { - [SCTP_ACTIVE] = 3, /* best case */ - [SCTP_UNKNOWN] = 2, - [SCTP_PF] = 1, - [SCTP_INACTIVE] = 0, /* worst case */ -}; - static u8 sctp_trans_score(const struct sctp_transport *trans) { - return sctp_trans_state_to_prio_map[trans->state]; + switch (trans->state) { + case SCTP_ACTIVE: + return 3; /* best case */ + case SCTP_UNKNOWN: + return 2; + case SCTP_PF: + return 1; + default: /* case SCTP_INACTIVE */ + return 0; /* worst case */ + } } static struct sctp_transport *sctp_trans_elect_tie(struct sctp_transport *trans1, diff --git a/net/sctp/sm_sideeffect.c b/net/sctp/sm_sideeffect.c index 35df1266bf07..6098d4c42fa9 100644 --- a/net/sctp/sm_sideeffect.c +++ b/net/sctp/sm_sideeffect.c @@ -244,12 +244,13 @@ void sctp_generate_t3_rtx_event(unsigned long peer) int error; struct sctp_transport *transport = (struct sctp_transport *) peer; struct sctp_association *asoc = transport->asoc; - struct net *net = sock_net(asoc->base.sk); + struct sock *sk = asoc->base.sk; + struct net *net = sock_net(sk); /* Check whether a task is in the sock. */ - bh_lock_sock(asoc->base.sk); - if (sock_owned_by_user(asoc->base.sk)) { + bh_lock_sock(sk); + if (sock_owned_by_user(sk)) { pr_debug("%s: sock is busy\n", __func__); /* Try again later. */ @@ -272,10 +273,10 @@ void sctp_generate_t3_rtx_event(unsigned long peer) transport, GFP_ATOMIC); if (error) - asoc->base.sk->sk_err = -error; + sk->sk_err = -error; out_unlock: - bh_unlock_sock(asoc->base.sk); + bh_unlock_sock(sk); sctp_transport_put(transport); } @@ -285,11 +286,12 @@ out_unlock: static void sctp_generate_timeout_event(struct sctp_association *asoc, sctp_event_timeout_t timeout_type) { - struct net *net = sock_net(asoc->base.sk); + struct sock *sk = asoc->base.sk; + struct net *net = sock_net(sk); int error = 0; - bh_lock_sock(asoc->base.sk); - if (sock_owned_by_user(asoc->base.sk)) { + bh_lock_sock(sk); + if (sock_owned_by_user(sk)) { pr_debug("%s: sock is busy: timer %d\n", __func__, timeout_type); @@ -312,10 +314,10 @@ static void sctp_generate_timeout_event(struct sctp_association *asoc, (void *)timeout_type, GFP_ATOMIC); if (error) - asoc->base.sk->sk_err = -error; + sk->sk_err = -error; out_unlock: - bh_unlock_sock(asoc->base.sk); + bh_unlock_sock(sk); sctp_association_put(asoc); } @@ -365,10 +367,11 @@ void sctp_generate_heartbeat_event(unsigned long data) int error = 0; struct sctp_transport *transport = (struct sctp_transport *) data; struct sctp_association *asoc = transport->asoc; - struct net *net = sock_net(asoc->base.sk); + struct sock *sk = asoc->base.sk; + struct net *net = sock_net(sk); - bh_lock_sock(asoc->base.sk); - if (sock_owned_by_user(asoc->base.sk)) { + bh_lock_sock(sk); + if (sock_owned_by_user(sk)) { pr_debug("%s: sock is busy\n", __func__); /* Try again later. 
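Every one of these sctp timer handlers has the same shape: take the socket lock from timer context and, if user space currently owns the socket, re-arm the timer and bail out rather than run the state machine concurrently; caching asoc->base.sk in a local sk just removes the repeated pointer chasing. A compact user-space sketch of that busy/retry shape, with a trylock standing in for the bh_lock_sock()/sock_owned_by_user() pair:

    #include <pthread.h>

    static pthread_mutex_t sock_lock = PTHREAD_MUTEX_INITIALIZER;

    static void rearm_timer(void)       { /* illustrative: re-queue ourselves */ }
    static void run_state_machine(void) { /* illustrative: the real work */ }

    static void timer_handler(void)
    {
            /* If the "socket" is busy, don't run now; try again later. */
            if (pthread_mutex_trylock(&sock_lock) != 0) {
                    rearm_timer();
                    return;
            }
            run_state_machine();
            pthread_mutex_unlock(&sock_lock);
    }

    int main(void)
    {
            timer_handler();
            return 0;
    }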
*/ @@ -388,11 +391,11 @@ void sctp_generate_heartbeat_event(unsigned long data) asoc->state, asoc->ep, asoc, transport, GFP_ATOMIC); - if (error) - asoc->base.sk->sk_err = -error; + if (error) + sk->sk_err = -error; out_unlock: - bh_unlock_sock(asoc->base.sk); + bh_unlock_sock(sk); sctp_transport_put(transport); } @@ -403,10 +406,11 @@ void sctp_generate_proto_unreach_event(unsigned long data) { struct sctp_transport *transport = (struct sctp_transport *) data; struct sctp_association *asoc = transport->asoc; - struct net *net = sock_net(asoc->base.sk); + struct sock *sk = asoc->base.sk; + struct net *net = sock_net(sk); - bh_lock_sock(asoc->base.sk); - if (sock_owned_by_user(asoc->base.sk)) { + bh_lock_sock(sk); + if (sock_owned_by_user(sk)) { pr_debug("%s: sock is busy\n", __func__); /* Try again later. */ @@ -427,7 +431,7 @@ void sctp_generate_proto_unreach_event(unsigned long data) asoc->state, asoc->ep, asoc, transport, GFP_ATOMIC); out_unlock: - bh_unlock_sock(asoc->base.sk); + bh_unlock_sock(sk); sctp_association_put(asoc); } diff --git a/net/sunrpc/xprtrdma/fmr_ops.c b/net/sunrpc/xprtrdma/fmr_ops.c index cb25c89da623..f1e8dafbd507 100644 --- a/net/sunrpc/xprtrdma/fmr_ops.c +++ b/net/sunrpc/xprtrdma/fmr_ops.c @@ -39,25 +39,6 @@ static int fmr_op_open(struct rpcrdma_ia *ia, struct rpcrdma_ep *ep, struct rpcrdma_create_data_internal *cdata) { - struct ib_device_attr *devattr = &ia->ri_devattr; - struct ib_mr *mr; - - /* Obtain an lkey to use for the regbufs, which are - * protected from remote access. - */ - if (devattr->device_cap_flags & IB_DEVICE_LOCAL_DMA_LKEY) { - ia->ri_dma_lkey = ia->ri_device->local_dma_lkey; - } else { - mr = ib_get_dma_mr(ia->ri_pd, IB_ACCESS_LOCAL_WRITE); - if (IS_ERR(mr)) { - pr_err("%s: ib_get_dma_mr for failed with %lX\n", - __func__, PTR_ERR(mr)); - return -ENOMEM; - } - ia->ri_dma_lkey = ia->ri_dma_mr->lkey; - ia->ri_dma_mr = mr; - } - return 0; } diff --git a/net/sunrpc/xprtrdma/frwr_ops.c b/net/sunrpc/xprtrdma/frwr_ops.c index d6653f5d0830..5318951b3b53 100644 --- a/net/sunrpc/xprtrdma/frwr_ops.c +++ b/net/sunrpc/xprtrdma/frwr_ops.c @@ -189,11 +189,6 @@ frwr_op_open(struct rpcrdma_ia *ia, struct rpcrdma_ep *ep, struct ib_device_attr *devattr = &ia->ri_devattr; int depth, delta; - /* Obtain an lkey to use for the regbufs, which are - * protected from remote access. - */ - ia->ri_dma_lkey = ia->ri_device->local_dma_lkey; - ia->ri_max_frmr_depth = min_t(unsigned int, RPCRDMA_MAX_DATA_SEGS, devattr->max_fast_reg_page_list_len); diff --git a/net/sunrpc/xprtrdma/physical_ops.c b/net/sunrpc/xprtrdma/physical_ops.c index 72cf8b15bbb4..617b76f22154 100644 --- a/net/sunrpc/xprtrdma/physical_ops.c +++ b/net/sunrpc/xprtrdma/physical_ops.c @@ -23,7 +23,6 @@ static int physical_op_open(struct rpcrdma_ia *ia, struct rpcrdma_ep *ep, struct rpcrdma_create_data_internal *cdata) { - struct ib_device_attr *devattr = &ia->ri_devattr; struct ib_mr *mr; /* Obtain an rkey to use for RPC data payloads. @@ -37,15 +36,8 @@ physical_op_open(struct rpcrdma_ia *ia, struct rpcrdma_ep *ep, __func__, PTR_ERR(mr)); return -ENOMEM; } - ia->ri_dma_mr = mr; - - /* Obtain an lkey to use for regbufs. 
- */ - if (devattr->device_cap_flags & IB_DEVICE_LOCAL_DMA_LKEY) - ia->ri_dma_lkey = ia->ri_device->local_dma_lkey; - else - ia->ri_dma_lkey = ia->ri_dma_mr->lkey; + ia->ri_dma_mr = mr; return 0; } diff --git a/net/sunrpc/xprtrdma/verbs.c b/net/sunrpc/xprtrdma/verbs.c index 682996779970..eb081ad05e33 100644 --- a/net/sunrpc/xprtrdma/verbs.c +++ b/net/sunrpc/xprtrdma/verbs.c @@ -1252,7 +1252,7 @@ rpcrdma_alloc_regbuf(struct rpcrdma_ia *ia, size_t size, gfp_t flags) goto out_free; iov->length = size; - iov->lkey = ia->ri_dma_lkey; + iov->lkey = ia->ri_pd->local_dma_lkey; rb->rg_size = size; rb->rg_owner = NULL; return rb; diff --git a/net/sunrpc/xprtrdma/xprt_rdma.h b/net/sunrpc/xprtrdma/xprt_rdma.h index 02512221b8bc..c09414e6f91b 100644 --- a/net/sunrpc/xprtrdma/xprt_rdma.h +++ b/net/sunrpc/xprtrdma/xprt_rdma.h @@ -65,7 +65,6 @@ struct rpcrdma_ia { struct rdma_cm_id *ri_id; struct ib_pd *ri_pd; struct ib_mr *ri_dma_mr; - u32 ri_dma_lkey; struct completion ri_done; int ri_async_rc; unsigned int ri_max_frmr_depth; diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c index 03ee4d359f6a..ef31b40ad550 100644 --- a/net/unix/af_unix.c +++ b/net/unix/af_unix.c @@ -2179,8 +2179,21 @@ unlock: if (UNIXCB(skb).fp) scm.fp = scm_fp_dup(UNIXCB(skb).fp); - sk_peek_offset_fwd(sk, chunk); + if (skip) { + sk_peek_offset_fwd(sk, chunk); + skip -= chunk; + } + if (UNIXCB(skb).fp) + break; + + last = skb; + last_len = skb->len; + unix_state_lock(sk); + skb = skb_peek_next(skb, &sk->sk_receive_queue); + if (skb) + goto again; + unix_state_unlock(sk); break; } } while (size); diff --git a/samples/kprobes/jprobe_example.c b/samples/kprobes/jprobe_example.c index 9119ac6a8270..c285a3b8a9f1 100644 --- a/samples/kprobes/jprobe_example.c +++ b/samples/kprobes/jprobe_example.c @@ -1,13 +1,13 @@ /* * Here's a sample kernel module showing the use of jprobes to dump - * the arguments of do_fork(). + * the arguments of _do_fork(). * * For more information on theory of operation of jprobes, see * Documentation/kprobes.txt * * Build and insert the kernel module as done in the kprobe example. * You will see the trace data in /var/log/messages and on the - * console whenever do_fork() is invoked to create a new process. + * console whenever _do_fork() is invoked to create a new process. * (Some messages may be suppressed if syslogd is configured to * eliminate duplicate messages.) */ @@ -17,13 +17,13 @@ #include <linux/kprobes.h> /* - * Jumper probe for do_fork. + * Jumper probe for _do_fork. * Mirror principle enables access to arguments of the probed routine * from the probe handler. */ -/* Proxy routine having the same arguments as actual do_fork() routine */ -static long jdo_fork(unsigned long clone_flags, unsigned long stack_start, +/* Proxy routine having the same arguments as actual _do_fork() routine */ +static long j_do_fork(unsigned long clone_flags, unsigned long stack_start, unsigned long stack_size, int __user *parent_tidptr, int __user *child_tidptr) { @@ -36,9 +36,9 @@ static long jdo_fork(unsigned long clone_flags, unsigned long stack_start, } static struct jprobe my_jprobe = { - .entry = jdo_fork, + .entry = j_do_fork, .kp = { - .symbol_name = "do_fork", + .symbol_name = "_do_fork", }, }; diff --git a/samples/kprobes/kprobe_example.c b/samples/kprobes/kprobe_example.c index 366db1a9fb65..727eb21c9c56 100644 --- a/samples/kprobes/kprobe_example.c +++ b/samples/kprobes/kprobe_example.c @@ -1,13 +1,13 @@ /* * NOTE: This example is works on x86 and powerpc. 
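All three kprobes samples are updated because do_fork() was renamed _do_fork(); probing the stale name makes register_kprobe() fail when the module is loaded. A small user-space check that the target symbol exists before insmod; it reads /proc/kallsyms (which needs appropriate privileges) and assumes a core-kernel symbol line of the form "address type name":

    #include <stdio.h>
    #include <string.h>

    int main(void)
    {
            char line[256];
            FILE *f = fopen("/proc/kallsyms", "r");

            if (!f)
                    return 1;
            while (fgets(line, sizeof(line), f)) {
                    /* Fields are "address type name"; match the name. */
                    char *sym = strrchr(line, ' ');

                    if (sym && strcmp(sym + 1, "_do_fork\n") == 0) {
                            puts("found _do_fork");
                            fclose(f);
                            return 0;
                    }
            }
            fclose(f);
            puts("_do_fork not found");
            return 1;
    }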
* Here's a sample kernel module showing the use of kprobes to dump a - * stack trace and selected registers when do_fork() is called. + * stack trace and selected registers when _do_fork() is called. * * For more information on theory of operation of kprobes, see * Documentation/kprobes.txt * * You will see the trace data in /var/log/messages and on the console - * whenever do_fork() is invoked to create a new process. + * whenever _do_fork() is invoked to create a new process. */ #include <linux/kernel.h> @@ -16,7 +16,7 @@ /* For each probe you need to allocate a kprobe structure */ static struct kprobe kp = { - .symbol_name = "do_fork", + .symbol_name = "_do_fork", }; /* kprobe pre_handler: called just before the probed instruction is executed */ diff --git a/samples/kprobes/kretprobe_example.c b/samples/kprobes/kretprobe_example.c index 1041b6731598..ebb1d1aed547 100644 --- a/samples/kprobes/kretprobe_example.c +++ b/samples/kprobes/kretprobe_example.c @@ -7,7 +7,7 @@ * * usage: insmod kretprobe_example.ko func=<func_name> * - * If no func_name is specified, do_fork is instrumented + * If no func_name is specified, _do_fork is instrumented * * For more information on theory of operation of kretprobes, see * Documentation/kprobes.txt @@ -25,7 +25,7 @@ #include <linux/limits.h> #include <linux/sched.h> -static char func_name[NAME_MAX] = "do_fork"; +static char func_name[NAME_MAX] = "_do_fork"; module_param_string(func, func_name, NAME_MAX, S_IRUGO); MODULE_PARM_DESC(func, "Function to kretprobe; this module will report the" " function's execution time"); diff --git a/scripts/extract-cert.c b/scripts/extract-cert.c index 6ce5945a0b89..b071bf476fea 100644 --- a/scripts/extract-cert.c +++ b/scripts/extract-cert.c @@ -17,13 +17,9 @@ #include <stdint.h> #include <stdbool.h> #include <string.h> -#include <getopt.h> #include <err.h> -#include <arpa/inet.h> #include <openssl/bio.h> -#include <openssl/evp.h> #include <openssl/pem.h> -#include <openssl/pkcs7.h> #include <openssl/err.h> #include <openssl/engine.h> diff --git a/scripts/sign-file.c b/scripts/sign-file.c index c3899ca4811c..250a7a645033 100755 --- a/scripts/sign-file.c +++ b/scripts/sign-file.c @@ -20,13 +20,34 @@ #include <getopt.h> #include <err.h> #include <arpa/inet.h> +#include <openssl/opensslv.h> #include <openssl/bio.h> #include <openssl/evp.h> #include <openssl/pem.h> -#include <openssl/cms.h> #include <openssl/err.h> #include <openssl/engine.h> +/* + * Use CMS if we have openssl-1.0.0 or newer available - otherwise we have to + * assume that it's not available and its header file is missing and that we + * should use PKCS#7 instead. Switching to the older PKCS#7 format restricts + * the options we have on specifying the X.509 certificate we want. + * + * Further, older versions of OpenSSL don't support manually adding signers to + * the PKCS#7 message so have to accept that we get a certificate included in + * the signature message. Nor do such older versions of OpenSSL support + * signing with anything other than SHA1 - so we're stuck with that if such is + * the case. 
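The gate added below keys everything off OPENSSL_VERSION_NUMBER, which packs the version as 0xMNNFFPPS, so 0x10000000L is the first 1.0.0 release. A minimal probe of the same macro, assuming only that the OpenSSL headers are installed:

    #include <stdio.h>
    #include <openssl/opensslv.h>

    #if OPENSSL_VERSION_NUMBER < 0x10000000L
    #define USE_PKCS7
    #endif

    int main(void)
    {
            printf("built against %s\n", OPENSSL_VERSION_TEXT);
    #ifdef USE_PKCS7
            puts("would sign with PKCS#7 (SHA1 only)");
    #else
            puts("would sign with CMS");
    #endif
            return 0;
    }

Doing the selection at compile time keeps sign-file a single source file that builds against either generation of the library.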
+ */ +#if OPENSSL_VERSION_NUMBER < 0x10000000L +#define USE_PKCS7 +#endif +#ifndef USE_PKCS7 +#include <openssl/cms.h> +#else +#include <openssl/pkcs7.h> +#endif + struct module_signature { uint8_t algo; /* Public-key crypto algorithm [0] */ uint8_t hash; /* Digest algorithm [0] */ @@ -110,30 +131,42 @@ int main(int argc, char **argv) struct module_signature sig_info = { .id_type = PKEY_ID_PKCS7 }; char *hash_algo = NULL; char *private_key_name, *x509_name, *module_name, *dest_name; - bool save_cms = false, replace_orig; + bool save_sig = false, replace_orig; bool sign_only = false; unsigned char buf[4096]; - unsigned long module_size, cms_size; - unsigned int use_keyid = 0, use_signed_attrs = CMS_NOATTR; + unsigned long module_size, sig_size; + unsigned int use_signed_attrs; const EVP_MD *digest_algo; EVP_PKEY *private_key; +#ifndef USE_PKCS7 CMS_ContentInfo *cms; + unsigned int use_keyid = 0; +#else + PKCS7 *pkcs7; +#endif X509 *x509; BIO *b, *bd = NULL, *bm; int opt, n; - OpenSSL_add_all_algorithms(); ERR_load_crypto_strings(); ERR_clear_error(); key_pass = getenv("KBUILD_SIGN_PIN"); +#ifndef USE_PKCS7 + use_signed_attrs = CMS_NOATTR; +#else + use_signed_attrs = PKCS7_NOATTR; +#endif + do { opt = getopt(argc, argv, "dpk"); switch (opt) { - case 'p': save_cms = true; break; - case 'd': sign_only = true; save_cms = true; break; + case 'p': save_sig = true; break; + case 'd': sign_only = true; save_sig = true; break; +#ifndef USE_PKCS7 case 'k': use_keyid = CMS_USE_KEYID; break; +#endif case -1: break; default: format(); } @@ -157,6 +190,14 @@ int main(int argc, char **argv) replace_orig = true; } +#ifdef USE_PKCS7 + if (strcmp(hash_algo, "sha1") != 0) { + fprintf(stderr, "sign-file: %s only supports SHA1 signing\n", + OPENSSL_VERSION_TEXT); + exit(3); + } +#endif + /* Read the private key and the X.509 cert the PKCS#7 message * will point to. */ @@ -213,7 +254,8 @@ int main(int argc, char **argv) bm = BIO_new_file(module_name, "rb"); ERR(!bm, "%s", module_name); - /* Load the CMS message from the digest buffer. */ +#ifndef USE_PKCS7 + /* Load the signature message from the digest buffer. 
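Whichever branch gets compiled, the file layout sign-file emits is the same: the module image, then the detached signature blob, then the struct module_signature trailer declared earlier in this file, then a magic string, so the kernel can find the signature by walking back from the end of the file. A sketch of emitting just the trailer; the magic string follows the kernel's module-signing format, and the id_type value 2 for PKEY_ID_PKCS7 is an assumption about the kernel enum, so treat both as such:

    #include <arpa/inet.h>
    #include <stdint.h>
    #include <stdio.h>

    struct module_signature {
            uint8_t  algo;          /* public-key crypto algorithm [0] */
            uint8_t  hash;          /* digest algorithm [0] */
            uint8_t  id_type;       /* key identifier type [PKEY_ID_PKCS7] */
            uint8_t  signer_len;    /* length of signer's name [0] */
            uint8_t  key_id_len;    /* length of key identifier [0] */
            uint8_t  pad[3];
            uint32_t sig_len;       /* signature length, big-endian */
    };

    int main(void)
    {
            static const char magic[] = "~Module signature appended~\n";
            struct module_signature sig_info = { .id_type = 2 /* assumed PKEY_ID_PKCS7 */ };
            unsigned long sig_size = 1234; /* illustrative signature length */

            sig_info.sig_len = htonl(sig_size);
            /* Appended after the module data and the signature blob: */
            fwrite(&sig_info, sizeof(sig_info), 1, stdout);
            fwrite(magic, sizeof(magic) - 1, 1, stdout);
            return 0;
    }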
*/ cms = CMS_sign(NULL, NULL, NULL, NULL, CMS_NOCERTS | CMS_PARTIAL | CMS_BINARY | CMS_DETACHED | CMS_STREAM); ERR(!cms, "CMS_sign"); @@ -221,17 +263,31 @@ int main(int argc, char **argv) ERR(!CMS_add1_signer(cms, x509, private_key, digest_algo, CMS_NOCERTS | CMS_BINARY | CMS_NOSMIMECAP | use_keyid | use_signed_attrs), - "CMS_sign_add_signer"); + "CMS_add1_signer"); ERR(CMS_final(cms, bm, NULL, CMS_NOCERTS | CMS_BINARY) < 0, "CMS_final"); - if (save_cms) { - char *cms_name; +#else + pkcs7 = PKCS7_sign(x509, private_key, NULL, bm, + PKCS7_NOCERTS | PKCS7_BINARY | + PKCS7_DETACHED | use_signed_attrs); + ERR(!pkcs7, "PKCS7_sign"); +#endif - ERR(asprintf(&cms_name, "%s.p7s", module_name) < 0, "asprintf"); - b = BIO_new_file(cms_name, "wb"); - ERR(!b, "%s", cms_name); - ERR(i2d_CMS_bio_stream(b, cms, NULL, 0) < 0, "%s", cms_name); + if (save_sig) { + char *sig_file_name; + + ERR(asprintf(&sig_file_name, "%s.p7s", module_name) < 0, + "asprintf"); + b = BIO_new_file(sig_file_name, "wb"); + ERR(!b, "%s", sig_file_name); +#ifndef USE_PKCS7 + ERR(i2d_CMS_bio_stream(b, cms, NULL, 0) < 0, + "%s", sig_file_name); +#else + ERR(i2d_PKCS7_bio(b, pkcs7) < 0, + "%s", sig_file_name); +#endif BIO_free(b); } @@ -247,9 +303,13 @@ int main(int argc, char **argv) ERR(n < 0, "%s", module_name); module_size = BIO_number_written(bd); +#ifndef USE_PKCS7 ERR(i2d_CMS_bio_stream(bd, cms, NULL, 0) < 0, "%s", dest_name); - cms_size = BIO_number_written(bd) - module_size; - sig_info.sig_len = htonl(cms_size); +#else + ERR(i2d_PKCS7_bio(bd, pkcs7) < 0, "%s", dest_name); +#endif + sig_size = BIO_number_written(bd) - module_size; + sig_info.sig_len = htonl(sig_size); ERR(BIO_write(bd, &sig_info, sizeof(sig_info)) < 0, "%s", dest_name); ERR(BIO_write(bd, magic_number, sizeof(magic_number) - 1) < 0, "%s", dest_name); diff --git a/security/keys/gc.c b/security/keys/gc.c index c7952375ac53..39eac1fd5706 100644 --- a/security/keys/gc.c +++ b/security/keys/gc.c @@ -134,6 +134,10 @@ static noinline void key_gc_unused_keys(struct list_head *keys) kdebug("- %u", key->serial); key_check(key); + /* Throw away the key data */ + if (key->type->destroy) + key->type->destroy(key); + security_key_free(key); /* deal with the user's key tracking and quota */ @@ -148,10 +152,6 @@ static noinline void key_gc_unused_keys(struct list_head *keys) if (test_bit(KEY_FLAG_INSTANTIATED, &key->flags)) atomic_dec(&key->user->nikeys); - /* now throw away the key memory */ - if (key->type->destroy) - key->type->destroy(key); - key_user_put(key->user); kfree(key->description); diff --git a/tools/build/Makefile.feature b/tools/build/Makefile.feature index 2975632d51e2..c8fe6d177119 100644 --- a/tools/build/Makefile.feature +++ b/tools/build/Makefile.feature @@ -41,6 +41,7 @@ FEATURE_TESTS ?= \ libelf-getphdrnum \ libelf-mmap \ libnuma \ + numa_num_possible_cpus \ libperl \ libpython \ libpython-version \ @@ -51,7 +52,8 @@ FEATURE_TESTS ?= \ timerfd \ libdw-dwarf-unwind \ zlib \ - lzma + lzma \ + get_cpuid FEATURE_DISPLAY ?= \ dwarf \ @@ -61,13 +63,15 @@ FEATURE_DISPLAY ?= \ libbfd \ libelf \ libnuma \ + numa_num_possible_cpus \ libperl \ libpython \ libslang \ libunwind \ libdw-dwarf-unwind \ zlib \ - lzma + lzma \ + get_cpuid # Set FEATURE_CHECK_(C|LD)FLAGS-all for all FEATURE_TESTS features. 
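Each name added to FEATURE_TESTS pairs with a one-function probe under tools/build/feature/ that passes if it compiles and links with the listed libraries (-lnuma for the numa probe, nothing extra for get_cpuid), and the result then gates options such as NO_LIBNUMA in tools/perf/config/Makefile below. The two new probes appear in full further down; the general pattern, shown here against glibc's backtrace() rather than duplicating those files, is simply:

    #include <execinfo.h>

    /* Builds only where backtrace() exists; the feature framework cares
     * about the compile/link result, not what main() returns at run time. */
    int main(void)
    {
            void *stack[16];

            return backtrace(stack, 16) > 0 ? 0 : 1;
    }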
# If in the future we need per-feature checks/flags for features not diff --git a/tools/build/feature/Makefile b/tools/build/feature/Makefile index 74ca42093d70..e43a2971bf56 100644 --- a/tools/build/feature/Makefile +++ b/tools/build/feature/Makefile @@ -19,6 +19,7 @@ FILES= \ test-libelf-getphdrnum.bin \ test-libelf-mmap.bin \ test-libnuma.bin \ + test-numa_num_possible_cpus.bin \ test-libperl.bin \ test-libpython.bin \ test-libpython-version.bin \ @@ -34,7 +35,8 @@ FILES= \ test-compile-x32.bin \ test-zlib.bin \ test-lzma.bin \ - test-bpf.bin + test-bpf.bin \ + test-get_cpuid.bin CC := $(CROSS_COMPILE)gcc -MD PKG_CONFIG := $(CROSS_COMPILE)pkg-config @@ -87,6 +89,9 @@ test-libelf-getphdrnum.bin: test-libnuma.bin: $(BUILD) -lnuma +test-numa_num_possible_cpus.bin: + $(BUILD) -lnuma + test-libunwind.bin: $(BUILD) -lelf @@ -162,6 +167,9 @@ test-zlib.bin: test-lzma.bin: $(BUILD) -llzma +test-get_cpuid.bin: + $(BUILD) + test-bpf.bin: $(BUILD) diff --git a/tools/build/feature/test-all.c b/tools/build/feature/test-all.c index 84689a67814a..33cf6f20bd4e 100644 --- a/tools/build/feature/test-all.c +++ b/tools/build/feature/test-all.c @@ -77,6 +77,10 @@ # include "test-libnuma.c" #undef main +#define main main_test_numa_num_possible_cpus +# include "test-numa_num_possible_cpus.c" +#undef main + #define main main_test_timerfd # include "test-timerfd.c" #undef main @@ -117,6 +121,10 @@ # include "test-lzma.c" #undef main +#define main main_test_get_cpuid +# include "test-get_cpuid.c" +#undef main + int main(int argc, char *argv[]) { main_test_libpython(); @@ -136,6 +144,7 @@ int main(int argc, char *argv[]) main_test_libbfd(); main_test_backtrace(); main_test_libnuma(); + main_test_numa_num_possible_cpus(); main_test_timerfd(); main_test_stackprotector_all(); main_test_libdw_dwarf_unwind(); @@ -143,6 +152,7 @@ int main(int argc, char *argv[]) main_test_zlib(); main_test_pthread_attr_setaffinity_np(); main_test_lzma(); + main_test_get_cpuid(); return 0; } diff --git a/tools/build/feature/test-get_cpuid.c b/tools/build/feature/test-get_cpuid.c new file mode 100644 index 000000000000..d7a2c407130d --- /dev/null +++ b/tools/build/feature/test-get_cpuid.c @@ -0,0 +1,7 @@ +#include <cpuid.h> + +int main(void) +{ + unsigned int eax = 0, ebx = 0, ecx = 0, edx = 0; + return __get_cpuid(0x15, &eax, &ebx, &ecx, &edx); +} diff --git a/tools/build/feature/test-numa_num_possible_cpus.c b/tools/build/feature/test-numa_num_possible_cpus.c new file mode 100644 index 000000000000..2606e94b0659 --- /dev/null +++ b/tools/build/feature/test-numa_num_possible_cpus.c @@ -0,0 +1,6 @@ +#include <numa.h> + +int main(void) +{ + return numa_num_possible_cpus(); +} diff --git a/tools/lib/traceevent/event-parse.c b/tools/lib/traceevent/event-parse.c index 4d885934b919..cf42b090477b 100644 --- a/tools/lib/traceevent/event-parse.c +++ b/tools/lib/traceevent/event-parse.c @@ -3795,7 +3795,7 @@ static void print_str_arg(struct trace_seq *s, void *data, int size, struct format_field *field; struct printk_map *printk; long long val, fval; - unsigned long addr; + unsigned long long addr; char *str; unsigned char *hex; int print; @@ -3828,13 +3828,30 @@ static void print_str_arg(struct trace_seq *s, void *data, int size, */ if (!(field->flags & FIELD_IS_ARRAY) && field->size == pevent->long_size) { - addr = *(unsigned long *)(data + field->offset); + + /* Handle heterogeneous recording and processing + * architectures + * + * CASE I: + * Traces recorded on 32-bit devices (32-bit + * addressing) and processed on 64-bit devices: + * In 
diff --git a/tools/lib/traceevent/event-parse.c b/tools/lib/traceevent/event-parse.c
index 4d885934b919..cf42b090477b 100644
--- a/tools/lib/traceevent/event-parse.c
+++ b/tools/lib/traceevent/event-parse.c
@@ -3795,7 +3795,7 @@ static void print_str_arg(struct trace_seq *s, void *data, int size,
 	struct format_field *field;
 	struct printk_map *printk;
 	long long val, fval;
-	unsigned long addr;
+	unsigned long long addr;
 	char *str;
 	unsigned char *hex;
 	int print;
@@ -3828,13 +3828,30 @@ static void print_str_arg(struct trace_seq *s, void *data, int size,
 		 */
 		if (!(field->flags & FIELD_IS_ARRAY) &&
 		    field->size == pevent->long_size) {
-			addr = *(unsigned long *)(data + field->offset);
+
+			/* Handle heterogeneous recording and processing
+			 * architectures
+			 *
+			 * CASE I:
+			 * Traces recorded on 32-bit devices (32-bit
+			 * addressing) and processed on 64-bit devices:
+			 * In this case, only 32 bits should be read.
+			 *
+			 * CASE II:
+			 * Traces recorded on 64-bit devices and processed
+			 * on 32-bit devices:
+			 * In this case, 64 bits must be read.
+			 */
+			addr = (pevent->long_size == 8) ?
+				*(unsigned long long *)(data + field->offset) :
+				(unsigned long long)*(unsigned int *)(data + field->offset);
+
 			/* Check if it matches a print format */
 			printk = find_printk(pevent, addr);
 			if (printk)
 				trace_seq_puts(s, printk->printk);
 			else
-				trace_seq_printf(s, "%lx", addr);
+				trace_seq_printf(s, "%llx", addr);
 			break;
 		}
 		str = malloc(len + 1);
diff --git a/tools/perf/Documentation/intel-pt.txt b/tools/perf/Documentation/intel-pt.txt
index 4a0501d7a3b4..c94c9de3173e 100644
--- a/tools/perf/Documentation/intel-pt.txt
+++ b/tools/perf/Documentation/intel-pt.txt
@@ -364,21 +364,6 @@ cyc_thresh	Specifies how frequently CYC packets are produced - see cyc
 		CYC packets are not requested by default.

-no_force_psb	This is a driver option and is not in the IA32_RTIT_CTL MSR.
-
-		It stops the driver resetting the byte count to zero whenever
-		enabling the trace (for example on context switches) which in
-		turn results in no PSB being forced. However some processors
-		will produce a PSB anyway.
-
-		In any case, there is still a PSB when the trace is enabled for
-		the first time.
-
-		no_force_psb can be used to slightly decrease the trace size but
-		may make it harder for the decoder to recover from errors.
-
-		no_force_psb is not selected by default.
-
 new snapshot option
 -------------------
diff --git a/tools/perf/config/Makefile b/tools/perf/config/Makefile
index 827557fc7511..38a08539f4bf 100644
--- a/tools/perf/config/Makefile
+++ b/tools/perf/config/Makefile
@@ -573,9 +573,14 @@ ifndef NO_LIBNUMA
     msg := $(warning No numa.h found, disables 'perf bench numa mem' benchmark, please install numactl-devel/libnuma-devel/libnuma-dev);
     NO_LIBNUMA := 1
   else
-    CFLAGS += -DHAVE_LIBNUMA_SUPPORT
-    EXTLIBS += -lnuma
-    $(call detected,CONFIG_NUMA)
+    ifeq ($(feature-numa_num_possible_cpus), 0)
+      msg := $(warning Old numa library found, disables 'perf bench numa mem' benchmark, please install numactl-devel/libnuma-devel/libnuma-dev >= 2.0.8);
+      NO_LIBNUMA := 1
+    else
+      CFLAGS += -DHAVE_LIBNUMA_SUPPORT
+      EXTLIBS += -lnuma
+      $(call detected,CONFIG_NUMA)
+    endif
   endif
 endif

@@ -621,8 +626,13 @@ ifdef LIBBABELTRACE
 endif

 ifndef NO_AUXTRACE
-  $(call detected,CONFIG_AUXTRACE)
-  CFLAGS += -DHAVE_AUXTRACE_SUPPORT
+  ifeq ($(feature-get_cpuid), 0)
+    msg := $(warning Your gcc lacks the __get_cpuid() builtin, disables support for auxtrace/Intel PT, please install a newer gcc);
+    NO_AUXTRACE := 1
+  else
+    $(call detected,CONFIG_AUXTRACE)
+    CFLAGS += -DHAVE_AUXTRACE_SUPPORT
+  endif
 endif

 # Among the variables below, these:
diff --git a/tools/perf/util/probe-event.c b/tools/perf/util/probe-event.c
index eb5f18b75402..c6f9af78f6f5 100644
--- a/tools/perf/util/probe-event.c
+++ b/tools/perf/util/probe-event.c
@@ -270,12 +270,13 @@ static int kernel_get_module_dso(const char *module, struct dso **pdso)
 	int ret = 0;

 	if (module) {
-		list_for_each_entry(dso, &host_machine->dsos.head, node) {
-			if (!dso->kernel)
-				continue;
-			if (strncmp(dso->short_name + 1, module,
-				    dso->short_name_len - 2) == 0)
-				goto found;
+		char module_name[128];
+
+		snprintf(module_name, sizeof(module_name), "[%s]", module);
+		map = map_groups__find_by_name(&host_machine->kmaps,
+					       MAP__FUNCTION, module_name);
+		if (map) {
+			dso = map->dso;
+			goto found;
 		}
 		pr_debug("Failed to find module %s.\n", module);
 		return -ENOENT;
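The event-parse.c change above is about decoding traces on a host whose word size differs from the recorder's. A standalone sketch of the same width-correct read — names are hypothetical, and, like the patch, it assumes recorder and host share endianness:

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    /* Read a pointer field using the *recorded* long size (4 or 8 bytes),
     * never the host's, mirroring the print_str_arg() logic above.
     */
    static unsigned long long read_recorded_addr(const void *p, int long_size)
    {
    	if (long_size == 8) {
    		uint64_t v;
    		memcpy(&v, p, sizeof(v));	/* 64-bit trace: read all 8 bytes */
    		return v;
    	} else {
    		uint32_t v;
    		memcpy(&v, p, sizeof(v));	/* 32-bit trace: read 4 bytes only */
    		return v;
    	}
    }

    int main(void)
    {
    	unsigned char field[8] = { 0x78, 0x56, 0x34, 0x12, 0, 0, 0, 0 };

    	/* On a little-endian host both prints give 12345678. */
    	printf("%llx %llx\n", read_recorded_addr(field, 4),
    	       read_recorded_addr(field, 8));
    	return 0;
    }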
diff --git a/tools/perf/util/session.c b/tools/perf/util/session.c
index 8a4537ee9bc3..fc3f7c922f99 100644
--- a/tools/perf/util/session.c
+++ b/tools/perf/util/session.c
@@ -1580,7 +1580,10 @@ static int __perf_session__process_events(struct perf_session *session,
 	file_offset = page_offset;
 	head = data_offset - page_offset;

-	if (data_size && (data_offset + data_size < file_size))
+	if (data_size == 0)
+		goto out;
+
+	if (data_offset + data_size < file_size)
 		file_size = data_offset + data_size;

 	ui_progress__init(&prog, file_size, "Processing events...");
diff --git a/tools/perf/util/stat.c b/tools/perf/util/stat.c
index 415c359de465..2d065d065b67 100644
--- a/tools/perf/util/stat.c
+++ b/tools/perf/util/stat.c
@@ -196,7 +196,8 @@ static void zero_per_pkg(struct perf_evsel *counter)
 		memset(counter->per_pkg_mask, 0, MAX_NR_CPUS);
 }

-static int check_per_pkg(struct perf_evsel *counter, int cpu, bool *skip)
+static int check_per_pkg(struct perf_evsel *counter,
+			 struct perf_counts_values *vals, int cpu, bool *skip)
 {
 	unsigned long *mask = counter->per_pkg_mask;
 	struct cpu_map *cpus = perf_evsel__cpus(counter);
@@ -218,6 +219,17 @@ static int check_per_pkg(struct perf_evsel *counter, int cpu, bool *skip)
 		counter->per_pkg_mask = mask;
 	}

+	/*
+	 * we do not consider an event that has not run as a good
+	 * instance to mark a package as used (skip=1). Otherwise
+	 * we may run into a situation where the first CPU in a package
+	 * is not running anything, yet the second is, and this function
+	 * would mark the package as used after the first CPU and would
+	 * not read the values from the second CPU.
+	 */
+	if (!(vals->run && vals->ena))
+		return 0;
+
 	s = cpu_map__get_socket(cpus, cpu);
 	if (s < 0)
 		return -1;
@@ -235,7 +247,7 @@ process_counter_values(struct perf_stat_config *config, struct perf_evsel *evsel
 	static struct perf_counts_values zero;
 	bool skip = false;

-	if (check_per_pkg(evsel, cpu, &skip)) {
+	if (check_per_pkg(evsel, count, cpu, &skip)) {
 		pr_err("failed to read per-pkg counter\n");
 		return -1;
 	}
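The stat.c change passes the counter values into check_per_pkg() so that only an instance that was actually enabled and running can claim a package. A simplified stand-in for that logic — types and names are illustrative, not the perf ones:

    #include <stdbool.h>
    #include <stdio.h>

    struct counts { unsigned long long val, ena, run; };

    static bool pkg_claimed[64];	/* one flag per package */

    static void check_per_pkg(int pkg, const struct counts *c, bool *skip)
    {
    	*skip = false;
    	if (!(c->run && c->ena))
    		return;			/* never ran: must not claim the package */
    	if (pkg_claimed[pkg])
    		*skip = true;		/* package already counted once */
    	else
    		pkg_claimed[pkg] = true;
    }

    int main(void)
    {
    	struct counts idle = { 0, 0, 0 }, busy = { 42, 1, 1 };
    	bool skip;

    	check_per_pkg(0, &idle, &skip);	/* idle CPU leaves the package unclaimed */
    	printf("idle: skip=%d\n", skip);
    	check_per_pkg(0, &busy, &skip);	/* so the busy CPU on it is still read */
    	printf("busy: skip=%d\n", skip);
    	return 0;
    }

Without the run/ena guard, an idle first CPU would claim the package and the busy second CPU's counts would be skipped.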
diff --git a/tools/perf/util/symbol-elf.c b/tools/perf/util/symbol-elf.c
index 53bb5f59ec58..475d88d0a1c9 100644
--- a/tools/perf/util/symbol-elf.c
+++ b/tools/perf/util/symbol-elf.c
@@ -38,7 +38,7 @@ static inline char *bfd_demangle(void __maybe_unused *v,
 #endif

 #ifndef HAVE_ELF_GETPHDRNUM_SUPPORT
-int elf_getphdrnum(Elf *elf, size_t *dst)
+static int elf_getphdrnum(Elf *elf, size_t *dst)
 {
 	GElf_Ehdr gehdr;
 	GElf_Ehdr *ehdr;
@@ -1271,8 +1271,6 @@ out_close:
 static int kcore__init(struct kcore *kcore, char *filename, int elfclass,
 		       bool temp)
 {
-	GElf_Ehdr *ehdr;
-
 	kcore->elfclass = elfclass;

 	if (temp)
@@ -1289,9 +1287,7 @@ static int kcore__init(struct kcore *kcore, char *filename, int elfclass,
 	if (!gelf_newehdr(kcore->elf, elfclass))
 		goto out_end;

-	ehdr = gelf_getehdr(kcore->elf, &kcore->ehdr);
-	if (!ehdr)
-		goto out_end;
+	memset(&kcore->ehdr, 0, sizeof(GElf_Ehdr));

 	return 0;

@@ -1348,23 +1344,18 @@ static int kcore__copy_hdr(struct kcore *from, struct kcore *to, size_t count)
 static int kcore__add_phdr(struct kcore *kcore, int idx, off_t offset,
 			   u64 addr, u64 len)
 {
-	GElf_Phdr gphdr;
-	GElf_Phdr *phdr;
-
-	phdr = gelf_getphdr(kcore->elf, idx, &gphdr);
-	if (!phdr)
-		return -1;
-
-	phdr->p_type = PT_LOAD;
-	phdr->p_flags = PF_R | PF_W | PF_X;
-	phdr->p_offset = offset;
-	phdr->p_vaddr = addr;
-	phdr->p_paddr = 0;
-	phdr->p_filesz = len;
-	phdr->p_memsz = len;
-	phdr->p_align = page_size;
-
-	if (!gelf_update_phdr(kcore->elf, idx, phdr))
+	GElf_Phdr phdr = {
+		.p_type = PT_LOAD,
+		.p_flags = PF_R | PF_W | PF_X,
+		.p_offset = offset,
+		.p_vaddr = addr,
+		.p_paddr = 0,
+		.p_filesz = len,
+		.p_memsz = len,
+		.p_align = page_size,
+	};
+
+	if (!gelf_update_phdr(kcore->elf, idx, &phdr))
 		return -1;

 	return 0;
diff --git a/tools/perf/util/util.c b/tools/perf/util/util.c
index 7acafb3c5592..c2cd9bf2348b 100644
--- a/tools/perf/util/util.c
+++ b/tools/perf/util/util.c
@@ -709,7 +709,7 @@ bool find_process(const char *name)
 	dir = opendir(procfs__mountpoint());
 	if (!dir)
-		return -1;
+		return false;

 	/* Walk through the directory. */
 	while (ret && (d = readdir(dir)) != NULL) {
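The small util.c fix matters because of C's bool conversion rules: any non-zero value, including -1, converts to true, so the old return told callers of find_process() that the process was found whenever opendir() failed. A minimal demonstration, separate from the patch:

    #include <stdbool.h>
    #include <stdio.h>

    static bool buggy(void) { return -1; }		/* -1 converts to true(!) */
    static bool fixed(void) { return false; }	/* explicit failure */

    int main(void)
    {
    	printf("buggy() = %d, fixed() = %d\n", buggy(), fixed());
    	return 0;	/* prints: buggy() = 1, fixed() = 0 */
    }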
diff --git a/tools/power/x86/turbostat/turbostat.c b/tools/power/x86/turbostat/turbostat.c
index 9655cb49c7cb..bde0ef1a63df 100644
--- a/tools/power/x86/turbostat/turbostat.c
+++ b/tools/power/x86/turbostat/turbostat.c
@@ -71,8 +71,11 @@ unsigned int extra_msr_offset32;
 unsigned int extra_msr_offset64;
 unsigned int extra_delta_offset32;
 unsigned int extra_delta_offset64;
+unsigned int aperf_mperf_multiplier = 1;
 int do_smi;
 double bclk;
+double base_hz;
+double tsc_tweak = 1.0;
 unsigned int show_pkg;
 unsigned int show_core;
 unsigned int show_cpu;
@@ -502,7 +505,7 @@ int format_counters(struct thread_data *t, struct core_data *c,
 	/* %Busy */
 	if (has_aperf) {
 		if (!skip_c0)
-			outp += sprintf(outp, "%8.2f", 100.0 * t->mperf/t->tsc);
+			outp += sprintf(outp, "%8.2f", 100.0 * t->mperf/t->tsc/tsc_tweak);
 		else
 			outp += sprintf(outp, "********");
 	}
@@ -510,7 +513,7 @@ int format_counters(struct thread_data *t, struct core_data *c,
 	/* Bzy_MHz */
 	if (has_aperf)
 		outp += sprintf(outp, "%8.0f",
-			1.0 * t->tsc / units * t->aperf / t->mperf / interval_float);
+			1.0 * t->tsc * tsc_tweak / units * t->aperf / t->mperf / interval_float);

 	/* TSC_MHz */
 	outp += sprintf(outp, "%8.0f", 1.0 * t->tsc/units/interval_float);
@@ -984,6 +987,8 @@ int get_counters(struct thread_data *t, struct core_data *c, struct pkg_data *p)
 			return -3;
 		if (get_msr(cpu, MSR_IA32_MPERF, &t->mperf))
 			return -4;
+		t->aperf = t->aperf * aperf_mperf_multiplier;
+		t->mperf = t->mperf * aperf_mperf_multiplier;
 	}

 	if (do_smi) {
@@ -1149,6 +1154,19 @@ int slv_pkg_cstate_limits[16] = {PCL__0, PCL__1, PCLRSV, PCLRSV, PCL__4, PCLRSV,
 int amt_pkg_cstate_limits[16] = {PCL__0, PCL__1, PCL__2, PCLRSV, PCLRSV, PCLRSV, PCL__6, PCL__7, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV};
 int phi_pkg_cstate_limits[16] = {PCL__0, PCL__2, PCL_6N, PCL_6R, PCLRSV, PCLRSV, PCLRSV, PCLUNL, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV};
+
+static void
+calculate_tsc_tweak()
+{
+	unsigned long long msr;
+	unsigned int base_ratio;
+
+	get_msr(base_cpu, MSR_NHM_PLATFORM_INFO, &msr);
+	base_ratio = (msr >> 8) & 0xFF;
+	base_hz = base_ratio * bclk * 1000000;
+	tsc_tweak = base_hz / tsc_hz;
+}
+
 static void
 dump_nhm_platform_info(void)
 {
@@ -1926,8 +1944,6 @@ int has_config_tdp(unsigned int family, unsigned int model)
 	switch (model) {
 	case 0x3A:	/* IVB */
-	case 0x3E:	/* IVB Xeon */
-
 	case 0x3C:	/* HSW */
 	case 0x3F:	/* HSX */
 	case 0x45:	/* HSW */
@@ -2543,6 +2559,13 @@ int is_knl(unsigned int family, unsigned int model)
 	return 0;
 }

+unsigned int get_aperf_mperf_multiplier(unsigned int family, unsigned int model)
+{
+	if (is_knl(family, model))
+		return 1024;
+	return 1;
+}
+
 #define SLM_BCLK_FREQS 5
 double slm_freq_table[SLM_BCLK_FREQS] = { 83.3, 100.0, 133.3, 116.7, 80.0};
@@ -2744,6 +2767,9 @@ void process_cpuid()
 		}
 	}

+	if (has_aperf)
+		aperf_mperf_multiplier = get_aperf_mperf_multiplier(family, model);
+
 	do_nhm_platform_info = do_nhm_cstates = do_smi = probe_nhm_msrs(family, model);
 	do_snb_cstates = has_snb_msrs(family, model);
 	do_pc2 = do_snb_cstates && (pkg_cstate_limit >= PCL__2);
@@ -2762,6 +2788,9 @@ void process_cpuid()
 	if (debug)
 		dump_cstate_pstate_config_info();

+	if (has_skl_msrs(family, model))
+		calculate_tsc_tweak();
+
 	return;
 }
@@ -3090,7 +3119,7 @@ int get_and_dump_counters(void)
 }

 void print_version() {
-	fprintf(stderr, "turbostat version 4.7 17-June, 2015"
+	fprintf(stderr, "turbostat version 4.8 26-Sep, 2015"
 		" - Len Brown <lenb@kernel.org>\n");
 }
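On Skylake the TSC may tick at a slightly different rate than the base (P1) frequency derived from MSR_NHM_PLATFORM_INFO bits 15:8, so calculate_tsc_tweak() above rescales %Busy and Bzy_MHz by base_hz/tsc_hz. A worked sketch with made-up but representative numbers, separate from the patch:

    #include <stdio.h>

    int main(void)
    {
    	double bclk = 100.0;			/* bus clock, MHz */
    	unsigned int base_ratio = 28;		/* MSR_NHM_PLATFORM_INFO bits 15:8 */
    	double base_hz = base_ratio * bclk * 1e6;	/* 2.8 GHz P1 frequency */
    	double tsc_hz = 2808e6;			/* measured TSC rate */
    	double tsc_tweak = base_hz / tsc_hz;	/* ~0.9972 */

    	/* counters accumulated over a 1 s interval */
    	double mperf = 1.0e9, tsc = 2.808e9;

    	/* without the tweak this would read 35.61 instead of 35.71 */
    	printf("%%Busy = %.2f\n", 100.0 * mperf / tsc / tsc_tweak);
    	return 0;
    }

The KNL hunks are independent: on Knights Landing the APERF and MPERF MSRs advance once every 1024 clocks, so get_aperf_mperf_multiplier() scales both counts by 1024 before any ratios are computed.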