author     Linus Torvalds <torvalds@linux-foundation.org>   2019-03-07 16:36:57 -0800
committer  Linus Torvalds <torvalds@linux-foundation.org>   2019-03-07 16:36:57 -0800
commit     bcd49c3dd172c38e14faf151adca63c8db4c9557
tree       d0888cddba6aec424a52450f5cb84768f0377318
parent     f14b5f05cde1e22f4a99d7bc0f9d61f024f21f70
parent     2e7614c0736de93c8796bb2d58debb8871a59db8
Merge branch 'x86-cleanups-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull x86 cleanups from Ingo Molnar:
"Various cleanups and simplifications, none of them really stands out,
they are all over the place"
* 'x86-cleanups-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
x86/uaccess: Remove unused __addr_ok() macro
x86/smpboot: Remove unused phys_id variable
x86/mm/dump_pagetables: Remove the unused prev_pud variable
x86/fpu: Move init_xstate_size() to __init section
x86/cpu_entry_area: Move percpu_setup_debug_store() to __init section
x86/mtrr: Remove unused variable
x86/boot/compressed/64: Explain paging_prepare()'s return value
x86/resctrl: Remove duplicate MSR_MISC_FEATURE_CONTROL definition
x86/asm/suspend: Drop ENTRY from local data
x86/hw_breakpoints, kprobes: Remove kprobes ifdeffery
x86/boot: Save several bytes in decompressor
x86/trap: Remove useless declaration
x86/mm/tlb: Remove unused cpu variable
x86/events: Mark expected switch-case fall-throughs
x86/asm-prototypes: Remove duplicate include <asm/page.h>
x86/kernel: Mark expected switch-case fall-throughs
x86/insn-eval: Mark expected switch-case fall-through
x86/platform/UV: Replace kmalloc() and memset() with k[cz]alloc() calls
x86/e820: Replace kmalloc() + memcpy() with kmemdup()
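
Several of the patches listed above do nothing but annotate intentional switch fall-throughs so that gcc's -Wimplicit-fallthrough warning stays quiet. A minimal userspace sketch of the pattern (hypothetical names, not taken from any of the patches above):

#include <stdio.h>

/* Hypothetical quirk lookup; the models and flag bits are made up. */
static int quirk_flags(int model)
{
	int flags = 0;

	switch (model) {
	case 1:
		flags |= 0x1;	/* model 1 needs an extra quirk ... */
		/* fall through */
	case 2:
	case 3:
		flags |= 0x2;	/* ... plus what models 1..3 share */
		break;
	default:
		break;
	}
	return flags;
}

int main(void)
{
	printf("model 1 quirks: %d\n", quirk_flags(1));	/* prints 3 */
	return 0;
}

The comment on its own line documents that the missing break is deliberate, and it matches one of the comment patterns gcc accepts for suppressing the warning.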
-rw-r--r--  arch/x86/boot/compressed/head_64.S          | 11
-rw-r--r--  arch/x86/events/intel/core.c                |  2
-rw-r--r--  arch/x86/events/intel/lbr.c                 |  1
-rw-r--r--  arch/x86/include/asm/asm-prototypes.h       |  1
-rw-r--r--  arch/x86/include/asm/processor.h            |  1
-rw-r--r--  arch/x86/include/asm/uaccess.h              |  3
-rw-r--r--  arch/x86/kernel/acpi/wakeup_32.S            |  2
-rw-r--r--  arch/x86/kernel/acpi/wakeup_64.S            | 12
-rw-r--r--  arch/x86/kernel/apic/io_apic.c              |  2
-rw-r--r--  arch/x86/kernel/cpu/cacheinfo.c             |  1
-rw-r--r--  arch/x86/kernel/cpu/mtrr/cleanup.c          |  3
-rw-r--r--  arch/x86/kernel/cpu/resctrl/pseudo_lock.c   |  7
-rw-r--r--  arch/x86/kernel/e820.c                      |  9
-rw-r--r--  arch/x86/kernel/fpu/xstate.c                |  2
-rw-r--r--  arch/x86/kernel/hw_breakpoint.c             |  5
-rw-r--r--  arch/x86/kernel/kgdb.c                      |  1
-rw-r--r--  arch/x86/kernel/smpboot.c                   |  7
-rw-r--r--  arch/x86/kernel/uprobes.c                   |  1
-rw-r--r--  arch/x86/lib/insn-eval.c                    |  2
-rw-r--r--  arch/x86/mm/cpu_entry_area.c                |  2
-rw-r--r--  arch/x86/mm/dump_pagetables.c               |  2
-rw-r--r--  arch/x86/mm/tlb.c                           |  3
-rw-r--r--  arch/x86/platform/uv/tlb_uv.c               |  8
-rw-r--r--  include/linux/kprobes.h                     |  5
24 files changed, 38 insertions, 55 deletions
diff --git a/arch/x86/boot/compressed/head_64.S b/arch/x86/boot/compressed/head_64.S
index f62e347862cc..fafb75c6c592 100644
--- a/arch/x86/boot/compressed/head_64.S
+++ b/arch/x86/boot/compressed/head_64.S
@@ -358,8 +358,11 @@ ENTRY(startup_64)
	 * paging_prepare() sets up the trampoline and checks if we need to
	 * enable 5-level paging.
	 *
-	 * Address of the trampoline is returned in RAX.
-	 * Non zero RDX on return means we need to enable 5-level paging.
+	 * paging_prepare() returns a two-quadword structure which lands
+	 * into RDX:RAX:
+	 *   - Address of the trampoline is returned in RAX.
+	 *   - Non zero RDX means trampoline needs to enable 5-level
+	 *     paging.
	 *
	 * RSI holds real mode data and needs to be preserved across
	 * this function call.
@@ -565,7 +568,7 @@ adjust_got:
  *
  * RDI contains the return address (might be above 4G).
  * ECX contains the base address of the trampoline memory.
- * Non zero RDX on return means we need to enable 5-level paging.
+ * Non zero RDX means trampoline needs to enable 5-level paging.
  */
 ENTRY(trampoline_32bit_src)
	/* Set up data and stack segments */
@@ -655,8 +658,6 @@ no_longmode:
	.data
 gdt64:
	.word	gdt_end - gdt
-	.long	0
-	.word	0
	.quad	0
 gdt:
	.word	gdt_end - gdt
diff --git a/arch/x86/events/intel/core.c b/arch/x86/events/intel/core.c
index 17096d3cd616..7ec265bacb6a 100644
--- a/arch/x86/events/intel/core.c
+++ b/arch/x86/events/intel/core.c
@@ -4220,6 +4220,8 @@ __init int intel_pmu_init(void)
	case INTEL_FAM6_CORE2_MEROM:
		x86_add_quirk(intel_clovertown_quirk);
+		/* fall through */
+
	case INTEL_FAM6_CORE2_MEROM_L:
	case INTEL_FAM6_CORE2_PENRYN:
	case INTEL_FAM6_CORE2_DUNNINGTON:
diff --git a/arch/x86/events/intel/lbr.c b/arch/x86/events/intel/lbr.c
index c88ed39582a1..580c1b91c454 100644
--- a/arch/x86/events/intel/lbr.c
+++ b/arch/x86/events/intel/lbr.c
@@ -931,6 +931,7 @@ static int branch_type(unsigned long from, unsigned long to, int abort)
			ret = X86_BR_ZERO_CALL;
			break;
		}
+		/* fall through */
	case 0x9a: /* call far absolute */
		ret = X86_BR_CALL;
		break;
diff --git a/arch/x86/include/asm/asm-prototypes.h b/arch/x86/include/asm/asm-prototypes.h
index 1908214b9125..ce92c4acc913 100644
--- a/arch/x86/include/asm/asm-prototypes.h
+++ b/arch/x86/include/asm/asm-prototypes.h
@@ -7,7 +7,6 @@
 
 #include <asm-generic/asm-prototypes.h>
 
-#include <asm/page.h>
 #include <asm/pgtable.h>
 #include <asm/special_insns.h>
 #include <asm/preempt.h>
diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h
index 33051436c864..2bb3a648fc12 100644
--- a/arch/x86/include/asm/processor.h
+++ b/arch/x86/include/asm/processor.h
@@ -742,7 +742,6 @@ enum idle_boot_override {IDLE_NO_OVERRIDE=0, IDLE_HALT, IDLE_NOMWAIT,
 extern void enable_sep_cpu(void);
 extern int sysenter_setup(void);
 
-void early_trap_pf_init(void);
 
 /* Defined in head.S */
 extern struct desc_ptr early_gdt_descr;
diff --git a/arch/x86/include/asm/uaccess.h b/arch/x86/include/asm/uaccess.h
index 62004d22524a..1954dd5552a2 100644
--- a/arch/x86/include/asm/uaccess.h
+++ b/arch/x86/include/asm/uaccess.h
@@ -34,10 +34,7 @@ static inline void set_fs(mm_segment_t fs)
 }
 
 #define segment_eq(a, b)	((a).seg == (b).seg)
-
 #define user_addr_max() (current->thread.addr_limit.seg)
-#define __addr_ok(addr)	\
-	((unsigned long __force)(addr) < user_addr_max())
 
 /*
  * Test whether a block of memory is a valid user space address.
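
A side note on the head_64.S comment rewritten above: the "two-quadword structure which lands into RDX:RAX" is simply how the x86-64 System V ABI returns a 16-byte aggregate of two integer members — first member in RAX, second in RDX. A simplified, standalone C sketch (struct and field names here are illustrative, not necessarily the decompressor's actual definitions):

#include <stdio.h>

struct paging_config {
	unsigned long trampoline_start;	/* comes back in RAX */
	unsigned long l5_required;	/* comes back in RDX */
};

static struct paging_config prepare_paging_demo(unsigned long bios_start)
{
	struct paging_config cfg = {
		.trampoline_start = bios_start,
		.l5_required = 0,	/* non-zero would mean "enable 5-level paging" */
	};
	return cfg;	/* on x86-64: RAX = trampoline_start, RDX = l5_required */
}

int main(void)
{
	struct paging_config cfg = prepare_paging_demo(0x9d000);

	printf("trampoline at %#lx, 5-level: %lu\n",
	       cfg.trampoline_start, cfg.l5_required);
	return 0;
}

That register split is what the assembly caller relies on when it tests RDX and uses the trampoline address returned in RAX.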
diff --git a/arch/x86/kernel/acpi/wakeup_32.S b/arch/x86/kernel/acpi/wakeup_32.S
index 0c26b1b44e51..4203d4f0c68d 100644
--- a/arch/x86/kernel/acpi/wakeup_32.S
+++ b/arch/x86/kernel/acpi/wakeup_32.S
@@ -90,7 +90,7 @@ ret_point:
 .data
 ALIGN
 ENTRY(saved_magic)	.long	0
-ENTRY(saved_eip)	.long	0
+saved_eip:		.long	0
 
 # saved registers
 saved_idt:	.long	0,0
diff --git a/arch/x86/kernel/acpi/wakeup_64.S b/arch/x86/kernel/acpi/wakeup_64.S
index 50b8ed0317a3..510fa12aab73 100644
--- a/arch/x86/kernel/acpi/wakeup_64.S
+++ b/arch/x86/kernel/acpi/wakeup_64.S
@@ -125,12 +125,12 @@ ENTRY(do_suspend_lowlevel)
 ENDPROC(do_suspend_lowlevel)
 
 .data
-ENTRY(saved_rbp)	.quad	0
-ENTRY(saved_rsi)	.quad	0
-ENTRY(saved_rdi)	.quad	0
-ENTRY(saved_rbx)	.quad	0
+saved_rbp:		.quad	0
+saved_rsi:		.quad	0
+saved_rdi:		.quad	0
+saved_rbx:		.quad	0
 
-ENTRY(saved_rip)	.quad	0
-ENTRY(saved_rsp)	.quad	0
+saved_rip:		.quad	0
+saved_rsp:		.quad	0
 
 ENTRY(saved_magic)	.quad	0
diff --git a/arch/x86/kernel/apic/io_apic.c b/arch/x86/kernel/apic/io_apic.c
index 2953bbf05c08..264e3221d923 100644
--- a/arch/x86/kernel/apic/io_apic.c
+++ b/arch/x86/kernel/apic/io_apic.c
@@ -812,6 +812,7 @@ static int irq_polarity(int idx)
		return IOAPIC_POL_HIGH;
	case MP_IRQPOL_RESERVED:
		pr_warn("IOAPIC: Invalid polarity: 2, defaulting to low\n");
+		/* fall through */
	case MP_IRQPOL_ACTIVE_LOW:
	default: /* Pointless default required due to do gcc stupidity */
		return IOAPIC_POL_LOW;
@@ -859,6 +860,7 @@ static int irq_trigger(int idx)
		return IOAPIC_EDGE;
	case MP_IRQTRIG_RESERVED:
		pr_warn("IOAPIC: Invalid trigger mode 2 defaulting to level\n");
+		/* fall through */
	case MP_IRQTRIG_LEVEL:
	default: /* Pointless default required due to do gcc stupidity */
		return IOAPIC_LEVEL;
diff --git a/arch/x86/kernel/cpu/cacheinfo.c b/arch/x86/kernel/cpu/cacheinfo.c
index c4d1023fb0ab..395d46f78582 100644
--- a/arch/x86/kernel/cpu/cacheinfo.c
+++ b/arch/x86/kernel/cpu/cacheinfo.c
@@ -248,6 +248,7 @@ amd_cpuid4(int leaf, union _cpuid4_leaf_eax *eax,
	switch (leaf) {
	case 1:
		l1 = &l1i;
+		/* fall through */
	case 0:
		if (!l1->val)
			return;
diff --git a/arch/x86/kernel/cpu/mtrr/cleanup.c b/arch/x86/kernel/cpu/mtrr/cleanup.c
index 3668c5df90c6..5bd011737272 100644
--- a/arch/x86/kernel/cpu/mtrr/cleanup.c
+++ b/arch/x86/kernel/cpu/mtrr/cleanup.c
@@ -296,7 +296,7 @@ range_to_mtrr_with_hole(struct var_mtrr_state *state, unsigned long basek,
			unsigned long sizek)
 {
	unsigned long hole_basek, hole_sizek;
-	unsigned long second_basek, second_sizek;
+	unsigned long second_sizek;
	unsigned long range0_basek, range0_sizek;
	unsigned long range_basek, range_sizek;
	unsigned long chunk_sizek;
@@ -304,7 +304,6 @@ range_to_mtrr_with_hole(struct var_mtrr_state *state, unsigned long basek,
 
	hole_basek = 0;
	hole_sizek = 0;
-	second_basek = 0;
	second_sizek = 0;
	chunk_sizek = state->chunk_sizek;
	gran_sizek = state->gran_sizek;
diff --git a/arch/x86/kernel/cpu/resctrl/pseudo_lock.c b/arch/x86/kernel/cpu/resctrl/pseudo_lock.c
index 14bed6af8377..604c0e3bcc83 100644
--- a/arch/x86/kernel/cpu/resctrl/pseudo_lock.c
+++ b/arch/x86/kernel/cpu/resctrl/pseudo_lock.c
@@ -34,13 +34,6 @@
 #include "pseudo_lock_event.h"
 
 /*
- * MSR_MISC_FEATURE_CONTROL register enables the modification of hardware
- * prefetcher state. Details about this register can be found in the MSR
- * tables for specific platforms found in Intel's SDM.
- */
-#define MSR_MISC_FEATURE_CONTROL	0x000001a4
-
-/*
  * The bits needed to disable hardware prefetching varies based on the
  * platform. During initialization we will discover which bits to use.
  */
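
A note on the wakeup_32.S/wakeup_64.S hunks above: ENTRY() emits a globally visible symbol, which is unnecessary for data only referenced from the same file, so the patch turns those into plain local labels. A rough C analogue of the same idea (made-up names; the real change is in assembly):

#include <stdio.h>

/* File-local state: a plain static symbol instead of an exported global. */
static unsigned long saved_sp_demo;

static void save_stack_pointer(unsigned long sp)
{
	saved_sp_demo = sp;
}

int main(void)
{
	save_stack_pointer(0x7ffe0000);
	printf("saved stack pointer: %#lx\n", saved_sp_demo);
	return 0;
}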
diff --git a/arch/x86/kernel/e820.c b/arch/x86/kernel/e820.c
index 50895c2f937d..a687d10da417 100644
--- a/arch/x86/kernel/e820.c
+++ b/arch/x86/kernel/e820.c
@@ -671,21 +671,18 @@ __init void e820__reallocate_tables(void)
	int size;
 
	size = offsetof(struct e820_table, entries) + sizeof(struct e820_entry)*e820_table->nr_entries;
-	n = kmalloc(size, GFP_KERNEL);
+	n = kmemdup(e820_table, size, GFP_KERNEL);
	BUG_ON(!n);
-	memcpy(n, e820_table, size);
	e820_table = n;
 
	size = offsetof(struct e820_table, entries) + sizeof(struct e820_entry)*e820_table_kexec->nr_entries;
-	n = kmalloc(size, GFP_KERNEL);
+	n = kmemdup(e820_table_kexec, size, GFP_KERNEL);
	BUG_ON(!n);
-	memcpy(n, e820_table_kexec, size);
	e820_table_kexec = n;
 
	size = offsetof(struct e820_table, entries) + sizeof(struct e820_entry)*e820_table_firmware->nr_entries;
-	n = kmalloc(size, GFP_KERNEL);
+	n = kmemdup(e820_table_firmware, size, GFP_KERNEL);
	BUG_ON(!n);
-	memcpy(n, e820_table_firmware, size);
	e820_table_firmware = n;
 }
 
diff --git a/arch/x86/kernel/fpu/xstate.c b/arch/x86/kernel/fpu/xstate.c
index 9cc108456d0b..d7432c2b1051 100644
--- a/arch/x86/kernel/fpu/xstate.c
+++ b/arch/x86/kernel/fpu/xstate.c
@@ -669,7 +669,7 @@ static bool is_supported_xstate_size(unsigned int test_xstate_size)
	return false;
 }
 
-static int init_xstate_size(void)
+static int __init init_xstate_size(void)
 {
	/* Recompute the context size for enabled features: */
	unsigned int possible_xstate_size;
diff --git a/arch/x86/kernel/hw_breakpoint.c b/arch/x86/kernel/hw_breakpoint.c
index 34a5c1715148..ff9bfd40429e 100644
--- a/arch/x86/kernel/hw_breakpoint.c
+++ b/arch/x86/kernel/hw_breakpoint.c
@@ -261,12 +261,8 @@ static int arch_build_bp_info(struct perf_event *bp,
		 * allow kernel breakpoints at all.
		 */
		if (attr->bp_addr >= TASK_SIZE_MAX) {
-#ifdef CONFIG_KPROBES
			if (within_kprobe_blacklist(attr->bp_addr))
				return -EINVAL;
-#else
-			return -EINVAL;
-#endif
		}
 
		hw->type = X86_BREAKPOINT_EXECUTE;
@@ -279,6 +275,7 @@ static int arch_build_bp_info(struct perf_event *bp,
			hw->len = X86_BREAKPOINT_LEN_X;
			return 0;
		}
+		/* fall through */
	default:
		return -EINVAL;
	}
diff --git a/arch/x86/kernel/kgdb.c b/arch/x86/kernel/kgdb.c
index 5db08425063e..4ff6b4cdb941 100644
--- a/arch/x86/kernel/kgdb.c
+++ b/arch/x86/kernel/kgdb.c
@@ -467,6 +467,7 @@ int kgdb_arch_handle_exception(int e_vector, int signo, int err_code,
		ptr = &remcomInBuffer[1];
		if (kgdb_hex2long(&ptr, &addr))
			linux_regs->ip = addr;
+		/* fall through */
	case 'D':
	case 'k':
		/* clear the trace bit */
diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
index c91ff9f9fe8a..ce1a67b70168 100644
--- a/arch/x86/kernel/smpboot.c
+++ b/arch/x86/kernel/smpboot.c
@@ -150,7 +150,7 @@ static inline void smpboot_restore_warm_reset_vector(void)
  */
 static void smp_callin(void)
 {
-	int cpuid, phys_id;
+	int cpuid;
 
	/*
	 * If waken up by an INIT in an 82489DX configuration
@@ -161,11 +161,6 @@ static void smp_callin(void)
	cpuid = smp_processor_id();
 
	/*
-	 * (This works even if the APIC is not enabled.)
-	 */
-	phys_id = read_apic_id();
-
-	/*
	 * the boot CPU has finished the init stage and is spinning
	 * on callin_map until we finish. We are free to set up this
	 * CPU, first the APIC. (this is probably redundant on most
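
On the e820.c hunk above: kmemdup() folds the kmalloc() + memcpy() pair into a single allocate-and-copy call, which is shorter and harder to get out of sync. A userspace analogue of the pattern, with dup_mem() as a made-up stand-in for kmemdup():

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Stand-in for kmemdup(): allocate len bytes and copy src into them. */
static void *dup_mem(const void *src, size_t len)
{
	void *p = malloc(len);

	if (p)
		memcpy(p, src, len);
	return p;
}

int main(void)
{
	const char table[] = "boot-time table to be reallocated";
	char *copy = dup_mem(table, sizeof(table));

	if (!copy)
		return 1;
	printf("%s\n", copy);
	free(copy);
	return 0;
}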
diff --git a/arch/x86/kernel/uprobes.c b/arch/x86/kernel/uprobes.c
index 843feb94a950..ccf03416e434 100644
--- a/arch/x86/kernel/uprobes.c
+++ b/arch/x86/kernel/uprobes.c
@@ -745,6 +745,7 @@ static int branch_setup_xol_ops(struct arch_uprobe *auprobe, struct insn *insn)
		 * OPCODE1() of the "short" jmp which checks the same condition.
		 */
		opc1 = OPCODE2(insn) - 0x10;
+		/* fall through */
	default:
		if (!is_cond_jmp_opcode(opc1))
			return -ENOSYS;
diff --git a/arch/x86/lib/insn-eval.c b/arch/x86/lib/insn-eval.c
index 9119d8e41f1f..cf00ab6c6621 100644
--- a/arch/x86/lib/insn-eval.c
+++ b/arch/x86/lib/insn-eval.c
@@ -179,6 +179,8 @@ static int resolve_default_seg(struct insn *insn, struct pt_regs *regs, int off)
		if (insn->addr_bytes == 2)
			return -EINVAL;
 
+		/* fall through */
+
	case -EDOM:
	case offsetof(struct pt_regs, bx):
	case offsetof(struct pt_regs, si):
diff --git a/arch/x86/mm/cpu_entry_area.c b/arch/x86/mm/cpu_entry_area.c
index 12d7e7fb4efd..19c6abf9ea31 100644
--- a/arch/x86/mm/cpu_entry_area.c
+++ b/arch/x86/mm/cpu_entry_area.c
@@ -52,7 +52,7 @@ cea_map_percpu_pages(void *cea_vaddr, void *ptr, int pages, pgprot_t prot)
		cea_set_pte(cea_vaddr, per_cpu_ptr_to_phys(ptr), prot);
 }
 
-static void percpu_setup_debug_store(int cpu)
+static void __init percpu_setup_debug_store(int cpu)
 {
 #ifdef CONFIG_CPU_SUP_INTEL
	int npages;
diff --git a/arch/x86/mm/dump_pagetables.c b/arch/x86/mm/dump_pagetables.c
index e3cdc85ce5b6..ee8f8ab46941 100644
--- a/arch/x86/mm/dump_pagetables.c
+++ b/arch/x86/mm/dump_pagetables.c
@@ -444,7 +444,6 @@ static void walk_pud_level(struct seq_file *m, struct pg_state *st, p4d_t addr,
	int i;
	pud_t *start, *pud_start;
	pgprotval_t prot, eff;
-	pud_t *prev_pud = NULL;
 
	pud_start = start = (pud_t *)p4d_page_vaddr(addr);
 
@@ -462,7 +461,6 @@ static void walk_pud_level(struct seq_file *m, struct pg_state *st, p4d_t addr,
		} else
			note_page(m, st, __pgprot(0), 0, 3);
 
-		prev_pud = start;
		start++;
	}
 }
diff --git a/arch/x86/mm/tlb.c b/arch/x86/mm/tlb.c
index 999d6d8f0bef..bc4bc7b2f075 100644
--- a/arch/x86/mm/tlb.c
+++ b/arch/x86/mm/tlb.c
@@ -685,9 +685,6 @@ void native_flush_tlb_others(const struct cpumask *cpumask,
	 * that UV should be updated so that smp_call_function_many(),
	 * etc, are optimal on UV.
	 */
-	unsigned int cpu;
-
-	cpu = smp_processor_id();
	cpumask = uv_flush_tlb_others(cpumask, info);
	if (cpumask)
		smp_call_function_many(cpumask, flush_tlb_func_remote,
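
On the __init annotations added to init_xstate_size() and percpu_setup_debug_store() above: functions that run only during boot are placed in the kernel's .init.text section so that memory can be released once initialization finishes (free_initmem() takes care of that). A rough userspace illustration of the underlying section trick — my_init below is a stand-in macro, not the kernel's real __init:

#include <stdio.h>

#define my_init __attribute__((__section__(".init.text.demo")))

static int my_init compute_buffer_size(void)
{
	/* pretend this probes hardware exactly once at startup */
	return 4096;
}

int main(void)
{
	printf("buffer size: %d\n", compute_buffer_size());
	return 0;
}

The attribute only controls which section the function's code lands in; the "discard after boot" part is kernel infrastructure, which is also why the annotation is only safe for functions that are never called after initialization.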
diff --git a/arch/x86/platform/uv/tlb_uv.c b/arch/x86/platform/uv/tlb_uv.c
index a4130b84d1ff..2c53b0f19329 100644
--- a/arch/x86/platform/uv/tlb_uv.c
+++ b/arch/x86/platform/uv/tlb_uv.c
@@ -2010,8 +2010,7 @@ static void make_per_cpu_thp(struct bau_control *smaster)
	int cpu;
	size_t hpsz = sizeof(struct hub_and_pnode) * num_possible_cpus();
 
-	smaster->thp = kmalloc_node(hpsz, GFP_KERNEL, smaster->osnode);
-	memset(smaster->thp, 0, hpsz);
+	smaster->thp = kzalloc_node(hpsz, GFP_KERNEL, smaster->osnode);
	for_each_present_cpu(cpu) {
		smaster->thp[cpu].pnode = uv_cpu_hub_info(cpu)->pnode;
		smaster->thp[cpu].uvhub = uv_cpu_hub_info(cpu)->numa_blade_id;
@@ -2135,15 +2134,12 @@ static int __init summarize_uvhub_sockets(int nuvhubs,
 static int __init init_per_cpu(int nuvhubs, int base_part_pnode)
 {
	unsigned char *uvhub_mask;
-	void *vp;
	struct uvhub_desc *uvhub_descs;
 
	if (is_uv3_hub() || is_uv2_hub() || is_uv1_hub())
		timeout_us = calculate_destination_timeout();
 
-	vp = kmalloc_array(nuvhubs, sizeof(struct uvhub_desc), GFP_KERNEL);
-	uvhub_descs = (struct uvhub_desc *)vp;
-	memset(uvhub_descs, 0, nuvhubs * sizeof(struct uvhub_desc));
+	uvhub_descs = kcalloc(nuvhubs, sizeof(struct uvhub_desc), GFP_KERNEL);
	uvhub_mask = kzalloc((nuvhubs+7)/8, GFP_KERNEL);
 
	if (get_cpu_topology(base_part_pnode, uvhub_descs, uvhub_mask))
diff --git a/include/linux/kprobes.h b/include/linux/kprobes.h
index e07e91daaacc..201f0f2683f2 100644
--- a/include/linux/kprobes.h
+++ b/include/linux/kprobes.h
@@ -442,6 +442,11 @@ static inline int enable_kprobe(struct kprobe *kp)
 {
	return -ENOSYS;
 }
+
+static inline bool within_kprobe_blacklist(unsigned long addr)
+{
+	return true;
+}
 #endif /* CONFIG_KPROBES */
 static inline int disable_kretprobe(struct kretprobe *rp)
 {
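
On the final kprobes.h hunk: adding a static inline within_kprobe_blacklist() stub for the !CONFIG_KPROBES case is what lets arch_build_bp_info() in hw_breakpoint.c drop its #ifdef — with kprobes disabled the stub returns true, so a kernel-address breakpoint is still rejected with -EINVAL exactly as before. A generic sketch of that idiom with made-up names (CONFIG_FOO, foo_addr_is_reserved() and try_place_probe() are not real kernel symbols):

#include <stdio.h>
#include <stdbool.h>

#ifdef CONFIG_FOO
static inline bool foo_addr_is_reserved(unsigned long addr)
{
	return addr < 0x1000;	/* stand-in for the real check */
}
#else
/* Feature compiled out: a conservative stub keeps callers ifdef-free. */
static inline bool foo_addr_is_reserved(unsigned long addr)
{
	return true;
}
#endif

static int try_place_probe(unsigned long addr)
{
	if (foo_addr_is_reserved(addr))
		return -1;	/* caller needs no #ifdef of its own */
	return 0;
}

int main(void)
{
	printf("placing probe at 0x1000: %d\n", try_place_probe(0x1000));
	return 0;
}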