Diffstat (limited to 'include')
-rw-r--r--  include/acpi/acpi_bus.h             |   4
-rw-r--r--  include/asm-generic/pgtable.h       |   7
-rw-r--r--  include/asm-generic/preempt.h       |  35
-rw-r--r--  include/linux/auxvec.h              |   2
-rw-r--r--  include/linux/libata.h              |   1
-rw-r--r--  include/linux/lockref.h             |   2
-rw-r--r--  include/linux/math64.h              |  30
-rw-r--r--  include/linux/migrate.h             |  12
-rw-r--r--  include/linux/mm.h                  |   6
-rw-r--r--  include/linux/mm_types.h            |  52
-rw-r--r--  include/linux/percpu-defs.h         |   1
-rw-r--r--  include/linux/pstore.h              |   3
-rw-r--r--  include/linux/reboot.h              |   1
-rw-r--r--  include/linux/sched.h               |   5
-rw-r--r--  include/rdma/ib_verbs.h             |   2
-rw-r--r--  include/target/target_core_base.h   |   5
-rw-r--r--  include/uapi/drm/vmwgfx_drm.h       |   1
-rw-r--r--  include/uapi/linux/perf_event.h     |   1
-rw-r--r--  include/xen/interface/io/blkif.h    |  10
19 files changed, 131 insertions(+), 49 deletions(-)
diff --git a/include/acpi/acpi_bus.h b/include/acpi/acpi_bus.h
index c602c7718421..ddabed1f51c2 100644
--- a/include/acpi/acpi_bus.h
+++ b/include/acpi/acpi_bus.h
@@ -169,7 +169,8 @@ struct acpi_device_flags {
u32 ejectable:1;
u32 power_manageable:1;
u32 match_driver:1;
- u32 reserved:27;
+ u32 no_hotplug:1;
+ u32 reserved:26;
};
/* File System */
@@ -344,6 +345,7 @@ extern struct kobject *acpi_kobj;
extern int acpi_bus_generate_netlink_event(const char*, const char*, u8, int);
void acpi_bus_private_data_handler(acpi_handle, void *);
int acpi_bus_get_private_data(acpi_handle, void **);
+void acpi_bus_no_hotplug(acpi_handle handle);
extern int acpi_notifier_call_chain(struct acpi_device *, u32, u32);
extern int register_acpi_notifier(struct notifier_block *);
extern int unregister_acpi_notifier(struct notifier_block *);
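The new no_hotplug bit pairs with acpi_bus_no_hotplug(): a driver that manages a device itself (the radeon/nouveau VGA-switcheroo case motivated this) can tell the ACPI core to stop acting on hotplug/eject events for it. A minimal, hypothetical caller sketch -- "dev_handle" stands in for whatever acpi_handle the driver already holds:

  #include <acpi/acpi_bus.h>

  static void example_claim_device(acpi_handle dev_handle)
  {
          /* Sets flags.no_hotplug, so ACPI-initiated hotplug is ignored. */
          acpi_bus_no_hotplug(dev_handle);
  }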
diff --git a/include/asm-generic/pgtable.h b/include/asm-generic/pgtable.h
index f330d28e4d0e..db0923458940 100644
--- a/include/asm-generic/pgtable.h
+++ b/include/asm-generic/pgtable.h
@@ -217,7 +217,7 @@ static inline int pmd_same(pmd_t pmd_a, pmd_t pmd_b)
#endif
#ifndef pte_accessible
-# define pte_accessible(pte) ((void)(pte),1)
+# define pte_accessible(mm, pte) ((void)(pte), 1)
#endif
#ifndef flush_tlb_fix_spurious_fault
@@ -599,11 +599,10 @@ static inline int pmd_none_or_trans_huge_or_clear_bad(pmd_t *pmd)
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
barrier();
#endif
- if (pmd_none(pmdval))
+ if (pmd_none(pmdval) || pmd_trans_huge(pmdval))
return 1;
if (unlikely(pmd_bad(pmdval))) {
- if (!pmd_trans_huge(pmdval))
- pmd_clear_bad(pmd);
+ pmd_clear_bad(pmd);
return 1;
}
return 0;
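The rewritten check reports a transparent-huge pmd as "none or huge" up front instead of letting it reach pmd_bad(), where a pmd under splitting or migration can look transiently bad and would have been wrongly cleared. Callers keep the usual pattern; an illustrative walker fragment (not from this patch):

  if (pmd_none_or_trans_huge_or_clear_bad(pmd))
          continue;       /* empty, huge, or just-cleared bad pmd: no pte level */
  pte = pte_offset_map(pmd, addr);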
diff --git a/include/asm-generic/preempt.h b/include/asm-generic/preempt.h
index ddf2b420ac8f..1cd3f5d767a8 100644
--- a/include/asm-generic/preempt.h
+++ b/include/asm-generic/preempt.h
@@ -3,13 +3,11 @@
#include <linux/thread_info.h>
-/*
- * We mask the PREEMPT_NEED_RESCHED bit so as not to confuse all current users
- * that think a non-zero value indicates we cannot preempt.
- */
+#define PREEMPT_ENABLED (0)
+
static __always_inline int preempt_count(void)
{
- return current_thread_info()->preempt_count & ~PREEMPT_NEED_RESCHED;
+ return current_thread_info()->preempt_count;
}
static __always_inline int *preempt_count_ptr(void)
@@ -17,11 +15,6 @@ static __always_inline int *preempt_count_ptr(void)
return &current_thread_info()->preempt_count;
}
-/*
- * We now loose PREEMPT_NEED_RESCHED and cause an extra reschedule; however the
- * alternative is loosing a reschedule. Better schedule too often -- also this
- * should be a very rare operation.
- */
static __always_inline void preempt_count_set(int pc)
{
*preempt_count_ptr() = pc;
@@ -41,28 +34,17 @@ static __always_inline void preempt_count_set(int pc)
task_thread_info(p)->preempt_count = PREEMPT_ENABLED; \
} while (0)
-/*
- * We fold the NEED_RESCHED bit into the preempt count such that
- * preempt_enable() can decrement and test for needing to reschedule with a
- * single instruction.
- *
- * We invert the actual bit, so that when the decrement hits 0 we know we both
- * need to resched (the bit is cleared) and can resched (no preempt count).
- */
-
static __always_inline void set_preempt_need_resched(void)
{
- *preempt_count_ptr() &= ~PREEMPT_NEED_RESCHED;
}
static __always_inline void clear_preempt_need_resched(void)
{
- *preempt_count_ptr() |= PREEMPT_NEED_RESCHED;
}
static __always_inline bool test_preempt_need_resched(void)
{
- return !(*preempt_count_ptr() & PREEMPT_NEED_RESCHED);
+ return false;
}
/*
@@ -81,7 +63,12 @@ static __always_inline void __preempt_count_sub(int val)
static __always_inline bool __preempt_count_dec_and_test(void)
{
- return !--*preempt_count_ptr();
+ /*
+ * Because load-store architectures cannot do per-cpu atomic
+ * operations, we cannot fold PREEMPT_NEED_RESCHED into the count
+ * here: the folded bit might get lost.
+ */
+ return !--*preempt_count_ptr() && tif_need_resched();
}
/*
@@ -89,7 +76,7 @@ static __always_inline bool __preempt_count_dec_and_test(void)
*/
static __always_inline bool should_resched(void)
{
- return unlikely(!*preempt_count_ptr());
+ return unlikely(!preempt_count() && tif_need_resched());
}
#ifdef CONFIG_PREEMPT
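The generic variant cannot fold NEED_RESCHED into the count: as the added comment notes, load-store architectures have no atomic per-cpu operations, so the folded bit could be lost; hence the explicit tif_need_resched() test. Roughly what preempt_enable() comes out as on top of this header (paraphrased; the real macro lives in <linux/preempt.h>):

  #define preempt_enable() \
  do { \
          barrier(); \
          if (unlikely(__preempt_count_dec_and_test())) \
                  __preempt_schedule(); \
  } while (0)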
diff --git a/include/linux/auxvec.h b/include/linux/auxvec.h
index 669fef5c745a..3e0fbe441763 100644
--- a/include/linux/auxvec.h
+++ b/include/linux/auxvec.h
@@ -3,6 +3,6 @@
#include <uapi/linux/auxvec.h>
-#define AT_VECTOR_SIZE_BASE 19 /* NEW_AUX_ENT entries in auxiliary table */
+#define AT_VECTOR_SIZE_BASE 20 /* NEW_AUX_ENT entries in auxiliary table */
/* number of "#define AT_.*" above, minus {AT_NULL, AT_IGNORE, AT_NOTELF} */
#endif /* _LINUX_AUXVEC_H */
diff --git a/include/linux/libata.h b/include/linux/libata.h
index 0e23c26485f4..9b503376738f 100644
--- a/include/linux/libata.h
+++ b/include/linux/libata.h
@@ -418,6 +418,7 @@ enum {
ATA_HORKAGE_DUMP_ID = (1 << 16), /* dump IDENTIFY data */
ATA_HORKAGE_MAX_SEC_LBA48 = (1 << 17), /* Set max sects to 65535 */
ATA_HORKAGE_ATAPI_DMADIR = (1 << 18), /* device requires dmadir */
+ ATA_HORKAGE_NO_NCQ_TRIM = (1 << 19), /* don't use queued TRIM */
/* DMA mask for user DMA control: User visible values; DO NOT
renumber */
diff --git a/include/linux/lockref.h b/include/linux/lockref.h
index c8929c3832db..4bfde0e99ed5 100644
--- a/include/linux/lockref.h
+++ b/include/linux/lockref.h
@@ -19,7 +19,7 @@
#define USE_CMPXCHG_LOCKREF \
(IS_ENABLED(CONFIG_ARCH_USE_CMPXCHG_LOCKREF) && \
- IS_ENABLED(CONFIG_SMP) && !BLOATED_SPINLOCKS)
+ IS_ENABLED(CONFIG_SMP) && SPINLOCK_SIZE <= 4)
struct lockref {
union {
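SPINLOCK_SIZE is generated at build time, replacing the coarser BLOATED_SPINLOCKS symbol; the 4-byte ceiling is what lets the lock and the reference count share a single 64-bit word. For orientation, a sketch of the struct this guards (reconstructed from the same header, abbreviated):

  struct lockref {
          union {
                  aligned_u64 lock_count;         /* one cmpxchg covers both */
                  struct {
                          spinlock_t lock;        /* must fit in 4 bytes */
                          int count;
                  };
          };
  };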
diff --git a/include/linux/math64.h b/include/linux/math64.h
index 69ed5f5e9f6e..c45c089bfdac 100644
--- a/include/linux/math64.h
+++ b/include/linux/math64.h
@@ -133,4 +133,34 @@ __iter_div_u64_rem(u64 dividend, u32 divisor, u64 *remainder)
return ret;
}
+#if defined(CONFIG_ARCH_SUPPORTS_INT128) && defined(__SIZEOF_INT128__)
+
+#ifndef mul_u64_u32_shr
+static inline u64 mul_u64_u32_shr(u64 a, u32 mul, unsigned int shift)
+{
+ return (u64)(((unsigned __int128)a * mul) >> shift);
+}
+#endif /* mul_u64_u32_shr */
+
+#else
+
+#ifndef mul_u64_u32_shr
+static inline u64 mul_u64_u32_shr(u64 a, u32 mul, unsigned int shift)
+{
+ u32 ah, al;
+ u64 ret;
+
+ al = a;
+ ah = a >> 32;
+
+ ret = ((u64)al * mul) >> shift;
+ if (ah)
+ ret += ((u64)ah * mul) << (32 - shift);
+
+ return ret;
+}
+#endif /* mul_u64_u32_shr */
+
+#endif
+
#endif /* _LINUX_MATH64_H */
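When no 128-bit type is available, the fallback splits the 64-bit operand into 32-bit halves, shifts each partial product, and recombines them; it implicitly assumes shift <= 32 and a result that fits in 64 bits. A stand-alone demonstration of the same arithmetic, runnable in plain C:

  #include <stdint.h>
  #include <stdio.h>

  /* Same algorithm as the non-__int128 fallback above. */
  static uint64_t demo_mul_u64_u32_shr(uint64_t a, uint32_t mul, unsigned shift)
  {
          uint32_t al = (uint32_t)a, ah = (uint32_t)(a >> 32);
          uint64_t ret = ((uint64_t)al * mul) >> shift;

          if (ah)
                  ret += ((uint64_t)ah * mul) << (32 - shift);
          return ret;
  }

  int main(void)
  {
          /* (3 << 33) * 5 >> 3 == 15 << 30 == 16106127360, computed
           * without ever forming the 128-bit intermediate product. */
          printf("%llu\n", (unsigned long long)
                 demo_mul_u64_u32_shr(3ULL << 33, 5, 3));
          return 0;
  }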
diff --git a/include/linux/migrate.h b/include/linux/migrate.h
index f5096b58b20d..f015c059e159 100644
--- a/include/linux/migrate.h
+++ b/include/linux/migrate.h
@@ -55,7 +55,8 @@ extern int migrate_huge_page_move_mapping(struct address_space *mapping,
struct page *newpage, struct page *page);
extern int migrate_page_move_mapping(struct address_space *mapping,
struct page *newpage, struct page *page,
- struct buffer_head *head, enum migrate_mode mode);
+ struct buffer_head *head, enum migrate_mode mode,
+ int extra_count);
#else
static inline void putback_lru_pages(struct list_head *l) {}
@@ -90,10 +91,19 @@ static inline int migrate_huge_page_move_mapping(struct address_space *mapping,
#endif /* CONFIG_MIGRATION */
#ifdef CONFIG_NUMA_BALANCING
+extern bool pmd_trans_migrating(pmd_t pmd);
+extern void wait_migrate_huge_page(struct anon_vma *anon_vma, pmd_t *pmd);
extern int migrate_misplaced_page(struct page *page,
struct vm_area_struct *vma, int node);
extern bool migrate_ratelimited(int node);
#else
+static inline bool pmd_trans_migrating(pmd_t pmd)
+{
+ return false;
+}
+static inline void wait_migrate_huge_page(struct anon_vma *anon_vma, pmd_t *pmd)
+{
+}
static inline int migrate_misplaced_page(struct page *page,
struct vm_area_struct *vma, int node)
{
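pmd_trans_migrating() and wait_migrate_huge_page() give the NUMA hinting fault path a way to notice a huge page whose migration is already in flight and sleep until it finishes, rather than racing with it; the stubs keep !CONFIG_NUMA_BALANCING callers unconditional. An illustrative fault-path fragment (local names are placeholders):

  if (unlikely(pmd_trans_migrating(*pmdp))) {
          wait_migrate_huge_page(vma->anon_vma, pmdp);
          goto out;       /* the fault is retried once migration completes */
  }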
diff --git a/include/linux/mm.h b/include/linux/mm.h
index 1cedd000cf29..35527173cf50 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -1317,7 +1317,7 @@ static inline pmd_t *pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long a
#endif /* CONFIG_MMU && !__ARCH_HAS_4LEVEL_HACK */
#if USE_SPLIT_PTE_PTLOCKS
-#if BLOATED_SPINLOCKS
+#if ALLOC_SPLIT_PTLOCKS
extern bool ptlock_alloc(struct page *page);
extern void ptlock_free(struct page *page);
@@ -1325,7 +1325,7 @@ static inline spinlock_t *ptlock_ptr(struct page *page)
{
return page->ptl;
}
-#else /* BLOATED_SPINLOCKS */
+#else /* ALLOC_SPLIT_PTLOCKS */
static inline bool ptlock_alloc(struct page *page)
{
return true;
@@ -1339,7 +1339,7 @@ static inline spinlock_t *ptlock_ptr(struct page *page)
{
return &page->ptl;
}
-#endif /* BLOATED_SPINLOCKS */
+#endif /* ALLOC_SPLIT_PTLOCKS */
static inline spinlock_t *pte_lockptr(struct mm_struct *mm, pmd_t *pmd)
{
diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
index bd299418a934..290901a8c1de 100644
--- a/include/linux/mm_types.h
+++ b/include/linux/mm_types.h
@@ -26,6 +26,7 @@ struct address_space;
#define USE_SPLIT_PTE_PTLOCKS (NR_CPUS >= CONFIG_SPLIT_PTLOCK_CPUS)
#define USE_SPLIT_PMD_PTLOCKS (USE_SPLIT_PTE_PTLOCKS && \
IS_ENABLED(CONFIG_ARCH_ENABLE_SPLIT_PMD_PTLOCK))
+#define ALLOC_SPLIT_PTLOCKS (SPINLOCK_SIZE > BITS_PER_LONG/8)
/*
* Each physical page in the system has a struct page associated with
@@ -155,7 +156,7 @@ struct page {
* system if PG_buddy is set.
*/
#if USE_SPLIT_PTE_PTLOCKS
-#if BLOATED_SPINLOCKS
+#if ALLOC_SPLIT_PTLOCKS
spinlock_t *ptl;
#else
spinlock_t ptl;
@@ -443,6 +444,14 @@ struct mm_struct {
/* numa_scan_seq prevents two threads setting pte_numa */
int numa_scan_seq;
#endif
+#if defined(CONFIG_NUMA_BALANCING) || defined(CONFIG_COMPACTION)
+ /*
+ * An operation with batched TLB flushing is going on. Anything that
+ * can move process memory needs to flush the TLB when moving a
+ * PROT_NONE or PROT_NUMA mapped page.
+ */
+ bool tlb_flush_pending;
+#endif
struct uprobes_state uprobes_state;
};
@@ -459,4 +468,45 @@ static inline cpumask_t *mm_cpumask(struct mm_struct *mm)
return mm->cpu_vm_mask_var;
}
+#if defined(CONFIG_NUMA_BALANCING) || defined(CONFIG_COMPACTION)
+/*
+ * Memory barriers to keep this state in sync are graciously provided by
+ * the page table locks, outside of which no page table modifications happen.
+ * The barriers below prevent the compiler from re-ordering the instructions
+ * around the memory barriers that are already present in the code.
+ */
+static inline bool mm_tlb_flush_pending(struct mm_struct *mm)
+{
+ barrier();
+ return mm->tlb_flush_pending;
+}
+static inline void set_tlb_flush_pending(struct mm_struct *mm)
+{
+ mm->tlb_flush_pending = true;
+
+ /*
+ * Guarantee that the tlb_flush_pending store does not leak into the
+ * critical section updating the page tables
+ */
+ smp_mb__before_spinlock();
+}
+/* Clearing is done after a TLB flush, which also provides a barrier. */
+static inline void clear_tlb_flush_pending(struct mm_struct *mm)
+{
+ barrier();
+ mm->tlb_flush_pending = false;
+}
+#else
+static inline bool mm_tlb_flush_pending(struct mm_struct *mm)
+{
+ return false;
+}
+static inline void set_tlb_flush_pending(struct mm_struct *mm)
+{
+}
+static inline void clear_tlb_flush_pending(struct mm_struct *mm)
+{
+}
+#endif
+
#endif /* _LINUX_MM_TYPES_H */
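Putting the pieces together: a batched-flush operation raises tlb_flush_pending before rewriting ptes and clears it after the flush, while a racing reader uses mm_tlb_flush_pending() to tell that a PROT_NONE/PROT_NUMA pte it observes may still be live in another CPU's TLB. A condensed sketch of both sides (names are illustrative):

  /* Writer, e.g. a change_protection()-style batcher: */
  set_tlb_flush_pending(mm);
  /* ... rewrite ptes under the page-table lock ... */
  flush_tlb_range(vma, start, end);
  clear_tlb_flush_pending(mm);

  /* Reader racing with the writer: */
  if (mm_tlb_flush_pending(mm))
          flush_tlb_page(vma, address);   /* stale entries may exist; be safe */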
diff --git a/include/linux/percpu-defs.h b/include/linux/percpu-defs.h
index 57e890abe1f0..a5fc7d01aad6 100644
--- a/include/linux/percpu-defs.h
+++ b/include/linux/percpu-defs.h
@@ -69,6 +69,7 @@
__PCPU_DUMMY_ATTRS char __pcpu_scope_##name; \
extern __PCPU_DUMMY_ATTRS char __pcpu_unique_##name; \
__PCPU_DUMMY_ATTRS char __pcpu_unique_##name; \
+ extern __PCPU_ATTRS(sec) __typeof__(type) name; \
__PCPU_ATTRS(sec) PER_CPU_DEF_ATTRIBUTES __weak \
__typeof__(type) name
#else
diff --git a/include/linux/pstore.h b/include/linux/pstore.h
index abd437d0a8a7..ece0c6bbfcc5 100644
--- a/include/linux/pstore.h
+++ b/include/linux/pstore.h
@@ -51,6 +51,7 @@ struct pstore_info {
char *buf;
size_t bufsize;
struct mutex read_mutex; /* serialize open/read/close */
+ int flags;
int (*open)(struct pstore_info *psi);
int (*close)(struct pstore_info *psi);
ssize_t (*read)(u64 *id, enum pstore_type_id *type,
@@ -70,6 +71,8 @@ struct pstore_info {
void *data;
};
+#define PSTORE_FLAGS_FRAGILE 1
+
#ifdef CONFIG_PSTORE
extern int pstore_register(struct pstore_info *);
extern bool pstore_cannot_block_path(enum kmsg_dump_reason reason);
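PSTORE_FLAGS_FRAGILE lets a backend declare that it cannot absorb high-traffic front ends, so pstore attaches only the oops/panic dump path to it. A hypothetical backend sketch:

  static struct pstore_info example_psinfo = {
          .owner  = THIS_MODULE,
          .name   = "example",
          .flags  = PSTORE_FLAGS_FRAGILE, /* no console/ftrace logging */
          /* .open, .close, .read, .write, .erase as usual */
  };

  /* registered with: pstore_register(&example_psinfo); */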
diff --git a/include/linux/reboot.h b/include/linux/reboot.h
index 8e00f9f6f963..9e7db9e73cc1 100644
--- a/include/linux/reboot.h
+++ b/include/linux/reboot.h
@@ -43,6 +43,7 @@ extern int unregister_reboot_notifier(struct notifier_block *);
* Architecture-specific implementations of sys_reboot commands.
*/
+extern void migrate_to_reboot_cpu(void);
extern void machine_restart(char *cmd);
extern void machine_halt(void);
extern void machine_power_off(void);
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 768b037dfacb..53f97eb8dbc7 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -440,8 +440,6 @@ struct task_cputime {
.sum_exec_runtime = 0, \
}
-#define PREEMPT_ENABLED (PREEMPT_NEED_RESCHED)
-
#ifdef CONFIG_PREEMPT_COUNT
#define PREEMPT_DISABLED (1 + PREEMPT_ENABLED)
#else
@@ -932,7 +930,8 @@ struct pipe_inode_info;
struct uts_namespace;
struct load_weight {
- unsigned long weight, inv_weight;
+ unsigned long weight;
+ u32 inv_weight;
};
struct sched_avg {
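Narrowing inv_weight is safe because it caches (roughly) 2^32 / weight, which always fits in 32 bits; it exists so the scheduler can replace a division by weight with a multiply-and-shift, which is exactly what the mul_u64_u32_shr() helper added above provides. Sketch of the arithmetic (constants from memory, not from this hunk):

  /*
   *   inv_weight ~= 2^32 / weight
   *   delta / weight ~= (delta * inv_weight) >> 32
   *                   = mul_u64_u32_shr(delta, inv_weight, 32)
   *
   * e.g. the nice-0 weight 1024 yields inv_weight ~= 2^22, so the
   * divide on the scheduling hot path becomes one multiply and a shift.
   */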
diff --git a/include/rdma/ib_verbs.h b/include/rdma/ib_verbs.h
index 979874c627ee..61e1935c91b1 100644
--- a/include/rdma/ib_verbs.h
+++ b/include/rdma/ib_verbs.h
@@ -978,7 +978,7 @@ struct ib_uobject {
};
struct ib_udata {
- void __user *inbuf;
+ const void __user *inbuf;
void __user *outbuf;
size_t inlen;
size_t outlen;
diff --git a/include/target/target_core_base.h b/include/target/target_core_base.h
index 45412a6afa69..321301c0a643 100644
--- a/include/target/target_core_base.h
+++ b/include/target/target_core_base.h
@@ -517,10 +517,6 @@ struct se_node_acl {
u32 acl_index;
#define MAX_ACL_TAG_SIZE 64
char acl_tag[MAX_ACL_TAG_SIZE];
- u64 num_cmds;
- u64 read_bytes;
- u64 write_bytes;
- spinlock_t stats_lock;
/* Used for PR SPEC_I_PT=1 and REGISTER_AND_MOVE */
atomic_t acl_pr_ref_count;
struct se_dev_entry **device_list;
@@ -624,6 +620,7 @@ struct se_dev_attrib {
u32 unmap_granularity;
u32 unmap_granularity_alignment;
u32 max_write_same_len;
+ u32 max_bytes_per_io;
struct se_device *da_dev;
struct config_group da_group;
};
diff --git a/include/uapi/drm/vmwgfx_drm.h b/include/uapi/drm/vmwgfx_drm.h
index bcb0912afe7a..f854ca4a1372 100644
--- a/include/uapi/drm/vmwgfx_drm.h
+++ b/include/uapi/drm/vmwgfx_drm.h
@@ -75,6 +75,7 @@
#define DRM_VMW_PARAM_FIFO_CAPS 4
#define DRM_VMW_PARAM_MAX_FB_SIZE 5
#define DRM_VMW_PARAM_FIFO_HW_VERSION 6
+#define DRM_VMW_PARAM_MAX_SURF_MEMORY 7
/**
* struct drm_vmw_getparam_arg
diff --git a/include/uapi/linux/perf_event.h b/include/uapi/linux/perf_event.h
index e1802d6153ae..959d454f76a1 100644
--- a/include/uapi/linux/perf_event.h
+++ b/include/uapi/linux/perf_event.h
@@ -679,6 +679,7 @@ enum perf_event_type {
*
* { u64 weight; } && PERF_SAMPLE_WEIGHT
* { u64 data_src; } && PERF_SAMPLE_DATA_SRC
+ * { u64 transaction; } && PERF_SAMPLE_TRANSACTION
* };
*/
PERF_RECORD_SAMPLE = 9,
diff --git a/include/xen/interface/io/blkif.h b/include/xen/interface/io/blkif.h
index 65e12099ef89..ae665ac59c36 100644
--- a/include/xen/interface/io/blkif.h
+++ b/include/xen/interface/io/blkif.h
@@ -146,7 +146,7 @@ struct blkif_request_segment_aligned {
struct blkif_request_rw {
uint8_t nr_segments; /* number of segments */
blkif_vdev_t handle; /* only for read/write requests */
-#ifdef CONFIG_X86_64
+#ifndef CONFIG_X86_32
uint32_t _pad1; /* offsetof(blkif_request,u.rw.id) == 8 */
#endif
uint64_t id; /* private guest value, echoed in resp */
@@ -163,7 +163,7 @@ struct blkif_request_discard {
uint8_t flag; /* BLKIF_DISCARD_SECURE or zero. */
#define BLKIF_DISCARD_SECURE (1<<0) /* ignored if discard-secure=0 */
blkif_vdev_t _pad1; /* only for read/write requests */
-#ifdef CONFIG_X86_64
+#ifndef CONFIG_X86_32
uint32_t _pad2; /* offsetof(blkif_req..,u.discard.id)==8*/
#endif
uint64_t id; /* private guest value, echoed in resp */
@@ -175,7 +175,7 @@ struct blkif_request_discard {
struct blkif_request_other {
uint8_t _pad1;
blkif_vdev_t _pad2; /* only for read/write requests */
-#ifdef CONFIG_X86_64
+#ifndef CONFIG_X86_32
uint32_t _pad3; /* offsetof(blkif_req..,u.other.id)==8*/
#endif
uint64_t id; /* private guest value, echoed in resp */
@@ -184,7 +184,7 @@ struct blkif_request_other {
struct blkif_request_indirect {
uint8_t indirect_op;
uint16_t nr_segments;
-#ifdef CONFIG_X86_64
+#ifndef CONFIG_X86_32
uint32_t _pad1; /* offsetof(blkif_...,u.indirect.id) == 8 */
#endif
uint64_t id;
@@ -192,7 +192,7 @@ struct blkif_request_indirect {
blkif_vdev_t handle;
uint16_t _pad2;
grant_ref_t indirect_grefs[BLKIF_MAX_INDIRECT_PAGES_PER_REQUEST];
-#ifdef CONFIG_X86_64
+#ifndef CONFIG_X86_32
uint32_t _pad3; /* make it 64 byte aligned */
#else
uint64_t _pad3; /* make it 64 byte aligned */
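Changing the guard from CONFIG_X86_64 to !CONFIG_X86_32 makes every 64-bit architecture, not just x86-64, lay these requests out as the comments require, so 32-bit guests and 64-bit backends agree on the ring ABI. The invariant the _pad fields maintain could be spelled out with compile-time checks such as (hypothetical, not in the header):

  #include <linux/bug.h>          /* BUILD_BUG_ON() */
  #include <linux/stddef.h>       /* offsetof() */

  static inline void blkif_check_abi(void)
  {
          /* 'id' must sit at byte offset 8 in every request variant. */
          BUILD_BUG_ON(offsetof(struct blkif_request, u.rw.id) != 8);
          BUILD_BUG_ON(offsetof(struct blkif_request, u.discard.id) != 8);
          BUILD_BUG_ON(offsetof(struct blkif_request, u.other.id) != 8);
          BUILD_BUG_ON(offsetof(struct blkif_request, u.indirect.id) != 8);
  }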