author     Max Krummenacher <max.krummenacher@toradex.com>   2018-03-13 11:32:58 +0100
committer  Max Krummenacher <max.krummenacher@toradex.com>   2018-03-13 11:32:58 +0100
commit     6fb9f3c8a4992f67dcb3ce413df2e22e96b2d400 (patch)
tree       6e3071b2f179a62b027669ac2a238383293bf941 /include
parent     a126a5e5dc2fcc5cb36af14c89b440cc8e3bab30 (diff)
parent     8b5ab55d254f36e89b1b53aeac7223d2d102483e (diff)
Merge tag 'v4.4.121' into toradex_vf_4.4-next (tag: Colibri-VF_LXDE-Image_2.8b2.97-20180331)
This is the 4.4.121 stable release
Diffstat (limited to 'include')
52 files changed, 652 insertions, 141 deletions
diff --git a/include/asm-generic/asm-prototypes.h b/include/asm-generic/asm-prototypes.h
new file mode 100644
index 000000000000..df13637e4017
--- /dev/null
+++ b/include/asm-generic/asm-prototypes.h
@@ -0,0 +1,7 @@
+#include <linux/bitops.h>
+extern void *__memset(void *, int, __kernel_size_t);
+extern void *__memcpy(void *, const void *, __kernel_size_t);
+extern void *__memmove(void *, const void *, __kernel_size_t);
+extern void *memset(void *, int, __kernel_size_t);
+extern void *memcpy(void *, const void *, __kernel_size_t);
+extern void *memmove(void *, const void *, __kernel_size_t);
diff --git a/include/asm-generic/export.h b/include/asm-generic/export.h
new file mode 100644
index 000000000000..43199a049da5
--- /dev/null
+++ b/include/asm-generic/export.h
@@ -0,0 +1,94 @@
+#ifndef __ASM_GENERIC_EXPORT_H
+#define __ASM_GENERIC_EXPORT_H
+
+#ifndef KSYM_FUNC
+#define KSYM_FUNC(x) x
+#endif
+#ifdef CONFIG_64BIT
+#define __put .quad
+#ifndef KSYM_ALIGN
+#define KSYM_ALIGN 8
+#endif
+#ifndef KCRC_ALIGN
+#define KCRC_ALIGN 8
+#endif
+#else
+#define __put .long
+#ifndef KSYM_ALIGN
+#define KSYM_ALIGN 4
+#endif
+#ifndef KCRC_ALIGN
+#define KCRC_ALIGN 4
+#endif
+#endif
+
+#ifdef CONFIG_HAVE_UNDERSCORE_SYMBOL_PREFIX
+#define KSYM(name) _##name
+#else
+#define KSYM(name) name
+#endif
+
+/*
+ * note on .section use: @progbits vs %progbits nastiness doesn't matter,
+ * since we immediately emit into those sections anyway.
+ */
+.macro ___EXPORT_SYMBOL name,val,sec
+#ifdef CONFIG_MODULES
+	.globl KSYM(__ksymtab_\name)
+	.section ___ksymtab\sec+\name,"a"
+	.balign KSYM_ALIGN
+KSYM(__ksymtab_\name):
+	__put \val, KSYM(__kstrtab_\name)
+	.previous
+	.section __ksymtab_strings,"a"
+KSYM(__kstrtab_\name):
+#ifdef CONFIG_HAVE_UNDERSCORE_SYMBOL_PREFIX
+	.asciz "_\name"
+#else
+	.asciz "\name"
+#endif
+	.previous
+#ifdef CONFIG_MODVERSIONS
+	.section ___kcrctab\sec+\name,"a"
+	.balign KCRC_ALIGN
+KSYM(__kcrctab_\name):
+	__put KSYM(__crc_\name)
+	.weak KSYM(__crc_\name)
+	.previous
+#endif
+#endif
+.endm
+#undef __put
+
+#if defined(__KSYM_DEPS__)
+
+#define __EXPORT_SYMBOL(sym, val, sec)	=== __KSYM_##sym ===
+
+#elif defined(CONFIG_TRIM_UNUSED_KSYMS)
+
+#include <linux/kconfig.h>
+#include <generated/autoksyms.h>
+
+#define __EXPORT_SYMBOL(sym, val, sec)				\
+	__cond_export_sym(sym, val, sec, config_enabled(__KSYM_##sym))
+#define __cond_export_sym(sym, val, sec, conf)			\
+	___cond_export_sym(sym, val, sec, conf)
+#define ___cond_export_sym(sym, val, sec, enabled)		\
+	__cond_export_sym_##enabled(sym, val, sec)
+#define __cond_export_sym_1(sym, val, sec) ___EXPORT_SYMBOL sym, val, sec
+#define __cond_export_sym_0(sym, val, sec) /* nothing */
+
+#else
+#define __EXPORT_SYMBOL(sym, val, sec) ___EXPORT_SYMBOL sym, val, sec
+#endif
+
+#define EXPORT_SYMBOL(name)					\
+	__EXPORT_SYMBOL(name, KSYM_FUNC(KSYM(name)),)
+#define EXPORT_SYMBOL_GPL(name)					\
+	__EXPORT_SYMBOL(name, KSYM_FUNC(KSYM(name)), _gpl)
+#define EXPORT_DATA_SYMBOL(name)				\
+	__EXPORT_SYMBOL(name, KSYM(name),)
+#define EXPORT_DATA_SYMBOL_GPL(name)				\
+	__EXPORT_SYMBOL(name, KSYM(name),_gpl)
+
+#endif
diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h
index ef2e8c97e183..a461b6604fd9 100644
--- a/include/asm-generic/vmlinux.lds.h
+++ b/include/asm-generic/vmlinux.lds.h
@@ -725,7 +725,14 @@
  */
 #define PERCPU_INPUT(cacheline)						\
 	VMLINUX_SYMBOL(__per_cpu_start) = .;				\
+	VMLINUX_SYMBOL(__per_cpu_user_mapped_start) = .;		\
 	*(.data..percpu..first)						\
+	. = ALIGN(cacheline);						\
+	*(.data..percpu..user_mapped)					\
+	*(.data..percpu..user_mapped..shared_aligned)			\
+	. = ALIGN(PAGE_SIZE);						\
+	*(.data..percpu..user_mapped..page_aligned)			\
+	VMLINUX_SYMBOL(__per_cpu_user_mapped_end) = .;			\
 	. = ALIGN(PAGE_SIZE);						\
 	*(.data..percpu..page_aligned)					\
 	. = ALIGN(cacheline);						\
diff --git a/include/crypto/internal/hash.h b/include/crypto/internal/hash.h
index 9779c35f8454..dab9569f22bf 100644
--- a/include/crypto/internal/hash.h
+++ b/include/crypto/internal/hash.h
@@ -91,6 +91,8 @@ static inline bool crypto_shash_alg_has_setkey(struct shash_alg *alg)
 	return alg->setkey != shash_no_setkey;
 }
 
+bool crypto_hash_alg_has_setkey(struct hash_alg_common *halg);
+
 int crypto_init_ahash_spawn(struct crypto_ahash_spawn *spawn,
 			    struct hash_alg_common *alg,
 			    struct crypto_instance *inst);
diff --git a/include/crypto/mcryptd.h b/include/crypto/mcryptd.h
index c23ee1f7ee80..c2ff077168d3 100644
--- a/include/crypto/mcryptd.h
+++ b/include/crypto/mcryptd.h
@@ -26,6 +26,7 @@ static inline struct mcryptd_ahash *__mcryptd_ahash_cast(
 
 struct mcryptd_cpu_queue {
 	struct crypto_queue queue;
+	spinlock_t q_lock;
 	struct work_struct work;
 };
diff --git a/include/crypto/poly1305.h b/include/crypto/poly1305.h
index 894df59b74e4..d586f741cab5 100644
--- a/include/crypto/poly1305.h
+++ b/include/crypto/poly1305.h
@@ -30,8 +30,6 @@ struct poly1305_desc_ctx {
 };
 
 int crypto_poly1305_init(struct shash_desc *desc);
-int crypto_poly1305_setkey(struct crypto_shash *tfm,
-			   const u8 *key, unsigned int keylen);
 unsigned int crypto_poly1305_setdesckey(struct poly1305_desc_ctx *dctx,
 					const u8 *src, unsigned int srclen);
 int crypto_poly1305_update(struct shash_desc *desc,
diff --git a/include/linux/bpf.h b/include/linux/bpf.h
index 4f6d29c8e3d8..132585a7fbd8 100644
--- a/include/linux/bpf.h
+++ b/include/linux/bpf.h
@@ -31,16 +31,25 @@ struct bpf_map_ops {
 };
 
 struct bpf_map {
-	atomic_t refcnt;
+	/* 1st cacheline with read-mostly members of which some
+	 * are also accessed in fast-path (e.g. ops, max_entries).
+	 */
+	const struct bpf_map_ops *ops ____cacheline_aligned;
 	enum bpf_map_type map_type;
 	u32 key_size;
 	u32 value_size;
 	u32 max_entries;
 	u32 pages;
-	struct user_struct *user;
-	const struct bpf_map_ops *ops;
-	struct work_struct work;
+	bool unpriv_array;
+	/* 7 bytes hole */
+
+	/* 2nd cacheline with misc members to avoid false sharing
+	 * particularly with refcounting.
+	 */
+	struct user_struct *user ____cacheline_aligned;
+	atomic_t refcnt;
 	atomic_t usercnt;
+	struct work_struct work;
 };
 
 struct bpf_map_type_list {
@@ -141,6 +150,7 @@ struct bpf_prog_aux {
 struct bpf_array {
 	struct bpf_map map;
 	u32 elem_size;
+	u32 index_mask;
 	/* 'ownership' of prog_array is claimed by the first program that
 	 * is going to use this map or by the first program which FD is stored
 	 * in the map to make sure that all callers and callees have the same
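The new unpriv_array/index_mask fields carry the Spectre-v1 hardening for BPF array maps: lookups AND the index with a power-of-two mask, so even a mispredicted bounds check cannot read outside the allocation. A minimal userspace sketch of the masking idea (pow2_mask and all names here are mine, not the kernel code):

#include <stdint.h>
#include <stdio.h>

static uint32_t pow2_mask(uint32_t n)	/* smallest power of two >= n, minus 1 */
{
	uint32_t m = 1;

	while (m < n)
		m <<= 1;
	return m - 1;
}

int main(void)
{
	uint32_t max_entries = 5;			/* map->max_entries */
	uint32_t index_mask = pow2_mask(max_entries);	/* 0x7 */
	uint32_t idx = 1000;				/* possibly attacker controlled */

	idx &= index_mask;	/* even under speculation, idx stays in [0, 8) */
	if (idx < max_entries)
		printf("access element %u\n", idx);
	return 0;
}

The kernel applies the mask in the interpreter and the JITs; the sketch only shows why a power-of-two mask bounds the speculative window.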
diff --git a/include/linux/cacheinfo.h b/include/linux/cacheinfo.h
index 2189935075b4..a951fd10aaaa 100644
--- a/include/linux/cacheinfo.h
+++ b/include/linux/cacheinfo.h
@@ -71,6 +71,7 @@ struct cpu_cacheinfo {
 	struct cacheinfo *info_list;
 	unsigned int num_levels;
 	unsigned int num_leaves;
+	bool cpu_map_populated;
 };
 
 /*
diff --git a/include/linux/cpu.h b/include/linux/cpu.h
index 3ea9aae2387d..7e04bcd9af8e 100644
--- a/include/linux/cpu.h
+++ b/include/linux/cpu.h
@@ -40,6 +40,13 @@ extern void cpu_remove_dev_attr(struct device_attribute *attr);
 extern int cpu_add_dev_attr_group(struct attribute_group *attrs);
 extern void cpu_remove_dev_attr_group(struct attribute_group *attrs);
 
+extern ssize_t cpu_show_meltdown(struct device *dev,
+				 struct device_attribute *attr, char *buf);
+extern ssize_t cpu_show_spectre_v1(struct device *dev,
+				   struct device_attribute *attr, char *buf);
+extern ssize_t cpu_show_spectre_v2(struct device *dev,
+				   struct device_attribute *attr, char *buf);
+
 extern __printf(4, 5)
 struct device *cpu_device_create(struct device *parent, void *drvdata,
 				 const struct attribute_group **groups,
diff --git a/include/linux/cred.h b/include/linux/cred.h
index 257db64562e5..9e120c92551b 100644
--- a/include/linux/cred.h
+++ b/include/linux/cred.h
@@ -87,6 +87,7 @@ extern int set_current_groups(struct group_info *);
 extern void set_groups(struct cred *, struct group_info *);
 extern int groups_search(const struct group_info *, kgid_t);
 extern bool may_setgroups(void);
+extern void groups_sort(struct group_info *);
 
 /* access the groups "array" with this macro */
 #define GROUP_AT(gi, i) \
diff --git a/include/linux/device.h b/include/linux/device.h
index 7075a2485ed3..834000903525 100644
--- a/include/linux/device.h
+++ b/include/linux/device.h
@@ -1272,8 +1272,11 @@ do { \
 	dev_printk(KERN_DEBUG, dev, fmt, ##__VA_ARGS__); \
 } while (0)
 #else
-#define dev_dbg_ratelimited(dev, fmt, ...) \
-	no_printk(KERN_DEBUG pr_fmt(fmt), ##__VA_ARGS__)
+#define dev_dbg_ratelimited(dev, fmt, ...)			\
+do {								\
+	if (0)							\
+		dev_printk(KERN_DEBUG, dev, fmt, ##__VA_ARGS__); \
+} while (0)
#endif
 
 #ifdef VERBOSE_DEBUG
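The dev_dbg_ratelimited() change swaps no_printk() for a dead "if (0)" call so the dev argument and the format string keep being type-checked exactly as in the enabled build, while the compiler discards the code. A standalone sketch of the idiom (my_dbg and debug_enabled are made up):

#include <stdio.h>

#define debug_enabled 0

#define my_dbg(fmt, ...)				\
do {							\
	if (debug_enabled)				\
		fprintf(stderr, fmt, ##__VA_ARGS__);	\
} while (0)

int main(void)
{
	my_dbg("value=%d\n", 42);	/* no output, but a "%d" vs pointer mismatch would still warn */
	return 0;
}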
diff --git a/include/linux/fdtable.h b/include/linux/fdtable.h
index 5295535b60c6..a7b7a050bfa8 100644
--- a/include/linux/fdtable.h
+++ b/include/linux/fdtable.h
@@ -9,6 +9,7 @@
 #include <linux/compiler.h>
 #include <linux/spinlock.h>
 #include <linux/rcupdate.h>
+#include <linux/nospec.h>
 #include <linux/types.h>
 #include <linux/init.h>
 #include <linux/fs.h>
@@ -81,8 +82,10 @@ static inline struct file *__fcheck_files(struct files_struct *files, unsigned i
 {
 	struct fdtable *fdt = rcu_dereference_raw(files->fdt);
 
-	if (fd < fdt->max_fds)
+	if (fd < fdt->max_fds) {
+		fd = array_index_nospec(fd, fdt->max_fds);
 		return rcu_dereference_raw(fdt->fd[fd]);
+	}
 	return NULL;
 }
diff --git a/include/linux/filter.h b/include/linux/filter.h
index ccb98b459c59..677fa3b42194 100644
--- a/include/linux/filter.h
+++ b/include/linux/filter.h
@@ -466,6 +466,9 @@ u64 __bpf_call_base(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5);
 void bpf_int_jit_compile(struct bpf_prog *fp);
 bool bpf_helper_changes_skb_data(void *func);
 
+struct bpf_prog *bpf_patch_insn_single(struct bpf_prog *prog, u32 off,
+				       const struct bpf_insn *patch, u32 len);
+
 #ifdef CONFIG_BPF_JIT
 typedef void (*bpf_jit_fill_hole_t)(void *area, unsigned int size);
diff --git a/include/linux/fs.h b/include/linux/fs.h
index c8decb7075d6..f746a59fcc88 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -3066,5 +3066,6 @@ static inline bool dir_relax(struct inode *inode)
 }
 
 extern bool path_noexec(const struct path *path);
+extern void inode_nohighmem(struct inode *inode);
 
 #endif /* _LINUX_FS_H */
diff --git a/include/linux/fscache.h b/include/linux/fscache.h
index 115bb81912cc..94a8aae8f9e2 100644
--- a/include/linux/fscache.h
+++ b/include/linux/fscache.h
@@ -764,7 +764,7 @@ bool fscache_maybe_release_page(struct fscache_cookie *cookie,
 {
 	if (fscache_cookie_valid(cookie) && PageFsCache(page))
 		return __fscache_maybe_release_page(cookie, page, gfp);
-	return false;
+	return true;
 }
 
 /**
diff --git a/include/linux/init.h b/include/linux/init.h
index b449f378f995..5c4a3b7524e0 100644
--- a/include/linux/init.h
+++ b/include/linux/init.h
@@ -4,6 +4,13 @@
 #include <linux/compiler.h>
 #include <linux/types.h>
 
+/* Built-in __init functions needn't be compiled with retpoline */
+#if defined(RETPOLINE) && !defined(MODULE)
+#define __noretpoline __attribute__((indirect_branch("keep")))
+#else
+#define __noretpoline
+#endif
+
 /* These macros are used to mark some functions or
  * initialized data (doesn't apply to uninitialized data)
  * as `initialization' functions. The kernel can take this
@@ -39,7 +46,7 @@
 
 /* These are for everybody (although not all archs will actually
    discard it in modules) */
-#define __init		__section(.init.text) __cold notrace
+#define __init		__section(.init.text) __cold notrace __noretpoline
 #define __initdata	__section(.init.data)
 #define __initconst	__constsection(.init.rodata)
 #define __exitdata	__section(.exit.data)
diff --git a/include/linux/ipv6.h b/include/linux/ipv6.h
index 402753bccafa..7b8e3afcc291 100644
--- a/include/linux/ipv6.h
+++ b/include/linux/ipv6.h
@@ -215,7 +215,8 @@ struct ipv6_pinfo {
 				 * 100: prefer care-of address
 				 */
 				dontfrag:1,
-				autoflowlabel:1;
+				autoflowlabel:1,
+				autoflowlabel_set:1;
 	__u8			min_hopcount;
 	__u8			tclass;
 	__be32			rcv_flowinfo;
diff --git a/include/linux/kaiser.h b/include/linux/kaiser.h
new file mode 100644
index 000000000000..b56c19010480
--- /dev/null
+++ b/include/linux/kaiser.h
@@ -0,0 +1,52 @@
+#ifndef _LINUX_KAISER_H
+#define _LINUX_KAISER_H
+
+#ifdef CONFIG_PAGE_TABLE_ISOLATION
+#include <asm/kaiser.h>
+
+static inline int kaiser_map_thread_stack(void *stack)
+{
+	/*
+	 * Map that page of kernel stack on which we enter from user context.
+	 */
+	return kaiser_add_mapping((unsigned long)stack +
+			THREAD_SIZE - PAGE_SIZE, PAGE_SIZE, __PAGE_KERNEL);
+}
+
+static inline void kaiser_unmap_thread_stack(void *stack)
+{
+	/*
+	 * Note: may be called even when kaiser_map_thread_stack() failed.
+	 */
+	kaiser_remove_mapping((unsigned long)stack +
+			THREAD_SIZE - PAGE_SIZE, PAGE_SIZE);
+}
+#else
+
+/*
+ * These stubs are used whenever CONFIG_PAGE_TABLE_ISOLATION is off, which
+ * includes architectures that support KAISER, but have it disabled.
+ */
+
+static inline void kaiser_init(void)
+{
+}
+static inline int kaiser_add_mapping(unsigned long addr,
+				     unsigned long size, u64 flags)
+{
+	return 0;
+}
+static inline void kaiser_remove_mapping(unsigned long start,
+					 unsigned long size)
+{
+}
+static inline int kaiser_map_thread_stack(void *stack)
+{
+	return 0;
+}
+static inline void kaiser_unmap_thread_stack(void *stack)
+{
+}
+
+#endif /* !CONFIG_PAGE_TABLE_ISOLATION */
+#endif /* _LINUX_KAISER_H */
diff --git a/include/linux/kconfig.h b/include/linux/kconfig.h
index b33c7797eb57..a94b5bf57f51 100644
--- a/include/linux/kconfig.h
+++ b/include/linux/kconfig.h
@@ -17,10 +17,11 @@
  * the last step cherry picks the 2nd arg, we get a zero.
  */
 #define __ARG_PLACEHOLDER_1 0,
-#define config_enabled(cfg) _config_enabled(cfg)
-#define _config_enabled(value) __config_enabled(__ARG_PLACEHOLDER_##value)
-#define __config_enabled(arg1_or_junk) ___config_enabled(arg1_or_junk 1, 0)
-#define ___config_enabled(__ignored, val, ...) val
+#define config_enabled(cfg) ___is_defined(cfg)
+#define __is_defined(x) ___is_defined(x)
+#define ___is_defined(val) ____is_defined(__ARG_PLACEHOLDER_##val)
+#define ____is_defined(arg1_or_junk) __take_second_arg(arg1_or_junk 1, 0)
+#define __take_second_arg(__ignored, val, ...) val
 
 /*
  * IS_BUILTIN(CONFIG_FOO) evaluates to 1 if CONFIG_FOO is set to 'y', 0
@@ -42,7 +43,7 @@
  * built-in code when CONFIG_FOO is set to 'm'.
 */
 #define IS_REACHABLE(option) (config_enabled(option) || \
-		 (config_enabled(option##_MODULE) && config_enabled(MODULE)))
+		 (config_enabled(option##_MODULE) && __is_defined(MODULE)))
 
 /*
  * IS_ENABLED(CONFIG_FOO) evaluates to 1 if CONFIG_FOO is set to 'y' or 'm',
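The renamed __is_defined() helper still rests on the __ARG_PLACEHOLDER trick: an option that is defined to 1 pulls in a placeholder macro whose expansion contains a comma, shifting a literal 1 into the second-argument slot; an undefined option leaves junk in the first slot and the default 0 is picked. A compilable sketch of the same machinery (the macro names here are mine):

#include <stdio.h>

#define __ARG_PLACEHOLDER_1 0,
#define is_defined(x) _is_defined(x)
#define _is_defined(val) __is_def(__ARG_PLACEHOLDER_##val)
#define __is_def(arg1_or_junk) take_second(arg1_or_junk 1, 0)
#define take_second(ignored, val, ...) val

#define FOO 1
/* BAR is deliberately not defined */

int main(void)
{
	printf("FOO: %d\n", is_defined(FOO)); /* 1: FOO -> 1 -> "0, 1, 0", second arg is 1 */
	printf("BAR: %d\n", is_defined(BAR)); /* 0: "BAR 1, 0", second arg is 0 */
	return 0;
}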
diff --git a/include/linux/ktime.h b/include/linux/ktime.h
index 2b6a204bd8d4..3ffc69ebe967 100644
--- a/include/linux/ktime.h
+++ b/include/linux/ktime.h
@@ -64,6 +64,13 @@ static inline ktime_t ktime_set(const s64 secs, const unsigned long nsecs)
 	({ (ktime_t){ .tv64 = (lhs).tv64 + (rhs).tv64 }; })
 
 /*
+ * Same as ktime_add(), but avoids undefined behaviour on overflow; however,
+ * this means that you must check the result for overflow yourself.
+ */
+#define ktime_add_unsafe(lhs, rhs) \
+	({ (ktime_t){ .tv64 = (u64) (lhs).tv64 + (rhs).tv64 }; })
+
+/*
 * Add a ktime_t variable and a scalar nanosecond value.
 * res = kt + nsval:
 */
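ktime_add_unsafe() dodges signed-overflow undefined behaviour by doing the addition in unsigned arithmetic, where wraparound is well defined, and leaving the overflow check to the caller. A userspace sketch of the same pattern (note the u64-to-s64 conversion of an out-of-range value is implementation-defined, which GCC pins down to two's-complement wrapping):

#include <stdint.h>
#include <stdio.h>

static int64_t add_unsafe(int64_t a, int64_t b)
{
	return (int64_t)((uint64_t)a + (uint64_t)b);	/* wraps instead of UB */
}

int main(void)
{
	int64_t a = INT64_MAX, b = 100;
	int64_t sum = add_unsafe(a, b);

	if ((b > 0 && sum < a) || (b < 0 && sum > a))
		puts("overflow detected, clamp or reject");
	return 0;
}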
diff --git a/include/linux/mmu_context.h b/include/linux/mmu_context.h
index 70fffeba7495..a4441784503b 100644
--- a/include/linux/mmu_context.h
+++ b/include/linux/mmu_context.h
@@ -1,9 +1,16 @@
 #ifndef _LINUX_MMU_CONTEXT_H
 #define _LINUX_MMU_CONTEXT_H
 
+#include <asm/mmu_context.h>
+
 struct mm_struct;
 
 void use_mm(struct mm_struct *mm);
 void unuse_mm(struct mm_struct *mm);
 
+/* Architectures that care about IRQ state in switch_mm can override this. */
+#ifndef switch_mm_irqs_off
+# define switch_mm_irqs_off switch_mm
+#endif
+
 #endif
diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index ff88d6189411..b93b578cfa42 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -131,8 +131,9 @@ enum zone_stat_item {
 	NR_SLAB_RECLAIMABLE,
 	NR_SLAB_UNRECLAIMABLE,
 	NR_PAGETABLE,		/* used for pagetables */
-	NR_KERNEL_STACK,
 	/* Second 128 byte cacheline */
+	NR_KERNEL_STACK,
+	NR_KAISERTABLE,
 	NR_UNSTABLE_NFS,	/* NFS unstable pages */
 	NR_BOUNCE,
 	NR_VMSCAN_WRITE,
diff --git a/include/linux/module.h b/include/linux/module.h
index b229a9961d02..c9f2f85017ad 100644
--- a/include/linux/module.h
+++ b/include/linux/module.h
@@ -789,6 +789,15 @@ static inline void module_bug_finalize(const Elf_Ehdr *hdr,
 static inline void module_bug_cleanup(struct module *mod) {}
 #endif	/* CONFIG_GENERIC_BUG */
 
+#ifdef RETPOLINE
+extern bool retpoline_module_ok(bool has_retpoline);
+#else
+static inline bool retpoline_module_ok(bool has_retpoline)
+{
+	return true;
+}
+#endif
+
 #ifdef CONFIG_MODULE_SIG
 static inline bool module_sig_ok(struct module *module)
 {
diff --git a/include/linux/msi.h b/include/linux/msi.h
index f0f43ec45ee7..d0d50cf00b4d 100644
--- a/include/linux/msi.h
+++ b/include/linux/msi.h
@@ -17,7 +17,13 @@ struct msi_desc;
 struct pci_dev;
 struct platform_msi_priv_data;
 void __get_cached_msi_msg(struct msi_desc *entry, struct msi_msg *msg);
+#ifdef CONFIG_GENERIC_MSI_IRQ
 void get_cached_msi_msg(unsigned int irq, struct msi_msg *msg);
+#else
+static inline void get_cached_msi_msg(unsigned int irq, struct msi_msg *msg)
+{
+}
+#endif
 
 typedef void (*irq_write_msi_msg_t)(struct msi_desc *desc,
 				    struct msi_msg *msg);
@@ -105,18 +111,21 @@ struct msi_desc {
 
 struct pci_dev *msi_desc_to_pci_dev(struct msi_desc *desc);
 void *msi_desc_to_pci_sysdata(struct msi_desc *desc);
+void pci_write_msi_msg(unsigned int irq, struct msi_msg *msg);
 #else /* CONFIG_PCI_MSI */
 static inline void *msi_desc_to_pci_sysdata(struct msi_desc *desc)
 {
 	return NULL;
 }
+static inline void pci_write_msi_msg(unsigned int irq, struct msi_msg *msg)
+{
+}
 #endif /* CONFIG_PCI_MSI */
 
 struct msi_desc *alloc_msi_entry(struct device *dev);
 void free_msi_entry(struct msi_desc *entry);
 void __pci_read_msi_msg(struct msi_desc *entry, struct msi_msg *msg);
 void __pci_write_msi_msg(struct msi_desc *entry, struct msi_msg *msg);
-void pci_write_msi_msg(unsigned int irq, struct msi_msg *msg);
 
 u32 __pci_msix_desc_mask_irq(struct msi_desc *desc, u32 flag);
 u32 __pci_msi_desc_mask_irq(struct msi_desc *desc, u32 mask, u32 flag);
diff --git a/include/linux/mtd/map.h b/include/linux/mtd/map.h
index 806d0ab845e0..676d3d2a1a0a 100644
--- a/include/linux/mtd/map.h
+++ b/include/linux/mtd/map.h
@@ -265,75 +265,67 @@ void map_destroy(struct mtd_info *mtd);
 #define INVALIDATE_CACHED_RANGE(map, from, size) \
 	do { if (map->inval_cache) map->inval_cache(map, from, size); } while (0)
 
-
-static inline int map_word_equal(struct map_info *map, map_word val1, map_word val2)
-{
-	int i;
-
-	for (i = 0; i < map_words(map); i++) {
-		if (val1.x[i] != val2.x[i])
-			return 0;
-	}
-
-	return 1;
-}
-
-static inline map_word map_word_and(struct map_info *map, map_word val1, map_word val2)
-{
-	map_word r;
-	int i;
-
-	for (i = 0; i < map_words(map); i++)
-		r.x[i] = val1.x[i] & val2.x[i];
-
-	return r;
-}
-
-static inline map_word map_word_clr(struct map_info *map, map_word val1, map_word val2)
-{
-	map_word r;
-	int i;
-
-	for (i = 0; i < map_words(map); i++)
-		r.x[i] = val1.x[i] & ~val2.x[i];
-
-	return r;
-}
-
-static inline map_word map_word_or(struct map_info *map, map_word val1, map_word val2)
-{
-	map_word r;
-	int i;
-
-	for (i = 0; i < map_words(map); i++)
-		r.x[i] = val1.x[i] | val2.x[i];
-
-	return r;
-}
-
-static inline int map_word_andequal(struct map_info *map, map_word val1, map_word val2, map_word val3)
-{
-	int i;
-
-	for (i = 0; i < map_words(map); i++) {
-		if ((val1.x[i] & val2.x[i]) != val3.x[i])
-			return 0;
-	}
-
-	return 1;
-}
-
-static inline int map_word_bitsset(struct map_info *map, map_word val1, map_word val2)
-{
-	int i;
-
-	for (i = 0; i < map_words(map); i++) {
-		if (val1.x[i] & val2.x[i])
-			return 1;
-	}
-
-	return 0;
-}
+#define map_word_equal(map, val1, val2)					\
+({									\
+	int i, ret = 1;							\
+	for (i = 0; i < map_words(map); i++)				\
+		if ((val1).x[i] != (val2).x[i]) {			\
+			ret = 0;					\
+			break;						\
+		}							\
+	ret;								\
+})
+
+#define map_word_and(map, val1, val2)					\
+({									\
+	map_word r;							\
+	int i;								\
+	for (i = 0; i < map_words(map); i++)				\
+		r.x[i] = (val1).x[i] & (val2).x[i];			\
+	r;								\
+})
+
+#define map_word_clr(map, val1, val2)					\
+({									\
+	map_word r;							\
+	int i;								\
+	for (i = 0; i < map_words(map); i++)				\
+		r.x[i] = (val1).x[i] & ~(val2).x[i];			\
+	r;								\
+})
+
+#define map_word_or(map, val1, val2)					\
+({									\
+	map_word r;							\
+	int i;								\
+	for (i = 0; i < map_words(map); i++)				\
+		r.x[i] = (val1).x[i] | (val2).x[i];			\
+	r;								\
+})
+
+#define map_word_andequal(map, val1, val2, val3)			\
+({									\
+	int i, ret = 1;							\
+	for (i = 0; i < map_words(map); i++) {				\
+		if (((val1).x[i] & (val2).x[i]) != (val2).x[i]) {	\
+			ret = 0;					\
+			break;						\
+		}							\
+	}								\
+	ret;								\
+})
+
+#define map_word_bitsset(map, val1, val2)				\
+({									\
+	int i, ret = 0;							\
+	for (i = 0; i < map_words(map); i++) {				\
+		if ((val1).x[i] & (val2).x[i]) {			\
+			ret = 1;					\
+			break;						\
+		}							\
+	}								\
+	ret;								\
+})
 
 static inline map_word map_word_load(struct map_info *map, const void *ptr)
 {
diff --git a/include/linux/mtd/sh_flctl.h b/include/linux/mtd/sh_flctl.h
index 1c28f8879b1c..067b37aff4a1 100644
--- a/include/linux/mtd/sh_flctl.h
+++ b/include/linux/mtd/sh_flctl.h
@@ -148,6 +148,7 @@ struct sh_flctl {
 	struct platform_device	*pdev;
 	struct dev_pm_qos_request pm_qos;
 	void __iomem		*reg;
+	resource_size_t		fifo;
 
 	uint8_t	done_buff[2048 + 64];	/* max size 2048 + 64 */
 	int	read_bytes;
diff --git a/include/linux/netfilter/x_tables.h b/include/linux/netfilter/x_tables.h
index 04078e8a4803..d6c53fce006b 100644
--- a/include/linux/netfilter/x_tables.h
+++ b/include/linux/netfilter/x_tables.h
@@ -243,6 +243,10 @@ int xt_check_entry_offsets(const void *base, const char *elems,
 			   unsigned int target_offset,
 			   unsigned int next_offset);
 
+unsigned int *xt_alloc_entry_offsets(unsigned int size);
+bool xt_find_jump_offset(const unsigned int *offsets,
+			 unsigned int target, unsigned int size);
+
 int xt_check_match(struct xt_mtchk_param *, unsigned int size, u_int8_t proto,
 		   bool inv_proto);
 int xt_check_target(struct xt_tgchk_param *, unsigned int size, u_int8_t proto,
@@ -377,16 +381,16 @@ static inline unsigned long ifname_compare_aligned(const char *_a,
 * allows us to return 0 for single core systems without forcing
 * callers to deal with SMP vs. NONSMP issues.
 */
-static inline u64 xt_percpu_counter_alloc(void)
+static inline unsigned long xt_percpu_counter_alloc(void)
 {
 	if (nr_cpu_ids > 1) {
 		void __percpu *res = __alloc_percpu(sizeof(struct xt_counters),
 						    sizeof(struct xt_counters));
 
 		if (res == NULL)
-			return (u64) -ENOMEM;
+			return -ENOMEM;
 
-		return (u64) (__force unsigned long) res;
+		return (__force unsigned long) res;
 	}
 
 	return 0;
diff --git a/include/linux/nospec.h b/include/linux/nospec.h
new file mode 100644
index 000000000000..4a040862f4bf
--- /dev/null
+++ b/include/linux/nospec.h
@@ -0,0 +1,71 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright(c) 2018 Linus Torvalds. All rights reserved.
+// Copyright(c) 2018 Alexei Starovoitov. All rights reserved.
+// Copyright(c) 2018 Intel Corporation. All rights reserved.
+
+#ifndef _LINUX_NOSPEC_H
+#define _LINUX_NOSPEC_H
+
+/**
+ * array_index_mask_nospec() - generate a ~0 mask when index < size, 0 otherwise
+ * @index: array element index
+ * @size: number of elements in array
+ *
+ * When @index is out of bounds (@index >= @size), the sign bit will be
+ * set. Extend the sign bit to all bits and invert, giving a result of
+ * zero for an out of bounds index, or ~0 if within bounds [0, @size).
+ */
+#ifndef array_index_mask_nospec
+static inline unsigned long array_index_mask_nospec(unsigned long index,
+						    unsigned long size)
+{
+	/*
+	 * Warn developers about inappropriate array_index_nospec() usage.
+	 *
+	 * Even if the CPU speculates past the WARN_ONCE branch, the
+	 * sign bit of @index is taken into account when generating the
+	 * mask.
+	 *
+	 * This warning is compiled out when the compiler can infer that
+	 * @index and @size are less than LONG_MAX.
+	 */
+	if (WARN_ONCE(index > LONG_MAX || size > LONG_MAX,
+			"array_index_nospec() limited to range of [0, LONG_MAX]\n"))
+		return 0;
+
+	/*
+	 * Always calculate and emit the mask even if the compiler
+	 * thinks the mask is not needed. The compiler does not take
+	 * into account the value of @index under speculation.
+	 */
+	OPTIMIZER_HIDE_VAR(index);
+	return ~(long)(index | (size - 1UL - index)) >> (BITS_PER_LONG - 1);
+}
+#endif
+
+/*
+ * array_index_nospec - sanitize an array index after a bounds check
+ *
+ * For a code sequence like:
+ *
+ *     if (index < size) {
+ *         index = array_index_nospec(index, size);
+ *         val = array[index];
+ *     }
+ *
+ * ...if the CPU speculates past the bounds check then
+ * array_index_nospec() will clamp the index within the range of [0,
+ * size).
+ */
+#define array_index_nospec(index, size)					\
+({									\
+	typeof(index) _i = (index);					\
+	typeof(size) _s = (size);					\
+	unsigned long _mask = array_index_mask_nospec(_i, _s);		\
+									\
+	BUILD_BUG_ON(sizeof(_i) > sizeof(long));			\
+	BUILD_BUG_ON(sizeof(_s) > sizeof(long));			\
+									\
+	(typeof(_i)) (_i & _mask);					\
+})
+#endif /* _LINUX_NOSPEC_H */
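For reference, the documented pattern in use outside the kernel — a plain C sketch of the mask computation and the bounds-check-then-clamp sequence (the arithmetic right shift of a negative value is compiler-defined, which the kernel assumes; the per-arch asm versions differ):

#include <stdio.h>

static inline unsigned long index_mask_nospec(unsigned long index,
					      unsigned long size)
{
	/* ~0UL when index < size, 0 otherwise, computed without a branch */
	return ~(long)(index | (size - 1UL - index)) >> (8 * sizeof(long) - 1);
}

int main(void)
{
	unsigned long table[8] = { 1, 2, 3, 4, 5, 6, 7, 8 };
	unsigned long idx = 5;			/* possibly attacker controlled */

	if (idx < 8) {
		idx &= index_mask_nospec(idx, 8);	/* clamped even under speculation */
		printf("%lu\n", table[idx]);
	}
	return 0;
}

The __fcheck_files() hunk earlier in this diff is exactly this sequence applied to the file-descriptor table.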
diff --git a/include/linux/percpu-defs.h b/include/linux/percpu-defs.h
index 8f16299ca068..8902f23bb770 100644
--- a/include/linux/percpu-defs.h
+++ b/include/linux/percpu-defs.h
@@ -35,6 +35,12 @@
 
 #endif
 
+#ifdef CONFIG_PAGE_TABLE_ISOLATION
+#define USER_MAPPED_SECTION "..user_mapped"
+#else
+#define USER_MAPPED_SECTION ""
+#endif
+
 /*
 * Base implementations of per-CPU variable declarations and definitions, where
 * the section in which the variable is to be placed is provided by the
@@ -115,6 +121,12 @@
 #define DEFINE_PER_CPU(type, name)					\
 	DEFINE_PER_CPU_SECTION(type, name, "")
 
+#define DECLARE_PER_CPU_USER_MAPPED(type, name)				\
+	DECLARE_PER_CPU_SECTION(type, name, USER_MAPPED_SECTION)
+
+#define DEFINE_PER_CPU_USER_MAPPED(type, name)				\
+	DEFINE_PER_CPU_SECTION(type, name, USER_MAPPED_SECTION)
+
 /*
 * Declaration/definition used for per-CPU variables that must come first in
 * the set of variables.
@@ -144,6 +156,14 @@
 	DEFINE_PER_CPU_SECTION(type, name, PER_CPU_SHARED_ALIGNED_SECTION) \
 	____cacheline_aligned_in_smp
 
+#define DECLARE_PER_CPU_SHARED_ALIGNED_USER_MAPPED(type, name)		\
+	DECLARE_PER_CPU_SECTION(type, name, USER_MAPPED_SECTION PER_CPU_SHARED_ALIGNED_SECTION) \
+	____cacheline_aligned_in_smp
+
+#define DEFINE_PER_CPU_SHARED_ALIGNED_USER_MAPPED(type, name)		\
+	DEFINE_PER_CPU_SECTION(type, name, USER_MAPPED_SECTION PER_CPU_SHARED_ALIGNED_SECTION) \
+	____cacheline_aligned_in_smp
+
 #define DECLARE_PER_CPU_ALIGNED(type, name)				\
 	DECLARE_PER_CPU_SECTION(type, name, PER_CPU_ALIGNED_SECTION)	\
 	____cacheline_aligned
@@ -162,11 +182,21 @@
 #define DEFINE_PER_CPU_PAGE_ALIGNED(type, name)				\
 	DEFINE_PER_CPU_SECTION(type, name, "..page_aligned")		\
 	__aligned(PAGE_SIZE)
+/*
+ * Declaration/definition used for per-CPU variables that must be page aligned and need to be mapped in user mode.
+ */
+#define DECLARE_PER_CPU_PAGE_ALIGNED_USER_MAPPED(type, name)		\
+	DECLARE_PER_CPU_SECTION(type, name, USER_MAPPED_SECTION"..page_aligned") \
+	__aligned(PAGE_SIZE)
+
+#define DEFINE_PER_CPU_PAGE_ALIGNED_USER_MAPPED(type, name)		\
+	DEFINE_PER_CPU_SECTION(type, name, USER_MAPPED_SECTION"..page_aligned") \
+	__aligned(PAGE_SIZE)
 
 /*
 * Declaration/definition used for per-CPU variables that must be read mostly.
 */
-#define DECLARE_PER_CPU_READ_MOSTLY(type, name)		\
+#define DECLARE_PER_CPU_READ_MOSTLY(type, name)			\
 	DECLARE_PER_CPU_SECTION(type, name, "..read_mostly")
 
 #define DEFINE_PER_CPU_READ_MOSTLY(type, name)				\
diff --git a/include/linux/phy.h b/include/linux/phy.h
index 5bc4b9d563a9..dbfd5ce9350f 100644
--- a/include/linux/phy.h
+++ b/include/linux/phy.h
@@ -683,6 +683,17 @@ static inline bool phy_is_internal(struct phy_device *phydev)
 }
 
 /**
+ * phy_interface_mode_is_rgmii - Convenience function for testing if a
+ * PHY interface mode is RGMII (all variants)
+ * @mode: the phy_interface_t enum
+ */
+static inline bool phy_interface_mode_is_rgmii(phy_interface_t mode)
+{
+	return mode >= PHY_INTERFACE_MODE_RGMII &&
+		mode <= PHY_INTERFACE_MODE_RGMII_TXID;
+};
+
+/**
 * phy_interface_is_rgmii - Convenience function for testing if a PHY interface
 * is RGMII (all variants)
 * @phydev: the phy_device struct
diff --git a/include/linux/sched.h b/include/linux/sched.h
index e887c8d6f395..90bea398e5e0 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -1313,6 +1313,7 @@ struct sched_dl_entity {
 	u64 dl_deadline;	/* relative deadline of each instance	*/
 	u64 dl_period;		/* separation of two instances (period) */
 	u64 dl_bw;		/* dl_runtime / dl_deadline		*/
+	u64 dl_density;		/* dl_runtime / dl_deadline		*/
 
 	/*
 	 * Actual scheduling parameters. Initialized with the values above,
diff --git a/include/linux/sh_eth.h b/include/linux/sh_eth.h
index 8c9131db2b25..b050ef51e27e 100644
--- a/include/linux/sh_eth.h
+++ b/include/linux/sh_eth.h
@@ -16,7 +16,6 @@ struct sh_eth_plat_data {
 	unsigned char mac_addr[ETH_ALEN];
 	unsigned no_ether_link:1;
 	unsigned ether_link_active_low:1;
-	unsigned needs_init:1;
 };
 
 #endif
diff --git a/include/linux/string.h b/include/linux/string.h
index aa30789b0f65..98bb781a2eff 100644
--- a/include/linux/string.h
+++ b/include/linux/string.h
@@ -122,6 +122,7 @@ extern char *kstrdup(const char *s, gfp_t gfp);
 extern const char *kstrdup_const(const char *s, gfp_t gfp);
 extern char *kstrndup(const char *s, size_t len, gfp_t gfp);
 extern void *kmemdup(const void *src, size_t len, gfp_t gfp);
+extern char *kmemdup_nul(const char *s, size_t len, gfp_t gfp);
 
 extern char **argv_split(gfp_t gfp, const char *str, int *argcp);
 extern void argv_free(char **argv);
diff --git a/include/linux/tcp.h b/include/linux/tcp.h
index 318c24612458..2260f92f1492 100644
--- a/include/linux/tcp.h
+++ b/include/linux/tcp.h
@@ -29,9 +29,14 @@ static inline struct tcphdr *tcp_hdr(const struct sk_buff *skb)
 	return (struct tcphdr *)skb_transport_header(skb);
 }
 
+static inline unsigned int __tcp_hdrlen(const struct tcphdr *th)
+{
+	return th->doff * 4;
+}
+
 static inline unsigned int tcp_hdrlen(const struct sk_buff *skb)
 {
-	return tcp_hdr(skb)->doff * 4;
+	return __tcp_hdrlen(tcp_hdr(skb));
 }
 
 static inline struct tcphdr *inner_tcp_hdr(const struct sk_buff *skb)
diff --git a/include/linux/vm_event_item.h b/include/linux/vm_event_item.h
index e623d392db0c..8ef3a61fdc74 100644
--- a/include/linux/vm_event_item.h
+++ b/include/linux/vm_event_item.h
@@ -80,10 +80,8 @@ enum vm_event_item { PGPGIN, PGPGOUT, PSWPIN, PSWPOUT,
 #endif
 #endif
 #ifdef CONFIG_DEBUG_TLBFLUSH
-#ifdef CONFIG_SMP
 	NR_TLB_REMOTE_FLUSH,	/* cpu tried to flush others' tlbs */
 	NR_TLB_REMOTE_FLUSH_RECEIVED,/* cpu received ipi for flush */
-#endif /* CONFIG_SMP */
 	NR_TLB_LOCAL_FLUSH_ALL,
 	NR_TLB_LOCAL_FLUSH_ONE,
 #endif /* CONFIG_DEBUG_TLBFLUSH */
diff --git a/include/net/arp.h b/include/net/arp.h
index 5e0f891d476c..1b3f86981757 100644
--- a/include/net/arp.h
+++ b/include/net/arp.h
@@ -19,6 +19,9 @@ static inline u32 arp_hashfn(const void *pkey, const struct net_device *dev, u32
 
 static inline struct neighbour *__ipv4_neigh_lookup_noref(struct net_device *dev, u32 key)
 {
+	if (dev->flags & (IFF_LOOPBACK | IFF_POINTOPOINT))
+		key = INADDR_ANY;
+
 	return ___neigh_lookup_noref(&arp_tbl, neigh_key_eq32, arp_hashfn, &key, dev);
 }
diff --git a/include/net/dst_cache.h b/include/net/dst_cache.h
new file mode 100644
index 000000000000..151accae708b
--- /dev/null
+++ b/include/net/dst_cache.h
@@ -0,0 +1,97 @@
+#ifndef _NET_DST_CACHE_H
+#define _NET_DST_CACHE_H
+
+#include <linux/jiffies.h>
+#include <net/dst.h>
+#if IS_ENABLED(CONFIG_IPV6)
+#include <net/ip6_fib.h>
+#endif
+
+struct dst_cache {
+	struct dst_cache_pcpu __percpu *cache;
+	unsigned long reset_ts;
+};
+
+/**
+ *	dst_cache_get - perform cache lookup
+ *	@dst_cache: the cache
+ *
+ *	The caller should use dst_cache_get_ip4() if it need to retrieve the
+ *	source address to be used when xmitting to the cached dst.
+ *	local BH must be disabled.
+ */
+struct dst_entry *dst_cache_get(struct dst_cache *dst_cache);
+
+/**
+ *	dst_cache_get_ip4 - perform cache lookup and fetch ipv4 source address
+ *	@dst_cache: the cache
+ *	@saddr: return value for the retrieved source address
+ *
+ *	local BH must be disabled.
+ */
+struct rtable *dst_cache_get_ip4(struct dst_cache *dst_cache, __be32 *saddr);
+
+/**
+ *	dst_cache_set_ip4 - store the ipv4 dst into the cache
+ *	@dst_cache: the cache
+ *	@dst: the entry to be cached
+ *	@saddr: the source address to be stored inside the cache
+ *
+ *	local BH must be disabled.
+ */
+void dst_cache_set_ip4(struct dst_cache *dst_cache, struct dst_entry *dst,
+		       __be32 saddr);
+
+#if IS_ENABLED(CONFIG_IPV6)
+
+/**
+ *	dst_cache_set_ip6 - store the ipv6 dst into the cache
+ *	@dst_cache: the cache
+ *	@dst: the entry to be cached
+ *	@saddr: the source address to be stored inside the cache
+ *
+ *	local BH must be disabled.
+ */
+void dst_cache_set_ip6(struct dst_cache *dst_cache, struct dst_entry *dst,
+		       const struct in6_addr *addr);
+
+/**
+ *	dst_cache_get_ip6 - perform cache lookup and fetch ipv6 source address
+ *	@dst_cache: the cache
+ *	@saddr: return value for the retrieved source address
+ *
+ *	local BH must be disabled.
+ */
+struct dst_entry *dst_cache_get_ip6(struct dst_cache *dst_cache,
+				    struct in6_addr *saddr);
+#endif
+
+/**
+ *	dst_cache_reset - invalidate the cache contents
+ *	@dst_cache: the cache
+ *
+ *	This do not free the cached dst to avoid races and contentions.
+ *	the dst will be freed on later cache lookup.
+ */
+static inline void dst_cache_reset(struct dst_cache *dst_cache)
+{
+	dst_cache->reset_ts = jiffies;
+}
+
+/**
+ *	dst_cache_init - initialize the cache, allocating the required storage
+ *	@dst_cache: the cache
+ *	@gfp: allocation flags
+ */
+int dst_cache_init(struct dst_cache *dst_cache, gfp_t gfp);
+
+/**
+ *	dst_cache_destroy - empty the cache and free the allocated storage
+ *	@dst_cache: the cache
+ *
+ *	No synchronization is enforced: it must be called only when the cache
+ *	is unsed.
+ */
+void dst_cache_destroy(struct dst_cache *dst_cache);
+
+#endif
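The header above is the whole dst_cache contract, which the tunnel headers further down switch to. A hedged sketch of the intended call pattern from a tunnel driver's point of view (struct my_tunnel and my_route_lookup are invented for illustration; only the dst_cache_* calls match the declarations above, and this is kernel-context code, not a standalone program):

#include <net/dst_cache.h>

struct my_tunnel {
	struct dst_cache dst_cache;
};

static struct rtable *my_route_lookup(__be32 *saddr);	/* hypothetical slow path */

static int my_tunnel_init(struct my_tunnel *t)
{
	return dst_cache_init(&t->dst_cache, GFP_KERNEL);
}

static struct rtable *my_tunnel_route(struct my_tunnel *t, __be32 *saddr)
{
	struct rtable *rt;

	rt = dst_cache_get_ip4(&t->dst_cache, saddr);	/* fast path; BH disabled */
	if (rt)
		return rt;

	rt = my_route_lookup(saddr);			/* full route lookup */
	if (!IS_ERR(rt))
		dst_cache_set_ip4(&t->dst_cache, &rt->dst, *saddr);
	return rt;
}

On a configuration change the driver would call dst_cache_reset(), which only bumps reset_ts; the stale dst is dropped lazily on the next lookup, and dst_cache_destroy() is called once the device is torn down.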
diff --git a/include/net/ip.h b/include/net/ip.h
index 7476bb10ff37..639398af273b 100644
--- a/include/net/ip.h
+++ b/include/net/ip.h
@@ -33,6 +33,8 @@
 #include <net/flow.h>
 #include <net/flow_dissector.h>
 
+#define IPV4_MIN_MTU		68	/* RFC 791 */
+
 struct sock;
 
 struct inet_skb_parm {
diff --git a/include/net/ip6_tunnel.h b/include/net/ip6_tunnel.h
index 9c2c044153f6..d143c8480681 100644
--- a/include/net/ip6_tunnel.h
+++ b/include/net/ip6_tunnel.h
@@ -5,6 +5,8 @@
 #include <linux/netdevice.h>
 #include <linux/if_tunnel.h>
 #include <linux/ip6_tunnel.h>
+#include <net/ip_tunnels.h>
+#include <net/dst_cache.h>
 
 #define IP6TUNNEL_ERR_TIMEO (30*HZ)
 
@@ -32,12 +34,6 @@ struct __ip6_tnl_parm {
 	__be32			o_key;
 };
 
-struct ip6_tnl_dst {
-	seqlock_t lock;
-	struct dst_entry __rcu *dst;
-	u32 cookie;
-};
-
 /* IPv6 tunnel */
 struct ip6_tnl {
 	struct ip6_tnl __rcu *next;	/* next tunnel in list */
@@ -45,7 +41,7 @@ struct ip6_tnl {
 	struct net *net;	/* netns for packet i/o */
 	struct __ip6_tnl_parm parms;	/* tunnel configuration parameters */
 	struct flowi fl;	/* flowi template for xmit */
-	struct ip6_tnl_dst __percpu *dst_cache;	/* cached dst */
+	struct dst_cache dst_cache;	/* cached dst */
 
 	int err_count;
 	unsigned long err_time;
@@ -65,11 +61,6 @@ struct ipv6_tlv_tnl_enc_lim {
 	__u8 encap_limit;	/* tunnel encapsulation limit */
 } __packed;
 
-struct dst_entry *ip6_tnl_dst_get(struct ip6_tnl *t);
-int ip6_tnl_dst_init(struct ip6_tnl *t);
-void ip6_tnl_dst_destroy(struct ip6_tnl *t);
-void ip6_tnl_dst_reset(struct ip6_tnl *t);
-void ip6_tnl_dst_set(struct ip6_tnl *t, struct dst_entry *dst);
 int ip6_tnl_rcv_ctl(struct ip6_tnl *t, const struct in6_addr *laddr,
 		const struct in6_addr *raddr);
 int ip6_tnl_xmit_ctl(struct ip6_tnl *t, const struct in6_addr *laddr,
diff --git a/include/net/ip_tunnels.h b/include/net/ip_tunnels.h
index 86a7bdd61d1a..74bc08d82e14 100644
--- a/include/net/ip_tunnels.h
+++ b/include/net/ip_tunnels.h
@@ -13,6 +13,7 @@
 #include <net/netns/generic.h>
 #include <net/rtnetlink.h>
 #include <net/lwtunnel.h>
+#include <net/dst_cache.h>
 
 #if IS_ENABLED(CONFIG_IPV6)
 #include <net/ipv6.h>
@@ -85,11 +86,6 @@ struct ip_tunnel_prl_entry {
 	struct rcu_head			rcu_head;
 };
 
-struct ip_tunnel_dst {
-	struct dst_entry __rcu		*dst;
-	__be32				saddr;
-};
-
 struct metadata_dst;
 
 struct ip_tunnel {
@@ -108,7 +104,7 @@ struct ip_tunnel {
 	int		tun_hlen;	/* Precalculated header length */
 	int		mlink;
 
-	struct ip_tunnel_dst __percpu *dst_cache;
+	struct dst_cache dst_cache;
 
 	struct ip_tunnel_parm parms;
 
@@ -248,7 +244,6 @@ int ip_tunnel_changelink(struct net_device *dev, struct nlattr *tb[],
 int ip_tunnel_newlink(struct net_device *dev, struct nlattr *tb[],
 		      struct ip_tunnel_parm *p);
 void ip_tunnel_setup(struct net_device *dev, int net_id);
-void ip_tunnel_dst_reset_all(struct ip_tunnel *t);
 int ip_tunnel_encap_setup(struct ip_tunnel *t,
 			  struct ip_tunnel_encap *ipencap);
diff --git a/include/net/ipv6.h b/include/net/ipv6.h
index 7a8066b90289..84f0d0602433 100644
--- a/include/net/ipv6.h
+++ b/include/net/ipv6.h
@@ -281,6 +281,7 @@ int ipv6_flowlabel_opt_get(struct sock *sk, struct in6_flowlabel_req *freq,
 			   int flags);
 int ip6_flowlabel_init(void);
 void ip6_flowlabel_cleanup(void);
+bool ip6_autoflowlabel(struct net *net, const struct ipv6_pinfo *np);
 
 static inline void fl6_sock_release(struct ip6_flowlabel *fl)
 {
diff --git a/include/net/net_namespace.h b/include/net/net_namespace.h
index 2dcea635ecce..93328c61934a 100644
--- a/include/net/net_namespace.h
+++ b/include/net/net_namespace.h
@@ -209,6 +209,11 @@ int net_eq(const struct net *net1, const struct net *net2)
 	return net1 == net2;
 }
 
+static inline int check_net(const struct net *net)
+{
+	return atomic_read(&net->count) != 0;
+}
+
 void net_drop_ns(void *);
 
 #else
@@ -233,6 +238,11 @@ int net_eq(const struct net *net1, const struct net *net2)
 	return 1;
 }
 
+static inline int check_net(const struct net *net)
+{
+	return 1;
+}
+
 #define net_drop_ns NULL
 #endif
diff --git a/include/net/netfilter/nf_queue.h b/include/net/netfilter/nf_queue.h
index 9c5638ad872e..0dbce55437f2 100644
--- a/include/net/netfilter/nf_queue.h
+++ b/include/net/netfilter/nf_queue.h
@@ -28,8 +28,8 @@ struct nf_queue_handler {
 						struct nf_hook_ops *ops);
 };
 
-void nf_register_queue_handler(const struct nf_queue_handler *qh);
-void nf_unregister_queue_handler(void);
+void nf_register_queue_handler(struct net *net, const struct nf_queue_handler *qh);
+void nf_unregister_queue_handler(struct net *net);
 void nf_reinject(struct nf_queue_entry *entry, unsigned int verdict);
 
 void nf_queue_entry_get_refs(struct nf_queue_entry *entry);
diff --git a/include/net/netlink.h b/include/net/netlink.h
index 0e3172751755..5ffaea4665f8 100644
--- a/include/net/netlink.h
+++ b/include/net/netlink.h
@@ -745,7 +745,10 @@ static inline int nla_parse_nested(struct nlattr *tb[], int maxtype,
 */
 static inline int nla_put_u8(struct sk_buff *skb, int attrtype, u8 value)
 {
-	return nla_put(skb, attrtype, sizeof(u8), &value);
+	/* temporary variables to work around GCC PR81715 with asan-stack=1 */
+	u8 tmp = value;
+
+	return nla_put(skb, attrtype, sizeof(u8), &tmp);
 }
 
 /**
@@ -756,7 +759,9 @@ static inline int nla_put_u8(struct sk_buff *skb, int attrtype, u8 value)
 */
 static inline int nla_put_u16(struct sk_buff *skb, int attrtype, u16 value)
 {
-	return nla_put(skb, attrtype, sizeof(u16), &value);
+	u16 tmp = value;
+
+	return nla_put(skb, attrtype, sizeof(u16), &tmp);
 }
 
 /**
@@ -767,7 +772,9 @@ static inline int nla_put_u16(struct sk_buff *skb, int attrtype, u16 value)
 */
 static inline int nla_put_be16(struct sk_buff *skb, int attrtype, __be16 value)
 {
-	return nla_put(skb, attrtype, sizeof(__be16), &value);
+	__be16 tmp = value;
+
+	return nla_put(skb, attrtype, sizeof(__be16), &tmp);
 }
 
 /**
@@ -778,7 +785,9 @@ static inline int nla_put_be16(struct sk_buff *skb, int attrtype, __be16 value)
 */
 static inline int nla_put_net16(struct sk_buff *skb, int attrtype, __be16 value)
 {
-	return nla_put_be16(skb, attrtype | NLA_F_NET_BYTEORDER, value);
+	__be16 tmp = value;
+
+	return nla_put_be16(skb, attrtype | NLA_F_NET_BYTEORDER, tmp);
 }
 
 /**
@@ -789,7 +798,9 @@ static inline int nla_put_net16(struct sk_buff *skb, int attrtype, __be16 value)
 */
 static inline int nla_put_le16(struct sk_buff *skb, int attrtype, __le16 value)
 {
-	return nla_put(skb, attrtype, sizeof(__le16), &value);
+	__le16 tmp = value;
+
+	return nla_put(skb, attrtype, sizeof(__le16), &tmp);
 }
 
 /**
@@ -800,7 +811,9 @@ static inline int nla_put_le16(struct sk_buff *skb, int attrtype, __le16 value)
 */
 static inline int nla_put_u32(struct sk_buff *skb, int attrtype, u32 value)
 {
-	return nla_put(skb, attrtype, sizeof(u32), &value);
+	u32 tmp = value;
+
+	return nla_put(skb, attrtype, sizeof(u32), &tmp);
 }
 
 /**
@@ -811,7 +824,9 @@ static inline int nla_put_u32(struct sk_buff *skb, int attrtype, u32 value)
 */
 static inline int nla_put_be32(struct sk_buff *skb, int attrtype, __be32 value)
 {
-	return nla_put(skb, attrtype, sizeof(__be32), &value);
+	__be32 tmp = value;
+
+	return nla_put(skb, attrtype, sizeof(__be32), &tmp);
 }
 
 /**
@@ -822,7 +837,9 @@ static inline int nla_put_be32(struct sk_buff *skb, int attrtype, __be32 value)
 */
 static inline int nla_put_net32(struct sk_buff *skb, int attrtype, __be32 value)
 {
-	return nla_put_be32(skb, attrtype | NLA_F_NET_BYTEORDER, value);
+	__be32 tmp = value;
+
+	return nla_put_be32(skb, attrtype | NLA_F_NET_BYTEORDER, tmp);
 }
 
 /**
@@ -833,7 +850,9 @@ static inline int nla_put_net32(struct sk_buff *skb, int attrtype, __be32 value)
 */
 static inline int nla_put_le32(struct sk_buff *skb, int attrtype, __le32 value)
 {
-	return nla_put(skb, attrtype, sizeof(__le32), &value);
+	__le32 tmp = value;
+
+	return nla_put(skb, attrtype, sizeof(__le32), &tmp);
 }
 
 /**
@@ -844,7 +863,9 @@ static inline int nla_put_le32(struct sk_buff *skb, int attrtype, __le32 value)
 */
 static inline int nla_put_u64(struct sk_buff *skb, int attrtype, u64 value)
 {
-	return nla_put(skb, attrtype, sizeof(u64), &value);
+	u64 tmp = value;
+
+	return nla_put(skb, attrtype, sizeof(u64), &tmp);
 }
 
 /**
@@ -855,7 +876,9 @@ static inline int nla_put_u64(struct sk_buff *skb, int attrtype, u64 value)
 */
 static inline int nla_put_be64(struct sk_buff *skb, int attrtype, __be64 value)
 {
-	return nla_put(skb, attrtype, sizeof(__be64), &value);
+	__be64 tmp = value;
+
+	return nla_put(skb, attrtype, sizeof(__be64), &tmp);
 }
 
 /**
@@ -866,7 +889,9 @@ static inline int nla_put_be64(struct sk_buff *skb, int attrtype, __be64 value)
 */
 static inline int nla_put_net64(struct sk_buff *skb, int attrtype, __be64 value)
 {
-	return nla_put_be64(skb, attrtype | NLA_F_NET_BYTEORDER, value);
+	__be64 tmp = value;
+
+	return nla_put_be64(skb, attrtype | NLA_F_NET_BYTEORDER, tmp);
 }
 
 /**
@@ -877,7 +902,9 @@ static inline int nla_put_net64(struct sk_buff *skb, int attrtype, __be64 value)
 */
 static inline int nla_put_le64(struct sk_buff *skb, int attrtype, __le64 value)
 {
-	return nla_put(skb, attrtype, sizeof(__le64), &value);
+	__le64 tmp = value;
+
+	return nla_put(skb, attrtype, sizeof(__le64), &tmp);
 }
 
 /**
@@ -888,7 +915,9 @@ static inline int nla_put_le64(struct sk_buff *skb, int attrtype, __le64 value)
 */
 static inline int nla_put_s8(struct sk_buff *skb, int attrtype, s8 value)
 {
-	return nla_put(skb, attrtype, sizeof(s8), &value);
+	s8 tmp = value;
+
+	return nla_put(skb, attrtype, sizeof(s8), &tmp);
 }
 
 /**
@@ -899,7 +928,9 @@ static inline int nla_put_s8(struct sk_buff *skb, int attrtype, s8 value)
 */
 static inline int nla_put_s16(struct sk_buff *skb, int attrtype, s16 value)
 {
-	return nla_put(skb, attrtype, sizeof(s16), &value);
+	s16 tmp = value;
+
+	return nla_put(skb, attrtype, sizeof(s16), &tmp);
 }
 
 /**
@@ -910,7 +941,9 @@ static inline int nla_put_s16(struct sk_buff *skb, int attrtype, s16 value)
 */
 static inline int nla_put_s32(struct sk_buff *skb, int attrtype, s32 value)
 {
-	return nla_put(skb, attrtype, sizeof(s32), &value);
+	s32 tmp = value;
+
+	return nla_put(skb, attrtype, sizeof(s32), &tmp);
 }
 
 /**
@@ -921,7 +954,9 @@ static inline int nla_put_s32(struct sk_buff *skb, int attrtype, s32 value)
 */
 static inline int nla_put_s64(struct sk_buff *skb, int attrtype, s64 value)
 {
-	return nla_put(skb, attrtype, sizeof(s64), &value);
+	s64 tmp = value;
+
+	return nla_put(skb, attrtype, sizeof(s64), &tmp);
 }
 
 /**
@@ -969,7 +1004,9 @@ static inline int nla_put_msecs(struct sk_buff *skb, int attrtype,
 static inline int nla_put_in_addr(struct sk_buff *skb, int attrtype,
 				  __be32 addr)
 {
-	return nla_put_be32(skb, attrtype, addr);
+	__be32 tmp = addr;
+
+	return nla_put_be32(skb, attrtype, tmp);
 }
 
 /**
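All of the nla_put_*() hunks are one pattern: copy the by-value parameter into a local and pass the local's address, working around GCC PR81715, where taking the address of a parameter under asan-stack=1 creates a separate stack slot for every inlined call site and can blow the kernel's stack-frame limit. A standalone sketch of the shape of the workaround (emit() is my stand-in for nla_put()):

#include <stdio.h>
#include <string.h>

static unsigned char buf[64];
static size_t off;

static void emit(const void *p, size_t len)	/* stand-in for nla_put() */
{
	memcpy(buf + off, p, len);
	off += len;
}

static inline void put_u32(unsigned int value)
{
	unsigned int tmp = value;	/* copy first; emit(&value, ...) is what triggers the bug */

	emit(&tmp, sizeof(tmp));
}

int main(void)
{
	put_u32(0xdeadbeef);
	printf("%zu bytes emitted\n", off);
	return 0;
}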
diff --git a/include/net/netns/netfilter.h b/include/net/netns/netfilter.h
index 38aa4983e2a9..36d723579af2 100644
--- a/include/net/netns/netfilter.h
+++ b/include/net/netns/netfilter.h
@@ -5,11 +5,13 @@
 
 struct proc_dir_entry;
 struct nf_logger;
+struct nf_queue_handler;
 
 struct netns_nf {
 #if defined CONFIG_PROC_FS
 	struct proc_dir_entry *proc_netfilter;
 #endif
+	const struct nf_queue_handler __rcu *queue_handler;
 	const struct nf_logger __rcu *nf_loggers[NFPROTO_NUMPROTO];
 #ifdef CONFIG_SYSCTL
 	struct ctl_table_header *nf_log_dir_header;
diff --git a/include/net/red.h b/include/net/red.h
index 76e0b5f922c6..3618cdfec884 100644
--- a/include/net/red.h
+++ b/include/net/red.h
@@ -167,6 +167,17 @@ static inline void red_set_vars(struct red_vars *v)
 	v->qcount	= -1;
 }
 
+static inline bool red_check_params(u32 qth_min, u32 qth_max, u8 Wlog)
+{
+	if (fls(qth_min) + Wlog > 32)
+		return false;
+	if (fls(qth_max) + Wlog > 32)
+		return false;
+	if (qth_max < qth_min)
+		return false;
+	return true;
+}
+
 static inline void red_set_parms(struct red_parms *p,
 				 u32 qth_min, u32 qth_max, u8 Wlog, u8 Plog,
 				 u8 Scell_log, u8 *stab, u32 max_P)
@@ -178,7 +189,7 @@ static inline void red_set_parms(struct red_parms *p,
 	p->qth_max	= qth_max << Wlog;
 	p->Wlog		= Wlog;
 	p->Plog		= Plog;
-	if (delta < 0)
+	if (delta <= 0)
 		delta = 1;
 	p->qth_delta	= delta;
 	if (!max_P) {
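red_set_parms() stores qth_min << Wlog and qth_max << Wlog in u32 fields, so red_check_params() rejects any combination where the shift would silently drop the top bits: x << shift fits in 32 bits exactly when fls(x) + shift <= 32. A small userspace demo of that arithmetic (fls32() is my stand-in for the kernel's fls()):

#include <stdio.h>
#include <stdint.h>

static int fls32(uint32_t x)	/* 1-based index of the highest set bit; 0 if x == 0 */
{
	return x ? 32 - __builtin_clz(x) : 0;
}

int main(void)
{
	uint32_t qth = 0x00100000;	/* fls = 21 */
	unsigned int wlog = 12;

	if (fls32(qth) + wlog > 32)	/* 21 + 12 = 33: the shift would overflow */
		puts("rejected: qth << Wlog would overflow u32");
	else
		printf("ok: 0x%x\n", qth << wlog);
	return 0;
}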
diff --git a/include/net/udplite.h b/include/net/udplite.h
index 80761938b9a7..8228155b305e 100644
--- a/include/net/udplite.h
+++ b/include/net/udplite.h
@@ -62,6 +62,7 @@ static inline int udplite_checksum_init(struct sk_buff *skb, struct udphdr *uh)
 		UDP_SKB_CB(skb)->cscov = cscov;
 		if (skb->ip_summed == CHECKSUM_COMPLETE)
 			skb->ip_summed = CHECKSUM_NONE;
+		skb->csum_valid = 0;
 	}
 
 	return 0;
diff --git a/include/scsi/sg.h b/include/scsi/sg.h
index 3afec7032448..20bc71c3e0b8 100644
--- a/include/scsi/sg.h
+++ b/include/scsi/sg.h
@@ -197,7 +197,6 @@ typedef struct sg_req_info {	/* used by SG_GET_REQUEST_TABLE ioctl() */
 #define SG_DEFAULT_RETRIES 0
 
 /* Defaults, commented if they differ from original sg driver */
-#define SG_DEF_FORCE_LOW_DMA 0	/* was 1 -> memory below 16MB on i386 */
 #define SG_DEF_FORCE_PACK_ID 0
 #define SG_DEF_KEEP_ORPHAN 0
 #define SG_DEF_RESERVED_SIZE SG_SCATTER_SZ /* load time option */
diff --git a/include/target/target_core_base.h b/include/target/target_core_base.h
index 9982a2bcb880..0eed9fd79ea5 100644
--- a/include/target/target_core_base.h
+++ b/include/target/target_core_base.h
@@ -496,6 +496,7 @@ struct se_cmd {
 #define CMD_T_BUSY		(1 << 9)
 #define CMD_T_TAS		(1 << 10)
 #define CMD_T_FABRIC_STOP	(1 << 11)
+#define CMD_T_PRE_EXECUTE	(1 << 12)
 	spinlock_t		t_state_lock;
 	struct kref		cmd_kref;
 	struct completion	t_transport_stop_comp;
diff --git a/include/trace/events/clk.h b/include/trace/events/clk.h
index 758607226bfd..2cd449328aee 100644
--- a/include/trace/events/clk.h
+++ b/include/trace/events/clk.h
@@ -134,12 +134,12 @@ DECLARE_EVENT_CLASS(clk_parent,
 
 	TP_STRUCT__entry(
 		__string(        name,           core->name                )
-		__string(        pname,          parent->name              )
+		__string(        pname, parent ? parent->name : "none"     )
 	),
 
 	TP_fast_assign(
 		__assign_str(name, core->name);
-		__assign_str(pname, parent->name);
+		__assign_str(pname, parent ? parent->name : "none");
 	),
 
 	TP_printk("%s %s", __get_str(name), __get_str(pname))
diff --git a/include/trace/events/kvm.h b/include/trace/events/kvm.h
index d6f83222a6a1..67ff6555967f 100644
--- a/include/trace/events/kvm.h
+++ b/include/trace/events/kvm.h
@@ -204,7 +204,7 @@ TRACE_EVENT(kvm_ack_irq,
 	{ KVM_TRACE_MMIO_WRITE, "write" }
 
 TRACE_EVENT(kvm_mmio,
-	TP_PROTO(int type, int len, u64 gpa, u64 val),
+	TP_PROTO(int type, int len, u64 gpa, void *val),
 	TP_ARGS(type, len, gpa, val),
 
 	TP_STRUCT__entry(
@@ -218,7 +218,10 @@ TRACE_EVENT(kvm_mmio,
 		__entry->type		= type;
 		__entry->len		= len;
 		__entry->gpa		= gpa;
-		__entry->val		= val;
+		__entry->val		= 0;
+		if (val)
+			memcpy(&__entry->val, val,
+			       min_t(u32, sizeof(__entry->val), len));
 	),
 
 	TP_printk("mmio %s len %u gpa 0x%llx val 0x%llx",
diff --git a/include/uapi/linux/eventpoll.h b/include/uapi/linux/eventpoll.h
index bc81fb2e1f0e..6f04cb419115 100644
--- a/include/uapi/linux/eventpoll.h
+++ b/include/uapi/linux/eventpoll.h
@@ -26,6 +26,19 @@
 #define EPOLL_CTL_DEL 2
 #define EPOLL_CTL_MOD 3
 
+/* Epoll event masks */
+#define EPOLLIN		0x00000001
+#define EPOLLPRI	0x00000002
+#define EPOLLOUT	0x00000004
+#define EPOLLERR	0x00000008
+#define EPOLLHUP	0x00000010
+#define EPOLLRDNORM	0x00000040
+#define EPOLLRDBAND	0x00000080
+#define EPOLLWRNORM	0x00000100
+#define EPOLLWRBAND	0x00000200
+#define EPOLLMSG	0x00000400
+#define EPOLLRDHUP	0x00002000
+
 /*
 * Request the handling of system wakeup events so as to prevent system suspends
 * from happening while those events are being processed.