From 9fa45070a2e59a871e1cd3370173369f3a4f61e2 Mon Sep 17 00:00:00 2001
From: Mark Rutland
Date: Tue, 4 Sep 2018 11:48:26 +0100
Subject: locking/atomics: Switch to generated fallbacks

As a step to ensuring the atomic* APIs are consistent, switch to fallbacks
generated by gen-atomic-fallback.sh.

These are checked in rather than generated with Kbuild, since:

* This allows inspection of the atomics with git grep and ctags on a
  pristine tree, which Linus strongly prefers being able to do.

* The fallbacks are not affected by machine details or configuration
  options, so it is not necessary to regenerate them to take these into
  account.

* These are included by files required *very* early in the build process
  (e.g. for generating bounds.h), and we'd rather not complicate the
  top-level Kbuild file with dependencies.

The new fallback header should be equivalent to the old fallbacks in
<linux/atomic.h>, but:

* It is formatted a little differently due to scripting ensuring things
  are more regular than they used to be.

* Fallbacks are now expanded in-place as static inline functions rather
  than macros.

* The prototypes for fallbacks are arranged consistently with the return
  type on a separate line to try to keep to a sensible line length.

There should be no functional change as a result of this patch.

Signed-off-by: Mark Rutland
Signed-off-by: Peter Zijlstra (Intel)
Acked-by: Will Deacon
Cc: linux-arm-kernel@lists.infradead.org
Cc: catalin.marinas@arm.com
Cc: linuxdrivers@attotech.com
Cc: dvyukov@google.com
Cc: Boqun Feng
Cc: arnd@arndb.de
Cc: aryabinin@virtuozzo.com
Cc: glider@google.com
Link: http://lkml.kernel.org/r/20180904104830.2975-3-mark.rutland@arm.com
Signed-off-by: Ingo Molnar
---
 include/linux/atomic-fallback.h | 2294 +++++++++++++++++++++++++++++++++++++++
 include/linux/atomic.h          | 1241 +--------------------
 2 files changed, 2295 insertions(+), 1240 deletions(-)
 create mode 100644 include/linux/atomic-fallback.h

(limited to 'include/linux')

diff --git a/include/linux/atomic-fallback.h b/include/linux/atomic-fallback.h new file mode 100644 index 000000000000..1c02c0112fbb --- /dev/null +++ b/include/linux/atomic-fallback.h @@ -0,0 +1,2294 @@ +// SPDX-License-Identifier: GPL-2.0 + +// Generated by scripts/atomic/gen-atomic-fallback.sh +// DO NOT MODIFY THIS FILE DIRECTLY + +#ifndef _LINUX_ATOMIC_FALLBACK_H +#define _LINUX_ATOMIC_FALLBACK_H + +#ifndef xchg_relaxed +#define xchg_relaxed xchg +#define xchg_acquire xchg +#define xchg_release xchg +#else /* xchg_relaxed */ + +#ifndef xchg_acquire +#define xchg_acquire(...) \ + __atomic_op_acquire(xchg, __VA_ARGS__) +#endif + +#ifndef xchg_release +#define xchg_release(...) \ + __atomic_op_release(xchg, __VA_ARGS__) +#endif + +#ifndef xchg +#define xchg(...) \ + __atomic_op_fence(xchg, __VA_ARGS__) +#endif + +#endif /* xchg_relaxed */ + +#ifndef cmpxchg_relaxed +#define cmpxchg_relaxed cmpxchg +#define cmpxchg_acquire cmpxchg +#define cmpxchg_release cmpxchg +#else /* cmpxchg_relaxed */ + +#ifndef cmpxchg_acquire +#define cmpxchg_acquire(...) \ + __atomic_op_acquire(cmpxchg, __VA_ARGS__) +#endif + +#ifndef cmpxchg_release +#define cmpxchg_release(...) \ + __atomic_op_release(cmpxchg, __VA_ARGS__) +#endif + +#ifndef cmpxchg +#define cmpxchg(...) \ + __atomic_op_fence(cmpxchg, __VA_ARGS__) +#endif + +#endif /* cmpxchg_relaxed */ + +#ifndef cmpxchg64_relaxed +#define cmpxchg64_relaxed cmpxchg64 +#define cmpxchg64_acquire cmpxchg64 +#define cmpxchg64_release cmpxchg64 +#else /* cmpxchg64_relaxed */ + +#ifndef cmpxchg64_acquire +#define cmpxchg64_acquire(...)
\ + __atomic_op_acquire(cmpxchg64, __VA_ARGS__) +#endif + +#ifndef cmpxchg64_release +#define cmpxchg64_release(...) \ + __atomic_op_release(cmpxchg64, __VA_ARGS__) +#endif + +#ifndef cmpxchg64 +#define cmpxchg64(...) \ + __atomic_op_fence(cmpxchg64, __VA_ARGS__) +#endif + +#endif /* cmpxchg64_relaxed */ + +#ifndef atomic_read_acquire +static inline int +atomic_read_acquire(const atomic_t *v) +{ + return smp_load_acquire(&(v)->counter); +} +#define atomic_read_acquire atomic_read_acquire +#endif + +#ifndef atomic_set_release +static inline void +atomic_set_release(atomic_t *v, int i) +{ + smp_store_release(&(v)->counter, i); +} +#define atomic_set_release atomic_set_release +#endif + +#ifndef atomic_add_return_relaxed +#define atomic_add_return_acquire atomic_add_return +#define atomic_add_return_release atomic_add_return +#define atomic_add_return_relaxed atomic_add_return +#else /* atomic_add_return_relaxed */ + +#ifndef atomic_add_return_acquire +static inline int +atomic_add_return_acquire(int i, atomic_t *v) +{ + int ret = atomic_add_return_relaxed(i, v); + __atomic_acquire_fence(); + return ret; +} +#define atomic_add_return_acquire atomic_add_return_acquire +#endif + +#ifndef atomic_add_return_release +static inline int +atomic_add_return_release(int i, atomic_t *v) +{ + __atomic_release_fence(); + return atomic_add_return_relaxed(i, v); +} +#define atomic_add_return_release atomic_add_return_release +#endif + +#ifndef atomic_add_return +static inline int +atomic_add_return(int i, atomic_t *v) +{ + int ret; + __atomic_pre_full_fence(); + ret = atomic_add_return_relaxed(i, v); + __atomic_post_full_fence(); + return ret; +} +#define atomic_add_return atomic_add_return +#endif + +#endif /* atomic_add_return_relaxed */ + +#ifndef atomic_fetch_add_relaxed +#define atomic_fetch_add_acquire atomic_fetch_add +#define atomic_fetch_add_release atomic_fetch_add +#define atomic_fetch_add_relaxed atomic_fetch_add +#else /* atomic_fetch_add_relaxed */ + +#ifndef atomic_fetch_add_acquire +static inline int +atomic_fetch_add_acquire(int i, atomic_t *v) +{ + int ret = atomic_fetch_add_relaxed(i, v); + __atomic_acquire_fence(); + return ret; +} +#define atomic_fetch_add_acquire atomic_fetch_add_acquire +#endif + +#ifndef atomic_fetch_add_release +static inline int +atomic_fetch_add_release(int i, atomic_t *v) +{ + __atomic_release_fence(); + return atomic_fetch_add_relaxed(i, v); +} +#define atomic_fetch_add_release atomic_fetch_add_release +#endif + +#ifndef atomic_fetch_add +static inline int +atomic_fetch_add(int i, atomic_t *v) +{ + int ret; + __atomic_pre_full_fence(); + ret = atomic_fetch_add_relaxed(i, v); + __atomic_post_full_fence(); + return ret; +} +#define atomic_fetch_add atomic_fetch_add +#endif + +#endif /* atomic_fetch_add_relaxed */ + +#ifndef atomic_sub_return_relaxed +#define atomic_sub_return_acquire atomic_sub_return +#define atomic_sub_return_release atomic_sub_return +#define atomic_sub_return_relaxed atomic_sub_return +#else /* atomic_sub_return_relaxed */ + +#ifndef atomic_sub_return_acquire +static inline int +atomic_sub_return_acquire(int i, atomic_t *v) +{ + int ret = atomic_sub_return_relaxed(i, v); + __atomic_acquire_fence(); + return ret; +} +#define atomic_sub_return_acquire atomic_sub_return_acquire +#endif + +#ifndef atomic_sub_return_release +static inline int +atomic_sub_return_release(int i, atomic_t *v) +{ + __atomic_release_fence(); + return atomic_sub_return_relaxed(i, v); +} +#define atomic_sub_return_release atomic_sub_return_release +#endif + +#ifndef 
atomic_sub_return +static inline int +atomic_sub_return(int i, atomic_t *v) +{ + int ret; + __atomic_pre_full_fence(); + ret = atomic_sub_return_relaxed(i, v); + __atomic_post_full_fence(); + return ret; +} +#define atomic_sub_return atomic_sub_return +#endif + +#endif /* atomic_sub_return_relaxed */ + +#ifndef atomic_fetch_sub_relaxed +#define atomic_fetch_sub_acquire atomic_fetch_sub +#define atomic_fetch_sub_release atomic_fetch_sub +#define atomic_fetch_sub_relaxed atomic_fetch_sub +#else /* atomic_fetch_sub_relaxed */ + +#ifndef atomic_fetch_sub_acquire +static inline int +atomic_fetch_sub_acquire(int i, atomic_t *v) +{ + int ret = atomic_fetch_sub_relaxed(i, v); + __atomic_acquire_fence(); + return ret; +} +#define atomic_fetch_sub_acquire atomic_fetch_sub_acquire +#endif + +#ifndef atomic_fetch_sub_release +static inline int +atomic_fetch_sub_release(int i, atomic_t *v) +{ + __atomic_release_fence(); + return atomic_fetch_sub_relaxed(i, v); +} +#define atomic_fetch_sub_release atomic_fetch_sub_release +#endif + +#ifndef atomic_fetch_sub +static inline int +atomic_fetch_sub(int i, atomic_t *v) +{ + int ret; + __atomic_pre_full_fence(); + ret = atomic_fetch_sub_relaxed(i, v); + __atomic_post_full_fence(); + return ret; +} +#define atomic_fetch_sub atomic_fetch_sub +#endif + +#endif /* atomic_fetch_sub_relaxed */ + +#ifndef atomic_inc +static inline void +atomic_inc(atomic_t *v) +{ + atomic_add(1, v); +} +#define atomic_inc atomic_inc +#endif + +#ifndef atomic_inc_return_relaxed +#ifdef atomic_inc_return +#define atomic_inc_return_acquire atomic_inc_return +#define atomic_inc_return_release atomic_inc_return +#define atomic_inc_return_relaxed atomic_inc_return +#endif /* atomic_inc_return */ + +#ifndef atomic_inc_return +static inline int +atomic_inc_return(atomic_t *v) +{ + return atomic_add_return(1, v); +} +#define atomic_inc_return atomic_inc_return +#endif + +#ifndef atomic_inc_return_acquire +static inline int +atomic_inc_return_acquire(atomic_t *v) +{ + return atomic_add_return_acquire(1, v); +} +#define atomic_inc_return_acquire atomic_inc_return_acquire +#endif + +#ifndef atomic_inc_return_release +static inline int +atomic_inc_return_release(atomic_t *v) +{ + return atomic_add_return_release(1, v); +} +#define atomic_inc_return_release atomic_inc_return_release +#endif + +#ifndef atomic_inc_return_relaxed +static inline int +atomic_inc_return_relaxed(atomic_t *v) +{ + return atomic_add_return_relaxed(1, v); +} +#define atomic_inc_return_relaxed atomic_inc_return_relaxed +#endif + +#else /* atomic_inc_return_relaxed */ + +#ifndef atomic_inc_return_acquire +static inline int +atomic_inc_return_acquire(atomic_t *v) +{ + int ret = atomic_inc_return_relaxed(v); + __atomic_acquire_fence(); + return ret; +} +#define atomic_inc_return_acquire atomic_inc_return_acquire +#endif + +#ifndef atomic_inc_return_release +static inline int +atomic_inc_return_release(atomic_t *v) +{ + __atomic_release_fence(); + return atomic_inc_return_relaxed(v); +} +#define atomic_inc_return_release atomic_inc_return_release +#endif + +#ifndef atomic_inc_return +static inline int +atomic_inc_return(atomic_t *v) +{ + int ret; + __atomic_pre_full_fence(); + ret = atomic_inc_return_relaxed(v); + __atomic_post_full_fence(); + return ret; +} +#define atomic_inc_return atomic_inc_return +#endif + +#endif /* atomic_inc_return_relaxed */ + +#ifndef atomic_fetch_inc_relaxed +#ifdef atomic_fetch_inc +#define atomic_fetch_inc_acquire atomic_fetch_inc +#define atomic_fetch_inc_release atomic_fetch_inc +#define 
atomic_fetch_inc_relaxed atomic_fetch_inc +#endif /* atomic_fetch_inc */ + +#ifndef atomic_fetch_inc +static inline int +atomic_fetch_inc(atomic_t *v) +{ + return atomic_fetch_add(1, v); +} +#define atomic_fetch_inc atomic_fetch_inc +#endif + +#ifndef atomic_fetch_inc_acquire +static inline int +atomic_fetch_inc_acquire(atomic_t *v) +{ + return atomic_fetch_add_acquire(1, v); +} +#define atomic_fetch_inc_acquire atomic_fetch_inc_acquire +#endif + +#ifndef atomic_fetch_inc_release +static inline int +atomic_fetch_inc_release(atomic_t *v) +{ + return atomic_fetch_add_release(1, v); +} +#define atomic_fetch_inc_release atomic_fetch_inc_release +#endif + +#ifndef atomic_fetch_inc_relaxed +static inline int +atomic_fetch_inc_relaxed(atomic_t *v) +{ + return atomic_fetch_add_relaxed(1, v); +} +#define atomic_fetch_inc_relaxed atomic_fetch_inc_relaxed +#endif + +#else /* atomic_fetch_inc_relaxed */ + +#ifndef atomic_fetch_inc_acquire +static inline int +atomic_fetch_inc_acquire(atomic_t *v) +{ + int ret = atomic_fetch_inc_relaxed(v); + __atomic_acquire_fence(); + return ret; +} +#define atomic_fetch_inc_acquire atomic_fetch_inc_acquire +#endif + +#ifndef atomic_fetch_inc_release +static inline int +atomic_fetch_inc_release(atomic_t *v) +{ + __atomic_release_fence(); + return atomic_fetch_inc_relaxed(v); +} +#define atomic_fetch_inc_release atomic_fetch_inc_release +#endif + +#ifndef atomic_fetch_inc +static inline int +atomic_fetch_inc(atomic_t *v) +{ + int ret; + __atomic_pre_full_fence(); + ret = atomic_fetch_inc_relaxed(v); + __atomic_post_full_fence(); + return ret; +} +#define atomic_fetch_inc atomic_fetch_inc +#endif + +#endif /* atomic_fetch_inc_relaxed */ + +#ifndef atomic_dec +static inline void +atomic_dec(atomic_t *v) +{ + atomic_sub(1, v); +} +#define atomic_dec atomic_dec +#endif + +#ifndef atomic_dec_return_relaxed +#ifdef atomic_dec_return +#define atomic_dec_return_acquire atomic_dec_return +#define atomic_dec_return_release atomic_dec_return +#define atomic_dec_return_relaxed atomic_dec_return +#endif /* atomic_dec_return */ + +#ifndef atomic_dec_return +static inline int +atomic_dec_return(atomic_t *v) +{ + return atomic_sub_return(1, v); +} +#define atomic_dec_return atomic_dec_return +#endif + +#ifndef atomic_dec_return_acquire +static inline int +atomic_dec_return_acquire(atomic_t *v) +{ + return atomic_sub_return_acquire(1, v); +} +#define atomic_dec_return_acquire atomic_dec_return_acquire +#endif + +#ifndef atomic_dec_return_release +static inline int +atomic_dec_return_release(atomic_t *v) +{ + return atomic_sub_return_release(1, v); +} +#define atomic_dec_return_release atomic_dec_return_release +#endif + +#ifndef atomic_dec_return_relaxed +static inline int +atomic_dec_return_relaxed(atomic_t *v) +{ + return atomic_sub_return_relaxed(1, v); +} +#define atomic_dec_return_relaxed atomic_dec_return_relaxed +#endif + +#else /* atomic_dec_return_relaxed */ + +#ifndef atomic_dec_return_acquire +static inline int +atomic_dec_return_acquire(atomic_t *v) +{ + int ret = atomic_dec_return_relaxed(v); + __atomic_acquire_fence(); + return ret; +} +#define atomic_dec_return_acquire atomic_dec_return_acquire +#endif + +#ifndef atomic_dec_return_release +static inline int +atomic_dec_return_release(atomic_t *v) +{ + __atomic_release_fence(); + return atomic_dec_return_relaxed(v); +} +#define atomic_dec_return_release atomic_dec_return_release +#endif + +#ifndef atomic_dec_return +static inline int +atomic_dec_return(atomic_t *v) +{ + int ret; + __atomic_pre_full_fence(); + ret = 
atomic_dec_return_relaxed(v); + __atomic_post_full_fence(); + return ret; +} +#define atomic_dec_return atomic_dec_return +#endif + +#endif /* atomic_dec_return_relaxed */ + +#ifndef atomic_fetch_dec_relaxed +#ifdef atomic_fetch_dec +#define atomic_fetch_dec_acquire atomic_fetch_dec +#define atomic_fetch_dec_release atomic_fetch_dec +#define atomic_fetch_dec_relaxed atomic_fetch_dec +#endif /* atomic_fetch_dec */ + +#ifndef atomic_fetch_dec +static inline int +atomic_fetch_dec(atomic_t *v) +{ + return atomic_fetch_sub(1, v); +} +#define atomic_fetch_dec atomic_fetch_dec +#endif + +#ifndef atomic_fetch_dec_acquire +static inline int +atomic_fetch_dec_acquire(atomic_t *v) +{ + return atomic_fetch_sub_acquire(1, v); +} +#define atomic_fetch_dec_acquire atomic_fetch_dec_acquire +#endif + +#ifndef atomic_fetch_dec_release +static inline int +atomic_fetch_dec_release(atomic_t *v) +{ + return atomic_fetch_sub_release(1, v); +} +#define atomic_fetch_dec_release atomic_fetch_dec_release +#endif + +#ifndef atomic_fetch_dec_relaxed +static inline int +atomic_fetch_dec_relaxed(atomic_t *v) +{ + return atomic_fetch_sub_relaxed(1, v); +} +#define atomic_fetch_dec_relaxed atomic_fetch_dec_relaxed +#endif + +#else /* atomic_fetch_dec_relaxed */ + +#ifndef atomic_fetch_dec_acquire +static inline int +atomic_fetch_dec_acquire(atomic_t *v) +{ + int ret = atomic_fetch_dec_relaxed(v); + __atomic_acquire_fence(); + return ret; +} +#define atomic_fetch_dec_acquire atomic_fetch_dec_acquire +#endif + +#ifndef atomic_fetch_dec_release +static inline int +atomic_fetch_dec_release(atomic_t *v) +{ + __atomic_release_fence(); + return atomic_fetch_dec_relaxed(v); +} +#define atomic_fetch_dec_release atomic_fetch_dec_release +#endif + +#ifndef atomic_fetch_dec +static inline int +atomic_fetch_dec(atomic_t *v) +{ + int ret; + __atomic_pre_full_fence(); + ret = atomic_fetch_dec_relaxed(v); + __atomic_post_full_fence(); + return ret; +} +#define atomic_fetch_dec atomic_fetch_dec +#endif + +#endif /* atomic_fetch_dec_relaxed */ + +#ifndef atomic_fetch_and_relaxed +#define atomic_fetch_and_acquire atomic_fetch_and +#define atomic_fetch_and_release atomic_fetch_and +#define atomic_fetch_and_relaxed atomic_fetch_and +#else /* atomic_fetch_and_relaxed */ + +#ifndef atomic_fetch_and_acquire +static inline int +atomic_fetch_and_acquire(int i, atomic_t *v) +{ + int ret = atomic_fetch_and_relaxed(i, v); + __atomic_acquire_fence(); + return ret; +} +#define atomic_fetch_and_acquire atomic_fetch_and_acquire +#endif + +#ifndef atomic_fetch_and_release +static inline int +atomic_fetch_and_release(int i, atomic_t *v) +{ + __atomic_release_fence(); + return atomic_fetch_and_relaxed(i, v); +} +#define atomic_fetch_and_release atomic_fetch_and_release +#endif + +#ifndef atomic_fetch_and +static inline int +atomic_fetch_and(int i, atomic_t *v) +{ + int ret; + __atomic_pre_full_fence(); + ret = atomic_fetch_and_relaxed(i, v); + __atomic_post_full_fence(); + return ret; +} +#define atomic_fetch_and atomic_fetch_and +#endif + +#endif /* atomic_fetch_and_relaxed */ + +#ifndef atomic_andnot +static inline void +atomic_andnot(int i, atomic_t *v) +{ + atomic_and(~i, v); +} +#define atomic_andnot atomic_andnot +#endif + +#ifndef atomic_fetch_andnot_relaxed +#ifdef atomic_fetch_andnot +#define atomic_fetch_andnot_acquire atomic_fetch_andnot +#define atomic_fetch_andnot_release atomic_fetch_andnot +#define atomic_fetch_andnot_relaxed atomic_fetch_andnot +#endif /* atomic_fetch_andnot */ + +#ifndef atomic_fetch_andnot +static inline int 
+atomic_fetch_andnot(int i, atomic_t *v) +{ + return atomic_fetch_and(~i, v); +} +#define atomic_fetch_andnot atomic_fetch_andnot +#endif + +#ifndef atomic_fetch_andnot_acquire +static inline int +atomic_fetch_andnot_acquire(int i, atomic_t *v) +{ + return atomic_fetch_and_acquire(~i, v); +} +#define atomic_fetch_andnot_acquire atomic_fetch_andnot_acquire +#endif + +#ifndef atomic_fetch_andnot_release +static inline int +atomic_fetch_andnot_release(int i, atomic_t *v) +{ + return atomic_fetch_and_release(~i, v); +} +#define atomic_fetch_andnot_release atomic_fetch_andnot_release +#endif + +#ifndef atomic_fetch_andnot_relaxed +static inline int +atomic_fetch_andnot_relaxed(int i, atomic_t *v) +{ + return atomic_fetch_and_relaxed(~i, v); +} +#define atomic_fetch_andnot_relaxed atomic_fetch_andnot_relaxed +#endif + +#else /* atomic_fetch_andnot_relaxed */ + +#ifndef atomic_fetch_andnot_acquire +static inline int +atomic_fetch_andnot_acquire(int i, atomic_t *v) +{ + int ret = atomic_fetch_andnot_relaxed(i, v); + __atomic_acquire_fence(); + return ret; +} +#define atomic_fetch_andnot_acquire atomic_fetch_andnot_acquire +#endif + +#ifndef atomic_fetch_andnot_release +static inline int +atomic_fetch_andnot_release(int i, atomic_t *v) +{ + __atomic_release_fence(); + return atomic_fetch_andnot_relaxed(i, v); +} +#define atomic_fetch_andnot_release atomic_fetch_andnot_release +#endif + +#ifndef atomic_fetch_andnot +static inline int +atomic_fetch_andnot(int i, atomic_t *v) +{ + int ret; + __atomic_pre_full_fence(); + ret = atomic_fetch_andnot_relaxed(i, v); + __atomic_post_full_fence(); + return ret; +} +#define atomic_fetch_andnot atomic_fetch_andnot +#endif + +#endif /* atomic_fetch_andnot_relaxed */ + +#ifndef atomic_fetch_or_relaxed +#define atomic_fetch_or_acquire atomic_fetch_or +#define atomic_fetch_or_release atomic_fetch_or +#define atomic_fetch_or_relaxed atomic_fetch_or +#else /* atomic_fetch_or_relaxed */ + +#ifndef atomic_fetch_or_acquire +static inline int +atomic_fetch_or_acquire(int i, atomic_t *v) +{ + int ret = atomic_fetch_or_relaxed(i, v); + __atomic_acquire_fence(); + return ret; +} +#define atomic_fetch_or_acquire atomic_fetch_or_acquire +#endif + +#ifndef atomic_fetch_or_release +static inline int +atomic_fetch_or_release(int i, atomic_t *v) +{ + __atomic_release_fence(); + return atomic_fetch_or_relaxed(i, v); +} +#define atomic_fetch_or_release atomic_fetch_or_release +#endif + +#ifndef atomic_fetch_or +static inline int +atomic_fetch_or(int i, atomic_t *v) +{ + int ret; + __atomic_pre_full_fence(); + ret = atomic_fetch_or_relaxed(i, v); + __atomic_post_full_fence(); + return ret; +} +#define atomic_fetch_or atomic_fetch_or +#endif + +#endif /* atomic_fetch_or_relaxed */ + +#ifndef atomic_fetch_xor_relaxed +#define atomic_fetch_xor_acquire atomic_fetch_xor +#define atomic_fetch_xor_release atomic_fetch_xor +#define atomic_fetch_xor_relaxed atomic_fetch_xor +#else /* atomic_fetch_xor_relaxed */ + +#ifndef atomic_fetch_xor_acquire +static inline int +atomic_fetch_xor_acquire(int i, atomic_t *v) +{ + int ret = atomic_fetch_xor_relaxed(i, v); + __atomic_acquire_fence(); + return ret; +} +#define atomic_fetch_xor_acquire atomic_fetch_xor_acquire +#endif + +#ifndef atomic_fetch_xor_release +static inline int +atomic_fetch_xor_release(int i, atomic_t *v) +{ + __atomic_release_fence(); + return atomic_fetch_xor_relaxed(i, v); +} +#define atomic_fetch_xor_release atomic_fetch_xor_release +#endif + +#ifndef atomic_fetch_xor +static inline int +atomic_fetch_xor(int i, atomic_t *v) +{ 
+ int ret; + __atomic_pre_full_fence(); + ret = atomic_fetch_xor_relaxed(i, v); + __atomic_post_full_fence(); + return ret; +} +#define atomic_fetch_xor atomic_fetch_xor +#endif + +#endif /* atomic_fetch_xor_relaxed */ + +#ifndef atomic_xchg_relaxed +#define atomic_xchg_acquire atomic_xchg +#define atomic_xchg_release atomic_xchg +#define atomic_xchg_relaxed atomic_xchg +#else /* atomic_xchg_relaxed */ + +#ifndef atomic_xchg_acquire +static inline int +atomic_xchg_acquire(atomic_t *v, int i) +{ + int ret = atomic_xchg_relaxed(v, i); + __atomic_acquire_fence(); + return ret; +} +#define atomic_xchg_acquire atomic_xchg_acquire +#endif + +#ifndef atomic_xchg_release +static inline int +atomic_xchg_release(atomic_t *v, int i) +{ + __atomic_release_fence(); + return atomic_xchg_relaxed(v, i); +} +#define atomic_xchg_release atomic_xchg_release +#endif + +#ifndef atomic_xchg +static inline int +atomic_xchg(atomic_t *v, int i) +{ + int ret; + __atomic_pre_full_fence(); + ret = atomic_xchg_relaxed(v, i); + __atomic_post_full_fence(); + return ret; +} +#define atomic_xchg atomic_xchg +#endif + +#endif /* atomic_xchg_relaxed */ + +#ifndef atomic_cmpxchg_relaxed +#define atomic_cmpxchg_acquire atomic_cmpxchg +#define atomic_cmpxchg_release atomic_cmpxchg +#define atomic_cmpxchg_relaxed atomic_cmpxchg +#else /* atomic_cmpxchg_relaxed */ + +#ifndef atomic_cmpxchg_acquire +static inline int +atomic_cmpxchg_acquire(atomic_t *v, int old, int new) +{ + int ret = atomic_cmpxchg_relaxed(v, old, new); + __atomic_acquire_fence(); + return ret; +} +#define atomic_cmpxchg_acquire atomic_cmpxchg_acquire +#endif + +#ifndef atomic_cmpxchg_release +static inline int +atomic_cmpxchg_release(atomic_t *v, int old, int new) +{ + __atomic_release_fence(); + return atomic_cmpxchg_relaxed(v, old, new); +} +#define atomic_cmpxchg_release atomic_cmpxchg_release +#endif + +#ifndef atomic_cmpxchg +static inline int +atomic_cmpxchg(atomic_t *v, int old, int new) +{ + int ret; + __atomic_pre_full_fence(); + ret = atomic_cmpxchg_relaxed(v, old, new); + __atomic_post_full_fence(); + return ret; +} +#define atomic_cmpxchg atomic_cmpxchg +#endif + +#endif /* atomic_cmpxchg_relaxed */ + +#ifndef atomic_try_cmpxchg_relaxed +#ifdef atomic_try_cmpxchg +#define atomic_try_cmpxchg_acquire atomic_try_cmpxchg +#define atomic_try_cmpxchg_release atomic_try_cmpxchg +#define atomic_try_cmpxchg_relaxed atomic_try_cmpxchg +#endif /* atomic_try_cmpxchg */ + +#ifndef atomic_try_cmpxchg +static inline bool +atomic_try_cmpxchg(atomic_t *v, int *old, int new) +{ + int r, o = *old; + r = atomic_cmpxchg(v, o, new); + if (unlikely(r != o)) + *old = r; + return likely(r == o); +} +#define atomic_try_cmpxchg atomic_try_cmpxchg +#endif + +#ifndef atomic_try_cmpxchg_acquire +static inline bool +atomic_try_cmpxchg_acquire(atomic_t *v, int *old, int new) +{ + int r, o = *old; + r = atomic_cmpxchg_acquire(v, o, new); + if (unlikely(r != o)) + *old = r; + return likely(r == o); +} +#define atomic_try_cmpxchg_acquire atomic_try_cmpxchg_acquire +#endif + +#ifndef atomic_try_cmpxchg_release +static inline bool +atomic_try_cmpxchg_release(atomic_t *v, int *old, int new) +{ + int r, o = *old; + r = atomic_cmpxchg_release(v, o, new); + if (unlikely(r != o)) + *old = r; + return likely(r == o); +} +#define atomic_try_cmpxchg_release atomic_try_cmpxchg_release +#endif + +#ifndef atomic_try_cmpxchg_relaxed +static inline bool +atomic_try_cmpxchg_relaxed(atomic_t *v, int *old, int new) +{ + int r, o = *old; + r = atomic_cmpxchg_relaxed(v, o, new); + if (unlikely(r != o)) 
+ *old = r; + return likely(r == o); +} +#define atomic_try_cmpxchg_relaxed atomic_try_cmpxchg_relaxed +#endif + +#else /* atomic_try_cmpxchg_relaxed */ + +#ifndef atomic_try_cmpxchg_acquire +static inline bool +atomic_try_cmpxchg_acquire(atomic_t *v, int *old, int new) +{ + bool ret = atomic_try_cmpxchg_relaxed(v, old, new); + __atomic_acquire_fence(); + return ret; +} +#define atomic_try_cmpxchg_acquire atomic_try_cmpxchg_acquire +#endif + +#ifndef atomic_try_cmpxchg_release +static inline bool +atomic_try_cmpxchg_release(atomic_t *v, int *old, int new) +{ + __atomic_release_fence(); + return atomic_try_cmpxchg_relaxed(v, old, new); +} +#define atomic_try_cmpxchg_release atomic_try_cmpxchg_release +#endif + +#ifndef atomic_try_cmpxchg +static inline bool +atomic_try_cmpxchg(atomic_t *v, int *old, int new) +{ + bool ret; + __atomic_pre_full_fence(); + ret = atomic_try_cmpxchg_relaxed(v, old, new); + __atomic_post_full_fence(); + return ret; +} +#define atomic_try_cmpxchg atomic_try_cmpxchg +#endif + +#endif /* atomic_try_cmpxchg_relaxed */ + +#ifndef atomic_sub_and_test +/** + * atomic_sub_and_test - subtract value from variable and test result + * @i: integer value to subtract + * @v: pointer of type atomic_t + * + * Atomically subtracts @i from @v and returns + * true if the result is zero, or false for all + * other cases. + */ +static inline bool +atomic_sub_and_test(int i, atomic_t *v) +{ + return atomic_sub_return(i, v) == 0; +} +#define atomic_sub_and_test atomic_sub_and_test +#endif + +#ifndef atomic_dec_and_test +/** + * atomic_dec_and_test - decrement and test + * @v: pointer of type atomic_t + * + * Atomically decrements @v by 1 and + * returns true if the result is 0, or false for all other + * cases. + */ +static inline bool +atomic_dec_and_test(atomic_t *v) +{ + return atomic_dec_return(v) == 0; +} +#define atomic_dec_and_test atomic_dec_and_test +#endif + +#ifndef atomic_inc_and_test +/** + * atomic_inc_and_test - increment and test + * @v: pointer of type atomic_t + * + * Atomically increments @v by 1 + * and returns true if the result is zero, or false for all + * other cases. + */ +static inline bool +atomic_inc_and_test(atomic_t *v) +{ + return atomic_inc_return(v) == 0; +} +#define atomic_inc_and_test atomic_inc_and_test +#endif + +#ifndef atomic_add_negative +/** + * atomic_add_negative - add and test if negative + * @i: integer value to add + * @v: pointer of type atomic_t + * + * Atomically adds @i to @v and returns true + * if the result is negative, or false when + * result is greater than or equal to zero. + */ +static inline bool +atomic_add_negative(int i, atomic_t *v) +{ + return atomic_add_return(i, v) < 0; +} +#define atomic_add_negative atomic_add_negative +#endif + +#ifndef atomic_fetch_add_unless +/** + * atomic_fetch_add_unless - add unless the number is already a given value + * @v: pointer of type atomic_t + * @a: the amount to add to v... + * @u: ...unless v is equal to u. + * + * Atomically adds @a to @v, so long as @v was not already @u. + * Returns original value of @v + */ +static inline int +atomic_fetch_add_unless(atomic_t *v, int a, int u) +{ + int c = atomic_read(v); + + do { + if (unlikely(c == u)) + break; + } while (!atomic_try_cmpxchg(v, &c, c + a)); + + return c; +} +#define atomic_fetch_add_unless atomic_fetch_add_unless +#endif + +#ifndef atomic_add_unless +/** + * atomic_add_unless - add unless the number is already a given value + * @v: pointer of type atomic_t + * @a: the amount to add to v... + * @u: ...unless v is equal to u. 
+ * + * Atomically adds @a to @v, if @v was not already @u. + * Returns true if the addition was done. + */ +static inline bool +atomic_add_unless(atomic_t *v, int a, int u) +{ + return atomic_fetch_add_unless(v, a, u) != u; +} +#define atomic_add_unless atomic_add_unless +#endif + +#ifndef atomic_inc_not_zero +/** + * atomic_inc_not_zero - increment unless the number is zero + * @v: pointer of type atomic_t + * + * Atomically increments @v by 1, if @v is non-zero. + * Returns true if the increment was done. + */ +static inline bool +atomic_inc_not_zero(atomic_t *v) +{ + return atomic_add_unless(v, 1, 0); +} +#define atomic_inc_not_zero atomic_inc_not_zero +#endif + +#ifndef atomic_inc_unless_negative +static inline bool +atomic_inc_unless_negative(atomic_t *v) +{ + int c = atomic_read(v); + + do { + if (unlikely(c < 0)) + return false; + } while (!atomic_try_cmpxchg(v, &c, c + 1)); + + return true; +} +#define atomic_inc_unless_negative atomic_inc_unless_negative +#endif + +#ifndef atomic_dec_unless_positive +static inline bool +atomic_dec_unless_positive(atomic_t *v) +{ + int c = atomic_read(v); + + do { + if (unlikely(c > 0)) + return false; + } while (!atomic_try_cmpxchg(v, &c, c - 1)); + + return true; +} +#define atomic_dec_unless_positive atomic_dec_unless_positive +#endif + +#ifndef atomic_dec_if_positive +static inline int +atomic_dec_if_positive(atomic_t *v) +{ + int dec, c = atomic_read(v); + + do { + dec = c - 1; + if (unlikely(dec < 0)) + break; + } while (!atomic_try_cmpxchg(v, &c, dec)); + + return dec; +} +#define atomic_dec_if_positive atomic_dec_if_positive +#endif + +#define atomic_cond_read_acquire(v, c) smp_cond_load_acquire(&(v)->counter, (c)) +#define atomic_cond_read_relaxed(v, c) smp_cond_load_relaxed(&(v)->counter, (c)) + +#ifdef CONFIG_GENERIC_ATOMIC64 +#include +#endif + +#ifndef atomic64_read_acquire +static inline s64 +atomic64_read_acquire(const atomic64_t *v) +{ + return smp_load_acquire(&(v)->counter); +} +#define atomic64_read_acquire atomic64_read_acquire +#endif + +#ifndef atomic64_set_release +static inline void +atomic64_set_release(atomic64_t *v, s64 i) +{ + smp_store_release(&(v)->counter, i); +} +#define atomic64_set_release atomic64_set_release +#endif + +#ifndef atomic64_add_return_relaxed +#define atomic64_add_return_acquire atomic64_add_return +#define atomic64_add_return_release atomic64_add_return +#define atomic64_add_return_relaxed atomic64_add_return +#else /* atomic64_add_return_relaxed */ + +#ifndef atomic64_add_return_acquire +static inline s64 +atomic64_add_return_acquire(s64 i, atomic64_t *v) +{ + s64 ret = atomic64_add_return_relaxed(i, v); + __atomic_acquire_fence(); + return ret; +} +#define atomic64_add_return_acquire atomic64_add_return_acquire +#endif + +#ifndef atomic64_add_return_release +static inline s64 +atomic64_add_return_release(s64 i, atomic64_t *v) +{ + __atomic_release_fence(); + return atomic64_add_return_relaxed(i, v); +} +#define atomic64_add_return_release atomic64_add_return_release +#endif + +#ifndef atomic64_add_return +static inline s64 +atomic64_add_return(s64 i, atomic64_t *v) +{ + s64 ret; + __atomic_pre_full_fence(); + ret = atomic64_add_return_relaxed(i, v); + __atomic_post_full_fence(); + return ret; +} +#define atomic64_add_return atomic64_add_return +#endif + +#endif /* atomic64_add_return_relaxed */ + +#ifndef atomic64_fetch_add_relaxed +#define atomic64_fetch_add_acquire atomic64_fetch_add +#define atomic64_fetch_add_release atomic64_fetch_add +#define atomic64_fetch_add_relaxed atomic64_fetch_add 
+#else /* atomic64_fetch_add_relaxed */ + +#ifndef atomic64_fetch_add_acquire +static inline s64 +atomic64_fetch_add_acquire(s64 i, atomic64_t *v) +{ + s64 ret = atomic64_fetch_add_relaxed(i, v); + __atomic_acquire_fence(); + return ret; +} +#define atomic64_fetch_add_acquire atomic64_fetch_add_acquire +#endif + +#ifndef atomic64_fetch_add_release +static inline s64 +atomic64_fetch_add_release(s64 i, atomic64_t *v) +{ + __atomic_release_fence(); + return atomic64_fetch_add_relaxed(i, v); +} +#define atomic64_fetch_add_release atomic64_fetch_add_release +#endif + +#ifndef atomic64_fetch_add +static inline s64 +atomic64_fetch_add(s64 i, atomic64_t *v) +{ + s64 ret; + __atomic_pre_full_fence(); + ret = atomic64_fetch_add_relaxed(i, v); + __atomic_post_full_fence(); + return ret; +} +#define atomic64_fetch_add atomic64_fetch_add +#endif + +#endif /* atomic64_fetch_add_relaxed */ + +#ifndef atomic64_sub_return_relaxed +#define atomic64_sub_return_acquire atomic64_sub_return +#define atomic64_sub_return_release atomic64_sub_return +#define atomic64_sub_return_relaxed atomic64_sub_return +#else /* atomic64_sub_return_relaxed */ + +#ifndef atomic64_sub_return_acquire +static inline s64 +atomic64_sub_return_acquire(s64 i, atomic64_t *v) +{ + s64 ret = atomic64_sub_return_relaxed(i, v); + __atomic_acquire_fence(); + return ret; +} +#define atomic64_sub_return_acquire atomic64_sub_return_acquire +#endif + +#ifndef atomic64_sub_return_release +static inline s64 +atomic64_sub_return_release(s64 i, atomic64_t *v) +{ + __atomic_release_fence(); + return atomic64_sub_return_relaxed(i, v); +} +#define atomic64_sub_return_release atomic64_sub_return_release +#endif + +#ifndef atomic64_sub_return +static inline s64 +atomic64_sub_return(s64 i, atomic64_t *v) +{ + s64 ret; + __atomic_pre_full_fence(); + ret = atomic64_sub_return_relaxed(i, v); + __atomic_post_full_fence(); + return ret; +} +#define atomic64_sub_return atomic64_sub_return +#endif + +#endif /* atomic64_sub_return_relaxed */ + +#ifndef atomic64_fetch_sub_relaxed +#define atomic64_fetch_sub_acquire atomic64_fetch_sub +#define atomic64_fetch_sub_release atomic64_fetch_sub +#define atomic64_fetch_sub_relaxed atomic64_fetch_sub +#else /* atomic64_fetch_sub_relaxed */ + +#ifndef atomic64_fetch_sub_acquire +static inline s64 +atomic64_fetch_sub_acquire(s64 i, atomic64_t *v) +{ + s64 ret = atomic64_fetch_sub_relaxed(i, v); + __atomic_acquire_fence(); + return ret; +} +#define atomic64_fetch_sub_acquire atomic64_fetch_sub_acquire +#endif + +#ifndef atomic64_fetch_sub_release +static inline s64 +atomic64_fetch_sub_release(s64 i, atomic64_t *v) +{ + __atomic_release_fence(); + return atomic64_fetch_sub_relaxed(i, v); +} +#define atomic64_fetch_sub_release atomic64_fetch_sub_release +#endif + +#ifndef atomic64_fetch_sub +static inline s64 +atomic64_fetch_sub(s64 i, atomic64_t *v) +{ + s64 ret; + __atomic_pre_full_fence(); + ret = atomic64_fetch_sub_relaxed(i, v); + __atomic_post_full_fence(); + return ret; +} +#define atomic64_fetch_sub atomic64_fetch_sub +#endif + +#endif /* atomic64_fetch_sub_relaxed */ + +#ifndef atomic64_inc +static inline void +atomic64_inc(atomic64_t *v) +{ + atomic64_add(1, v); +} +#define atomic64_inc atomic64_inc +#endif + +#ifndef atomic64_inc_return_relaxed +#ifdef atomic64_inc_return +#define atomic64_inc_return_acquire atomic64_inc_return +#define atomic64_inc_return_release atomic64_inc_return +#define atomic64_inc_return_relaxed atomic64_inc_return +#endif /* atomic64_inc_return */ + +#ifndef atomic64_inc_return +static 
inline s64 +atomic64_inc_return(atomic64_t *v) +{ + return atomic64_add_return(1, v); +} +#define atomic64_inc_return atomic64_inc_return +#endif + +#ifndef atomic64_inc_return_acquire +static inline s64 +atomic64_inc_return_acquire(atomic64_t *v) +{ + return atomic64_add_return_acquire(1, v); +} +#define atomic64_inc_return_acquire atomic64_inc_return_acquire +#endif + +#ifndef atomic64_inc_return_release +static inline s64 +atomic64_inc_return_release(atomic64_t *v) +{ + return atomic64_add_return_release(1, v); +} +#define atomic64_inc_return_release atomic64_inc_return_release +#endif + +#ifndef atomic64_inc_return_relaxed +static inline s64 +atomic64_inc_return_relaxed(atomic64_t *v) +{ + return atomic64_add_return_relaxed(1, v); +} +#define atomic64_inc_return_relaxed atomic64_inc_return_relaxed +#endif + +#else /* atomic64_inc_return_relaxed */ + +#ifndef atomic64_inc_return_acquire +static inline s64 +atomic64_inc_return_acquire(atomic64_t *v) +{ + s64 ret = atomic64_inc_return_relaxed(v); + __atomic_acquire_fence(); + return ret; +} +#define atomic64_inc_return_acquire atomic64_inc_return_acquire +#endif + +#ifndef atomic64_inc_return_release +static inline s64 +atomic64_inc_return_release(atomic64_t *v) +{ + __atomic_release_fence(); + return atomic64_inc_return_relaxed(v); +} +#define atomic64_inc_return_release atomic64_inc_return_release +#endif + +#ifndef atomic64_inc_return +static inline s64 +atomic64_inc_return(atomic64_t *v) +{ + s64 ret; + __atomic_pre_full_fence(); + ret = atomic64_inc_return_relaxed(v); + __atomic_post_full_fence(); + return ret; +} +#define atomic64_inc_return atomic64_inc_return +#endif + +#endif /* atomic64_inc_return_relaxed */ + +#ifndef atomic64_fetch_inc_relaxed +#ifdef atomic64_fetch_inc +#define atomic64_fetch_inc_acquire atomic64_fetch_inc +#define atomic64_fetch_inc_release atomic64_fetch_inc +#define atomic64_fetch_inc_relaxed atomic64_fetch_inc +#endif /* atomic64_fetch_inc */ + +#ifndef atomic64_fetch_inc +static inline s64 +atomic64_fetch_inc(atomic64_t *v) +{ + return atomic64_fetch_add(1, v); +} +#define atomic64_fetch_inc atomic64_fetch_inc +#endif + +#ifndef atomic64_fetch_inc_acquire +static inline s64 +atomic64_fetch_inc_acquire(atomic64_t *v) +{ + return atomic64_fetch_add_acquire(1, v); +} +#define atomic64_fetch_inc_acquire atomic64_fetch_inc_acquire +#endif + +#ifndef atomic64_fetch_inc_release +static inline s64 +atomic64_fetch_inc_release(atomic64_t *v) +{ + return atomic64_fetch_add_release(1, v); +} +#define atomic64_fetch_inc_release atomic64_fetch_inc_release +#endif + +#ifndef atomic64_fetch_inc_relaxed +static inline s64 +atomic64_fetch_inc_relaxed(atomic64_t *v) +{ + return atomic64_fetch_add_relaxed(1, v); +} +#define atomic64_fetch_inc_relaxed atomic64_fetch_inc_relaxed +#endif + +#else /* atomic64_fetch_inc_relaxed */ + +#ifndef atomic64_fetch_inc_acquire +static inline s64 +atomic64_fetch_inc_acquire(atomic64_t *v) +{ + s64 ret = atomic64_fetch_inc_relaxed(v); + __atomic_acquire_fence(); + return ret; +} +#define atomic64_fetch_inc_acquire atomic64_fetch_inc_acquire +#endif + +#ifndef atomic64_fetch_inc_release +static inline s64 +atomic64_fetch_inc_release(atomic64_t *v) +{ + __atomic_release_fence(); + return atomic64_fetch_inc_relaxed(v); +} +#define atomic64_fetch_inc_release atomic64_fetch_inc_release +#endif + +#ifndef atomic64_fetch_inc +static inline s64 +atomic64_fetch_inc(atomic64_t *v) +{ + s64 ret; + __atomic_pre_full_fence(); + ret = atomic64_fetch_inc_relaxed(v); + __atomic_post_full_fence(); + return 
ret; +} +#define atomic64_fetch_inc atomic64_fetch_inc +#endif + +#endif /* atomic64_fetch_inc_relaxed */ + +#ifndef atomic64_dec +static inline void +atomic64_dec(atomic64_t *v) +{ + atomic64_sub(1, v); +} +#define atomic64_dec atomic64_dec +#endif + +#ifndef atomic64_dec_return_relaxed +#ifdef atomic64_dec_return +#define atomic64_dec_return_acquire atomic64_dec_return +#define atomic64_dec_return_release atomic64_dec_return +#define atomic64_dec_return_relaxed atomic64_dec_return +#endif /* atomic64_dec_return */ + +#ifndef atomic64_dec_return +static inline s64 +atomic64_dec_return(atomic64_t *v) +{ + return atomic64_sub_return(1, v); +} +#define atomic64_dec_return atomic64_dec_return +#endif + +#ifndef atomic64_dec_return_acquire +static inline s64 +atomic64_dec_return_acquire(atomic64_t *v) +{ + return atomic64_sub_return_acquire(1, v); +} +#define atomic64_dec_return_acquire atomic64_dec_return_acquire +#endif + +#ifndef atomic64_dec_return_release +static inline s64 +atomic64_dec_return_release(atomic64_t *v) +{ + return atomic64_sub_return_release(1, v); +} +#define atomic64_dec_return_release atomic64_dec_return_release +#endif + +#ifndef atomic64_dec_return_relaxed +static inline s64 +atomic64_dec_return_relaxed(atomic64_t *v) +{ + return atomic64_sub_return_relaxed(1, v); +} +#define atomic64_dec_return_relaxed atomic64_dec_return_relaxed +#endif + +#else /* atomic64_dec_return_relaxed */ + +#ifndef atomic64_dec_return_acquire +static inline s64 +atomic64_dec_return_acquire(atomic64_t *v) +{ + s64 ret = atomic64_dec_return_relaxed(v); + __atomic_acquire_fence(); + return ret; +} +#define atomic64_dec_return_acquire atomic64_dec_return_acquire +#endif + +#ifndef atomic64_dec_return_release +static inline s64 +atomic64_dec_return_release(atomic64_t *v) +{ + __atomic_release_fence(); + return atomic64_dec_return_relaxed(v); +} +#define atomic64_dec_return_release atomic64_dec_return_release +#endif + +#ifndef atomic64_dec_return +static inline s64 +atomic64_dec_return(atomic64_t *v) +{ + s64 ret; + __atomic_pre_full_fence(); + ret = atomic64_dec_return_relaxed(v); + __atomic_post_full_fence(); + return ret; +} +#define atomic64_dec_return atomic64_dec_return +#endif + +#endif /* atomic64_dec_return_relaxed */ + +#ifndef atomic64_fetch_dec_relaxed +#ifdef atomic64_fetch_dec +#define atomic64_fetch_dec_acquire atomic64_fetch_dec +#define atomic64_fetch_dec_release atomic64_fetch_dec +#define atomic64_fetch_dec_relaxed atomic64_fetch_dec +#endif /* atomic64_fetch_dec */ + +#ifndef atomic64_fetch_dec +static inline s64 +atomic64_fetch_dec(atomic64_t *v) +{ + return atomic64_fetch_sub(1, v); +} +#define atomic64_fetch_dec atomic64_fetch_dec +#endif + +#ifndef atomic64_fetch_dec_acquire +static inline s64 +atomic64_fetch_dec_acquire(atomic64_t *v) +{ + return atomic64_fetch_sub_acquire(1, v); +} +#define atomic64_fetch_dec_acquire atomic64_fetch_dec_acquire +#endif + +#ifndef atomic64_fetch_dec_release +static inline s64 +atomic64_fetch_dec_release(atomic64_t *v) +{ + return atomic64_fetch_sub_release(1, v); +} +#define atomic64_fetch_dec_release atomic64_fetch_dec_release +#endif + +#ifndef atomic64_fetch_dec_relaxed +static inline s64 +atomic64_fetch_dec_relaxed(atomic64_t *v) +{ + return atomic64_fetch_sub_relaxed(1, v); +} +#define atomic64_fetch_dec_relaxed atomic64_fetch_dec_relaxed +#endif + +#else /* atomic64_fetch_dec_relaxed */ + +#ifndef atomic64_fetch_dec_acquire +static inline s64 +atomic64_fetch_dec_acquire(atomic64_t *v) +{ + s64 ret = atomic64_fetch_dec_relaxed(v); + 
__atomic_acquire_fence(); + return ret; +} +#define atomic64_fetch_dec_acquire atomic64_fetch_dec_acquire +#endif + +#ifndef atomic64_fetch_dec_release +static inline s64 +atomic64_fetch_dec_release(atomic64_t *v) +{ + __atomic_release_fence(); + return atomic64_fetch_dec_relaxed(v); +} +#define atomic64_fetch_dec_release atomic64_fetch_dec_release +#endif + +#ifndef atomic64_fetch_dec +static inline s64 +atomic64_fetch_dec(atomic64_t *v) +{ + s64 ret; + __atomic_pre_full_fence(); + ret = atomic64_fetch_dec_relaxed(v); + __atomic_post_full_fence(); + return ret; +} +#define atomic64_fetch_dec atomic64_fetch_dec +#endif + +#endif /* atomic64_fetch_dec_relaxed */ + +#ifndef atomic64_fetch_and_relaxed +#define atomic64_fetch_and_acquire atomic64_fetch_and +#define atomic64_fetch_and_release atomic64_fetch_and +#define atomic64_fetch_and_relaxed atomic64_fetch_and +#else /* atomic64_fetch_and_relaxed */ + +#ifndef atomic64_fetch_and_acquire +static inline s64 +atomic64_fetch_and_acquire(s64 i, atomic64_t *v) +{ + s64 ret = atomic64_fetch_and_relaxed(i, v); + __atomic_acquire_fence(); + return ret; +} +#define atomic64_fetch_and_acquire atomic64_fetch_and_acquire +#endif + +#ifndef atomic64_fetch_and_release +static inline s64 +atomic64_fetch_and_release(s64 i, atomic64_t *v) +{ + __atomic_release_fence(); + return atomic64_fetch_and_relaxed(i, v); +} +#define atomic64_fetch_and_release atomic64_fetch_and_release +#endif + +#ifndef atomic64_fetch_and +static inline s64 +atomic64_fetch_and(s64 i, atomic64_t *v) +{ + s64 ret; + __atomic_pre_full_fence(); + ret = atomic64_fetch_and_relaxed(i, v); + __atomic_post_full_fence(); + return ret; +} +#define atomic64_fetch_and atomic64_fetch_and +#endif + +#endif /* atomic64_fetch_and_relaxed */ + +#ifndef atomic64_andnot +static inline void +atomic64_andnot(s64 i, atomic64_t *v) +{ + atomic64_and(~i, v); +} +#define atomic64_andnot atomic64_andnot +#endif + +#ifndef atomic64_fetch_andnot_relaxed +#ifdef atomic64_fetch_andnot +#define atomic64_fetch_andnot_acquire atomic64_fetch_andnot +#define atomic64_fetch_andnot_release atomic64_fetch_andnot +#define atomic64_fetch_andnot_relaxed atomic64_fetch_andnot +#endif /* atomic64_fetch_andnot */ + +#ifndef atomic64_fetch_andnot +static inline s64 +atomic64_fetch_andnot(s64 i, atomic64_t *v) +{ + return atomic64_fetch_and(~i, v); +} +#define atomic64_fetch_andnot atomic64_fetch_andnot +#endif + +#ifndef atomic64_fetch_andnot_acquire +static inline s64 +atomic64_fetch_andnot_acquire(s64 i, atomic64_t *v) +{ + return atomic64_fetch_and_acquire(~i, v); +} +#define atomic64_fetch_andnot_acquire atomic64_fetch_andnot_acquire +#endif + +#ifndef atomic64_fetch_andnot_release +static inline s64 +atomic64_fetch_andnot_release(s64 i, atomic64_t *v) +{ + return atomic64_fetch_and_release(~i, v); +} +#define atomic64_fetch_andnot_release atomic64_fetch_andnot_release +#endif + +#ifndef atomic64_fetch_andnot_relaxed +static inline s64 +atomic64_fetch_andnot_relaxed(s64 i, atomic64_t *v) +{ + return atomic64_fetch_and_relaxed(~i, v); +} +#define atomic64_fetch_andnot_relaxed atomic64_fetch_andnot_relaxed +#endif + +#else /* atomic64_fetch_andnot_relaxed */ + +#ifndef atomic64_fetch_andnot_acquire +static inline s64 +atomic64_fetch_andnot_acquire(s64 i, atomic64_t *v) +{ + s64 ret = atomic64_fetch_andnot_relaxed(i, v); + __atomic_acquire_fence(); + return ret; +} +#define atomic64_fetch_andnot_acquire atomic64_fetch_andnot_acquire +#endif + +#ifndef atomic64_fetch_andnot_release +static inline s64 
+atomic64_fetch_andnot_release(s64 i, atomic64_t *v) +{ + __atomic_release_fence(); + return atomic64_fetch_andnot_relaxed(i, v); +} +#define atomic64_fetch_andnot_release atomic64_fetch_andnot_release +#endif + +#ifndef atomic64_fetch_andnot +static inline s64 +atomic64_fetch_andnot(s64 i, atomic64_t *v) +{ + s64 ret; + __atomic_pre_full_fence(); + ret = atomic64_fetch_andnot_relaxed(i, v); + __atomic_post_full_fence(); + return ret; +} +#define atomic64_fetch_andnot atomic64_fetch_andnot +#endif + +#endif /* atomic64_fetch_andnot_relaxed */ + +#ifndef atomic64_fetch_or_relaxed +#define atomic64_fetch_or_acquire atomic64_fetch_or +#define atomic64_fetch_or_release atomic64_fetch_or +#define atomic64_fetch_or_relaxed atomic64_fetch_or +#else /* atomic64_fetch_or_relaxed */ + +#ifndef atomic64_fetch_or_acquire +static inline s64 +atomic64_fetch_or_acquire(s64 i, atomic64_t *v) +{ + s64 ret = atomic64_fetch_or_relaxed(i, v); + __atomic_acquire_fence(); + return ret; +} +#define atomic64_fetch_or_acquire atomic64_fetch_or_acquire +#endif + +#ifndef atomic64_fetch_or_release +static inline s64 +atomic64_fetch_or_release(s64 i, atomic64_t *v) +{ + __atomic_release_fence(); + return atomic64_fetch_or_relaxed(i, v); +} +#define atomic64_fetch_or_release atomic64_fetch_or_release +#endif + +#ifndef atomic64_fetch_or +static inline s64 +atomic64_fetch_or(s64 i, atomic64_t *v) +{ + s64 ret; + __atomic_pre_full_fence(); + ret = atomic64_fetch_or_relaxed(i, v); + __atomic_post_full_fence(); + return ret; +} +#define atomic64_fetch_or atomic64_fetch_or +#endif + +#endif /* atomic64_fetch_or_relaxed */ + +#ifndef atomic64_fetch_xor_relaxed +#define atomic64_fetch_xor_acquire atomic64_fetch_xor +#define atomic64_fetch_xor_release atomic64_fetch_xor +#define atomic64_fetch_xor_relaxed atomic64_fetch_xor +#else /* atomic64_fetch_xor_relaxed */ + +#ifndef atomic64_fetch_xor_acquire +static inline s64 +atomic64_fetch_xor_acquire(s64 i, atomic64_t *v) +{ + s64 ret = atomic64_fetch_xor_relaxed(i, v); + __atomic_acquire_fence(); + return ret; +} +#define atomic64_fetch_xor_acquire atomic64_fetch_xor_acquire +#endif + +#ifndef atomic64_fetch_xor_release +static inline s64 +atomic64_fetch_xor_release(s64 i, atomic64_t *v) +{ + __atomic_release_fence(); + return atomic64_fetch_xor_relaxed(i, v); +} +#define atomic64_fetch_xor_release atomic64_fetch_xor_release +#endif + +#ifndef atomic64_fetch_xor +static inline s64 +atomic64_fetch_xor(s64 i, atomic64_t *v) +{ + s64 ret; + __atomic_pre_full_fence(); + ret = atomic64_fetch_xor_relaxed(i, v); + __atomic_post_full_fence(); + return ret; +} +#define atomic64_fetch_xor atomic64_fetch_xor +#endif + +#endif /* atomic64_fetch_xor_relaxed */ + +#ifndef atomic64_xchg_relaxed +#define atomic64_xchg_acquire atomic64_xchg +#define atomic64_xchg_release atomic64_xchg +#define atomic64_xchg_relaxed atomic64_xchg +#else /* atomic64_xchg_relaxed */ + +#ifndef atomic64_xchg_acquire +static inline s64 +atomic64_xchg_acquire(atomic64_t *v, s64 i) +{ + s64 ret = atomic64_xchg_relaxed(v, i); + __atomic_acquire_fence(); + return ret; +} +#define atomic64_xchg_acquire atomic64_xchg_acquire +#endif + +#ifndef atomic64_xchg_release +static inline s64 +atomic64_xchg_release(atomic64_t *v, s64 i) +{ + __atomic_release_fence(); + return atomic64_xchg_relaxed(v, i); +} +#define atomic64_xchg_release atomic64_xchg_release +#endif + +#ifndef atomic64_xchg +static inline s64 +atomic64_xchg(atomic64_t *v, s64 i) +{ + s64 ret; + __atomic_pre_full_fence(); + ret = atomic64_xchg_relaxed(v, i); + 
__atomic_post_full_fence(); + return ret; +} +#define atomic64_xchg atomic64_xchg +#endif + +#endif /* atomic64_xchg_relaxed */ + +#ifndef atomic64_cmpxchg_relaxed +#define atomic64_cmpxchg_acquire atomic64_cmpxchg +#define atomic64_cmpxchg_release atomic64_cmpxchg +#define atomic64_cmpxchg_relaxed atomic64_cmpxchg +#else /* atomic64_cmpxchg_relaxed */ + +#ifndef atomic64_cmpxchg_acquire +static inline s64 +atomic64_cmpxchg_acquire(atomic64_t *v, s64 old, s64 new) +{ + s64 ret = atomic64_cmpxchg_relaxed(v, old, new); + __atomic_acquire_fence(); + return ret; +} +#define atomic64_cmpxchg_acquire atomic64_cmpxchg_acquire +#endif + +#ifndef atomic64_cmpxchg_release +static inline s64 +atomic64_cmpxchg_release(atomic64_t *v, s64 old, s64 new) +{ + __atomic_release_fence(); + return atomic64_cmpxchg_relaxed(v, old, new); +} +#define atomic64_cmpxchg_release atomic64_cmpxchg_release +#endif + +#ifndef atomic64_cmpxchg +static inline s64 +atomic64_cmpxchg(atomic64_t *v, s64 old, s64 new) +{ + s64 ret; + __atomic_pre_full_fence(); + ret = atomic64_cmpxchg_relaxed(v, old, new); + __atomic_post_full_fence(); + return ret; +} +#define atomic64_cmpxchg atomic64_cmpxchg +#endif + +#endif /* atomic64_cmpxchg_relaxed */ + +#ifndef atomic64_try_cmpxchg_relaxed +#ifdef atomic64_try_cmpxchg +#define atomic64_try_cmpxchg_acquire atomic64_try_cmpxchg +#define atomic64_try_cmpxchg_release atomic64_try_cmpxchg +#define atomic64_try_cmpxchg_relaxed atomic64_try_cmpxchg +#endif /* atomic64_try_cmpxchg */ + +#ifndef atomic64_try_cmpxchg +static inline bool +atomic64_try_cmpxchg(atomic64_t *v, s64 *old, s64 new) +{ + s64 r, o = *old; + r = atomic64_cmpxchg(v, o, new); + if (unlikely(r != o)) + *old = r; + return likely(r == o); +} +#define atomic64_try_cmpxchg atomic64_try_cmpxchg +#endif + +#ifndef atomic64_try_cmpxchg_acquire +static inline bool +atomic64_try_cmpxchg_acquire(atomic64_t *v, s64 *old, s64 new) +{ + s64 r, o = *old; + r = atomic64_cmpxchg_acquire(v, o, new); + if (unlikely(r != o)) + *old = r; + return likely(r == o); +} +#define atomic64_try_cmpxchg_acquire atomic64_try_cmpxchg_acquire +#endif + +#ifndef atomic64_try_cmpxchg_release +static inline bool +atomic64_try_cmpxchg_release(atomic64_t *v, s64 *old, s64 new) +{ + s64 r, o = *old; + r = atomic64_cmpxchg_release(v, o, new); + if (unlikely(r != o)) + *old = r; + return likely(r == o); +} +#define atomic64_try_cmpxchg_release atomic64_try_cmpxchg_release +#endif + +#ifndef atomic64_try_cmpxchg_relaxed +static inline bool +atomic64_try_cmpxchg_relaxed(atomic64_t *v, s64 *old, s64 new) +{ + s64 r, o = *old; + r = atomic64_cmpxchg_relaxed(v, o, new); + if (unlikely(r != o)) + *old = r; + return likely(r == o); +} +#define atomic64_try_cmpxchg_relaxed atomic64_try_cmpxchg_relaxed +#endif + +#else /* atomic64_try_cmpxchg_relaxed */ + +#ifndef atomic64_try_cmpxchg_acquire +static inline bool +atomic64_try_cmpxchg_acquire(atomic64_t *v, s64 *old, s64 new) +{ + bool ret = atomic64_try_cmpxchg_relaxed(v, old, new); + __atomic_acquire_fence(); + return ret; +} +#define atomic64_try_cmpxchg_acquire atomic64_try_cmpxchg_acquire +#endif + +#ifndef atomic64_try_cmpxchg_release +static inline bool +atomic64_try_cmpxchg_release(atomic64_t *v, s64 *old, s64 new) +{ + __atomic_release_fence(); + return atomic64_try_cmpxchg_relaxed(v, old, new); +} +#define atomic64_try_cmpxchg_release atomic64_try_cmpxchg_release +#endif + +#ifndef atomic64_try_cmpxchg +static inline bool +atomic64_try_cmpxchg(atomic64_t *v, s64 *old, s64 new) +{ + bool ret; + 
__atomic_pre_full_fence(); + ret = atomic64_try_cmpxchg_relaxed(v, old, new); + __atomic_post_full_fence(); + return ret; +} +#define atomic64_try_cmpxchg atomic64_try_cmpxchg +#endif + +#endif /* atomic64_try_cmpxchg_relaxed */ + +#ifndef atomic64_sub_and_test +/** + * atomic64_sub_and_test - subtract value from variable and test result + * @i: integer value to subtract + * @v: pointer of type atomic64_t + * + * Atomically subtracts @i from @v and returns + * true if the result is zero, or false for all + * other cases. + */ +static inline bool +atomic64_sub_and_test(s64 i, atomic64_t *v) +{ + return atomic64_sub_return(i, v) == 0; +} +#define atomic64_sub_and_test atomic64_sub_and_test +#endif + +#ifndef atomic64_dec_and_test +/** + * atomic64_dec_and_test - decrement and test + * @v: pointer of type atomic64_t + * + * Atomically decrements @v by 1 and + * returns true if the result is 0, or false for all other + * cases. + */ +static inline bool +atomic64_dec_and_test(atomic64_t *v) +{ + return atomic64_dec_return(v) == 0; +} +#define atomic64_dec_and_test atomic64_dec_and_test +#endif + +#ifndef atomic64_inc_and_test +/** + * atomic64_inc_and_test - increment and test + * @v: pointer of type atomic64_t + * + * Atomically increments @v by 1 + * and returns true if the result is zero, or false for all + * other cases. + */ +static inline bool +atomic64_inc_and_test(atomic64_t *v) +{ + return atomic64_inc_return(v) == 0; +} +#define atomic64_inc_and_test atomic64_inc_and_test +#endif + +#ifndef atomic64_add_negative +/** + * atomic64_add_negative - add and test if negative + * @i: integer value to add + * @v: pointer of type atomic64_t + * + * Atomically adds @i to @v and returns true + * if the result is negative, or false when + * result is greater than or equal to zero. + */ +static inline bool +atomic64_add_negative(s64 i, atomic64_t *v) +{ + return atomic64_add_return(i, v) < 0; +} +#define atomic64_add_negative atomic64_add_negative +#endif + +#ifndef atomic64_fetch_add_unless +/** + * atomic64_fetch_add_unless - add unless the number is already a given value + * @v: pointer of type atomic64_t + * @a: the amount to add to v... + * @u: ...unless v is equal to u. + * + * Atomically adds @a to @v, so long as @v was not already @u. + * Returns original value of @v + */ +static inline s64 +atomic64_fetch_add_unless(atomic64_t *v, s64 a, s64 u) +{ + s64 c = atomic64_read(v); + + do { + if (unlikely(c == u)) + break; + } while (!atomic64_try_cmpxchg(v, &c, c + a)); + + return c; +} +#define atomic64_fetch_add_unless atomic64_fetch_add_unless +#endif + +#ifndef atomic64_add_unless +/** + * atomic64_add_unless - add unless the number is already a given value + * @v: pointer of type atomic64_t + * @a: the amount to add to v... + * @u: ...unless v is equal to u. + * + * Atomically adds @a to @v, if @v was not already @u. + * Returns true if the addition was done. + */ +static inline bool +atomic64_add_unless(atomic64_t *v, s64 a, s64 u) +{ + return atomic64_fetch_add_unless(v, a, u) != u; +} +#define atomic64_add_unless atomic64_add_unless +#endif + +#ifndef atomic64_inc_not_zero +/** + * atomic64_inc_not_zero - increment unless the number is zero + * @v: pointer of type atomic64_t + * + * Atomically increments @v by 1, if @v is non-zero. + * Returns true if the increment was done. 
+ */ +static inline bool +atomic64_inc_not_zero(atomic64_t *v) +{ + return atomic64_add_unless(v, 1, 0); +} +#define atomic64_inc_not_zero atomic64_inc_not_zero +#endif + +#ifndef atomic64_inc_unless_negative +static inline bool +atomic64_inc_unless_negative(atomic64_t *v) +{ + s64 c = atomic64_read(v); + + do { + if (unlikely(c < 0)) + return false; + } while (!atomic64_try_cmpxchg(v, &c, c + 1)); + + return true; +} +#define atomic64_inc_unless_negative atomic64_inc_unless_negative +#endif + +#ifndef atomic64_dec_unless_positive +static inline bool +atomic64_dec_unless_positive(atomic64_t *v) +{ + s64 c = atomic64_read(v); + + do { + if (unlikely(c > 0)) + return false; + } while (!atomic64_try_cmpxchg(v, &c, c - 1)); + + return true; +} +#define atomic64_dec_unless_positive atomic64_dec_unless_positive +#endif + +#ifndef atomic64_dec_if_positive +static inline s64 +atomic64_dec_if_positive(atomic64_t *v) +{ + s64 dec, c = atomic64_read(v); + + do { + dec = c - 1; + if (unlikely(dec < 0)) + break; + } while (!atomic64_try_cmpxchg(v, &c, dec)); + + return dec; +} +#define atomic64_dec_if_positive atomic64_dec_if_positive +#endif + +#define atomic64_cond_read_acquire(v, c) smp_cond_load_acquire(&(v)->counter, (c)) +#define atomic64_cond_read_relaxed(v, c) smp_cond_load_relaxed(&(v)->counter, (c)) + +#endif /* _LINUX_ATOMIC_FALLBACK_H */ diff --git a/include/linux/atomic.h b/include/linux/atomic.h index 1e8e88bdaf09..4c0d009a46f0 100644 --- a/include/linux/atomic.h +++ b/include/linux/atomic.h @@ -25,14 +25,6 @@ * See Documentation/memory-barriers.txt for ACQUIRE/RELEASE definitions. */ -#ifndef atomic_read_acquire -#define atomic_read_acquire(v) smp_load_acquire(&(v)->counter) -#endif - -#ifndef atomic_set_release -#define atomic_set_release(v, i) smp_store_release(&(v)->counter, (i)) -#endif - /* * The idea here is to build acquire/release variants by adding explicit * barriers on top of the relaxed variant. In the case where the relaxed @@ -79,1238 +71,7 @@ __ret; \ }) -/* atomic_add_return_relaxed */ -#ifndef atomic_add_return_relaxed -#define atomic_add_return_relaxed atomic_add_return -#define atomic_add_return_acquire atomic_add_return -#define atomic_add_return_release atomic_add_return - -#else /* atomic_add_return_relaxed */ - -#ifndef atomic_add_return_acquire -#define atomic_add_return_acquire(...) \ - __atomic_op_acquire(atomic_add_return, __VA_ARGS__) -#endif - -#ifndef atomic_add_return_release -#define atomic_add_return_release(...) \ - __atomic_op_release(atomic_add_return, __VA_ARGS__) -#endif - -#ifndef atomic_add_return -#define atomic_add_return(...) \ - __atomic_op_fence(atomic_add_return, __VA_ARGS__) -#endif -#endif /* atomic_add_return_relaxed */ - -#ifndef atomic_inc -#define atomic_inc(v) atomic_add(1, (v)) -#endif - -/* atomic_inc_return_relaxed */ -#ifndef atomic_inc_return_relaxed - -#ifndef atomic_inc_return -#define atomic_inc_return(v) atomic_add_return(1, (v)) -#define atomic_inc_return_relaxed(v) atomic_add_return_relaxed(1, (v)) -#define atomic_inc_return_acquire(v) atomic_add_return_acquire(1, (v)) -#define atomic_inc_return_release(v) atomic_add_return_release(1, (v)) -#else /* atomic_inc_return */ -#define atomic_inc_return_relaxed atomic_inc_return -#define atomic_inc_return_acquire atomic_inc_return -#define atomic_inc_return_release atomic_inc_return -#endif /* atomic_inc_return */ - -#else /* atomic_inc_return_relaxed */ - -#ifndef atomic_inc_return_acquire -#define atomic_inc_return_acquire(...) 
\ - __atomic_op_acquire(atomic_inc_return, __VA_ARGS__) -#endif - -#ifndef atomic_inc_return_release -#define atomic_inc_return_release(...) \ - __atomic_op_release(atomic_inc_return, __VA_ARGS__) -#endif - -#ifndef atomic_inc_return -#define atomic_inc_return(...) \ - __atomic_op_fence(atomic_inc_return, __VA_ARGS__) -#endif -#endif /* atomic_inc_return_relaxed */ - -/* atomic_sub_return_relaxed */ -#ifndef atomic_sub_return_relaxed -#define atomic_sub_return_relaxed atomic_sub_return -#define atomic_sub_return_acquire atomic_sub_return -#define atomic_sub_return_release atomic_sub_return - -#else /* atomic_sub_return_relaxed */ - -#ifndef atomic_sub_return_acquire -#define atomic_sub_return_acquire(...) \ - __atomic_op_acquire(atomic_sub_return, __VA_ARGS__) -#endif - -#ifndef atomic_sub_return_release -#define atomic_sub_return_release(...) \ - __atomic_op_release(atomic_sub_return, __VA_ARGS__) -#endif - -#ifndef atomic_sub_return -#define atomic_sub_return(...) \ - __atomic_op_fence(atomic_sub_return, __VA_ARGS__) -#endif -#endif /* atomic_sub_return_relaxed */ - -#ifndef atomic_dec -#define atomic_dec(v) atomic_sub(1, (v)) -#endif - -/* atomic_dec_return_relaxed */ -#ifndef atomic_dec_return_relaxed - -#ifndef atomic_dec_return -#define atomic_dec_return(v) atomic_sub_return(1, (v)) -#define atomic_dec_return_relaxed(v) atomic_sub_return_relaxed(1, (v)) -#define atomic_dec_return_acquire(v) atomic_sub_return_acquire(1, (v)) -#define atomic_dec_return_release(v) atomic_sub_return_release(1, (v)) -#else /* atomic_dec_return */ -#define atomic_dec_return_relaxed atomic_dec_return -#define atomic_dec_return_acquire atomic_dec_return -#define atomic_dec_return_release atomic_dec_return -#endif /* atomic_dec_return */ - -#else /* atomic_dec_return_relaxed */ - -#ifndef atomic_dec_return_acquire -#define atomic_dec_return_acquire(...) \ - __atomic_op_acquire(atomic_dec_return, __VA_ARGS__) -#endif - -#ifndef atomic_dec_return_release -#define atomic_dec_return_release(...) \ - __atomic_op_release(atomic_dec_return, __VA_ARGS__) -#endif - -#ifndef atomic_dec_return -#define atomic_dec_return(...) \ - __atomic_op_fence(atomic_dec_return, __VA_ARGS__) -#endif -#endif /* atomic_dec_return_relaxed */ - - -/* atomic_fetch_add_relaxed */ -#ifndef atomic_fetch_add_relaxed -#define atomic_fetch_add_relaxed atomic_fetch_add -#define atomic_fetch_add_acquire atomic_fetch_add -#define atomic_fetch_add_release atomic_fetch_add - -#else /* atomic_fetch_add_relaxed */ - -#ifndef atomic_fetch_add_acquire -#define atomic_fetch_add_acquire(...) \ - __atomic_op_acquire(atomic_fetch_add, __VA_ARGS__) -#endif - -#ifndef atomic_fetch_add_release -#define atomic_fetch_add_release(...) \ - __atomic_op_release(atomic_fetch_add, __VA_ARGS__) -#endif - -#ifndef atomic_fetch_add -#define atomic_fetch_add(...) 
\ - __atomic_op_fence(atomic_fetch_add, __VA_ARGS__) -#endif -#endif /* atomic_fetch_add_relaxed */ - -/* atomic_fetch_inc_relaxed */ -#ifndef atomic_fetch_inc_relaxed - -#ifndef atomic_fetch_inc -#define atomic_fetch_inc(v) atomic_fetch_add(1, (v)) -#define atomic_fetch_inc_relaxed(v) atomic_fetch_add_relaxed(1, (v)) -#define atomic_fetch_inc_acquire(v) atomic_fetch_add_acquire(1, (v)) -#define atomic_fetch_inc_release(v) atomic_fetch_add_release(1, (v)) -#else /* atomic_fetch_inc */ -#define atomic_fetch_inc_relaxed atomic_fetch_inc -#define atomic_fetch_inc_acquire atomic_fetch_inc -#define atomic_fetch_inc_release atomic_fetch_inc -#endif /* atomic_fetch_inc */ - -#else /* atomic_fetch_inc_relaxed */ - -#ifndef atomic_fetch_inc_acquire -#define atomic_fetch_inc_acquire(...) \ - __atomic_op_acquire(atomic_fetch_inc, __VA_ARGS__) -#endif - -#ifndef atomic_fetch_inc_release -#define atomic_fetch_inc_release(...) \ - __atomic_op_release(atomic_fetch_inc, __VA_ARGS__) -#endif - -#ifndef atomic_fetch_inc -#define atomic_fetch_inc(...) \ - __atomic_op_fence(atomic_fetch_inc, __VA_ARGS__) -#endif -#endif /* atomic_fetch_inc_relaxed */ - -/* atomic_fetch_sub_relaxed */ -#ifndef atomic_fetch_sub_relaxed -#define atomic_fetch_sub_relaxed atomic_fetch_sub -#define atomic_fetch_sub_acquire atomic_fetch_sub -#define atomic_fetch_sub_release atomic_fetch_sub - -#else /* atomic_fetch_sub_relaxed */ - -#ifndef atomic_fetch_sub_acquire -#define atomic_fetch_sub_acquire(...) \ - __atomic_op_acquire(atomic_fetch_sub, __VA_ARGS__) -#endif - -#ifndef atomic_fetch_sub_release -#define atomic_fetch_sub_release(...) \ - __atomic_op_release(atomic_fetch_sub, __VA_ARGS__) -#endif - -#ifndef atomic_fetch_sub -#define atomic_fetch_sub(...) \ - __atomic_op_fence(atomic_fetch_sub, __VA_ARGS__) -#endif -#endif /* atomic_fetch_sub_relaxed */ - -/* atomic_fetch_dec_relaxed */ -#ifndef atomic_fetch_dec_relaxed - -#ifndef atomic_fetch_dec -#define atomic_fetch_dec(v) atomic_fetch_sub(1, (v)) -#define atomic_fetch_dec_relaxed(v) atomic_fetch_sub_relaxed(1, (v)) -#define atomic_fetch_dec_acquire(v) atomic_fetch_sub_acquire(1, (v)) -#define atomic_fetch_dec_release(v) atomic_fetch_sub_release(1, (v)) -#else /* atomic_fetch_dec */ -#define atomic_fetch_dec_relaxed atomic_fetch_dec -#define atomic_fetch_dec_acquire atomic_fetch_dec -#define atomic_fetch_dec_release atomic_fetch_dec -#endif /* atomic_fetch_dec */ - -#else /* atomic_fetch_dec_relaxed */ - -#ifndef atomic_fetch_dec_acquire -#define atomic_fetch_dec_acquire(...) \ - __atomic_op_acquire(atomic_fetch_dec, __VA_ARGS__) -#endif - -#ifndef atomic_fetch_dec_release -#define atomic_fetch_dec_release(...) \ - __atomic_op_release(atomic_fetch_dec, __VA_ARGS__) -#endif - -#ifndef atomic_fetch_dec -#define atomic_fetch_dec(...) \ - __atomic_op_fence(atomic_fetch_dec, __VA_ARGS__) -#endif -#endif /* atomic_fetch_dec_relaxed */ - -/* atomic_fetch_or_relaxed */ -#ifndef atomic_fetch_or_relaxed -#define atomic_fetch_or_relaxed atomic_fetch_or -#define atomic_fetch_or_acquire atomic_fetch_or -#define atomic_fetch_or_release atomic_fetch_or - -#else /* atomic_fetch_or_relaxed */ - -#ifndef atomic_fetch_or_acquire -#define atomic_fetch_or_acquire(...) \ - __atomic_op_acquire(atomic_fetch_or, __VA_ARGS__) -#endif - -#ifndef atomic_fetch_or_release -#define atomic_fetch_or_release(...) \ - __atomic_op_release(atomic_fetch_or, __VA_ARGS__) -#endif - -#ifndef atomic_fetch_or -#define atomic_fetch_or(...) 
\ - __atomic_op_fence(atomic_fetch_or, __VA_ARGS__) -#endif -#endif /* atomic_fetch_or_relaxed */ - -/* atomic_fetch_and_relaxed */ -#ifndef atomic_fetch_and_relaxed -#define atomic_fetch_and_relaxed atomic_fetch_and -#define atomic_fetch_and_acquire atomic_fetch_and -#define atomic_fetch_and_release atomic_fetch_and - -#else /* atomic_fetch_and_relaxed */ - -#ifndef atomic_fetch_and_acquire -#define atomic_fetch_and_acquire(...) \ - __atomic_op_acquire(atomic_fetch_and, __VA_ARGS__) -#endif - -#ifndef atomic_fetch_and_release -#define atomic_fetch_and_release(...) \ - __atomic_op_release(atomic_fetch_and, __VA_ARGS__) -#endif - -#ifndef atomic_fetch_and -#define atomic_fetch_and(...) \ - __atomic_op_fence(atomic_fetch_and, __VA_ARGS__) -#endif -#endif /* atomic_fetch_and_relaxed */ - -#ifndef atomic_andnot -#define atomic_andnot(i, v) atomic_and(~(int)(i), (v)) -#endif - -#ifndef atomic_fetch_andnot_relaxed - -#ifndef atomic_fetch_andnot -#define atomic_fetch_andnot(i, v) atomic_fetch_and(~(int)(i), (v)) -#define atomic_fetch_andnot_relaxed(i, v) atomic_fetch_and_relaxed(~(int)(i), (v)) -#define atomic_fetch_andnot_acquire(i, v) atomic_fetch_and_acquire(~(int)(i), (v)) -#define atomic_fetch_andnot_release(i, v) atomic_fetch_and_release(~(int)(i), (v)) -#else /* atomic_fetch_andnot */ -#define atomic_fetch_andnot_relaxed atomic_fetch_andnot -#define atomic_fetch_andnot_acquire atomic_fetch_andnot -#define atomic_fetch_andnot_release atomic_fetch_andnot -#endif /* atomic_fetch_andnot */ - -#else /* atomic_fetch_andnot_relaxed */ - -#ifndef atomic_fetch_andnot_acquire -#define atomic_fetch_andnot_acquire(...) \ - __atomic_op_acquire(atomic_fetch_andnot, __VA_ARGS__) -#endif - -#ifndef atomic_fetch_andnot_release -#define atomic_fetch_andnot_release(...) \ - __atomic_op_release(atomic_fetch_andnot, __VA_ARGS__) -#endif - -#ifndef atomic_fetch_andnot -#define atomic_fetch_andnot(...) \ - __atomic_op_fence(atomic_fetch_andnot, __VA_ARGS__) -#endif -#endif /* atomic_fetch_andnot_relaxed */ - -/* atomic_fetch_xor_relaxed */ -#ifndef atomic_fetch_xor_relaxed -#define atomic_fetch_xor_relaxed atomic_fetch_xor -#define atomic_fetch_xor_acquire atomic_fetch_xor -#define atomic_fetch_xor_release atomic_fetch_xor - -#else /* atomic_fetch_xor_relaxed */ - -#ifndef atomic_fetch_xor_acquire -#define atomic_fetch_xor_acquire(...) \ - __atomic_op_acquire(atomic_fetch_xor, __VA_ARGS__) -#endif - -#ifndef atomic_fetch_xor_release -#define atomic_fetch_xor_release(...) \ - __atomic_op_release(atomic_fetch_xor, __VA_ARGS__) -#endif - -#ifndef atomic_fetch_xor -#define atomic_fetch_xor(...) \ - __atomic_op_fence(atomic_fetch_xor, __VA_ARGS__) -#endif -#endif /* atomic_fetch_xor_relaxed */ - - -/* atomic_xchg_relaxed */ -#ifndef atomic_xchg_relaxed -#define atomic_xchg_relaxed atomic_xchg -#define atomic_xchg_acquire atomic_xchg -#define atomic_xchg_release atomic_xchg - -#else /* atomic_xchg_relaxed */ - -#ifndef atomic_xchg_acquire -#define atomic_xchg_acquire(...) \ - __atomic_op_acquire(atomic_xchg, __VA_ARGS__) -#endif - -#ifndef atomic_xchg_release -#define atomic_xchg_release(...) \ - __atomic_op_release(atomic_xchg, __VA_ARGS__) -#endif - -#ifndef atomic_xchg -#define atomic_xchg(...) 
\ - __atomic_op_fence(atomic_xchg, __VA_ARGS__) -#endif -#endif /* atomic_xchg_relaxed */ - -/* atomic_cmpxchg_relaxed */ -#ifndef atomic_cmpxchg_relaxed -#define atomic_cmpxchg_relaxed atomic_cmpxchg -#define atomic_cmpxchg_acquire atomic_cmpxchg -#define atomic_cmpxchg_release atomic_cmpxchg - -#else /* atomic_cmpxchg_relaxed */ - -#ifndef atomic_cmpxchg_acquire -#define atomic_cmpxchg_acquire(...) \ - __atomic_op_acquire(atomic_cmpxchg, __VA_ARGS__) -#endif - -#ifndef atomic_cmpxchg_release -#define atomic_cmpxchg_release(...) \ - __atomic_op_release(atomic_cmpxchg, __VA_ARGS__) -#endif - -#ifndef atomic_cmpxchg -#define atomic_cmpxchg(...) \ - __atomic_op_fence(atomic_cmpxchg, __VA_ARGS__) -#endif -#endif /* atomic_cmpxchg_relaxed */ - -#ifndef atomic_try_cmpxchg - -#define __atomic_try_cmpxchg(type, _p, _po, _n) \ -({ \ - typeof(_po) __po = (_po); \ - typeof(*(_po)) __r, __o = *__po; \ - __r = atomic_cmpxchg##type((_p), __o, (_n)); \ - if (unlikely(__r != __o)) \ - *__po = __r; \ - likely(__r == __o); \ -}) - -#define atomic_try_cmpxchg(_p, _po, _n) __atomic_try_cmpxchg(, _p, _po, _n) -#define atomic_try_cmpxchg_relaxed(_p, _po, _n) __atomic_try_cmpxchg(_relaxed, _p, _po, _n) -#define atomic_try_cmpxchg_acquire(_p, _po, _n) __atomic_try_cmpxchg(_acquire, _p, _po, _n) -#define atomic_try_cmpxchg_release(_p, _po, _n) __atomic_try_cmpxchg(_release, _p, _po, _n) - -#else /* atomic_try_cmpxchg */ -#define atomic_try_cmpxchg_relaxed atomic_try_cmpxchg -#define atomic_try_cmpxchg_acquire atomic_try_cmpxchg -#define atomic_try_cmpxchg_release atomic_try_cmpxchg -#endif /* atomic_try_cmpxchg */ - -/* cmpxchg_relaxed */ -#ifndef cmpxchg_relaxed -#define cmpxchg_relaxed cmpxchg -#define cmpxchg_acquire cmpxchg -#define cmpxchg_release cmpxchg - -#else /* cmpxchg_relaxed */ - -#ifndef cmpxchg_acquire -#define cmpxchg_acquire(...) \ - __atomic_op_acquire(cmpxchg, __VA_ARGS__) -#endif - -#ifndef cmpxchg_release -#define cmpxchg_release(...) \ - __atomic_op_release(cmpxchg, __VA_ARGS__) -#endif - -#ifndef cmpxchg -#define cmpxchg(...) \ - __atomic_op_fence(cmpxchg, __VA_ARGS__) -#endif -#endif /* cmpxchg_relaxed */ - -/* cmpxchg64_relaxed */ -#ifndef cmpxchg64_relaxed -#define cmpxchg64_relaxed cmpxchg64 -#define cmpxchg64_acquire cmpxchg64 -#define cmpxchg64_release cmpxchg64 - -#else /* cmpxchg64_relaxed */ - -#ifndef cmpxchg64_acquire -#define cmpxchg64_acquire(...) \ - __atomic_op_acquire(cmpxchg64, __VA_ARGS__) -#endif - -#ifndef cmpxchg64_release -#define cmpxchg64_release(...) \ - __atomic_op_release(cmpxchg64, __VA_ARGS__) -#endif - -#ifndef cmpxchg64 -#define cmpxchg64(...) \ - __atomic_op_fence(cmpxchg64, __VA_ARGS__) -#endif -#endif /* cmpxchg64_relaxed */ - -/* xchg_relaxed */ -#ifndef xchg_relaxed -#define xchg_relaxed xchg -#define xchg_acquire xchg -#define xchg_release xchg - -#else /* xchg_relaxed */ - -#ifndef xchg_acquire -#define xchg_acquire(...) __atomic_op_acquire(xchg, __VA_ARGS__) -#endif - -#ifndef xchg_release -#define xchg_release(...) __atomic_op_release(xchg, __VA_ARGS__) -#endif - -#ifndef xchg -#define xchg(...) __atomic_op_fence(xchg, __VA_ARGS__) -#endif -#endif /* xchg_relaxed */ - -/** - * atomic_fetch_add_unless - add unless the number is already a given value - * @v: pointer of type atomic_t - * @a: the amount to add to v... - * @u: ...unless v is equal to u. - * - * Atomically adds @a to @v, if @v was not already @u. - * Returns the original value of @v. 
- */ -#ifndef atomic_fetch_add_unless -static inline int atomic_fetch_add_unless(atomic_t *v, int a, int u) -{ - int c = atomic_read(v); - - do { - if (unlikely(c == u)) - break; - } while (!atomic_try_cmpxchg(v, &c, c + a)); - - return c; -} -#endif - -/** - * atomic_add_unless - add unless the number is already a given value - * @v: pointer of type atomic_t - * @a: the amount to add to v... - * @u: ...unless v is equal to u. - * - * Atomically adds @a to @v, if @v was not already @u. - * Returns true if the addition was done. - */ -static inline bool atomic_add_unless(atomic_t *v, int a, int u) -{ - return atomic_fetch_add_unless(v, a, u) != u; -} - -/** - * atomic_inc_not_zero - increment unless the number is zero - * @v: pointer of type atomic_t - * - * Atomically increments @v by 1, if @v is non-zero. - * Returns true if the increment was done. - */ -#ifndef atomic_inc_not_zero -#define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0) -#endif - -/** - * atomic_inc_and_test - increment and test - * @v: pointer of type atomic_t - * - * Atomically increments @v by 1 - * and returns true if the result is zero, or false for all - * other cases. - */ -#ifndef atomic_inc_and_test -static inline bool atomic_inc_and_test(atomic_t *v) -{ - return atomic_inc_return(v) == 0; -} -#endif - -/** - * atomic_dec_and_test - decrement and test - * @v: pointer of type atomic_t - * - * Atomically decrements @v by 1 and - * returns true if the result is 0, or false for all other - * cases. - */ -#ifndef atomic_dec_and_test -static inline bool atomic_dec_and_test(atomic_t *v) -{ - return atomic_dec_return(v) == 0; -} -#endif - -/** - * atomic_sub_and_test - subtract value from variable and test result - * @i: integer value to subtract - * @v: pointer of type atomic_t - * - * Atomically subtracts @i from @v and returns - * true if the result is zero, or false for all - * other cases. - */ -#ifndef atomic_sub_and_test -static inline bool atomic_sub_and_test(int i, atomic_t *v) -{ - return atomic_sub_return(i, v) == 0; -} -#endif - -/** - * atomic_add_negative - add and test if negative - * @i: integer value to add - * @v: pointer of type atomic_t - * - * Atomically adds @i to @v and returns true - * if the result is negative, or false when - * result is greater than or equal to zero. - */ -#ifndef atomic_add_negative -static inline bool atomic_add_negative(int i, atomic_t *v) -{ - return atomic_add_return(i, v) < 0; -} -#endif - -#ifndef atomic_inc_unless_negative -static inline bool atomic_inc_unless_negative(atomic_t *v) -{ - int c = atomic_read(v); - - do { - if (unlikely(c < 0)) - return false; - } while (!atomic_try_cmpxchg(v, &c, c + 1)); - - return true; -} -#endif - -#ifndef atomic_dec_unless_positive -static inline bool atomic_dec_unless_positive(atomic_t *v) -{ - int c = atomic_read(v); - - do { - if (unlikely(c > 0)) - return false; - } while (!atomic_try_cmpxchg(v, &c, c - 1)); - - return true; -} -#endif - -/* - * atomic_dec_if_positive - decrement by 1 if old value positive - * @v: pointer of type atomic_t - * - * The function returns the old value of *v minus 1, even if - * the atomic variable, v, was not decremented. 
- */ -#ifndef atomic_dec_if_positive -static inline int atomic_dec_if_positive(atomic_t *v) -{ - int dec, c = atomic_read(v); - - do { - dec = c - 1; - if (unlikely(dec < 0)) - break; - } while (!atomic_try_cmpxchg(v, &c, dec)); - - return dec; -} -#endif - -#define atomic_cond_read_relaxed(v, c) smp_cond_load_relaxed(&(v)->counter, (c)) -#define atomic_cond_read_acquire(v, c) smp_cond_load_acquire(&(v)->counter, (c)) - -#ifdef CONFIG_GENERIC_ATOMIC64 -#include -#endif - -#ifndef atomic64_read_acquire -#define atomic64_read_acquire(v) smp_load_acquire(&(v)->counter) -#endif - -#ifndef atomic64_set_release -#define atomic64_set_release(v, i) smp_store_release(&(v)->counter, (i)) -#endif - -/* atomic64_add_return_relaxed */ -#ifndef atomic64_add_return_relaxed -#define atomic64_add_return_relaxed atomic64_add_return -#define atomic64_add_return_acquire atomic64_add_return -#define atomic64_add_return_release atomic64_add_return - -#else /* atomic64_add_return_relaxed */ - -#ifndef atomic64_add_return_acquire -#define atomic64_add_return_acquire(...) \ - __atomic_op_acquire(atomic64_add_return, __VA_ARGS__) -#endif - -#ifndef atomic64_add_return_release -#define atomic64_add_return_release(...) \ - __atomic_op_release(atomic64_add_return, __VA_ARGS__) -#endif - -#ifndef atomic64_add_return -#define atomic64_add_return(...) \ - __atomic_op_fence(atomic64_add_return, __VA_ARGS__) -#endif -#endif /* atomic64_add_return_relaxed */ - -#ifndef atomic64_inc -#define atomic64_inc(v) atomic64_add(1, (v)) -#endif - -/* atomic64_inc_return_relaxed */ -#ifndef atomic64_inc_return_relaxed - -#ifndef atomic64_inc_return -#define atomic64_inc_return(v) atomic64_add_return(1, (v)) -#define atomic64_inc_return_relaxed(v) atomic64_add_return_relaxed(1, (v)) -#define atomic64_inc_return_acquire(v) atomic64_add_return_acquire(1, (v)) -#define atomic64_inc_return_release(v) atomic64_add_return_release(1, (v)) -#else /* atomic64_inc_return */ -#define atomic64_inc_return_relaxed atomic64_inc_return -#define atomic64_inc_return_acquire atomic64_inc_return -#define atomic64_inc_return_release atomic64_inc_return -#endif /* atomic64_inc_return */ - -#else /* atomic64_inc_return_relaxed */ - -#ifndef atomic64_inc_return_acquire -#define atomic64_inc_return_acquire(...) \ - __atomic_op_acquire(atomic64_inc_return, __VA_ARGS__) -#endif - -#ifndef atomic64_inc_return_release -#define atomic64_inc_return_release(...) \ - __atomic_op_release(atomic64_inc_return, __VA_ARGS__) -#endif - -#ifndef atomic64_inc_return -#define atomic64_inc_return(...) \ - __atomic_op_fence(atomic64_inc_return, __VA_ARGS__) -#endif -#endif /* atomic64_inc_return_relaxed */ - - -/* atomic64_sub_return_relaxed */ -#ifndef atomic64_sub_return_relaxed -#define atomic64_sub_return_relaxed atomic64_sub_return -#define atomic64_sub_return_acquire atomic64_sub_return -#define atomic64_sub_return_release atomic64_sub_return - -#else /* atomic64_sub_return_relaxed */ - -#ifndef atomic64_sub_return_acquire -#define atomic64_sub_return_acquire(...) \ - __atomic_op_acquire(atomic64_sub_return, __VA_ARGS__) -#endif - -#ifndef atomic64_sub_return_release -#define atomic64_sub_return_release(...) \ - __atomic_op_release(atomic64_sub_return, __VA_ARGS__) -#endif - -#ifndef atomic64_sub_return -#define atomic64_sub_return(...) 
\ - __atomic_op_fence(atomic64_sub_return, __VA_ARGS__) -#endif -#endif /* atomic64_sub_return_relaxed */ - -#ifndef atomic64_dec -#define atomic64_dec(v) atomic64_sub(1, (v)) -#endif - -/* atomic64_dec_return_relaxed */ -#ifndef atomic64_dec_return_relaxed - -#ifndef atomic64_dec_return -#define atomic64_dec_return(v) atomic64_sub_return(1, (v)) -#define atomic64_dec_return_relaxed(v) atomic64_sub_return_relaxed(1, (v)) -#define atomic64_dec_return_acquire(v) atomic64_sub_return_acquire(1, (v)) -#define atomic64_dec_return_release(v) atomic64_sub_return_release(1, (v)) -#else /* atomic64_dec_return */ -#define atomic64_dec_return_relaxed atomic64_dec_return -#define atomic64_dec_return_acquire atomic64_dec_return -#define atomic64_dec_return_release atomic64_dec_return -#endif /* atomic64_dec_return */ - -#else /* atomic64_dec_return_relaxed */ - -#ifndef atomic64_dec_return_acquire -#define atomic64_dec_return_acquire(...) \ - __atomic_op_acquire(atomic64_dec_return, __VA_ARGS__) -#endif - -#ifndef atomic64_dec_return_release -#define atomic64_dec_return_release(...) \ - __atomic_op_release(atomic64_dec_return, __VA_ARGS__) -#endif - -#ifndef atomic64_dec_return -#define atomic64_dec_return(...) \ - __atomic_op_fence(atomic64_dec_return, __VA_ARGS__) -#endif -#endif /* atomic64_dec_return_relaxed */ - - -/* atomic64_fetch_add_relaxed */ -#ifndef atomic64_fetch_add_relaxed -#define atomic64_fetch_add_relaxed atomic64_fetch_add -#define atomic64_fetch_add_acquire atomic64_fetch_add -#define atomic64_fetch_add_release atomic64_fetch_add - -#else /* atomic64_fetch_add_relaxed */ - -#ifndef atomic64_fetch_add_acquire -#define atomic64_fetch_add_acquire(...) \ - __atomic_op_acquire(atomic64_fetch_add, __VA_ARGS__) -#endif - -#ifndef atomic64_fetch_add_release -#define atomic64_fetch_add_release(...) \ - __atomic_op_release(atomic64_fetch_add, __VA_ARGS__) -#endif - -#ifndef atomic64_fetch_add -#define atomic64_fetch_add(...) \ - __atomic_op_fence(atomic64_fetch_add, __VA_ARGS__) -#endif -#endif /* atomic64_fetch_add_relaxed */ - -/* atomic64_fetch_inc_relaxed */ -#ifndef atomic64_fetch_inc_relaxed - -#ifndef atomic64_fetch_inc -#define atomic64_fetch_inc(v) atomic64_fetch_add(1, (v)) -#define atomic64_fetch_inc_relaxed(v) atomic64_fetch_add_relaxed(1, (v)) -#define atomic64_fetch_inc_acquire(v) atomic64_fetch_add_acquire(1, (v)) -#define atomic64_fetch_inc_release(v) atomic64_fetch_add_release(1, (v)) -#else /* atomic64_fetch_inc */ -#define atomic64_fetch_inc_relaxed atomic64_fetch_inc -#define atomic64_fetch_inc_acquire atomic64_fetch_inc -#define atomic64_fetch_inc_release atomic64_fetch_inc -#endif /* atomic64_fetch_inc */ - -#else /* atomic64_fetch_inc_relaxed */ - -#ifndef atomic64_fetch_inc_acquire -#define atomic64_fetch_inc_acquire(...) \ - __atomic_op_acquire(atomic64_fetch_inc, __VA_ARGS__) -#endif - -#ifndef atomic64_fetch_inc_release -#define atomic64_fetch_inc_release(...) \ - __atomic_op_release(atomic64_fetch_inc, __VA_ARGS__) -#endif - -#ifndef atomic64_fetch_inc -#define atomic64_fetch_inc(...) \ - __atomic_op_fence(atomic64_fetch_inc, __VA_ARGS__) -#endif -#endif /* atomic64_fetch_inc_relaxed */ - -/* atomic64_fetch_sub_relaxed */ -#ifndef atomic64_fetch_sub_relaxed -#define atomic64_fetch_sub_relaxed atomic64_fetch_sub -#define atomic64_fetch_sub_acquire atomic64_fetch_sub -#define atomic64_fetch_sub_release atomic64_fetch_sub - -#else /* atomic64_fetch_sub_relaxed */ - -#ifndef atomic64_fetch_sub_acquire -#define atomic64_fetch_sub_acquire(...) 
\ - __atomic_op_acquire(atomic64_fetch_sub, __VA_ARGS__) -#endif - -#ifndef atomic64_fetch_sub_release -#define atomic64_fetch_sub_release(...) \ - __atomic_op_release(atomic64_fetch_sub, __VA_ARGS__) -#endif - -#ifndef atomic64_fetch_sub -#define atomic64_fetch_sub(...) \ - __atomic_op_fence(atomic64_fetch_sub, __VA_ARGS__) -#endif -#endif /* atomic64_fetch_sub_relaxed */ - -/* atomic64_fetch_dec_relaxed */ -#ifndef atomic64_fetch_dec_relaxed - -#ifndef atomic64_fetch_dec -#define atomic64_fetch_dec(v) atomic64_fetch_sub(1, (v)) -#define atomic64_fetch_dec_relaxed(v) atomic64_fetch_sub_relaxed(1, (v)) -#define atomic64_fetch_dec_acquire(v) atomic64_fetch_sub_acquire(1, (v)) -#define atomic64_fetch_dec_release(v) atomic64_fetch_sub_release(1, (v)) -#else /* atomic64_fetch_dec */ -#define atomic64_fetch_dec_relaxed atomic64_fetch_dec -#define atomic64_fetch_dec_acquire atomic64_fetch_dec -#define atomic64_fetch_dec_release atomic64_fetch_dec -#endif /* atomic64_fetch_dec */ - -#else /* atomic64_fetch_dec_relaxed */ - -#ifndef atomic64_fetch_dec_acquire -#define atomic64_fetch_dec_acquire(...) \ - __atomic_op_acquire(atomic64_fetch_dec, __VA_ARGS__) -#endif - -#ifndef atomic64_fetch_dec_release -#define atomic64_fetch_dec_release(...) \ - __atomic_op_release(atomic64_fetch_dec, __VA_ARGS__) -#endif - -#ifndef atomic64_fetch_dec -#define atomic64_fetch_dec(...) \ - __atomic_op_fence(atomic64_fetch_dec, __VA_ARGS__) -#endif -#endif /* atomic64_fetch_dec_relaxed */ - -/* atomic64_fetch_or_relaxed */ -#ifndef atomic64_fetch_or_relaxed -#define atomic64_fetch_or_relaxed atomic64_fetch_or -#define atomic64_fetch_or_acquire atomic64_fetch_or -#define atomic64_fetch_or_release atomic64_fetch_or - -#else /* atomic64_fetch_or_relaxed */ - -#ifndef atomic64_fetch_or_acquire -#define atomic64_fetch_or_acquire(...) \ - __atomic_op_acquire(atomic64_fetch_or, __VA_ARGS__) -#endif - -#ifndef atomic64_fetch_or_release -#define atomic64_fetch_or_release(...) \ - __atomic_op_release(atomic64_fetch_or, __VA_ARGS__) -#endif - -#ifndef atomic64_fetch_or -#define atomic64_fetch_or(...) \ - __atomic_op_fence(atomic64_fetch_or, __VA_ARGS__) -#endif -#endif /* atomic64_fetch_or_relaxed */ - -/* atomic64_fetch_and_relaxed */ -#ifndef atomic64_fetch_and_relaxed -#define atomic64_fetch_and_relaxed atomic64_fetch_and -#define atomic64_fetch_and_acquire atomic64_fetch_and -#define atomic64_fetch_and_release atomic64_fetch_and - -#else /* atomic64_fetch_and_relaxed */ - -#ifndef atomic64_fetch_and_acquire -#define atomic64_fetch_and_acquire(...) \ - __atomic_op_acquire(atomic64_fetch_and, __VA_ARGS__) -#endif - -#ifndef atomic64_fetch_and_release -#define atomic64_fetch_and_release(...) \ - __atomic_op_release(atomic64_fetch_and, __VA_ARGS__) -#endif - -#ifndef atomic64_fetch_and -#define atomic64_fetch_and(...) 
\ - __atomic_op_fence(atomic64_fetch_and, __VA_ARGS__) -#endif -#endif /* atomic64_fetch_and_relaxed */ - -#ifndef atomic64_andnot -#define atomic64_andnot(i, v) atomic64_and(~(long long)(i), (v)) -#endif - -#ifndef atomic64_fetch_andnot_relaxed - -#ifndef atomic64_fetch_andnot -#define atomic64_fetch_andnot(i, v) atomic64_fetch_and(~(long long)(i), (v)) -#define atomic64_fetch_andnot_relaxed(i, v) atomic64_fetch_and_relaxed(~(long long)(i), (v)) -#define atomic64_fetch_andnot_acquire(i, v) atomic64_fetch_and_acquire(~(long long)(i), (v)) -#define atomic64_fetch_andnot_release(i, v) atomic64_fetch_and_release(~(long long)(i), (v)) -#else /* atomic64_fetch_andnot */ -#define atomic64_fetch_andnot_relaxed atomic64_fetch_andnot -#define atomic64_fetch_andnot_acquire atomic64_fetch_andnot -#define atomic64_fetch_andnot_release atomic64_fetch_andnot -#endif /* atomic64_fetch_andnot */ - -#else /* atomic64_fetch_andnot_relaxed */ - -#ifndef atomic64_fetch_andnot_acquire -#define atomic64_fetch_andnot_acquire(...) \ - __atomic_op_acquire(atomic64_fetch_andnot, __VA_ARGS__) -#endif - -#ifndef atomic64_fetch_andnot_release -#define atomic64_fetch_andnot_release(...) \ - __atomic_op_release(atomic64_fetch_andnot, __VA_ARGS__) -#endif - -#ifndef atomic64_fetch_andnot -#define atomic64_fetch_andnot(...) \ - __atomic_op_fence(atomic64_fetch_andnot, __VA_ARGS__) -#endif -#endif /* atomic64_fetch_andnot_relaxed */ - -/* atomic64_fetch_xor_relaxed */ -#ifndef atomic64_fetch_xor_relaxed -#define atomic64_fetch_xor_relaxed atomic64_fetch_xor -#define atomic64_fetch_xor_acquire atomic64_fetch_xor -#define atomic64_fetch_xor_release atomic64_fetch_xor - -#else /* atomic64_fetch_xor_relaxed */ - -#ifndef atomic64_fetch_xor_acquire -#define atomic64_fetch_xor_acquire(...) \ - __atomic_op_acquire(atomic64_fetch_xor, __VA_ARGS__) -#endif - -#ifndef atomic64_fetch_xor_release -#define atomic64_fetch_xor_release(...) \ - __atomic_op_release(atomic64_fetch_xor, __VA_ARGS__) -#endif - -#ifndef atomic64_fetch_xor -#define atomic64_fetch_xor(...) \ - __atomic_op_fence(atomic64_fetch_xor, __VA_ARGS__) -#endif -#endif /* atomic64_fetch_xor_relaxed */ - - -/* atomic64_xchg_relaxed */ -#ifndef atomic64_xchg_relaxed -#define atomic64_xchg_relaxed atomic64_xchg -#define atomic64_xchg_acquire atomic64_xchg -#define atomic64_xchg_release atomic64_xchg - -#else /* atomic64_xchg_relaxed */ - -#ifndef atomic64_xchg_acquire -#define atomic64_xchg_acquire(...) \ - __atomic_op_acquire(atomic64_xchg, __VA_ARGS__) -#endif - -#ifndef atomic64_xchg_release -#define atomic64_xchg_release(...) \ - __atomic_op_release(atomic64_xchg, __VA_ARGS__) -#endif - -#ifndef atomic64_xchg -#define atomic64_xchg(...) \ - __atomic_op_fence(atomic64_xchg, __VA_ARGS__) -#endif -#endif /* atomic64_xchg_relaxed */ - -/* atomic64_cmpxchg_relaxed */ -#ifndef atomic64_cmpxchg_relaxed -#define atomic64_cmpxchg_relaxed atomic64_cmpxchg -#define atomic64_cmpxchg_acquire atomic64_cmpxchg -#define atomic64_cmpxchg_release atomic64_cmpxchg - -#else /* atomic64_cmpxchg_relaxed */ - -#ifndef atomic64_cmpxchg_acquire -#define atomic64_cmpxchg_acquire(...) \ - __atomic_op_acquire(atomic64_cmpxchg, __VA_ARGS__) -#endif - -#ifndef atomic64_cmpxchg_release -#define atomic64_cmpxchg_release(...) \ - __atomic_op_release(atomic64_cmpxchg, __VA_ARGS__) -#endif - -#ifndef atomic64_cmpxchg -#define atomic64_cmpxchg(...) 
\ - __atomic_op_fence(atomic64_cmpxchg, __VA_ARGS__) -#endif -#endif /* atomic64_cmpxchg_relaxed */ - -#ifndef atomic64_try_cmpxchg - -#define __atomic64_try_cmpxchg(type, _p, _po, _n) \ -({ \ - typeof(_po) __po = (_po); \ - typeof(*(_po)) __r, __o = *__po; \ - __r = atomic64_cmpxchg##type((_p), __o, (_n)); \ - if (unlikely(__r != __o)) \ - *__po = __r; \ - likely(__r == __o); \ -}) - -#define atomic64_try_cmpxchg(_p, _po, _n) __atomic64_try_cmpxchg(, _p, _po, _n) -#define atomic64_try_cmpxchg_relaxed(_p, _po, _n) __atomic64_try_cmpxchg(_relaxed, _p, _po, _n) -#define atomic64_try_cmpxchg_acquire(_p, _po, _n) __atomic64_try_cmpxchg(_acquire, _p, _po, _n) -#define atomic64_try_cmpxchg_release(_p, _po, _n) __atomic64_try_cmpxchg(_release, _p, _po, _n) - -#else /* atomic64_try_cmpxchg */ -#define atomic64_try_cmpxchg_relaxed atomic64_try_cmpxchg -#define atomic64_try_cmpxchg_acquire atomic64_try_cmpxchg -#define atomic64_try_cmpxchg_release atomic64_try_cmpxchg -#endif /* atomic64_try_cmpxchg */ - -/** - * atomic64_fetch_add_unless - add unless the number is already a given value - * @v: pointer of type atomic64_t - * @a: the amount to add to v... - * @u: ...unless v is equal to u. - * - * Atomically adds @a to @v, if @v was not already @u. - * Returns the original value of @v. - */ -#ifndef atomic64_fetch_add_unless -static inline long long atomic64_fetch_add_unless(atomic64_t *v, long long a, - long long u) -{ - long long c = atomic64_read(v); - - do { - if (unlikely(c == u)) - break; - } while (!atomic64_try_cmpxchg(v, &c, c + a)); - - return c; -} -#endif - -/** - * atomic64_add_unless - add unless the number is already a given value - * @v: pointer of type atomic_t - * @a: the amount to add to v... - * @u: ...unless v is equal to u. - * - * Atomically adds @a to @v, if @v was not already @u. - * Returns true if the addition was done. - */ -static inline bool atomic64_add_unless(atomic64_t *v, long long a, long long u) -{ - return atomic64_fetch_add_unless(v, a, u) != u; -} - -/** - * atomic64_inc_not_zero - increment unless the number is zero - * @v: pointer of type atomic64_t - * - * Atomically increments @v by 1, if @v is non-zero. - * Returns true if the increment was done. - */ -#ifndef atomic64_inc_not_zero -#define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0) -#endif - -/** - * atomic64_inc_and_test - increment and test - * @v: pointer of type atomic64_t - * - * Atomically increments @v by 1 - * and returns true if the result is zero, or false for all - * other cases. - */ -#ifndef atomic64_inc_and_test -static inline bool atomic64_inc_and_test(atomic64_t *v) -{ - return atomic64_inc_return(v) == 0; -} -#endif - -/** - * atomic64_dec_and_test - decrement and test - * @v: pointer of type atomic64_t - * - * Atomically decrements @v by 1 and - * returns true if the result is 0, or false for all other - * cases. - */ -#ifndef atomic64_dec_and_test -static inline bool atomic64_dec_and_test(atomic64_t *v) -{ - return atomic64_dec_return(v) == 0; -} -#endif - -/** - * atomic64_sub_and_test - subtract value from variable and test result - * @i: integer value to subtract - * @v: pointer of type atomic64_t - * - * Atomically subtracts @i from @v and returns - * true if the result is zero, or false for all - * other cases. 
- */ -#ifndef atomic64_sub_and_test -static inline bool atomic64_sub_and_test(long long i, atomic64_t *v) -{ - return atomic64_sub_return(i, v) == 0; -} -#endif - -/** - * atomic64_add_negative - add and test if negative - * @i: integer value to add - * @v: pointer of type atomic64_t - * - * Atomically adds @i to @v and returns true - * if the result is negative, or false when - * result is greater than or equal to zero. - */ -#ifndef atomic64_add_negative -static inline bool atomic64_add_negative(long long i, atomic64_t *v) -{ - return atomic64_add_return(i, v) < 0; -} -#endif - -#ifndef atomic64_inc_unless_negative -static inline bool atomic64_inc_unless_negative(atomic64_t *v) -{ - long long c = atomic64_read(v); - - do { - if (unlikely(c < 0)) - return false; - } while (!atomic64_try_cmpxchg(v, &c, c + 1)); - - return true; -} -#endif - -#ifndef atomic64_dec_unless_positive -static inline bool atomic64_dec_unless_positive(atomic64_t *v) -{ - long long c = atomic64_read(v); - - do { - if (unlikely(c > 0)) - return false; - } while (!atomic64_try_cmpxchg(v, &c, c - 1)); - - return true; -} -#endif - -/* - * atomic64_dec_if_positive - decrement by 1 if old value positive - * @v: pointer of type atomic64_t - * - * The function returns the old value of *v minus 1, even if - * the atomic64 variable, v, was not decremented. - */ -#ifndef atomic64_dec_if_positive -static inline long long atomic64_dec_if_positive(atomic64_t *v) -{ - long long dec, c = atomic64_read(v); - - do { - dec = c - 1; - if (unlikely(dec < 0)) - break; - } while (!atomic64_try_cmpxchg(v, &c, dec)); - - return dec; -} -#endif - -#define atomic64_cond_read_relaxed(v, c) smp_cond_load_relaxed(&(v)->counter, (c)) -#define atomic64_cond_read_acquire(v, c) smp_cond_load_acquire(&(v)->counter, (c)) +#include #include -- cgit v1.2.3 From 8fc5c73554db0ac18c0c6ac5b2099ab917f83bdf Mon Sep 17 00:00:00 2001 From: Dan Williams Date: Fri, 9 Nov 2018 12:43:07 -0800 Subject: acpi/nfit, device-dax: Identify differentiated memory with a unique numa-node MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Persistent memory, as described by the ACPI NFIT (NVDIMM Firmware Interface Table), is the first known instance of a memory range described by a unique "target" proximity domain. Where "initiator" and "target" proximity domains is an approach that the ACPI HMAT (Heterogeneous Memory Attributes Table) uses to described the unique performance properties of a memory range relative to a given initiator (e.g. CPU or DMA device). Currently the numa-node for a /dev/pmemX block-device or /dev/daxX.Y char-device follows the traditional notion of 'numa-node' where the attribute conveys the closest online numa-node. That numa-node attribute is useful for cpu-binding and memory-binding processes *near* the device. However, when the memory range backing a 'pmem', or 'dax' device is onlined (memory hot-add) the memory-only-numa-node representing that address needs to be differentiated from the set of online nodes. In other words, the numa-node association of the device depends on whether you can bind processes *near* the cpu-numa-node in the offline device-case, or bind process *on* the memory-range directly after the backing address range is onlined. Allow for the case that platform firmware describes persistent memory with a unique proximity domain, i.e. when it is distinct from the proximity of DRAM and CPUs that are on the same socket. 
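For illustration only (a sketch, not from the original changelog), an NFIT-style provider could then record both notions of locality when composing a region description; acpi_map_pxm_to_node() and the target_node field are the ones added below, while ndr_desc (a struct nd_region_desc) and spa->proximity_domain (the SPA range's proximity domain) are assumed locals:

  /* closest online node: useful for binding processes *near* the device */
  ndr_desc.numa_node = acpi_map_pxm_to_online_node(spa->proximity_domain);
  /* the range's own, possibly offline, node: used once the memory is onlined */
  ndr_desc.target_node = acpi_map_pxm_to_node(spa->proximity_domain);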
Plumb the Linux numa-node translation of that proximity through the libnvdimm region device to namespaces that are in device-dax mode. With this in place the proposed kmem driver [1] can optionally discover a unique numa-node number for the address range as it transitions the memory from an offline state managed by a device-driver to an online memory range managed by the core-mm. [1]: https://lore.kernel.org/lkml/20181022201317.8558C1D8@viggo.jf.intel.com Reported-by: Fan Du Cc: Michael Ellerman Cc: "Oliver O'Halloran" Cc: Dave Hansen Cc: Jérôme Glisse Reviewed-by: Yang Shi Signed-off-by: Dan Williams --- include/linux/acpi.h | 5 +++++ include/linux/libnvdimm.h | 1 + 2 files changed, 6 insertions(+) (limited to 'include/linux') diff --git a/include/linux/acpi.h b/include/linux/acpi.h index 87715f20b69a..eddf2736e5a6 100644 --- a/include/linux/acpi.h +++ b/include/linux/acpi.h @@ -400,12 +400,17 @@ extern bool acpi_osi_is_win8(void); #ifdef CONFIG_ACPI_NUMA int acpi_map_pxm_to_online_node(int pxm); +int acpi_map_pxm_to_node(int pxm); int acpi_get_node(acpi_handle handle); #else static inline int acpi_map_pxm_to_online_node(int pxm) { return 0; } +static inline int acpi_map_pxm_to_node(int pxm) +{ + return 0; +} static inline int acpi_get_node(acpi_handle handle) { return 0; diff --git a/include/linux/libnvdimm.h b/include/linux/libnvdimm.h index 5440f11b0907..56bc545ad3b2 100644 --- a/include/linux/libnvdimm.h +++ b/include/linux/libnvdimm.h @@ -128,6 +128,7 @@ struct nd_region_desc { void *provider_data; int num_lanes; int numa_node; + int target_node; unsigned long flags; struct device_node *of_node; }; -- cgit v1.2.3 From ebc40be2b8eec093abbbd87658a6726ff84a61f5 Mon Sep 17 00:00:00 2001 From: Fabien Dessenne Date: Wed, 7 Nov 2018 11:18:34 +0100 Subject: remoteproc: fix kernel-doc comment for parse_fw Fix the kernel-doc comment for "parse_fw" and fix a typo. Signed-off-by: Fabien Dessenne Signed-off-by: Bjorn Andersson --- include/linux/remoteproc.h | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'include/linux') diff --git a/include/linux/remoteproc.h b/include/linux/remoteproc.h index 507a2b524208..68e72f33c705 100644 --- a/include/linux/remoteproc.h +++ b/include/linux/remoteproc.h @@ -345,9 +345,9 @@ struct firmware; * @stop: power off the device * @kick: kick a virtqueue (virtqueue id given as a parameter) * @da_to_va: optional platform hook to perform address translations - * @load_rsc_table: load resource table from firmware image + * @parse_fw: parse firmware to extract information (e.g. resource table) * @find_loaded_rsc_table: find the loaded resouce table - * @load: load firmeware to memory, where the remote processor + * @load: load firmware to memory, where the remote processor * expects to find it * @sanity_check: sanity check the fw image * @get_boot_addr: get boot address to entry point specified in firmware -- cgit v1.2.3 From d7dba6be0f31ae61f5f3296aa130f45d57d30f74 Mon Sep 17 00:00:00 2001 From: Andy Shevchenko Date: Mon, 7 Jan 2019 13:07:36 +0200 Subject: dmaengine: dw: Remove misleading is_private property The commit a9ddb575d6d6 ("dmaengine: dw_dmac: Enhance device tree support") introduces is_private property in uncertain understanding what does it mean. First of all, documentation defines DMA_PRIVATE capability as Documentation/crypto/async-tx-api.txt: The DMA_PRIVATE capability flag is used to tag dma devices that should not be used by the general-purpose allocator. 
It can be set at initialization time if it is known that a channel will always be private. Alternatively, it is set when dma_request_channel() finds an unused "public" channel. A couple caveats to note when implementing a driver and consumer: 1/ Once a channel has been privately allocated it will no longer be considered by the general-purpose allocator even after a call to dma_release_channel(). 2/ Since capabilities are specified at the device level a dma_device with multiple channels will either have all channels public, or all channels private. Documentation/driver-api/dmaengine/provider.rst: - DMA_PRIVATE The devices only supports slave transfers, and as such isn't available for async transfers. The capability had been introduced by the commit 59b5ec21446b ("dmaengine: introduce dma_request_channel and private channels") and some code didn't changed from that times ever. Taking into consideration above and the fact that on all known platforms Synopsys DesignWare DMA engine is attached to serve slave transfers, the DMA_PRIVATE capability must be enabled for this device unconditionally. Otherwise, as rightfully noticed in drivers/dma/at_xdmac.c: /* * Without DMA_PRIVATE the driver is not able to allocate more than * one channel, second allocation fails in private_candidate. */ because of of a caveats mentioned in above documentation excerpts. So, remove conditional around DMA_PRIVATE followed by removal leftovers. If someone wonders, DMA_PRIVATE can be not used if and only if the all channels of the DMA controller are supposed to serve memory-to-memory like operations. For example, EP93xx has two controllers, one of which can only perform memory-to-memory transfers Note, this change doesn't affect dmatest to be able to test such controllers. Cc: Greg Kroah-Hartman (maintainer:SERIAL DRIVERS) Cc: Dan Williams Signed-off-by: Andy Shevchenko Acked-by: Greg Kroah-Hartman Signed-off-by: Vinod Koul --- include/linux/platform_data/dma-dw.h | 3 --- 1 file changed, 3 deletions(-) (limited to 'include/linux') diff --git a/include/linux/platform_data/dma-dw.h b/include/linux/platform_data/dma-dw.h index 1a1d58ebffbf..d443025c5c72 100644 --- a/include/linux/platform_data/dma-dw.h +++ b/include/linux/platform_data/dma-dw.h @@ -38,8 +38,6 @@ struct dw_dma_slave { /** * struct dw_dma_platform_data - Controller configuration parameters * @nr_channels: Number of channels supported by hardware (max 8) - * @is_private: The device channels should be marked as private and not for - * by the general purpose DMA channel allocator. * @is_memcpy: The device channels do support memory-to-memory transfers. * @is_idma32: The type of the DMA controller is iDMA32 * @chan_allocation_order: Allocate channels starting from 0 or 7 @@ -53,7 +51,6 @@ struct dw_dma_slave { */ struct dw_dma_platform_data { unsigned int nr_channels; - bool is_private; bool is_memcpy; bool is_idma32; #define CHAN_ALLOCATION_ASCENDING 0 /* zero to seven */ -- cgit v1.2.3 From 078165779608873e7b6eae1316a39c73af9f3edc Mon Sep 17 00:00:00 2001 From: Andy Shevchenko Date: Mon, 7 Jan 2019 13:07:37 +0200 Subject: dmaengine: dw: Remove unused internal property All known devices, which use DT for configuration, support memory-to-memory transfers. So enable it by default. The rest two cases, i.e. Intel Quark and PPC460ex, instantiate DMA driver and use its channels exclusively for hardware, which means there is no available channel for any other purposes anyway. 
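As a minimal sketch (not part of the original posting), the probe path can now advertise the capabilities unconditionally; dw->dma is assumed to be the controller's embedded struct dma_device:

  /* sketch: no more is_private/is_memcpy conditionals at probe time */
  dma_cap_set(DMA_SLAVE, dw->dma.cap_mask);
  dma_cap_set(DMA_PRIVATE, dw->dma.cap_mask); /* slave-only channels */
  dma_cap_set(DMA_MEMCPY, dw->dma.cap_mask);  /* mem-to-mem still offered */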
Signed-off-by: Andy Shevchenko Signed-off-by: Vinod Koul --- include/linux/platform_data/dma-dw.h | 2 -- 1 file changed, 2 deletions(-) (limited to 'include/linux') diff --git a/include/linux/platform_data/dma-dw.h b/include/linux/platform_data/dma-dw.h index d443025c5c72..1c85eeee4171 100644 --- a/include/linux/platform_data/dma-dw.h +++ b/include/linux/platform_data/dma-dw.h @@ -38,7 +38,6 @@ struct dw_dma_slave { /** * struct dw_dma_platform_data - Controller configuration parameters * @nr_channels: Number of channels supported by hardware (max 8) - * @is_memcpy: The device channels do support memory-to-memory transfers. * @is_idma32: The type of the DMA controller is iDMA32 * @chan_allocation_order: Allocate channels starting from 0 or 7 * @chan_priority: Set channel priority increasing from 0 to 7 or 7 to 0. @@ -51,7 +50,6 @@ struct dw_dma_slave { */ struct dw_dma_platform_data { unsigned int nr_channels; - bool is_memcpy; bool is_idma32; #define CHAN_ALLOCATION_ASCENDING 0 /* zero to seven */ #define CHAN_ALLOCATION_DESCENDING 1 /* seven to zero */ -- cgit v1.2.3 From 69da8be90d5e85e60b5377c47384154b9dabf592 Mon Sep 17 00:00:00 2001 From: Andy Shevchenko Date: Mon, 7 Jan 2019 13:07:38 +0200 Subject: dmaengine: dw: Split DW and iDMA 32-bit operations Here is a kinda big refactoring that should have been done in the first place, when Intel iDMA 32-bit support appeared. It splits operations which are different to Synopsys DesignWare and Intel iDMA 32-bit controllers. No functional change intended. Signed-off-by: Andy Shevchenko Signed-off-by: Vinod Koul --- include/linux/dma/dw.h | 4 ++++ include/linux/platform_data/dma-dw.h | 2 -- 2 files changed, 4 insertions(+), 2 deletions(-) (limited to 'include/linux') diff --git a/include/linux/dma/dw.h b/include/linux/dma/dw.h index e166cac8e870..d643d331c20e 100644 --- a/include/linux/dma/dw.h +++ b/include/linux/dma/dw.h @@ -45,9 +45,13 @@ struct dw_dma_chip { #if IS_ENABLED(CONFIG_DW_DMAC_CORE) int dw_dma_probe(struct dw_dma_chip *chip); int dw_dma_remove(struct dw_dma_chip *chip); +int idma32_dma_probe(struct dw_dma_chip *chip); +int idma32_dma_remove(struct dw_dma_chip *chip); #else static inline int dw_dma_probe(struct dw_dma_chip *chip) { return -ENODEV; } static inline int dw_dma_remove(struct dw_dma_chip *chip) { return 0; } +static inline int idma32_dma_probe(struct dw_dma_chip *chip) { return -ENODEV; } +static inline int idma32_dma_remove(struct dw_dma_chip *chip) { return 0; } #endif /* CONFIG_DW_DMAC_CORE */ #endif /* _DMA_DW_H */ diff --git a/include/linux/platform_data/dma-dw.h b/include/linux/platform_data/dma-dw.h index 1c85eeee4171..576048433809 100644 --- a/include/linux/platform_data/dma-dw.h +++ b/include/linux/platform_data/dma-dw.h @@ -38,7 +38,6 @@ struct dw_dma_slave { /** * struct dw_dma_platform_data - Controller configuration parameters * @nr_channels: Number of channels supported by hardware (max 8) - * @is_idma32: The type of the DMA controller is iDMA32 * @chan_allocation_order: Allocate channels starting from 0 or 7 * @chan_priority: Set channel priority increasing from 0 to 7 or 7 to 0. 
* @block_size: Maximum block size supported by the controller @@ -50,7 +49,6 @@ struct dw_dma_slave { */ struct dw_dma_platform_data { unsigned int nr_channels; - bool is_idma32; #define CHAN_ALLOCATION_ASCENDING 0 /* zero to seven */ #define CHAN_ALLOCATION_DESCENDING 1 /* seven to zero */ unsigned char chan_allocation_order; -- cgit v1.2.3 From b466a37fbcc99ef79ea59e40ef6aa8391430b0d8 Mon Sep 17 00:00:00 2001 From: Andy Shevchenko Date: Mon, 7 Jan 2019 13:07:41 +0200 Subject: dmaengine: dw: convert to SPDX identifiers This patch updates license to use SPDX-License-Identifier instead of verbose license text. Signed-off-by: Andy Shevchenko Signed-off-by: Vinod Koul --- include/linux/dma/dw.h | 5 +---- include/linux/platform_data/dma-dw.h | 5 +---- 2 files changed, 2 insertions(+), 8 deletions(-) (limited to 'include/linux') diff --git a/include/linux/dma/dw.h b/include/linux/dma/dw.h index d643d331c20e..9752f3745f76 100644 --- a/include/linux/dma/dw.h +++ b/include/linux/dma/dw.h @@ -1,13 +1,10 @@ +/* SPDX-License-Identifier: GPL-2.0 */ /* * Driver for the Synopsys DesignWare DMA Controller * * Copyright (C) 2007 Atmel Corporation * Copyright (C) 2010-2011 ST Microelectronics * Copyright (C) 2014 Intel Corporation - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. */ #ifndef _DMA_DW_H #define _DMA_DW_H diff --git a/include/linux/platform_data/dma-dw.h b/include/linux/platform_data/dma-dw.h index 576048433809..f3eaf9ec00a1 100644 --- a/include/linux/platform_data/dma-dw.h +++ b/include/linux/platform_data/dma-dw.h @@ -1,12 +1,9 @@ +/* SPDX-License-Identifier: GPL-2.0 */ /* * Driver for the Synopsys DesignWare DMA Controller * * Copyright (C) 2007 Atmel Corporation * Copyright (C) 2010-2011 ST Microelectronics - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. */ #ifndef _PLATFORM_DATA_DMA_DW_H #define _PLATFORM_DATA_DMA_DW_H -- cgit v1.2.3 From d0dcde6426ce071ad447fb9d91c85ab649026114 Mon Sep 17 00:00:00 2001 From: Otto Sabart Date: Sun, 6 Jan 2019 00:29:15 +0100 Subject: doc: networking: convert offload files into RST and update references This patch renames offload files. This is necessary for Sphinx. Also update reference to checksum-offloads.rst file. Whole kernel code was grepped for references using: $ grep -r "\(segmentation\|checksum\)-offloads.txt" . There should be no other references to {segmentation,checksum}-offloads.txt files. Signed-off-by: Otto Sabart Acked-by: David S. Miller Signed-off-by: Jonathan Corbet --- include/linux/skbuff.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'include/linux') diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h index 93f56fddd92a..4e671b46e767 100644 --- a/include/linux/skbuff.h +++ b/include/linux/skbuff.h @@ -4296,7 +4296,7 @@ static inline bool skb_head_is_locked(const struct sk_buff *skb) /* Local Checksum Offload. * Compute outer checksum based on the assumption that the * inner checksum will be offloaded later. - * See Documentation/networking/checksum-offloads.txt for + * See Documentation/networking/checksum-offloads.rst for * explanation of how this works. * Fill in outer checksum adjustment (e.g. with sum of outer * pseudo-header) before calling. 
-- cgit v1.2.3 From 9ac6cb5fbb1781d120ca0ad29d014d35c9c3f0c4 Mon Sep 17 00:00:00 2001 From: Wolfram Sang Date: Wed, 19 Dec 2018 17:48:17 +0100 Subject: i2c: add suspended flag and accessors for i2c adapters A few drivers open code the handling of suspended adapters. It could be handled by the core, though, to ensure generic handling. This patch adds the flag and accessor functions. The usage of these helpers is optional, though. See the kerneldoc in this patch. Using the new flag, we now reject further transfers if the adapter is already marked suspended. Signed-off-by: Wolfram Sang Signed-off-by: Wolfram Sang --- include/linux/i2c.h | 34 ++++++++++++++++++++++++++++++++++ 1 file changed, 34 insertions(+) (limited to 'include/linux') diff --git a/include/linux/i2c.h b/include/linux/i2c.h index 65b4eaed1d96..cba59d66c00d 100644 --- a/include/linux/i2c.h +++ b/include/linux/i2c.h @@ -680,6 +680,8 @@ struct i2c_adapter { int timeout; /* in jiffies */ int retries; struct device dev; /* the adapter device */ + unsigned long locked_flags; /* owned by the I2C core */ +#define I2C_ALF_IS_SUSPENDED 0 int nr; char name[48]; @@ -762,6 +764,38 @@ i2c_unlock_bus(struct i2c_adapter *adapter, unsigned int flags) adapter->lock_ops->unlock_bus(adapter, flags); } +/** + * i2c_mark_adapter_suspended - Report suspended state of the adapter to the core + * @adap: Adapter to mark as suspended + * + * When using this helper to mark an adapter as suspended, the core will reject + * further transfers to this adapter. The usage of this helper is optional but + * recommended for devices having distinct handlers for system suspend and + * runtime suspend. More complex devices are free to implement custom solutions + * to reject transfers when suspended. + */ +static inline void i2c_mark_adapter_suspended(struct i2c_adapter *adap) +{ + i2c_lock_bus(adap, I2C_LOCK_ROOT_ADAPTER); + set_bit(I2C_ALF_IS_SUSPENDED, &adap->locked_flags); + i2c_unlock_bus(adap, I2C_LOCK_ROOT_ADAPTER); +} + +/** + * i2c_mark_adapter_resumed - Report resumed state of the adapter to the core + * @adap: Adapter to mark as resumed + * + * When using this helper to mark an adapter as resumed, the core will allow + * further transfers to this adapter. See also further notes to + * @i2c_mark_adapter_suspended(). + */ +static inline void i2c_mark_adapter_resumed(struct i2c_adapter *adap) +{ + i2c_lock_bus(adap, I2C_LOCK_ROOT_ADAPTER); + clear_bit(I2C_ALF_IS_SUSPENDED, &adap->locked_flags); + i2c_unlock_bus(adap, I2C_LOCK_ROOT_ADAPTER); +} + /*flags for the client struct: */ #define I2C_CLIENT_PEC 0x04 /* Use Packet Error Checking */ #define I2C_CLIENT_TEN 0x10 /* we have a ten bit chip address */ -- cgit v1.2.3 From 47008e5161fa097ce9b848dee194b43262b743a5 Mon Sep 17 00:00:00 2001 From: Kees Cook Date: Wed, 19 Sep 2018 16:13:25 -0700 Subject: LSM: Introduce LSM_FLAG_LEGACY_MAJOR This adds a flag for the current "major" LSMs to distinguish them when we have a universal method for ordering all LSMs. It's called "legacy" since the distinction of "major" will go away in the blob-sharing world. 
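A minimal sketch of how a legacy major LSM might then tag itself, assuming the DEFINE_LSM() registration introduced earlier in this series (names illustrative):

  DEFINE_LSM(selinux) = {
          .name  = "selinux",
          .flags = LSM_FLAG_LEGACY_MAJOR,
          .init  = selinux_init,
  };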
Signed-off-by: Kees Cook Reviewed-by: Casey Schaufler Reviewed-by: John Johansen --- include/linux/lsm_hooks.h | 3 +++ 1 file changed, 3 insertions(+) (limited to 'include/linux') diff --git a/include/linux/lsm_hooks.h b/include/linux/lsm_hooks.h index 9a0bdf91e646..318d93f918c3 100644 --- a/include/linux/lsm_hooks.h +++ b/include/linux/lsm_hooks.h @@ -2042,8 +2042,11 @@ extern char *lsm_names; extern void security_add_hooks(struct security_hook_list *hooks, int count, char *lsm); +#define LSM_FLAG_LEGACY_MAJOR BIT(0) + struct lsm_info { const char *name; /* Required. */ + unsigned long flags; /* Optional: flags describing LSM */ int (*init)(void); /* Required. */ }; -- cgit v1.2.3 From c5459b829b716dafd226ad270f25c9a3050f7586 Mon Sep 17 00:00:00 2001 From: Kees Cook Date: Thu, 13 Sep 2018 22:28:48 -0700 Subject: LSM: Plumb visibility into optional "enabled" state In preparation for lifting the "is this LSM enabled?" logic out of the individual LSMs, pass in any special enabled state tracking (as needed for SELinux, AppArmor, and LoadPin). This should be an "int" to include handling any future cases where "enabled" is exposed via sysctl which has no "bool" type. Signed-off-by: Kees Cook Reviewed-by: Casey Schaufler Reviewed-by: John Johansen --- include/linux/lsm_hooks.h | 1 + 1 file changed, 1 insertion(+) (limited to 'include/linux') diff --git a/include/linux/lsm_hooks.h b/include/linux/lsm_hooks.h index 318d93f918c3..7bbe5e287161 100644 --- a/include/linux/lsm_hooks.h +++ b/include/linux/lsm_hooks.h @@ -2047,6 +2047,7 @@ extern void security_add_hooks(struct security_hook_list *hooks, int count, struct lsm_info { const char *name; /* Required. */ unsigned long flags; /* Optional: flags describing LSM */ + int *enabled; /* Optional: NULL means enabled. */ int (*init)(void); /* Required. */ }; -- cgit v1.2.3 From f4941d75b9cba5e1fae1aebe0139dcca0703a294 Mon Sep 17 00:00:00 2001 From: Kees Cook Date: Thu, 13 Sep 2018 23:17:50 -0700 Subject: LSM: Lift LSM selection out of individual LSMs As a prerequisite to adjusting LSM selection logic in the future, this moves the selection logic up out of the individual major LSMs, making their init functions only run when actually enabled. This considers all LSMs enabled by default unless they specified an external "enable" variable. Signed-off-by: Kees Cook Reviewed-by: Casey Schaufler Reviewed-by: John Johansen --- include/linux/lsm_hooks.h | 1 - 1 file changed, 1 deletion(-) (limited to 'include/linux') diff --git a/include/linux/lsm_hooks.h b/include/linux/lsm_hooks.h index 7bbe5e287161..be1581d18e3e 100644 --- a/include/linux/lsm_hooks.h +++ b/include/linux/lsm_hooks.h @@ -2088,7 +2088,6 @@ static inline void security_delete_hooks(struct security_hook_list *hooks, #define __lsm_ro_after_init __ro_after_init #endif /* CONFIG_SECURITY_WRITABLE_HOOKS */ -extern int __init security_module_enable(const char *module); extern void __init capability_add_hooks(void); #ifdef CONFIG_SECURITY_YAMA extern void __init yama_add_hooks(void); -- cgit v1.2.3 From a8027fb0d188599ccdb2096f49f708bae04d86c4 Mon Sep 17 00:00:00 2001 From: Kees Cook Date: Tue, 9 Oct 2018 14:42:57 -0700 Subject: LSM: Tie enabling logic to presence in ordered list Until now, any LSM without an enable storage variable was considered enabled. This inverts the logic and sets defaults to true only if the LSM gets added to the ordered initialization list. (And an exception continues for the major LSMs until they are integrated into the ordered initialization in a later patch.) 
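A rough, hypothetical sketch of the inverted default (the real logic lives in security/security.c and is not part of this hunk; all helper and variable names below are illustrative):

  static void __init append_ordered_lsm(struct lsm_info *lsm)
  {
          if (exists_ordered_lsm(lsm))            /* ignore duplicates */
                  return;
          ordered_lsms[last_lsm++] = lsm;
          /* being picked for the ordered list is what enables an LSM now */
          if (!lsm->enabled)
                  lsm->enabled = &lsm_enabled_true;
  }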
Signed-off-by: Kees Cook --- include/linux/lsm_hooks.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'include/linux') diff --git a/include/linux/lsm_hooks.h b/include/linux/lsm_hooks.h index be1581d18e3e..e28a3aa639e8 100644 --- a/include/linux/lsm_hooks.h +++ b/include/linux/lsm_hooks.h @@ -2047,7 +2047,7 @@ extern void security_add_hooks(struct security_hook_list *hooks, int count, struct lsm_info { const char *name; /* Required. */ unsigned long flags; /* Optional: flags describing LSM */ - int *enabled; /* Optional: NULL means enabled. */ + int *enabled; /* Optional: controlled by CONFIG_LSM */ int (*init)(void); /* Required. */ }; -- cgit v1.2.3 From 14bd99c821f7ace0e8110a1bfdfaa27e1788e20f Mon Sep 17 00:00:00 2001 From: Kees Cook Date: Wed, 19 Sep 2018 19:57:06 -0700 Subject: LSM: Separate idea of "major" LSM from "exclusive" LSM In order to both support old "security=" Legacy Major LSM selection, and handling real exclusivity, this creates LSM_FLAG_EXCLUSIVE and updates the selection logic to handle them. Signed-off-by: Kees Cook Reviewed-by: Casey Schaufler --- include/linux/lsm_hooks.h | 1 + 1 file changed, 1 insertion(+) (limited to 'include/linux') diff --git a/include/linux/lsm_hooks.h b/include/linux/lsm_hooks.h index e28a3aa639e8..c3843b33da9e 100644 --- a/include/linux/lsm_hooks.h +++ b/include/linux/lsm_hooks.h @@ -2043,6 +2043,7 @@ extern void security_add_hooks(struct security_hook_list *hooks, int count, char *lsm); #define LSM_FLAG_LEGACY_MAJOR BIT(0) +#define LSM_FLAG_EXCLUSIVE BIT(1) struct lsm_info { const char *name; /* Required. */ -- cgit v1.2.3 From 70b62c25665f636c9f6c700b26af7df296b0887e Mon Sep 17 00:00:00 2001 From: Kees Cook Date: Fri, 14 Sep 2018 15:26:37 -0700 Subject: LoadPin: Initialize as ordered LSM This converts LoadPin from being a direct "minor" LSM into an ordered LSM. Signed-off-by: Kees Cook Reviewed-by: Casey Schaufler --- include/linux/lsm_hooks.h | 5 ----- 1 file changed, 5 deletions(-) (limited to 'include/linux') diff --git a/include/linux/lsm_hooks.h b/include/linux/lsm_hooks.h index c3843b33da9e..fb1a653ccfcb 100644 --- a/include/linux/lsm_hooks.h +++ b/include/linux/lsm_hooks.h @@ -2095,10 +2095,5 @@ extern void __init yama_add_hooks(void); #else static inline void __init yama_add_hooks(void) { } #endif -#ifdef CONFIG_SECURITY_LOADPIN -void __init loadpin_add_hooks(void); -#else -static inline void loadpin_add_hooks(void) { }; -#endif #endif /* ! __LINUX_LSM_HOOKS_H */ -- cgit v1.2.3 From d6aed64b74b73b64278c059eacd59d87167aa968 Mon Sep 17 00:00:00 2001 From: Kees Cook Date: Fri, 14 Sep 2018 15:37:20 -0700 Subject: Yama: Initialize as ordered LSM This converts Yama from being a direct "minor" LSM into an ordered LSM. Signed-off-by: Kees Cook Reviewed-by: Casey Schaufler --- include/linux/lsm_hooks.h | 5 ----- 1 file changed, 5 deletions(-) (limited to 'include/linux') diff --git a/include/linux/lsm_hooks.h b/include/linux/lsm_hooks.h index fb1a653ccfcb..2849e9b2c01d 100644 --- a/include/linux/lsm_hooks.h +++ b/include/linux/lsm_hooks.h @@ -2090,10 +2090,5 @@ static inline void security_delete_hooks(struct security_hook_list *hooks, #endif /* CONFIG_SECURITY_WRITABLE_HOOKS */ extern void __init capability_add_hooks(void); -#ifdef CONFIG_SECURITY_YAMA -extern void __init yama_add_hooks(void); -#else -static inline void __init yama_add_hooks(void) { } -#endif #endif /* ! 
__LINUX_LSM_HOOKS_H */ -- cgit v1.2.3 From e2bc445b66cad25b0627391df8138a83d0e48f97 Mon Sep 17 00:00:00 2001 From: Kees Cook Date: Wed, 19 Sep 2018 17:48:21 -0700 Subject: LSM: Introduce enum lsm_order In preparation for distinguishing the "capability" LSM from other LSMs, it must be ordered first. This introduces LSM_ORDER_MUTABLE for the general LSMs and LSM_ORDER_FIRST for capability. In the future LSM_ORDER_LAST for could be added for anything that must run last (e.g. Landlock may use this). Signed-off-by: Kees Cook --- include/linux/lsm_hooks.h | 6 ++++++ 1 file changed, 6 insertions(+) (limited to 'include/linux') diff --git a/include/linux/lsm_hooks.h b/include/linux/lsm_hooks.h index 2849e9b2c01d..27d4db9588bb 100644 --- a/include/linux/lsm_hooks.h +++ b/include/linux/lsm_hooks.h @@ -2045,8 +2045,14 @@ extern void security_add_hooks(struct security_hook_list *hooks, int count, #define LSM_FLAG_LEGACY_MAJOR BIT(0) #define LSM_FLAG_EXCLUSIVE BIT(1) +enum lsm_order { + LSM_ORDER_FIRST = -1, /* This is only for capabilities. */ + LSM_ORDER_MUTABLE = 0, +}; + struct lsm_info { const char *name; /* Required. */ + enum lsm_order order; /* Optional: default is LSM_ORDER_MUTABLE */ unsigned long flags; /* Optional: flags describing LSM */ int *enabled; /* Optional: controlled by CONFIG_LSM */ int (*init)(void); /* Required. */ -- cgit v1.2.3 From d117a154e6128abac5409d3f173584e7b25981a2 Mon Sep 17 00:00:00 2001 From: Kees Cook Date: Fri, 14 Sep 2018 15:40:45 -0700 Subject: capability: Initialize as LSM_ORDER_FIRST This converts capabilities to use the new LSM_ORDER_FIRST position. Signed-off-by: Kees Cook Reviewed-by: Casey Schaufler --- include/linux/lsm_hooks.h | 2 -- 1 file changed, 2 deletions(-) (limited to 'include/linux') diff --git a/include/linux/lsm_hooks.h b/include/linux/lsm_hooks.h index 27d4db9588bb..0c908c091a03 100644 --- a/include/linux/lsm_hooks.h +++ b/include/linux/lsm_hooks.h @@ -2095,6 +2095,4 @@ static inline void security_delete_hooks(struct security_hook_list *hooks, #define __lsm_ro_after_init __ro_after_init #endif /* CONFIG_SECURITY_WRITABLE_HOOKS */ -extern void __init capability_add_hooks(void); - #endif /* ! __LINUX_LSM_HOOKS_H */ -- cgit v1.2.3 From 6d9c939dbe4d0bcea09cd4b410f624cde1acb678 Mon Sep 17 00:00:00 2001 From: Casey Schaufler Date: Fri, 21 Sep 2018 17:16:59 -0700 Subject: procfs: add smack subdir to attrs Back in 2007 I made what turned out to be a rather serious mistake in the implementation of the Smack security module. The SELinux module used an interface in /proc to manipulate the security context on processes. Rather than use a similar interface, I used the same interface. The AppArmor team did likewise. Now /proc/.../attr/current will tell you the security "context" of the process, but it will be different depending on the security module you're using. This patch provides a subdirectory in /proc/.../attr for Smack. Smack user space can use the "current" file in this subdirectory and never have to worry about getting SELinux attributes by mistake. Programs that use the old interface will continue to work (or fail, as the case may be) as before. The proposed S.A.R.A security module is dependent on the mechanism to create its own attr subdirectory. The original implementation is by Kees Cook. 
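As a rough illustration of the result from user space, a Smack-aware program can now read its own label from the module-specific file instead of the shared one. A sketch, assuming a kernel with this change and Smack active:

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
        char label[256];
        ssize_t len;
        /* Smack-specific location provided by this patch */
        int fd = open("/proc/self/attr/smack/current", O_RDONLY);

        if (fd < 0)
                return 1;
        len = read(fd, label, sizeof(label) - 1);
        close(fd);
        if (len < 0)
                return 1;
        label[len] = '\0';
        printf("Smack label: %s\n", label);
        return 0;
}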
Signed-off-by: Casey Schaufler Reviewed-by: Kees Cook Signed-off-by: Kees Cook --- include/linux/security.h | 15 ++++++++++----- 1 file changed, 10 insertions(+), 5 deletions(-) (limited to 'include/linux') diff --git a/include/linux/security.h b/include/linux/security.h index dbfb5a66babb..b2c5333ed4b5 100644 --- a/include/linux/security.h +++ b/include/linux/security.h @@ -366,8 +366,10 @@ int security_sem_semctl(struct kern_ipc_perm *sma, int cmd); int security_sem_semop(struct kern_ipc_perm *sma, struct sembuf *sops, unsigned nsops, int alter); void security_d_instantiate(struct dentry *dentry, struct inode *inode); -int security_getprocattr(struct task_struct *p, char *name, char **value); -int security_setprocattr(const char *name, void *value, size_t size); +int security_getprocattr(struct task_struct *p, const char *lsm, char *name, + char **value); +int security_setprocattr(const char *lsm, const char *name, void *value, + size_t size); int security_netlink_send(struct sock *sk, struct sk_buff *skb); int security_ismaclabel(const char *name); int security_secid_to_secctx(u32 secid, char **secdata, u32 *seclen); @@ -1112,15 +1114,18 @@ static inline int security_sem_semop(struct kern_ipc_perm *sma, return 0; } -static inline void security_d_instantiate(struct dentry *dentry, struct inode *inode) +static inline void security_d_instantiate(struct dentry *dentry, + struct inode *inode) { } -static inline int security_getprocattr(struct task_struct *p, char *name, char **value) +static inline int security_getprocattr(struct task_struct *p, const char *lsm, + char *name, char **value) { return -EINVAL; } -static inline int security_setprocattr(char *name, void *value, size_t size) +static inline int security_setprocattr(const char *lsm, char *name, + void *value, size_t size) { return -EINVAL; } -- cgit v1.2.3 From 3d252529480c68bfd6a6774652df7c8968b28e41 Mon Sep 17 00:00:00 2001 From: Casey Schaufler Date: Fri, 21 Sep 2018 17:17:34 -0700 Subject: SELinux: Remove unused selinux_is_enabled There are no longer users of selinux_is_enabled(). Remove it. As selinux_is_enabled() is the only reason for include/linux/selinux.h remove that as well. Signed-off-by: Casey Schaufler Reviewed-by: Kees Cook Signed-off-by: Kees Cook --- include/linux/cred.h | 1 - include/linux/selinux.h | 35 ----------------------------------- 2 files changed, 36 deletions(-) delete mode 100644 include/linux/selinux.h (limited to 'include/linux') diff --git a/include/linux/cred.h b/include/linux/cred.h index 4907c9df86b3..ddd45bb74887 100644 --- a/include/linux/cred.h +++ b/include/linux/cred.h @@ -15,7 +15,6 @@ #include #include #include -#include #include #include #include diff --git a/include/linux/selinux.h b/include/linux/selinux.h deleted file mode 100644 index 44f459612690..000000000000 --- a/include/linux/selinux.h +++ /dev/null @@ -1,35 +0,0 @@ -/* - * SELinux services exported to the rest of the kernel. - * - * Author: James Morris - * - * Copyright (C) 2005 Red Hat, Inc., James Morris - * Copyright (C) 2006 Trusted Computer Solutions, Inc. - * Copyright (C) 2006 IBM Corporation, Timothy R. Chavez - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2, - * as published by the Free Software Foundation. - */ -#ifndef _LINUX_SELINUX_H -#define _LINUX_SELINUX_H - -struct selinux_audit_rule; -struct audit_context; -struct kern_ipc_perm; - -#ifdef CONFIG_SECURITY_SELINUX - -/** - * selinux_is_enabled - is SELinux enabled? 
- */ -bool selinux_is_enabled(void); -#else - -static inline bool selinux_is_enabled(void) -{ - return false; -} -#endif /* CONFIG_SECURITY_SELINUX */ - -#endif /* _LINUX_SELINUX_H */ -- cgit v1.2.3 From bbd3662a834813730912a58efb44dd6df6d952e6 Mon Sep 17 00:00:00 2001 From: Casey Schaufler Date: Mon, 12 Nov 2018 09:30:56 -0800 Subject: Infrastructure management of the cred security blob Move management of the cred security blob out of the security modules and into the security infrastructre. Instead of allocating and freeing space the security modules tell the infrastructure how much space they require. Signed-off-by: Casey Schaufler Reviewed-by: Kees Cook [kees: adjusted for ordered init series] Signed-off-by: Kees Cook --- include/linux/lsm_hooks.h | 12 ++++++++++++ 1 file changed, 12 insertions(+) (limited to 'include/linux') diff --git a/include/linux/lsm_hooks.h b/include/linux/lsm_hooks.h index 0c908c091a03..dd33666567bc 100644 --- a/include/linux/lsm_hooks.h +++ b/include/linux/lsm_hooks.h @@ -2027,6 +2027,13 @@ struct security_hook_list { char *lsm; } __randomize_layout; +/* + * Security blob size or offset data. + */ +struct lsm_blob_sizes { + int lbs_cred; +}; + /* * Initializing a security_hook_list structure takes * up a lot of space in a source file. This macro takes @@ -2056,6 +2063,7 @@ struct lsm_info { unsigned long flags; /* Optional: flags describing LSM */ int *enabled; /* Optional: controlled by CONFIG_LSM */ int (*init)(void); /* Required. */ + struct lsm_blob_sizes *blobs; /* Optional: for blob sharing. */ }; extern struct lsm_info __start_lsm_info[], __end_lsm_info[]; @@ -2095,4 +2103,8 @@ static inline void security_delete_hooks(struct security_hook_list *hooks, #define __lsm_ro_after_init __ro_after_init #endif /* CONFIG_SECURITY_WRITABLE_HOOKS */ +#ifdef CONFIG_SECURITY +void __init lsm_early_cred(struct cred *cred); +#endif + #endif /* ! __LINUX_LSM_HOOKS_H */ -- cgit v1.2.3 From 33bf60cabcc7687b194a689b068b65e9ecd556be Mon Sep 17 00:00:00 2001 From: Casey Schaufler Date: Mon, 12 Nov 2018 12:02:49 -0800 Subject: LSM: Infrastructure management of the file security Move management of the file->f_security blob out of the individual security modules and into the infrastructure. The modules no longer allocate or free the data, instead they tell the infrastructure how much space they require. Signed-off-by: Casey Schaufler Reviewed-by: Kees Cook [kees: adjusted for ordered init series] Signed-off-by: Kees Cook --- include/linux/lsm_hooks.h | 1 + 1 file changed, 1 insertion(+) (limited to 'include/linux') diff --git a/include/linux/lsm_hooks.h b/include/linux/lsm_hooks.h index dd33666567bc..e8cef019b645 100644 --- a/include/linux/lsm_hooks.h +++ b/include/linux/lsm_hooks.h @@ -2032,6 +2032,7 @@ struct security_hook_list { */ struct lsm_blob_sizes { int lbs_cred; + int lbs_file; }; /* -- cgit v1.2.3 From afb1cbe37440c7f38b9cf46fc331cc9dfd5cce21 Mon Sep 17 00:00:00 2001 From: Casey Schaufler Date: Fri, 21 Sep 2018 17:19:29 -0700 Subject: LSM: Infrastructure management of the inode security Move management of the inode->i_security blob out of the individual security modules and into the security infrastructure. Instead of allocating the blobs from within the modules the modules tell the infrastructure how much space is required, and the space is allocated there. 
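To make the new contract concrete: a module no longer allocates cred->security or file->f_security itself, it only declares how many bytes it needs and points its lsm_info at that declaration. A minimal sketch, with the context structures and the LSM itself being hypothetical:

/* hypothetical per-object state for an example LSM */
struct example_cred_ctx {
        u32 sid;
};

struct example_file_ctx {
        u32 sid;
        u32 flags;
};

static struct lsm_blob_sizes example_blob_sizes __lsm_ro_after_init = {
        .lbs_cred = sizeof(struct example_cred_ctx),
        .lbs_file = sizeof(struct example_file_ctx),
};

static int __init example_lsm_init(void)
{
        return 0;
}

static struct lsm_info example_lsm __used = {
        .name  = "example",
        .init  = example_lsm_init,
        .blobs = &example_blob_sizes,
};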
Signed-off-by: Casey Schaufler Reviewed-by: Kees Cook [kees: adjusted for ordered init series] Signed-off-by: Kees Cook --- include/linux/lsm_hooks.h | 3 +++ 1 file changed, 3 insertions(+) (limited to 'include/linux') diff --git a/include/linux/lsm_hooks.h b/include/linux/lsm_hooks.h index e8cef019b645..1c798e842de2 100644 --- a/include/linux/lsm_hooks.h +++ b/include/linux/lsm_hooks.h @@ -2033,6 +2033,7 @@ struct security_hook_list { struct lsm_blob_sizes { int lbs_cred; int lbs_file; + int lbs_inode; }; /* @@ -2104,6 +2105,8 @@ static inline void security_delete_hooks(struct security_hook_list *hooks, #define __lsm_ro_after_init __ro_after_init #endif /* CONFIG_SECURITY_WRITABLE_HOOKS */ +extern int lsm_inode_alloc(struct inode *inode); + #ifdef CONFIG_SECURITY void __init lsm_early_cred(struct cred *cred); #endif -- cgit v1.2.3 From f4ad8f2c40769b3cc9497ba0883bbaf823f7752f Mon Sep 17 00:00:00 2001 From: Casey Schaufler Date: Fri, 21 Sep 2018 17:19:37 -0700 Subject: LSM: Infrastructure management of the task security Move management of the task_struct->security blob out of the individual security modules and into the security infrastructure. Instead of allocating the blobs from within the modules the modules tell the infrastructure how much space is required, and the space is allocated there. The only user of this blob is AppArmor. The AppArmor use is abstracted to avoid future conflict. Signed-off-by: Casey Schaufler Reviewed-by: Kees Cook [kees: adjusted for ordered init series] Signed-off-by: Kees Cook --- include/linux/lsm_hooks.h | 2 ++ 1 file changed, 2 insertions(+) (limited to 'include/linux') diff --git a/include/linux/lsm_hooks.h b/include/linux/lsm_hooks.h index 1c798e842de2..9b39fefa88c4 100644 --- a/include/linux/lsm_hooks.h +++ b/include/linux/lsm_hooks.h @@ -2034,6 +2034,7 @@ struct lsm_blob_sizes { int lbs_cred; int lbs_file; int lbs_inode; + int lbs_task; }; /* @@ -2109,6 +2110,7 @@ extern int lsm_inode_alloc(struct inode *inode); #ifdef CONFIG_SECURITY void __init lsm_early_cred(struct cred *cred); +void __init lsm_early_task(struct task_struct *task); #endif #endif /* ! __LINUX_LSM_HOOKS_H */ -- cgit v1.2.3 From ecd5f82e05ddd9b06c258167ec7467ac79741d77 Mon Sep 17 00:00:00 2001 From: Casey Schaufler Date: Tue, 20 Nov 2018 11:55:02 -0800 Subject: LSM: Infrastructure management of the ipc security blob Move management of the kern_ipc_perm->security and msg_msg->security blobs out of the individual security modules and into the security infrastructure. Instead of allocating the blobs from within the modules the modules tell the infrastructure how much space is required, and the space is allocated there. 
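The flip side of the shared blob is lookup: each module finds its slice at a fixed offset. Assuming, as the infrastructure code outside this excerpt implies, that a module's lsm_blob_sizes entries end up holding its offsets after initialization, a typical accessor could look like the sketch below (names hypothetical):

struct example_cred_ctx {
        u32 sid;                        /* hypothetical per-cred state */
};

/* sizes on input, offsets into the shared blob after init (assumption) */
extern struct lsm_blob_sizes example_blob_sizes;

static inline struct example_cred_ctx *example_cred(const struct cred *cred)
{
        return cred->security + example_blob_sizes.lbs_cred;
}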
Signed-off-by: Casey Schaufler Reviewed-by: Kees Cook [kees: adjusted for ordered init series] Signed-off-by: Kees Cook --- include/linux/lsm_hooks.h | 2 ++ 1 file changed, 2 insertions(+) (limited to 'include/linux') diff --git a/include/linux/lsm_hooks.h b/include/linux/lsm_hooks.h index 9b39fefa88c4..40511a8a5ae6 100644 --- a/include/linux/lsm_hooks.h +++ b/include/linux/lsm_hooks.h @@ -2034,6 +2034,8 @@ struct lsm_blob_sizes { int lbs_cred; int lbs_file; int lbs_inode; + int lbs_ipc; + int lbs_msg_msg; int lbs_task; }; -- cgit v1.2.3 From 0ada768517dafa1504ef5986ba04f118b7436960 Mon Sep 17 00:00:00 2001 From: Leon Romanovsky Date: Tue, 8 Jan 2019 16:07:27 +0200 Subject: RDMA/mlx5: Delete declaration of already removed function The implementation of mlx5_core_page_fault_resume() was removed in commit d5d284b829a6 ("{net,IB}/mlx5: Move Page fault EQ and ODP logic to RDMA"). This patch removes declaration too. Fixes: d5d284b829a6 ("{net,IB}/mlx5: Move Page fault EQ and ODP logic to RDMA") Signed-off-by: Leon Romanovsky Signed-off-by: Jason Gunthorpe --- include/linux/mlx5/driver.h | 4 ---- 1 file changed, 4 deletions(-) (limited to 'include/linux') diff --git a/include/linux/mlx5/driver.h b/include/linux/mlx5/driver.h index 54299251d40d..b6f5839f129a 100644 --- a/include/linux/mlx5/driver.h +++ b/include/linux/mlx5/driver.h @@ -939,10 +939,6 @@ int mlx5_query_odp_caps(struct mlx5_core_dev *dev, struct mlx5_odp_caps *odp_caps); int mlx5_core_query_ib_ppcnt(struct mlx5_core_dev *dev, u8 port_num, void *out, size_t sz); -#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING -int mlx5_core_page_fault_resume(struct mlx5_core_dev *dev, u32 token, - u32 wq_num, u8 type, int error); -#endif int mlx5_init_rl_table(struct mlx5_core_dev *dev); void mlx5_cleanup_rl_table(struct mlx5_core_dev *dev); -- cgit v1.2.3 From f3186dd876697e696d07136623d5cf0a6fb0bc0f Mon Sep 17 00:00:00 2001 From: Linus Walleij Date: Mon, 7 Jan 2019 16:51:50 +0100 Subject: spi: Optionally use GPIO descriptors for CS GPIOs This augments the SPI core to optionally use GPIO descriptors for chip select on a per-master-driver opt-in basis. Drivers using this will rely on the SPI core to look up GPIO descriptors associated with the device, such as when using device tree or board files with GPIO descriptor tables. When getting descriptors from the device tree, this will in turn activate the code in gpiolib that was added in commit 6953c57ab172 ("gpio: of: Handle SPI chipselect legacy bindings") which means that these descriptors are aware of the active low semantics that is the default for SPI CS GPIO lines and we can assume that all of these are "active high" and thus assign SPI_CS_HIGH to all CS lines on the DT path. The previously used gpio_set_value() would call down into gpiod_set_raw_value() and ignore the polarity inversion semantics. It seems like many drivers go to great lengths to set up the CS GPIO line as non-asserted, respecting SPI_CS_HIGH. We pull this out of the SPI drivers and into the core, and by simply requesting the line as GPIOD_OUT_LOW when retrieveing it from the device and relying on the gpiolib to handle any inversion semantics. This way a lot of code can be simplified and removed in each converted driver. The end goal after dealing with each driver in turn, is to delete the non-descriptor path (of_spi_register_master() for example) and let the core deal with only descriptors. 
The different SPI drivers have complex interactions with the core so we cannot simply change them all over, we need to use a stepwise, bisectable approach so that each driver can be converted and fixed in isolation. This patch has the intended side effect of adding support for ACPI GPIOs as it starts relying on gpiod_get_*() to get the GPIO handle associated with the device. Cc: Linuxarm Acked-by: Jonathan Cameron Tested-by: Fangjian (Turing) Signed-off-by: Linus Walleij Signed-off-by: Mark Brown --- include/linux/spi/spi.h | 23 +++++++++++++++++++---- 1 file changed, 19 insertions(+), 4 deletions(-) (limited to 'include/linux') diff --git a/include/linux/spi/spi.h b/include/linux/spi/spi.h index 314d922ca607..916bba47d156 100644 --- a/include/linux/spi/spi.h +++ b/include/linux/spi/spi.h @@ -12,6 +12,7 @@ #include #include #include +#include struct dma_chan; struct property_entry; @@ -116,7 +117,10 @@ void spi_statistics_add_transfer_stats(struct spi_statistics *stats, * @modalias: Name of the driver to use with this device, or an alias * for that name. This appears in the sysfs "modalias" attribute * for driver coldplugging, and in uevents used for hotplugging - * @cs_gpio: gpio number of the chipselect line (optional, -ENOENT when + * @cs_gpio: LEGACY: gpio number of the chipselect line (optional, -ENOENT when + * not using a GPIO line) use cs_gpiod in new drivers by opting in on + * the spi_master. + * @cs_gpiod: gpio descriptor of the chipselect line (optional, NULL when * not using a GPIO line) * * @statistics: statistics for the spi_device @@ -163,7 +167,8 @@ struct spi_device { void *controller_data; char modalias[SPI_NAME_SIZE]; const char *driver_override; - int cs_gpio; /* chip select gpio */ + int cs_gpio; /* LEGACY: chip select gpio */ + struct gpio_desc *cs_gpiod; /* chip select gpio desc */ /* the statistics */ struct spi_statistics statistics; @@ -376,9 +381,17 @@ static inline void spi_unregister_driver(struct spi_driver *sdrv) * controller has native support for memory like operations. * @unprepare_message: undo any work done by prepare_message(). * @slave_abort: abort the ongoing transfer request on an SPI slave controller - * @cs_gpios: Array of GPIOs to use as chip select lines; one per CS - * number. Any individual value may be -ENOENT for CS lines that + * @cs_gpios: LEGACY: array of GPIO descs to use as chip select lines; one per + * CS number. Any individual value may be -ENOENT for CS lines that + * are not GPIOs (driven by the SPI controller itself). Use the cs_gpiods + * in new drivers. + * @cs_gpiods: Array of GPIO descs to use as chip select lines; one per CS + * number. Any individual value may be NULL for CS lines that * are not GPIOs (driven by the SPI controller itself). + * @use_gpio_descriptors: Turns on the code in the SPI core to parse and grab + * GPIO descriptors rather than using global GPIO numbers grabbed by the + * driver. This will fill in @cs_gpiods and @cs_gpios should not be used, + * and SPI devices will have the cs_gpiod assigned rather than cs_gpio. 
* @statistics: statistics for the spi_controller * @dma_tx: DMA transmit channel * @dma_rx: DMA receive channel @@ -557,6 +570,8 @@ struct spi_controller { /* gpio chip select */ int *cs_gpios; + struct gpio_desc **cs_gpiods; + bool use_gpio_descriptors; /* statistics */ struct spi_statistics statistics; -- cgit v1.2.3 From 5e6acc3e678ed3db746ab4fb53a980861cd711b6 Mon Sep 17 00:00:00 2001 From: Eric Anholt Date: Wed, 12 Dec 2018 15:51:47 -0800 Subject: bcm2835-pm: Move bcm2835-watchdog's DT probe to an MFD. The PM block that the wdt driver was binding to actually has multiple features we want to expose (power domains, reset, watchdog). Move the DT attachment to a MFD driver and make WDT probe against MFD. Signed-off-by: Eric Anholt Reviewed-by: Guenter Roeck Acked-by: Stefan Wahren Signed-off-by: Stefan Wahren --- include/linux/mfd/bcm2835-pm.h | 13 +++++++++++++ 1 file changed, 13 insertions(+) create mode 100644 include/linux/mfd/bcm2835-pm.h (limited to 'include/linux') diff --git a/include/linux/mfd/bcm2835-pm.h b/include/linux/mfd/bcm2835-pm.h new file mode 100644 index 000000000000..b7d0ee1feffa --- /dev/null +++ b/include/linux/mfd/bcm2835-pm.h @@ -0,0 +1,13 @@ +/* SPDX-License-Identifier: GPL-2.0+ */ + +#ifndef BCM2835_MFD_PM_H +#define BCM2835_MFD_PM_H + +#include + +struct bcm2835_pm { + struct device *dev; + void __iomem *base; +}; + +#endif /* BCM2835_MFD_PM_H */ -- cgit v1.2.3 From 670c672608a1ffcbc7ac0f872734843593bb8b15 Mon Sep 17 00:00:00 2001 From: Eric Anholt Date: Wed, 12 Dec 2018 15:51:48 -0800 Subject: soc: bcm: bcm2835-pm: Add support for power domains under a new binding. This provides a free software alternative to raspberrypi-power.c's firmware calls to manage power domains. It also exposes a reset line, where previously the vc4 driver had to try to force power off the domain in order to trigger a reset. Signed-off-by: Eric Anholt Acked-by: Rob Herring Acked-by: Stefan Wahren Signed-off-by: Stefan Wahren --- include/linux/mfd/bcm2835-pm.h | 1 + 1 file changed, 1 insertion(+) (limited to 'include/linux') diff --git a/include/linux/mfd/bcm2835-pm.h b/include/linux/mfd/bcm2835-pm.h index b7d0ee1feffa..ed37dc40e82a 100644 --- a/include/linux/mfd/bcm2835-pm.h +++ b/include/linux/mfd/bcm2835-pm.h @@ -8,6 +8,7 @@ struct bcm2835_pm { struct device *dev; void __iomem *base; + void __iomem *asb; }; #endif /* BCM2835_MFD_PM_H */ -- cgit v1.2.3 From 03c87b95ac04c2a34045641b25dded6e3e889556 Mon Sep 17 00:00:00 2001 From: Bartosz Golaszewski Date: Wed, 9 Jan 2019 18:44:00 +0100 Subject: regulator: provide rdev_get_regmap() Provide a helper allowing to access regulator's regmap. 
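A regulator driver callback can then reach the chip's regmap without stashing its own pointer. A small sketch (the register address and bit are invented):

static int example_regulator_enable(struct regulator_dev *rdev)
{
        struct regmap *map = rdev_get_regmap(rdev);

        /* hypothetical enable bit in a hypothetical register */
        return regmap_update_bits(map, 0x10, BIT(0), BIT(0));
}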
Signed-off-by: Bartosz Golaszewski Signed-off-by: Mark Brown --- include/linux/regulator/driver.h | 1 + 1 file changed, 1 insertion(+) (limited to 'include/linux') diff --git a/include/linux/regulator/driver.h b/include/linux/regulator/driver.h index 389bcaf7900f..795b38a06b6c 100644 --- a/include/linux/regulator/driver.h +++ b/include/linux/regulator/driver.h @@ -503,6 +503,7 @@ int regulator_notifier_call_chain(struct regulator_dev *rdev, void *rdev_get_drvdata(struct regulator_dev *rdev); struct device *rdev_get_dev(struct regulator_dev *rdev); +struct regmap *rdev_get_regmap(struct regulator_dev *rdev); int rdev_get_id(struct regulator_dev *rdev); int regulator_mode_to_status(unsigned int); -- cgit v1.2.3 From 412e60373245fd1dfae8d4d44c5d1406b3d90971 Mon Sep 17 00:00:00 2001 From: Martin Sperl Date: Tue, 8 Jan 2019 12:13:45 +0000 Subject: spi: core: avoid waking pump thread from spi_sync instead run teardown delayed When spi_sync is running alone with no other spi devices connected to the bus the worker thread is woken during spi_finalize_current_message to run the teardown code every time. This is totally unnecessary in the case that there is no message queued. On a multi-core system this results in one wakeup of the thread for each spi_message processed via spi_sync where in most cases the teardown does not happen as the hw is already in use. This patch now delays the teardown by 1 second by using a separate kthread_delayed_work for the teardown. This avoids waking the kthread too often. For spi_sync transfers in a tight loop (say 40k messages/s) this avoids the penalty of waking the worker thread 40k times/s. On a rasperry pi 3 with 4 cores the results in 32% of a single core only to find out that there is nothing in the queue and it can go back to sleep. With this patch applied the spi-worker is woken exactly once: after the load finishes and the spi bus is idle for 1 second. I believe I have also seen situations where during a spi_sync loop the worker thread (triggered by the last message finished) is slightly faster and _wins_ the race to process the message, so we are actually running the kthread and letting it do some work... This is also no longer observed with this patch applied as. 
Tested with a new CAN controller driver for the mcp2517fd which uses spi_sync for interrupt handling and spi_async for scheduling of can frames for transmission (in a different thread) Some statistics when receiving 100000 CAN frames with the mcp25xxfd driver on a Raspberry pi 3: without the patch: ------------------ root@raspcm3:~# for x in $(pgrep spi0) $(pgrep irq/94-mcp25xxf) ; do awk '{printf "%-20s %6i\n", $2,$15}' /proc/$x/stat; done (spi0) 5 (irq/94-mcp25xxf) 0 root@raspcm3:~# vmstat 1 procs -----------memory---------- ---swap-- -----io---- -system-- ------cpu----- r b swpd free buff cache si so bi bo in cs us sy id wa st 1 0 0 821960 13592 50848 0 0 80 2 1986 105 1 2 97 0 0 0 0 0 821968 13592 50876 0 0 0 0 8046 30 0 0 100 0 0 0 0 0 821936 13592 50876 0 0 0 0 8032 24 0 0 100 0 0 0 0 0 821936 13592 50876 0 0 0 0 8035 30 0 0 100 0 0 0 0 0 821936 13592 50876 0 0 0 0 8033 22 0 0 100 0 0 2 0 0 821936 13592 50876 0 0 0 0 11598 7129 0 3 97 0 0 1 0 0 821872 13592 50876 0 0 0 0 37741 59003 0 31 69 0 0 2 0 0 821840 13592 50876 0 0 0 0 37762 59078 0 29 71 0 0 2 0 0 821776 13592 50876 0 0 0 0 37593 58792 0 28 72 0 0 1 0 0 821744 13592 50876 0 0 0 0 37642 58881 0 30 70 0 0 2 0 0 821680 13592 50876 0 0 0 0 37490 58602 0 27 73 0 0 1 0 0 821648 13592 50876 0 0 0 0 37412 58418 0 29 71 0 0 1 0 0 821584 13592 50876 0 0 0 0 37337 58288 0 27 73 0 0 1 0 0 821552 13592 50876 0 0 0 0 37584 58774 0 27 73 0 0 0 0 0 821520 13592 50876 0 0 0 0 18363 20566 0 9 91 0 0 0 0 0 821520 13592 50876 0 0 0 0 8037 32 0 0 100 0 0 0 0 0 821520 13592 50876 0 0 0 0 8031 23 0 0 100 0 0 0 0 0 821520 13592 50876 0 0 0 0 8034 26 0 0 100 0 0 0 0 0 821520 13592 50876 0 0 0 0 8033 24 0 0 100 0 0 ^C root@raspcm3:~# for x in $(pgrep spi0) $(pgrep irq/94-mcp25xxf) ; do awk '{printf "%-20s %6i\n", $2,$15}' /proc/$x/stat; done (spi0) 228 (irq/94-mcp25xxf) 794 root@raspcm3:~# cat /proc/interrupts CPU0 CPU1 CPU2 CPU3 17: 34 0 0 0 ARMCTRL-level 1 Edge 3f00b880.mailbox 27: 1 0 0 0 ARMCTRL-level 35 Edge timer 33: 1416870 0 0 0 ARMCTRL-level 41 Edge 3f980000.usb, dwc2_hsotg:usb1 34: 1 0 0 0 ARMCTRL-level 42 Edge vc4 35: 0 0 0 0 ARMCTRL-level 43 Edge 3f004000.txp 40: 1753 0 0 0 ARMCTRL-level 48 Edge DMA IRQ 42: 11 0 0 0 ARMCTRL-level 50 Edge DMA IRQ 44: 11 0 0 0 ARMCTRL-level 52 Edge DMA IRQ 45: 0 0 0 0 ARMCTRL-level 53 Edge DMA IRQ 66: 0 0 0 0 ARMCTRL-level 74 Edge vc4 crtc 69: 0 0 0 0 ARMCTRL-level 77 Edge vc4 crtc 70: 0 0 0 0 ARMCTRL-level 78 Edge vc4 crtc 77: 20 0 0 0 ARMCTRL-level 85 Edge 3f205000.i2c, 3f804000.i2c, 3f805000.i2c 78: 6346 0 0 0 ARMCTRL-level 86 Edge 3f204000.spi 80: 205 0 0 0 ARMCTRL-level 88 Edge mmc0 81: 493 0 0 0 ARMCTRL-level 89 Edge uart-pl011 89: 0 0 0 0 bcm2836-timer 0 Edge arch_timer 90: 4291 3821 2180 1649 bcm2836-timer 1 Edge arch_timer 94: 14289 0 0 0 pinctrl-bcm2835 16 Level mcp25xxfd IPI0: 0 0 0 0 CPU wakeup interrupts IPI1: 0 0 0 0 Timer broadcast interrupts IPI2: 3645 242371 7919 1328 Rescheduling interrupts IPI3: 112 543 273 194 Function call interrupts IPI4: 0 0 0 0 CPU stop interrupts IPI5: 1 0 0 0 IRQ work interrupts IPI6: 0 0 0 0 completion interrupts Err: 0 top shows 93% for the mcp25xxfd interrupt handler, 31% for spi0. 
with the patch: --------------- root@raspcm3:~# for x in $(pgrep spi0) $(pgrep irq/94-mcp25xxf) ; do awk '{printf "%-20s %6i\n", $2,$15}' /proc/$x/stat; done (spi0) 0 (irq/94-mcp25xxf) 0 root@raspcm3:~# vmstat 1 procs -----------memory---------- ---swap-- -----io---- -system-- ------cpu----- 0 0 0 804768 13584 62628 0 0 0 0 8038 24 0 0 100 0 0 0 0 0 804768 13584 62628 0 0 0 0 8042 25 0 0 100 0 0 1 0 0 804704 13584 62628 0 0 0 0 9603 2967 0 20 80 0 0 1 0 0 804672 13584 62628 0 0 0 0 9828 3380 0 24 76 0 0 1 0 0 804608 13584 62628 0 0 0 0 9823 3375 0 23 77 0 0 1 0 0 804608 13584 62628 0 0 0 12 9829 3394 0 23 77 0 0 1 0 0 804544 13584 62628 0 0 0 0 9816 3362 0 22 78 0 0 1 0 0 804512 13584 62628 0 0 0 0 9817 3367 0 23 77 0 0 1 0 0 804448 13584 62628 0 0 0 0 9822 3370 0 22 78 0 0 1 0 0 804416 13584 62628 0 0 0 0 9815 3367 0 23 77 0 0 0 0 0 804352 13584 62628 0 0 0 84 9222 2250 0 14 86 0 0 0 0 0 804352 13592 62620 0 0 0 24 8131 209 0 0 93 7 0 0 0 0 804320 13592 62628 0 0 0 0 8041 27 0 0 100 0 0 0 0 0 804352 13592 62628 0 0 0 0 8040 26 0 0 100 0 0 root@raspcm3:~# for x in $(pgrep spi0) $(pgrep irq/94-mcp25xxf) ; do awk '{printf "%-20s %6i\n", $2,$15}' /proc/$x/stat; done (spi0) 0 (irq/94-mcp25xxf) 767 root@raspcm3:~# cat /proc/interrupts CPU0 CPU1 CPU2 CPU3 17: 29 0 0 0 ARMCTRL-level 1 Edge 3f00b880.mailbox 27: 1 0 0 0 ARMCTRL-level 35 Edge timer 33: 1024412 0 0 0 ARMCTRL-level 41 Edge 3f980000.usb, dwc2_hsotg:usb1 34: 1 0 0 0 ARMCTRL-level 42 Edge vc4 35: 0 0 0 0 ARMCTRL-level 43 Edge 3f004000.txp 40: 1773 0 0 0 ARMCTRL-level 48 Edge DMA IRQ 42: 11 0 0 0 ARMCTRL-level 50 Edge DMA IRQ 44: 11 0 0 0 ARMCTRL-level 52 Edge DMA IRQ 45: 0 0 0 0 ARMCTRL-level 53 Edge DMA IRQ 66: 0 0 0 0 ARMCTRL-level 74 Edge vc4 crtc 69: 0 0 0 0 ARMCTRL-level 77 Edge vc4 crtc 70: 0 0 0 0 ARMCTRL-level 78 Edge vc4 crtc 77: 20 0 0 0 ARMCTRL-level 85 Edge 3f205000.i2c, 3f804000.i2c, 3f805000.i2c 78: 6417 0 0 0 ARMCTRL-level 86 Edge 3f204000.spi 80: 237 0 0 0 ARMCTRL-level 88 Edge mmc0 81: 489 0 0 0 ARMCTRL-level 89 Edge uart-pl011 89: 0 0 0 0 bcm2836-timer 0 Edge arch_timer 90: 4048 3704 2383 1892 bcm2836-timer 1 Edge arch_timer 94: 14287 0 0 0 pinctrl-bcm2835 16 Level mcp25xxfd IPI0: 0 0 0 0 CPU wakeup interrupts IPI1: 0 0 0 0 Timer broadcast interrupts IPI2: 2361 2948 7890 1616 Rescheduling interrupts IPI3: 65 617 301 166 Function call interrupts IPI4: 0 0 0 0 CPU stop interrupts IPI5: 1 0 0 0 IRQ work interrupts IPI6: 0 0 0 0 completion interrupts Err: 0 top shows 91% for the mcp25xxfd interrupt handler, 0% for spi0 So we see that spi0 is no longer getting scheduled wasting CPU cycles There are a lot less context switches and corresponding Rescheduling interrupts All of these show that this improves efficiency of the system and reduces CPU utilization. 
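The include/linux change below only adds the delayed-work member; the behaviour described above amounts to arming that work instead of waking the pump. A rough sketch of the idea, not the actual core change (handler and call sites are illustrative):

/* runs only if the queue stayed empty for the whole delay */
static void example_pump_idle_teardown(struct kthread_work *work)
{
        /* unprepare transfer hardware, stop the queue, etc. */
}

static void example_queue_setup(struct spi_controller *ctlr)
{
        kthread_init_delayed_work(&ctlr->pump_idle_teardown,
                                  example_pump_idle_teardown);
}

static void example_message_done(struct spi_controller *ctlr)
{
        /* instead of waking the pump just to tear down, wait ~1 second */
        kthread_mod_delayed_work(&ctlr->kworker,
                                 &ctlr->pump_idle_teardown, HZ);
}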
Signed-off-by: Martin Sperl Signed-off-by: Mark Brown --- include/linux/spi/spi.h | 2 ++ 1 file changed, 2 insertions(+) (limited to 'include/linux') diff --git a/include/linux/spi/spi.h b/include/linux/spi/spi.h index 916bba47d156..79ad62e2487c 100644 --- a/include/linux/spi/spi.h +++ b/include/linux/spi/spi.h @@ -334,6 +334,7 @@ static inline void spi_unregister_driver(struct spi_driver *sdrv) * @kworker: thread struct for message pump * @kworker_task: pointer to task for message pump kworker thread * @pump_messages: work struct for scheduling work to the message pump + * @pump_idle_teardown: work structure for scheduling a teardown delayed * @queue_lock: spinlock to syncronise access to message queue * @queue: message queue * @idling: the device is entering idle state @@ -532,6 +533,7 @@ struct spi_controller { struct kthread_worker kworker; struct task_struct *kworker_task; struct kthread_work pump_messages; + struct kthread_delayed_work pump_idle_teardown; spinlock_t queue_lock; struct list_head queue; struct spi_message *cur_msg; -- cgit v1.2.3 From 19e99de9a53f9ece6baf8e9a15428aedd4b20c86 Mon Sep 17 00:00:00 2001 From: Bartosz Golaszewski Date: Tue, 8 Jan 2019 10:15:36 +0100 Subject: ARM: davinci: remove dead code related to MAC address reading There are no more users of davinci_get_mac_addr(). Remove it. Signed-off-by: Bartosz Golaszewski Signed-off-by: Sekhar Nori --- include/linux/davinci_emac.h | 1 - 1 file changed, 1 deletion(-) (limited to 'include/linux') diff --git a/include/linux/davinci_emac.h b/include/linux/davinci_emac.h index 05b97144d342..28e6cf1356da 100644 --- a/include/linux/davinci_emac.h +++ b/include/linux/davinci_emac.h @@ -46,5 +46,4 @@ enum { EMAC_VERSION_2, /* DM646x */ }; -void davinci_get_mac_addr(struct nvmem_device *nvmem, void *context); #endif -- cgit v1.2.3 From cc2d22477779f189595db5c515bd5ef9c75a1f35 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Uwe=20Kleine-K=C3=B6nig?= Date: Mon, 7 Jan 2019 20:49:39 +0100 Subject: pwm: Drop per-chip dbg_show callback MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This callback was introduced in commit 62099abf67a2 ("pwm: Add debugfs interface") in 2012 and up to now there is not a single user. So drop this unused code. Signed-off-by: Uwe Kleine-König [thierry.reding@gmail.com: remove kerneldoc for ->dbg_show()] Signed-off-by: Thierry Reding --- include/linux/pwm.h | 4 ---- 1 file changed, 4 deletions(-) (limited to 'include/linux') diff --git a/include/linux/pwm.h b/include/linux/pwm.h index d5199b507d79..6a544cb89de4 100644 --- a/include/linux/pwm.h +++ b/include/linux/pwm.h @@ -254,7 +254,6 @@ pwm_set_relative_duty_cycle(struct pwm_state *state, unsigned int duty_cycle, * @get_state: get the current PWM state. This function is only * called once per PWM device when the PWM chip is * registered. 
- * @dbg_show: optional routine to show contents in debugfs * @owner: helps prevent removal of modules exporting active PWMs */ struct pwm_ops { @@ -272,9 +271,6 @@ struct pwm_ops { struct pwm_state *state); void (*get_state)(struct pwm_chip *chip, struct pwm_device *pwm, struct pwm_state *state); -#ifdef CONFIG_DEBUG_FS - void (*dbg_show)(struct pwm_chip *chip, struct seq_file *s); -#endif struct module *owner; }; -- cgit v1.2.3 From 5d0a4c11896e8b83f816f135c24b184d4ba57741 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Uwe=20Kleine-K=C3=B6nig?= Date: Mon, 7 Jan 2019 20:49:41 +0100 Subject: pwm: Rearrange structures to group members by purpose MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit In pwm_ops there are a few callbacks that are not supposed to be used by new drivers. Group them at the end of the structure and add a comment. Similarily for struct pwm_chip group the members that drivers shouldn't care about at the end and mark them as internal with another comment. Signed-off-by: Uwe Kleine-König Signed-off-by: Thierry Reding --- include/linux/pwm.h | 33 ++++++++++++++++++--------------- 1 file changed, 18 insertions(+), 15 deletions(-) (limited to 'include/linux') diff --git a/include/linux/pwm.h b/include/linux/pwm.h index 6a544cb89de4..b628abfffacc 100644 --- a/include/linux/pwm.h +++ b/include/linux/pwm.h @@ -242,11 +242,7 @@ pwm_set_relative_duty_cycle(struct pwm_state *state, unsigned int duty_cycle, * struct pwm_ops - PWM controller operations * @request: optional hook for requesting a PWM * @free: optional hook for freeing a PWM - * @config: configure duty cycles and period length for this PWM - * @set_polarity: configure the polarity of this PWM * @capture: capture and report PWM signal - * @enable: enable PWM output toggling - * @disable: disable PWM output toggling * @apply: atomically apply a new PWM config. The state argument * should be adjusted with the real hardware config (if the * approximate the period or duty_cycle value, state should @@ -255,48 +251,55 @@ pwm_set_relative_duty_cycle(struct pwm_state *state, unsigned int duty_cycle, * called once per PWM device when the PWM chip is * registered. 
* @owner: helps prevent removal of modules exporting active PWMs + * @config: configure duty cycles and period length for this PWM + * @set_polarity: configure the polarity of this PWM + * @enable: enable PWM output toggling + * @disable: disable PWM output toggling */ struct pwm_ops { int (*request)(struct pwm_chip *chip, struct pwm_device *pwm); void (*free)(struct pwm_chip *chip, struct pwm_device *pwm); - int (*config)(struct pwm_chip *chip, struct pwm_device *pwm, - int duty_ns, int period_ns); - int (*set_polarity)(struct pwm_chip *chip, struct pwm_device *pwm, - enum pwm_polarity polarity); int (*capture)(struct pwm_chip *chip, struct pwm_device *pwm, struct pwm_capture *result, unsigned long timeout); - int (*enable)(struct pwm_chip *chip, struct pwm_device *pwm); - void (*disable)(struct pwm_chip *chip, struct pwm_device *pwm); int (*apply)(struct pwm_chip *chip, struct pwm_device *pwm, struct pwm_state *state); void (*get_state)(struct pwm_chip *chip, struct pwm_device *pwm, struct pwm_state *state); struct module *owner; + + /* Only used by legacy drivers */ + int (*config)(struct pwm_chip *chip, struct pwm_device *pwm, + int duty_ns, int period_ns); + int (*set_polarity)(struct pwm_chip *chip, struct pwm_device *pwm, + enum pwm_polarity polarity); + int (*enable)(struct pwm_chip *chip, struct pwm_device *pwm); + void (*disable)(struct pwm_chip *chip, struct pwm_device *pwm); }; /** * struct pwm_chip - abstract a PWM controller * @dev: device providing the PWMs - * @list: list node for internal use * @ops: callbacks for this PWM controller * @base: number of first PWM controlled by this chip * @npwm: number of PWMs controlled by this chip - * @pwms: array of PWM devices allocated by the framework * @of_xlate: request a PWM device given a device tree PWM specifier * @of_pwm_n_cells: number of cells expected in the device tree PWM specifier + * @list: list node for internal use + * @pwms: array of PWM devices allocated by the framework */ struct pwm_chip { struct device *dev; - struct list_head list; const struct pwm_ops *ops; int base; unsigned int npwm; - struct pwm_device *pwms; - struct pwm_device * (*of_xlate)(struct pwm_chip *pc, const struct of_phandle_args *args); unsigned int of_pwm_n_cells; + + /* only used internally by the PWM framework */ + struct list_head list; + struct pwm_device *pwms; }; /** -- cgit v1.2.3 From 5a1c18b761ddb299a06746948b9ec2814b04fa92 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Rafa=C5=82=20Mi=C5=82ecki?= Date: Wed, 2 Jan 2019 00:00:01 +0100 Subject: bcma: keep a direct pointer to the struct device MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Accessing struct device is pretty useful/common so having a direct pointer: 1) Simplifies some code 2) Makes bcma_bus_get_host_dev() unneeded 3) Allows further improvements like using dev_* printing helpers Signed-off-by: Rafał Miłecki Signed-off-by: Kalle Valo --- include/linux/bcma/bcma.h | 11 +++-------- 1 file changed, 3 insertions(+), 8 deletions(-) (limited to 'include/linux') diff --git a/include/linux/bcma/bcma.h b/include/linux/bcma/bcma.h index ef61f3607e99..60b94b944e9f 100644 --- a/include/linux/bcma/bcma.h +++ b/include/linux/bcma/bcma.h @@ -332,6 +332,8 @@ extern int bcma_arch_register_fallback_sprom( struct ssb_sprom *out)); struct bcma_bus { + struct device *dev; + /* The MMIO area. 
*/ void __iomem *mmio; @@ -339,14 +341,7 @@ struct bcma_bus { enum bcma_hosttype hosttype; bool host_is_pcie2; /* Used for BCMA_HOSTTYPE_PCI only */ - union { - /* Pointer to the PCI bus (only for BCMA_HOSTTYPE_PCI) */ - struct pci_dev *host_pci; - /* Pointer to the SDIO device (only for BCMA_HOSTTYPE_SDIO) */ - struct sdio_func *host_sdio; - /* Pointer to platform device (only for BCMA_HOSTTYPE_SOC) */ - struct platform_device *host_pdev; - }; + struct pci_dev *host_pci; /* PCI bus pointer (BCMA_HOSTTYPE_PCI only) */ struct bcma_chipinfo chipinfo; -- cgit v1.2.3 From c1a85a00ea66cb6f0bd0f14e47c28c2b0999799f Mon Sep 17 00:00:00 2001 From: Micah Morton Date: Mon, 7 Jan 2019 16:10:53 -0800 Subject: LSM: generalize flag passing to security_capable This patch provides a general mechanism for passing flags to the security_capable LSM hook. It replaces the specific 'audit' flag that is used to tell security_capable whether it should log an audit message for the given capability check. The reason for generalizing this flag passing is so we can add an additional flag that signifies whether security_capable is being called by a setid syscall (which is needed by the proposed SafeSetID LSM). Signed-off-by: Micah Morton Reviewed-by: Kees Cook Signed-off-by: James Morris --- include/linux/lsm_hooks.h | 8 +++++--- include/linux/security.h | 28 ++++++++++++++-------------- 2 files changed, 19 insertions(+), 17 deletions(-) (limited to 'include/linux') diff --git a/include/linux/lsm_hooks.h b/include/linux/lsm_hooks.h index 40511a8a5ae6..195707210975 100644 --- a/include/linux/lsm_hooks.h +++ b/include/linux/lsm_hooks.h @@ -1270,7 +1270,7 @@ * @cred contains the credentials to use. * @ns contains the user namespace we want the capability in * @cap contains the capability . - * @audit contains whether to write an audit message or not + * @opts contains options for the capable check * Return 0 if the capability is granted for @tsk. 
* @syslog: * Check permission before accessing the kernel message ring or changing @@ -1446,8 +1446,10 @@ union security_list_options { const kernel_cap_t *effective, const kernel_cap_t *inheritable, const kernel_cap_t *permitted); - int (*capable)(const struct cred *cred, struct user_namespace *ns, - int cap, int audit); + int (*capable)(const struct cred *cred, + struct user_namespace *ns, + int cap, + unsigned int opts); int (*quotactl)(int cmds, int type, int id, struct super_block *sb); int (*quota_on)(struct dentry *dentry); int (*syslog)(int type); diff --git a/include/linux/security.h b/include/linux/security.h index b2c5333ed4b5..13537a49ae97 100644 --- a/include/linux/security.h +++ b/include/linux/security.h @@ -54,9 +54,12 @@ struct xattr; struct xfrm_sec_ctx; struct mm_struct; +/* Default (no) options for the capable function */ +#define CAP_OPT_NONE 0x0 /* If capable should audit the security request */ -#define SECURITY_CAP_NOAUDIT 0 -#define SECURITY_CAP_AUDIT 1 +#define CAP_OPT_NOAUDIT BIT(1) +/* If capable is being called by a setid function */ +#define CAP_OPT_INSETID BIT(2) /* LSM Agnostic defines for sb_set_mnt_opts */ #define SECURITY_LSM_NATIVE_LABELS 1 @@ -72,7 +75,7 @@ enum lsm_event { /* These functions are in security/commoncap.c */ extern int cap_capable(const struct cred *cred, struct user_namespace *ns, - int cap, int audit); + int cap, unsigned int opts); extern int cap_settime(const struct timespec64 *ts, const struct timezone *tz); extern int cap_ptrace_access_check(struct task_struct *child, unsigned int mode); extern int cap_ptrace_traceme(struct task_struct *parent); @@ -207,10 +210,10 @@ int security_capset(struct cred *new, const struct cred *old, const kernel_cap_t *effective, const kernel_cap_t *inheritable, const kernel_cap_t *permitted); -int security_capable(const struct cred *cred, struct user_namespace *ns, - int cap); -int security_capable_noaudit(const struct cred *cred, struct user_namespace *ns, - int cap); +int security_capable(const struct cred *cred, + struct user_namespace *ns, + int cap, + unsigned int opts); int security_quotactl(int cmds, int type, int id, struct super_block *sb); int security_quota_on(struct dentry *dentry); int security_syslog(int type); @@ -464,14 +467,11 @@ static inline int security_capset(struct cred *new, } static inline int security_capable(const struct cred *cred, - struct user_namespace *ns, int cap) + struct user_namespace *ns, + int cap, + unsigned int opts) { - return cap_capable(cred, ns, cap, SECURITY_CAP_AUDIT); -} - -static inline int security_capable_noaudit(const struct cred *cred, - struct user_namespace *ns, int cap) { - return cap_capable(cred, ns, cap, SECURITY_CAP_NOAUDIT); + return cap_capable(cred, ns, cap, opts); } static inline int security_quotactl(int cmds, int type, int id, -- cgit v1.2.3 From bec9ba7f37631e794cbfaa4c2274074d631217a9 Mon Sep 17 00:00:00 2001 From: Eric Biggers Date: Sun, 16 Dec 2018 19:12:18 -0800 Subject: crypto: cipher - remove struct cipher_desc 'struct cipher_desc' is unused. Remove it. 
Signed-off-by: Eric Biggers Signed-off-by: Herbert Xu --- include/linux/crypto.h | 8 -------- 1 file changed, 8 deletions(-) (limited to 'include/linux') diff --git a/include/linux/crypto.h b/include/linux/crypto.h index 902ec171fc6d..c3c98a62e503 100644 --- a/include/linux/crypto.h +++ b/include/linux/crypto.h @@ -188,14 +188,6 @@ struct blkcipher_desc { u32 flags; }; -struct cipher_desc { - struct crypto_tfm *tfm; - void (*crfn)(struct crypto_tfm *tfm, u8 *dst, const u8 *src); - unsigned int (*prfn)(const struct cipher_desc *desc, u8 *dst, - const u8 *src, unsigned int nbytes); - void *info; -}; - /** * DOC: Block Cipher Algorithm Definitions * -- cgit v1.2.3 From 5b438f4ba315db4f8c1489d175656798d58c014f Mon Sep 17 00:00:00 2001 From: Jacob Pan Date: Fri, 11 Jan 2019 13:04:57 +0800 Subject: iommu/vt-d: Support page request in scalable mode VT-d Rev3.0 has made a few changes to the page request interface, 1. widened PRQ descriptor from 128 bits to 256 bits; 2. removed streaming response type; 3. introduced private data that requires page response even the request is not last request in group (LPIG). This is a supplement to commit 1c4f88b7f1f92 ("iommu/vt-d: Shared virtual address in scalable mode") and makes the svm code compliant with VT-d Rev3.0. Cc: Ashok Raj Cc: Liu Yi L Cc: Kevin Tian Signed-off-by: Jacob Pan Fixes: 1c4f88b7f1f92 ("iommu/vt-d: Shared virtual address in scalable mode") Signed-off-by: Lu Baolu Signed-off-by: Joerg Roedel --- include/linux/intel-iommu.h | 21 +++++++++------------ include/linux/intel-svm.h | 2 +- 2 files changed, 10 insertions(+), 13 deletions(-) (limited to 'include/linux') diff --git a/include/linux/intel-iommu.h b/include/linux/intel-iommu.h index 0605f3bf6e79..fa364de9db18 100644 --- a/include/linux/intel-iommu.h +++ b/include/linux/intel-iommu.h @@ -374,20 +374,17 @@ enum { #define QI_DEV_EIOTLB_PFSID(pfsid) (((u64)(pfsid & 0xf) << 12) | ((u64)(pfsid & 0xfff) << 52)) #define QI_DEV_EIOTLB_MAX_INVS 32 -#define QI_PGRP_IDX(idx) (((u64)(idx)) << 55) -#define QI_PGRP_PRIV(priv) (((u64)(priv)) << 32) -#define QI_PGRP_RESP_CODE(res) ((u64)(res)) -#define QI_PGRP_PASID(pasid) (((u64)(pasid)) << 32) -#define QI_PGRP_DID(did) (((u64)(did)) << 16) +/* Page group response descriptor QW0 */ #define QI_PGRP_PASID_P(p) (((u64)(p)) << 4) +#define QI_PGRP_PDP(p) (((u64)(p)) << 5) +#define QI_PGRP_RESP_CODE(res) (((u64)(res)) << 12) +#define QI_PGRP_DID(rid) (((u64)(rid)) << 16) +#define QI_PGRP_PASID(pasid) (((u64)(pasid)) << 32) + +/* Page group response descriptor QW1 */ +#define QI_PGRP_LPIG(x) (((u64)(x)) << 2) +#define QI_PGRP_IDX(idx) (((u64)(idx)) << 3) -#define QI_PSTRM_ADDR(addr) (((u64)(addr)) & VTD_PAGE_MASK) -#define QI_PSTRM_DEVFN(devfn) (((u64)(devfn)) << 4) -#define QI_PSTRM_RESP_CODE(res) ((u64)(res)) -#define QI_PSTRM_IDX(idx) (((u64)(idx)) << 55) -#define QI_PSTRM_PRIV(priv) (((u64)(priv)) << 32) -#define QI_PSTRM_BUS(bus) (((u64)(bus)) << 24) -#define QI_PSTRM_PASID(pasid) (((u64)(pasid)) << 4) #define QI_RESP_SUCCESS 0x0 #define QI_RESP_INVALID 0x1 diff --git a/include/linux/intel-svm.h b/include/linux/intel-svm.h index 99bc5b3ae26e..e3f76315ca4d 100644 --- a/include/linux/intel-svm.h +++ b/include/linux/intel-svm.h @@ -20,7 +20,7 @@ struct device; struct svm_dev_ops { void (*fault_cb)(struct device *dev, int pasid, u64 address, - u32 private, int rwxp, int response); + void *private, int rwxp, int response); }; /* Values for rxwp in fault_cb callback */ -- cgit v1.2.3 From 19514910d021c93c7823ec32067e6b7dea224f0f Mon Sep 17 00:00:00 2001 From: 
Petr Mladek Date: Wed, 9 Jan 2019 13:43:19 +0100 Subject: livepatch: Change unsigned long old_addr -> void *old_func in struct klp_func The address of the to be patched function and new function is stored in struct klp_func as: void *new_func; unsigned long old_addr; The different naming scheme and type are derived from the way the addresses are set. @old_addr is assigned at runtime using kallsyms-based search. @new_func is statically initialized, for example: static struct klp_func funcs[] = { { .old_name = "cmdline_proc_show", .new_func = livepatch_cmdline_proc_show, }, { } }; This patch changes unsigned long old_addr -> void *old_func. It removes some confusion when these address are later used in the code. It is motivated by a followup patch that adds special NOP struct klp_func where we want to assign func->new_func = func->old_addr respectively func->new_func = func->old_func. This patch does not modify the existing behavior. Suggested-by: Josh Poimboeuf Signed-off-by: Petr Mladek Acked-by: Miroslav Benes Acked-by: Joe Lawrence Acked-by: Alice Ferrazzi Acked-by: Josh Poimboeuf Signed-off-by: Jiri Kosina --- include/linux/livepatch.h | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'include/linux') diff --git a/include/linux/livepatch.h b/include/linux/livepatch.h index aec44b1d9582..634e13876380 100644 --- a/include/linux/livepatch.h +++ b/include/linux/livepatch.h @@ -40,7 +40,7 @@ * @new_func: pointer to the patched function code * @old_sympos: a hint indicating which symbol position the old function * can be found (optional) - * @old_addr: the address of the function being patched + * @old_func: pointer to the function being patched * @kobj: kobject for sysfs resources * @stack_node: list node for klp_ops func_stack list * @old_size: size of the old function @@ -77,7 +77,7 @@ struct klp_func { unsigned long old_sympos; /* internal */ - unsigned long old_addr; + void *old_func; struct kobject kobj; struct list_head stack_node; unsigned long old_size, new_size; -- cgit v1.2.3 From 0430f78bf38f9972f0cf0522709cc63d49fa164c Mon Sep 17 00:00:00 2001 From: Petr Mladek Date: Wed, 9 Jan 2019 13:43:21 +0100 Subject: livepatch: Consolidate klp_free functions The code for freeing livepatch structures is a bit scattered and tricky: + direct calls to klp_free_*_limited() and kobject_put() are used to release partially initialized objects + klp_free_patch() removes the patch from the public list and releases all objects except for patch->kobj + object_put(&patch->kobj) and the related wait_for_completion() are called directly outside klp_mutex; this code is duplicated; Now, we are going to remove the registration stage to simplify the API and the code. This would require handling more situations in klp_enable_patch() error paths. More importantly, we are going to add a feature called atomic replace. It will need to dynamically create func and object structures. We will want to reuse the existing init() and free() functions. This would create even more error path scenarios. This patch implements more straightforward free functions: + checks kobj_added flag instead of @limit[*] + initializes patch->list early so that the check for empty list always works + The action(s) that has to be done outside klp_mutex are done in separate klp_free_patch_finish() function. It waits only when patch->kobj was really released via the _start() part. The patch does not change the existing behavior. [*] We need our own flag to track that the kobject was successfully added to the hierarchy. 
Note that kobj.state_initialized only indicates that kobject has been initialized, not whether is has been added (and needs to be removed on cleanup). Signed-off-by: Petr Mladek Cc: Josh Poimboeuf Cc: Miroslav Benes Cc: Jessica Yu Cc: Jiri Kosina Cc: Jason Baron Acked-by: Miroslav Benes Acked-by: Josh Poimboeuf Signed-off-by: Jiri Kosina --- include/linux/livepatch.h | 6 ++++++ 1 file changed, 6 insertions(+) (limited to 'include/linux') diff --git a/include/linux/livepatch.h b/include/linux/livepatch.h index 634e13876380..6978785bc059 100644 --- a/include/linux/livepatch.h +++ b/include/linux/livepatch.h @@ -45,6 +45,7 @@ * @stack_node: list node for klp_ops func_stack list * @old_size: size of the old function * @new_size: size of the new function + * @kobj_added: @kobj has been added and needs freeing * @patched: the func has been added to the klp_ops list * @transition: the func is currently being applied or reverted * @@ -81,6 +82,7 @@ struct klp_func { struct kobject kobj; struct list_head stack_node; unsigned long old_size, new_size; + bool kobj_added; bool patched; bool transition; }; @@ -117,6 +119,7 @@ struct klp_callbacks { * @kobj: kobject for sysfs resources * @mod: kernel module associated with the patched object * (NULL for vmlinux) + * @kobj_added: @kobj has been added and needs freeing * @patched: the object's funcs have been added to the klp_ops list */ struct klp_object { @@ -128,6 +131,7 @@ struct klp_object { /* internal */ struct kobject kobj; struct module *mod; + bool kobj_added; bool patched; }; @@ -137,6 +141,7 @@ struct klp_object { * @objs: object entries for kernel objects to be patched * @list: list node for global list of registered patches * @kobj: kobject for sysfs resources + * @kobj_added: @kobj has been added and needs freeing * @enabled: the patch is enabled (but operation may be incomplete) * @finish: for waiting till it is safe to remove the patch module */ @@ -148,6 +153,7 @@ struct klp_patch { /* internal */ struct list_head list; struct kobject kobj; + bool kobj_added; bool enabled; struct completion finish; }; -- cgit v1.2.3 From 68007289bf3cd937a5b8fc4987d2787167bd06ca Mon Sep 17 00:00:00 2001 From: Petr Mladek Date: Wed, 9 Jan 2019 13:43:22 +0100 Subject: livepatch: Don't block the removal of patches loaded after a forced transition module_put() is currently never called in klp_complete_transition() when klp_force is set. As a result, we might keep the reference count even when klp_enable_patch() fails and klp_cancel_transition() is called. This might give the impression that a module might get blocked in some strange init state. Fortunately, it is not the case. The reference count is ignored when mod->init fails and erroneous modules are always removed. Anyway, this might be confusing. Instead, this patch moves the global klp_forced flag into struct klp_patch. As a result, we block only modules that might still be in use after a forced transition. Newly loaded livepatches might be eventually completely removed later. It is not a big deal. But the code is at least consistent with the reality. 
Signed-off-by: Petr Mladek Acked-by: Joe Lawrence Acked-by: Miroslav Benes Acked-by: Josh Poimboeuf Signed-off-by: Jiri Kosina --- include/linux/livepatch.h | 2 ++ 1 file changed, 2 insertions(+) (limited to 'include/linux') diff --git a/include/linux/livepatch.h b/include/linux/livepatch.h index 6978785bc059..6a9165d9b090 100644 --- a/include/linux/livepatch.h +++ b/include/linux/livepatch.h @@ -143,6 +143,7 @@ struct klp_object { * @kobj: kobject for sysfs resources * @kobj_added: @kobj has been added and needs freeing * @enabled: the patch is enabled (but operation may be incomplete) + * @forced: was involved in a forced transition * @finish: for waiting till it is safe to remove the patch module */ struct klp_patch { @@ -155,6 +156,7 @@ struct klp_patch { struct kobject kobj; bool kobj_added; bool enabled; + bool forced; struct completion finish; }; -- cgit v1.2.3 From 958ef1e39d24d6cb8bf2a7406130a98c9564230f Mon Sep 17 00:00:00 2001 From: Petr Mladek Date: Wed, 9 Jan 2019 13:43:23 +0100 Subject: livepatch: Simplify API by removing registration step The possibility to re-enable a registered patch was useful for immediate patches where the livepatch module had to stay until the system reboot. The improved consistency model allows to achieve the same result by unloading and loading the livepatch module again. Also we are going to add a feature called atomic replace. It will allow to create a patch that would replace all already registered patches. The aim is to handle dependent patches more securely. It will obsolete the stack of patches that helped to handle the dependencies so far. Then it might be unclear when a cumulative patch re-enabling is safe. It would be complicated to support the many modes. Instead we could actually make the API and code easier to understand. Therefore, remove the two step public API. All the checks and init calls are moved from klp_register_patch() to klp_enabled_patch(). Also the patch is automatically freed, including the sysfs interface when the transition to the disabled state is completed. As a result, there is never a disabled patch on the top of the stack. Therefore we do not need to check the stack in __klp_enable_patch(). And we could simplify the check in __klp_disable_patch(). Also the API and logic is much easier. It is enough to call klp_enable_patch() in module_init() call. The patch can be disabled by writing '0' into /sys/kernel/livepatch//enabled. Then the module can be removed once the transition finishes and sysfs interface is freed. The only problem is how to free the structures and kobjects safely. The operation is triggered from the sysfs interface. We could not put the related kobject from there because it would cause lock inversion between klp_mutex and kernfs locks, see kn->count lockdep map. Therefore, offload the free task to a workqueue. It is perfectly fine: + The patch can no longer be used in the livepatch operations. + The module could not be removed until the free operation finishes and module_put() is called. + The operation is asynchronous already when the first klp_try_complete_transition() fails and another call is queued with a delay. 
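With the registration step gone, a complete livepatch module reduces to the sketch below, in the style of the in-tree samples (the patched symbol and replacement function are the same ones used as an example earlier in this series):

#include <linux/module.h>
#include <linux/livepatch.h>
#include <linux/seq_file.h>

static int livepatch_cmdline_proc_show(struct seq_file *m, void *v)
{
        seq_printf(m, "this has been live patched\n");
        return 0;
}

static struct klp_func funcs[] = {
        {
                .old_name = "cmdline_proc_show",
                .new_func = livepatch_cmdline_proc_show,
        }, { }
};

static struct klp_object objs[] = {
        {
                /* NULL name means vmlinux */
                .funcs = funcs,
        }, { }
};

static struct klp_patch patch = {
        .mod = THIS_MODULE,
        .objs = objs,
};

static int livepatch_init(void)
{
        return klp_enable_patch(&patch);
}

static void livepatch_exit(void)
{
        /* cleanup happens once the transition completes and sysfs is freed */
}

module_init(livepatch_init);
module_exit(livepatch_exit);
MODULE_LICENSE("GPL");
MODULE_INFO(livepatch, "Y");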
Suggested-by: Josh Poimboeuf Signed-off-by: Petr Mladek Acked-by: Miroslav Benes Acked-by: Josh Poimboeuf Signed-off-by: Jiri Kosina --- include/linux/livepatch.h | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) (limited to 'include/linux') diff --git a/include/linux/livepatch.h b/include/linux/livepatch.h index 6a9165d9b090..8f9c19c69744 100644 --- a/include/linux/livepatch.h +++ b/include/linux/livepatch.h @@ -139,11 +139,12 @@ struct klp_object { * struct klp_patch - patch structure for live patching * @mod: reference to the live patch module * @objs: object entries for kernel objects to be patched - * @list: list node for global list of registered patches + * @list: list node for global list of actively used patches * @kobj: kobject for sysfs resources * @kobj_added: @kobj has been added and needs freeing * @enabled: the patch is enabled (but operation may be incomplete) * @forced: was involved in a forced transition + * @free_work: patch cleanup from workqueue-context * @finish: for waiting till it is safe to remove the patch module */ struct klp_patch { @@ -157,6 +158,7 @@ struct klp_patch { bool kobj_added; bool enabled; bool forced; + struct work_struct free_work; struct completion finish; }; @@ -168,10 +170,7 @@ struct klp_patch { func->old_name || func->new_func || func->old_sympos; \ func++) -int klp_register_patch(struct klp_patch *); -int klp_unregister_patch(struct klp_patch *); int klp_enable_patch(struct klp_patch *); -int klp_disable_patch(struct klp_patch *); void arch_klp_init_object_loaded(struct klp_patch *patch, struct klp_object *obj); -- cgit v1.2.3 From 20e55025958e18e671d92c7adea00c301ac93c43 Mon Sep 17 00:00:00 2001 From: Jason Baron Date: Wed, 9 Jan 2019 13:43:24 +0100 Subject: livepatch: Use lists to manage patches, objects and functions Currently klp_patch contains a pointer to a statically allocated array of struct klp_object and struct klp_objects contains a pointer to a statically allocated array of klp_func. In order to allow for the dynamic allocation of objects and functions, link klp_patch, klp_object, and klp_func together via linked lists. This allows us to more easily allocate new objects and functions, while having the iterator be a simple linked list walk. The static structures are added to the lists early. It allows to add the dynamically allocated objects before klp_init_object() and klp_init_func() calls. Therefore it reduces the further changes to the code. This patch does not change the existing behavior. 
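The list-based iterators added below make a walk over a patch independent of whether the entries are static or dynamically allocated. A small, hedged example of such a walk:

/* Sketch: count every function covered by a patch via the new macros. */
static int klp_count_funcs(struct klp_patch *patch)
{
	struct klp_object *obj;
	struct klp_func *func;
	int count = 0;

	klp_for_each_object(patch, obj)
		klp_for_each_func(obj, func)
			count++;

	return count;
}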
Signed-off-by: Jason Baron [pmladek@suse.com: Initialize lists before init calls] Signed-off-by: Petr Mladek Acked-by: Miroslav Benes Acked-by: Joe Lawrence Cc: Josh Poimboeuf Cc: Jiri Kosina Acked-by: Josh Poimboeuf Signed-off-by: Jiri Kosina --- include/linux/livepatch.h | 19 +++++++++++++++++-- 1 file changed, 17 insertions(+), 2 deletions(-) (limited to 'include/linux') diff --git a/include/linux/livepatch.h b/include/linux/livepatch.h index 8f9c19c69744..e117e20ff771 100644 --- a/include/linux/livepatch.h +++ b/include/linux/livepatch.h @@ -24,6 +24,7 @@ #include #include #include +#include #if IS_ENABLED(CONFIG_LIVEPATCH) @@ -42,6 +43,7 @@ * can be found (optional) * @old_func: pointer to the function being patched * @kobj: kobject for sysfs resources + * @node: list node for klp_object func_list * @stack_node: list node for klp_ops func_stack list * @old_size: size of the old function * @new_size: size of the new function @@ -80,6 +82,7 @@ struct klp_func { /* internal */ void *old_func; struct kobject kobj; + struct list_head node; struct list_head stack_node; unsigned long old_size, new_size; bool kobj_added; @@ -117,6 +120,8 @@ struct klp_callbacks { * @funcs: function entries for functions to be patched in the object * @callbacks: functions to be executed pre/post (un)patching * @kobj: kobject for sysfs resources + * @func_list: dynamic list of the function entries + * @node: list node for klp_patch obj_list * @mod: kernel module associated with the patched object * (NULL for vmlinux) * @kobj_added: @kobj has been added and needs freeing @@ -130,6 +135,8 @@ struct klp_object { /* internal */ struct kobject kobj; + struct list_head func_list; + struct list_head node; struct module *mod; bool kobj_added; bool patched; @@ -141,6 +148,7 @@ struct klp_object { * @objs: object entries for kernel objects to be patched * @list: list node for global list of actively used patches * @kobj: kobject for sysfs resources + * @obj_list: dynamic list of the object entries * @kobj_added: @kobj has been added and needs freeing * @enabled: the patch is enabled (but operation may be incomplete) * @forced: was involved in a forced transition @@ -155,6 +163,7 @@ struct klp_patch { /* internal */ struct list_head list; struct kobject kobj; + struct list_head obj_list; bool kobj_added; bool enabled; bool forced; @@ -162,14 +171,20 @@ struct klp_patch { struct completion finish; }; -#define klp_for_each_object(patch, obj) \ +#define klp_for_each_object_static(patch, obj) \ for (obj = patch->objs; obj->funcs || obj->name; obj++) -#define klp_for_each_func(obj, func) \ +#define klp_for_each_object(patch, obj) \ + list_for_each_entry(obj, &patch->obj_list, node) + +#define klp_for_each_func_static(obj, func) \ for (func = obj->funcs; \ func->old_name || func->new_func || func->old_sympos; \ func++) +#define klp_for_each_func(obj, func) \ + list_for_each_entry(func, &obj->func_list, node) + int klp_enable_patch(struct klp_patch *); void arch_klp_init_object_loaded(struct klp_patch *patch, -- cgit v1.2.3 From e1452b607c48c642caf57299f4da83aa002f8533 Mon Sep 17 00:00:00 2001 From: Jason Baron Date: Wed, 9 Jan 2019 13:43:25 +0100 Subject: livepatch: Add atomic replace Sometimes we would like to revert a particular fix. Currently, this is not easy because we want to keep all other fixes active and we could revert only the last applied patch. One solution would be to apply new patch that implemented all the reverted functions like in the original code. 
It would work as expected but there will be unnecessary redirections. In addition, it would also require knowing which functions need to be reverted at build time. Another problem is when there are many patches that touch the same functions. There might be dependencies between patches that are not enforced on the kernel side. Also it might be pretty hard to actually prepare the patch and ensure compatibility with the other patches. Atomic replace && cumulative patches: A better solution would be to create cumulative patch and say that it replaces all older ones. This patch adds a new "replace" flag to struct klp_patch. When it is enabled, a set of 'nop' klp_func will be dynamically created for all functions that are already being patched but that will no longer be modified by the new patch. They are used as a new target during the patch transition. The idea is to handle Nops' structures like the static ones. When the dynamic structures are allocated, we initialize all values that are normally statically defined. The only exception is "new_func" in struct klp_func. It has to point to the original function and the address is known only when the object (module) is loaded. Note that we really need to set it. The address is used, for example, in klp_check_stack_func(). Nevertheless we still need to distinguish the dynamically allocated structures in some operations. For this, we add "nop" flag into struct klp_func and "dynamic" flag into struct klp_object. They need special handling in the following situations: + The structures are added into the lists of objects and functions immediately. In fact, the lists were created for this purpose. + The address of the original function is known only when the patched object (module) is loaded. Therefore it is copied later in klp_init_object_loaded(). + The ftrace handler must not set PC to func->new_func. It would cause infinite loop because the address points back to the beginning of the original function. + The various free() functions must free the structure itself. Note that other ways to detect the dynamic structures are not considered safe. For example, even the statically defined struct klp_object might include empty funcs array. It might be there just to run some callbacks. Also note that the safe iterator must be used in the free() functions. Otherwise already freed structures might get accessed. Special callbacks handling: The callbacks from the replaced patches are _not_ called by intention. It would be pretty hard to define a reasonable semantic and implement it. It might even be counter-productive. The new patch is cumulative. It is supposed to include most of the changes from older patches. In most cases, it will not want to call pre_unpatch() post_unpatch() callbacks from the replaced patches. It would disable/break things for no good reasons. Also it should be easier to handle various scenarios in a single script in the new patch than think about interactions caused by running many scripts from older patches. Not to say that the old scripts even would not expect to be called in this situation. Removing replaced patches: One nice effect of the cumulative patches is that the code from the older patches is no longer used. Therefore the replaced patches can be removed. It has several advantages: + Nops' structs will no longer be necessary and might be removed. This would save memory, restore performance (no ftrace handler), allow clear view on what is really patched. + Disabling the patch will cause using the original code everywhere. 
Therefore the livepatch callbacks could handle only one scenario. Note that the complication is already complex enough when the patch gets enabled. It is currently solved by calling callbacks only from the new cumulative patch. + The state is clean in both the sysfs interface and lsmod. The modules with the replaced livepatches might even get removed from the system. Some people actually expected this behavior from the beginning. After all a cumulative patch is supposed to "completely" replace an existing one. It is like when a new version of an application replaces an older one. This patch does the first step. It removes the replaced patches from the list of patches. It is safe. The consistency model ensures that they are no longer used. By other words, each process works only with the structures from klp_transition_patch. The removal is done by a special function. It combines actions done by __disable_patch() and klp_complete_transition(). But it is a fast track without all the transaction-related stuff. Signed-off-by: Jason Baron [pmladek@suse.com: Split, reuse existing code, simplified] Signed-off-by: Petr Mladek Cc: Josh Poimboeuf Cc: Jessica Yu Cc: Jiri Kosina Cc: Miroslav Benes Acked-by: Miroslav Benes Acked-by: Josh Poimboeuf Signed-off-by: Jiri Kosina --- include/linux/livepatch.h | 12 ++++++++++++ 1 file changed, 12 insertions(+) (limited to 'include/linux') diff --git a/include/linux/livepatch.h b/include/linux/livepatch.h index e117e20ff771..53551f470722 100644 --- a/include/linux/livepatch.h +++ b/include/linux/livepatch.h @@ -48,6 +48,7 @@ * @old_size: size of the old function * @new_size: size of the new function * @kobj_added: @kobj has been added and needs freeing + * @nop: temporary patch to use the original code again; dyn. allocated * @patched: the func has been added to the klp_ops list * @transition: the func is currently being applied or reverted * @@ -86,6 +87,7 @@ struct klp_func { struct list_head stack_node; unsigned long old_size, new_size; bool kobj_added; + bool nop; bool patched; bool transition; }; @@ -125,6 +127,7 @@ struct klp_callbacks { * @mod: kernel module associated with the patched object * (NULL for vmlinux) * @kobj_added: @kobj has been added and needs freeing + * @dynamic: temporary object for nop functions; dynamically allocated * @patched: the object's funcs have been added to the klp_ops list */ struct klp_object { @@ -139,6 +142,7 @@ struct klp_object { struct list_head node; struct module *mod; bool kobj_added; + bool dynamic; bool patched; }; @@ -146,6 +150,7 @@ struct klp_object { * struct klp_patch - patch structure for live patching * @mod: reference to the live patch module * @objs: object entries for kernel objects to be patched + * @replace: replace all actively used patches * @list: list node for global list of actively used patches * @kobj: kobject for sysfs resources * @obj_list: dynamic list of the object entries @@ -159,6 +164,7 @@ struct klp_patch { /* external */ struct module *mod; struct klp_object *objs; + bool replace; /* internal */ struct list_head list; @@ -174,6 +180,9 @@ struct klp_patch { #define klp_for_each_object_static(patch, obj) \ for (obj = patch->objs; obj->funcs || obj->name; obj++) +#define klp_for_each_object_safe(patch, obj, tmp_obj) \ + list_for_each_entry_safe(obj, tmp_obj, &patch->obj_list, node) + #define klp_for_each_object(patch, obj) \ list_for_each_entry(obj, &patch->obj_list, node) @@ -182,6 +191,9 @@ struct klp_patch { func->old_name || func->new_func || func->old_sympos; \ func++) +#define 
klp_for_each_func_safe(obj, func, tmp_func) \ + list_for_each_entry_safe(func, tmp_func, &obj->func_list, node) + #define klp_for_each_func(obj, func) \ list_for_each_entry(func, &obj->func_list, node) -- cgit v1.2.3 From afb77422819ff60612e9b7d36461b9b2bc8e038e Mon Sep 17 00:00:00 2001 From: Ioana Ciornei Date: Mon, 10 Dec 2018 16:50:19 +0000 Subject: bus: fsl-mc: automatically add a device_link on fsl_mc_[portal,object]_allocate Allocatable devices can be acquired by drivers on the fsl-mc bus using the fsl_mc_portal_allocate or fsl_mc_object_allocate functions. Add a device link between the consumer device and the supplier device so that proper resource management is achieved. Also, adding a link between these devices ensures that a proper unbind order is respected (ie before the supplier device is unbound from its respective driver all consumer devices will be notified and unbound first). Signed-off-by: Ioana Ciornei Reviewed-by: Laurentiu Tudor Signed-off-by: Li Yang --- include/linux/fsl/mc.h | 1 + 1 file changed, 1 insertion(+) (limited to 'include/linux') diff --git a/include/linux/fsl/mc.h b/include/linux/fsl/mc.h index 741f567253ef..975553a9f75d 100644 --- a/include/linux/fsl/mc.h +++ b/include/linux/fsl/mc.h @@ -193,6 +193,7 @@ struct fsl_mc_device { struct resource *regions; struct fsl_mc_device_irq **irqs; struct fsl_mc_resource *resource; + struct device_link *consumer_link; }; #define to_fsl_mc_device(_dev) \ -- cgit v1.2.3 From 98a455d91e7116ca417bc37da6aa2dd633206a6f Mon Sep 17 00:00:00 2001 From: Shunyong Yang Date: Tue, 18 Dec 2018 14:02:45 +0800 Subject: ACPI / tables: table override from built-in initrd In some scenario, we need to build initrd with kernel in a single image. This can simplify system deployment process by downloading the whole system once, such as in IC verification. This patch adds support to override ACPI tables from built-in initrd. Signed-off-by: Shunyong Yang [ rjw: Minor cleanups ] Signed-off-by: Rafael J. Wysocki --- include/linux/initrd.h | 3 +++ 1 file changed, 3 insertions(+) (limited to 'include/linux') diff --git a/include/linux/initrd.h b/include/linux/initrd.h index 14beaff9b445..d77fe34fb00a 100644 --- a/include/linux/initrd.h +++ b/include/linux/initrd.h @@ -25,3 +25,6 @@ extern phys_addr_t phys_initrd_start; extern unsigned long phys_initrd_size; extern unsigned int real_root_dev; + +extern char __initramfs_start[]; +extern unsigned long __initramfs_size; -- cgit v1.2.3 From 73f5a82bb3c9fce550da4a74a32b8cb064b50663 Mon Sep 17 00:00:00 2001 From: Leon Romanovsky Date: Sun, 13 Jan 2019 15:57:04 +0200 Subject: RDMA/mad: Reduce MAD scope to mlx5_ib only Management Datagram Interface (MAD) is applicable only when physical port is Infiniband. It makes MAD command logic to be completely unrelated to eth/core parts of mlx5. 
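For the fsl-mc device_link change above, the allocator side boils down to one device_link_add() call. The sketch below shows the idea; the helper name is invented and the real fsl_mc_portal_allocate()/fsl_mc_object_allocate() paths may differ in detail.

#include <linux/device.h>
#include <linux/fsl/mc.h>

/* Hedged sketch: tie the consumer that requested the allocation to the
 * allocated (supplier) device so it is unbound first on teardown. */
static int fsl_mc_link_allocated_device(struct fsl_mc_device *consumer,
					struct fsl_mc_device *allocated)
{
	allocated->consumer_link = device_link_add(&consumer->dev,
						   &allocated->dev,
						   DL_FLAG_AUTOREMOVE_CONSUMER);
	if (!allocated->consumer_link)
		return -EINVAL;

	return 0;
}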
Signed-off-by: Leon Romanovsky Acked-by: Jason Gunthorpe --- include/linux/mlx5/driver.h | 2 -- 1 file changed, 2 deletions(-) (limited to 'include/linux') diff --git a/include/linux/mlx5/driver.h b/include/linux/mlx5/driver.h index 54299251d40d..4e444863054a 100644 --- a/include/linux/mlx5/driver.h +++ b/include/linux/mlx5/driver.h @@ -897,8 +897,6 @@ int mlx5_core_query_mkey(struct mlx5_core_dev *dev, struct mlx5_core_mkey *mkey, u32 *out, int outlen); int mlx5_core_alloc_pd(struct mlx5_core_dev *dev, u32 *pdn); int mlx5_core_dealloc_pd(struct mlx5_core_dev *dev, u32 pdn); -int mlx5_core_mad_ifc(struct mlx5_core_dev *dev, const void *inb, void *outb, - u16 opmod, u8 port); int mlx5_pagealloc_init(struct mlx5_core_dev *dev); void mlx5_pagealloc_cleanup(struct mlx5_core_dev *dev); void mlx5_pagealloc_start(struct mlx5_core_dev *dev); -- cgit v1.2.3 From 16118794ede91aac1a73abe15de22d3de9d2b775 Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Fri, 11 Jan 2019 14:33:17 +0100 Subject: posix-cpu-timers: Remove private interval storage Posix CPU timers store the interval in private storage for historical reasons (it_interval used to be a non scalar representation on 32bit systems). This is gone and there is no reason for duplicated storage anymore. Use it_interval everywhere. Signed-off-by: Thomas Gleixner Cc: John Stultz Cc: Peter Zijlstra Cc: "H.J. Lu" Link: https://lkml.kernel.org/r/20190111133500.945255655@linutronix.de --- include/linux/posix-timers.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'include/linux') diff --git a/include/linux/posix-timers.h b/include/linux/posix-timers.h index e96581ca7c9d..b20798fc5191 100644 --- a/include/linux/posix-timers.h +++ b/include/linux/posix-timers.h @@ -12,7 +12,7 @@ struct siginfo; struct cpu_timer_list { struct list_head entry; - u64 expires, incr; + u64 expires; struct task_struct *task; int firing; }; -- cgit v1.2.3 From 8a62ffe2753a845272f4f2100b5fca0b6053ff6f Mon Sep 17 00:00:00 2001 From: Vincent Guittot Date: Fri, 21 Dec 2018 11:33:54 +0100 Subject: PM-runtime: Add new interface to get accounted time Some drivers (like i915/drm) needs to get the accounted suspended time. pm_runtime_suspended_time() will return the suspended accounted time in ns unit. Reviewed-by: Ulf Hansson Signed-off-by: Vincent Guittot Signed-off-by: Rafael J. Wysocki --- include/linux/pm_runtime.h | 2 ++ 1 file changed, 2 insertions(+) (limited to 'include/linux') diff --git a/include/linux/pm_runtime.h b/include/linux/pm_runtime.h index 54af4eef169f..a370006921c0 100644 --- a/include/linux/pm_runtime.h +++ b/include/linux/pm_runtime.h @@ -113,6 +113,8 @@ static inline bool pm_runtime_is_irq_safe(struct device *dev) return dev->power.irq_safe; } +extern u64 pm_runtime_suspended_time(struct device *dev); + #else /* !CONFIG_PM */ static inline bool queue_pm_work(struct work_struct *work) { return false; } -- cgit v1.2.3 From b33a02aadcc6330a61e511240b634dc11112e65e Mon Sep 17 00:00:00 2001 From: Andy Shevchenko Date: Wed, 9 Jan 2019 17:24:55 +0200 Subject: i2c: acpi: Move I2C bits from acpi.h to i2c.h As discussed previously the best location for certain bus related bits, e.g. I2C, is its own realm of the headers. In order to uncontaminate acpi.h move the I2C bits to i2c.h. There is no functional change intended. Link: https://lkml.org/lkml/2018/11/28/744 Signed-off-by: Andy Shevchenko Acked-by: Mika Westerberg Acked-by: Rafael J. 
Wysocki Signed-off-by: Wolfram Sang --- include/linux/acpi.h | 11 ----------- include/linux/i2c.h | 10 ++++++++++ 2 files changed, 10 insertions(+), 11 deletions(-) (limited to 'include/linux') diff --git a/include/linux/acpi.h b/include/linux/acpi.h index 87715f20b69a..13f5cb2c4763 100644 --- a/include/linux/acpi.h +++ b/include/linux/acpi.h @@ -1061,17 +1061,6 @@ static inline int acpi_dev_gpio_irq_get(struct acpi_device *adev, int index) } #endif -#if defined(CONFIG_ACPI) && IS_ENABLED(CONFIG_I2C) -bool i2c_acpi_get_i2c_resource(struct acpi_resource *ares, - struct acpi_resource_i2c_serialbus **i2c); -#else -static inline bool i2c_acpi_get_i2c_resource(struct acpi_resource *ares, - struct acpi_resource_i2c_serialbus **i2c) -{ - return false; -} -#endif - /* Device properties */ #ifdef CONFIG_ACPI diff --git a/include/linux/i2c.h b/include/linux/i2c.h index cba59d66c00d..1f45331924d6 100644 --- a/include/linux/i2c.h +++ b/include/linux/i2c.h @@ -967,11 +967,21 @@ static inline int of_i2c_get_board_info(struct device *dev, #endif /* CONFIG_OF */ +struct acpi_resource; +struct acpi_resource_i2c_serialbus; + #if IS_ENABLED(CONFIG_ACPI) +bool i2c_acpi_get_i2c_resource(struct acpi_resource *ares, + struct acpi_resource_i2c_serialbus **i2c); u32 i2c_acpi_find_bus_speed(struct device *dev); struct i2c_client *i2c_acpi_new_device(struct device *dev, int index, struct i2c_board_info *info); #else +static inline bool i2c_acpi_get_i2c_resource(struct acpi_resource *ares, + struct acpi_resource_i2c_serialbus **i2c) +{ + return false; +} static inline u32 i2c_acpi_find_bus_speed(struct device *dev) { return 0; -- cgit v1.2.3 From 063755ab1d1c1127adc09703185967862584935b Mon Sep 17 00:00:00 2001 From: Philippe Schenker Date: Fri, 21 Dec 2018 14:46:31 +0100 Subject: mfd: stmpe: Move ADC related defines to MFD header Move defines that are ADC related to the header of the overlying MFD, so they can be used from multiple sub-devices. Signed-off-by: Philippe Schenker Acked-by: Dmitry Torokhov Signed-off-by: Lee Jones --- include/linux/mfd/stmpe.h | 11 +++++++++++ 1 file changed, 11 insertions(+) (limited to 'include/linux') diff --git a/include/linux/mfd/stmpe.h b/include/linux/mfd/stmpe.h index 4a827af17e59..c0353f6431f9 100644 --- a/include/linux/mfd/stmpe.h +++ b/include/linux/mfd/stmpe.h @@ -10,6 +10,17 @@ #include +#define STMPE_SAMPLE_TIME(x) ((x & 0xf) << 4) +#define STMPE_MOD_12B(x) ((x & 0x1) << 3) +#define STMPE_REF_SEL(x) ((x & 0x1) << 1) +#define STMPE_ADC_FREQ(x) (x & 0x3) +#define STMPE_AVE_CTRL(x) ((x & 0x3) << 6) +#define STMPE_DET_DELAY(x) ((x & 0x7) << 3) +#define STMPE_SETTLING(x) (x & 0x7) +#define STMPE_FRACTION_Z(x) (x & 0x7) +#define STMPE_I_DRIVE(x) (x & 0x1) +#define STMPE_OP_MODE(x) ((x & 0x7) << 1) + struct device; struct regulator; -- cgit v1.2.3 From 6377cfa3b857ced301f2079ac97de6c19057ab65 Mon Sep 17 00:00:00 2001 From: Stefan Agner Date: Fri, 21 Dec 2018 14:46:32 +0100 Subject: mfd: stmpe: Preparations for STMPE ADC driver This prepares the MFD for the STMPE ADC driver. This commit introduces devicetree settings that are used by the ADC and adds an init function. Common ADC settings that are shared with the touchscreen driver can now reside in the overlying MFD. 
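With the shared defines and the per-chip ADC fields added by these two STMPE patches, a sub-device can compose the common ADC control value in one place. A hedged sketch, with the helper name invented and error handling trimmed:

#include <linux/mfd/stmpe.h>

/* Sketch: build and program ADC_CTRL1 from the settings stored in struct stmpe. */
static int stmpe_adc_write_ctrl1(struct stmpe *stmpe)
{
	u8 val = STMPE_SAMPLE_TIME(stmpe->sample_time) |
		 STMPE_MOD_12B(stmpe->mod_12b) |
		 STMPE_REF_SEL(stmpe->ref_sel);

	return stmpe_reg_write(stmpe, STMPE811_REG_ADC_CTRL1, val);
}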
Signed-off-by: Stefan Agner Signed-off-by: Max Krummenacher Signed-off-by: Philippe Schenker Signed-off-by: Lee Jones --- include/linux/mfd/stmpe.h | 10 ++++++++++ 1 file changed, 10 insertions(+) (limited to 'include/linux') diff --git a/include/linux/mfd/stmpe.h b/include/linux/mfd/stmpe.h index c0353f6431f9..07f55aac9390 100644 --- a/include/linux/mfd/stmpe.h +++ b/include/linux/mfd/stmpe.h @@ -21,6 +21,9 @@ #define STMPE_I_DRIVE(x) (x & 0x1) #define STMPE_OP_MODE(x) ((x & 0x7) << 1) +#define STMPE811_REG_ADC_CTRL1 0x20 +#define STMPE811_REG_ADC_CTRL2 0x21 + struct device; struct regulator; @@ -134,6 +137,12 @@ struct stmpe { u8 ier[2]; u8 oldier[2]; struct stmpe_platform_data *pdata; + + /* For devices that use an ADC */ + u8 sample_time; + u8 mod_12b; + u8 ref_sel; + u8 adc_freq; }; extern int stmpe_reg_write(struct stmpe *stmpe, u8 reg, u8 data); @@ -147,6 +156,7 @@ extern int stmpe_set_altfunc(struct stmpe *stmpe, u32 pins, enum stmpe_block block); extern int stmpe_enable(struct stmpe *stmpe, unsigned int blocks); extern int stmpe_disable(struct stmpe *stmpe, unsigned int blocks); +extern int stmpe811_adc_common_init(struct stmpe *stmpe); #define STMPE_GPIO_NOREQ_811_TOUCH (0xf0) -- cgit v1.2.3 From 1d7ae53b152dbc5ba0a4f6a83ecc42ac66f52d11 Mon Sep 17 00:00:00 2001 From: Dmitry Osipenko Date: Wed, 12 Dec 2018 23:38:47 +0300 Subject: iommu: Introduce iotlb_sync_map callback Introduce iotlb_sync_map() callback that is invoked in the end of iommu_map(). This new callback allows IOMMU drivers to avoid syncing after mapping of each contiguous chunk and sync only when the whole mapping is completed, optimizing performance of the mapping operation. Signed-off-by: Dmitry Osipenko Reviewed-by: Robin Murphy Reviewed-by: Thierry Reding Signed-off-by: Joerg Roedel --- include/linux/iommu.h | 1 + 1 file changed, 1 insertion(+) (limited to 'include/linux') diff --git a/include/linux/iommu.h b/include/linux/iommu.h index e90da6b6f3d1..477ef47c357c 100644 --- a/include/linux/iommu.h +++ b/include/linux/iommu.h @@ -201,6 +201,7 @@ struct iommu_ops { void (*flush_iotlb_all)(struct iommu_domain *domain); void (*iotlb_range_add)(struct iommu_domain *domain, unsigned long iova, size_t size); + void (*iotlb_sync_map)(struct iommu_domain *domain); void (*iotlb_sync)(struct iommu_domain *domain); phys_addr_t (*iova_to_phys)(struct iommu_domain *domain, dma_addr_t iova); int (*add_device)(struct device *dev); -- cgit v1.2.3 From 51908d2e9b7c7730608a19f24fc8718af745bb2f Mon Sep 17 00:00:00 2001 From: Pascal PAILLET-LME Date: Mon, 14 Jan 2019 10:05:16 +0000 Subject: mfd: stpmic1: Add STPMIC1 driver STPMIC1 is a PMIC from STMicroelectronics. The STPMIC1 integrates 10 regulators, 3 power switches, a watchdog and an input for a power on key. Signed-off-by: Pascal Paillet Signed-off-by: Lee Jones --- include/linux/mfd/stpmic1.h | 212 ++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 212 insertions(+) create mode 100644 include/linux/mfd/stpmic1.h (limited to 'include/linux') diff --git a/include/linux/mfd/stpmic1.h b/include/linux/mfd/stpmic1.h new file mode 100644 index 000000000000..fa3f99f7e9a1 --- /dev/null +++ b/include/linux/mfd/stpmic1.h @@ -0,0 +1,212 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (C) STMicroelectronics 2018 - All Rights Reserved + * Author: Philippe Peurichard , + * Pascal Paillet for STMicroelectronics. 
+ */ + +#ifndef __LINUX_MFD_STPMIC1_H +#define __LINUX_MFD_STPMIC1_H + +#define TURN_ON_SR 0x1 +#define TURN_OFF_SR 0x2 +#define ICC_LDO_TURN_OFF_SR 0x3 +#define ICC_BUCK_TURN_OFF_SR 0x4 +#define RREQ_STATE_SR 0x5 +#define VERSION_SR 0x6 + +#define SWOFF_PWRCTRL_CR 0x10 +#define PADS_PULL_CR 0x11 +#define BUCKS_PD_CR 0x12 +#define LDO14_PD_CR 0x13 +#define LDO56_VREF_PD_CR 0x14 +#define VBUS_DET_VIN_CR 0x15 +#define PKEY_TURNOFF_CR 0x16 +#define BUCKS_MASK_RANK_CR 0x17 +#define BUCKS_MASK_RESET_CR 0x18 +#define LDOS_MASK_RANK_CR 0x19 +#define LDOS_MASK_RESET_CR 0x1A +#define WCHDG_CR 0x1B +#define WCHDG_TIMER_CR 0x1C +#define BUCKS_ICCTO_CR 0x1D +#define LDOS_ICCTO_CR 0x1E + +#define BUCK1_ACTIVE_CR 0x20 +#define BUCK2_ACTIVE_CR 0x21 +#define BUCK3_ACTIVE_CR 0x22 +#define BUCK4_ACTIVE_CR 0x23 +#define VREF_DDR_ACTIVE_CR 0x24 +#define LDO1_ACTIVE_CR 0x25 +#define LDO2_ACTIVE_CR 0x26 +#define LDO3_ACTIVE_CR 0x27 +#define LDO4_ACTIVE_CR 0x28 +#define LDO5_ACTIVE_CR 0x29 +#define LDO6_ACTIVE_CR 0x2A + +#define BUCK1_STDBY_CR 0x30 +#define BUCK2_STDBY_CR 0x31 +#define BUCK3_STDBY_CR 0x32 +#define BUCK4_STDBY_CR 0x33 +#define VREF_DDR_STDBY_CR 0x34 +#define LDO1_STDBY_CR 0x35 +#define LDO2_STDBY_CR 0x36 +#define LDO3_STDBY_CR 0x37 +#define LDO4_STDBY_CR 0x38 +#define LDO5_STDBY_CR 0x39 +#define LDO6_STDBY_CR 0x3A + +#define BST_SW_CR 0x40 + +#define INT_PENDING_R1 0x50 +#define INT_PENDING_R2 0x51 +#define INT_PENDING_R3 0x52 +#define INT_PENDING_R4 0x53 + +#define INT_DBG_LATCH_R1 0x60 +#define INT_DBG_LATCH_R2 0x61 +#define INT_DBG_LATCH_R3 0x62 +#define INT_DBG_LATCH_R4 0x63 + +#define INT_CLEAR_R1 0x70 +#define INT_CLEAR_R2 0x71 +#define INT_CLEAR_R3 0x72 +#define INT_CLEAR_R4 0x73 + +#define INT_MASK_R1 0x80 +#define INT_MASK_R2 0x81 +#define INT_MASK_R3 0x82 +#define INT_MASK_R4 0x83 + +#define INT_SET_MASK_R1 0x90 +#define INT_SET_MASK_R2 0x91 +#define INT_SET_MASK_R3 0x92 +#define INT_SET_MASK_R4 0x93 + +#define INT_CLEAR_MASK_R1 0xA0 +#define INT_CLEAR_MASK_R2 0xA1 +#define INT_CLEAR_MASK_R3 0xA2 +#define INT_CLEAR_MASK_R4 0xA3 + +#define INT_SRC_R1 0xB0 +#define INT_SRC_R2 0xB1 +#define INT_SRC_R3 0xB2 +#define INT_SRC_R4 0xB3 + +#define PMIC_MAX_REGISTER_ADDRESS INT_SRC_R4 + +#define STPMIC1_PMIC_NUM_IRQ_REGS 4 + +#define TURN_OFF_SR_ICC_EVENT 0x08 + +#define LDO_VOLTAGE_MASK GENMASK(6, 2) +#define BUCK_VOLTAGE_MASK GENMASK(7, 2) +#define LDO_BUCK_VOLTAGE_SHIFT 2 + +#define LDO_ENABLE_MASK BIT(0) +#define BUCK_ENABLE_MASK BIT(0) + +#define BUCK_HPLP_ENABLE_MASK BIT(1) +#define BUCK_HPLP_SHIFT 1 + +#define STDBY_ENABLE_MASK BIT(0) + +#define BUCKS_PD_CR_REG_MASK GENMASK(7, 0) +#define BUCK_MASK_RANK_REGISTER_MASK GENMASK(3, 0) +#define BUCK_MASK_RESET_REGISTER_MASK GENMASK(3, 0) +#define LDO1234_PULL_DOWN_REGISTER_MASK GENMASK(7, 0) +#define LDO56_VREF_PD_CR_REG_MASK GENMASK(5, 0) +#define LDO_MASK_RANK_REGISTER_MASK GENMASK(5, 0) +#define LDO_MASK_RESET_REGISTER_MASK GENMASK(5, 0) + +#define BUCK1_PULL_DOWN_REG BUCKS_PD_CR +#define BUCK1_PULL_DOWN_MASK BIT(0) +#define BUCK2_PULL_DOWN_REG BUCKS_PD_CR +#define BUCK2_PULL_DOWN_MASK BIT(2) +#define BUCK3_PULL_DOWN_REG BUCKS_PD_CR +#define BUCK3_PULL_DOWN_MASK BIT(4) +#define BUCK4_PULL_DOWN_REG BUCKS_PD_CR +#define BUCK4_PULL_DOWN_MASK BIT(6) + +#define LDO1_PULL_DOWN_REG LDO14_PD_CR +#define LDO1_PULL_DOWN_MASK BIT(0) +#define LDO2_PULL_DOWN_REG LDO14_PD_CR +#define LDO2_PULL_DOWN_MASK BIT(2) +#define LDO3_PULL_DOWN_REG LDO14_PD_CR +#define LDO3_PULL_DOWN_MASK BIT(4) +#define LDO4_PULL_DOWN_REG LDO14_PD_CR +#define LDO4_PULL_DOWN_MASK 
BIT(6) +#define LDO5_PULL_DOWN_REG LDO56_VREF_PD_CR +#define LDO5_PULL_DOWN_MASK BIT(0) +#define LDO6_PULL_DOWN_REG LDO56_VREF_PD_CR +#define LDO6_PULL_DOWN_MASK BIT(2) +#define VREF_DDR_PULL_DOWN_REG LDO56_VREF_PD_CR +#define VREF_DDR_PULL_DOWN_MASK BIT(4) + +#define BUCKS_ICCTO_CR_REG_MASK GENMASK(6, 0) +#define LDOS_ICCTO_CR_REG_MASK GENMASK(5, 0) + +#define LDO_BYPASS_MASK BIT(7) + +/* Main PMIC Control Register + * SWOFF_PWRCTRL_CR + * Address : 0x10 + */ +#define ICC_EVENT_ENABLED BIT(4) +#define PWRCTRL_POLARITY_HIGH BIT(3) +#define PWRCTRL_PIN_VALID BIT(2) +#define RESTART_REQUEST_ENABLED BIT(1) +#define SOFTWARE_SWITCH_OFF_ENABLED BIT(0) + +/* Main PMIC PADS Control Register + * PADS_PULL_CR + * Address : 0x11 + */ +#define WAKEUP_DETECTOR_DISABLED BIT(4) +#define PWRCTRL_PD_ACTIVE BIT(3) +#define PWRCTRL_PU_ACTIVE BIT(2) +#define WAKEUP_PD_ACTIVE BIT(1) +#define PONKEY_PU_INACTIVE BIT(0) + +/* Main PMIC VINLOW Control Register + * VBUS_DET_VIN_CRC DMSC + * Address : 0x15 + */ +#define SWIN_DETECTOR_ENABLED BIT(7) +#define SWOUT_DETECTOR_ENABLED BIT(6) +#define VINLOW_ENABLED BIT(0) +#define VINLOW_CTRL_REG_MASK GENMASK(7, 0) + +/* USB Control Register + * Address : 0x40 + */ +#define BOOST_OVP_DISABLED BIT(7) +#define VBUS_OTG_DETECTION_DISABLED BIT(6) +#define SW_OUT_DISCHARGE BIT(5) +#define VBUS_OTG_DISCHARGE BIT(4) +#define OCP_LIMIT_HIGH BIT(3) +#define SWIN_SWOUT_ENABLED BIT(2) +#define USBSW_OTG_SWITCH_ENABLED BIT(1) +#define BOOST_ENABLED BIT(0) + +/* PKEY_TURNOFF_CR + * Address : 0x16 + */ +#define PONKEY_PWR_OFF BIT(7) +#define PONKEY_CC_FLAG_CLEAR BIT(6) +#define PONKEY_TURNOFF_TIMER_MASK GENMASK(3, 0) +#define PONKEY_TURNOFF_MASK GENMASK(7, 0) + +/* + * struct stpmic1 - stpmic1 master device for sub-drivers + * @dev: master device of the chip (can be used to access platform data) + * @irq: main IRQ number + * @regmap_irq_chip_data: irq chip data + */ +struct stpmic1 { + struct device *dev; + struct regmap *regmap; + int irq; + struct regmap_irq_chip_data *irq_data; +}; + +#endif /* __LINUX_MFD_STPMIC1_H */ -- cgit v1.2.3 From 8e1f456129e61371fb190c71ea182a9f6e21282e Mon Sep 17 00:00:00 2001 From: Krzysztof Kozlowski Date: Wed, 9 Jan 2019 15:44:46 +0100 Subject: leds: Add helper for getting default pattern from Device Tree Multiple LED triggers might need to access default pattern so add a helper for that. Signed-off-by: Krzysztof Kozlowski Acked-by: Pavel Machek Signed-off-by: Jacek Anaszewski --- include/linux/leds.h | 13 +++++++++++++ 1 file changed, 13 insertions(+) (limited to 'include/linux') diff --git a/include/linux/leds.h b/include/linux/leds.h index 5263f87e1d2c..78204650fe2a 100644 --- a/include/linux/leds.h +++ b/include/linux/leds.h @@ -218,6 +218,19 @@ extern int led_set_brightness_sync(struct led_classdev *led_cdev, */ extern int led_update_brightness(struct led_classdev *led_cdev); +/** + * led_get_default_pattern - return default pattern + * + * @led_cdev: the LED to get default pattern for + * @size: pointer for storing the number of elements in returned array, + * modified only if return != NULL + * + * Return: Allocated array of integers with default pattern from device tree + * or NULL. Caller is responsible for kfree(). 
+ */ +extern u32 *led_get_default_pattern(struct led_classdev *led_cdev, + unsigned int *size); + /** * led_sysfs_disable - disable LED sysfs interface * @led_cdev: the LED to set -- cgit v1.2.3 From fcd44b64b1eb0a33f6cc14f21dcb927ffd664af3 Mon Sep 17 00:00:00 2001 From: Yogesh Narayan Gaur Date: Tue, 15 Jan 2019 10:05:10 +0000 Subject: mtd: spi-nor: add opcodes for octal Read/Write commands - Add opcodes for octal I/O commands * Read : 1-1-8 and 1-8-8 protocol * Write : 1-1-8 and 1-8-8 protocol * opcodes for 4-byte address mode command - Entry of macros in _convert_3to4_xxx function - Add flag SPI_NOR_OCTAL_READ specifying flash support octal read commands. This flag is required for flashes which didn't provides support for auto detection of Octal mode capabilities i.e. not seems to support newer JESD216C standard. Signed-off-by: Vignesh R Signed-off-by: Yogesh Narayan Gaur Reviewed-by: Tudor Ambarus Signed-off-by: Boris Brezillon --- include/linux/mtd/spi-nor.h | 16 ++++++++++++---- 1 file changed, 12 insertions(+), 4 deletions(-) (limited to 'include/linux') diff --git a/include/linux/mtd/spi-nor.h b/include/linux/mtd/spi-nor.h index fa2d89e38e40..2353af8bac99 100644 --- a/include/linux/mtd/spi-nor.h +++ b/include/linux/mtd/spi-nor.h @@ -46,9 +46,13 @@ #define SPINOR_OP_READ_1_2_2 0xbb /* Read data bytes (Dual I/O SPI) */ #define SPINOR_OP_READ_1_1_4 0x6b /* Read data bytes (Quad Output SPI) */ #define SPINOR_OP_READ_1_4_4 0xeb /* Read data bytes (Quad I/O SPI) */ +#define SPINOR_OP_READ_1_1_8 0x8b /* Read data bytes (Octal Output SPI) */ +#define SPINOR_OP_READ_1_8_8 0xcb /* Read data bytes (Octal I/O SPI) */ #define SPINOR_OP_PP 0x02 /* Page program (up to 256 bytes) */ #define SPINOR_OP_PP_1_1_4 0x32 /* Quad page program */ #define SPINOR_OP_PP_1_4_4 0x38 /* Quad page program */ +#define SPINOR_OP_PP_1_1_8 0x82 /* Octal page program */ +#define SPINOR_OP_PP_1_8_8 0xc2 /* Octal page program */ #define SPINOR_OP_BE_4K 0x20 /* Erase 4KiB block */ #define SPINOR_OP_BE_4K_PMC 0xd7 /* Erase 4KiB block on PMC chips */ #define SPINOR_OP_BE_32K 0x52 /* Erase 32KiB block */ @@ -69,9 +73,13 @@ #define SPINOR_OP_READ_1_2_2_4B 0xbc /* Read data bytes (Dual I/O SPI) */ #define SPINOR_OP_READ_1_1_4_4B 0x6c /* Read data bytes (Quad Output SPI) */ #define SPINOR_OP_READ_1_4_4_4B 0xec /* Read data bytes (Quad I/O SPI) */ +#define SPINOR_OP_READ_1_1_8_4B 0x7c /* Read data bytes (Octal Output SPI) */ +#define SPINOR_OP_READ_1_8_8_4B 0xcc /* Read data bytes (Octal I/O SPI) */ #define SPINOR_OP_PP_4B 0x12 /* Page program (up to 256 bytes) */ #define SPINOR_OP_PP_1_1_4_4B 0x34 /* Quad page program */ #define SPINOR_OP_PP_1_4_4_4B 0x3e /* Quad page program */ +#define SPINOR_OP_PP_1_1_8_4B 0x84 /* Octal page program */ +#define SPINOR_OP_PP_1_8_8_4B 0x8e /* Octal page program */ #define SPINOR_OP_BE_4K_4B 0x21 /* Erase 4KiB block */ #define SPINOR_OP_BE_32K_4B 0x5c /* Erase 32KiB block */ #define SPINOR_OP_SE_4B 0xdc /* Sector erase (usually 64KiB) */ @@ -458,7 +466,7 @@ struct spi_nor_hwcaps { /* *(Fast) Read capabilities. * MUST be ordered by priority: the higher bit position, the higher priority. - * As a matter of performances, it is relevant to use Octo SPI protocols first, + * As a matter of performances, it is relevant to use Octal SPI protocols first, * then Quad SPI protocols before Dual SPI protocols, Fast Read and lastly * (Slow) Read. 
*/ @@ -479,7 +487,7 @@ struct spi_nor_hwcaps { #define SNOR_HWCAPS_READ_4_4_4 BIT(9) #define SNOR_HWCAPS_READ_1_4_4_DTR BIT(10) -#define SNOR_HWCPAS_READ_OCTO GENMASK(14, 11) +#define SNOR_HWCPAS_READ_OCTAL GENMASK(14, 11) #define SNOR_HWCAPS_READ_1_1_8 BIT(11) #define SNOR_HWCAPS_READ_1_8_8 BIT(12) #define SNOR_HWCAPS_READ_8_8_8 BIT(13) @@ -488,7 +496,7 @@ struct spi_nor_hwcaps { /* * Page Program capabilities. * MUST be ordered by priority: the higher bit position, the higher priority. - * Like (Fast) Read capabilities, Octo/Quad SPI protocols are preferred to the + * Like (Fast) Read capabilities, Octal/Quad SPI protocols are preferred to the * legacy SPI 1-1-1 protocol. * Note that Dual Page Programs are not supported because there is no existing * JEDEC/SFDP standard to define them. Also at this moment no SPI flash memory @@ -502,7 +510,7 @@ struct spi_nor_hwcaps { #define SNOR_HWCAPS_PP_1_4_4 BIT(18) #define SNOR_HWCAPS_PP_4_4_4 BIT(19) -#define SNOR_HWCAPS_PP_OCTO GENMASK(22, 20) +#define SNOR_HWCAPS_PP_OCTAL GENMASK(22, 20) #define SNOR_HWCAPS_PP_1_1_8 BIT(20) #define SNOR_HWCAPS_PP_1_8_8 BIT(21) #define SNOR_HWCAPS_PP_8_8_8 BIT(22) -- cgit v1.2.3 From b172fd0c898022c47161a99cb40be5304b0d3fd0 Mon Sep 17 00:00:00 2001 From: Alban Bedel Date: Wed, 16 Jan 2019 19:55:46 +0100 Subject: spi: ath79: Enable support for compile test To allow building this driver in compile test we need to remove all dependency on headers from arch/mips/include. To allow this we explicitly define all the registers locally instead of using ar71xx_regs.h and we move the platform data struct definition to include/linux/platform_data/spi-ath79.h. Signed-off-by: Alban Bedel Signed-off-by: Mark Brown --- include/linux/platform_data/spi-ath79.h | 19 +++++++++++++++++++ 1 file changed, 19 insertions(+) create mode 100644 include/linux/platform_data/spi-ath79.h (limited to 'include/linux') diff --git a/include/linux/platform_data/spi-ath79.h b/include/linux/platform_data/spi-ath79.h new file mode 100644 index 000000000000..aa71216edf99 --- /dev/null +++ b/include/linux/platform_data/spi-ath79.h @@ -0,0 +1,19 @@ +/* + * Platform data definition for Atheros AR71XX/AR724X/AR913X SPI controller + * + * Copyright (C) 2008-2010 Gabor Juhos + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published + * by the Free Software Foundation. + */ + +#ifndef _ATH79_SPI_PLATFORM_H +#define _ATH79_SPI_PLATFORM_H + +struct ath79_spi_platform_data { + unsigned bus_num; + unsigned num_chipselect; +}; + +#endif /* _ATH79_SPI_PLATFORM_H */ -- cgit v1.2.3 From 6d7fbce7da0cd06ff3f3f30e009a15a6243f0bc0 Mon Sep 17 00:00:00 2001 From: Al Viro Date: Thu, 17 Jan 2019 12:02:57 -0500 Subject: kill kernfs_pin_sb() unused now and impossible to use safely anyway. 
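Going back to the spi-ath79 platform data above, a board file outside of arch/mips could now hand the controller its configuration roughly as follows; the device name and the values are assumptions for illustration.

#include <linux/platform_device.h>
#include <linux/platform_data/spi-ath79.h>

static struct ath79_spi_platform_data ath79_spi_pdata = {
	.bus_num	= 0,
	.num_chipselect	= 3,
};

static struct platform_device ath79_spi_device = {
	.name	= "ath79-spi",
	.id	= -1,
	.dev	= {
		.platform_data = &ath79_spi_pdata,
	},
};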
Signed-off-by: Al Viro --- include/linux/kernfs.h | 1 - 1 file changed, 1 deletion(-) (limited to 'include/linux') diff --git a/include/linux/kernfs.h b/include/linux/kernfs.h index 5b36b1287a5a..44acb4c3659c 100644 --- a/include/linux/kernfs.h +++ b/include/linux/kernfs.h @@ -357,7 +357,6 @@ struct dentry *kernfs_mount_ns(struct file_system_type *fs_type, int flags, struct kernfs_root *root, unsigned long magic, bool *new_sb_created, const void *ns); void kernfs_kill_sb(struct super_block *sb); -struct super_block *kernfs_pin_sb(struct kernfs_root *root, const void *ns); void kernfs_init(void); -- cgit v1.2.3 From ecfc937210e5fdc6554e49b2a735ff22e72ae3f0 Mon Sep 17 00:00:00 2001 From: Florian Fainelli Date: Tue, 15 Jan 2019 15:06:11 -0800 Subject: net: dsa: Split platform data to header file Instead of having net/dsa.h contain both the internal switch tree/driver structures, split the relevant platform_data parts into include/linux/platform_data/dsa.h and make that header be included by net/dsa.h in order not to break any setup. A subsequent set of patches will update code including net/dsa.h to include only the platform_data header. Signed-off-by: Florian Fainelli Signed-off-by: David S. Miller --- include/linux/platform_data/dsa.h | 68 +++++++++++++++++++++++++++++++++++++++ 1 file changed, 68 insertions(+) create mode 100644 include/linux/platform_data/dsa.h (limited to 'include/linux') diff --git a/include/linux/platform_data/dsa.h b/include/linux/platform_data/dsa.h new file mode 100644 index 000000000000..d4d9bf2060a6 --- /dev/null +++ b/include/linux/platform_data/dsa.h @@ -0,0 +1,68 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef __DSA_PDATA_H +#define __DSA_PDATA_H + +struct device; +struct net_device; + +#define DSA_MAX_SWITCHES 4 +#define DSA_MAX_PORTS 12 +#define DSA_RTABLE_NONE -1 + +struct dsa_chip_data { + /* + * How to access the switch configuration registers. + */ + struct device *host_dev; + int sw_addr; + + /* + * Reference to network devices + */ + struct device *netdev[DSA_MAX_PORTS]; + + /* set to size of eeprom if supported by the switch */ + int eeprom_len; + + /* Device tree node pointer for this specific switch chip + * used during switch setup in case additional properties + * and resources needs to be used + */ + struct device_node *of_node; + + /* + * The names of the switch's ports. Use "cpu" to + * designate the switch port that the cpu is connected to, + * "dsa" to indicate that this port is a DSA link to + * another switch, NULL to indicate the port is unused, + * or any other string to indicate this is a physical port. + */ + char *port_names[DSA_MAX_PORTS]; + struct device_node *port_dn[DSA_MAX_PORTS]; + + /* + * An array of which element [a] indicates which port on this + * switch should be used to send packets to that are destined + * for switch a. Can be NULL if there is only one switch chip. + */ + s8 rtable[DSA_MAX_SWITCHES]; +}; + +struct dsa_platform_data { + /* + * Reference to a Linux network interface that connects + * to the root switch chip of the tree. + */ + struct device *netdev; + struct net_device *of_netdev; + + /* + * Info structs describing each of the switch chips + * connected via this network interface. 
+ */ + int nr_chips; + struct dsa_chip_data *chip; +}; + + +#endif /* __DSA_PDATA_H */ -- cgit v1.2.3 From 8cfb5faf32e85b62f08cfe242ce80b2864d0b8f3 Mon Sep 17 00:00:00 2001 From: Florian Fainelli Date: Tue, 15 Jan 2019 15:06:13 -0800 Subject: net: dsa: Include platform_data header file b53 and mv88e6xxx support passing platform_data, and now that we have split the platform_data portion from the main net/dsa.h header file, include only the relevant parts. Signed-off-by: Florian Fainelli Signed-off-by: David S. Miller --- include/linux/platform_data/b53.h | 2 +- include/linux/platform_data/mv88e6xxx.h | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) (limited to 'include/linux') diff --git a/include/linux/platform_data/b53.h b/include/linux/platform_data/b53.h index 8eaef2f2b691..c3b61ead41f2 100644 --- a/include/linux/platform_data/b53.h +++ b/include/linux/platform_data/b53.h @@ -20,7 +20,7 @@ #define __B53_H #include -#include +#include struct b53_platform_data { /* Must be first such that dsa_register_switch() can access it */ diff --git a/include/linux/platform_data/mv88e6xxx.h b/include/linux/platform_data/mv88e6xxx.h index f63af2955ea0..963730b44aea 100644 --- a/include/linux/platform_data/mv88e6xxx.h +++ b/include/linux/platform_data/mv88e6xxx.h @@ -2,7 +2,7 @@ #ifndef __DSA_MV88E6XXX_H #define __DSA_MV88E6XXX_H -#include +#include struct dsa_mv88e6xxx_pdata { /* Must be first, such that dsa_register_switch() can access this -- cgit v1.2.3 From 5db5ea995fc2fa89fdef61ef3a658cbb41a24222 Mon Sep 17 00:00:00 2001 From: Florian Fainelli Date: Tue, 15 Jan 2019 15:09:35 -0800 Subject: net: phy: Add helpers to determine if PHY driver is generic We are already checking in phy_detach() that the PHY driver is of generic kind (1G or 10G) and we are going to make use of that in the SFP layer as well for 1000BaseT SFP modules, so expose helper functions to return that information. Signed-off-by: Florian Fainelli Reviewed-by: Andrew Lunn Signed-off-by: David S. Miller --- include/linux/phy.h | 3 +++ 1 file changed, 3 insertions(+) (limited to 'include/linux') diff --git a/include/linux/phy.h b/include/linux/phy.h index 3b051f761450..f1c19bf8c658 100644 --- a/include/linux/phy.h +++ b/include/linux/phy.h @@ -1183,4 +1183,7 @@ module_exit(phy_module_exit) #define module_phy_driver(__phy_drivers) \ phy_module_driver(__phy_drivers, ARRAY_SIZE(__phy_drivers)) +bool phy_driver_is_genphy(struct phy_device *phydev); +bool phy_driver_is_genphy_10g(struct phy_device *phydev); + #endif /* __PHY_H */ -- cgit v1.2.3 From 44021606298870e4adc641ef3927e7bb47ca8236 Mon Sep 17 00:00:00 2001 From: Yangtao Li Date: Tue, 15 Jan 2019 12:22:10 -0500 Subject: cpuidle: use BIT() for idle state flags and remove CPUIDLE_DRIVER_FLAGS_MASK Use BIT() macro to do a small tidy-up. CPUIDLE_DRIVER_FLAGS_MASK is not used, so remove it. Signed-off-by: Yangtao Li Signed-off-by: Rafael J. 
Wysocki --- include/linux/cpuidle.h | 8 +++----- 1 file changed, 3 insertions(+), 5 deletions(-) (limited to 'include/linux') diff --git a/include/linux/cpuidle.h b/include/linux/cpuidle.h index 4dff74f48d4b..3b39472324a3 100644 --- a/include/linux/cpuidle.h +++ b/include/linux/cpuidle.h @@ -69,11 +69,9 @@ struct cpuidle_state { /* Idle State Flags */ #define CPUIDLE_FLAG_NONE (0x00) -#define CPUIDLE_FLAG_POLLING (0x01) /* polling state */ -#define CPUIDLE_FLAG_COUPLED (0x02) /* state applies to multiple cpus */ -#define CPUIDLE_FLAG_TIMER_STOP (0x04) /* timer is stopped on this state */ - -#define CPUIDLE_DRIVER_FLAGS_MASK (0xFFFF0000) +#define CPUIDLE_FLAG_POLLING BIT(0) /* polling state */ +#define CPUIDLE_FLAG_COUPLED BIT(1) /* state applies to multiple cpus */ +#define CPUIDLE_FLAG_TIMER_STOP BIT(2) /* timer is stopped on this state */ struct cpuidle_device_kobj; struct cpuidle_state_kobj; -- cgit v1.2.3 From 87b0984ebfabafcfe959e52ca5cdab5eeb2d60c0 Mon Sep 17 00:00:00 2001 From: Petr Machata Date: Wed, 16 Jan 2019 23:06:50 +0000 Subject: net: Add extack argument to ndo_fdb_add() Drivers may not be able to support certain FDB entries, and an error code is insufficient to give clear hints as to the reasons of rejection. In order to make it possible to communicate the rejection reason, extend ndo_fdb_add() with an extack argument. Adapt the existing implementations of ndo_fdb_add() to take the parameter (and ignore it). Pass the extack parameter when invoking ndo_fdb_add() from rtnl_fdb_add(). Signed-off-by: Petr Machata Signed-off-by: David S. Miller --- include/linux/netdevice.h | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) (limited to 'include/linux') diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h index 1377d085ef99..a57b9a853aab 100644 --- a/include/linux/netdevice.h +++ b/include/linux/netdevice.h @@ -1152,7 +1152,8 @@ struct dev_ifalias { * * int (*ndo_fdb_add)(struct ndmsg *ndm, struct nlattr *tb[], * struct net_device *dev, - * const unsigned char *addr, u16 vid, u16 flags) + * const unsigned char *addr, u16 vid, u16 flags, + * struct netlink_ext_ack *extack); * Adds an FDB entry to dev for addr. * int (*ndo_fdb_del)(struct ndmsg *ndm, struct nlattr *tb[], * struct net_device *dev, @@ -1376,7 +1377,8 @@ struct net_device_ops { struct net_device *dev, const unsigned char *addr, u16 vid, - u16 flags); + u16 flags, + struct netlink_ext_ack *extack); int (*ndo_fdb_del)(struct ndmsg *ndm, struct nlattr *tb[], struct net_device *dev, -- cgit v1.2.3 From 8b59bfe83cf15f755024e88812e057af7341f525 Mon Sep 17 00:00:00 2001 From: YueHaibing Date: Thu, 17 Jan 2019 15:22:20 +0800 Subject: qed: remove duplicated include from qed_if.h Remove duplicated include. Signed-off-by: YueHaibing Acked-by: Denis Bolotin Signed-off-by: David S. Miller --- include/linux/qed/qed_if.h | 1 - 1 file changed, 1 deletion(-) (limited to 'include/linux') diff --git a/include/linux/qed/qed_if.h b/include/linux/qed/qed_if.h index 91c536a01b56..5f818fda96bd 100644 --- a/include/linux/qed/qed_if.h +++ b/include/linux/qed/qed_if.h @@ -38,7 +38,6 @@ #include #include #include -#include #include #include #include -- cgit v1.2.3 From 58fa4a410fc31afe08d0d0c6b6d8860c22ec17c2 Mon Sep 17 00:00:00 2001 From: Arnd Bergmann Date: Wed, 16 Jan 2019 14:15:20 +0100 Subject: ipc: introduce ksys_ipc()/compat_ksys_ipc() for s390 The sys_ipc() and compat_ksys_ipc() functions are meant to only be used from the system call table, not called by another function. 
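Returning to the ndo_fdb_add() extension above, a driver can now attach a human-readable reason to a rejected FDB entry. A hedged sketch, with the driver prefix and supported cases invented:

#include <linux/netdevice.h>
#include <linux/netlink.h>

static int foo_ndo_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
			   struct net_device *dev,
			   const unsigned char *addr, u16 vid, u16 flags,
			   struct netlink_ext_ack *extack)
{
	if (vid) {
		NL_SET_ERR_MSG_MOD(extack,
				   "FDB entries with a VLAN are not supported");
		return -EOPNOTSUPP;
	}

	return dev_uc_add_excl(dev, addr);
}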
Introduce ksys_*() interfaces for this purpose, as we have done for many other system calls. Link: https://lore.kernel.org/lkml/20190116131527.2071570-3-arnd@arndb.de Signed-off-by: Arnd Bergmann Reviewed-by: Heiko Carstens [heiko.carstens@de.ibm.com: compile fix for !CONFIG_COMPAT] Signed-off-by: Heiko Carstens Signed-off-by: Martin Schwidefsky --- include/linux/syscalls.h | 4 ++++ 1 file changed, 4 insertions(+) (limited to 'include/linux') diff --git a/include/linux/syscalls.h b/include/linux/syscalls.h index 257cccba3062..fb63045a0fb6 100644 --- a/include/linux/syscalls.h +++ b/include/linux/syscalls.h @@ -1185,6 +1185,10 @@ unsigned long ksys_mmap_pgoff(unsigned long addr, unsigned long len, unsigned long prot, unsigned long flags, unsigned long fd, unsigned long pgoff); ssize_t ksys_readahead(int fd, loff_t offset, size_t count); +int ksys_ipc(unsigned int call, int first, unsigned long second, + unsigned long third, void __user * ptr, long fifth); +int compat_ksys_ipc(u32 call, int first, int second, + u32 third, u32 ptr, u32 fifth); /* * The following kernel syscall equivalents are just wrappers to fs-internal -- cgit v1.2.3 From 5f620bb6439ea8f354cfe4c7d47887df9d3acaf0 Mon Sep 17 00:00:00 2001 From: Ran Wang Date: Thu, 17 Jan 2019 09:10:55 +0000 Subject: drivers: usb :fsl: Remove USB Errata checking code Remove USB errata checking code from driver. Applicability of erratum is retrieved by reading corresponding property in device tree. This property is written during device tree fixup. Besides, replace spaces with tabs to make code aligned. Signed-off-by: Ramneek Mehresh Signed-off-by: Nikhil Badola Signed-off-by: Yinbo Zhu Signed-off-by: Ran Wang Acked-by: Alan Stern Signed-off-by: Greg Kroah-Hartman --- include/linux/fsl_devices.h | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) (limited to 'include/linux') diff --git a/include/linux/fsl_devices.h b/include/linux/fsl_devices.h index 60cef8227534..5da56a674f2f 100644 --- a/include/linux/fsl_devices.h +++ b/include/linux/fsl_devices.h @@ -98,10 +98,11 @@ struct fsl_usb2_platform_data { unsigned suspended:1; unsigned already_suspended:1; - unsigned has_fsl_erratum_a007792:1; - unsigned has_fsl_erratum_a005275:1; + unsigned has_fsl_erratum_a007792:1; + unsigned has_fsl_erratum_14:1; + unsigned has_fsl_erratum_a005275:1; unsigned has_fsl_erratum_a005697:1; - unsigned check_phy_clk_valid:1; + unsigned check_phy_clk_valid:1; /* register save area for suspend/resume */ u32 pm_command; -- cgit v1.2.3 From 2ff5c5a1dc6e6c502e0a3e49db4e792804e43693 Mon Sep 17 00:00:00 2001 From: Martin Hostettler Date: Sat, 15 Dec 2018 15:34:20 +0100 Subject: vt: refactor vc_ques to allow of other private sequences. The vc_ques keeps track if a csi sequence is a private DEC control function beginning with '?'. Nowadays some private control functions begin with '>' and '='. Switch the code to instead use a new 3-bit vc_priv that allows for all private use parameter prefixes. 
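A sketch of how the wider vc_priv field could record the private-use prefix character; the enum and function names here are illustrative and not necessarily the symbols used in drivers/tty/vt/vt.c.

#include <linux/console_struct.h>

enum vc_private_prefix { EPecma = 0, EPdec, EPgt, EPlt, EPeq };

static void vc_note_private_prefix(struct vc_data *vc, int c)
{
	switch (c) {
	case '?':
		vc->vc_priv = EPdec;
		break;
	case '>':
		vc->vc_priv = EPgt;
		break;
	case '=':
		vc->vc_priv = EPeq;
		break;
	default:
		vc->vc_priv = EPecma;
		break;
	}
}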
Signed-off-by: Martin Hostettler Signed-off-by: Greg Kroah-Hartman --- include/linux/console_struct.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'include/linux') diff --git a/include/linux/console_struct.h b/include/linux/console_struct.h index ab137f97ecbd..ed798e114663 100644 --- a/include/linux/console_struct.h +++ b/include/linux/console_struct.h @@ -119,7 +119,7 @@ struct vc_data { unsigned int vc_s_blink : 1; unsigned int vc_s_reverse : 1; /* misc */ - unsigned int vc_ques : 1; + unsigned int vc_priv : 3; unsigned int vc_need_wrap : 1; unsigned int vc_can_do_color : 1; unsigned int vc_report_mouse : 2; -- cgit v1.2.3 From 202e651cd43c69a43f75b445e90f55b59f9af0ad Mon Sep 17 00:00:00 2001 From: Florian Westphal Date: Tue, 15 Jan 2019 22:03:34 +0100 Subject: netfilter: conntrack: gre: convert rwlock to rcu We can use gre. Lock is only needed when a new expectation is added. In case a single spinlock proves to be problematic we can either add one per netns or use an array of locks combined with net_hash_mix() or similar to pick the 'correct' one. But given this is only needed for an expectation rather than per packet a single one should be ok. Signed-off-by: Florian Westphal Signed-off-by: Pablo Neira Ayuso --- include/linux/netfilter/nf_conntrack_proto_gre.h | 1 + 1 file changed, 1 insertion(+) (limited to 'include/linux') diff --git a/include/linux/netfilter/nf_conntrack_proto_gre.h b/include/linux/netfilter/nf_conntrack_proto_gre.h index 6989e2e4eabf..222c9d3d453f 100644 --- a/include/linux/netfilter/nf_conntrack_proto_gre.h +++ b/include/linux/netfilter/nf_conntrack_proto_gre.h @@ -19,6 +19,7 @@ struct nf_conn; struct nf_ct_gre_keymap { struct list_head list; struct nf_conntrack_tuple tuple; + struct rcu_head rcu; }; enum grep_conntrack { -- cgit v1.2.3 From 22fc4c4c9fd60427bcda00878cee94e7622cfa7a Mon Sep 17 00:00:00 2001 From: Florian Westphal Date: Tue, 15 Jan 2019 22:03:35 +0100 Subject: netfilter: conntrack: gre: switch module to be built-in This makes the last of the modular l4 trackers 'bool'. After this, all infrastructure to handle dynamic l4 protocol registration becomes obsolete and can be removed in followup patches. Old: 302824 net/netfilter/nf_conntrack.ko 21504 net/netfilter/nf_conntrack_proto_gre.ko New: 313728 net/netfilter/nf_conntrack.ko Old: text data bss dec hex filename 6281 1732 4 8017 1f51 nf_conntrack_proto_gre.ko 108356 20613 236 129205 1f8b5 nf_conntrack.ko New: 112095 21381 240 133716 20a54 nf_conntrack.ko The size increase is only temporary. 
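With the rwlock gone, readers of the keymap list from the two GRE patches above can walk it under RCU. A hedged sketch; the list head parameter and the helper name are assumptions, not the exact symbols in nf_conntrack_proto_gre.c.

#include <linux/rculist.h>
#include <linux/netfilter/nf_conntrack_proto_gre.h>
#include <net/netfilter/nf_conntrack_tuple.h>

static bool gre_keymap_present(struct list_head *keymap_list,
			       const struct nf_conntrack_tuple *t)
{
	struct nf_ct_gre_keymap *km;
	bool found = false;

	rcu_read_lock();
	list_for_each_entry_rcu(km, keymap_list, list) {
		if (nf_ct_tuple_equal(&km->tuple, t)) {
			found = true;
			break;
		}
	}
	rcu_read_unlock();

	return found;
}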
Signed-off-by: Florian Westphal Signed-off-by: Pablo Neira Ayuso --- include/linux/netfilter/nf_conntrack_proto_gre.h | 14 +------------- 1 file changed, 1 insertion(+), 13 deletions(-) (limited to 'include/linux') diff --git a/include/linux/netfilter/nf_conntrack_proto_gre.h b/include/linux/netfilter/nf_conntrack_proto_gre.h index 222c9d3d453f..59714e9ee4ef 100644 --- a/include/linux/netfilter/nf_conntrack_proto_gre.h +++ b/include/linux/netfilter/nf_conntrack_proto_gre.h @@ -22,23 +22,11 @@ struct nf_ct_gre_keymap { struct rcu_head rcu; }; -enum grep_conntrack { - GRE_CT_UNREPLIED, - GRE_CT_REPLIED, - GRE_CT_MAX -}; - -struct netns_proto_gre { - struct nf_proto_net nf; - rwlock_t keymap_lock; - struct list_head keymap_list; - unsigned int gre_timeouts[GRE_CT_MAX]; -}; - /* add new tuple->key_reply pair to keymap */ int nf_ct_gre_keymap_add(struct nf_conn *ct, enum ip_conntrack_dir dir, struct nf_conntrack_tuple *t); +void nf_ct_gre_keymap_flush(struct net *net); /* delete keymap entries */ void nf_ct_gre_keymap_destroy(struct nf_conn *ct); -- cgit v1.2.3 From df5e1629087a45ca915fa0f69ea662175261855e Mon Sep 17 00:00:00 2001 From: Florian Westphal Date: Tue, 15 Jan 2019 22:03:37 +0100 Subject: netfilter: conntrack: remove pkt_to_tuple callback GRE is now builtin, so we can handle it via direct call and remove the callback. Signed-off-by: Florian Westphal Signed-off-by: Pablo Neira Ayuso --- include/linux/netfilter/nf_conntrack_proto_gre.h | 2 ++ 1 file changed, 2 insertions(+) (limited to 'include/linux') diff --git a/include/linux/netfilter/nf_conntrack_proto_gre.h b/include/linux/netfilter/nf_conntrack_proto_gre.h index 59714e9ee4ef..25f9a770fb84 100644 --- a/include/linux/netfilter/nf_conntrack_proto_gre.h +++ b/include/linux/netfilter/nf_conntrack_proto_gre.h @@ -30,5 +30,7 @@ void nf_ct_gre_keymap_flush(struct net *net); /* delete keymap entries */ void nf_ct_gre_keymap_destroy(struct nf_conn *ct); +bool gre_pkt_to_tuple(const struct sk_buff *skb, unsigned int dataoff, + struct net *net, struct nf_conntrack_tuple *tuple); #endif /* __KERNEL__ */ #endif /* _CONNTRACK_PROTO_GRE_H */ -- cgit v1.2.3 From 570d0200123fb4f809aa2f6226e93a458d664d70 Mon Sep 17 00:00:00 2001 From: Wei Yang Date: Fri, 18 Jan 2019 10:34:59 +0800 Subject: driver core: move device->knode_class to device_private As the description of struct device_private says, it stores data which is private to driver core. And it already has similar fields like: knode_parent, knode_driver and knode_bus. It therefore looks more proper to put knode_class together with those fields to make it private to driver core. This patch moves device->knode_class to device_private to make it comply with code convention. Signed-off-by: Wei Yang Reviewed-by: Rafael J. Wysocki Signed-off-by: Greg Kroah-Hartman --- include/linux/device.h | 1 - 1 file changed, 1 deletion(-) (limited to 'include/linux') diff --git a/include/linux/device.h b/include/linux/device.h index 6cb4640b6160..d0e452fd0bff 100644 --- a/include/linux/device.h +++ b/include/linux/device.h @@ -1035,7 +1035,6 @@ struct device { spinlock_t devres_lock; struct list_head devres_head; - struct klist_node knode_class; struct class *class; const struct attribute_group **groups; /* optional groups */ -- cgit v1.2.3 From 1cfb2a512e74e577bb0ed7c8d76df90a41a83f6a Mon Sep 17 00:00:00 2001 From: Tetsuo Handa Date: Fri, 18 Jan 2019 19:15:59 +0900 Subject: LSM: Make lsm_early_cred() and lsm_early_task() local functions.
Since current->cred == current->real_cred when ordered_lsm_init() is called, and lsm_early_cred()/lsm_early_task() need to be called between the amount of required bytes is determined and module specific initialization function is called, we can move these calls from individual modules to ordered_lsm_init(). Signed-off-by: Tetsuo Handa Acked-by: Casey Schaufler Signed-off-by: James Morris --- include/linux/lsm_hooks.h | 5 ----- 1 file changed, 5 deletions(-) (limited to 'include/linux') diff --git a/include/linux/lsm_hooks.h b/include/linux/lsm_hooks.h index 195707210975..22fc786d723a 100644 --- a/include/linux/lsm_hooks.h +++ b/include/linux/lsm_hooks.h @@ -2112,9 +2112,4 @@ static inline void security_delete_hooks(struct security_hook_list *hooks, extern int lsm_inode_alloc(struct inode *inode); -#ifdef CONFIG_SECURITY -void __init lsm_early_cred(struct cred *cred); -void __init lsm_early_task(struct task_struct *task); -#endif - #endif /* ! __LINUX_LSM_HOOKS_H */ -- cgit v1.2.3 From 7527a7b157d1191b23562ed70154ae93bd65f845 Mon Sep 17 00:00:00 2001 From: Parav Pandit Date: Thu, 17 Jan 2019 20:14:15 +0200 Subject: IB/core: Simplify rdma cgroup registration RDMA cgroup registration routine always returns success, so simplify function to be void and run clang formatter over whole CONFIG_CGROUP_RDMA art of core_priv.h. This reduces unwinding error path for regular registration and future net namespace change functionality for rdma device. Signed-off-by: Parav Pandit Signed-off-by: Leon Romanovsky Acked-by: Tejun Heo Signed-off-by: Jason Gunthorpe --- include/linux/cgroup_rdma.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'include/linux') diff --git a/include/linux/cgroup_rdma.h b/include/linux/cgroup_rdma.h index e94290b29e99..ef1bae2983f3 100644 --- a/include/linux/cgroup_rdma.h +++ b/include/linux/cgroup_rdma.h @@ -39,7 +39,7 @@ struct rdmacg_device { * APIs for RDMA/IB stack to publish when a device wants to * participate in resource accounting */ -int rdmacg_register_device(struct rdmacg_device *device); +void rdmacg_register_device(struct rdmacg_device *device); void rdmacg_unregister_device(struct rdmacg_device *device); /* APIs for RDMA/IB stack to charge/uncharge pool specific resources */ -- cgit v1.2.3 From e302c2a5fe0ca63b8fcc93389917625f486e0670 Mon Sep 17 00:00:00 2001 From: Heiner Kallweit Date: Wed, 16 Jan 2019 19:47:57 +0100 Subject: net: phy: remove state PHY_CHANGELINK Since recent changes to the phylib state machine state PHY_CHANGELINK isn't used any longer. Therefore let's remove it. Signed-off-by: Heiner Kallweit Signed-off-by: David S. Miller --- include/linux/phy.h | 6 ------ 1 file changed, 6 deletions(-) (limited to 'include/linux') diff --git a/include/linux/phy.h b/include/linux/phy.h index f1c19bf8c658..232d93b9cea4 100644 --- a/include/linux/phy.h +++ b/include/linux/phy.h @@ -304,11 +304,6 @@ struct phy_device *mdiobus_scan(struct mii_bus *bus, int addr); * - irq or timer will set NOLINK if link goes down * - phy_stop moves to HALTED * - * CHANGELINK: PHY experienced a change in link state - * - timer moves to RUNNING if link - * - timer moves to NOLINK if the link is down - * - phy_stop moves to HALTED - * * HALTED: PHY is up, but no polling or interrupts are done. Or * PHY is in an error state. 
* @@ -327,7 +322,6 @@ enum phy_state { PHY_RUNNING, PHY_NOLINK, PHY_FORCING, - PHY_CHANGELINK, PHY_RESUMING }; -- cgit v1.2.3 From bb658ab7b8f2828b35c207a95cb0c05965721022 Mon Sep 17 00:00:00 2001 From: Heiner Kallweit Date: Thu, 17 Jan 2019 20:09:21 +0100 Subject: net: phy: remove phy_stop_interrupts Interrupts have been disabled in phy_stop() already. So we can remove phy_stop_interrupts() and free the interrupt in phy_disconnect() directly. Signed-off-by: Heiner Kallweit Reviewed-by: Andrew Lunn Signed-off-by: David S. Miller --- include/linux/phy.h | 1 - 1 file changed, 1 deletion(-) (limited to 'include/linux') diff --git a/include/linux/phy.h b/include/linux/phy.h index 232d93b9cea4..0990f913d649 100644 --- a/include/linux/phy.h +++ b/include/linux/phy.h @@ -951,7 +951,6 @@ int phy_aneg_done(struct phy_device *phydev); int phy_speed_down(struct phy_device *phydev, bool sync); int phy_speed_up(struct phy_device *phydev); -int phy_stop_interrupts(struct phy_device *phydev); int phy_restart_aneg(struct phy_device *phydev); int phy_reset_after_clk_enable(struct phy_device *phydev); -- cgit v1.2.3 From 59c28058fa7bb1cc7ab8b2c5607093cbbefafeb4 Mon Sep 17 00:00:00 2001 From: Jakub Kicinski Date: Fri, 18 Jan 2019 10:46:13 -0800 Subject: net: netlink: add helper to retrieve NETLINK_F_STRICT_CHK Dumps can read state of the NETLINK_F_STRICT_CHK flag from a field in the callback structure. For non-dump GET requests we need a way to access the state of that flag from a socket. Signed-off-by: Jakub Kicinski Signed-off-by: David S. Miller --- include/linux/netlink.h | 1 + 1 file changed, 1 insertion(+) (limited to 'include/linux') diff --git a/include/linux/netlink.h b/include/linux/netlink.h index 4e8add270200..593d1b9c33a8 100644 --- a/include/linux/netlink.h +++ b/include/linux/netlink.h @@ -126,6 +126,7 @@ void __netlink_clear_multicast_users(struct sock *sk, unsigned int group); void netlink_ack(struct sk_buff *in_skb, struct nlmsghdr *nlh, int err, const struct netlink_ext_ack *extack); int netlink_has_listeners(struct sock *sk, unsigned int group); +bool netlink_strict_get_check(struct sk_buff *skb); int netlink_unicast(struct sock *ssk, struct sk_buff *skb, __u32 portid, int nonblock); int netlink_broadcast(struct sock *ssk, struct sk_buff *skb, __u32 portid, -- cgit v1.2.3 From f5d782d46aa5d4dd369e6560ce5227136b58926f Mon Sep 17 00:00:00 2001 From: Sebastian Reichel Date: Thu, 13 Dec 2018 02:38:58 +0100 Subject: power: supply: isp1704: switch to gpiod API This migrates isp1704 driver from old GPIO API to new descriptor based GPIO API and drops useless platform data as a side-effect. Migration is simple, since all mainline users are DT based and DT API does not change. 
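With the descriptor-based API the driver requests its GPIO by function name rather than by number, roughly along these lines (sketch only; the connection id below is illustrative and not necessarily the one the driver uses):

  struct gpio_desc *enable;

  enable = devm_gpiod_get_optional(dev, "enable", GPIOD_OUT_LOW);
  if (IS_ERR(enable))
          return PTR_ERR(enable);

  gpiod_set_value(enable, 1);     /* power up the transceiver */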
Out of tree users of the platform data need to migrate to gpiod_lookup_table as described here: Documentation/driver-api/gpio/board.rst Reviewed-by: Linus Walleij Acked-by: Pavel Machek Signed-off-by: Sebastian Reichel --- include/linux/power/isp1704_charger.h | 30 ------------------------------ 1 file changed, 30 deletions(-) delete mode 100644 include/linux/power/isp1704_charger.h (limited to 'include/linux') diff --git a/include/linux/power/isp1704_charger.h b/include/linux/power/isp1704_charger.h deleted file mode 100644 index 0105d9e7af85..000000000000 --- a/include/linux/power/isp1704_charger.h +++ /dev/null @@ -1,30 +0,0 @@ -/* - * ISP1704 USB Charger Detection driver - * - * Copyright (C) 2011 Nokia Corporation - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA - */ - - -#ifndef __ISP1704_CHARGER_H -#define __ISP1704_CHARGER_H - -struct isp1704_charger_data { - void (*set_power)(bool on); - int enable_gpio; -}; - -#endif -- cgit v1.2.3 From 486efe9f8e30bac1e236f867df164f4966f3e207 Mon Sep 17 00:00:00 2001 From: Andrew Murray Date: Thu, 10 Jan 2019 13:53:24 +0000 Subject: perf/core: Add function to test for event exclusion flags Add a function that tests if any of the perf event exclusion flags are set on a given event. 
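A PMU driver that cannot honour these flags could then reject such events in its event_init() with a single call, e.g. (illustrative driver code, not part of this patch):

  static int foo_pmu_event_init(struct perf_event *event)
  {
          if (event_has_any_exclude_flag(event))
                  return -EINVAL;

          /* ... */
          return 0;
  }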
Signed-off-by: Andrew Murray Signed-off-by: Peter Zijlstra (Intel) Cc: Arnaldo Carvalho de Melo Cc: Benjamin Herrenschmidt Cc: Borislav Petkov Cc: Ivan Kokshaysky Cc: Linus Torvalds Cc: Mark Rutland Cc: Matt Turner Cc: Michael Ellerman Cc: Paul Mackerras Cc: Peter Zijlstra Cc: Richard Henderson Cc: Russell King Cc: Sascha Hauer Cc: Shawn Guo Cc: Thomas Gleixner Cc: Will Deacon Cc: linux-arm-kernel@lists.infradead.org Cc: linuxppc-dev@lists.ozlabs.org Cc: robin.murphy@arm.com Cc: suzuki.poulose@arm.com Link: https://lkml.kernel.org/r/1547128414-50693-3-git-send-email-andrew.murray@arm.com Signed-off-by: Ingo Molnar --- include/linux/perf_event.h | 9 +++++++++ 1 file changed, 9 insertions(+) (limited to 'include/linux') diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h index 1d5c551a5add..54a78d22f0a6 100644 --- a/include/linux/perf_event.h +++ b/include/linux/perf_event.h @@ -1004,6 +1004,15 @@ perf_event__output_id_sample(struct perf_event *event, extern void perf_log_lost_samples(struct perf_event *event, u64 lost); +static inline bool event_has_any_exclude_flag(struct perf_event *event) +{ + struct perf_event_attr *attr = &event->attr; + + return attr->exclude_idle || attr->exclude_user || + attr->exclude_kernel || attr->exclude_hv || + attr->exclude_guest || attr->exclude_host; +} + static inline bool is_sampling_event(struct perf_event *event) { return event->attr.sample_period != 0; -- cgit v1.2.3 From cc6795aeffea0a80d0baf9ad31ba926a6c42cef5 Mon Sep 17 00:00:00 2001 From: Andrew Murray Date: Thu, 10 Jan 2019 13:53:25 +0000 Subject: perf/core: Add PERF_PMU_CAP_NO_EXCLUDE for exclusion incapable PMUs Many PMU drivers do not have the capability to exclude counting events that occur in specific contexts such as idle, kernel, guest, etc. These drivers indicate this by returning an error in their event_init upon testing the events attribute flags. This approach is error prone and often inconsistent. Let's instead allow PMU drivers to advertise their inability to exclude based on context via a new capability: PERF_PMU_CAP_NO_EXCLUDE. This allows the perf core to reject requests for exclusion events where there is no support in the PMU. 
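Instead of open-coding the attribute checks, such a driver simply advertises the capability when registering its pmu, along these lines (illustrative only):

  static struct pmu foo_pmu = {
          .capabilities   = PERF_PMU_CAP_NO_EXCLUDE,
          .event_init     = foo_pmu_event_init,
          /* ... */
  };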
Signed-off-by: Andrew Murray Signed-off-by: Peter Zijlstra (Intel) Cc: Arnaldo Carvalho de Melo Cc: Benjamin Herrenschmidt Cc: Borislav Petkov Cc: Ivan Kokshaysky Cc: Linus Torvalds Cc: Mark Rutland Cc: Matt Turner Cc: Michael Ellerman Cc: Paul Mackerras Cc: Peter Zijlstra Cc: Richard Henderson Cc: Russell King Cc: Sascha Hauer Cc: Shawn Guo Cc: Thomas Gleixner Cc: Will Deacon Cc: linux-arm-kernel@lists.infradead.org Cc: linuxppc-dev@lists.ozlabs.org Cc: robin.murphy@arm.com Cc: suzuki.poulose@arm.com Link: https://lkml.kernel.org/r/1547128414-50693-4-git-send-email-andrew.murray@arm.com Signed-off-by: Ingo Molnar --- include/linux/perf_event.h | 1 + 1 file changed, 1 insertion(+) (limited to 'include/linux') diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h index 54a78d22f0a6..cec02dc63b51 100644 --- a/include/linux/perf_event.h +++ b/include/linux/perf_event.h @@ -244,6 +244,7 @@ struct perf_event; #define PERF_PMU_CAP_EXCLUSIVE 0x10 #define PERF_PMU_CAP_ITRACE 0x20 #define PERF_PMU_CAP_HETEROGENEOUS_CPUS 0x40 +#define PERF_PMU_CAP_NO_EXCLUDE 0x80 /** * struct pmu - generic performance monitoring unit -- cgit v1.2.3 From 8321be6a9df5c5cfbf3fb5f716caf8698a5a7016 Mon Sep 17 00:00:00 2001 From: Amit Kucheria Date: Mon, 21 Jan 2019 14:17:37 +0530 Subject: cpufreq: Replace open-coded << with BIT() Minor clean-up to use BIT() and keep checkpatch happy. Clean up the comment formatting while we're at it to make it easier to read. Signed-off-by: Amit Kucheria Reviewed-by: Stephen Boyd Signed-off-by: Rafael J. Wysocki --- include/linux/cpufreq.h | 25 +++++++++++++------------ 1 file changed, 13 insertions(+), 12 deletions(-) (limited to 'include/linux') diff --git a/include/linux/cpufreq.h b/include/linux/cpufreq.h index c86d6d8bdfed..bd7fbd6a4478 100644 --- a/include/linux/cpufreq.h +++ b/include/linux/cpufreq.h @@ -346,14 +346,15 @@ struct cpufreq_driver { }; /* flags */ -#define CPUFREQ_STICKY (1 << 0) /* driver isn't removed even if - all ->init() calls failed */ -#define CPUFREQ_CONST_LOOPS (1 << 1) /* loops_per_jiffy or other - kernel "constants" aren't - affected by frequency - transitions */ -#define CPUFREQ_PM_NO_WARN (1 << 2) /* don't warn on suspend/resume - speed mismatches */ + +/* driver isn't removed even if all ->init() calls failed */ +#define CPUFREQ_STICKY BIT(0) + +/* loops_per_jiffy or other kernel "constants" aren't affected by frequency transitions */ +#define CPUFREQ_CONST_LOOPS BIT(1) + +/* don't warn on suspend/resume speed mismatches */ +#define CPUFREQ_PM_NO_WARN BIT(2) /* * This should be set by platforms having multiple clock-domains, i.e. @@ -361,14 +362,14 @@ struct cpufreq_driver { * be created in cpu/cpu/cpufreq/ directory and so they can use the same * governor with different tunables for different clusters. */ -#define CPUFREQ_HAVE_GOVERNOR_PER_POLICY (1 << 3) +#define CPUFREQ_HAVE_GOVERNOR_PER_POLICY BIT(3) /* * Driver will do POSTCHANGE notifications from outside of their ->target() * routine and so must set cpufreq_driver->flags with this flag, so that core * can handle them specially. */ -#define CPUFREQ_ASYNC_NOTIFICATION (1 << 4) +#define CPUFREQ_ASYNC_NOTIFICATION BIT(4) /* * Set by drivers which want cpufreq core to check if CPU is running at a @@ -377,13 +378,13 @@ struct cpufreq_driver { * from the table. And if that fails, we will stop further boot process by * issuing a BUG_ON(). 
*/ -#define CPUFREQ_NEED_INITIAL_FREQ_CHECK (1 << 5) +#define CPUFREQ_NEED_INITIAL_FREQ_CHECK BIT(5) /* * Set by drivers to disallow use of governors with "dynamic_switching" flag * set. */ -#define CPUFREQ_NO_AUTO_DYNAMIC_SWITCHING (1 << 6) +#define CPUFREQ_NO_AUTO_DYNAMIC_SWITCHING BIT(6) int cpufreq_register_driver(struct cpufreq_driver *driver_data); int cpufreq_unregister_driver(struct cpufreq_driver *driver_data); -- cgit v1.2.3 From bbe7449e2599b58cf7b995461e2189998111f907 Mon Sep 17 00:00:00 2001 From: Phillip Potter Date: Mon, 21 Jan 2019 00:54:27 +0000 Subject: fs: common implementation of file type Many file systems use a copy&paste implementation of dirent to on-disk file type conversions. Create a common implementation to be used by file systems with some useful conversion helpers to reduce open coded file type conversions in file system code. Signed-off-by: Amir Goldstein Signed-off-by: Phillip Potter Signed-off-by: Jan Kara --- include/linux/fs.h | 17 +---------- include/linux/fs_types.h | 75 ++++++++++++++++++++++++++++++++++++++++++++++++ 2 files changed, 76 insertions(+), 16 deletions(-) create mode 100644 include/linux/fs_types.h (limited to 'include/linux') diff --git a/include/linux/fs.h b/include/linux/fs.h index 811c77743dad..92966678539d 100644 --- a/include/linux/fs.h +++ b/include/linux/fs.h @@ -37,6 +37,7 @@ #include #include #include +#include #include #include @@ -1699,22 +1700,6 @@ int fiemap_fill_next_extent(struct fiemap_extent_info *info, u64 logical, u64 phys, u64 len, u32 flags); int fiemap_check_flags(struct fiemap_extent_info *fieinfo, u32 fs_flags); -/* - * File types - * - * NOTE! These match bits 12..15 of stat.st_mode - * (ie "(i_mode >> 12) & 15"). - */ -#define DT_UNKNOWN 0 -#define DT_FIFO 1 -#define DT_CHR 2 -#define DT_DIR 4 -#define DT_BLK 6 -#define DT_REG 8 -#define DT_LNK 10 -#define DT_SOCK 12 -#define DT_WHT 14 - /* * This is the "filldir" function type, used by readdir() to let * the kernel specify what kind of dirent layout it wants to have. diff --git a/include/linux/fs_types.h b/include/linux/fs_types.h new file mode 100644 index 000000000000..54816791196f --- /dev/null +++ b/include/linux/fs_types.h @@ -0,0 +1,75 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _LINUX_FS_TYPES_H +#define _LINUX_FS_TYPES_H + +/* + * This is a header for the common implementation of dirent + * to fs on-disk file type conversion. Although the fs on-disk + * bits are specific to every file system, in practice, many + * file systems use the exact same on-disk format to describe + * the lower 3 file type bits that represent the 7 POSIX file + * types. + * + * It is important to note that the definitions in this + * header MUST NOT change. This would break both the + * userspace ABI and the on-disk format of filesystems + * using this code. + * + * All those file systems can use this generic code for the + * conversions. + */ + +/* + * struct dirent file types + * exposed to user via getdents(2), readdir(3) + * + * These match bits 12..15 of stat.st_mode + * (ie "(i_mode >> 12) & 15"). + */ +#define S_DT_SHIFT 12 +#define S_DT(mode) (((mode) & S_IFMT) >> S_DT_SHIFT) +#define S_DT_MASK (S_IFMT >> S_DT_SHIFT) + +/* these are defined by POSIX and also present in glibc's dirent.h */ +#define DT_UNKNOWN 0 +#define DT_FIFO 1 +#define DT_CHR 2 +#define DT_DIR 4 +#define DT_BLK 6 +#define DT_REG 8 +#define DT_LNK 10 +#define DT_SOCK 12 +#define DT_WHT 14 + +#define DT_MAX (S_DT_MASK + 1) /* 16 */ + +/* + * fs on-disk file types. 
+ * Only the low 3 bits are used for the POSIX file types. + * Other bits are reserved for fs private use. + * These definitions are shared and used by multiple filesystems, + * and MUST NOT change under any circumstances. + * + * Note that no fs currently stores the whiteout type on-disk, + * so whiteout dirents are exposed to user as DT_CHR. + */ +#define FT_UNKNOWN 0 +#define FT_REG_FILE 1 +#define FT_DIR 2 +#define FT_CHRDEV 3 +#define FT_BLKDEV 4 +#define FT_FIFO 5 +#define FT_SOCK 6 +#define FT_SYMLINK 7 + +#define FT_MAX 8 + +/* + * declarations for helper functions, accompanying implementation + * is in fs/fs_types.c + */ +extern unsigned char fs_ftype_to_dtype(unsigned int filetype); +extern unsigned char fs_umode_to_ftype(umode_t mode); +extern unsigned char fs_umode_to_dtype(umode_t mode); + +#endif -- cgit v1.2.3 From dc60a4cfb77c891f67f31953025208067b05883c Mon Sep 17 00:00:00 2001 From: Hans Verkuil Date: Thu, 17 Jan 2019 11:47:55 -0200 Subject: media: soc_camera_platform: remove obsolete soc_camera test driver This is a test stub driver for soc_camera. Since soc_camera is being deprecated (and in fact, nobody is using it anymore) there's no sense in keeping this test driver. Signed-off-by: Hans Verkuil Acked-by: Sakari Ailus Signed-off-by: Mauro Carvalho Chehab --- .../platform_data/media/soc_camera_platform.h | 83 ---------------------- 1 file changed, 83 deletions(-) delete mode 100644 include/linux/platform_data/media/soc_camera_platform.h (limited to 'include/linux') diff --git a/include/linux/platform_data/media/soc_camera_platform.h b/include/linux/platform_data/media/soc_camera_platform.h deleted file mode 100644 index 1e5065dab430..000000000000 --- a/include/linux/platform_data/media/soc_camera_platform.h +++ /dev/null @@ -1,83 +0,0 @@ -/* - * Generic Platform Camera Driver Header - * - * Copyright (C) 2008 Magnus Damm - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. 
- */ - -#ifndef __SOC_CAMERA_H__ -#define __SOC_CAMERA_H__ - -#include -#include -#include - -struct device; - -struct soc_camera_platform_info { - const char *format_name; - unsigned long format_depth; - struct v4l2_mbus_framefmt format; - unsigned long mbus_param; - enum v4l2_mbus_type mbus_type; - struct soc_camera_device *icd; - int (*set_capture)(struct soc_camera_platform_info *info, int enable); -}; - -static inline void soc_camera_platform_release(struct platform_device **pdev) -{ - *pdev = NULL; -} - -static inline int soc_camera_platform_add(struct soc_camera_device *icd, - struct platform_device **pdev, - struct soc_camera_link *plink, - void (*release)(struct device *dev), - int id) -{ - struct soc_camera_subdev_desc *ssdd = - (struct soc_camera_subdev_desc *)plink; - struct soc_camera_platform_info *info = ssdd->drv_priv; - int ret; - - if (&icd->sdesc->subdev_desc != ssdd) - return -ENODEV; - - if (*pdev) - return -EBUSY; - - *pdev = platform_device_alloc("soc_camera_platform", id); - if (!*pdev) - return -ENOMEM; - - info->icd = icd; - - (*pdev)->dev.platform_data = info; - (*pdev)->dev.release = release; - - ret = platform_device_add(*pdev); - if (ret < 0) { - platform_device_put(*pdev); - *pdev = NULL; - info->icd = NULL; - } - - return ret; -} - -static inline void soc_camera_platform_del(const struct soc_camera_device *icd, - struct platform_device *pdev, - const struct soc_camera_link *plink) -{ - const struct soc_camera_subdev_desc *ssdd = - (const struct soc_camera_subdev_desc *)plink; - if (&icd->sdesc->subdev_desc != ssdd || !pdev) - return; - - platform_device_unregister(pdev); -} - -#endif /* __SOC_CAMERA_H__ */ -- cgit v1.2.3 From 1fc1b63638da1accb27264a507b23aa6863c3852 Mon Sep 17 00:00:00 2001 From: Boris Brezillon Date: Sat, 19 Jan 2019 16:04:12 +0100 Subject: spi: spi-mem: Add devm_spi_mem_dirmap_{create,destroy}() Since direct mapping descriptors usually the same lifetime as the SPI MEM device adding devm_ variants of the spi_mem_dirmap_{create,destroy}() should greatly simplify error/remove path of spi-mem drivers making use of the direct mapping API. Signed-off-by: Boris Brezillon Signed-off-by: Mark Brown --- include/linux/spi/spi-mem.h | 5 +++++ 1 file changed, 5 insertions(+) (limited to 'include/linux') diff --git a/include/linux/spi/spi-mem.h b/include/linux/spi/spi-mem.h index 3fe24500c5ee..3703d0dcac2e 100644 --- a/include/linux/spi/spi-mem.h +++ b/include/linux/spi/spi-mem.h @@ -330,6 +330,11 @@ ssize_t spi_mem_dirmap_read(struct spi_mem_dirmap_desc *desc, u64 offs, size_t len, void *buf); ssize_t spi_mem_dirmap_write(struct spi_mem_dirmap_desc *desc, u64 offs, size_t len, const void *buf); +struct spi_mem_dirmap_desc * +devm_spi_mem_dirmap_create(struct device *dev, struct spi_mem *mem, + const struct spi_mem_dirmap_info *info); +void devm_spi_mem_dirmap_destroy(struct device *dev, + struct spi_mem_dirmap_desc *desc); int spi_mem_driver_register_with_owner(struct spi_mem_driver *drv, struct module *owner); -- cgit v1.2.3 From cf5c6c211b7e9eb4f4219f83671432c9ef257187 Mon Sep 17 00:00:00 2001 From: YueHaibing Date: Thu, 17 Jan 2019 15:25:04 +0800 Subject: perf: Remove duplicated workqueue.h include from perf_event.h It is already included a little bit higher up in that file. 
Signed-off-by: YueHaibing Cc: Alexander Shishkin Cc: Jiri Olsa Cc: Namhyung Kim Cc: Peter Zijlstra Link: http://lkml.kernel.org/r/20190117072504.14428-1-yuehaibing@huawei.com Signed-off-by: Arnaldo Carvalho de Melo --- include/linux/perf_event.h | 1 - 1 file changed, 1 deletion(-) (limited to 'include/linux') diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h index cec02dc63b51..f8ec36197718 100644 --- a/include/linux/perf_event.h +++ b/include/linux/perf_event.h @@ -53,7 +53,6 @@ struct perf_guest_info_callbacks { #include #include #include -#include #include #include -- cgit v1.2.3 From 5620196951192f7cd2da0a04e7c0113f40bfc14e Mon Sep 17 00:00:00 2001 From: Arnaldo Carvalho de Melo Date: Fri, 11 Jan 2019 13:20:20 -0300 Subject: perf: Make perf_event_output() propagate the output() return For the original mode of operation it isn't needed, since we report back errors via PERF_RECORD_LOST records in the ring buffer, but for use in bpf_perf_event_output() it is convenient to return the errors, basically -ENOSPC. Currently bpf_perf_event_output() returns an error indication, the last thing it does, which is to push it to the ring buffer is that can fail and if so, this failure won't be reported back to its users, fix it. Reported-by: Jamal Hadi Salim Tested-by: Jamal Hadi Salim Acked-by: Peter Zijlstra (Intel) Cc: Adrian Hunter Cc: Alexei Starovoitov Cc: Daniel Borkmann Cc: Jiri Olsa Cc: Namhyung Kim Link: https://lkml.kernel.org/r/20190118150938.GN5823@kernel.org Signed-off-by: Arnaldo Carvalho de Melo --- include/linux/perf_event.h | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) (limited to 'include/linux') diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h index f8ec36197718..4eb88065a9b5 100644 --- a/include/linux/perf_event.h +++ b/include/linux/perf_event.h @@ -978,9 +978,9 @@ extern void perf_event_output_forward(struct perf_event *event, extern void perf_event_output_backward(struct perf_event *event, struct perf_sample_data *data, struct pt_regs *regs); -extern void perf_event_output(struct perf_event *event, - struct perf_sample_data *data, - struct pt_regs *regs); +extern int perf_event_output(struct perf_event *event, + struct perf_sample_data *data, + struct pt_regs *regs); static inline bool is_default_overflow_handler(struct perf_event *event) -- cgit v1.2.3 From 76193a94522f1d4edf2447a536f3f796ce56343b Mon Sep 17 00:00:00 2001 From: Song Liu Date: Thu, 17 Jan 2019 08:15:13 -0800 Subject: perf, bpf: Introduce PERF_RECORD_KSYMBOL For better performance analysis of dynamically JITed and loaded kernel functions, such as BPF programs, this patch introduces PERF_RECORD_KSYMBOL, a new perf_event_type that exposes kernel symbol register/unregister information to user space. The following data structure is used for PERF_RECORD_KSYMBOL. 
/* * struct { * struct perf_event_header header; * u64 addr; * u32 len; * u16 ksym_type; * u16 flags; * char name[]; * struct sample_id sample_id; * }; */ Signed-off-by: Song Liu Reviewed-by: Arnaldo Carvalho de Melo Tested-by: Arnaldo Carvalho de Melo Acked-by: Peter Zijlstra Cc: Alexei Starovoitov Cc: Daniel Borkmann Cc: Peter Zijlstra Cc: kernel-team@fb.com Cc: netdev@vger.kernel.org Link: http://lkml.kernel.org/r/20190117161521.1341602-2-songliubraving@fb.com Signed-off-by: Arnaldo Carvalho de Melo --- include/linux/perf_event.h | 8 ++++++++ 1 file changed, 8 insertions(+) (limited to 'include/linux') diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h index 4eb88065a9b5..136fe0495374 100644 --- a/include/linux/perf_event.h +++ b/include/linux/perf_event.h @@ -1122,6 +1122,10 @@ static inline void perf_event_task_sched_out(struct task_struct *prev, } extern void perf_event_mmap(struct vm_area_struct *vma); + +extern void perf_event_ksymbol(u16 ksym_type, u64 addr, u32 len, + bool unregister, const char *sym); + extern struct perf_guest_info_callbacks *perf_guest_cbs; extern int perf_register_guest_info_callbacks(struct perf_guest_info_callbacks *callbacks); extern int perf_unregister_guest_info_callbacks(struct perf_guest_info_callbacks *callbacks); @@ -1342,6 +1346,10 @@ static inline int perf_unregister_guest_info_callbacks (struct perf_guest_info_callbacks *callbacks) { return 0; } static inline void perf_event_mmap(struct vm_area_struct *vma) { } + +typedef int (perf_ksymbol_get_name_f)(char *name, int name_len, void *data); +static inline void perf_event_ksymbol(u16 ksym_type, u64 addr, u32 len, + bool unregister, const char *sym) { } static inline void perf_event_exec(void) { } static inline void perf_event_comm(struct task_struct *tsk, bool exec) { } static inline void perf_event_namespaces(struct task_struct *tsk) { } -- cgit v1.2.3 From 6ee52e2a3fe4ea35520720736e6791df1fb67106 Mon Sep 17 00:00:00 2001 From: Song Liu Date: Thu, 17 Jan 2019 08:15:15 -0800 Subject: perf, bpf: Introduce PERF_RECORD_BPF_EVENT For better performance analysis of BPF programs, this patch introduces PERF_RECORD_BPF_EVENT, a new perf_event_type that exposes BPF program load/unload information to user space. Each BPF program may contain up to BPF_MAX_SUBPROGS (256) sub programs. The following example shows kernel symbols for a BPF program with 7 sub programs: ffffffffa0257cf9 t bpf_prog_b07ccb89267cf242_F ffffffffa02592e1 t bpf_prog_2dcecc18072623fc_F ffffffffa025b0e9 t bpf_prog_bb7a405ebaec5d5c_F ffffffffa025dd2c t bpf_prog_a7540d4a39ec1fc7_F ffffffffa025fcca t bpf_prog_05762d4ade0e3737_F ffffffffa026108f t bpf_prog_db4bd11e35df90d4_F ffffffffa0263f00 t bpf_prog_89d64e4abf0f0126_F ffffffffa0257cf9 t bpf_prog_ae31629322c4b018__dummy_tracepoi When a bpf program is loaded, PERF_RECORD_KSYMBOL is generated for each of these sub programs. Therefore, PERF_RECORD_BPF_EVENT is not needed for simple profiling. For annotation, user space need to listen to PERF_RECORD_BPF_EVENT and gather more information about these (sub) programs via sys_bpf. 
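For reference, the new record is expected to carry the bpf event type, flags, program id and program tag, analogous to the PERF_RECORD_KSYMBOL record in the previous patch (sketch only; the uapi header added by this series is authoritative):

  /*
   * struct {
   *    struct perf_event_header header;
   *    u16 type;
   *    u16 flags;
   *    u32 id;
   *    u8 tag[BPF_TAG_SIZE];
   *    struct sample_id sample_id;
   * };
   */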
Signed-off-by: Song Liu Reviewed-by: Arnaldo Carvalho de Melo Acked-by: Alexei Starovoitov Acked-by: Peter Zijlstra (Intel) Tested-by: Arnaldo Carvalho de Melo Cc: Daniel Borkmann Cc: Peter Zijlstra Cc: kernel-team@fb.com Cc: netdev@vger.kernel.org Link: http://lkml.kernel.org/r/20190117161521.1341602-4-songliubraving@fb.com Signed-off-by: Arnaldo Carvalho de Melo --- include/linux/filter.h | 7 +++++++ include/linux/perf_event.h | 6 ++++++ 2 files changed, 13 insertions(+) (limited to 'include/linux') diff --git a/include/linux/filter.h b/include/linux/filter.h index ad106d845b22..d531d4250bff 100644 --- a/include/linux/filter.h +++ b/include/linux/filter.h @@ -951,6 +951,7 @@ bpf_address_lookup(unsigned long addr, unsigned long *size, void bpf_prog_kallsyms_add(struct bpf_prog *fp); void bpf_prog_kallsyms_del(struct bpf_prog *fp); +void bpf_get_prog_name(const struct bpf_prog *prog, char *sym); #else /* CONFIG_BPF_JIT */ @@ -1006,6 +1007,12 @@ static inline void bpf_prog_kallsyms_add(struct bpf_prog *fp) static inline void bpf_prog_kallsyms_del(struct bpf_prog *fp) { } + +static inline void bpf_get_prog_name(const struct bpf_prog *prog, char *sym) +{ + sym[0] = '\0'; +} + #endif /* CONFIG_BPF_JIT */ void bpf_prog_kallsyms_del_subprogs(struct bpf_prog *fp); diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h index 136fe0495374..a79e59fc3b7d 100644 --- a/include/linux/perf_event.h +++ b/include/linux/perf_event.h @@ -1125,6 +1125,9 @@ extern void perf_event_mmap(struct vm_area_struct *vma); extern void perf_event_ksymbol(u16 ksym_type, u64 addr, u32 len, bool unregister, const char *sym); +extern void perf_event_bpf_event(struct bpf_prog *prog, + enum perf_bpf_event_type type, + u16 flags); extern struct perf_guest_info_callbacks *perf_guest_cbs; extern int perf_register_guest_info_callbacks(struct perf_guest_info_callbacks *callbacks); @@ -1350,6 +1353,9 @@ static inline void perf_event_mmap(struct vm_area_struct *vma) { } typedef int (perf_ksymbol_get_name_f)(char *name, int name_len, void *data); static inline void perf_event_ksymbol(u16 ksym_type, u64 addr, u32 len, bool unregister, const char *sym) { } +static inline void perf_event_bpf_event(struct bpf_prog *prog, + enum perf_bpf_event_type type, + u16 flags) { } static inline void perf_event_exec(void) { } static inline void perf_event_comm(struct task_struct *tsk, bool exec) { } static inline void perf_event_namespaces(struct task_struct *tsk) { } -- cgit v1.2.3 From 534fd7aac56a7994d16032f32123def9923e339f Mon Sep 17 00:00:00 2001 From: Yishai Hadas Date: Sun, 13 Jan 2019 16:01:17 +0200 Subject: IB/mlx5: Manage indirection mkey upon DEVX flow for ODP Manage indirection mkey upon DEVX flow to support ODP. To support a page fault event on the indirection mkey it needs to be part of the device mkey radix tree. Both the creation and the deletion flows for a DEVX object which is indirection mkey were adapted to handle that. 
Signed-off-by: Yishai Hadas Reviewed-by: Artemy Kovalyov Signed-off-by: Leon Romanovsky Signed-off-by: Jason Gunthorpe --- include/linux/mlx5/driver.h | 1 + 1 file changed, 1 insertion(+) (limited to 'include/linux') diff --git a/include/linux/mlx5/driver.h b/include/linux/mlx5/driver.h index b6f5839f129a..619d6fee96a1 100644 --- a/include/linux/mlx5/driver.h +++ b/include/linux/mlx5/driver.h @@ -364,6 +364,7 @@ struct mlx5_core_sig_ctx { enum { MLX5_MKEY_MR = 1, MLX5_MKEY_MW, + MLX5_MKEY_INDIRECT_DEVX, }; struct mlx5_core_mkey { -- cgit v1.2.3 From 1278cf66cf4b1c3d30e311200b50c45457c92baa Mon Sep 17 00:00:00 2001 From: Finn Thain Date: Tue, 15 Jan 2019 15:18:56 +1100 Subject: nvram: Replace nvram_* function exports with static functions Replace nvram_* functions with static functions in nvram.h. These will become wrappers for struct nvram_ops method calls. This patch effectively disables existing NVRAM functionality so as to allow the rest of the series to be bisected without build failures. That functionality is gradually re-implemented in subsequent patches. Replace the sole validate-checksum-and-read-byte sequence with a call to nvram_read() which will gain the same semantics in subsequent patches. Remove unused exports. Acked-by: Geert Uytterhoeven Signed-off-by: Finn Thain Signed-off-by: Greg Kroah-Hartman --- include/linux/nvram.h | 32 +++++++++++++++++++++++++------- 1 file changed, 25 insertions(+), 7 deletions(-) (limited to 'include/linux') diff --git a/include/linux/nvram.h b/include/linux/nvram.h index 28bfb9ab94ca..eb5b52a9a747 100644 --- a/include/linux/nvram.h +++ b/include/linux/nvram.h @@ -2,13 +2,31 @@ #ifndef _LINUX_NVRAM_H #define _LINUX_NVRAM_H +#include #include -/* __foo is foo without grabbing the rtc_lock - get it yourself */ -extern unsigned char __nvram_read_byte(int i); -extern unsigned char nvram_read_byte(int i); -extern void __nvram_write_byte(unsigned char c, int i); -extern void nvram_write_byte(unsigned char c, int i); -extern int __nvram_check_checksum(void); -extern int nvram_check_checksum(void); +static inline ssize_t nvram_get_size(void) +{ + return -ENODEV; +} + +static inline unsigned char nvram_read_byte(int addr) +{ + return 0xFF; +} + +static inline void nvram_write_byte(unsigned char val, int addr) +{ +} + +static inline ssize_t nvram_read(char *buf, size_t count, loff_t *ppos) +{ + return -ENODEV; +} + +static inline ssize_t nvram_write(char *buf, size_t count, loff_t *ppos) +{ + return -ENODEV; +} + #endif /* _LINUX_NVRAM_H */ -- cgit v1.2.3 From a084dbf6592c22468eb946014b2e731fb42da7a9 Mon Sep 17 00:00:00 2001 From: Finn Thain Date: Tue, 15 Jan 2019 15:18:56 +1100 Subject: m68k/atari: Implement arch_nvram_ops struct By implementing an arch_nvram_ops struct, a platform can re-use the drivers/char/nvram.c module without needing any arch-specific code in that module. Atari does so here. 
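The platform side boils down to filling in the ops struct with its accessors, roughly as follows (sketch with made-up names; the actual Atari implementation lives under arch/m68k and is not shown in this hunk):

  static ssize_t foo_nvram_get_size(void)
  {
          return 128;     /* fixed size of this (hypothetical) NVRAM */
  }

  static ssize_t foo_nvram_read(char *buf, size_t count, loff_t *ppos)
  {
          /* copy count bytes starting at *ppos into buf, advance *ppos */
          return count;
  }

  static ssize_t foo_nvram_write(char *buf, size_t count, loff_t *ppos)
  {
          /* program count bytes from buf starting at *ppos, advance *ppos */
          return count;
  }

  const struct nvram_ops arch_nvram_ops = {
          .get_size = foo_nvram_get_size,
          .read     = foo_nvram_read,
          .write    = foo_nvram_write,
  };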
Acked-by: Geert Uytterhoeven Signed-off-by: Finn Thain Signed-off-by: Greg Kroah-Hartman --- include/linux/nvram.h | 14 ++++++++++++++ 1 file changed, 14 insertions(+) (limited to 'include/linux') diff --git a/include/linux/nvram.h b/include/linux/nvram.h index eb5b52a9a747..a1e01dc89759 100644 --- a/include/linux/nvram.h +++ b/include/linux/nvram.h @@ -5,8 +5,18 @@ #include #include +struct nvram_ops { + ssize_t (*get_size)(void); + ssize_t (*read)(char *, size_t, loff_t *); + ssize_t (*write)(char *, size_t, loff_t *); +}; + +extern const struct nvram_ops arch_nvram_ops; + static inline ssize_t nvram_get_size(void) { + if (arch_nvram_ops.get_size) + return arch_nvram_ops.get_size(); return -ENODEV; } @@ -21,11 +31,15 @@ static inline void nvram_write_byte(unsigned char val, int addr) static inline ssize_t nvram_read(char *buf, size_t count, loff_t *ppos) { + if (arch_nvram_ops.read) + return arch_nvram_ops.read(buf, count, ppos); return -ENODEV; } static inline ssize_t nvram_write(char *buf, size_t count, loff_t *ppos) { + if (arch_nvram_ops.write) + return arch_nvram_ops.write(buf, count, ppos); return -ENODEV; } -- cgit v1.2.3 From a156c7ba669c65b55c7afcc3994e1199cc0cad47 Mon Sep 17 00:00:00 2001 From: Finn Thain Date: Tue, 15 Jan 2019 15:18:56 +1100 Subject: powerpc: Replace nvram_* extern declarations with standard header Remove the nvram_read_byte() and nvram_write_byte() declarations in powerpc/include/asm/nvram.h and use the cross-platform static functions in linux/nvram.h instead. Tested-by: Stan Johnson Signed-off-by: Finn Thain Signed-off-by: Greg Kroah-Hartman --- include/linux/nvram.h | 3 +++ 1 file changed, 3 insertions(+) (limited to 'include/linux') diff --git a/include/linux/nvram.h b/include/linux/nvram.h index a1e01dc89759..79431dab87a1 100644 --- a/include/linux/nvram.h +++ b/include/linux/nvram.h @@ -15,8 +15,11 @@ extern const struct nvram_ops arch_nvram_ops; static inline ssize_t nvram_get_size(void) { +#ifdef CONFIG_PPC +#else if (arch_nvram_ops.get_size) return arch_nvram_ops.get_size(); +#endif return -ENODEV; } -- cgit v1.2.3 From d5bbb5021ce8d9ff561c7469f5b4589ccb3bc4a6 Mon Sep 17 00:00:00 2001 From: Finn Thain Date: Tue, 15 Jan 2019 15:18:56 +1100 Subject: char/nvram: Adopt arch_nvram_ops NVRAMs on different platforms and architectures have different attributes and access methods. E.g. some platforms have byte-at-a-time accessor functions while others have byte-range accessor functions. Some have checksum functionality while others do not. By calling ops struct methods via the common wrapper functions, the nvram module and other drivers can make use of the available NVRAM functionality in a portable way. Signed-off-by: Finn Thain Signed-off-by: Greg Kroah-Hartman --- include/linux/nvram.h | 32 ++++++++++++++++++++++++++++++++ 1 file changed, 32 insertions(+) (limited to 'include/linux') diff --git a/include/linux/nvram.h b/include/linux/nvram.h index 79431dab87a1..bb4ea8cc6ea6 100644 --- a/include/linux/nvram.h +++ b/include/linux/nvram.h @@ -5,8 +5,30 @@ #include #include +/** + * struct nvram_ops - NVRAM functionality made available to drivers + * @read: validate checksum (if any) then load a range of bytes from NVRAM + * @write: store a range of bytes to NVRAM then update checksum (if any) + * @read_byte: load a single byte from NVRAM + * @write_byte: store a single byte to NVRAM + * @get_size: return the fixed number of bytes in the NVRAM + * + * Architectures which provide an nvram ops struct need not implement all + * of these methods. 
If the NVRAM hardware can be accessed only one byte + * at a time then it may be sufficient to provide .read_byte and .write_byte. + * If the NVRAM has a checksum (and it is to be checked) the .read and + * .write methods can be used to implement that efficiently. + * + * Portable drivers may use the wrapper functions defined here. + * The nvram_read() and nvram_write() functions call the .read and .write + * methods when available and fall back on the .read_byte and .write_byte + * methods otherwise. + */ + struct nvram_ops { ssize_t (*get_size)(void); + unsigned char (*read_byte)(int); + void (*write_byte)(unsigned char, int); ssize_t (*read)(char *, size_t, loff_t *); ssize_t (*write)(char *, size_t, loff_t *); }; @@ -25,11 +47,21 @@ static inline ssize_t nvram_get_size(void) static inline unsigned char nvram_read_byte(int addr) { +#ifdef CONFIG_PPC +#else + if (arch_nvram_ops.read_byte) + return arch_nvram_ops.read_byte(addr); +#endif return 0xFF; } static inline void nvram_write_byte(unsigned char val, int addr) { +#ifdef CONFIG_PPC +#else + if (arch_nvram_ops.write_byte) + arch_nvram_ops.write_byte(val, addr); +#endif } static inline ssize_t nvram_read(char *buf, size_t count, loff_t *ppos) -- cgit v1.2.3 From 2d58636e0af724f38acad25246c1625efec36122 Mon Sep 17 00:00:00 2001 From: Finn Thain Date: Tue, 15 Jan 2019 15:18:56 +1100 Subject: char/nvram: Allow the set_checksum and initialize ioctls to be omitted The drivers/char/nvram.c module has previously supported only RTC "CMOS" NVRAM, for which it provides appropriate checksum ioctls. Make these ioctls optional so the module can be re-used with other kinds of NVRAM. The ops struct methods that implement the ioctls now return error codes so that a multi-platform kernel binary can do the right thing when running on hardware without a suitable NVRAM. Signed-off-by: Finn Thain Signed-off-by: Greg Kroah-Hartman --- include/linux/nvram.h | 2 ++ 1 file changed, 2 insertions(+) (limited to 'include/linux') diff --git a/include/linux/nvram.h b/include/linux/nvram.h index bb4ea8cc6ea6..31c763087746 100644 --- a/include/linux/nvram.h +++ b/include/linux/nvram.h @@ -31,6 +31,8 @@ struct nvram_ops { void (*write_byte)(unsigned char, int); ssize_t (*read)(char *, size_t, loff_t *); ssize_t (*write)(char *, size_t, loff_t *); + long (*initialize)(void); + long (*set_checksum)(void); }; extern const struct nvram_ops arch_nvram_ops; -- cgit v1.2.3 From 109b3a89a7c48405d61a05d7a1720581a4f1574c Mon Sep 17 00:00:00 2001 From: Finn Thain Date: Tue, 15 Jan 2019 15:18:56 +1100 Subject: char/nvram: Implement NVRAM read/write methods Refactor the RTC "CMOS" NVRAM functions so that they can be used as arch_nvram_ops methods. Checksumming logic is moved from the misc device operations to the nvram read/write operations. This makes the misc device implementation more generic. This preserves the locking mechanism such that "read if checksum valid" and "write and update checksum" remain atomic operations. Some platforms implement byte-range read/write methods which are similar to file_operations struct methods. Other platforms provide only byte-at-a-time methods. The former are more efficient but may be unavailable so fall back on the latter methods when necessary. 
Tested-by: Stan Johnson Signed-off-by: Finn Thain Signed-off-by: Greg Kroah-Hartman --- include/linux/nvram.h | 32 ++++++++++++++++++++++++++++++-- 1 file changed, 30 insertions(+), 2 deletions(-) (limited to 'include/linux') diff --git a/include/linux/nvram.h b/include/linux/nvram.h index 31c763087746..9df85703735c 100644 --- a/include/linux/nvram.h +++ b/include/linux/nvram.h @@ -66,18 +66,46 @@ static inline void nvram_write_byte(unsigned char val, int addr) #endif } +static inline ssize_t nvram_read_bytes(char *buf, size_t count, loff_t *ppos) +{ + ssize_t nvram_size = nvram_get_size(); + loff_t i; + char *p = buf; + + if (nvram_size < 0) + return nvram_size; + for (i = *ppos; count > 0 && i < nvram_size; ++i, ++p, --count) + *p = nvram_read_byte(i); + *ppos = i; + return p - buf; +} + +static inline ssize_t nvram_write_bytes(char *buf, size_t count, loff_t *ppos) +{ + ssize_t nvram_size = nvram_get_size(); + loff_t i; + char *p = buf; + + if (nvram_size < 0) + return nvram_size; + for (i = *ppos; count > 0 && i < nvram_size; ++i, ++p, --count) + nvram_write_byte(*p, i); + *ppos = i; + return p - buf; +} + static inline ssize_t nvram_read(char *buf, size_t count, loff_t *ppos) { if (arch_nvram_ops.read) return arch_nvram_ops.read(buf, count, ppos); - return -ENODEV; + return nvram_read_bytes(buf, count, ppos); } static inline ssize_t nvram_write(char *buf, size_t count, loff_t *ppos) { if (arch_nvram_ops.write) return arch_nvram_ops.write(buf, count, ppos); - return -ENODEV; + return nvram_write_bytes(buf, count, ppos); } #endif /* _LINUX_NVRAM_H */ -- cgit v1.2.3 From 95ac14b8a32817dcd1f13ae4787891484966d2d5 Mon Sep 17 00:00:00 2001 From: Finn Thain Date: Tue, 15 Jan 2019 15:18:56 +1100 Subject: powerpc: Implement nvram ioctls Add the powerpc-specific ioctls to the nvram module. This allows the nvram module to replace the generic_nvram module. Tested-by: Stan Johnson Signed-off-by: Finn Thain Signed-off-by: Greg Kroah-Hartman --- include/linux/nvram.h | 2 ++ 1 file changed, 2 insertions(+) (limited to 'include/linux') diff --git a/include/linux/nvram.h b/include/linux/nvram.h index 9df85703735c..9e3a957c8f1f 100644 --- a/include/linux/nvram.h +++ b/include/linux/nvram.h @@ -31,8 +31,10 @@ struct nvram_ops { void (*write_byte)(unsigned char, int); ssize_t (*read)(char *, size_t, loff_t *); ssize_t (*write)(char *, size_t, loff_t *); +#if defined(CONFIG_X86) || defined(CONFIG_M68K) long (*initialize)(void); long (*set_checksum)(void); +#endif }; extern const struct nvram_ops arch_nvram_ops; -- cgit v1.2.3 From f9c3a570f5fc584f2ca2dd222d1b8c8537fc55f6 Mon Sep 17 00:00:00 2001 From: Finn Thain Date: Tue, 15 Jan 2019 15:18:56 +1100 Subject: powerpc: Enable HAVE_ARCH_NVRAM_OPS and disable GENERIC_NVRAM Switch PPC32 kernels from the generic_nvram module to the nvram module. Also fix a theoretical bug where CHRP omits the chrp_nvram_init() call when CONFIG_NVRAM_MODULE=m. 
Tested-by: Stan Johnson Signed-off-by: Finn Thain Signed-off-by: Greg Kroah-Hartman --- include/linux/nvram.h | 20 ++++++++++++++++++++ 1 file changed, 20 insertions(+) (limited to 'include/linux') diff --git a/include/linux/nvram.h b/include/linux/nvram.h index 9e3a957c8f1f..d29d9c93a927 100644 --- a/include/linux/nvram.h +++ b/include/linux/nvram.h @@ -5,6 +5,10 @@ #include #include +#ifdef CONFIG_PPC +#include +#endif + /** * struct nvram_ops - NVRAM functionality made available to drivers * @read: validate checksum (if any) then load a range of bytes from NVRAM @@ -42,6 +46,8 @@ extern const struct nvram_ops arch_nvram_ops; static inline ssize_t nvram_get_size(void) { #ifdef CONFIG_PPC + if (ppc_md.nvram_size) + return ppc_md.nvram_size(); #else if (arch_nvram_ops.get_size) return arch_nvram_ops.get_size(); @@ -52,6 +58,8 @@ static inline ssize_t nvram_get_size(void) static inline unsigned char nvram_read_byte(int addr) { #ifdef CONFIG_PPC + if (ppc_md.nvram_read_val) + return ppc_md.nvram_read_val(addr); #else if (arch_nvram_ops.read_byte) return arch_nvram_ops.read_byte(addr); @@ -62,6 +70,8 @@ static inline unsigned char nvram_read_byte(int addr) static inline void nvram_write_byte(unsigned char val, int addr) { #ifdef CONFIG_PPC + if (ppc_md.nvram_write_val) + ppc_md.nvram_write_val(addr, val); #else if (arch_nvram_ops.write_byte) arch_nvram_ops.write_byte(val, addr); @@ -98,15 +108,25 @@ static inline ssize_t nvram_write_bytes(char *buf, size_t count, loff_t *ppos) static inline ssize_t nvram_read(char *buf, size_t count, loff_t *ppos) { +#ifdef CONFIG_PPC + if (ppc_md.nvram_read) + return ppc_md.nvram_read(buf, count, ppos); +#else if (arch_nvram_ops.read) return arch_nvram_ops.read(buf, count, ppos); +#endif return nvram_read_bytes(buf, count, ppos); } static inline ssize_t nvram_write(char *buf, size_t count, loff_t *ppos) { +#ifdef CONFIG_PPC + if (ppc_md.nvram_write) + return ppc_md.nvram_write(buf, count, ppos); +#else if (arch_nvram_ops.write) return arch_nvram_ops.write(buf, count, ppos); +#endif return nvram_write_bytes(buf, count, ppos); } -- cgit v1.2.3 From 8092e79204e7884f4bee3584ecfe6cf4a124d129 Mon Sep 17 00:00:00 2001 From: Andrey Smirnov Date: Thu, 20 Dec 2018 23:28:37 -0800 Subject: ihex: Share code between ihex_validate_fw() and ihex_next_binrec() Convert both ihex_validate_fw() and ihex_next_binrec() to use a helper function to calculate next record offest. This way we only have one place implementing next record offset calculation logic. No functional change intended. Cc: Chris Healy Cc: Kyle McMartin Cc: Andrew Morton Cc: Masahiro Yamada Cc: David Woodhouse Cc: Greg Kroah-Hartman Cc: linux-kernel Signed-off-by: Andrey Smirnov Signed-off-by: Greg Kroah-Hartman --- include/linux/ihex.h | 21 +++++++++++++-------- 1 file changed, 13 insertions(+), 8 deletions(-) (limited to 'include/linux') diff --git a/include/linux/ihex.h b/include/linux/ihex.h index 75c194391869..9c701521176b 100644 --- a/include/linux/ihex.h +++ b/include/linux/ihex.h @@ -23,29 +23,34 @@ struct ihex_binrec { /* Find the next record, taking into account the 4-byte alignment */ static inline const struct ihex_binrec * -ihex_next_binrec(const struct ihex_binrec *rec) +__ihex_next_binrec(const struct ihex_binrec *rec) { int next = ((be16_to_cpu(rec->len) + 5) & ~3) - 2; rec = (void *)&rec->data[next]; + return rec; +} + +static inline const struct ihex_binrec * +ihex_next_binrec(const struct ihex_binrec *rec) +{ + rec = __ihex_next_binrec(rec); + return be16_to_cpu(rec->len) ? 
rec : NULL; } /* Check that ihex_next_binrec() won't take us off the end of the image... */ static inline int ihex_validate_fw(const struct firmware *fw) { - const struct ihex_binrec *rec; - size_t ofs = 0; + const struct ihex_binrec *end, *rec; - while (ofs <= fw->size - sizeof(*rec)) { - rec = (void *)&fw->data[ofs]; + rec = (const void *)fw->data; + end = (const void *)&fw->data[fw->size - sizeof(*end)]; + for (; rec <= end; rec = __ihex_next_binrec(rec)) { /* Zero length marks end of records */ if (!be16_to_cpu(rec->len)) return 0; - - /* Point to next record... */ - ofs += (sizeof(*rec) + be16_to_cpu(rec->len) + 3) & ~3; } return -EINVAL; } -- cgit v1.2.3 From 5158c36ec9d0b3343f58987cec7ebfd866331fd0 Mon Sep 17 00:00:00 2001 From: Andrey Smirnov Date: Thu, 20 Dec 2018 23:28:38 -0800 Subject: ihex: Check if zero-length record is at the end of the blob When verifying the validity of IHEX file we need to make sure that zero-length record we found is located at the end of the file. Not doing that could result in an invalid file with a bogus zero-length in the middle short-circuiting the check and being reported as valid. Cc: Chris Healy Cc: Kyle McMartin Cc: Andrew Morton Cc: Masahiro Yamada Cc: David Woodhouse Cc: Greg Kroah-Hartman Cc: linux-kernel Signed-off-by: Andrey Smirnov Signed-off-by: Greg Kroah-Hartman --- include/linux/ihex.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'include/linux') diff --git a/include/linux/ihex.h b/include/linux/ihex.h index 9c701521176b..9130f307a420 100644 --- a/include/linux/ihex.h +++ b/include/linux/ihex.h @@ -49,7 +49,7 @@ static inline int ihex_validate_fw(const struct firmware *fw) for (; rec <= end; rec = __ihex_next_binrec(rec)) { /* Zero length marks end of records */ - if (!be16_to_cpu(rec->len)) + if (rec == end && !be16_to_cpu(rec->len)) return 0; } return -EINVAL; -- cgit v1.2.3 From 9fb4ab4d3dd665a62da9c176a89e7c7feaf5d9e4 Mon Sep 17 00:00:00 2001 From: Andrey Smirnov Date: Thu, 20 Dec 2018 23:28:39 -0800 Subject: ihex: Simplify next record offset calculation Next record calucaltion can be reduced to a much more tivial ALIGN operation as follows: 1. Splitting 5 into 2 + 3 we get next = ((be16_to_cpu(rec->len) + 2 + 3) & ~3) - 2 (1) 2. Using ALIGN macro we reduce (1) to: ALIGN(be16_to_cpu(rec->len) + 2, 4) - 2 (2) 3. Subsituting 'next' in original next record calucation we get: (void *)&rec->data[ALIGN(be16_to_cpu(rec->len) + 2, 4) - 2] (3) 4. Converting array index to pointer arithmetic we convert (3) into: (void *)rec + sizeof(*rec) + ALIGN(be16_to_cpu(rec->len) + 2, 4) - 2 (4) 5. Subsituting sizeof(*rec) with its value, 6, and substracting 2, in (4) we get: (void *)rec + ALIGN(be16_to_cpu(rec->len) + 2, 4) + 4 (5) 6. Since ALIGN(X, 4) + 4 == ALIGN(X + 4, 4), (5) can be converted to: (void *)rec + ALIGN(be16_to_cpu(rec->len) + 6, 4) (6) 5. Subsituting 6 in (6) to sizeof(*rec) we get: (void *)rec + ALIGN(be16_to_cpu(rec->len) + sizeof(*rec), 4) (7) Using expression (7) should make it more clear that next record is located by adding full size of the current record (payload + auxiliary data) aligned to 4 bytes, to the location of the current one. No functional change intended. 
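As a quick sanity check, take a record with len = 5 and sizeof(*rec) = 6: the old code computes next = ((5 + 5) & ~3) - 2 = 6 and points at &rec->data[6], i.e. 12 bytes past rec, while expression (7) gives ALIGN(5 + 6, 4) = 12 bytes past rec, the same location.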
Cc: Chris Healy Cc: Kyle McMartin Cc: Andrew Morton Cc: Masahiro Yamada Cc: David Woodhouse Cc: Greg Kroah-Hartman Cc: linux-kernel Signed-off-by: Andrey Smirnov Signed-off-by: Greg Kroah-Hartman --- include/linux/ihex.h | 10 +++++++--- 1 file changed, 7 insertions(+), 3 deletions(-) (limited to 'include/linux') diff --git a/include/linux/ihex.h b/include/linux/ihex.h index 9130f307a420..98cb5ce0b0a0 100644 --- a/include/linux/ihex.h +++ b/include/linux/ihex.h @@ -21,14 +21,18 @@ struct ihex_binrec { uint8_t data[0]; } __attribute__((packed)); +static inline uint16_t ihex_binrec_size(const struct ihex_binrec *p) +{ + return be16_to_cpu(p->len) + sizeof(*p); +} + /* Find the next record, taking into account the 4-byte alignment */ static inline const struct ihex_binrec * __ihex_next_binrec(const struct ihex_binrec *rec) { - int next = ((be16_to_cpu(rec->len) + 5) & ~3) - 2; - rec = (void *)&rec->data[next]; + const void *p = rec; - return rec; + return p + ALIGN(ihex_binrec_size(rec), 4); } static inline const struct ihex_binrec * -- cgit v1.2.3 From 11f1ceca7031deefc1a34236ab7b94360016b71d Mon Sep 17 00:00:00 2001 From: Georgi Djakov Date: Wed, 16 Jan 2019 18:10:56 +0200 Subject: interconnect: Add generic on-chip interconnect API This patch introduces a new API to get requirements and configure the interconnect buses across the entire chipset to fit with the current demand. The API is using a consumer/provider-based model, where the providers are the interconnect buses and the consumers could be various drivers. The consumers request interconnect resources (path) between endpoints and set the desired constraints on this data flow path. The providers receive requests from consumers and aggregate these requests for all master-slave pairs on that path. Then the providers configure each node along the path to support a bandwidth that satisfies all bandwidth requests that cross through that node. The topology could be complicated and multi-tiered and is SoC specific. Reviewed-by: Evan Green Signed-off-by: Georgi Djakov Signed-off-by: Greg Kroah-Hartman --- include/linux/interconnect-provider.h | 125 ++++++++++++++++++++++++++++++++++ include/linux/interconnect.h | 52 ++++++++++++++ 2 files changed, 177 insertions(+) create mode 100644 include/linux/interconnect-provider.h create mode 100644 include/linux/interconnect.h (limited to 'include/linux') diff --git a/include/linux/interconnect-provider.h b/include/linux/interconnect-provider.h new file mode 100644 index 000000000000..78208a754181 --- /dev/null +++ b/include/linux/interconnect-provider.h @@ -0,0 +1,125 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (c) 2018, Linaro Ltd. 
+ * Author: Georgi Djakov + */ + +#ifndef __LINUX_INTERCONNECT_PROVIDER_H +#define __LINUX_INTERCONNECT_PROVIDER_H + +#include + +#define icc_units_to_bps(bw) ((bw) * 1000ULL) + +struct icc_node; + +/** + * struct icc_provider - interconnect provider (controller) entity that might + * provide multiple interconnect controls + * + * @provider_list: list of the registered interconnect providers + * @nodes: internal list of the interconnect provider nodes + * @set: pointer to device specific set operation function + * @aggregate: pointer to device specific aggregate operation function + * @dev: the device this interconnect provider belongs to + * @users: count of active users + * @data: pointer to private data + */ +struct icc_provider { + struct list_head provider_list; + struct list_head nodes; + int (*set)(struct icc_node *src, struct icc_node *dst); + int (*aggregate)(struct icc_node *node, u32 avg_bw, u32 peak_bw, + u32 *agg_avg, u32 *agg_peak); + struct device *dev; + int users; + void *data; +}; + +/** + * struct icc_node - entity that is part of the interconnect topology + * + * @id: platform specific node id + * @name: node name used in debugfs + * @links: a list of targets pointing to where we can go next when traversing + * @num_links: number of links to other interconnect nodes + * @provider: points to the interconnect provider of this node + * @node_list: the list entry in the parent provider's "nodes" list + * @search_list: list used when walking the nodes graph + * @reverse: pointer to previous node when walking the nodes graph + * @is_traversed: flag that is used when walking the nodes graph + * @req_list: a list of QoS constraint requests associated with this node + * @avg_bw: aggregated value of average bandwidth requests from all consumers + * @peak_bw: aggregated value of peak bandwidth requests from all consumers + * @data: pointer to private data + */ +struct icc_node { + int id; + const char *name; + struct icc_node **links; + size_t num_links; + + struct icc_provider *provider; + struct list_head node_list; + struct list_head search_list; + struct icc_node *reverse; + u8 is_traversed:1; + struct hlist_head req_list; + u32 avg_bw; + u32 peak_bw; + void *data; +}; + +#if IS_ENABLED(CONFIG_INTERCONNECT) + +struct icc_node *icc_node_create(int id); +void icc_node_destroy(int id); +int icc_link_create(struct icc_node *node, const int dst_id); +int icc_link_destroy(struct icc_node *src, struct icc_node *dst); +void icc_node_add(struct icc_node *node, struct icc_provider *provider); +void icc_node_del(struct icc_node *node); +int icc_provider_add(struct icc_provider *provider); +int icc_provider_del(struct icc_provider *provider); + +#else + +static inline struct icc_node *icc_node_create(int id) +{ + return ERR_PTR(-ENOTSUPP); +} + +void icc_node_destroy(int id) +{ +} + +static inline int icc_link_create(struct icc_node *node, const int dst_id) +{ + return -ENOTSUPP; +} + +int icc_link_destroy(struct icc_node *src, struct icc_node *dst) +{ + return -ENOTSUPP; +} + +void icc_node_add(struct icc_node *node, struct icc_provider *provider) +{ +} + +void icc_node_del(struct icc_node *node) +{ +} + +static inline int icc_provider_add(struct icc_provider *provider) +{ + return -ENOTSUPP; +} + +static inline int icc_provider_del(struct icc_provider *provider) +{ + return -ENOTSUPP; +} + +#endif /* CONFIG_INTERCONNECT */ + +#endif /* __LINUX_INTERCONNECT_PROVIDER_H */ diff --git a/include/linux/interconnect.h b/include/linux/interconnect.h new file mode 100644 index 
000000000000..c331afb3a2c8 --- /dev/null +++ b/include/linux/interconnect.h @@ -0,0 +1,52 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (c) 2018-2019, Linaro Ltd. + * Author: Georgi Djakov + */ + +#ifndef __LINUX_INTERCONNECT_H +#define __LINUX_INTERCONNECT_H + +#include +#include + +/* macros for converting to icc units */ +#define Bps_to_icc(x) ((x) / 1000) +#define kBps_to_icc(x) (x) +#define MBps_to_icc(x) ((x) * 1000) +#define GBps_to_icc(x) ((x) * 1000 * 1000) +#define bps_to_icc(x) (1) +#define kbps_to_icc(x) ((x) / 8 + ((x) % 8 ? 1 : 0)) +#define Mbps_to_icc(x) ((x) * 1000 / 8) +#define Gbps_to_icc(x) ((x) * 1000 * 1000 / 8) + +struct icc_path; +struct device; + +#if IS_ENABLED(CONFIG_INTERCONNECT) + +struct icc_path *icc_get(struct device *dev, const int src_id, + const int dst_id); +void icc_put(struct icc_path *path); +int icc_set_bw(struct icc_path *path, u32 avg_bw, u32 peak_bw); + +#else + +static inline struct icc_path *icc_get(struct device *dev, const int src_id, + const int dst_id) +{ + return NULL; +} + +static inline void icc_put(struct icc_path *path) +{ +} + +static inline int icc_set_bw(struct icc_path *path, u32 avg_bw, u32 peak_bw) +{ + return 0; +} + +#endif /* CONFIG_INTERCONNECT */ + +#endif /* __LINUX_INTERCONNECT_H */ -- cgit v1.2.3 From 87e3031b6fbd83ea83adf1bf9602bcce313ee787 Mon Sep 17 00:00:00 2001 From: Georgi Djakov Date: Wed, 16 Jan 2019 18:10:58 +0200 Subject: interconnect: Allow endpoints translation via DT Currently we support only platform data for specifying the interconnect endpoints. As now the endpoints are hard-coded into the consumer driver this may lead to complications when a single driver is used by multiple SoCs, which may have different interconnect topology. To avoid cluttering the consumer drivers, introduce a translation function to help us get the board specific interconnect data from device-tree. 
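A consumer driver can then look the path up by name from its DT node instead of hard-coding endpoint ids, roughly as follows (illustrative only; the path name and bandwidth values are made up):

  struct icc_path *path;
  int ret;

  path = of_icc_get(&pdev->dev, "memory");
  if (IS_ERR(path))
          return PTR_ERR(path);

  ret = icc_set_bw(path, kBps_to_icc(800), kBps_to_icc(1600));
  if (ret)
          dev_err(&pdev->dev, "failed to set interconnect bandwidth\n");

  /* ... */
  icc_put(path);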
Reviewed-by: Evan Green Signed-off-by: Georgi Djakov Signed-off-by: Greg Kroah-Hartman --- include/linux/interconnect-provider.h | 17 +++++++++++++++++ include/linux/interconnect.h | 7 +++++++ 2 files changed, 24 insertions(+) (limited to 'include/linux') diff --git a/include/linux/interconnect-provider.h b/include/linux/interconnect-provider.h index 78208a754181..63caccadc2db 100644 --- a/include/linux/interconnect-provider.h +++ b/include/linux/interconnect-provider.h @@ -12,6 +12,21 @@ #define icc_units_to_bps(bw) ((bw) * 1000ULL) struct icc_node; +struct of_phandle_args; + +/** + * struct icc_onecell_data - driver data for onecell interconnect providers + * + * @num_nodes: number of nodes in this device + * @nodes: array of pointers to the nodes in this device + */ +struct icc_onecell_data { + unsigned int num_nodes; + struct icc_node *nodes[]; +}; + +struct icc_node *of_icc_xlate_onecell(struct of_phandle_args *spec, + void *data); /** * struct icc_provider - interconnect provider (controller) entity that might @@ -21,6 +36,7 @@ struct icc_node; * @nodes: internal list of the interconnect provider nodes * @set: pointer to device specific set operation function * @aggregate: pointer to device specific aggregate operation function + * @xlate: provider-specific callback for mapping nodes from phandle arguments * @dev: the device this interconnect provider belongs to * @users: count of active users * @data: pointer to private data @@ -31,6 +47,7 @@ struct icc_provider { int (*set)(struct icc_node *src, struct icc_node *dst); int (*aggregate)(struct icc_node *node, u32 avg_bw, u32 peak_bw, u32 *agg_avg, u32 *agg_peak); + struct icc_node* (*xlate)(struct of_phandle_args *spec, void *data); struct device *dev; int users; void *data; diff --git a/include/linux/interconnect.h b/include/linux/interconnect.h index c331afb3a2c8..dc25864755ba 100644 --- a/include/linux/interconnect.h +++ b/include/linux/interconnect.h @@ -27,6 +27,7 @@ struct device; struct icc_path *icc_get(struct device *dev, const int src_id, const int dst_id); +struct icc_path *of_icc_get(struct device *dev, const char *name); void icc_put(struct icc_path *path); int icc_set_bw(struct icc_path *path, u32 avg_bw, u32 peak_bw); @@ -38,6 +39,12 @@ static inline struct icc_path *icc_get(struct device *dev, const int src_id, return NULL; } +static inline struct icc_path *of_icc_get(struct device *dev, + const char *name) +{ + return NULL; +} + static inline void icc_put(struct icc_path *path) { } -- cgit v1.2.3 From c81d64d3dc1f2decf8f3a9354416b7496b5c389b Mon Sep 17 00:00:00 2001 From: Logan Gunthorpe Date: Wed, 16 Jan 2019 11:25:21 -0700 Subject: io-64-nonatomic: add io{read|write}64[be]{_lo_hi|_hi_lo} macros This patch adds generic io{read|write}64[be]{_lo_hi|_hi_lo} macros if they are not already defined by the architecture. (As they are provided by the generic iomap library). The patch also points io{read|write}64[be] to the variant specified by the header name. This is because new drivers are encouraged to use ioreadXX, et al instead of readX[1], et al -- and mixing ioreadXX with readq is pretty ugly. 
[1] LDD3: section 9.4.2 Signed-off-by: Logan Gunthorpe Reviewed-by: Andy Shevchenko Cc: Christoph Hellwig Cc: Arnd Bergmann Cc: Alan Cox Cc: Greg Kroah-Hartman Signed-off-by: Greg Kroah-Hartman --- include/linux/io-64-nonatomic-hi-lo.h | 64 +++++++++++++++++++++++++++++++++++ include/linux/io-64-nonatomic-lo-hi.h | 64 +++++++++++++++++++++++++++++++++++ 2 files changed, 128 insertions(+) (limited to 'include/linux') diff --git a/include/linux/io-64-nonatomic-hi-lo.h b/include/linux/io-64-nonatomic-hi-lo.h index 862d786a904f..ae21b72cce85 100644 --- a/include/linux/io-64-nonatomic-hi-lo.h +++ b/include/linux/io-64-nonatomic-hi-lo.h @@ -55,4 +55,68 @@ static inline void hi_lo_writeq_relaxed(__u64 val, volatile void __iomem *addr) #define writeq_relaxed hi_lo_writeq_relaxed #endif +#ifndef ioread64_hi_lo +#define ioread64_hi_lo ioread64_hi_lo +static inline u64 ioread64_hi_lo(void __iomem *addr) +{ + u32 low, high; + + high = ioread32(addr + sizeof(u32)); + low = ioread32(addr); + + return low + ((u64)high << 32); +} +#endif + +#ifndef iowrite64_hi_lo +#define iowrite64_hi_lo iowrite64_hi_lo +static inline void iowrite64_hi_lo(u64 val, void __iomem *addr) +{ + iowrite32(val >> 32, addr + sizeof(u32)); + iowrite32(val, addr); +} +#endif + +#ifndef ioread64be_hi_lo +#define ioread64be_hi_lo ioread64be_hi_lo +static inline u64 ioread64be_hi_lo(void __iomem *addr) +{ + u32 low, high; + + high = ioread32be(addr); + low = ioread32be(addr + sizeof(u32)); + + return low + ((u64)high << 32); +} +#endif + +#ifndef iowrite64be_hi_lo +#define iowrite64be_hi_lo iowrite64be_hi_lo +static inline void iowrite64be_hi_lo(u64 val, void __iomem *addr) +{ + iowrite32be(val >> 32, addr); + iowrite32be(val, addr + sizeof(u32)); +} +#endif + +#ifndef ioread64 +#define ioread64_is_nonatomic +#define ioread64 ioread64_hi_lo +#endif + +#ifndef iowrite64 +#define iowrite64_is_nonatomic +#define iowrite64 iowrite64_hi_lo +#endif + +#ifndef ioread64be +#define ioread64be_is_nonatomic +#define ioread64be ioread64be_hi_lo +#endif + +#ifndef iowrite64be +#define iowrite64be_is_nonatomic +#define iowrite64be iowrite64be_hi_lo +#endif + #endif /* _LINUX_IO_64_NONATOMIC_HI_LO_H_ */ diff --git a/include/linux/io-64-nonatomic-lo-hi.h b/include/linux/io-64-nonatomic-lo-hi.h index d042e7bb5adb..faaa842dbdb9 100644 --- a/include/linux/io-64-nonatomic-lo-hi.h +++ b/include/linux/io-64-nonatomic-lo-hi.h @@ -55,4 +55,68 @@ static inline void lo_hi_writeq_relaxed(__u64 val, volatile void __iomem *addr) #define writeq_relaxed lo_hi_writeq_relaxed #endif +#ifndef ioread64_lo_hi +#define ioread64_lo_hi ioread64_lo_hi +static inline u64 ioread64_lo_hi(void __iomem *addr) +{ + u32 low, high; + + low = ioread32(addr); + high = ioread32(addr + sizeof(u32)); + + return low + ((u64)high << 32); +} +#endif + +#ifndef iowrite64_lo_hi +#define iowrite64_lo_hi iowrite64_lo_hi +static inline void iowrite64_lo_hi(u64 val, void __iomem *addr) +{ + iowrite32(val, addr); + iowrite32(val >> 32, addr + sizeof(u32)); +} +#endif + +#ifndef ioread64be_lo_hi +#define ioread64be_lo_hi ioread64be_lo_hi +static inline u64 ioread64be_lo_hi(void __iomem *addr) +{ + u32 low, high; + + low = ioread32be(addr + sizeof(u32)); + high = ioread32be(addr); + + return low + ((u64)high << 32); +} +#endif + +#ifndef iowrite64be_lo_hi +#define iowrite64be_lo_hi iowrite64be_lo_hi +static inline void iowrite64be_lo_hi(u64 val, void __iomem *addr) +{ + iowrite32be(val, addr + sizeof(u32)); + iowrite32be(val >> 32, addr); +} +#endif + +#ifndef ioread64 +#define 
ioread64_is_nonatomic +#define ioread64 ioread64_lo_hi +#endif + +#ifndef iowrite64 +#define iowrite64_is_nonatomic +#define iowrite64 iowrite64_lo_hi +#endif + +#ifndef ioread64be +#define ioread64be_is_nonatomic +#define ioread64be ioread64be_lo_hi +#endif + +#ifndef iowrite64be +#define iowrite64be_is_nonatomic +#define iowrite64be iowrite64be_lo_hi +#endif + #endif /* _LINUX_IO_64_NONATOMIC_LO_HI_H_ */ -- cgit v1.2.3 From 51c48b310183ab6ba5419edfc6a8de889cc04521 Mon Sep 17 00:00:00 2001 From: Bjorn Helgaas Date: Sat, 19 Jan 2019 11:35:04 -0600 Subject: PCI: Probe bridge window attributes once at enumeration-time pci_bridge_check_ranges() determines whether a bridge supports the optional I/O and prefetchable memory windows and sets the flag bits in the bridge resources. This *could* be done once during enumeration except that the resource allocation code completely clears the flag bits, e.g., in the pci_assign_unassigned_bridge_resources() path. The problem with pci_bridge_check_ranges() in the resource allocation path is that we may allocate resources after devices have been claimed by drivers, and pci_bridge_check_ranges() *changes* the window registers to determine whether they're writable. This may break concurrent accesses to devices behind the bridge. Add a new pci_read_bridge_windows() to determine whether a bridge supports the optional windows, call it once during enumeration, remember the results, and change pci_bridge_check_ranges() so it doesn't touch the bridge windows but sets the flag bits based on those remembered results. Link: https://lore.kernel.org/linux-pci/1506151482-113560-1-git-send-email-wangzhou1@hisilicon.com Link: https://lists.gnu.org/archive/html/qemu-devel/2018-12/msg02082.html Reported-by: Yandong Xu Tested-by: Yandong Xu Signed-off-by: Bjorn Helgaas Cc: Michael S. Tsirkin Cc: Sagi Grimberg Cc: Ofer Hayut Cc: Roy Shterman Cc: Keith Busch Cc: Zhou Wang --- include/linux/pci.h | 3 +++ 1 file changed, 3 insertions(+) (limited to 'include/linux') diff --git a/include/linux/pci.h b/include/linux/pci.h index 65f1d8c2f082..40b327b814aa 100644 --- a/include/linux/pci.h +++ b/include/linux/pci.h @@ -373,6 +373,9 @@ struct pci_dev { bool match_driver; /* Skip attaching driver */ unsigned int transparent:1; /* Subtractive decode bridge */ + unsigned int io_window:1; /* Bridge has I/O window */ + unsigned int pref_window:1; /* Bridge has pref mem window */ + unsigned int pref_64_window:1; /* Pref mem window is 64-bit */ unsigned int multifunction:1; /* Multi-function device */ unsigned int is_busmaster:1; /* Is busmaster */ -- cgit v1.2.3 From 856c395cfa63b94a1d8215182f0243c222f6f927 Mon Sep 17 00:00:00 2001 From: Cong Wang Date: Thu, 17 Jan 2019 23:27:11 -0800 Subject: net: introduce a knob to control whether to inherit devconf config There have been many people complaining about the inconsistent behaviors of IPv4 and IPv6 devconf when creating new network namespaces. Currently, for IPv4, we inherit all current settings from init_net, but for IPv6 we reset all setting to default. This patch introduces a new /proc file /proc/sys/net/core/devconf_inherit_init_net to control the behavior of whether to inhert sysctl current settings from init_net. This file itself is only available in init_net. 
As demonstrated below:

Initial setup in init_net:
# cat /proc/sys/net/ipv4/conf/all/rp_filter
2
# cat /proc/sys/net/ipv6/conf/all/accept_dad
1

Default value 0 (current behavior):
# ip netns del test
# ip netns add test
# ip netns exec test cat /proc/sys/net/ipv4/conf/all/rp_filter
2
# ip netns exec test cat /proc/sys/net/ipv6/conf/all/accept_dad
0

Set to 1 (inherit from init_net):
# echo 1 > /proc/sys/net/core/devconf_inherit_init_net
# ip netns del test
# ip netns add test
# ip netns exec test cat /proc/sys/net/ipv4/conf/all/rp_filter
2
# ip netns exec test cat /proc/sys/net/ipv6/conf/all/accept_dad
1

Set to 2 (reset to default):
# echo 2 > /proc/sys/net/core/devconf_inherit_init_net
# ip netns del test
# ip netns add test
# ip netns exec test cat /proc/sys/net/ipv4/conf/all/rp_filter
0
# ip netns exec test cat /proc/sys/net/ipv6/conf/all/accept_dad
0

Set to a value out of range (invalid):
# echo 3 > /proc/sys/net/core/devconf_inherit_init_net
-bash: echo: write error: Invalid argument
# echo -1 > /proc/sys/net/core/devconf_inherit_init_net
-bash: echo: write error: Invalid argument

Reported-by: Zhu Yanjun Reported-by: Tonghao Zhang Cc: Nicolas Dichtel Signed-off-by: Cong Wang Acked-by: Nicolas Dichtel Acked-by: Tonghao Zhang Signed-off-by: David S. Miller --- include/linux/netdevice.h | 1 + 1 file changed, 1 insertion(+) (limited to 'include/linux') diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h index a57b9a853aab..e675ef97a426 100644 --- a/include/linux/netdevice.h +++ b/include/linux/netdevice.h @@ -630,6 +630,7 @@ struct netdev_queue { } ____cacheline_aligned_in_smp; extern int sysctl_fb_tunnels_only_for_init_net; +extern int sysctl_devconf_inherit_init_net; static inline bool net_has_fallback_tunnels(const struct net *net) { -- cgit v1.2.3 From 5b93ac542301026eff8954589cf59f801d03db3e Mon Sep 17 00:00:00 2001 From: Rajendra Nayak Date: Thu, 10 Jan 2019 09:32:02 +0530 Subject: OPP: Add support for parsing the 'opp-level' property Now that the OPP bindings are updated to include an optional 'opp-level' property, add support to parse it from device tree and store it as part of the dev_pm_opp structure. Also add and export a helper 'dev_pm_opp_get_level()' that can be used to get the level value read from device tree when present.
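As a rough illustration (not part of this patch), a consumer could read the parsed level back as sketched below; the target frequency and the use of dev_pm_opp_find_freq_exact() are assumptions made for the example.

#include <linux/err.h>
#include <linux/pm_opp.h>

/* Hypothetical consumer: fetch an OPP and query its "opp-level" value. */
static int example_read_opp_level(struct device *dev)
{
	struct dev_pm_opp *opp;
	unsigned int level;

	opp = dev_pm_opp_find_freq_exact(dev, 800000000, true); /* assumed rate */
	if (IS_ERR(opp))
		return PTR_ERR(opp);

	level = dev_pm_opp_get_level(opp);	/* 0 when no "opp-level" was given */
	dev_pm_opp_put(opp);

	return level;
}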
Reviewed-by: Stephen Boyd Acked-by: Viresh Kumar Signed-off-by: Rajendra Nayak Signed-off-by: Bjorn Andersson Signed-off-by: Andy Gross --- include/linux/pm_opp.h | 7 +++++++ 1 file changed, 7 insertions(+) (limited to 'include/linux') diff --git a/include/linux/pm_opp.h b/include/linux/pm_opp.h index 0a2a88e5a383..473d2c7516f0 100644 --- a/include/linux/pm_opp.h +++ b/include/linux/pm_opp.h @@ -86,6 +86,8 @@ unsigned long dev_pm_opp_get_voltage(struct dev_pm_opp *opp); unsigned long dev_pm_opp_get_freq(struct dev_pm_opp *opp); +unsigned int dev_pm_opp_get_level(struct dev_pm_opp *opp); + bool dev_pm_opp_is_turbo(struct dev_pm_opp *opp); int dev_pm_opp_get_opp_count(struct device *dev); @@ -157,6 +159,11 @@ static inline unsigned long dev_pm_opp_get_freq(struct dev_pm_opp *opp) return 0; } +static inline unsigned int dev_pm_opp_get_level(struct dev_pm_opp *opp) +{ + return 0; +} + static inline bool dev_pm_opp_is_turbo(struct dev_pm_opp *opp) { return false; -- cgit v1.2.3 From ba5ea614622dca6d675b4cc8a97270569ae13a23 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Linus=20L=C3=BCssing?= Date: Mon, 21 Jan 2019 07:26:25 +0100 Subject: bridge: simplify ip_mc_check_igmp() and ipv6_mc_check_mld() calls MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This patch refactors ip_mc_check_igmp(), ipv6_mc_check_mld() and their callers (more precisely, the Linux bridge) to not rely on the skb_trimmed parameter anymore. An skb with its tail trimmed to the IP packet length was initially introduced for the following three reasons:

1) To be able to verify the ICMPv6 checksum.
2) To be able to distinguish the version of an IGMP or MLD query. They are distinguishable only by their size.
3) To avoid parsing data for an IGMPv3 or MLDv2 report that is beyond the IP packet but still within the skb.

The first case still uses a cloned and potentially trimmed skb to verify. However, there is no need to propagate it to the caller. For the second and third case explicit IP packet length checks were added. This hopefully makes ip_mc_check_igmp() and ipv6_mc_check_mld() easier to read and verify, as well as easier to use. Signed-off-by: Linus Lüssing Signed-off-by: David S.
Miller --- include/linux/igmp.h | 11 ++++++++++- include/linux/ip.h | 5 +++++ include/linux/ipv6.h | 6 ++++++ 3 files changed, 21 insertions(+), 1 deletion(-) (limited to 'include/linux') diff --git a/include/linux/igmp.h b/include/linux/igmp.h index 119f53941c12..8b4348f69bc5 100644 --- a/include/linux/igmp.h +++ b/include/linux/igmp.h @@ -18,6 +18,7 @@ #include #include #include +#include #include #include @@ -106,6 +107,14 @@ struct ip_mc_list { #define IGMPV3_QQIC(value) IGMPV3_EXP(0x80, 4, 3, value) #define IGMPV3_MRC(value) IGMPV3_EXP(0x80, 4, 3, value) +static inline int ip_mc_may_pull(struct sk_buff *skb, unsigned int len) +{ + if (skb_transport_offset(skb) + ip_transport_len(skb) < len) + return -EINVAL; + + return pskb_may_pull(skb, len); +} + extern int ip_check_mc_rcu(struct in_device *dev, __be32 mc_addr, __be32 src_addr, u8 proto); extern int igmp_rcv(struct sk_buff *); extern int ip_mc_join_group(struct sock *sk, struct ip_mreqn *imr); @@ -130,6 +139,6 @@ extern void ip_mc_unmap(struct in_device *); extern void ip_mc_remap(struct in_device *); extern void ip_mc_dec_group(struct in_device *in_dev, __be32 addr); extern void ip_mc_inc_group(struct in_device *in_dev, __be32 addr); -int ip_mc_check_igmp(struct sk_buff *skb, struct sk_buff **skb_trimmed); +int ip_mc_check_igmp(struct sk_buff *skb); #endif diff --git a/include/linux/ip.h b/include/linux/ip.h index 492bc6513533..482b7b7c9f30 100644 --- a/include/linux/ip.h +++ b/include/linux/ip.h @@ -34,4 +34,9 @@ static inline struct iphdr *ipip_hdr(const struct sk_buff *skb) { return (struct iphdr *)skb_transport_header(skb); } + +static inline unsigned int ip_transport_len(const struct sk_buff *skb) +{ + return ntohs(ip_hdr(skb)->tot_len) - skb_network_header_len(skb); +} #endif /* _LINUX_IP_H */ diff --git a/include/linux/ipv6.h b/include/linux/ipv6.h index 495e834c1367..6d45ce784bea 100644 --- a/include/linux/ipv6.h +++ b/include/linux/ipv6.h @@ -104,6 +104,12 @@ static inline struct ipv6hdr *ipipv6_hdr(const struct sk_buff *skb) return (struct ipv6hdr *)skb_transport_header(skb); } +static inline unsigned int ipv6_transport_len(const struct sk_buff *skb) +{ + return ntohs(ipv6_hdr(skb)->payload_len) + sizeof(struct ipv6hdr) - + skb_network_header_len(skb); +} + /* This structure contains results of exthdrs parsing as offsets from skb->nh. -- cgit v1.2.3 From 4b3087c7e37f9e499127201849e33960dc81da11 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Linus=20L=C3=BCssing?= Date: Mon, 21 Jan 2019 07:26:28 +0100 Subject: bridge: Snoop Multicast Router Advertisements MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit When multiple multicast routers are present in a broadcast domain then only one of them will be detectable via IGMP/MLD query snooping. The multicast router with the lowest IP address will become the selected and active querier while all other multicast routers will then refrain from sending queries. To detect such rather silent multicast routers, too, RFC4286 ("Multicast Router Discovery") provides a standardized protocol to detect multicast routers for multicast snooping switches. This patch implements the necessary MRD Advertisement message parsing and after successful processing adds such routers to the internal multicast router list. Signed-off-by: Linus Lüssing Signed-off-by: David S. 
Miller --- include/linux/in.h | 5 +++++ 1 file changed, 5 insertions(+) (limited to 'include/linux') diff --git a/include/linux/in.h b/include/linux/in.h index 31b493734763..435e7f2a513a 100644 --- a/include/linux/in.h +++ b/include/linux/in.h @@ -60,6 +60,11 @@ static inline bool ipv4_is_lbcast(__be32 addr) return addr == htonl(INADDR_BROADCAST); } +static inline bool ipv4_is_all_snoopers(__be32 addr) +{ + return addr == htonl(INADDR_ALLSNOOPERS_GROUP); +} + static inline bool ipv4_is_zeronet(__be32 addr) { return (addr & htonl(0xff000000)) == htonl(0x00000000); -- cgit v1.2.3 From 6815d8b09282c1df8e016bd2fabf25ada6d4462b Mon Sep 17 00:00:00 2001 From: Yangbo Lu Date: Mon, 21 Jan 2019 18:41:39 +0800 Subject: ptp_qoriq: support external trigger stamp FIFO The external trigger stamp FIFO was introduced as a new feature for QorIQ 1588 timer IP block. This patch is to support it by adding a new dts property "fsl,extts-fifo". Any QorIQ 1588 timer supporting this feature is required to add this property in its dts node. In addition, the FIFO should be cleaned up before enabling external trigger interrupts. Otherwise, there will be interrupts immediately just after enabling external trigger interrupts. Signed-off-by: Yangbo Lu Signed-off-by: Vladimir Oltean Signed-off-by: David S. Miller --- include/linux/fsl/ptp_qoriq.h | 3 +++ 1 file changed, 3 insertions(+) (limited to 'include/linux') diff --git a/include/linux/fsl/ptp_qoriq.h b/include/linux/fsl/ptp_qoriq.h index c1f003aadcce..43b4b442f6a4 100644 --- a/include/linux/fsl/ptp_qoriq.h +++ b/include/linux/fsl/ptp_qoriq.h @@ -120,6 +120,8 @@ struct qoriq_ptp_registers { /* Bit definitions for the TMR_STAT register */ #define STAT_VEC_SHIFT (0) /* Timer general purpose status vector */ #define STAT_VEC_MASK (0x3f) +#define ETS1_VLD (1<<24) +#define ETS2_VLD (1<<25) /* Bit definitions for the TMR_PRSC register */ #define PRSC_OCK_SHIFT (0) /* Output clock division/prescale factor. */ @@ -141,6 +143,7 @@ struct qoriq_ptp { struct ptp_clock *clock; struct ptp_clock_info caps; struct resource *rsrc; + bool extts_fifo_support; int irq; int phc_index; u64 alarm_interval; /* for periodic alarm */ -- cgit v1.2.3 From 19df7510d5cf077c2e88a7690fb7617e6d341beb Mon Sep 17 00:00:00 2001 From: Yangbo Lu Date: Mon, 21 Jan 2019 18:41:42 +0800 Subject: ptp: add debugfs support for ptp_qoriq This patch is to add debugfs support for ptp_qoriq. Current debugfs supports to control fiper1/fiper2 loopback mode. If the loopback mode is enabled, the fiper1/fiper2 pulse is looped back into trigger1/ trigger2 input. This is very useful for validating hardware and driver without external hardware. Below is an example to enable fiper1 loopback. echo 1 > /sys/kernel/debug/2d10e00.ptp_clock/fiper1-loopback Signed-off-by: Yangbo Lu Signed-off-by: David S. 
Miller --- include/linux/fsl/ptp_qoriq.h | 12 ++++++++++++ 1 file changed, 12 insertions(+) (limited to 'include/linux') diff --git a/include/linux/fsl/ptp_qoriq.h b/include/linux/fsl/ptp_qoriq.h index 43b4b442f6a4..94e9797e434c 100644 --- a/include/linux/fsl/ptp_qoriq.h +++ b/include/linux/fsl/ptp_qoriq.h @@ -143,6 +143,8 @@ struct qoriq_ptp { struct ptp_clock *clock; struct ptp_clock_info caps; struct resource *rsrc; + struct dentry *debugfs_root; + struct device *dev; bool extts_fifo_support; int irq; int phc_index; @@ -169,4 +171,14 @@ static inline void qoriq_write(unsigned __iomem *addr, u32 val) iowrite32be(val, addr); } +#ifdef CONFIG_DEBUG_FS +void ptp_qoriq_create_debugfs(struct qoriq_ptp *qoriq_ptp); +void ptp_qoriq_remove_debugfs(struct qoriq_ptp *qoriq_ptp); +#else +static inline void ptp_qoriq_create_debugfs(struct qoriq_ptp *qoriq_ptp) +{ } +static inline void ptp_qoriq_remove_debugfs(struct qoriq_ptp *qoriq_ptp) +{ } +#endif + #endif -- cgit v1.2.3 From 51eea52d26d4939b788b7244c28cf47e902b4c4c Mon Sep 17 00:00:00 2001 From: Lubomir Rintel Date: Wed, 16 Jan 2019 16:13:31 +0100 Subject: pxa2xx: replace spi_master with spi_controller It's also a slave controller driver now, calling it "master" is slightly misleading. Signed-off-by: Lubomir Rintel Acked-by: Robert Jarzmik Signed-off-by: Mark Brown --- include/linux/spi/pxa2xx_spi.h | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'include/linux') diff --git a/include/linux/spi/pxa2xx_spi.h b/include/linux/spi/pxa2xx_spi.h index b0674e330ef6..c1c59473cef9 100644 --- a/include/linux/spi/pxa2xx_spi.h +++ b/include/linux/spi/pxa2xx_spi.h @@ -22,7 +22,7 @@ struct dma_chan; /* device.platform_data for SSP controller devices */ -struct pxa2xx_spi_master { +struct pxa2xx_spi_controller { u16 num_chipselect; u8 enable_dma; bool is_slave; @@ -54,7 +54,7 @@ struct pxa2xx_spi_chip { #include -extern void pxa2xx_set_spi_info(unsigned id, struct pxa2xx_spi_master *info); +extern void pxa2xx_set_spi_info(unsigned id, struct pxa2xx_spi_controller *info); #endif #endif -- cgit v1.2.3 From a2d21848d9211dad5e786aa7368709ca8938834e Mon Sep 17 00:00:00 2001 From: Matti Vaittinen Date: Tue, 22 Jan 2019 11:42:24 +0200 Subject: regmap: regmap-irq: Add main status register support There is bunch of devices with multiple logical blocks which can generate interrupts. It's not a rare case that the interrupt reason registers are arranged so that there is own status/ack/mask register for each logical block. In some devices there is also a 'main interrupt register(s)' which can indicate what sub blocks have interrupts pending. When such a device is connected via slow bus like i2c the main part of interrupt handling latency can be caused by bus accesses. On systems where it is expected that only one (or few) sub blocks have active interrupts we can reduce the latency by only reading the main register and those sub registers which have active interrupts. Support this with regmap-irq for simple cases where main register does not require acking or masking. 
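A hypothetical chip description using the new fields might look as follows (a sketch only, not taken from any real driver); the register addresses, the bit-to-sub-register mapping and the IRQ list are all made up.

#include <linux/bitops.h>
#include <linux/kernel.h>
#include <linux/regmap.h>

/* Main status bit 0 points at sub status register 0, bit 1 at register 1. */
static unsigned int bit0_offsets[] = { 0 };
static unsigned int bit1_offsets[] = { 1 };

static struct regmap_irq_sub_irq_map example_sub_irq_offsets[] = {
	REGMAP_IRQ_MAIN_REG_OFFSET(bit0_offsets),
	REGMAP_IRQ_MAIN_REG_OFFSET(bit1_offsets),
};

static const struct regmap_irq example_irqs[] = {
	REGMAP_IRQ_REG(0, 0, BIT(0)),	/* IRQ 0 lives in sub register 0 */
	REGMAP_IRQ_REG(1, 1, BIT(0)),	/* IRQ 1 lives in sub register 1 */
};

static struct regmap_irq_chip example_irq_chip = {
	.name = "example-irq",
	.main_status = 0x10,		/* assumed main status register */
	.num_main_regs = 1,
	.sub_reg_offsets = example_sub_irq_offsets,
	.status_base = 0x11,		/* assumed sub status registers 0x11/0x12 */
	.mask_base = 0x21,
	.ack_base = 0x31,
	.irqs = example_irqs,
	.num_irqs = ARRAY_SIZE(example_irqs),
	.num_regs = 2,
};

Registration would then go through regmap_add_irq_chip() or its devm_ variant as before; on an interrupt, only the sub registers flagged in the main status register need to be read.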
Signed-off-by: Matti Vaittinen Signed-off-by: Mark Brown --- include/linux/regmap.h | 31 +++++++++++++++++++++++++++++++ 1 file changed, 31 insertions(+) (limited to 'include/linux') diff --git a/include/linux/regmap.h b/include/linux/regmap.h index 1781b6cb793c..daeec7dbd65c 100644 --- a/include/linux/regmap.h +++ b/include/linux/regmap.h @@ -1131,11 +1131,37 @@ struct regmap_irq { .reg_offset = (_id) / (_reg_bits), \ } +#define REGMAP_IRQ_MAIN_REG_OFFSET(arr) \ + { .num_regs = ARRAY_SIZE((arr)), .offset = &(arr)[0] } + +struct regmap_irq_sub_irq_map { + unsigned int num_regs; + unsigned int *offset; +}; + /** * struct regmap_irq_chip - Description of a generic regmap irq_chip. * * @name: Descriptive name for IRQ controller. * + * @main_status: Base main status register address. For chips which have + * interrupts arranged in separate sub-irq blocks with own IRQ + * registers and which have a main IRQ registers indicating + * sub-irq blocks with unhandled interrupts. For such chips fill + * sub-irq register information in status_base, mask_base and + * ack_base. + * @num_main_status_bits: Should be given to chips where number of meaningfull + * main status bits differs from num_regs. + * @sub_reg_offsets: arrays of mappings from main register bits to sub irq + * registers. First item in array describes the registers + * for first main status bit. Second array for second bit etc. + * Offset is given as sub register status offset to + * status_base. Should contain num_regs arrays. + * Can be provided for chips with more complex mapping than + * 1.st bit to 1.st sub-reg, 2.nd bit to 2.nd sub-reg, ... + * @num_main_regs: Number of 'main status' irq registers for chips which have + * main_status set. + * * @status_base: Base status register address. * @mask_base: Base mask register address. * @mask_writeonly: Base mask register is write only. @@ -1181,6 +1207,11 @@ struct regmap_irq { struct regmap_irq_chip { const char *name; + unsigned int main_status; + unsigned int num_main_status_bits; + struct regmap_irq_sub_irq_map *sub_reg_offsets; + int num_main_regs; + unsigned int status_base; unsigned int mask_base; unsigned int unmask_base; -- cgit v1.2.3 From f0125f1a559be1033055f44e511174aaa75b60cc Mon Sep 17 00:00:00 2001 From: Mark Brown Date: Wed, 23 Jan 2019 17:29:53 +0000 Subject: spi: Go back to immediate teardown Commit 412e6037324 ("spi: core: avoid waking pump thread from spi_sync instead run teardown delayed") introduced regressions on some boards, apparently connected to spi_mem not triggering shutdown properly any more. Since we've thus far been unable to figure out exactly where the breakage is revert the optimisation for now. 
Reported-by: Jon Hunter Signed-off-by: Mark Brown Cc: kernel@martin.sperl.org --- include/linux/spi/spi.h | 2 -- 1 file changed, 2 deletions(-) (limited to 'include/linux') diff --git a/include/linux/spi/spi.h b/include/linux/spi/spi.h index 79ad62e2487c..916bba47d156 100644 --- a/include/linux/spi/spi.h +++ b/include/linux/spi/spi.h @@ -334,7 +334,6 @@ static inline void spi_unregister_driver(struct spi_driver *sdrv) * @kworker: thread struct for message pump * @kworker_task: pointer to task for message pump kworker thread * @pump_messages: work struct for scheduling work to the message pump - * @pump_idle_teardown: work structure for scheduling a teardown delayed * @queue_lock: spinlock to syncronise access to message queue * @queue: message queue * @idling: the device is entering idle state @@ -533,7 +532,6 @@ struct spi_controller { struct kthread_worker kworker; struct task_struct *kworker_task; struct kthread_work pump_messages; - struct kthread_delayed_work pump_idle_teardown; spinlock_t queue_lock; struct list_head queue; struct spi_message *cur_msg; -- cgit v1.2.3 From 52875a04f4b26e7ef30a288ea096f7cfec0e93cd Mon Sep 17 00:00:00 2001 From: Jakub Kicinski Date: Tue, 22 Jan 2019 22:45:20 -0800 Subject: bpf: verifier: remove dead code Instead of overwriting dead code with jmp -1 instructions remove it completely for root. Adjust verifier state and line info appropriately. v2: - adjust func_info (Alexei); - make sure first instruction retains line info (Alexei). v4: (Yonghong) - remove unnecessary if (!insn to remove) checks; - always keep last line info if first live instruction lacks one. v5: (Martin Lau) - improve and clarify comments. Signed-off-by: Jakub Kicinski Acked-by: Yonghong Song Signed-off-by: Alexei Starovoitov --- include/linux/filter.h | 1 + 1 file changed, 1 insertion(+) (limited to 'include/linux') diff --git a/include/linux/filter.h b/include/linux/filter.h index ad106d845b22..be9af6b4a9e4 100644 --- a/include/linux/filter.h +++ b/include/linux/filter.h @@ -778,6 +778,7 @@ static inline bool bpf_dump_raw_ok(void) struct bpf_prog *bpf_patch_insn_single(struct bpf_prog *prog, u32 off, const struct bpf_insn *patch, u32 len); +int bpf_remove_insns(struct bpf_prog *prog, u32 off, u32 cnt); void bpf_clear_redirect_map(struct bpf_map *map); -- cgit v1.2.3 From 9e4c24e7ee7dfd3898269519103e823892b730d8 Mon Sep 17 00:00:00 2001 From: Jakub Kicinski Date: Tue, 22 Jan 2019 22:45:23 -0800 Subject: bpf: verifier: record original instruction index The communication between the verifier and advanced JITs is based on instruction indexes. We have to keep them stable throughout the optimizations otherwise referring to a particular instruction gets messy quickly. 
Signed-off-by: Jakub Kicinski Reviewed-by: Quentin Monnet Signed-off-by: Alexei Starovoitov --- include/linux/bpf_verifier.h | 1 + 1 file changed, 1 insertion(+) (limited to 'include/linux') diff --git a/include/linux/bpf_verifier.h b/include/linux/bpf_verifier.h index 573cca00a0e6..f3ae00ee5516 100644 --- a/include/linux/bpf_verifier.h +++ b/include/linux/bpf_verifier.h @@ -187,6 +187,7 @@ struct bpf_insn_aux_data { int sanitize_stack_off; /* stack slot to be cleared */ bool seen; /* this insn was processed by the verifier */ u8 alu_state; /* used in combination with alu_limit */ + unsigned int orig_idx; /* original instruction index */ }; #define MAX_USED_MAPS 64 /* max number of maps accessed by one eBPF program */ -- cgit v1.2.3 From 08ca90afba255d05dc3253caa44056e7aecbe8c5 Mon Sep 17 00:00:00 2001 From: Jakub Kicinski Date: Tue, 22 Jan 2019 22:45:24 -0800 Subject: bpf: notify offload JITs about optimizations Let offload JITs know when instructions are replaced and optimized out, so they can update their state appropriately. The optimizations are best effort, if JIT returns an error from any callback verifier will stop notifying it as state may now be out of sync, but the verifier continues making progress. Signed-off-by: Jakub Kicinski Reviewed-by: Quentin Monnet Signed-off-by: Alexei Starovoitov --- include/linux/bpf.h | 7 +++++++ include/linux/bpf_verifier.h | 5 +++++ 2 files changed, 12 insertions(+) (limited to 'include/linux') diff --git a/include/linux/bpf.h b/include/linux/bpf.h index e734f163bd0b..3851529062ec 100644 --- a/include/linux/bpf.h +++ b/include/linux/bpf.h @@ -268,9 +268,15 @@ struct bpf_verifier_ops { }; struct bpf_prog_offload_ops { + /* verifier basic callbacks */ int (*insn_hook)(struct bpf_verifier_env *env, int insn_idx, int prev_insn_idx); int (*finalize)(struct bpf_verifier_env *env); + /* verifier optimization callbacks (called after .finalize) */ + int (*replace_insn)(struct bpf_verifier_env *env, u32 off, + struct bpf_insn *insn); + int (*remove_insns)(struct bpf_verifier_env *env, u32 off, u32 cnt); + /* program management callbacks */ int (*prepare)(struct bpf_prog *prog); int (*translate)(struct bpf_prog *prog); void (*destroy)(struct bpf_prog *prog); @@ -283,6 +289,7 @@ struct bpf_prog_offload { void *dev_priv; struct list_head offloads; bool dev_state; + bool opt_failed; void *jited_image; u32 jited_len; }; diff --git a/include/linux/bpf_verifier.h b/include/linux/bpf_verifier.h index f3ae00ee5516..0620e418dde5 100644 --- a/include/linux/bpf_verifier.h +++ b/include/linux/bpf_verifier.h @@ -266,5 +266,10 @@ int bpf_prog_offload_verifier_prep(struct bpf_prog *prog); int bpf_prog_offload_verify_insn(struct bpf_verifier_env *env, int insn_idx, int prev_insn_idx); int bpf_prog_offload_finalize(struct bpf_verifier_env *env); +void +bpf_prog_offload_replace_insn(struct bpf_verifier_env *env, u32 off, + struct bpf_insn *insn); +void +bpf_prog_offload_remove_insns(struct bpf_verifier_env *env, u32 off, u32 cnt); #endif /* _LINUX_BPF_VERIFIER_H */ -- cgit v1.2.3 From 643fa9612bf1a29153eee46fd398117632f93cbe Mon Sep 17 00:00:00 2001 From: Chandan Rajendra Date: Wed, 12 Dec 2018 15:20:12 +0530 Subject: fscrypt: remove filesystem specific build config option In order to have a common code base for fscrypt "post read" processing for all filesystems which support encryption, this commit removes filesystem specific build config option (e.g. CONFIG_EXT4_FS_ENCRYPTION) and replaces it with a build option (i.e. 
CONFIG_FS_ENCRYPTION) whose value affects all the filesystems making use of fscrypt. Reviewed-by: Eric Biggers Signed-off-by: Chandan Rajendra Signed-off-by: Eric Biggers --- include/linux/fs.h | 4 +- include/linux/fscrypt.h | 416 +++++++++++++++++++++++++++++++++++++++- include/linux/fscrypt_notsupp.h | 231 ---------------------- include/linux/fscrypt_supp.h | 204 -------------------- 4 files changed, 410 insertions(+), 445 deletions(-) delete mode 100644 include/linux/fscrypt_notsupp.h delete mode 100644 include/linux/fscrypt_supp.h (limited to 'include/linux') diff --git a/include/linux/fs.h b/include/linux/fs.h index 811c77743dad..ba7889bb9ef6 100644 --- a/include/linux/fs.h +++ b/include/linux/fs.h @@ -698,7 +698,7 @@ struct inode { struct fsnotify_mark_connector __rcu *i_fsnotify_marks; #endif -#if IS_ENABLED(CONFIG_FS_ENCRYPTION) +#ifdef CONFIG_FS_ENCRYPTION struct fscrypt_info *i_crypt_info; #endif @@ -1403,7 +1403,7 @@ struct super_block { void *s_security; #endif const struct xattr_handler **s_xattr; -#if IS_ENABLED(CONFIG_FS_ENCRYPTION) +#ifdef CONFIG_FS_ENCRYPTION const struct fscrypt_operations *s_cop; #endif struct hlist_bl_head s_roots; /* alternate root dentries for NFS */ diff --git a/include/linux/fscrypt.h b/include/linux/fscrypt.h index 952ab97af325..eec604840568 100644 --- a/include/linux/fscrypt.h +++ b/include/linux/fscrypt.h @@ -2,9 +2,8 @@ /* * fscrypt.h: declarations for per-file encryption * - * Filesystems that implement per-file encryption include this header - * file with the __FS_HAS_ENCRYPTION set according to whether that filesystem - * is being built with encryption support or not. + * Filesystems that implement per-file encryption must include this header + * file. * * Copyright (C) 2015, Google, Inc. * @@ -15,6 +14,8 @@ #define _LINUX_FSCRYPT_H #include +#include +#include #define FS_CRYPTO_BLOCK_SIZE 16 @@ -42,11 +43,410 @@ struct fscrypt_name { /* Maximum value for the third parameter of fscrypt_operations.set_context(). 
*/ #define FSCRYPT_SET_CONTEXT_MAX_SIZE 28 -#if __FS_HAS_ENCRYPTION -#include -#else -#include -#endif +#ifdef CONFIG_FS_ENCRYPTION +/* + * fscrypt superblock flags + */ +#define FS_CFLG_OWN_PAGES (1U << 1) + +/* + * crypto operations for filesystems + */ +struct fscrypt_operations { + unsigned int flags; + const char *key_prefix; + int (*get_context)(struct inode *, void *, size_t); + int (*set_context)(struct inode *, const void *, size_t, void *); + bool (*dummy_context)(struct inode *); + bool (*empty_dir)(struct inode *); + unsigned int max_namelen; +}; + +struct fscrypt_ctx { + union { + struct { + struct page *bounce_page; /* Ciphertext page */ + struct page *control_page; /* Original page */ + } w; + struct { + struct bio *bio; + struct work_struct work; + } r; + struct list_head free_list; /* Free list */ + }; + u8 flags; /* Flags */ +}; + +static inline bool fscrypt_has_encryption_key(const struct inode *inode) +{ + return (inode->i_crypt_info != NULL); +} + +static inline bool fscrypt_dummy_context_enabled(struct inode *inode) +{ + return inode->i_sb->s_cop->dummy_context && + inode->i_sb->s_cop->dummy_context(inode); +} + +/* crypto.c */ +extern void fscrypt_enqueue_decrypt_work(struct work_struct *); +extern struct fscrypt_ctx *fscrypt_get_ctx(const struct inode *, gfp_t); +extern void fscrypt_release_ctx(struct fscrypt_ctx *); +extern struct page *fscrypt_encrypt_page(const struct inode *, struct page *, + unsigned int, unsigned int, + u64, gfp_t); +extern int fscrypt_decrypt_page(const struct inode *, struct page *, unsigned int, + unsigned int, u64); + +static inline struct page *fscrypt_control_page(struct page *page) +{ + return ((struct fscrypt_ctx *)page_private(page))->w.control_page; +} + +extern void fscrypt_restore_control_page(struct page *); + +/* policy.c */ +extern int fscrypt_ioctl_set_policy(struct file *, const void __user *); +extern int fscrypt_ioctl_get_policy(struct file *, void __user *); +extern int fscrypt_has_permitted_context(struct inode *, struct inode *); +extern int fscrypt_inherit_context(struct inode *, struct inode *, + void *, bool); +/* keyinfo.c */ +extern int fscrypt_get_encryption_info(struct inode *); +extern void fscrypt_put_encryption_info(struct inode *); + +/* fname.c */ +extern int fscrypt_setup_filename(struct inode *, const struct qstr *, + int lookup, struct fscrypt_name *); + +static inline void fscrypt_free_filename(struct fscrypt_name *fname) +{ + kfree(fname->crypto_buf.name); +} + +extern int fscrypt_fname_alloc_buffer(const struct inode *, u32, + struct fscrypt_str *); +extern void fscrypt_fname_free_buffer(struct fscrypt_str *); +extern int fscrypt_fname_disk_to_usr(struct inode *, u32, u32, + const struct fscrypt_str *, struct fscrypt_str *); + +#define FSCRYPT_FNAME_MAX_UNDIGESTED_SIZE 32 + +/* Extracts the second-to-last ciphertext block; see explanation below */ +#define FSCRYPT_FNAME_DIGEST(name, len) \ + ((name) + round_down((len) - FS_CRYPTO_BLOCK_SIZE - 1, \ + FS_CRYPTO_BLOCK_SIZE)) + +#define FSCRYPT_FNAME_DIGEST_SIZE FS_CRYPTO_BLOCK_SIZE + +/** + * fscrypt_digested_name - alternate identifier for an on-disk filename + * + * When userspace lists an encrypted directory without access to the key, + * filenames whose ciphertext is longer than FSCRYPT_FNAME_MAX_UNDIGESTED_SIZE + * bytes are shown in this abbreviated form (base64-encoded) rather than as the + * full ciphertext (base64-encoded). This is necessary to allow supporting + * filenames up to NAME_MAX bytes, since base64 encoding expands the length. 
+ * + * To make it possible for filesystems to still find the correct directory entry + * despite not knowing the full on-disk name, we encode any filesystem-specific + * 'hash' and/or 'minor_hash' which the filesystem may need for its lookups, + * followed by the second-to-last ciphertext block of the filename. Due to the + * use of the CBC-CTS encryption mode, the second-to-last ciphertext block + * depends on the full plaintext. (Note that ciphertext stealing causes the + * last two blocks to appear "flipped".) This makes accidental collisions very + * unlikely: just a 1 in 2^128 chance for two filenames to collide even if they + * share the same filesystem-specific hashes. + * + * However, this scheme isn't immune to intentional collisions, which can be + * created by anyone able to create arbitrary plaintext filenames and view them + * without the key. Making the "digest" be a real cryptographic hash like + * SHA-256 over the full ciphertext would prevent this, although it would be + * less efficient and harder to implement, especially since the filesystem would + * need to calculate it for each directory entry examined during a search. + */ +struct fscrypt_digested_name { + u32 hash; + u32 minor_hash; + u8 digest[FSCRYPT_FNAME_DIGEST_SIZE]; +}; + +/** + * fscrypt_match_name() - test whether the given name matches a directory entry + * @fname: the name being searched for + * @de_name: the name from the directory entry + * @de_name_len: the length of @de_name in bytes + * + * Normally @fname->disk_name will be set, and in that case we simply compare + * that to the name stored in the directory entry. The only exception is that + * if we don't have the key for an encrypted directory and a filename in it is + * very long, then we won't have the full disk_name and we'll instead need to + * match against the fscrypt_digested_name. + * + * Return: %true if the name matches, otherwise %false. 
+ */ +static inline bool fscrypt_match_name(const struct fscrypt_name *fname, + const u8 *de_name, u32 de_name_len) +{ + if (unlikely(!fname->disk_name.name)) { + const struct fscrypt_digested_name *n = + (const void *)fname->crypto_buf.name; + if (WARN_ON_ONCE(fname->usr_fname->name[0] != '_')) + return false; + if (de_name_len <= FSCRYPT_FNAME_MAX_UNDIGESTED_SIZE) + return false; + return !memcmp(FSCRYPT_FNAME_DIGEST(de_name, de_name_len), + n->digest, FSCRYPT_FNAME_DIGEST_SIZE); + } + + if (de_name_len != fname->disk_name.len) + return false; + return !memcmp(de_name, fname->disk_name.name, fname->disk_name.len); +} + +/* bio.c */ +extern void fscrypt_decrypt_bio(struct bio *); +extern void fscrypt_enqueue_decrypt_bio(struct fscrypt_ctx *ctx, + struct bio *bio); +extern void fscrypt_pullback_bio_page(struct page **, bool); +extern int fscrypt_zeroout_range(const struct inode *, pgoff_t, sector_t, + unsigned int); + +/* hooks.c */ +extern int fscrypt_file_open(struct inode *inode, struct file *filp); +extern int __fscrypt_prepare_link(struct inode *inode, struct inode *dir); +extern int __fscrypt_prepare_rename(struct inode *old_dir, + struct dentry *old_dentry, + struct inode *new_dir, + struct dentry *new_dentry, + unsigned int flags); +extern int __fscrypt_prepare_lookup(struct inode *dir, struct dentry *dentry); +extern int __fscrypt_prepare_symlink(struct inode *dir, unsigned int len, + unsigned int max_len, + struct fscrypt_str *disk_link); +extern int __fscrypt_encrypt_symlink(struct inode *inode, const char *target, + unsigned int len, + struct fscrypt_str *disk_link); +extern const char *fscrypt_get_symlink(struct inode *inode, const void *caddr, + unsigned int max_size, + struct delayed_call *done); +#else /* !CONFIG_FS_ENCRYPTION */ + +static inline bool fscrypt_has_encryption_key(const struct inode *inode) +{ + return false; +} + +static inline bool fscrypt_dummy_context_enabled(struct inode *inode) +{ + return false; +} + +/* crypto.c */ +static inline void fscrypt_enqueue_decrypt_work(struct work_struct *work) +{ +} + +static inline struct fscrypt_ctx *fscrypt_get_ctx(const struct inode *inode, + gfp_t gfp_flags) +{ + return ERR_PTR(-EOPNOTSUPP); +} + +static inline void fscrypt_release_ctx(struct fscrypt_ctx *ctx) +{ + return; +} + +static inline struct page *fscrypt_encrypt_page(const struct inode *inode, + struct page *page, + unsigned int len, + unsigned int offs, + u64 lblk_num, gfp_t gfp_flags) +{ + return ERR_PTR(-EOPNOTSUPP); +} + +static inline int fscrypt_decrypt_page(const struct inode *inode, + struct page *page, + unsigned int len, unsigned int offs, + u64 lblk_num) +{ + return -EOPNOTSUPP; +} + +static inline struct page *fscrypt_control_page(struct page *page) +{ + WARN_ON_ONCE(1); + return ERR_PTR(-EINVAL); +} + +static inline void fscrypt_restore_control_page(struct page *page) +{ + return; +} + +/* policy.c */ +static inline int fscrypt_ioctl_set_policy(struct file *filp, + const void __user *arg) +{ + return -EOPNOTSUPP; +} + +static inline int fscrypt_ioctl_get_policy(struct file *filp, void __user *arg) +{ + return -EOPNOTSUPP; +} + +static inline int fscrypt_has_permitted_context(struct inode *parent, + struct inode *child) +{ + return 0; +} + +static inline int fscrypt_inherit_context(struct inode *parent, + struct inode *child, + void *fs_data, bool preload) +{ + return -EOPNOTSUPP; +} + +/* keyinfo.c */ +static inline int fscrypt_get_encryption_info(struct inode *inode) +{ + return -EOPNOTSUPP; +} + +static inline void 
fscrypt_put_encryption_info(struct inode *inode) +{ + return; +} + + /* fname.c */ +static inline int fscrypt_setup_filename(struct inode *dir, + const struct qstr *iname, + int lookup, struct fscrypt_name *fname) +{ + if (IS_ENCRYPTED(dir)) + return -EOPNOTSUPP; + + memset(fname, 0, sizeof(struct fscrypt_name)); + fname->usr_fname = iname; + fname->disk_name.name = (unsigned char *)iname->name; + fname->disk_name.len = iname->len; + return 0; +} + +static inline void fscrypt_free_filename(struct fscrypt_name *fname) +{ + return; +} + +static inline int fscrypt_fname_alloc_buffer(const struct inode *inode, + u32 max_encrypted_len, + struct fscrypt_str *crypto_str) +{ + return -EOPNOTSUPP; +} + +static inline void fscrypt_fname_free_buffer(struct fscrypt_str *crypto_str) +{ + return; +} + +static inline int fscrypt_fname_disk_to_usr(struct inode *inode, + u32 hash, u32 minor_hash, + const struct fscrypt_str *iname, + struct fscrypt_str *oname) +{ + return -EOPNOTSUPP; +} + +static inline bool fscrypt_match_name(const struct fscrypt_name *fname, + const u8 *de_name, u32 de_name_len) +{ + /* Encryption support disabled; use standard comparison */ + if (de_name_len != fname->disk_name.len) + return false; + return !memcmp(de_name, fname->disk_name.name, fname->disk_name.len); +} + +/* bio.c */ +static inline void fscrypt_decrypt_bio(struct bio *bio) +{ +} + +static inline void fscrypt_enqueue_decrypt_bio(struct fscrypt_ctx *ctx, + struct bio *bio) +{ +} + +static inline void fscrypt_pullback_bio_page(struct page **page, bool restore) +{ + return; +} + +static inline int fscrypt_zeroout_range(const struct inode *inode, pgoff_t lblk, + sector_t pblk, unsigned int len) +{ + return -EOPNOTSUPP; +} + +/* hooks.c */ + +static inline int fscrypt_file_open(struct inode *inode, struct file *filp) +{ + if (IS_ENCRYPTED(inode)) + return -EOPNOTSUPP; + return 0; +} + +static inline int __fscrypt_prepare_link(struct inode *inode, + struct inode *dir) +{ + return -EOPNOTSUPP; +} + +static inline int __fscrypt_prepare_rename(struct inode *old_dir, + struct dentry *old_dentry, + struct inode *new_dir, + struct dentry *new_dentry, + unsigned int flags) +{ + return -EOPNOTSUPP; +} + +static inline int __fscrypt_prepare_lookup(struct inode *dir, + struct dentry *dentry) +{ + return -EOPNOTSUPP; +} + +static inline int __fscrypt_prepare_symlink(struct inode *dir, + unsigned int len, + unsigned int max_len, + struct fscrypt_str *disk_link) +{ + return -EOPNOTSUPP; +} + + +static inline int __fscrypt_encrypt_symlink(struct inode *inode, + const char *target, + unsigned int len, + struct fscrypt_str *disk_link) +{ + return -EOPNOTSUPP; +} + +static inline const char *fscrypt_get_symlink(struct inode *inode, + const void *caddr, + unsigned int max_size, + struct delayed_call *done) +{ + return ERR_PTR(-EOPNOTSUPP); +} +#endif /* !CONFIG_FS_ENCRYPTION */ /** * fscrypt_require_key - require an inode's encryption key diff --git a/include/linux/fscrypt_notsupp.h b/include/linux/fscrypt_notsupp.h deleted file mode 100644 index ee8b43e4c15a..000000000000 --- a/include/linux/fscrypt_notsupp.h +++ /dev/null @@ -1,231 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -/* - * fscrypt_notsupp.h - * - * This stubs out the fscrypt functions for filesystems configured without - * encryption support. - * - * Do not include this file directly. Use fscrypt.h instead! - */ -#ifndef _LINUX_FSCRYPT_H -#error "Incorrect include of linux/fscrypt_notsupp.h!" 
-#endif - -#ifndef _LINUX_FSCRYPT_NOTSUPP_H -#define _LINUX_FSCRYPT_NOTSUPP_H - -static inline bool fscrypt_has_encryption_key(const struct inode *inode) -{ - return false; -} - -static inline bool fscrypt_dummy_context_enabled(struct inode *inode) -{ - return false; -} - -/* crypto.c */ -static inline void fscrypt_enqueue_decrypt_work(struct work_struct *work) -{ -} - -static inline struct fscrypt_ctx *fscrypt_get_ctx(const struct inode *inode, - gfp_t gfp_flags) -{ - return ERR_PTR(-EOPNOTSUPP); -} - -static inline void fscrypt_release_ctx(struct fscrypt_ctx *ctx) -{ - return; -} - -static inline struct page *fscrypt_encrypt_page(const struct inode *inode, - struct page *page, - unsigned int len, - unsigned int offs, - u64 lblk_num, gfp_t gfp_flags) -{ - return ERR_PTR(-EOPNOTSUPP); -} - -static inline int fscrypt_decrypt_page(const struct inode *inode, - struct page *page, - unsigned int len, unsigned int offs, - u64 lblk_num) -{ - return -EOPNOTSUPP; -} - -static inline struct page *fscrypt_control_page(struct page *page) -{ - WARN_ON_ONCE(1); - return ERR_PTR(-EINVAL); -} - -static inline void fscrypt_restore_control_page(struct page *page) -{ - return; -} - -/* policy.c */ -static inline int fscrypt_ioctl_set_policy(struct file *filp, - const void __user *arg) -{ - return -EOPNOTSUPP; -} - -static inline int fscrypt_ioctl_get_policy(struct file *filp, void __user *arg) -{ - return -EOPNOTSUPP; -} - -static inline int fscrypt_has_permitted_context(struct inode *parent, - struct inode *child) -{ - return 0; -} - -static inline int fscrypt_inherit_context(struct inode *parent, - struct inode *child, - void *fs_data, bool preload) -{ - return -EOPNOTSUPP; -} - -/* keyinfo.c */ -static inline int fscrypt_get_encryption_info(struct inode *inode) -{ - return -EOPNOTSUPP; -} - -static inline void fscrypt_put_encryption_info(struct inode *inode) -{ - return; -} - - /* fname.c */ -static inline int fscrypt_setup_filename(struct inode *dir, - const struct qstr *iname, - int lookup, struct fscrypt_name *fname) -{ - if (IS_ENCRYPTED(dir)) - return -EOPNOTSUPP; - - memset(fname, 0, sizeof(struct fscrypt_name)); - fname->usr_fname = iname; - fname->disk_name.name = (unsigned char *)iname->name; - fname->disk_name.len = iname->len; - return 0; -} - -static inline void fscrypt_free_filename(struct fscrypt_name *fname) -{ - return; -} - -static inline int fscrypt_fname_alloc_buffer(const struct inode *inode, - u32 max_encrypted_len, - struct fscrypt_str *crypto_str) -{ - return -EOPNOTSUPP; -} - -static inline void fscrypt_fname_free_buffer(struct fscrypt_str *crypto_str) -{ - return; -} - -static inline int fscrypt_fname_disk_to_usr(struct inode *inode, - u32 hash, u32 minor_hash, - const struct fscrypt_str *iname, - struct fscrypt_str *oname) -{ - return -EOPNOTSUPP; -} - -static inline bool fscrypt_match_name(const struct fscrypt_name *fname, - const u8 *de_name, u32 de_name_len) -{ - /* Encryption support disabled; use standard comparison */ - if (de_name_len != fname->disk_name.len) - return false; - return !memcmp(de_name, fname->disk_name.name, fname->disk_name.len); -} - -/* bio.c */ -static inline void fscrypt_decrypt_bio(struct bio *bio) -{ -} - -static inline void fscrypt_enqueue_decrypt_bio(struct fscrypt_ctx *ctx, - struct bio *bio) -{ -} - -static inline void fscrypt_pullback_bio_page(struct page **page, bool restore) -{ - return; -} - -static inline int fscrypt_zeroout_range(const struct inode *inode, pgoff_t lblk, - sector_t pblk, unsigned int len) -{ - return -EOPNOTSUPP; -} - -/* 
hooks.c */ - -static inline int fscrypt_file_open(struct inode *inode, struct file *filp) -{ - if (IS_ENCRYPTED(inode)) - return -EOPNOTSUPP; - return 0; -} - -static inline int __fscrypt_prepare_link(struct inode *inode, - struct inode *dir) -{ - return -EOPNOTSUPP; -} - -static inline int __fscrypt_prepare_rename(struct inode *old_dir, - struct dentry *old_dentry, - struct inode *new_dir, - struct dentry *new_dentry, - unsigned int flags) -{ - return -EOPNOTSUPP; -} - -static inline int __fscrypt_prepare_lookup(struct inode *dir, - struct dentry *dentry) -{ - return -EOPNOTSUPP; -} - -static inline int __fscrypt_prepare_symlink(struct inode *dir, - unsigned int len, - unsigned int max_len, - struct fscrypt_str *disk_link) -{ - return -EOPNOTSUPP; -} - -static inline int __fscrypt_encrypt_symlink(struct inode *inode, - const char *target, - unsigned int len, - struct fscrypt_str *disk_link) -{ - return -EOPNOTSUPP; -} - -static inline const char *fscrypt_get_symlink(struct inode *inode, - const void *caddr, - unsigned int max_size, - struct delayed_call *done) -{ - return ERR_PTR(-EOPNOTSUPP); -} - -#endif /* _LINUX_FSCRYPT_NOTSUPP_H */ diff --git a/include/linux/fscrypt_supp.h b/include/linux/fscrypt_supp.h deleted file mode 100644 index 6456c6b2005f..000000000000 --- a/include/linux/fscrypt_supp.h +++ /dev/null @@ -1,204 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -/* - * fscrypt_supp.h - * - * Do not include this file directly. Use fscrypt.h instead! - */ -#ifndef _LINUX_FSCRYPT_H -#error "Incorrect include of linux/fscrypt_supp.h!" -#endif - -#ifndef _LINUX_FSCRYPT_SUPP_H -#define _LINUX_FSCRYPT_SUPP_H - -#include -#include - -/* - * fscrypt superblock flags - */ -#define FS_CFLG_OWN_PAGES (1U << 1) - -/* - * crypto operations for filesystems - */ -struct fscrypt_operations { - unsigned int flags; - const char *key_prefix; - int (*get_context)(struct inode *, void *, size_t); - int (*set_context)(struct inode *, const void *, size_t, void *); - bool (*dummy_context)(struct inode *); - bool (*empty_dir)(struct inode *); - unsigned int max_namelen; -}; - -struct fscrypt_ctx { - union { - struct { - struct page *bounce_page; /* Ciphertext page */ - struct page *control_page; /* Original page */ - } w; - struct { - struct bio *bio; - struct work_struct work; - } r; - struct list_head free_list; /* Free list */ - }; - u8 flags; /* Flags */ -}; - -static inline bool fscrypt_has_encryption_key(const struct inode *inode) -{ - return (inode->i_crypt_info != NULL); -} - -static inline bool fscrypt_dummy_context_enabled(struct inode *inode) -{ - return inode->i_sb->s_cop->dummy_context && - inode->i_sb->s_cop->dummy_context(inode); -} - -/* crypto.c */ -extern void fscrypt_enqueue_decrypt_work(struct work_struct *); -extern struct fscrypt_ctx *fscrypt_get_ctx(const struct inode *, gfp_t); -extern void fscrypt_release_ctx(struct fscrypt_ctx *); -extern struct page *fscrypt_encrypt_page(const struct inode *, struct page *, - unsigned int, unsigned int, - u64, gfp_t); -extern int fscrypt_decrypt_page(const struct inode *, struct page *, unsigned int, - unsigned int, u64); - -static inline struct page *fscrypt_control_page(struct page *page) -{ - return ((struct fscrypt_ctx *)page_private(page))->w.control_page; -} - -extern void fscrypt_restore_control_page(struct page *); - -/* policy.c */ -extern int fscrypt_ioctl_set_policy(struct file *, const void __user *); -extern int fscrypt_ioctl_get_policy(struct file *, void __user *); -extern int fscrypt_has_permitted_context(struct inode *, struct 
inode *); -extern int fscrypt_inherit_context(struct inode *, struct inode *, - void *, bool); -/* keyinfo.c */ -extern int fscrypt_get_encryption_info(struct inode *); -extern void fscrypt_put_encryption_info(struct inode *); - -/* fname.c */ -extern int fscrypt_setup_filename(struct inode *, const struct qstr *, - int lookup, struct fscrypt_name *); - -static inline void fscrypt_free_filename(struct fscrypt_name *fname) -{ - kfree(fname->crypto_buf.name); -} - -extern int fscrypt_fname_alloc_buffer(const struct inode *, u32, - struct fscrypt_str *); -extern void fscrypt_fname_free_buffer(struct fscrypt_str *); -extern int fscrypt_fname_disk_to_usr(struct inode *, u32, u32, - const struct fscrypt_str *, struct fscrypt_str *); - -#define FSCRYPT_FNAME_MAX_UNDIGESTED_SIZE 32 - -/* Extracts the second-to-last ciphertext block; see explanation below */ -#define FSCRYPT_FNAME_DIGEST(name, len) \ - ((name) + round_down((len) - FS_CRYPTO_BLOCK_SIZE - 1, \ - FS_CRYPTO_BLOCK_SIZE)) - -#define FSCRYPT_FNAME_DIGEST_SIZE FS_CRYPTO_BLOCK_SIZE - -/** - * fscrypt_digested_name - alternate identifier for an on-disk filename - * - * When userspace lists an encrypted directory without access to the key, - * filenames whose ciphertext is longer than FSCRYPT_FNAME_MAX_UNDIGESTED_SIZE - * bytes are shown in this abbreviated form (base64-encoded) rather than as the - * full ciphertext (base64-encoded). This is necessary to allow supporting - * filenames up to NAME_MAX bytes, since base64 encoding expands the length. - * - * To make it possible for filesystems to still find the correct directory entry - * despite not knowing the full on-disk name, we encode any filesystem-specific - * 'hash' and/or 'minor_hash' which the filesystem may need for its lookups, - * followed by the second-to-last ciphertext block of the filename. Due to the - * use of the CBC-CTS encryption mode, the second-to-last ciphertext block - * depends on the full plaintext. (Note that ciphertext stealing causes the - * last two blocks to appear "flipped".) This makes accidental collisions very - * unlikely: just a 1 in 2^128 chance for two filenames to collide even if they - * share the same filesystem-specific hashes. - * - * However, this scheme isn't immune to intentional collisions, which can be - * created by anyone able to create arbitrary plaintext filenames and view them - * without the key. Making the "digest" be a real cryptographic hash like - * SHA-256 over the full ciphertext would prevent this, although it would be - * less efficient and harder to implement, especially since the filesystem would - * need to calculate it for each directory entry examined during a search. - */ -struct fscrypt_digested_name { - u32 hash; - u32 minor_hash; - u8 digest[FSCRYPT_FNAME_DIGEST_SIZE]; -}; - -/** - * fscrypt_match_name() - test whether the given name matches a directory entry - * @fname: the name being searched for - * @de_name: the name from the directory entry - * @de_name_len: the length of @de_name in bytes - * - * Normally @fname->disk_name will be set, and in that case we simply compare - * that to the name stored in the directory entry. The only exception is that - * if we don't have the key for an encrypted directory and a filename in it is - * very long, then we won't have the full disk_name and we'll instead need to - * match against the fscrypt_digested_name. - * - * Return: %true if the name matches, otherwise %false. 
- */ -static inline bool fscrypt_match_name(const struct fscrypt_name *fname, - const u8 *de_name, u32 de_name_len) -{ - if (unlikely(!fname->disk_name.name)) { - const struct fscrypt_digested_name *n = - (const void *)fname->crypto_buf.name; - if (WARN_ON_ONCE(fname->usr_fname->name[0] != '_')) - return false; - if (de_name_len <= FSCRYPT_FNAME_MAX_UNDIGESTED_SIZE) - return false; - return !memcmp(FSCRYPT_FNAME_DIGEST(de_name, de_name_len), - n->digest, FSCRYPT_FNAME_DIGEST_SIZE); - } - - if (de_name_len != fname->disk_name.len) - return false; - return !memcmp(de_name, fname->disk_name.name, fname->disk_name.len); -} - -/* bio.c */ -extern void fscrypt_decrypt_bio(struct bio *); -extern void fscrypt_enqueue_decrypt_bio(struct fscrypt_ctx *ctx, - struct bio *bio); -extern void fscrypt_pullback_bio_page(struct page **, bool); -extern int fscrypt_zeroout_range(const struct inode *, pgoff_t, sector_t, - unsigned int); - -/* hooks.c */ -extern int fscrypt_file_open(struct inode *inode, struct file *filp); -extern int __fscrypt_prepare_link(struct inode *inode, struct inode *dir); -extern int __fscrypt_prepare_rename(struct inode *old_dir, - struct dentry *old_dentry, - struct inode *new_dir, - struct dentry *new_dentry, - unsigned int flags); -extern int __fscrypt_prepare_lookup(struct inode *dir, struct dentry *dentry); -extern int __fscrypt_prepare_symlink(struct inode *dir, unsigned int len, - unsigned int max_len, - struct fscrypt_str *disk_link); -extern int __fscrypt_encrypt_symlink(struct inode *inode, const char *target, - unsigned int len, - struct fscrypt_str *disk_link); -extern const char *fscrypt_get_symlink(struct inode *inode, const void *caddr, - unsigned int max_size, - struct delayed_call *done); - -#endif /* _LINUX_FSCRYPT_SUPP_H */ -- cgit v1.2.3 From f5e55e777cc93eae1416f0fa4908e8846b6d7825 Mon Sep 17 00:00:00 2001 From: Eric Biggers Date: Tue, 22 Jan 2019 16:20:21 -0800 Subject: fscrypt: return -EXDEV for incompatible rename or link into encrypted dir Currently, trying to rename or link a regular file, directory, or symlink into an encrypted directory fails with EPERM when the source file is unencrypted or is encrypted with a different encryption policy, and is on the same mountpoint. It is correct for the operation to fail, but the choice of EPERM breaks tools like 'mv' that know to copy rather than rename if they see EXDEV, but don't know what to do with EPERM. Our original motivation for EPERM was to encourage users to securely handle their data. Encrypting files by "moving" them into an encrypted directory can be insecure because the unencrypted data may remain in free space on disk, where it can later be recovered by an attacker. It's much better to encrypt the data from the start, or at least try to securely delete the source data e.g. using the 'shred' program. However, the current behavior hasn't been effective at achieving its goal because users tend to be confused, hack around it, and complain; see e.g. https://github.com/google/fscrypt/issues/76. And in some cases it's actually inconsistent or unnecessary. For example, 'mv'-ing files between differently encrypted directories doesn't work even in cases where it can be secure, such as when in userspace the same passphrase protects both directories. Yet, you *can* already 'mv' unencrypted files into an encrypted directory if the source files are on a different mountpoint, even though doing so is often insecure. There are probably better ways to teach users to securely handle their files. 
For example, the 'fscrypt' userspace tool could provide a command that migrates unencrypted files into an encrypted directory, acting like 'shred' on the source files and providing appropriate warnings depending on the type of the source filesystem and disk. Receiving errors on unimportant files might also force some users to disable encryption, thus making the behavior counterproductive. It's desirable to make encryption as unobtrusive as possible. Therefore, change the error code from EPERM to EXDEV so that tools looking for EXDEV will fall back to a copy. This, of course, doesn't prevent users from still doing the right things to securely manage their files. Note that this also matches the behavior when a file is renamed between two project quota hierarchies; so there's precedent for using EXDEV for things other than mountpoints. xfstests generic/398 will require an update with this change. [Rewritten from an earlier patch series by Michael Halcrow.] Cc: Michael Halcrow Cc: Joe Richey Signed-off-by: Eric Biggers --- include/linux/fscrypt.h | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'include/linux') diff --git a/include/linux/fscrypt.h b/include/linux/fscrypt.h index eec604840568..e5194fc3983e 100644 --- a/include/linux/fscrypt.h +++ b/include/linux/fscrypt.h @@ -489,7 +489,7 @@ static inline int fscrypt_require_key(struct inode *inode) * in an encrypted directory tree use the same encryption policy. * * Return: 0 on success, -ENOKEY if the directory's encryption key is missing, - * -EPERM if the link would result in an inconsistent encryption policy, or + * -EXDEV if the link would result in an inconsistent encryption policy, or * another -errno code. */ static inline int fscrypt_prepare_link(struct dentry *old_dentry, @@ -519,7 +519,7 @@ static inline int fscrypt_prepare_link(struct dentry *old_dentry, * We also verify that the rename will not violate the constraint that all files * in an encrypted directory tree use the same encryption policy. * - * Return: 0 on success, -ENOKEY if an encryption key is missing, -EPERM if the + * Return: 0 on success, -ENOKEY if an encryption key is missing, -EXDEV if the * rename would cause inconsistent encryption policies, or another -errno code. */ static inline int fscrypt_prepare_rename(struct inode *old_dir, -- cgit v1.2.3 From e355477ed9e4f401e3931043df97325d38552d54 Mon Sep 17 00:00:00 2001 From: Jason Gunthorpe Date: Fri, 18 Jan 2019 16:33:10 -0800 Subject: net/mlx5: Make mlx5_cmd_exec_cb() a safe API APIs that have deferred callbacks should have some kind of cleanup function that callers can use to fence the callbacks. Otherwise things like module unloading can lead to dangling function pointers, or worse. The IB MR code is the only place that calls this function and had a really poor attempt at creating this fence. Provide a good version in the core code as future patches will add more places that need this fence. 
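As a rough illustration of the intended usage (building only on the prototypes added below; the wrapper names, the embedding structure and the callback body are made up), a caller initialises an async context, issues commands against it, and uses the cleanup call as the fence for any callbacks still in flight:

struct my_work {
	struct mlx5_async_work cb_work;	/* embedded so the callback can use container_of() */
	/* ... caller-private state ... */
};

static void my_cmd_done(int status, struct mlx5_async_work *context)
{
	struct my_work *w = container_of(context, struct my_work, cb_work);

	/* consume the result; guaranteed to have run before
	 * mlx5_cmd_cleanup_async_ctx() returns */
}

static int my_issue(struct mlx5_core_dev *dev, struct mlx5_async_ctx *ctx,
		    void *in, int in_sz, void *out, int out_sz,
		    struct my_work *w)
{
	mlx5_cmd_init_async_ctx(dev, ctx);	/* typically done once at setup */
	return mlx5_cmd_exec_cb(ctx, in, in_sz, out, out_sz,
				my_cmd_done, &w->cb_work);
}

static void my_teardown(struct mlx5_async_ctx *ctx)
{
	/* fences: waits for every callback issued against @ctx */
	mlx5_cmd_cleanup_async_ctx(ctx);
}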
Signed-off-by: Jason Gunthorpe Signed-off-by: Yishai Hadas Signed-off-by: Saeed Mahameed Signed-off-by: Leon Romanovsky --- include/linux/mlx5/driver.h | 32 ++++++++++++++++++++++++++------ 1 file changed, 26 insertions(+), 6 deletions(-) (limited to 'include/linux') diff --git a/include/linux/mlx5/driver.h b/include/linux/mlx5/driver.h index 4e444863054a..039c9398614c 100644 --- a/include/linux/mlx5/driver.h +++ b/include/linux/mlx5/driver.h @@ -850,11 +850,30 @@ void mlx5_cmd_cleanup(struct mlx5_core_dev *dev); void mlx5_cmd_use_events(struct mlx5_core_dev *dev); void mlx5_cmd_use_polling(struct mlx5_core_dev *dev); +struct mlx5_async_ctx { + struct mlx5_core_dev *dev; + atomic_t num_inflight; + struct wait_queue_head wait; +}; + +struct mlx5_async_work; + +typedef void (*mlx5_async_cbk_t)(int status, struct mlx5_async_work *context); + +struct mlx5_async_work { + struct mlx5_async_ctx *ctx; + mlx5_async_cbk_t user_callback; +}; + +void mlx5_cmd_init_async_ctx(struct mlx5_core_dev *dev, + struct mlx5_async_ctx *ctx); +void mlx5_cmd_cleanup_async_ctx(struct mlx5_async_ctx *ctx); +int mlx5_cmd_exec_cb(struct mlx5_async_ctx *ctx, void *in, int in_size, + void *out, int out_size, mlx5_async_cbk_t callback, + struct mlx5_async_work *work); + int mlx5_cmd_exec(struct mlx5_core_dev *dev, void *in, int in_size, void *out, int out_size); -int mlx5_cmd_exec_cb(struct mlx5_core_dev *dev, void *in, int in_size, - void *out, int out_size, mlx5_cmd_cbk_t callback, - void *context); int mlx5_cmd_exec_polling(struct mlx5_core_dev *dev, void *in, int in_size, void *out, int out_size); void mlx5_cmd_mbox_status(void *out, u8 *status, u32 *syndrome); @@ -885,9 +904,10 @@ void mlx5_init_mkey_table(struct mlx5_core_dev *dev); void mlx5_cleanup_mkey_table(struct mlx5_core_dev *dev); int mlx5_core_create_mkey_cb(struct mlx5_core_dev *dev, struct mlx5_core_mkey *mkey, - u32 *in, int inlen, - u32 *out, int outlen, - mlx5_cmd_cbk_t callback, void *context); + struct mlx5_async_ctx *async_ctx, u32 *in, + int inlen, u32 *out, int outlen, + mlx5_async_cbk_t callback, + struct mlx5_async_work *context); int mlx5_core_create_mkey(struct mlx5_core_dev *dev, struct mlx5_core_mkey *mkey, u32 *in, int inlen); -- cgit v1.2.3 From ef74f70e5a10cc2a78cc5529e564170cabcda9af Mon Sep 17 00:00:00 2001 From: Brian Masney Date: Sat, 19 Jan 2019 15:42:42 -0500 Subject: gpio: add irq domain activate/deactivate functions This adds the two new functions gpiochip_irq_domain_activate and gpiochip_irq_domain_deactivate that can be used as the activate and deactivate functions in the struct irq_domain_ops. This is for situations where only gpiochip_{lock,unlock}_as_irq needs to be called. SPMI and SSBI GPIO are two users that will initially use these functions. 
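A driver that needs nothing beyond the lock/unlock-as-IRQ handling can then point its irq_domain_ops at the new helpers directly; the ops structure name below is hypothetical and the other callbacks are whatever the driver already uses:

static const struct irq_domain_ops my_gpio_irq_domain_ops = {
	.map		= gpiochip_irq_map,
	.unmap		= gpiochip_irq_unmap,
	.activate	= gpiochip_irq_domain_activate,
	.deactivate	= gpiochip_irq_domain_deactivate,
};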
Signed-off-by: Brian Masney Suggested-by: Stephen Boyd Reviewed-by: Stephen Boyd Signed-off-by: Linus Walleij --- include/linux/gpio/driver.h | 5 +++++ 1 file changed, 5 insertions(+) (limited to 'include/linux') diff --git a/include/linux/gpio/driver.h b/include/linux/gpio/driver.h index 07cddbf45186..01497910f023 100644 --- a/include/linux/gpio/driver.h +++ b/include/linux/gpio/driver.h @@ -472,6 +472,11 @@ int gpiochip_irq_map(struct irq_domain *d, unsigned int irq, irq_hw_number_t hwirq); void gpiochip_irq_unmap(struct irq_domain *d, unsigned int irq); +int gpiochip_irq_domain_activate(struct irq_domain *domain, + struct irq_data *data, bool reserve); +void gpiochip_irq_domain_deactivate(struct irq_domain *domain, + struct irq_data *data); + void gpiochip_set_chained_irqchip(struct gpio_chip *gpiochip, struct irq_chip *irqchip, unsigned int parent_irq, -- cgit v1.2.3 From 434a4315b9617bf1742bc64712bf44a208502f7f Mon Sep 17 00:00:00 2001 From: Heiner Kallweit Date: Wed, 23 Jan 2019 07:31:58 +0100 Subject: net: phy: change phy_start_interrupts to phy_request_interrupt Now that we enable the interrupts in phy_start() we don't have to do it before. Therefore remove enabling interrupts from phy_start_interrupts() and rename this function to reflect the changed functionality. v2: - improve warning to clearly state that we fall back to polling Signed-off-by: Heiner Kallweit Signed-off-by: David S. Miller --- include/linux/phy.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'include/linux') diff --git a/include/linux/phy.h b/include/linux/phy.h index 1f3873a2ff29..70f83d0d7469 100644 --- a/include/linux/phy.h +++ b/include/linux/phy.h @@ -1047,7 +1047,7 @@ void phy_ethtool_ksettings_get(struct phy_device *phydev, int phy_ethtool_ksettings_set(struct phy_device *phydev, const struct ethtool_link_ksettings *cmd); int phy_mii_ioctl(struct phy_device *phydev, struct ifreq *ifr, int cmd); -int phy_start_interrupts(struct phy_device *phydev); +void phy_request_interrupt(struct phy_device *phydev); void phy_print_status(struct phy_device *phydev); int phy_set_max_speed(struct phy_device *phydev, u32 max_speed); void phy_remove_link_mode(struct phy_device *phydev, u32 link_mode); -- cgit v1.2.3 From 231baecdef7a906579925ccf1bd45aa734f32320 Mon Sep 17 00:00:00 2001 From: Eric Biggers Date: Fri, 18 Jan 2019 22:48:00 -0800 Subject: crypto: clarify name of WEAK_KEY request flag CRYPTO_TFM_REQ_WEAK_KEY confuses newcomers to the crypto API because it sounds like it is requesting a weak key. Actually, it is requesting that weak keys be forbidden (for algorithms that have the notion of "weak keys"; currently only DES and XTS do). Also it is only one letter away from CRYPTO_TFM_RES_WEAK_KEY, with which it can be easily confused. (This in fact happened in the UX500 driver, though just in some debugging messages.) Therefore, make the intent clear by renaming it to CRYPTO_TFM_REQ_FORBID_WEAK_KEYS. 
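With the rename, a setkey path that wants weak keys rejected reads, for example (a minimal sketch; crypto_skcipher_set_flags() is assumed here as the usual way to set request flags on the transform):

#include <crypto/skcipher.h>

static int my_setkey(struct crypto_skcipher *tfm, const u8 *key,
		     unsigned int keylen)
{
	crypto_skcipher_set_flags(tfm, CRYPTO_TFM_REQ_FORBID_WEAK_KEYS);
	return crypto_skcipher_setkey(tfm, key, keylen);	/* weak DES keys now refused */
}

which is much harder to misread as "give me a weak key".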
Signed-off-by: Eric Biggers Signed-off-by: Herbert Xu --- include/linux/crypto.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'include/linux') diff --git a/include/linux/crypto.h b/include/linux/crypto.h index c3c98a62e503..f2565a103158 100644 --- a/include/linux/crypto.h +++ b/include/linux/crypto.h @@ -118,7 +118,7 @@ #define CRYPTO_TFM_REQ_MASK 0x000fff00 #define CRYPTO_TFM_RES_MASK 0xfff00000 -#define CRYPTO_TFM_REQ_WEAK_KEY 0x00000100 +#define CRYPTO_TFM_REQ_FORBID_WEAK_KEYS 0x00000100 #define CRYPTO_TFM_REQ_MAY_SLEEP 0x00000200 #define CRYPTO_TFM_REQ_MAY_BACKLOG 0x00000400 #define CRYPTO_TFM_RES_WEAK_KEY 0x00100000 -- cgit v1.2.3 From 275f22148e8720e84b180d9e0cdf8abfd69bac5b Mon Sep 17 00:00:00 2001 From: Arnd Bergmann Date: Mon, 31 Dec 2018 22:22:40 +0100 Subject: ipc: rename old-style shmctl/semctl/msgctl syscalls The behavior of these system calls is slightly different between architectures, as determined by the CONFIG_ARCH_WANT_IPC_PARSE_VERSION symbol. Most architectures that implement the split IPC syscalls don't set that symbol and only get the modern version, but alpha, arm, microblaze, mips-n32, mips-n64 and xtensa expect the caller to pass the IPC_64 flag. For the architectures that so far only implement sys_ipc(), i.e. m68k, mips-o32, powerpc, s390, sh, sparc, and x86-32, we want the new behavior when adding the split syscalls, so we need to distinguish between the two groups of architectures. The method I picked for this distinction is to have a separate system call entry point: sys_old_*ctl() now uses ipc_parse_version, while sys_*ctl() does not. The system call tables of the five architectures are changed accordingly. As an additional benefit, we no longer need the configuration specific definition for ipc_parse_version(), it always does the same thing now, but simply won't get called on architectures with the modern interface. A small downside is that on architectures that do set ARCH_WANT_IPC_PARSE_VERSION, we now have an extra set of entry points that are never called. They only add a few bytes of bloat, so it seems better to keep them compared to adding yet another Kconfig symbol. I considered adding new syscall numbers for the IPC_64 variants for consistency, but decided against that for now. 
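Concretely, the distinction lives entirely in the entry points: sys_old_*ctl() still runs the IPC_64 parsing step, while sys_*ctl() assumes the modern ABI unconditionally. In rough outline (paraphrasing the existing helper rather than quoting it), the parsing that only the old entry points retain looks like:

static int ipc_parse_version(int *cmd)
{
	if (*cmd & IPC_64) {
		*cmd ^= IPC_64;	/* strip the flag, caller wants the new layout */
		return IPC_64;
	}
	return IPC_OLD;		/* legacy structure layout */
}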
Signed-off-by: Arnd Bergmann --- include/linux/syscalls.h | 3 +++ 1 file changed, 3 insertions(+) (limited to 'include/linux') diff --git a/include/linux/syscalls.h b/include/linux/syscalls.h index fb63045a0fb6..938d8908b9e0 100644 --- a/include/linux/syscalls.h +++ b/include/linux/syscalls.h @@ -717,6 +717,7 @@ asmlinkage long sys_mq_getsetattr(mqd_t mqdes, const struct mq_attr __user *mqst /* ipc/msg.c */ asmlinkage long sys_msgget(key_t key, int msgflg); +asmlinkage long sys_old_msgctl(int msqid, int cmd, struct msqid_ds __user *buf); asmlinkage long sys_msgctl(int msqid, int cmd, struct msqid_ds __user *buf); asmlinkage long sys_msgrcv(int msqid, struct msgbuf __user *msgp, size_t msgsz, long msgtyp, int msgflg); @@ -726,6 +727,7 @@ asmlinkage long sys_msgsnd(int msqid, struct msgbuf __user *msgp, /* ipc/sem.c */ asmlinkage long sys_semget(key_t key, int nsems, int semflg); asmlinkage long sys_semctl(int semid, int semnum, int cmd, unsigned long arg); +asmlinkage long sys_old_semctl(int semid, int semnum, int cmd, unsigned long arg); asmlinkage long sys_semtimedop(int semid, struct sembuf __user *sops, unsigned nsops, const struct __kernel_timespec __user *timeout); @@ -734,6 +736,7 @@ asmlinkage long sys_semop(int semid, struct sembuf __user *sops, /* ipc/shm.c */ asmlinkage long sys_shmget(key_t key, size_t size, int flag); +asmlinkage long sys_old_shmctl(int shmid, int cmd, struct shmid_ds __user *buf); asmlinkage long sys_shmctl(int shmid, int cmd, struct shmid_ds __user *buf); asmlinkage long sys_shmat(int shmid, char __user *shmaddr, int shmflg); asmlinkage long sys_shmdt(char __user *shmaddr); -- cgit v1.2.3 From 4b7d248b3a1de483ffe9d05c1debbf32a544164d Mon Sep 17 00:00:00 2001 From: Richard Guy Briggs Date: Tue, 22 Jan 2019 17:06:39 -0500 Subject: audit: move loginuid and sessionid from CONFIG_AUDITSYSCALL to CONFIG_AUDIT loginuid and sessionid (and audit_log_session_info) should be part of CONFIG_AUDIT scope and not CONFIG_AUDITSYSCALL since it is used in CONFIG_CHANGE, ANOM_LINK, FEATURE_CHANGE (and INTEGRITY_RULE), none of which are otherwise dependent on AUDITSYSCALL. 
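The practical effect is that record types emitted from CONFIG_AUDIT-only code can now include the caller's login UID and session ID; a hedged sketch (the helper name is invented, the accessors are the ones being moved here):

static void my_log_session(struct audit_buffer *ab)
{
	audit_log_format(ab, " auid=%u ses=%u",
			 from_kuid(&init_user_ns, audit_get_loginuid(current)),
			 audit_get_sessionid(current));
}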
Please see github issue https://github.com/linux-audit/audit-kernel/issues/104 Signed-off-by: Richard Guy Briggs [PM: tweaked subject line for better grep'ing] Signed-off-by: Paul Moore --- include/linux/audit.h | 42 +++++++++++++++++++++++------------------- include/linux/sched.h | 2 +- 2 files changed, 24 insertions(+), 20 deletions(-) (limited to 'include/linux') diff --git a/include/linux/audit.h b/include/linux/audit.h index a625c29a2ea2..ecb5d317d6a2 100644 --- a/include/linux/audit.h +++ b/include/linux/audit.h @@ -159,6 +159,18 @@ extern int audit_update_lsm_rules(void); extern int audit_rule_change(int type, int seq, void *data, size_t datasz); extern int audit_list_rules_send(struct sk_buff *request_skb, int seq); +extern int audit_set_loginuid(kuid_t loginuid); + +static inline kuid_t audit_get_loginuid(struct task_struct *tsk) +{ + return tsk->loginuid; +} + +static inline unsigned int audit_get_sessionid(struct task_struct *tsk) +{ + return tsk->sessionid; +} + extern u32 audit_enabled; #else /* CONFIG_AUDIT */ static inline __printf(4, 5) @@ -201,6 +213,17 @@ static inline int audit_log_task_context(struct audit_buffer *ab) } static inline void audit_log_task_info(struct audit_buffer *ab) { } + +static inline kuid_t audit_get_loginuid(struct task_struct *tsk) +{ + return INVALID_UID; +} + +static inline unsigned int audit_get_sessionid(struct task_struct *tsk) +{ + return AUDIT_SID_UNSET; +} + #define audit_enabled AUDIT_OFF #endif /* CONFIG_AUDIT */ @@ -323,17 +346,6 @@ static inline void audit_ptrace(struct task_struct *t) extern unsigned int audit_serial(void); extern int auditsc_get_stamp(struct audit_context *ctx, struct timespec64 *t, unsigned int *serial); -extern int audit_set_loginuid(kuid_t loginuid); - -static inline kuid_t audit_get_loginuid(struct task_struct *tsk) -{ - return tsk->loginuid; -} - -static inline unsigned int audit_get_sessionid(struct task_struct *tsk) -{ - return tsk->sessionid; -} extern void __audit_ipc_obj(struct kern_ipc_perm *ipcp); extern void __audit_ipc_set_perm(unsigned long qbytes, uid_t uid, gid_t gid, umode_t mode); @@ -519,14 +531,6 @@ static inline int auditsc_get_stamp(struct audit_context *ctx, { return 0; } -static inline kuid_t audit_get_loginuid(struct task_struct *tsk) -{ - return INVALID_UID; -} -static inline unsigned int audit_get_sessionid(struct task_struct *tsk) -{ - return AUDIT_SID_UNSET; -} static inline void audit_ipc_obj(struct kern_ipc_perm *ipcp) { } static inline void audit_ipc_set_perm(unsigned long qbytes, uid_t uid, diff --git a/include/linux/sched.h b/include/linux/sched.h index 89541d248893..f9788bb122c5 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h @@ -886,7 +886,7 @@ struct task_struct { struct callback_head *task_works; struct audit_context *audit_context; -#ifdef CONFIG_AUDITSYSCALL +#ifdef CONFIG_AUDIT kuid_t loginuid; unsigned int sessionid; #endif -- cgit v1.2.3 From 2fec30e245a3b46fef89c4cb1f74eefc5fbb29a6 Mon Sep 17 00:00:00 2001 From: Richard Guy Briggs Date: Wed, 23 Jan 2019 21:36:25 -0500 Subject: audit: add support for fcaps v3 V3 namespaced file capabilities were introduced in commit 8db6c34f1dbc ("Introduce v3 namespaced file capabilities") Add support for these by adding the "frootid" field to the existing fcaps fields in the NAME and BPRM_FCAPS records. 
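For orientation, the v3 xattr format being audited carries a root ID next to the capability masks; the layout below is reproduced from memory of the v3 format added by 8db6c34f1dbc (see include/uapi/linux/capability.h for the authoritative definition):

struct vfs_ns_cap_data {
	__le32 magic_etc;		/* version and effective bit */
	struct {
		__le32 permitted;
		__le32 inheritable;
	} data[VFS_CAP_U32];
	__le32 rootid;			/* reported as "frootid" in audit records */
};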
Please see github issue https://github.com/linux-audit/audit-kernel/issues/103 Signed-off-by: Richard Guy Briggs Acked-by: Serge Hallyn [PM: comment tweak to fit an 80 char line width] Signed-off-by: Paul Moore --- include/linux/capability.h | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) (limited to 'include/linux') diff --git a/include/linux/capability.h b/include/linux/capability.h index f640dcbc880c..b769330e9380 100644 --- a/include/linux/capability.h +++ b/include/linux/capability.h @@ -14,7 +14,7 @@ #define _LINUX_CAPABILITY_H #include - +#include #define _KERNEL_CAPABILITY_VERSION _LINUX_CAPABILITY_VERSION_3 #define _KERNEL_CAPABILITY_U32S _LINUX_CAPABILITY_U32S_3 @@ -25,11 +25,12 @@ typedef struct kernel_cap_struct { __u32 cap[_KERNEL_CAPABILITY_U32S]; } kernel_cap_t; -/* exact same as vfs_cap_data but in cpu endian and always filled completely */ +/* same as vfs_ns_cap_data but in cpu endian and always filled completely */ struct cpu_vfs_cap_data { __u32 magic_etc; kernel_cap_t permitted; kernel_cap_t inheritable; + kuid_t rootid; }; #define _USER_CAP_HEADER_SIZE (sizeof(struct __user_cap_header_struct)) -- cgit v1.2.3 From 40852275a94afb3e836be9248399e036982d1a79 Mon Sep 17 00:00:00 2001 From: Micah Morton Date: Tue, 22 Jan 2019 14:42:09 -0800 Subject: LSM: add SafeSetID module that gates setid calls This change ensures that the set*uid family of syscalls in kernel/sys.c (setreuid, setuid, setresuid, setfsuid) all call ns_capable_common with the CAP_OPT_INSETID flag, so capability checks in the security_capable hook can know whether they are being called from within a set*uid syscall. This change is a no-op by itself, but is needed for the proposed SafeSetID LSM. Signed-off-by: Micah Morton Acked-by: Kees Cook Signed-off-by: James Morris --- include/linux/capability.h | 5 +++++ 1 file changed, 5 insertions(+) (limited to 'include/linux') diff --git a/include/linux/capability.h b/include/linux/capability.h index f640dcbc880c..c3f9a4d558a0 100644 --- a/include/linux/capability.h +++ b/include/linux/capability.h @@ -209,6 +209,7 @@ extern bool has_ns_capability_noaudit(struct task_struct *t, extern bool capable(int cap); extern bool ns_capable(struct user_namespace *ns, int cap); extern bool ns_capable_noaudit(struct user_namespace *ns, int cap); +extern bool ns_capable_setid(struct user_namespace *ns, int cap); #else static inline bool has_capability(struct task_struct *t, int cap) { @@ -240,6 +241,10 @@ static inline bool ns_capable_noaudit(struct user_namespace *ns, int cap) { return true; } +static inline bool ns_capable_setid(struct user_namespace *ns, int cap) +{ + return true; +} #endif /* CONFIG_MULTIUSER */ extern bool privileged_wrt_inode_uidgid(struct user_namespace *ns, const struct inode *inode); extern bool capable_wrt_inode_uidgid(const struct inode *inode, int cap); -- cgit v1.2.3 From 6ba7d681aca22e53385bdb35b1d7662e61905760 Mon Sep 17 00:00:00 2001 From: "Paul E. McKenney" Date: Wed, 9 Jan 2019 15:22:03 -0800 Subject: rcu: Remove wrapper definitions for obsolete RCU update functions None of synchronize_rcu_bh, synchronize_rcu_bh_expedited, call_rcu_bh, rcu_barrier_bh, synchronize_sched, synchronize_sched_expedited, call_rcu_sched, rcu_barrier_sched, get_state_synchronize_sched, and cond_synchronize_sched are actually used. This commit therefore removes their trivial wrapper-function definitions. Signed-off-by: Paul E. 
McKenney --- include/linux/rcupdate.h | 53 ------------------------------------------------ 1 file changed, 53 deletions(-) (limited to 'include/linux') diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h index 4db8bcacc51a..0e39e0d2629e 100644 --- a/include/linux/rcupdate.h +++ b/include/linux/rcupdate.h @@ -896,57 +896,4 @@ rcu_head_after_call_rcu(struct rcu_head *rhp, rcu_callback_t f) return false; } - -/* Transitional pre-consolidation compatibility definitions. */ - -static inline void synchronize_rcu_bh(void) -{ - synchronize_rcu(); -} - -static inline void synchronize_rcu_bh_expedited(void) -{ - synchronize_rcu_expedited(); -} - -static inline void call_rcu_bh(struct rcu_head *head, rcu_callback_t func) -{ - call_rcu(head, func); -} - -static inline void rcu_barrier_bh(void) -{ - rcu_barrier(); -} - -static inline void synchronize_sched(void) -{ - synchronize_rcu(); -} - -static inline void synchronize_sched_expedited(void) -{ - synchronize_rcu_expedited(); -} - -static inline void call_rcu_sched(struct rcu_head *head, rcu_callback_t func) -{ - call_rcu(head, func); -} - -static inline void rcu_barrier_sched(void) -{ - rcu_barrier(); -} - -static inline unsigned long get_state_synchronize_sched(void) -{ - return get_state_synchronize_rcu(); -} - -static inline void cond_synchronize_sched(unsigned long oldstate) -{ - cond_synchronize_rcu(oldstate); -} - #endif /* __LINUX_RCUPDATE_H */ -- cgit v1.2.3 From 2aa5503026ceaa8860697b93c9e5bbbcd025ba89 Mon Sep 17 00:00:00 2001 From: "Paul E. McKenney" Date: Tue, 20 Nov 2018 08:29:35 -0800 Subject: rcu: Docbook for rcu_head_init() and rcu_head_after_call_rcu() This commit adds the missing asterisks required to make Sphinx pick up the current header comments for these two functions. Signed-off-by: Paul E. McKenney --- include/linux/rcupdate.h | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) (limited to 'include/linux') diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h index 0e39e0d2629e..632113946757 100644 --- a/include/linux/rcupdate.h +++ b/include/linux/rcupdate.h @@ -859,7 +859,7 @@ static inline notrace void rcu_read_unlock_sched_notrace(void) /* Has the specified rcu_head structure been handed to call_rcu()? */ -/* +/** * rcu_head_init - Initialize rcu_head for rcu_head_after_call_rcu() * @rhp: The rcu_head structure to initialize. * @@ -874,10 +874,10 @@ static inline void rcu_head_init(struct rcu_head *rhp) rhp->func = (rcu_callback_t)~0L; } -/* +/** * rcu_head_after_call_rcu - Has this rcu_head been passed to call_rcu()? * @rhp: The rcu_head structure to test. - * @func: The function passed to call_rcu() along with @rhp. + * @f: The function passed to call_rcu() along with @rhp. * * Returns @true if the @rhp has been passed to call_rcu() with @func, * and @false otherwise. Emits a warning in any other case, including -- cgit v1.2.3 From c98cac603f1ce7d00e2a802b5640bced3bc3c1f2 Mon Sep 17 00:00:00 2001 From: "Paul E. McKenney" Date: Wed, 21 Nov 2018 11:35:03 -0800 Subject: rcu: Rename rcu_check_callbacks() to rcu_sched_clock_irq() The name rcu_check_callbacks() arguably made sense back in the early 2000s when RCU was quite a bit simpler than it is today, but it has become quite misleading, especially with the advent of dyntick-idle and NO_HZ_FULL. The rcu_check_callbacks() function is RCU's hook into the scheduling-clock interrupt, and is now but one of many ways that callbacks get promoted to invocable state. 
This commit therefore changes the name to rcu_sched_clock_irq(), which is the same number of characters and clearly indicates this function's relation to the rest of the Linux kernel. In addition, for the sake of consistency, rcu_flavor_check_callbacks() is also renamed to rcu_flavor_sched_clock_irq(). While in the area, the header comments for both functions are reworked. Signed-off-by: Paul E. McKenney --- include/linux/rcupdate.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'include/linux') diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h index 632113946757..6f8f047c4068 100644 --- a/include/linux/rcupdate.h +++ b/include/linux/rcupdate.h @@ -89,7 +89,7 @@ static inline int rcu_preempt_depth(void) /* Internal to kernel */ void rcu_init(void); extern int rcu_scheduler_active __read_mostly; -void rcu_check_callbacks(int user); +void rcu_sched_clock_irq(int user); void rcu_report_dead(unsigned int cpu); void rcutree_migrate_callbacks(int cpu); -- cgit v1.2.3 From 423a86a610cad121742ebe698ef98a3b4c87b5dd Mon Sep 17 00:00:00 2001 From: "Joel Fernandes (Google)" Date: Wed, 12 Dec 2018 14:37:10 -0800 Subject: rcu: Add sparse check to rcu_assign_pointer() The rcu_assign_pointer() function currently doesn't do any sparse checking on the assigned-to pointer. So its possible that a pointer that is not __rcu annotated is assigned with rcu_assign_pointer without sparse complaints. Because rcu_dereference() already does such checking, this commit makes rcu_assign_pointer() to do the same. The extra error could be helpful in cases where an RCU pointer is assigned with rcu_assign_pointer() but not annotated with __rcu. This doesn't generate any code in the normal case because __CHECKER__ is defined only in the context of sparse. This commit also renames rcu_dereference_sparse() to rcu_check_parse() since the checking now happens not only during derereferencing but also during assignment. Test: Introduced an rcu_assign_pointer in code and checked the output of sparse with and without this change. The change correctly causes sparse to throw an error. Signed-off-by: Joel Fernandes (Google) Signed-off-by: Paul E. McKenney --- include/linux/rcupdate.h | 13 +++++++------ 1 file changed, 7 insertions(+), 6 deletions(-) (limited to 'include/linux') diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h index 6f8f047c4068..4a2cce4d4bd9 100644 --- a/include/linux/rcupdate.h +++ b/include/linux/rcupdate.h @@ -309,16 +309,16 @@ static inline void rcu_preempt_sleep_check(void) { } */ #ifdef __CHECKER__ -#define rcu_dereference_sparse(p, space) \ +#define rcu_check_sparse(p, space) \ ((void)(((typeof(*p) space *)p) == p)) #else /* #ifdef __CHECKER__ */ -#define rcu_dereference_sparse(p, space) +#define rcu_check_sparse(p, space) #endif /* #else #ifdef __CHECKER__ */ #define __rcu_access_pointer(p, space) \ ({ \ typeof(*p) *_________p1 = (typeof(*p) *__force)READ_ONCE(p); \ - rcu_dereference_sparse(p, space); \ + rcu_check_sparse(p, space); \ ((typeof(*p) __force __kernel *)(_________p1)); \ }) #define __rcu_dereference_check(p, c, space) \ @@ -326,13 +326,13 @@ static inline void rcu_preempt_sleep_check(void) { } /* Dependency order vs. p above. 
*/ \ typeof(*p) *________p1 = (typeof(*p) *__force)READ_ONCE(p); \ RCU_LOCKDEP_WARN(!(c), "suspicious rcu_dereference_check() usage"); \ - rcu_dereference_sparse(p, space); \ + rcu_check_sparse(p, space); \ ((typeof(*p) __force __kernel *)(________p1)); \ }) #define __rcu_dereference_protected(p, c, space) \ ({ \ RCU_LOCKDEP_WARN(!(c), "suspicious rcu_dereference_protected() usage"); \ - rcu_dereference_sparse(p, space); \ + rcu_check_sparse(p, space); \ ((typeof(*p) __force __kernel *)(p)); \ }) #define rcu_dereference_raw(p) \ @@ -382,6 +382,7 @@ static inline void rcu_preempt_sleep_check(void) { } #define rcu_assign_pointer(p, v) \ ({ \ uintptr_t _r_a_p__v = (uintptr_t)(v); \ + rcu_check_sparse(p, __rcu); \ \ if (__builtin_constant_p(v) && (_r_a_p__v) == (uintptr_t)NULL) \ WRITE_ONCE((p), (typeof(p))(_r_a_p__v)); \ @@ -785,7 +786,7 @@ static inline notrace void rcu_read_unlock_sched_notrace(void) */ #define RCU_INIT_POINTER(p, v) \ do { \ - rcu_dereference_sparse(p, __rcu); \ + rcu_check_sparse(p, __rcu); \ WRITE_ONCE(p, RCU_INITIALIZER(v)); \ } while (0) -- cgit v1.2.3 From c8ca1aa774b20f182733d1661f3b6aa3105338e7 Mon Sep 17 00:00:00 2001 From: "Paul E. McKenney" Date: Fri, 30 Nov 2018 10:06:46 -0800 Subject: srcu: Check for invalid idx argument in srcu_read_unlock() The current SRCU implementation has an idx argument of zero or one, and never anything else. This commit therefore adds a WARN_ON_ONCE() to complain if this restriction is violated. Signed-off-by: Paul E. McKenney --- include/linux/srcu.h | 1 + 1 file changed, 1 insertion(+) (limited to 'include/linux') diff --git a/include/linux/srcu.h b/include/linux/srcu.h index c614375cd264..33cf83b9bda8 100644 --- a/include/linux/srcu.h +++ b/include/linux/srcu.h @@ -223,6 +223,7 @@ srcu_read_lock_notrace(struct srcu_struct *ssp) __acquires(ssp) static inline void srcu_read_unlock(struct srcu_struct *ssp, int idx) __releases(ssp) { + WARN_ON_ONCE(idx & ~0x1); rcu_lock_release(&(ssp)->dep_map); __srcu_read_unlock(ssp, idx); } -- cgit v1.2.3 From e81baf4cb19a9b428ba477fd0423f81672a58817 Mon Sep 17 00:00:00 2001 From: Sebastian Andrzej Siewior Date: Tue, 11 Dec 2018 12:12:38 +0100 Subject: srcu: Remove srcu_queue_delayed_work_on() srcu_queue_delayed_work_on() disables preemption (and therefore CPU hotplug in RCU's case) and then checks based on its own accounting if a CPU is online. If the CPU is online it uses queue_delayed_work_on() otherwise it fallbacks to queue_delayed_work(). The problem here is that queue_work() on -RT does not work with disabled preemption. queue_work_on() works also on an offlined CPU. queue_delayed_work_on() has the problem that it is possible to program a timer on an offlined CPU. This timer will fire once the CPU is online again. But until then, the timer remains programmed and nothing will happen. Add a local timer which will fire (as requested per delay) on the local CPU and then enqueue the work on the specific CPU. RCUtorture testing with SRCU-P for 24h showed no problems. Signed-off-by: Sebastian Andrzej Siewior Signed-off-by: Paul E. McKenney --- include/linux/srcutree.h | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) (limited to 'include/linux') diff --git a/include/linux/srcutree.h b/include/linux/srcutree.h index 6f292bd3e7db..0faa978c9880 100644 --- a/include/linux/srcutree.h +++ b/include/linux/srcutree.h @@ -45,7 +45,8 @@ struct srcu_data { unsigned long srcu_gp_seq_needed; /* Furthest future GP needed. */ unsigned long srcu_gp_seq_needed_exp; /* Furthest future exp GP. 
*/ bool srcu_cblist_invoking; /* Invoking these CBs? */ - struct delayed_work work; /* Context for CB invoking. */ + struct timer_list delay_work; /* Delay for CB invoking */ + struct work_struct work; /* Context for CB invoking. */ struct rcu_head srcu_barrier_head; /* For srcu_barrier() use. */ struct srcu_node *mynode; /* Leaf srcu_node. */ unsigned long grpmask; /* Mask for leaf srcu_node */ -- cgit v1.2.3 From 3a6cb58f159e64241b2af9374acad41a70939349 Mon Sep 17 00:00:00 2001 From: "Paul E. McKenney" Date: Mon, 10 Dec 2018 09:44:52 -0800 Subject: rcutorture: Add grace period after CPU offline Beyond a certain point in the CPU-hotplug offline process, timers get stranded on the outgoing CPU, and won't fire until that CPU comes back online, which might well be never. This commit therefore adds a hook in torture_onoff_init() that is invoked from torture_offline(), which rcutorture uses to occasionally wait for a grace period. This should result in failures for RCU implementations that rely on stranded timers eventually firing in the absence of the CPU coming back online. Reported-by: Sebastian Andrzej Siewior Signed-off-by: Paul E. McKenney --- include/linux/torture.h | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) (limited to 'include/linux') diff --git a/include/linux/torture.h b/include/linux/torture.h index 48fad21109fc..f2d3bcbf4337 100644 --- a/include/linux/torture.h +++ b/include/linux/torture.h @@ -50,11 +50,12 @@ do { if (verbose) pr_alert("%s" TORTURE_FLAG "!!! %s\n", torture_type, s); } while (0) /* Definitions for online/offline exerciser. */ +typedef void torture_ofl_func(void); bool torture_offline(int cpu, long *n_onl_attempts, long *n_onl_successes, unsigned long *sum_offl, int *min_onl, int *max_onl); bool torture_online(int cpu, long *n_onl_attempts, long *n_onl_successes, unsigned long *sum_onl, int *min_onl, int *max_onl); -int torture_onoff_init(long ooholdoff, long oointerval); +int torture_onoff_init(long ooholdoff, long oointerval, torture_ofl_func *f); void torture_onoff_stats(void); bool torture_onoff_failures(void); -- cgit v1.2.3 From 9b28aa1d0eae1be1016c8f4ba504545caff01da3 Mon Sep 17 00:00:00 2001 From: Vadim Pasternak Date: Wed, 12 Dec 2018 23:59:13 +0000 Subject: platform_data/mlxreg: Document fixes for core platform data Remove "led" from the description, since the structure "mlxreg_core_platform_data" is used not only for led data. Signed-off-by: Vadim Pasternak Signed-off-by: Darren Hart (VMware) --- include/linux/platform_data/mlxreg.h | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'include/linux') diff --git a/include/linux/platform_data/mlxreg.h b/include/linux/platform_data/mlxreg.h index 19f5cb618c55..d823713f94ec 100644 --- a/include/linux/platform_data/mlxreg.h +++ b/include/linux/platform_data/mlxreg.h @@ -107,9 +107,9 @@ struct mlxreg_core_item { /** * struct mlxreg_core_platform_data - platform data: * - * @led_data: led private data; + * @data: instance private data; * @regmap: register map of parent device; - * @counter: number of led instances; + * @counter: number of instances; */ struct mlxreg_core_platform_data { struct mlxreg_core_data *data; -- cgit v1.2.3 From 946e4e02b11889cb161b15ff4712a8ba21a50eb6 Mon Sep 17 00:00:00 2001 From: Vadim Pasternak Date: Wed, 12 Dec 2018 23:59:14 +0000 Subject: platform_data/mlxreg: Add capability field to core platform data Add capability field to "mlxreg_core_platform_data" structure. 
The purpose of this register is to provide additional info to platform driver through the atribute related capability register. Signed-off-by: Vadim Pasternak Signed-off-by: Darren Hart (VMware) --- include/linux/platform_data/mlxreg.h | 2 ++ 1 file changed, 2 insertions(+) (limited to 'include/linux') diff --git a/include/linux/platform_data/mlxreg.h b/include/linux/platform_data/mlxreg.h index d823713f94ec..1b2f86f96743 100644 --- a/include/linux/platform_data/mlxreg.h +++ b/include/linux/platform_data/mlxreg.h @@ -61,6 +61,7 @@ struct mlxreg_hotplug_device { * @reg: attribute register; * @mask: attribute access mask; * @bit: attribute effective bit; + * @capability: attribute capability register; * @mode: access mode; * @np - pointer to node platform associated with attribute; * @hpdev - hotplug device data; @@ -72,6 +73,7 @@ struct mlxreg_core_data { u32 reg; u32 mask; u32 bit; + u32 capability; umode_t mode; struct device_node *np; struct mlxreg_hotplug_device hpdev; -- cgit v1.2.3 From a7b76c8857692b0fce063b94ed83da11c396d341 Mon Sep 17 00:00:00 2001 From: Jiong Wang Date: Sat, 26 Jan 2019 12:26:05 -0500 Subject: bpf: JIT blinds support JMP32 This patch adds JIT blinds support for JMP32. Like BPF_JMP_REG/IMM, JMP32 version are needed for building raw bpf insn. They are added to both include/linux/filter.h and tools/include/linux/filter.h. Reviewed-by: Jakub Kicinski Signed-off-by: Jiong Wang Signed-off-by: Alexei Starovoitov --- include/linux/filter.h | 20 ++++++++++++++++++++ 1 file changed, 20 insertions(+) (limited to 'include/linux') diff --git a/include/linux/filter.h b/include/linux/filter.h index be9af6b4a9e4..e4b473f85b46 100644 --- a/include/linux/filter.h +++ b/include/linux/filter.h @@ -277,6 +277,26 @@ struct sock_reuseport; .off = OFF, \ .imm = IMM }) +/* Like BPF_JMP_REG, but with 32-bit wide operands for comparison. */ + +#define BPF_JMP32_REG(OP, DST, SRC, OFF) \ + ((struct bpf_insn) { \ + .code = BPF_JMP32 | BPF_OP(OP) | BPF_X, \ + .dst_reg = DST, \ + .src_reg = SRC, \ + .off = OFF, \ + .imm = 0 }) + +/* Like BPF_JMP_IMM, but with 32-bit wide operands for comparison. */ + +#define BPF_JMP32_IMM(OP, DST, IMM, OFF) \ + ((struct bpf_insn) { \ + .code = BPF_JMP32 | BPF_OP(OP) | BPF_K, \ + .dst_reg = DST, \ + .src_reg = 0, \ + .off = OFF, \ + .imm = IMM }) + /* Unconditional jumps, goto pc + off16 */ #define BPF_JMP_A(OFF) \ -- cgit v1.2.3 From 8d5d0cfb63cbcb4005e19a332b31d687b1d01e58 Mon Sep 17 00:00:00 2001 From: Quentin Perret Date: Mon, 3 Dec 2018 09:56:23 +0000 Subject: sched/topology: Introduce a sysctl for Energy Aware Scheduling In its current state, Energy Aware Scheduling (EAS) starts automatically on asymmetric platforms having an Energy Model (EM). However, there are users who want to have an EM (for thermal management for example), but don't want EAS with it. In order to let users disable EAS explicitly, introduce a new sysctl called 'sched_energy_aware'. It is enabled by default so that EAS can start automatically on platforms where it makes sense. Flipping it to 0 rebuilds the scheduling domains and disables EAS. 
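The knob is exposed as /proc/sys/kernel/sched_energy_aware and wired up with an ordinary ctl_table entry pointing at the handler declared below; the fragment here is an illustrative sketch rather than the exact kernel/sysctl.c hunk (in particular the 0/1 clamping via extra1/extra2 is assumed):

#if defined(CONFIG_ENERGY_MODEL) && defined(CONFIG_CPU_FREQ_GOV_SCHEDUTIL)
	{
		.procname	= "sched_energy_aware",
		.data		= &sysctl_sched_energy_aware,
		.maxlen		= sizeof(unsigned int),
		.mode		= 0644,
		.proc_handler	= sched_energy_aware_handler,
		.extra1		= &zero,	/* clamp writes to 0..1 */
		.extra2		= &one,
	},
#endif

Writing 0 to the file triggers the sched-domain rebuild described above; writing 1 re-enables EAS where the platform supports it.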
Signed-off-by: Quentin Perret Signed-off-by: Peter Zijlstra (Intel) Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Thomas Gleixner Cc: adharmap@codeaurora.org Cc: chris.redpath@arm.com Cc: currojerez@riseup.net Cc: dietmar.eggemann@arm.com Cc: edubezval@gmail.com Cc: gregkh@linuxfoundation.org Cc: javi.merino@kernel.org Cc: joel@joelfernandes.org Cc: juri.lelli@redhat.com Cc: morten.rasmussen@arm.com Cc: patrick.bellasi@arm.com Cc: pkondeti@codeaurora.org Cc: rjw@rjwysocki.net Cc: skannan@codeaurora.org Cc: smuckle@google.com Cc: srinivas.pandruvada@linux.intel.com Cc: thara.gopinath@linaro.org Cc: tkjos@google.com Cc: valentin.schneider@arm.com Cc: vincent.guittot@linaro.org Cc: viresh.kumar@linaro.org Link: https://lkml.kernel.org/r/20181203095628.11858-11-quentin.perret@arm.com Signed-off-by: Ingo Molnar --- include/linux/sched/sysctl.h | 7 +++++++ 1 file changed, 7 insertions(+) (limited to 'include/linux') diff --git a/include/linux/sched/sysctl.h b/include/linux/sched/sysctl.h index a9c32daeb9d8..99ce6d728df7 100644 --- a/include/linux/sched/sysctl.h +++ b/include/linux/sched/sysctl.h @@ -83,4 +83,11 @@ extern int sysctl_schedstats(struct ctl_table *table, int write, void __user *buffer, size_t *lenp, loff_t *ppos); +#if defined(CONFIG_ENERGY_MODEL) && defined(CONFIG_CPU_FREQ_GOV_SCHEDUTIL) +extern unsigned int sysctl_sched_energy_aware; +extern int sched_energy_aware_handler(struct ctl_table *table, int write, + void __user *buffer, size_t *lenp, + loff_t *ppos); +#endif + #endif /* _LINUX_SCHED_SYSCTL_H */ -- cgit v1.2.3 From fdce60787f6215607dc7ac910cbaf4416684b589 Mon Sep 17 00:00:00 2001 From: Philipp Zabel Date: Thu, 13 Dec 2018 12:22:32 +0100 Subject: reset: sunxi: declare sun6i_reset_init in a header file Avoid declaring extern functions in c files. To make sure function definition and usage don't get out of sync, declare sun6i_reset_init in a common header. Suggested-by: Stephen Rothwell Signed-off-by: Philipp Zabel --- include/linux/reset/sunxi.h | 7 +++++++ 1 file changed, 7 insertions(+) create mode 100644 include/linux/reset/sunxi.h (limited to 'include/linux') diff --git a/include/linux/reset/sunxi.h b/include/linux/reset/sunxi.h new file mode 100644 index 000000000000..1ad7fffb413e --- /dev/null +++ b/include/linux/reset/sunxi.h @@ -0,0 +1,7 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef __LINUX_RESET_SUNXI_H__ +#define __LINUX_RESET_SUNXI_H__ + +void __init sun6i_reset_init(void); + +#endif /* __LINUX_RESET_SUNXI_H__ */ -- cgit v1.2.3 From cdbeb315ed8dcc142a68054899cedd6e4f1fea3f Mon Sep 17 00:00:00 2001 From: Philipp Zabel Date: Thu, 13 Dec 2018 12:24:36 +0100 Subject: reset: socfpga: declare socfpga_reset_init in a header file Avoid declaring extern functions in c files. To make sure function definition and usage don't get out of sync, declare socfpga_reset_init in a common header. 
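Consumers then include the shared header instead of re-declaring the function locally; a minimal sketch, with the machine-init function name invented:

#include <linux/reset/socfpga.h>

static void __init my_socfpga_init_irq(void)
{
	/* previously needed a local "extern void socfpga_reset_init(void);" */
	socfpga_reset_init();
}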
Suggested-by: Stephen Rothwell Signed-off-by: Philipp Zabel Acked-by: Dinh Nguyen --- include/linux/reset/socfpga.h | 7 +++++++ 1 file changed, 7 insertions(+) create mode 100644 include/linux/reset/socfpga.h (limited to 'include/linux') diff --git a/include/linux/reset/socfpga.h b/include/linux/reset/socfpga.h new file mode 100644 index 000000000000..b11a2047c342 --- /dev/null +++ b/include/linux/reset/socfpga.h @@ -0,0 +1,7 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef __LINUX_RESET_SOCFPGA_H__ +#define __LINUX_RESET_SOCFPGA_H__ + +void __init socfpga_reset_init(void); + +#endif /* __LINUX_RESET_SOCFPGA_H__ */ -- cgit v1.2.3 From 83f529281d7aa42b10c2c5cb64fcbd2c7cab4409 Mon Sep 17 00:00:00 2001 From: Florian Westphal Date: Sun, 27 Jan 2019 19:18:57 +0100 Subject: netfilter: ipv4: remove useless export_symbol Only one caller; place it where needed and get rid of the EXPORT_SYMBOL. Signed-off-by: Florian Westphal Signed-off-by: Pablo Neira Ayuso --- include/linux/netfilter_ipv4.h | 6 ------ 1 file changed, 6 deletions(-) (limited to 'include/linux') diff --git a/include/linux/netfilter_ipv4.h b/include/linux/netfilter_ipv4.h index 95ab5cc64422..082e2c41b7ff 100644 --- a/include/linux/netfilter_ipv4.h +++ b/include/linux/netfilter_ipv4.h @@ -25,7 +25,6 @@ __sum16 nf_ip_checksum(struct sk_buff *skb, unsigned int hook, unsigned int dataoff, u_int8_t protocol); int nf_ip_route(struct net *net, struct dst_entry **dst, struct flowi *fl, bool strict); -int nf_ip_reroute(struct sk_buff *skb, const struct nf_queue_entry *entry); #else static inline __sum16 nf_ip_checksum(struct sk_buff *skb, unsigned int hook, unsigned int dataoff, u_int8_t protocol) @@ -37,11 +36,6 @@ static inline int nf_ip_route(struct net *net, struct dst_entry **dst, { return -EOPNOTSUPP; } -static inline int nf_ip_reroute(struct sk_buff *skb, - const struct nf_queue_entry *entry) -{ - return -EOPNOTSUPP; -} #endif /* CONFIG_INET */ #endif /*__LINUX_IP_NETFILTER_H*/ -- cgit v1.2.3 From 87eff9af7efb154cc4a940ed12efc803a0bf3fba Mon Sep 17 00:00:00 2001 From: Vladimir Zapolskiy Date: Tue, 22 Jan 2019 23:18:21 +0200 Subject: pinctrl: remove pinctrl/machine.h inclusion from pinctrl/pinconf.h The change adds explicit inclusion of linux/pinctrl/machine.h header to the only needed pinctrl-madera-core.c file, and therefore inclusion of pinctrl/machine.h header from pinctrl/pinconf.h can be removed. The change is preparatory to a follow-up reversal of commit f07512e615dd ("pinctrl/pinconfig: add debug interface"). Signed-off-by: Vladimir Zapolskiy Cc: Charles Keepax Reviewed-by Richard Fitzgerald Signed-off-by: Linus Walleij --- include/linux/pinctrl/pinconf.h | 2 -- 1 file changed, 2 deletions(-) (limited to 'include/linux') diff --git a/include/linux/pinctrl/pinconf.h b/include/linux/pinctrl/pinconf.h index 8dd85d302b90..109468d9d849 100644 --- a/include/linux/pinctrl/pinconf.h +++ b/include/linux/pinctrl/pinconf.h @@ -14,8 +14,6 @@ #ifdef CONFIG_PINCONF -#include - struct pinctrl_dev; struct seq_file; -- cgit v1.2.3 From e73339037f6b6d65e84f5fd42e56dd3cdf0d9e9c Mon Sep 17 00:00:00 2001 From: Vladimir Zapolskiy Date: Tue, 22 Jan 2019 23:18:22 +0200 Subject: pinctrl: remove unused 'pinconf-config' debugfs interface The main goal of the change is to remove .pin_config_dbg_parse_modify callback before a driver with its support appears. So far the in-kernel interface did not attract any users since its introduction 5 years ago. 
Originally .pin_config_dbg_parse_modify callback and the associated 'pinconf-config' debugfs file were introduced in commit f07512e615dd ("pinctrl/pinconfig: add debug interface"), a short description of 'pinconf-config' usage for debugging can be expressed this way: Write to 'pinconf-config' (see pinconf_dbg_config_write() function): % echo -n modify $map_type $device_name $state_name $pin_name $config > \ /sys/kernel/debug/pinctrl/$pinctrl/pinconf-config It supposes to update a global (therefore single!) 'pinconf_dbg_conf' variable with an alternative setting, the arguments should match an existing pinconf device and some registered pinctrl mapping 'map': * $map_type is either 'config_pin' or 'config_group', it should match 'map->type' value of PIN_MAP_TYPE_CONFIGS_PIN or PIN_MAP_TYPE_CONFIGS_GROUP accordingly, * $device_name should match 'map->dev_name' string value, * $state_name should match 'map->name' string value, * $pin_name should match 'map->data.configs.group_or_pin' string value, If all above has matched, then $config is a new value to be set by calling pinconfops->pin_config_dbg_parse_modify(pctldev, config, matched_config). After a successful write into 'pinconf-config' a user can read the file to get information about that single modified pin configuration. The fact is .pin_config_dbg_parse_modify callback has never been defined in 'struct pinconf_ops' of any pinconf driver, thus an actual modification of a pin or group state on any present pinconf controller does not happen, and it declares that all related code is no more than dead code. I discovered the issue while attempting to add .pin_config_dbg_parse_modify support in some drivers and found that too short 'MAX_NAME_LEN' set by drivers/pinctrl/pinconf.c:372:#define MAX_NAME_LEN 15 is practically insufficient to store a regular pinctrl device name, which are like 'e6060000.pin-controller-sh-pfc' or pin names like 'MX6QDL_PAD_ENET_REF_CLK', thus it is another indicator that the code is barely usable, insufficiently tested and unprepossessing. Of course it might be possible to increase MAX_NAME_LEN, and then add .pin_config_dbg_parse_modify callbacks to the drivers, but the whole idea of such a limited debug option looks inviable. A more flexible way to functionally substitute the original approach is to implicitly or explicitly use pinctrl_select_state() function whenever needed. Signed-off-by: Vladimir Zapolskiy Cc: Laurent Meunier Cc: Masahiro Yamada Cc: Russell King Signed-off-by: Linus Walleij --- include/linux/pinctrl/pinconf.h | 4 ---- 1 file changed, 4 deletions(-) (limited to 'include/linux') diff --git a/include/linux/pinctrl/pinconf.h b/include/linux/pinctrl/pinconf.h index 109468d9d849..93c9dd133e9d 100644 --- a/include/linux/pinctrl/pinconf.h +++ b/include/linux/pinctrl/pinconf.h @@ -29,7 +29,6 @@ struct seq_file; * @pin_config_group_get: get configurations for an entire pin group; should * return -ENOTSUPP and -EINVAL using the same rules as pin_config_get. 
* @pin_config_group_set: configure all pins in a group - * @pin_config_dbg_parse_modify: optional debugfs to modify a pin configuration * @pin_config_dbg_show: optional debugfs display hook that will provide * per-device info for a certain pin in debugfs * @pin_config_group_dbg_show: optional debugfs display hook that will provide @@ -55,9 +54,6 @@ struct pinconf_ops { unsigned selector, unsigned long *configs, unsigned num_configs); - int (*pin_config_dbg_parse_modify) (struct pinctrl_dev *pctldev, - const char *arg, - unsigned long *config); void (*pin_config_dbg_show) (struct pinctrl_dev *pctldev, struct seq_file *s, unsigned offset); -- cgit v1.2.3 From 64515dc899df898991b2b7e56f69f56f014ea888 Mon Sep 17 00:00:00 2001 From: Tomer Tayar Date: Mon, 28 Jan 2019 19:27:55 +0200 Subject: qed: Add infrastructure for error detection and recovery This patch adds the detection and handling of a parity error ("process kill event"), including the update of the protocol drivers, and the prevention of any HW access that will lead to device access towards the host while recovery is in progress. It also provides the means for the protocol drivers to trigger a recovery process on their decision. Signed-off-by: Tomer Tayar Signed-off-by: Ariel Elior Signed-off-by: Michal Kalderon Signed-off-by: David S. Miller --- include/linux/qed/qed_if.h | 20 ++++++++++++++++++++ 1 file changed, 20 insertions(+) (limited to 'include/linux') diff --git a/include/linux/qed/qed_if.h b/include/linux/qed/qed_if.h index 5f818fda96bd..35170f74ed80 100644 --- a/include/linux/qed/qed_if.h +++ b/include/linux/qed/qed_if.h @@ -763,6 +763,7 @@ struct qed_probe_params { u32 dp_module; u8 dp_level; bool is_vf; + bool recov_in_prog; }; #define QED_DRV_VER_STR_SIZE 12 @@ -809,6 +810,7 @@ struct qed_common_cb_ops { void (*arfs_filter_op)(void *dev, void *fltr, u8 fw_rc); void (*link_update)(void *dev, struct qed_link_output *link); + void (*schedule_recovery_handler)(void *dev); void (*dcbx_aen)(void *dev, struct qed_dcbx_get *get, u32 mib_type); void (*get_generic_tlv_data)(void *dev, struct qed_generic_tlvs *data); void (*get_protocol_tlv_data)(void *dev, void *data); @@ -1056,6 +1058,24 @@ struct qed_common_ops { int (*db_recovery_del)(struct qed_dev *cdev, void __iomem *db_addr, void *db_data); +/** + * @brief recovery_process - Trigger a recovery process + * + * @param cdev + * + * @return 0 on success, error otherwise. + */ + int (*recovery_process)(struct qed_dev *cdev); + +/** + * @brief recovery_prolog - Execute the prolog operations of a recovery process + * + * @param cdev + * + * @return 0 on success, error otherwise. + */ + int (*recovery_prolog)(struct qed_dev *cdev); + /** * @brief update_drv_state - API to inform the change in the driver state. * -- cgit v1.2.3 From ccc67ef50b9085b895738d7720840eb6fe98745e Mon Sep 17 00:00:00 2001 From: Tomer Tayar Date: Mon, 28 Jan 2019 19:27:56 +0200 Subject: qede: Error recovery process This patch adds the error recovery process in the qede driver. The process includes a partial/customized driver unload and load, which allows it to look like a short suspend period to the kernel while preserving the net devices' state. Signed-off-by: Tomer Tayar Signed-off-by: Ariel Elior Signed-off-by: Michal Kalderon Signed-off-by: David S. 
Miller --- include/linux/qed/qede_rdma.h | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) (limited to 'include/linux') diff --git a/include/linux/qed/qede_rdma.h b/include/linux/qed/qede_rdma.h index 9904617a9730..5a00c7a473bf 100644 --- a/include/linux/qed/qede_rdma.h +++ b/include/linux/qed/qede_rdma.h @@ -74,21 +74,23 @@ void qede_rdma_unregister_driver(struct qedr_driver *drv); bool qede_rdma_supported(struct qede_dev *dev); #if IS_ENABLED(CONFIG_QED_RDMA) -int qede_rdma_dev_add(struct qede_dev *dev); +int qede_rdma_dev_add(struct qede_dev *dev, bool recovery); void qede_rdma_dev_event_open(struct qede_dev *dev); void qede_rdma_dev_event_close(struct qede_dev *dev); -void qede_rdma_dev_remove(struct qede_dev *dev); +void qede_rdma_dev_remove(struct qede_dev *dev, bool recovery); void qede_rdma_event_changeaddr(struct qede_dev *edr); #else -static inline int qede_rdma_dev_add(struct qede_dev *dev) +static inline int qede_rdma_dev_add(struct qede_dev *dev, + bool recovery) { return 0; } static inline void qede_rdma_dev_event_open(struct qede_dev *dev) {} static inline void qede_rdma_dev_event_close(struct qede_dev *dev) {} -static inline void qede_rdma_dev_remove(struct qede_dev *dev) {} +static inline void qede_rdma_dev_remove(struct qede_dev *dev, + bool recovery) {} static inline void qede_rdma_event_changeaddr(struct qede_dev *edr) {} #endif #endif -- cgit v1.2.3 From c8aa703822bf811269975cf7251b5eaad4c38e9c Mon Sep 17 00:00:00 2001 From: Stanislav Fomichev Date: Mon, 28 Jan 2019 08:53:53 -0800 Subject: net/flow_dissector: move bpf case into __skb_flow_bpf_dissect This way, we can reuse it for flow dissector in BPF_PROG_TEST_RUN. No functional changes. Signed-off-by: Stanislav Fomichev Acked-by: Song Liu Signed-off-by: Daniel Borkmann --- include/linux/skbuff.h | 5 +++++ 1 file changed, 5 insertions(+) (limited to 'include/linux') diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h index 93f56fddd92a..be762fc34ff3 100644 --- a/include/linux/skbuff.h +++ b/include/linux/skbuff.h @@ -1221,6 +1221,11 @@ static inline int skb_flow_dissector_bpf_prog_detach(const union bpf_attr *attr) } #endif +struct bpf_flow_keys; +bool __skb_flow_bpf_dissect(struct bpf_prog *prog, + const struct sk_buff *skb, + struct flow_dissector *flow_dissector, + struct bpf_flow_keys *flow_keys); bool __skb_flow_dissect(const struct sk_buff *skb, struct flow_dissector *flow_dissector, void *target_container, -- cgit v1.2.3 From b7a1848e8398b8396c990279e6a10272d818577e Mon Sep 17 00:00:00 2001 From: Stanislav Fomichev Date: Mon, 28 Jan 2019 08:53:54 -0800 Subject: bpf: add BPF_PROG_TEST_RUN support for flow dissector The input is packet data, the output is struct bpf_flow_key. This should make it easy to test flow dissector programs without elaborate setup. Signed-off-by: Stanislav Fomichev Acked-by: Song Liu Signed-off-by: Daniel Borkmann --- include/linux/bpf.h | 3 +++ 1 file changed, 3 insertions(+) (limited to 'include/linux') diff --git a/include/linux/bpf.h b/include/linux/bpf.h index 3851529062ec..0394f1f9213b 100644 --- a/include/linux/bpf.h +++ b/include/linux/bpf.h @@ -404,6 +404,9 @@ int bpf_prog_test_run_xdp(struct bpf_prog *prog, const union bpf_attr *kattr, union bpf_attr __user *uattr); int bpf_prog_test_run_skb(struct bpf_prog *prog, const union bpf_attr *kattr, union bpf_attr __user *uattr); +int bpf_prog_test_run_flow_dissector(struct bpf_prog *prog, + const union bpf_attr *kattr, + union bpf_attr __user *uattr); /* an array of programs to be executed under rcu_lock. 
* -- cgit v1.2.3 From 2b6e492467c78183bb629bb0a100ea3509b615a5 Mon Sep 17 00:00:00 2001 From: Heikki Krogerus Date: Wed, 23 Jan 2019 17:44:16 +0300 Subject: device property: Fix the length used in PROPERTY_ENTRY_STRING() With string type property entries we need to use sizeof(const char *) instead of the number of characters as the length of the entry. If the string was shorter then sizeof(const char *), attempts to read it would have failed with -EOVERFLOW. The problem has been hidden because all build-in string properties have had a string longer then 8 characters until now. Fixes: a85f42047533 ("device property: helper macros for property entry creation") Cc: 4.5+ # 4.5+ Signed-off-by: Heikki Krogerus Reviewed-by: Andy Shevchenko Signed-off-by: Rafael J. Wysocki --- include/linux/property.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'include/linux') diff --git a/include/linux/property.h b/include/linux/property.h index 3789ec755fb6..65d3420dd5d1 100644 --- a/include/linux/property.h +++ b/include/linux/property.h @@ -258,7 +258,7 @@ struct property_entry { #define PROPERTY_ENTRY_STRING(_name_, _val_) \ (struct property_entry) { \ .name = _name_, \ - .length = sizeof(_val_), \ + .length = sizeof(const char *), \ .type = DEV_PROP_STRING, \ { .value = { .str = _val_ } }, \ } -- cgit v1.2.3 From 625c85a62cb7d3c79f6e16de3cfa972033658250 Mon Sep 17 00:00:00 2001 From: Viresh Kumar Date: Fri, 25 Jan 2019 12:53:07 +0530 Subject: cpufreq: Use struct kobj_attribute instead of struct global_attr The cpufreq_global_kobject is created using kobject_create_and_add() helper, which assigns the kobj_type as dynamic_kobj_ktype and show/store routines are set to kobj_attr_show() and kobj_attr_store(). These routines pass struct kobj_attribute as an argument to the show/store callbacks. But all the cpufreq files created using the cpufreq_global_kobject expect the argument to be of type struct attribute. Things work fine currently as no one accesses the "attr" argument. We may not see issues even if the argument is used, as struct kobj_attribute has struct attribute as its first element and so they will both get same address. But this is logically incorrect and we should rather use struct kobj_attribute instead of struct global_attr in the cpufreq core and drivers and the show/store callbacks should take struct kobj_attribute as argument instead. This bug is caught using CFI CLANG builds in android kernel which catches mismatch in function prototypes for such callbacks. Reported-by: Donghee Han Reported-by: Sangkyu Kim Signed-off-by: Viresh Kumar Signed-off-by: Rafael J. 
Wysocki --- include/linux/cpufreq.h | 12 ++---------- 1 file changed, 2 insertions(+), 10 deletions(-) (limited to 'include/linux') diff --git a/include/linux/cpufreq.h b/include/linux/cpufreq.h index bd7fbd6a4478..c19142911554 100644 --- a/include/linux/cpufreq.h +++ b/include/linux/cpufreq.h @@ -254,20 +254,12 @@ __ATTR(_name, 0644, show_##_name, store_##_name) static struct freq_attr _name = \ __ATTR(_name, 0200, NULL, store_##_name) -struct global_attr { - struct attribute attr; - ssize_t (*show)(struct kobject *kobj, - struct attribute *attr, char *buf); - ssize_t (*store)(struct kobject *a, struct attribute *b, - const char *c, size_t count); -}; - #define define_one_global_ro(_name) \ -static struct global_attr _name = \ +static struct kobj_attribute _name = \ __ATTR(_name, 0444, show_##_name, NULL) #define define_one_global_rw(_name) \ -static struct global_attr _name = \ +static struct kobj_attribute _name = \ __ATTR(_name, 0644, show_##_name, store_##_name) -- cgit v1.2.3 From bc3843d4d357061d92e7800c7da342e2d068772c Mon Sep 17 00:00:00 2001 From: Nava kishore Manne Date: Fri, 25 Jan 2019 13:16:52 +0530 Subject: firmware: xilinx: Add reset API's This Patch Adds reset API's to support release, assert and status functionalities by using firmware interface. Signed-off-by: Nava kishore Manne Signed-off-by: Michal Simek --- include/linux/firmware/xlnx-zynqmp.h | 136 +++++++++++++++++++++++++++++++++++ 1 file changed, 136 insertions(+) (limited to 'include/linux') diff --git a/include/linux/firmware/xlnx-zynqmp.h b/include/linux/firmware/xlnx-zynqmp.h index 3c3c28eff56a..07c587a0b06e 100644 --- a/include/linux/firmware/xlnx-zynqmp.h +++ b/include/linux/firmware/xlnx-zynqmp.h @@ -34,6 +34,8 @@ enum pm_api_id { PM_GET_API_VERSION = 1, + PM_RESET_ASSERT = 17, + PM_RESET_GET_STATUS, PM_IOCTL = 34, PM_QUERY_DATA, PM_CLOCK_ENABLE, @@ -75,6 +77,137 @@ enum pm_query_id { PM_QID_CLOCK_GET_NUM_CLOCKS = 12, }; +enum zynqmp_pm_reset_action { + PM_RESET_ACTION_RELEASE, + PM_RESET_ACTION_ASSERT, + PM_RESET_ACTION_PULSE, +}; + +enum zynqmp_pm_reset { + ZYNQMP_PM_RESET_START = 1000, + ZYNQMP_PM_RESET_PCIE_CFG = ZYNQMP_PM_RESET_START, + ZYNQMP_PM_RESET_PCIE_BRIDGE, + ZYNQMP_PM_RESET_PCIE_CTRL, + ZYNQMP_PM_RESET_DP, + ZYNQMP_PM_RESET_SWDT_CRF, + ZYNQMP_PM_RESET_AFI_FM5, + ZYNQMP_PM_RESET_AFI_FM4, + ZYNQMP_PM_RESET_AFI_FM3, + ZYNQMP_PM_RESET_AFI_FM2, + ZYNQMP_PM_RESET_AFI_FM1, + ZYNQMP_PM_RESET_AFI_FM0, + ZYNQMP_PM_RESET_GDMA, + ZYNQMP_PM_RESET_GPU_PP1, + ZYNQMP_PM_RESET_GPU_PP0, + ZYNQMP_PM_RESET_GPU, + ZYNQMP_PM_RESET_GT, + ZYNQMP_PM_RESET_SATA, + ZYNQMP_PM_RESET_ACPU3_PWRON, + ZYNQMP_PM_RESET_ACPU2_PWRON, + ZYNQMP_PM_RESET_ACPU1_PWRON, + ZYNQMP_PM_RESET_ACPU0_PWRON, + ZYNQMP_PM_RESET_APU_L2, + ZYNQMP_PM_RESET_ACPU3, + ZYNQMP_PM_RESET_ACPU2, + ZYNQMP_PM_RESET_ACPU1, + ZYNQMP_PM_RESET_ACPU0, + ZYNQMP_PM_RESET_DDR, + ZYNQMP_PM_RESET_APM_FPD, + ZYNQMP_PM_RESET_SOFT, + ZYNQMP_PM_RESET_GEM0, + ZYNQMP_PM_RESET_GEM1, + ZYNQMP_PM_RESET_GEM2, + ZYNQMP_PM_RESET_GEM3, + ZYNQMP_PM_RESET_QSPI, + ZYNQMP_PM_RESET_UART0, + ZYNQMP_PM_RESET_UART1, + ZYNQMP_PM_RESET_SPI0, + ZYNQMP_PM_RESET_SPI1, + ZYNQMP_PM_RESET_SDIO0, + ZYNQMP_PM_RESET_SDIO1, + ZYNQMP_PM_RESET_CAN0, + ZYNQMP_PM_RESET_CAN1, + ZYNQMP_PM_RESET_I2C0, + ZYNQMP_PM_RESET_I2C1, + ZYNQMP_PM_RESET_TTC0, + ZYNQMP_PM_RESET_TTC1, + ZYNQMP_PM_RESET_TTC2, + ZYNQMP_PM_RESET_TTC3, + ZYNQMP_PM_RESET_SWDT_CRL, + ZYNQMP_PM_RESET_NAND, + ZYNQMP_PM_RESET_ADMA, + ZYNQMP_PM_RESET_GPIO, + ZYNQMP_PM_RESET_IOU_CC, + ZYNQMP_PM_RESET_TIMESTAMP, + ZYNQMP_PM_RESET_RPU_R50, + 
ZYNQMP_PM_RESET_RPU_R51, + ZYNQMP_PM_RESET_RPU_AMBA, + ZYNQMP_PM_RESET_OCM, + ZYNQMP_PM_RESET_RPU_PGE, + ZYNQMP_PM_RESET_USB0_CORERESET, + ZYNQMP_PM_RESET_USB1_CORERESET, + ZYNQMP_PM_RESET_USB0_HIBERRESET, + ZYNQMP_PM_RESET_USB1_HIBERRESET, + ZYNQMP_PM_RESET_USB0_APB, + ZYNQMP_PM_RESET_USB1_APB, + ZYNQMP_PM_RESET_IPI, + ZYNQMP_PM_RESET_APM_LPD, + ZYNQMP_PM_RESET_RTC, + ZYNQMP_PM_RESET_SYSMON, + ZYNQMP_PM_RESET_AFI_FM6, + ZYNQMP_PM_RESET_LPD_SWDT, + ZYNQMP_PM_RESET_FPD, + ZYNQMP_PM_RESET_RPU_DBG1, + ZYNQMP_PM_RESET_RPU_DBG0, + ZYNQMP_PM_RESET_DBG_LPD, + ZYNQMP_PM_RESET_DBG_FPD, + ZYNQMP_PM_RESET_APLL, + ZYNQMP_PM_RESET_DPLL, + ZYNQMP_PM_RESET_VPLL, + ZYNQMP_PM_RESET_IOPLL, + ZYNQMP_PM_RESET_RPLL, + ZYNQMP_PM_RESET_GPO3_PL_0, + ZYNQMP_PM_RESET_GPO3_PL_1, + ZYNQMP_PM_RESET_GPO3_PL_2, + ZYNQMP_PM_RESET_GPO3_PL_3, + ZYNQMP_PM_RESET_GPO3_PL_4, + ZYNQMP_PM_RESET_GPO3_PL_5, + ZYNQMP_PM_RESET_GPO3_PL_6, + ZYNQMP_PM_RESET_GPO3_PL_7, + ZYNQMP_PM_RESET_GPO3_PL_8, + ZYNQMP_PM_RESET_GPO3_PL_9, + ZYNQMP_PM_RESET_GPO3_PL_10, + ZYNQMP_PM_RESET_GPO3_PL_11, + ZYNQMP_PM_RESET_GPO3_PL_12, + ZYNQMP_PM_RESET_GPO3_PL_13, + ZYNQMP_PM_RESET_GPO3_PL_14, + ZYNQMP_PM_RESET_GPO3_PL_15, + ZYNQMP_PM_RESET_GPO3_PL_16, + ZYNQMP_PM_RESET_GPO3_PL_17, + ZYNQMP_PM_RESET_GPO3_PL_18, + ZYNQMP_PM_RESET_GPO3_PL_19, + ZYNQMP_PM_RESET_GPO3_PL_20, + ZYNQMP_PM_RESET_GPO3_PL_21, + ZYNQMP_PM_RESET_GPO3_PL_22, + ZYNQMP_PM_RESET_GPO3_PL_23, + ZYNQMP_PM_RESET_GPO3_PL_24, + ZYNQMP_PM_RESET_GPO3_PL_25, + ZYNQMP_PM_RESET_GPO3_PL_26, + ZYNQMP_PM_RESET_GPO3_PL_27, + ZYNQMP_PM_RESET_GPO3_PL_28, + ZYNQMP_PM_RESET_GPO3_PL_29, + ZYNQMP_PM_RESET_GPO3_PL_30, + ZYNQMP_PM_RESET_GPO3_PL_31, + ZYNQMP_PM_RESET_RPU_LS, + ZYNQMP_PM_RESET_PS_ONLY, + ZYNQMP_PM_RESET_PL, + ZYNQMP_PM_RESET_PS_PL0, + ZYNQMP_PM_RESET_PS_PL1, + ZYNQMP_PM_RESET_PS_PL2, + ZYNQMP_PM_RESET_PS_PL3, + ZYNQMP_PM_RESET_END = ZYNQMP_PM_RESET_PS_PL3 +}; + /** * struct zynqmp_pm_query_data - PM query data * @qid: query ID @@ -102,6 +235,9 @@ struct zynqmp_eemi_ops { int (*clock_setparent)(u32 clock_id, u32 parent_id); int (*clock_getparent)(u32 clock_id, u32 *parent_id); int (*ioctl)(u32 node_id, u32 ioctl_id, u32 arg1, u32 arg2, u32 *out); + int (*reset_assert)(const enum zynqmp_pm_reset reset, + const enum zynqmp_pm_reset_action assert_flag); + int (*reset_get_status)(const enum zynqmp_pm_reset reset, u32 *status); }; #if IS_REACHABLE(CONFIG_ARCH_ZYNQMP) -- cgit v1.2.3 From 15917dc02841862840efcbfe1da0830f88078b5c Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Wed, 19 Dec 2018 13:04:41 +0100 Subject: sched: Remove stale PF_MUTEX_TESTER bit The RTMUTEX tester was removed long ago but the PF bit stayed around. Remove it and free up the space. 
Signed-off-by: Thomas Gleixner --- include/linux/sched.h | 1 - 1 file changed, 1 deletion(-) (limited to 'include/linux') diff --git a/include/linux/sched.h b/include/linux/sched.h index d2f90fa92468..e2bba022827d 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h @@ -1409,7 +1409,6 @@ extern struct pid *cad_pid; #define PF_UMH 0x02000000 /* I'm an Usermodehelper process */ #define PF_NO_SETAFFINITY 0x04000000 /* Userland is not allowed to meddle with cpus_allowed */ #define PF_MCE_EARLY 0x08000000 /* Early kill for mce process policy */ -#define PF_MUTEX_TESTER 0x20000000 /* Thread belongs to the rt mutex tester */ #define PF_FREEZER_SKIP 0x40000000 /* Freezer should not count it as freezable */ #define PF_SUSPEND_TASK 0x80000000 /* This thread called freeze_processes() and should not be frozen */ -- cgit v1.2.3 From 71368af9027f18fe5d1c6f372cfdff7e4bde8b48 Mon Sep 17 00:00:00 2001 From: Waiman Long Date: Wed, 16 Jan 2019 17:01:36 -0500 Subject: x86/speculation: Add PR_SPEC_DISABLE_NOEXEC With the default SPEC_STORE_BYPASS_SECCOMP/SPEC_STORE_BYPASS_PRCTL mode, the TIF_SSBD bit will be inherited when a new task is fork'ed or cloned. It will also remain when a new program is execve'ed. Only certain class of applications (like Java) that can run on behalf of multiple users on a single thread will require disabling speculative store bypass for security purposes. Those applications will call prctl(2) at startup time to disable SSB. They won't rely on the fact the SSB might have been disabled. Other applications that don't need SSBD will just move on without checking if SSBD has been turned on or not. The fact that the TIF_SSBD is inherited across execve(2) boundary will cause performance of applications that don't need SSBD but their predecessors have SSBD on to be unwittingly impacted especially if they write to memory a lot. To remedy this problem, a new PR_SPEC_DISABLE_NOEXEC argument for the PR_SET_SPECULATION_CTRL option of prctl(2) is added to allow applications to specify that the SSBD feature bit on the task structure should be cleared whenever a new program is being execve'ed. Suggested-by: Thomas Gleixner Signed-off-by: Waiman Long Signed-off-by: Thomas Gleixner Cc: Borislav Petkov Cc: Jonathan Corbet Cc: linux-doc@vger.kernel.org Cc: "H. 
Peter Anvin" Cc: Andi Kleen Cc: David Woodhouse Cc: Jiri Kosina Cc: Josh Poimboeuf Cc: Tim Chen Cc: KarimAllah Ahmed Cc: Peter Zijlstra Cc: Konrad Rzeszutek Wilk Link: https://lkml.kernel.org/r/1547676096-3281-1-git-send-email-longman@redhat.com --- include/linux/sched.h | 5 +++++ 1 file changed, 5 insertions(+) (limited to 'include/linux') diff --git a/include/linux/sched.h b/include/linux/sched.h index d2f90fa92468..fc836dc71bba 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h @@ -1459,6 +1459,7 @@ static inline bool is_percpu_thread(void) #define PFA_SPEC_SSB_FORCE_DISABLE 4 /* Speculative Store Bypass force disabled*/ #define PFA_SPEC_IB_DISABLE 5 /* Indirect branch speculation restricted */ #define PFA_SPEC_IB_FORCE_DISABLE 6 /* Indirect branch speculation permanently restricted */ +#define PFA_SPEC_SSB_NOEXEC 7 /* Speculative Store Bypass clear on execve() */ #define TASK_PFA_TEST(name, func) \ static inline bool task_##func(struct task_struct *p) \ @@ -1487,6 +1488,10 @@ TASK_PFA_TEST(SPEC_SSB_DISABLE, spec_ssb_disable) TASK_PFA_SET(SPEC_SSB_DISABLE, spec_ssb_disable) TASK_PFA_CLEAR(SPEC_SSB_DISABLE, spec_ssb_disable) +TASK_PFA_TEST(SPEC_SSB_NOEXEC, spec_ssb_noexec) +TASK_PFA_SET(SPEC_SSB_NOEXEC, spec_ssb_noexec) +TASK_PFA_CLEAR(SPEC_SSB_NOEXEC, spec_ssb_noexec) + TASK_PFA_TEST(SPEC_SSB_FORCE_DISABLE, spec_ssb_force_disable) TASK_PFA_SET(SPEC_SSB_FORCE_DISABLE, spec_ssb_force_disable) -- cgit v1.2.3 From fab940755d1d78377901450b6ee7c77356e06821 Mon Sep 17 00:00:00 2001 From: Borislav Petkov Date: Sun, 27 Jan 2019 14:03:57 +0100 Subject: x86/hw_breakpoints, kprobes: Remove kprobes ifdeffery Remove the ifdeffery in the breakpoint parsing arch_build_bp_info() by adding a within_kprobe_blacklist() stub for the !CONFIG_KPROBES case. It is returning true when kprobes are not enabled to mean that any address is within the kprobes blacklist on such kernels and thus not allow kernel breakpoints on non-kprobes kernels. Signed-off-by: Borislav Petkov Acked-by: Masami Hiramatsu Cc: Anil S Keshavamurthy Cc: "David S. Miller" Cc: Frederic Weisbecker Cc: "H. Peter Anvin" Cc: Ingo Molnar Cc: "Naveen N. Rao" Cc: Thomas Gleixner Link: https://lkml.kernel.org/r/20190127131237.4557-1-bp@alien8.de --- include/linux/kprobes.h | 5 +++++ 1 file changed, 5 insertions(+) (limited to 'include/linux') diff --git a/include/linux/kprobes.h b/include/linux/kprobes.h index e07e91daaacc..201f0f2683f2 100644 --- a/include/linux/kprobes.h +++ b/include/linux/kprobes.h @@ -442,6 +442,11 @@ static inline int enable_kprobe(struct kprobe *kp) { return -ENOSYS; } + +static inline bool within_kprobe_blacklist(unsigned long addr) +{ + return true; +} #endif /* CONFIG_KPROBES */ static inline int disable_kretprobe(struct kretprobe *rp) { -- cgit v1.2.3 From 5c238a8b599f1ae25eaeb08ad0e9e13e2b9eb023 Mon Sep 17 00:00:00 2001 From: Amit Kucheria Date: Wed, 30 Jan 2019 10:52:01 +0530 Subject: cpufreq: Auto-register the driver as a thermal cooling device if asked All cpufreq drivers do similar things to register as a cooling device. Provide a cpufreq driver flag so drivers can just ask the cpufreq core to register the cooling device on their behalf. This allows us to get rid of duplicated code in the drivers. In order to allow this, we add a struct thermal_cooling_device pointer to struct cpufreq_policy so that drivers don't need to store it in a private data structure. 
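As a rough sketch (not part of this patch, and using a hypothetical "foo" driver), opting in only requires setting the new flag in the driver structure; the core then keeps the cooling device handle in the new policy->cdev field:

	static struct cpufreq_driver foo_cpufreq_driver = {
		.name		= "foo-cpufreq",
		/* ask the cpufreq core to register the cooling device for us */
		.flags		= CPUFREQ_IS_COOLING_DEV,
		.init		= foo_cpufreq_init,
		.target_index	= foo_cpufreq_target_index,
		/* ... */
	};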
Suggested-by: Stephen Boyd Suggested-by: Viresh Kumar Signed-off-by: Amit Kucheria Reviewed-by: Matthias Kaehlcke Tested-by: Matthias Kaehlcke Acked-by: Viresh Kumar Reviewed-by: Daniel Lezcano Signed-off-by: Rafael J. Wysocki --- include/linux/cpufreq.h | 9 +++++++++ 1 file changed, 9 insertions(+) (limited to 'include/linux') diff --git a/include/linux/cpufreq.h b/include/linux/cpufreq.h index c19142911554..9db074ecbbd7 100644 --- a/include/linux/cpufreq.h +++ b/include/linux/cpufreq.h @@ -151,6 +151,9 @@ struct cpufreq_policy { /* For cpufreq driver's internal use */ void *driver_data; + + /* Pointer to the cooling device if used for thermal mitigation */ + struct thermal_cooling_device *cdev; }; /* Only for ACPI */ @@ -378,6 +381,12 @@ struct cpufreq_driver { */ #define CPUFREQ_NO_AUTO_DYNAMIC_SWITCHING BIT(6) +/* + * Set by drivers that want the core to automatically register the cpufreq + * driver as a thermal cooling device. + */ +#define CPUFREQ_IS_COOLING_DEV BIT(7) + int cpufreq_register_driver(struct cpufreq_driver *driver_data); int cpufreq_unregister_driver(struct cpufreq_driver *driver_data); -- cgit v1.2.3 From 9bc61ab18b1d41f26dc06b9e6d3c203e65f83fe6 Mon Sep 17 00:00:00 2001 From: David Howells Date: Sun, 4 Nov 2018 03:19:03 -0500 Subject: vfs: Introduce fs_context, switch vfs_kern_mount() to it. Introduce a filesystem context concept to be used during superblock creation for mount and superblock reconfiguration for remount. This is allocated at the beginning of the mount procedure and into it is placed: (1) Filesystem type. (2) Namespaces. (3) Source/Device names (there may be multiple). (4) Superblock flags (SB_*). (5) Security details. (6) Filesystem-specific data, as set by the mount options. Accessor functions are then provided to set up a context, parameterise it from monolithic mount data (the data page passed to mount(2)) and tear it down again. A legacy wrapper is provided that implements what will be the basic operations, wrapping access to filesystems that aren't yet aware of the fs_context. Finally, vfs_kern_mount() is changed to make use of the fs_context and mount_fs() is replaced by vfs_get_tree(), called from vfs_kern_mount(). [AV -- add missing kstrdup()] [AV -- put_cred() can be unconditional - fc->cred can't be NULL] [AV -- take legacy_validate() contents into legacy_parse_monolithic()] [AV -- merge KERNEL_MOUNT and USER_MOUNT] [AV -- don't unlock superblock on success return from vfs_get_tree()] [AV -- kill 'reference' argument of init_fs_context()] Signed-off-by: David Howells Co-developed-by: Al Viro Signed-off-by: Al Viro --- include/linux/fs_context.h | 64 ++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 64 insertions(+) create mode 100644 include/linux/fs_context.h (limited to 'include/linux') diff --git a/include/linux/fs_context.h b/include/linux/fs_context.h new file mode 100644 index 000000000000..9805514444c9 --- /dev/null +++ b/include/linux/fs_context.h @@ -0,0 +1,64 @@ +/* Filesystem superblock creation and reconfiguration context. + * + * Copyright (C) 2018 Red Hat, Inc. All Rights Reserved. + * Written by David Howells (dhowells@redhat.com) + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public Licence + * as published by the Free Software Foundation; either version + * 2 of the Licence, or (at your option) any later version. 
+ */ + +#ifndef _LINUX_FS_CONTEXT_H +#define _LINUX_FS_CONTEXT_H + +#include +#include +#include + +struct cred; +struct dentry; +struct file_operations; +struct file_system_type; +struct net; +struct user_namespace; + +enum fs_context_purpose { + FS_CONTEXT_FOR_MOUNT, /* New superblock for explicit mount */ +}; + +/* + * Filesystem context for holding the parameters used in the creation or + * reconfiguration of a superblock. + * + * Superblock creation fills in ->root whereas reconfiguration begins with this + * already set. + * + * See Documentation/filesystems/mounting.txt + */ +struct fs_context { + struct file_system_type *fs_type; + void *fs_private; /* The filesystem's context */ + struct dentry *root; /* The root and superblock */ + struct user_namespace *user_ns; /* The user namespace for this mount */ + struct net *net_ns; /* The network namespace for this mount */ + const struct cred *cred; /* The mounter's credentials */ + const char *source; /* The source name (eg. dev path) */ + const char *subtype; /* The subtype to set on the superblock */ + void *security; /* Linux S&M options */ + unsigned int sb_flags; /* Proposed superblock flags (SB_*) */ + unsigned int sb_flags_mask; /* Superblock flags that were changed */ + enum fs_context_purpose purpose:8; + bool need_free:1; /* Need to call ops->free() */ +}; + +/* + * fs_context manipulation functions. + */ +extern struct fs_context *fs_context_for_mount(struct file_system_type *fs_type, + unsigned int sb_flags); + +extern int vfs_get_tree(struct fs_context *fc); +extern void put_fs_context(struct fs_context *fc); + +#endif /* _LINUX_FS_CONTEXT_H */ -- cgit v1.2.3 From 8f2918898eb5fe25845dde7f4a77bda0e2966e05 Mon Sep 17 00:00:00 2001 From: Al Viro Date: Sun, 4 Nov 2018 06:48:34 -0500 Subject: new helpers: vfs_create_mount(), fc_mount() Create a new helper, vfs_create_mount(), that creates a detached vfsmount object from an fs_context that has a superblock attached to it. Almost all uses will be paired with immediately preceding vfs_get_tree(); add a helper for such combination. Switch vfs_kern_mount() to use this. NOTE: mild behaviour change; passing NULL as 'device name' to something like procfs will change /proc/*/mountstats - "device none" instead on "no device". That is consistent with /proc/mounts et.al. 
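A minimal sketch of the intended calling pattern for an in-kernel mount (simplified, option parsing and most error handling elided, and assuming the constructor returns an ERR_PTR() value on failure):

	struct fs_context *fc;
	struct vfsmount *mnt;

	fc = fs_context_for_mount(type, 0);
	if (IS_ERR(fc))
		return ERR_CAST(fc);
	/* ... parameterise fc from mount options ... */
	mnt = fc_mount(fc);	/* vfs_get_tree() + vfs_create_mount() */
	put_fs_context(fc);
	return mnt;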
[do'h - EXPORT_SYMBOL_GPL slipped in by mistake; removed] [AV -- remove confused comment from vfs_create_mount()] [AV -- removed the second argument] Reviewed-by: David Howells Signed-off-by: Al Viro --- include/linux/mount.h | 3 +++ 1 file changed, 3 insertions(+) (limited to 'include/linux') diff --git a/include/linux/mount.h b/include/linux/mount.h index 037eed52164b..9197ddbf35fb 100644 --- a/include/linux/mount.h +++ b/include/linux/mount.h @@ -21,6 +21,7 @@ struct super_block; struct vfsmount; struct dentry; struct mnt_namespace; +struct fs_context; #define MNT_NOSUID 0x01 #define MNT_NODEV 0x02 @@ -88,6 +89,8 @@ struct path; extern struct vfsmount *clone_private_mount(const struct path *path); struct file_system_type; +extern struct vfsmount *fc_mount(struct fs_context *fc); +extern struct vfsmount *vfs_create_mount(struct fs_context *fc); extern struct vfsmount *vfs_kern_mount(struct file_system_type *type, int flags, const char *name, void *data); -- cgit v1.2.3 From a0c9a8b8fd9fd572b0d60276beb2142c8f59f9b8 Mon Sep 17 00:00:00 2001 From: Al Viro Date: Sun, 4 Nov 2018 07:18:51 -0500 Subject: teach vfs_get_tree() to handle subtype, switch do_new_mount() to it Roll the handling of subtypes into do_new_mount() and vfs_get_tree(). The former determines any subtype string and hangs it off the fs_context; the latter applies it. Make do_new_mount() create, parameterise and commit an fs_context and create a mount for itself rather than calling vfs_kern_mount(). [AV -- missing kstrdup()] [AV -- ... and no kstrdup() if we get to setting ->s_submount - we simply transfer it from fc, leaving NULL behind] [AV -- constify ->s_submount, while we are at it] Reviewed-by: David Howells Signed-off-by: Al Viro --- include/linux/fs.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'include/linux') diff --git a/include/linux/fs.h b/include/linux/fs.h index 811c77743dad..36fff12ab890 100644 --- a/include/linux/fs.h +++ b/include/linux/fs.h @@ -1447,7 +1447,7 @@ struct super_block { * Filesystem subtype. If non-empty the filesystem type field * in /proc/mounts will be "type.subtype" */ - char *s_subtype; + const char *s_subtype; const struct dentry_operations *s_d_op; /* default d_op for dentries */ -- cgit v1.2.3 From 8d0347f6c3a9d4953ddd636a31c6584da082e084 Mon Sep 17 00:00:00 2001 From: David Howells Date: Sun, 4 Nov 2018 09:28:36 -0500 Subject: convert do_remount_sb() to fs_context Replace do_remount_sb() with a function, reconfigure_super(), that's fs_context aware. The fs_context is expected to be parameterised already and have ->root pointing to the superblock to be reconfigured. A legacy wrapper is provided that is intended to be called from the fs_context ops when those appear, but for now is called directly from reconfigure_super(). This wrapper invokes the ->remount_fs() superblock op for the moment. It is intended that the remount_fs() op will be phased out. The fs_context->purpose is set to FS_CONTEXT_FOR_RECONFIGURE to indicate that the context is being used for reconfiguration. do_umount_root() is provided to consolidate remount-to-R/O for umount and emergency remount by creating a context and invoking reconfiguration. do_remount(), do_umount() and do_emergency_remount_callback() are switched to use the new process. 
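A rough sketch of how a remount request can now be driven through the context (the option-parsing step is elided and error handling simplified; this is not the literal do_remount() code):

	fc = fs_context_for_reconfigure(path->dentry, sb_flags, sb_flags_mask);
	if (IS_ERR(fc))
		return PTR_ERR(fc);
	/* ... parse the mount(2) data page into fc ... */
	err = reconfigure_super(fc);
	put_fs_context(fc);
	return err;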
[AV -- fold UMOUNT and EMERGENCY_REMOUNT in; fixes the umount / bug, gets rid of pointless complexity] [AV -- set ->net_ns in all cases; nfs remount will need that] [AV -- shift security_sb_remount() call into reconfigure_super(); the callers that didn't do security_sb_remount() have NULL fc->security anyway, so it's a no-op for them] Signed-off-by: David Howells Co-developed-by: Al Viro Signed-off-by: Al Viro --- include/linux/fs.h | 1 + include/linux/fs_context.h | 4 ++++ 2 files changed, 5 insertions(+) (limited to 'include/linux') diff --git a/include/linux/fs.h b/include/linux/fs.h index 36fff12ab890..c65d02c5c512 100644 --- a/include/linux/fs.h +++ b/include/linux/fs.h @@ -1337,6 +1337,7 @@ extern int send_sigurg(struct fown_struct *fown); /* These sb flags are internal to the kernel */ #define SB_SUBMOUNT (1<<26) +#define SB_FORCE (1<<27) #define SB_NOSEC (1<<28) #define SB_BORN (1<<29) #define SB_ACTIVE (1<<30) diff --git a/include/linux/fs_context.h b/include/linux/fs_context.h index 9805514444c9..98772f882a3e 100644 --- a/include/linux/fs_context.h +++ b/include/linux/fs_context.h @@ -25,6 +25,7 @@ struct user_namespace; enum fs_context_purpose { FS_CONTEXT_FOR_MOUNT, /* New superblock for explicit mount */ + FS_CONTEXT_FOR_RECONFIGURE, /* Superblock reconfiguration (remount) */ }; /* @@ -57,6 +58,9 @@ struct fs_context { */ extern struct fs_context *fs_context_for_mount(struct file_system_type *fs_type, unsigned int sb_flags); +extern struct fs_context *fs_context_for_reconfigure(struct dentry *dentry, + unsigned int sb_flags, + unsigned int sb_flags_mask); extern int vfs_get_tree(struct fs_context *fc); extern void put_fs_context(struct fs_context *fc); -- cgit v1.2.3 From e1a91586d5da6f879b6dd385a2e7227bf1653570 Mon Sep 17 00:00:00 2001 From: Al Viro Date: Sun, 23 Dec 2018 16:25:31 -0500 Subject: fs_context flavour for submounts This is an eventual replacement for vfs_submount() uses. Unlike the "mount" and "remount" cases, the users of that thing are not in VFS - they are buried in various ->d_automount() instances and rather than converting them all at once we introduce the (thankfully small and simple) infrastructure here and deal with the prospective users in afs, nfs, etc. parts of the series. Here we just introduce a new constructor (fs_context_for_submount()) along with the corresponding enum constant to be put into fc->purpose for those. 
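A hypothetical ->d_automount() instance would be expected to use the new constructor roughly like this (illustrative only; the real conversions land in the afs/nfs parts of the series):

	static struct vfsmount *foo_d_automount(struct path *path)
	{
		struct fs_context *fc;
		struct vfsmount *mnt;

		fc = fs_context_for_submount(path->dentry->d_sb->s_type,
					     path->dentry);
		if (IS_ERR(fc))
			return ERR_CAST(fc);
		/* ... inherit parameters from the parent superblock ... */
		mnt = fc_mount(fc);
		put_fs_context(fc);
		return mnt;
	}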
Signed-off-by: Al Viro --- include/linux/fs_context.h | 3 +++ 1 file changed, 3 insertions(+) (limited to 'include/linux') diff --git a/include/linux/fs_context.h b/include/linux/fs_context.h index 98772f882a3e..7feb018c7a9e 100644 --- a/include/linux/fs_context.h +++ b/include/linux/fs_context.h @@ -25,6 +25,7 @@ struct user_namespace; enum fs_context_purpose { FS_CONTEXT_FOR_MOUNT, /* New superblock for explicit mount */ + FS_CONTEXT_FOR_SUBMOUNT, /* New superblock for automatic submount */ FS_CONTEXT_FOR_RECONFIGURE, /* Superblock reconfiguration (remount) */ }; @@ -61,6 +62,8 @@ extern struct fs_context *fs_context_for_mount(struct file_system_type *fs_type, extern struct fs_context *fs_context_for_reconfigure(struct dentry *dentry, unsigned int sb_flags, unsigned int sb_flags_mask); +extern struct fs_context *fs_context_for_submount(struct file_system_type *fs_type, + struct dentry *reference); extern int vfs_get_tree(struct fs_context *fc); extern void put_fs_context(struct fs_context *fc); -- cgit v1.2.3 From f3a09c92018a91ad0981146a4ac59414f814d801 Mon Sep 17 00:00:00 2001 From: Al Viro Date: Sun, 23 Dec 2018 18:55:56 -0500 Subject: introduce fs_context methods Signed-off-by: Al Viro --- include/linux/fs.h | 2 ++ include/linux/fs_context.h | 13 +++++++++++++ 2 files changed, 15 insertions(+) (limited to 'include/linux') diff --git a/include/linux/fs.h b/include/linux/fs.h index c65d02c5c512..8d578a9e1e8c 100644 --- a/include/linux/fs.h +++ b/include/linux/fs.h @@ -61,6 +61,7 @@ struct workqueue_struct; struct iov_iter; struct fscrypt_info; struct fscrypt_operations; +struct fs_context; extern void __init inode_init(void); extern void __init inode_init_early(void); @@ -2173,6 +2174,7 @@ struct file_system_type { #define FS_HAS_SUBTYPE 4 #define FS_USERNS_MOUNT 8 /* Can be mounted by userns root */ #define FS_RENAME_DOES_D_MOVE 32768 /* FS will handle d_move() during rename() internally. */ + int (*init_fs_context)(struct fs_context *); struct dentry *(*mount) (struct file_system_type *, int, const char *, void *); void (*kill_sb) (struct super_block *); diff --git a/include/linux/fs_context.h b/include/linux/fs_context.h index 7feb018c7a9e..087c12954360 100644 --- a/include/linux/fs_context.h +++ b/include/linux/fs_context.h @@ -20,8 +20,13 @@ struct cred; struct dentry; struct file_operations; struct file_system_type; +struct mnt_namespace; struct net; +struct pid_namespace; +struct super_block; struct user_namespace; +struct vfsmount; +struct path; enum fs_context_purpose { FS_CONTEXT_FOR_MOUNT, /* New superblock for explicit mount */ @@ -39,6 +44,7 @@ enum fs_context_purpose { * See Documentation/filesystems/mounting.txt */ struct fs_context { + const struct fs_context_operations *ops; struct file_system_type *fs_type; void *fs_private; /* The filesystem's context */ struct dentry *root; /* The root and superblock */ @@ -54,6 +60,13 @@ struct fs_context { bool need_free:1; /* Need to call ops->free() */ }; +struct fs_context_operations { + void (*free)(struct fs_context *fc); + int (*parse_monolithic)(struct fs_context *fc, void *data); + int (*get_tree)(struct fs_context *fc); + int (*reconfigure)(struct fs_context *fc); +}; + /* * fs_context manipulation functions. 
*/ -- cgit v1.2.3 From c6b82263f9c6e745eb4c5dfc2578d147c4cd7604 Mon Sep 17 00:00:00 2001 From: David Howells Date: Thu, 1 Nov 2018 23:07:23 +0000 Subject: vfs: Introduce logging functions Introduce a set of logging functions through which informational messages, warnings and error messages incurred by the mount procedure can be logged and, in a future patch, passed to userspace instead by way of the filesystem configuration context file descriptor. There are four functions: (1) infof(const char *fmt, ...); Logs an informational message. (2) warnf(const char *fmt, ...); Logs a warning message. (3) errorf(const char *fmt, ...); Logs an error message. (4) invalf(const char *fmt, ...); As errof(), but returns -EINVAL so can be used on a return statement. Signed-off-by: David Howells Signed-off-by: Al Viro --- include/linux/fs_context.h | 42 ++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 42 insertions(+) (limited to 'include/linux') diff --git a/include/linux/fs_context.h b/include/linux/fs_context.h index 087c12954360..d208cc40b868 100644 --- a/include/linux/fs_context.h +++ b/include/linux/fs_context.h @@ -81,4 +81,46 @@ extern struct fs_context *fs_context_for_submount(struct file_system_type *fs_ty extern int vfs_get_tree(struct fs_context *fc); extern void put_fs_context(struct fs_context *fc); +#define logfc(FC, FMT, ...) pr_notice(FMT, ## __VA_ARGS__) + +/** + * infof - Store supplementary informational message + * @fc: The context in which to log the informational message + * @fmt: The format string + * + * Store the supplementary informational message for the process if the process + * has enabled the facility. + */ +#define infof(fc, fmt, ...) ({ logfc(fc, fmt, ## __VA_ARGS__); }) + +/** + * warnf - Store supplementary warning message + * @fc: The context in which to log the error message + * @fmt: The format string + * + * Store the supplementary warning message for the process if the process has + * enabled the facility. + */ +#define warnf(fc, fmt, ...) ({ logfc(fc, fmt, ## __VA_ARGS__); }) + +/** + * errorf - Store supplementary error message + * @fc: The context in which to log the error message + * @fmt: The format string + * + * Store the supplementary error message for the process if the process has + * enabled the facility. + */ +#define errorf(fc, fmt, ...) ({ logfc(fc, fmt, ## __VA_ARGS__); }) + +/** + * invalf - Store supplementary invalid argument error message + * @fc: The context in which to log the error message + * @fmt: The format string + * + * Store the supplementary error message for the process if the process has + * enabled the facility and return -EINVAL. + */ +#define invalf(fc, fmt, ...) ({ errorf(fc, fmt, ## __VA_ARGS__); -EINVAL; }) + #endif /* _LINUX_FS_CONTEXT_H */ -- cgit v1.2.3 From b7bb367afa4bf9de60830683305c63030c3e581d Mon Sep 17 00:00:00 2001 From: Jonas Bonn Date: Wed, 30 Jan 2019 09:40:04 +0100 Subject: spi: support inter-word delay requirement for devices Some devices are slow and cannot keep up with the SPI bus and therefore require a short delay between words of the SPI transfer. The example of this that I'm looking at is a SAMA5D2 with a minimum SPI clock of 400kHz talking to an AVR-based SPI slave. The AVR cannot put bytes on the bus fast enough to keep up with the SoC's SPI controller even at the lowest bus speed. This patch introduces the ability to specify a required inter-word delay for SPI devices. It is up to the controller driver to configure itself accordingly in order to introduce the requested delay. 
Note that, for spi_transfer, there is already a field word_delay that provides similar functionality. This field, however, is specified in clock cycles (and worse, SPI controller cycles, not SCK cycles); that makes this value dependent on the master clock instead of the device clock for which the delay is intended to provide some relief. This patch leaves this old word_delay in place and provides a time-based word_delay_us alongside it; the new field fits in the struct padding so struct size is constant. There is only one in-kernel user of the word_delay field and presumably that driver could be reworked to use the time-based value instead. The time-based delay is limited to 8 bits as these delays are intended to be short. The SAMA5D2 that I've tested this on limits delays to a maximum of ~100us, which is already many word-transfer periods even at the minimum transfer speed supported by the controller. Signed-off-by: Jonas Bonn CC: Mark Brown CC: Rob Herring CC: Mark Rutland CC: linux-spi@vger.kernel.org CC: devicetree@vger.kernel.org Signed-off-by: Mark Brown --- include/linux/spi/spi.h | 6 ++++++ 1 file changed, 6 insertions(+) (limited to 'include/linux') diff --git a/include/linux/spi/spi.h b/include/linux/spi/spi.h index 916bba47d156..662b336aa2e4 100644 --- a/include/linux/spi/spi.h +++ b/include/linux/spi/spi.h @@ -122,6 +122,8 @@ void spi_statistics_add_transfer_stats(struct spi_statistics *stats, * the spi_master. * @cs_gpiod: gpio descriptor of the chipselect line (optional, NULL when * not using a GPIO line) + * @word_delay_usecs: microsecond delay to be inserted between consecutive + * words of a transfer * * @statistics: statistics for the spi_device * @@ -169,6 +171,7 @@ struct spi_device { const char *driver_override; int cs_gpio; /* LEGACY: chip select gpio */ struct gpio_desc *cs_gpiod; /* chip select gpio desc */ + uint8_t word_delay_usecs; /* inter-word delay */ /* the statistics */ struct spi_statistics statistics; @@ -721,6 +724,8 @@ extern void spi_res_release(struct spi_controller *ctlr, * @delay_usecs: microseconds to delay after this transfer before * (optionally) changing the chipselect status, then starting * the next transfer or completing this @spi_message. + * @word_delay_usecs: microseconds to inter word delay after each word size + * (set by bits_per_word) transmission. * @word_delay: clock cycles to inter word delay after each word size * (set by bits_per_word) transmission. * @transfer_list: transfers are sequenced through @spi_message.transfers @@ -803,6 +808,7 @@ struct spi_transfer { #define SPI_NBITS_DUAL 0x02 /* 2bits transfer */ #define SPI_NBITS_QUAD 0x04 /* 4bits transfer */ u8 bits_per_word; + u8 word_delay_usecs; u16 delay_usecs; u32 speed_hz; u16 word_delay; -- cgit v1.2.3 From 57d4657716aca81ef4d7ec23e8123d26e3d28954 Mon Sep 17 00:00:00 2001 From: Richard Guy Briggs Date: Wed, 23 Jan 2019 13:35:00 -0500 Subject: audit: ignore fcaps on umount Don't fetch fcaps when umount2 is called to avoid a process hang while it waits for the missing resource to (possibly never) re-appear. Note the comment above user_path_mountpoint_at(): * A umount is a special case for path walking. We're not actually interested * in the inode in this situation, and ESTALE errors can be a problem. We * simply want track down the dentry and vfsmount attached at the mountpoint * and avoid revalidating the last component. This can happen on ceph, cifs, 9p, lustre, fuse (gluster) or NFS. 
Please see the github issue tracker https://github.com/linux-audit/audit-kernel/issues/100 Signed-off-by: Richard Guy Briggs [PM: merge fuzz in audit_log_fcaps()] Signed-off-by: Paul Moore --- include/linux/audit.h | 15 ++++++++++----- include/linux/namei.h | 3 +++ 2 files changed, 13 insertions(+), 5 deletions(-) (limited to 'include/linux') diff --git a/include/linux/audit.h b/include/linux/audit.h index ecb5d317d6a2..29251b18331a 100644 --- a/include/linux/audit.h +++ b/include/linux/audit.h @@ -25,6 +25,7 @@ #include #include +#include /* LOOKUP_* */ #include #define AUDIT_INO_UNSET ((unsigned long)-1) @@ -248,6 +249,7 @@ extern void __audit_getname(struct filename *name); #define AUDIT_INODE_PARENT 1 /* dentry represents the parent */ #define AUDIT_INODE_HIDDEN 2 /* audit record should be hidden */ +#define AUDIT_INODE_NOEVAL 4 /* audit record incomplete */ extern void __audit_inode(struct filename *name, const struct dentry *dentry, unsigned int flags); extern void __audit_file(const struct file *); @@ -308,12 +310,15 @@ static inline void audit_getname(struct filename *name) } static inline void audit_inode(struct filename *name, const struct dentry *dentry, - unsigned int parent) { + unsigned int flags) { if (unlikely(!audit_dummy_context())) { - unsigned int flags = 0; - if (parent) - flags |= AUDIT_INODE_PARENT; - __audit_inode(name, dentry, flags); + unsigned int aflags = 0; + + if (flags & LOOKUP_PARENT) + aflags |= AUDIT_INODE_PARENT; + if (flags & LOOKUP_NO_EVAL) + aflags |= AUDIT_INODE_NOEVAL; + __audit_inode(name, dentry, aflags); } } static inline void audit_file(struct file *file) diff --git a/include/linux/namei.h b/include/linux/namei.h index a78606e8e3df..9138b4471dbf 100644 --- a/include/linux/namei.h +++ b/include/linux/namei.h @@ -24,6 +24,8 @@ enum {LAST_NORM, LAST_ROOT, LAST_DOT, LAST_DOTDOT, LAST_BIND}; * - internal "there are more path components" flag * - dentry cache is untrusted; force a real lookup * - suppress terminal automount + * - skip revalidation + * - don't fetch xattrs on audit_inode */ #define LOOKUP_FOLLOW 0x0001 #define LOOKUP_DIRECTORY 0x0002 @@ -33,6 +35,7 @@ enum {LAST_NORM, LAST_ROOT, LAST_DOT, LAST_DOTDOT, LAST_BIND}; #define LOOKUP_REVAL 0x0020 #define LOOKUP_RCU 0x0040 #define LOOKUP_NO_REVAL 0x0080 +#define LOOKUP_NO_EVAL 0x0100 /* * Intent data -- cgit v1.2.3 From befa618112a0a4590ce21d70aa35c9d341337774 Mon Sep 17 00:00:00 2001 From: Stanislav Fomichev Date: Mon, 28 Jan 2019 09:21:19 -0800 Subject: bpf: BPF_PROG_TYPE_CGROUP_{SKB, SOCK, SOCK_ADDR} require cgroups enabled There is no way to exercise appropriate attach points without cgroups enabled. This lets test_verifier correctly skip tests for these prog_types if kernel was compiled without BPF cgroup support. 
Signed-off-by: Stanislav Fomichev Signed-off-by: Daniel Borkmann --- include/linux/bpf_types.h | 2 ++ 1 file changed, 2 insertions(+) (limited to 'include/linux') diff --git a/include/linux/bpf_types.h b/include/linux/bpf_types.h index 44d9ab4809bd..08bf2f1fe553 100644 --- a/include/linux/bpf_types.h +++ b/include/linux/bpf_types.h @@ -6,9 +6,11 @@ BPF_PROG_TYPE(BPF_PROG_TYPE_SOCKET_FILTER, sk_filter) BPF_PROG_TYPE(BPF_PROG_TYPE_SCHED_CLS, tc_cls_act) BPF_PROG_TYPE(BPF_PROG_TYPE_SCHED_ACT, tc_cls_act) BPF_PROG_TYPE(BPF_PROG_TYPE_XDP, xdp) +#ifdef CONFIG_CGROUP_BPF BPF_PROG_TYPE(BPF_PROG_TYPE_CGROUP_SKB, cg_skb) BPF_PROG_TYPE(BPF_PROG_TYPE_CGROUP_SOCK, cg_sock) BPF_PROG_TYPE(BPF_PROG_TYPE_CGROUP_SOCK_ADDR, cg_sock_addr) +#endif BPF_PROG_TYPE(BPF_PROG_TYPE_LWT_IN, lwt_in) BPF_PROG_TYPE(BPF_PROG_TYPE_LWT_OUT, lwt_out) BPF_PROG_TYPE(BPF_PROG_TYPE_LWT_XMIT, lwt_xmit) -- cgit v1.2.3 From 116bfa96a255123ed209da6544f74a4f2eaca5da Mon Sep 17 00:00:00 2001 From: Valdis Kletnieks Date: Tue, 29 Jan 2019 01:04:25 -0500 Subject: bpf: fix missing prototype warnings Compiling with W=1 generates warnings: CC kernel/bpf/core.o kernel/bpf/core.c:721:12: warning: no previous prototype for ?bpf_jit_alloc_exec_limit? [-Wmissing-prototypes] 721 | u64 __weak bpf_jit_alloc_exec_limit(void) | ^~~~~~~~~~~~~~~~~~~~~~~~ kernel/bpf/core.c:757:14: warning: no previous prototype for ?bpf_jit_alloc_exec? [-Wmissing-prototypes] 757 | void *__weak bpf_jit_alloc_exec(unsigned long size) | ^~~~~~~~~~~~~~~~~~ kernel/bpf/core.c:762:13: warning: no previous prototype for ?bpf_jit_free_exec? [-Wmissing-prototypes] 762 | void __weak bpf_jit_free_exec(void *addr) | ^~~~~~~~~~~~~~~~~ All three are weak functions that archs can override, provide proper prototypes for when a new arch provides their own. Signed-off-by: Valdis Kletnieks Acked-by: Song Liu Signed-off-by: Daniel Borkmann --- include/linux/filter.h | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) (limited to 'include/linux') diff --git a/include/linux/filter.h b/include/linux/filter.h index e4b473f85b46..7317376734f7 100644 --- a/include/linux/filter.h +++ b/include/linux/filter.h @@ -880,7 +880,9 @@ bpf_jit_binary_alloc(unsigned int proglen, u8 **image_ptr, unsigned int alignment, bpf_jit_fill_hole_t bpf_fill_ill_insns); void bpf_jit_binary_free(struct bpf_binary_header *hdr); - +u64 bpf_jit_alloc_exec_limit(void); +void *bpf_jit_alloc_exec(unsigned long size); +void bpf_jit_free_exec(void *addr); void bpf_jit_free(struct bpf_prog *fp); int bpf_jit_get_func_addr(const struct bpf_prog *prog, -- cgit v1.2.3 From 1832f4ef5867fd3898d8a6c6c1978b75d76fc246 Mon Sep 17 00:00:00 2001 From: Valdis Kletnieks Date: Tue, 29 Jan 2019 01:47:06 -0500 Subject: bpf, cgroups: clean up kerneldoc warnings Building with W=1 reveals some bitrot: CC kernel/bpf/cgroup.o kernel/bpf/cgroup.c:238: warning: Function parameter or member 'flags' not described in '__cgroup_bpf_attach' kernel/bpf/cgroup.c:367: warning: Function parameter or member 'unused_flags' not described in '__cgroup_bpf_detach' Add a kerneldoc line for 'flags'. Fixing the warning for 'unused_flags' is best approached by removing the unused parameter on the function call. 
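For reference, the kernel-doc shape being completed is along these lines (wording illustrative, not the exact comment added in kernel/bpf/cgroup.c):

	/**
	 * __cgroup_bpf_attach() - attach and activate a bpf program on a cgroup
	 * @cgrp: the cgroup to attach to
	 * @prog: the bpf program being attached
	 * @type: the attach type (BPF_CGROUP_*)
	 * @flags: BPF_F_* attach flags, e.g. BPF_F_ALLOW_OVERRIDE or BPF_F_ALLOW_MULTI
	 */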
Signed-off-by: Valdis Kletnieks Acked-by: Song Liu Signed-off-by: Daniel Borkmann --- include/linux/bpf-cgroup.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'include/linux') diff --git a/include/linux/bpf-cgroup.h b/include/linux/bpf-cgroup.h index 588dd5f0bd85..695b2a880d9a 100644 --- a/include/linux/bpf-cgroup.h +++ b/include/linux/bpf-cgroup.h @@ -78,7 +78,7 @@ int cgroup_bpf_inherit(struct cgroup *cgrp); int __cgroup_bpf_attach(struct cgroup *cgrp, struct bpf_prog *prog, enum bpf_attach_type type, u32 flags); int __cgroup_bpf_detach(struct cgroup *cgrp, struct bpf_prog *prog, - enum bpf_attach_type type, u32 flags); + enum bpf_attach_type type); int __cgroup_bpf_query(struct cgroup *cgrp, const union bpf_attr *attr, union bpf_attr __user *uattr); -- cgit v1.2.3 From a08c2a5a31941131c41feaa0429e4c8854cf48f2 Mon Sep 17 00:00:00 2001 From: Thara Gopinath Date: Wed, 23 Jan 2019 08:50:14 +0100 Subject: PM-runtime: Replace jiffies-based accounting with ktime-based accounting Replace jiffies-based accounting for runtime_active_time and runtime_suspended_time with ktime-based accounting. This makes the runtime debug counters inline with genpd and other PM subsytems which use ktime-based accounting. Timekeeping is initialized before driver_init(). It's only at that time that PM-runtime can be enabled. Signed-off-by: Thara Gopinath [switch from ktime to raw nsec] Signed-off-by: Vincent Guittot Reviewed-by: Ulf Hansson Signed-off-by: Rafael J. Wysocki --- include/linux/pm.h | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) (limited to 'include/linux') diff --git a/include/linux/pm.h b/include/linux/pm.h index 0bd9de116826..3d2cbf947768 100644 --- a/include/linux/pm.h +++ b/include/linux/pm.h @@ -633,9 +633,9 @@ struct dev_pm_info { int runtime_error; int autosuspend_delay; u64 last_busy; - unsigned long active_jiffies; - unsigned long suspended_jiffies; - unsigned long accounting_timestamp; + u64 active_time; + u64 suspended_time; + u64 accounting_timestamp; #endif struct pm_subsys_data *subsys_data; /* Owned by the subsystem. */ void (*set_latency_tolerance)(struct device *, s32); -- cgit v1.2.3 From 8204e0c1113d6b7f599bcd7ebfbfde72e76c102f Mon Sep 17 00:00:00 2001 From: Alexander Duyck Date: Tue, 22 Jan 2019 10:39:26 -0800 Subject: workqueue: Provide queue_work_node to queue work near a given NUMA node Provide a new function, queue_work_node, which is meant to schedule work on a "random" CPU of the requested NUMA node. The main motivation for this is to help assist asynchronous init to better improve boot times for devices that are local to a specific node. For now we just default to the first CPU that is in the intersection of the cpumask of the node and the online cpumask. The only exception is if the CPU is local to the node we will just use the current CPU. This should work for our purposes as we are currently only using this for unbound work so the CPU will be translated to a node anyway instead of being directly used. As we are only using the first CPU to represent the NUMA node for now I am limiting the scope of the function so that it can only be used with unbound workqueues. 
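A minimal usage sketch (hypothetical names; the workqueue must be unbound for the node hint to be honoured):

	static struct workqueue_struct *foo_wq;
	static struct work_struct foo_work;

	foo_wq = alloc_workqueue("foo", WQ_UNBOUND, 0);
	INIT_WORK(&foo_work, foo_work_fn);
	/* run the work on a CPU of the node the device is attached to */
	queue_work_node(dev_to_node(dev), foo_wq, &foo_work);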
Acked-by: Tejun Heo Reviewed-by: Bart Van Assche Acked-by: Dan Williams Signed-off-by: Alexander Duyck Signed-off-by: Greg Kroah-Hartman --- include/linux/workqueue.h | 2 ++ 1 file changed, 2 insertions(+) (limited to 'include/linux') diff --git a/include/linux/workqueue.h b/include/linux/workqueue.h index 60d673e15632..1f50c1e586e7 100644 --- a/include/linux/workqueue.h +++ b/include/linux/workqueue.h @@ -463,6 +463,8 @@ int workqueue_set_unbound_cpumask(cpumask_var_t cpumask); extern bool queue_work_on(int cpu, struct workqueue_struct *wq, struct work_struct *work); +extern bool queue_work_node(int node, struct workqueue_struct *wq, + struct work_struct *work); extern bool queue_delayed_work_on(int cpu, struct workqueue_struct *wq, struct delayed_work *work, unsigned long delay); extern bool mod_delayed_work_on(int cpu, struct workqueue_struct *wq, -- cgit v1.2.3 From 6be9238e5cb64741ff95c3ae440b112753ad93de Mon Sep 17 00:00:00 2001 From: Alexander Duyck Date: Tue, 22 Jan 2019 10:39:31 -0800 Subject: async: Add support for queueing on specific NUMA node Introduce four new variants of the async_schedule_ functions that allow scheduling on a specific NUMA node. The first two functions are async_schedule_near and async_schedule_near_domain end up mapping to async_schedule and async_schedule_domain, but provide NUMA node specific functionality. They replace the original functions which were moved to inline function definitions that call the new functions while passing NUMA_NO_NODE. The second two functions are async_schedule_dev and async_schedule_dev_domain which provide NUMA specific functionality when passing a device as the data member and that device has a NUMA node other than NUMA_NO_NODE. The main motivation behind this is to address the need to be able to schedule device specific init work on specific NUMA nodes in order to improve performance of memory initialization. I have seen a significant improvement in initialziation time for persistent memory as a result of this approach. In the case of 3TB of memory on a single node the initialization time in the worst case went from 36s down to about 26s for a 10s improvement. As such the data shows a general benefit for affinitizing the async work to the node local to the device. 
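A short illustration (hypothetical driver code) of pushing the slow part of a probe onto the node the device lives on:

	static void foo_async_probe(void *data, async_cookie_t cookie)
	{
		struct device *dev = data;

		/* ... time-consuming, node-local initialisation ... */
	}

	/* runs foo_async_probe() on a CPU close to dev's NUMA node */
	async_schedule_dev(foo_async_probe, dev);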
Reviewed-by: Bart Van Assche Reviewed-by: Dan Williams Signed-off-by: Alexander Duyck Signed-off-by: Greg Kroah-Hartman --- include/linux/async.h | 82 +++++++++++++++++++++++++++++++++++++++++++++++++-- 1 file changed, 79 insertions(+), 3 deletions(-) (limited to 'include/linux') diff --git a/include/linux/async.h b/include/linux/async.h index 6b0226bdaadc..f81d6dbffe68 100644 --- a/include/linux/async.h +++ b/include/linux/async.h @@ -14,6 +14,8 @@ #include #include +#include +#include typedef u64 async_cookie_t; typedef void (*async_func_t) (void *data, async_cookie_t cookie); @@ -37,9 +39,83 @@ struct async_domain { struct async_domain _name = { .pending = LIST_HEAD_INIT(_name.pending), \ .registered = 0 } -extern async_cookie_t async_schedule(async_func_t func, void *data); -extern async_cookie_t async_schedule_domain(async_func_t func, void *data, - struct async_domain *domain); +async_cookie_t async_schedule_node(async_func_t func, void *data, + int node); +async_cookie_t async_schedule_node_domain(async_func_t func, void *data, + int node, + struct async_domain *domain); + +/** + * async_schedule - schedule a function for asynchronous execution + * @func: function to execute asynchronously + * @data: data pointer to pass to the function + * + * Returns an async_cookie_t that may be used for checkpointing later. + * Note: This function may be called from atomic or non-atomic contexts. + */ +static inline async_cookie_t async_schedule(async_func_t func, void *data) +{ + return async_schedule_node(func, data, NUMA_NO_NODE); +} + +/** + * async_schedule_domain - schedule a function for asynchronous execution within a certain domain + * @func: function to execute asynchronously + * @data: data pointer to pass to the function + * @domain: the domain + * + * Returns an async_cookie_t that may be used for checkpointing later. + * @domain may be used in the async_synchronize_*_domain() functions to + * wait within a certain synchronization domain rather than globally. + * Note: This function may be called from atomic or non-atomic contexts. + */ +static inline async_cookie_t +async_schedule_domain(async_func_t func, void *data, + struct async_domain *domain) +{ + return async_schedule_node_domain(func, data, NUMA_NO_NODE, domain); +} + +/** + * async_schedule_dev - A device specific version of async_schedule + * @func: function to execute asynchronously + * @dev: device argument to be passed to function + * + * Returns an async_cookie_t that may be used for checkpointing later. + * @dev is used as both the argument for the function and to provide NUMA + * context for where to run the function. By doing this we can try to + * provide for the best possible outcome by operating on the device on the + * CPUs closest to the device. + * Note: This function may be called from atomic or non-atomic contexts. + */ +static inline async_cookie_t +async_schedule_dev(async_func_t func, struct device *dev) +{ + return async_schedule_node(func, dev, dev_to_node(dev)); +} + +/** + * async_schedule_dev_domain - A device specific version of async_schedule_domain + * @func: function to execute asynchronously + * @dev: device argument to be passed to function + * @domain: the domain + * + * Returns an async_cookie_t that may be used for checkpointing later. + * @dev is used as both the argument for the function and to provide NUMA + * context for where to run the function. By doing this we can try to + * provide for the best possible outcome by operating on the device on the + * CPUs closest to the device. 
+ * @domain may be used in the async_synchronize_*_domain() functions to + * wait within a certain synchronization domain rather than globally. + * Note: This function may be called from atomic or non-atomic contexts. + */ +static inline async_cookie_t +async_schedule_dev_domain(async_func_t func, struct device *dev, + struct async_domain *domain) +{ + return async_schedule_node_domain(func, dev, dev_to_node(dev), domain); +} + void async_unregister_domain(struct async_domain *domain); extern void async_synchronize_full(void); extern void async_synchronize_full_domain(struct async_domain *domain); -- cgit v1.2.3 From 51bee5abeab2058ea5813c5615d6197a23dbf041 Mon Sep 17 00:00:00 2001 From: Oleg Nesterov Date: Mon, 28 Jan 2019 17:00:13 +0100 Subject: cgroup/pids: turn cgroup_subsys->free() into cgroup_subsys->release() to fix the accounting The only user of cgroup_subsys->free() callback is pids_cgrp_subsys which needs pids_free() to uncharge the pid. However, ->free() is called from __put_task_struct()->cgroup_free() and this is too late. Even the trivial program which does for (;;) { int pid = fork(); assert(pid >= 0); if (pid) wait(NULL); else exit(0); } can run out of limits because release_task()->call_rcu(delayed_put_task_struct) implies an RCU gp after the task/pid goes away and before the final put(). Test-case: mkdir -p /tmp/CG mount -t cgroup2 none /tmp/CG echo '+pids' > /tmp/CG/cgroup.subtree_control mkdir /tmp/CG/PID echo 2 > /tmp/CG/PID/pids.max perl -e 'while ($p = fork) { wait; } $p // die "fork failed: $!\n"' & echo $! > /tmp/CG/PID/cgroup.procs Without this patch the forking process fails soon after migration. Rename cgroup_subsys->free() to cgroup_subsys->release() and move the callsite into the new helper, cgroup_release(), called by release_task() which actually frees the pid(s). Reported-by: Herton R. 
Krzesinski Reported-by: Jan Stancek Signed-off-by: Oleg Nesterov Signed-off-by: Tejun Heo --- include/linux/cgroup-defs.h | 2 +- include/linux/cgroup.h | 2 ++ 2 files changed, 3 insertions(+), 1 deletion(-) (limited to 'include/linux') diff --git a/include/linux/cgroup-defs.h b/include/linux/cgroup-defs.h index 8fcbae1b8db0..120d1d40704b 100644 --- a/include/linux/cgroup-defs.h +++ b/include/linux/cgroup-defs.h @@ -602,7 +602,7 @@ struct cgroup_subsys { void (*cancel_fork)(struct task_struct *task); void (*fork)(struct task_struct *task); void (*exit)(struct task_struct *task); - void (*free)(struct task_struct *task); + void (*release)(struct task_struct *task); void (*bind)(struct cgroup_subsys_state *root_css); bool early_init:1; diff --git a/include/linux/cgroup.h b/include/linux/cgroup.h index 9968332cceed..81f58b4a5418 100644 --- a/include/linux/cgroup.h +++ b/include/linux/cgroup.h @@ -121,6 +121,7 @@ extern int cgroup_can_fork(struct task_struct *p); extern void cgroup_cancel_fork(struct task_struct *p); extern void cgroup_post_fork(struct task_struct *p); void cgroup_exit(struct task_struct *p); +void cgroup_release(struct task_struct *p); void cgroup_free(struct task_struct *p); int cgroup_init_early(void); @@ -697,6 +698,7 @@ static inline int cgroup_can_fork(struct task_struct *p) { return 0; } static inline void cgroup_cancel_fork(struct task_struct *p) {} static inline void cgroup_post_fork(struct task_struct *p) {} static inline void cgroup_exit(struct task_struct *p) {} +static inline void cgroup_release(struct task_struct *p) {} static inline void cgroup_free(struct task_struct *p) {} static inline int cgroup_init_early(void) { return 0; } -- cgit v1.2.3 From 90462a5bd30c6ed91c6758e59537d047d7878ff9 Mon Sep 17 00:00:00 2001 From: Richard Guy Briggs Date: Thu, 31 Jan 2019 11:52:11 -0500 Subject: audit: remove unused actx param from audit_rule_match The audit_rule_match() struct audit_context *actx parameter is not used by any in-tree consumers (selinux, apparmour, integrity, smack). The audit context is an internal audit structure that should only be accessed by audit accessor functions. It was part of commit 03d37d25e0f9 ("LSM/Audit: Introduce generic Audit LSM hooks") but appears to have never been used. Remove it. Please see the github issue https://github.com/linux-audit/audit-kernel/issues/107 Signed-off-by: Richard Guy Briggs [PM: fixed the referenced commit title] Signed-off-by: Paul Moore --- include/linux/lsm_hooks.h | 4 +--- include/linux/security.h | 5 ++--- 2 files changed, 3 insertions(+), 6 deletions(-) (limited to 'include/linux') diff --git a/include/linux/lsm_hooks.h b/include/linux/lsm_hooks.h index 9a0bdf91e646..d0b5c7a05832 100644 --- a/include/linux/lsm_hooks.h +++ b/include/linux/lsm_hooks.h @@ -1344,7 +1344,6 @@ * @field contains the field which relates to current LSM. * @op contains the operator that will be used for matching. * @rule points to the audit rule that will be checked against. - * @actx points to the audit context associated with the check. * Return 1 if secid matches the rule, 0 if it does not, -ERRNO on failure. 
* * @audit_rule_free: @@ -1764,8 +1763,7 @@ union security_list_options { int (*audit_rule_init)(u32 field, u32 op, char *rulestr, void **lsmrule); int (*audit_rule_known)(struct audit_krule *krule); - int (*audit_rule_match)(u32 secid, u32 field, u32 op, void *lsmrule, - struct audit_context *actx); + int (*audit_rule_match)(u32 secid, u32 field, u32 op, void *lsmrule); void (*audit_rule_free)(void *lsmrule); #endif /* CONFIG_AUDIT */ diff --git a/include/linux/security.h b/include/linux/security.h index dbfb5a66babb..e8febec62ffb 100644 --- a/include/linux/security.h +++ b/include/linux/security.h @@ -1674,8 +1674,7 @@ static inline int security_key_getsecurity(struct key *key, char **_buffer) #ifdef CONFIG_SECURITY int security_audit_rule_init(u32 field, u32 op, char *rulestr, void **lsmrule); int security_audit_rule_known(struct audit_krule *krule); -int security_audit_rule_match(u32 secid, u32 field, u32 op, void *lsmrule, - struct audit_context *actx); +int security_audit_rule_match(u32 secid, u32 field, u32 op, void *lsmrule); void security_audit_rule_free(void *lsmrule); #else @@ -1692,7 +1691,7 @@ static inline int security_audit_rule_known(struct audit_krule *krule) } static inline int security_audit_rule_match(u32 secid, u32 field, u32 op, - void *lsmrule, struct audit_context *actx) + void *lsmrule) { return 0; } -- cgit v1.2.3 From a0ce2f0aa6ad97c3d4927bf2ca54bcebdf062d55 Mon Sep 17 00:00:00 2001 From: Jann Horn Date: Wed, 23 Jan 2019 15:19:17 +0100 Subject: splice: don't merge into linked buffers Before this patch, it was possible for two pipes to affect each other after data had been transferred between them with tee(): ============ $ cat tee_test.c int main(void) { int pipe_a[2]; if (pipe(pipe_a)) err(1, "pipe"); int pipe_b[2]; if (pipe(pipe_b)) err(1, "pipe"); if (write(pipe_a[1], "abcd", 4) != 4) err(1, "write"); if (tee(pipe_a[0], pipe_b[1], 2, 0) != 2) err(1, "tee"); if (write(pipe_b[1], "xx", 2) != 2) err(1, "write"); char buf[5]; if (read(pipe_a[0], buf, 4) != 4) err(1, "read"); buf[4] = 0; printf("got back: '%s'\n", buf); } $ gcc -o tee_test tee_test.c $ ./tee_test got back: 'abxx' $ ============ As suggested by Al Viro, fix it by creating a separate type for non-mergeable pipe buffers, then changing the types of buffers in splice_pipe_to_pipe() and link_pipe(). 
Cc: Fixes: 7c77f0b3f920 ("splice: implement pipe to pipe splicing") Fixes: 70524490ee2e ("[PATCH] splice: add support for sys_tee()") Suggested-by: Al Viro Signed-off-by: Jann Horn Signed-off-by: Al Viro --- include/linux/pipe_fs_i.h | 1 + 1 file changed, 1 insertion(+) (limited to 'include/linux') diff --git a/include/linux/pipe_fs_i.h b/include/linux/pipe_fs_i.h index 5a3bb3b7c9ad..3ecd7ea212ae 100644 --- a/include/linux/pipe_fs_i.h +++ b/include/linux/pipe_fs_i.h @@ -182,6 +182,7 @@ void generic_pipe_buf_get(struct pipe_inode_info *, struct pipe_buffer *); int generic_pipe_buf_confirm(struct pipe_inode_info *, struct pipe_buffer *); int generic_pipe_buf_steal(struct pipe_inode_info *, struct pipe_buffer *); void generic_pipe_buf_release(struct pipe_inode_info *, struct pipe_buffer *); +void pipe_buf_mark_unmergeable(struct pipe_buffer *buf); extern const struct pipe_buf_operations nosteal_pipe_buf_ops; -- cgit v1.2.3 From 01e7187b41191376cee8bea8de9f907b001e87b4 Mon Sep 17 00:00:00 2001 From: Jann Horn Date: Wed, 23 Jan 2019 15:19:18 +0100 Subject: pipe: stop using ->can_merge Al Viro pointed out that since there is only one pipe buffer type to which new data can be appended, it isn't necessary to have a ->can_merge field in struct pipe_buf_operations, we can just check for a magic type. Suggested-by: Al Viro Signed-off-by: Jann Horn Signed-off-by: Al Viro --- include/linux/pipe_fs_i.h | 7 ------- 1 file changed, 7 deletions(-) (limited to 'include/linux') diff --git a/include/linux/pipe_fs_i.h b/include/linux/pipe_fs_i.h index 3ecd7ea212ae..787d224ff43e 100644 --- a/include/linux/pipe_fs_i.h +++ b/include/linux/pipe_fs_i.h @@ -73,13 +73,6 @@ struct pipe_inode_info { * in fs/pipe.c for the pipe and generic variants of these hooks. */ struct pipe_buf_operations { - /* - * This is set to 1, if the generic pipe read/write may coalesce - * data into an existing buffer. If this is set to 0, a new pipe - * page segment is always used for new data. - */ - int can_merge; - /* * ->confirm() verifies that the data in the pipe buffer is there * and that the contents are good. If the pages in the pipe belong -- cgit v1.2.3 From 4bc59c2f7e306775f3d2e1bbafaa854dd1e09335 Mon Sep 17 00:00:00 2001 From: Enric Balletbo i Serra Date: Wed, 12 Dec 2018 18:33:56 +0100 Subject: mfd / platform: cros_ec: Use devm_mfd_add_devices Use devm_mfd_add_devices() for adding cros-ec core MFD child devices. This reduces the need of remove callback from platform/chrome for removing the MFD child devices. Signed-off-by: Enric Balletbo i Serra Reviewed-by: Guenter Roeck Signed-off-by: Lee Jones --- include/linux/mfd/cros_ec.h | 10 ---------- 1 file changed, 10 deletions(-) (limited to 'include/linux') diff --git a/include/linux/mfd/cros_ec.h b/include/linux/mfd/cros_ec.h index de8b588c8776..977ebaa78e99 100644 --- a/include/linux/mfd/cros_ec.h +++ b/include/linux/mfd/cros_ec.h @@ -281,16 +281,6 @@ int cros_ec_cmd_xfer(struct cros_ec_device *ec_dev, int cros_ec_cmd_xfer_status(struct cros_ec_device *ec_dev, struct cros_ec_command *msg); -/** - * cros_ec_remove() - Remove a ChromeOS EC. - * @ec_dev: Device to register. - * - * Call this to deregister a ChromeOS EC, then clean up any private data. - * - * Return: 0 on success or negative error code. - */ -int cros_ec_remove(struct cros_ec_device *ec_dev); - /** * cros_ec_register() - Register a new ChromeOS EC, using the provided info. * @ec_dev: Device to register. 
-- cgit v1.2.3 From ecf8a6cd949ef236ce435ae488ceb6b3354e677e Mon Sep 17 00:00:00 2001 From: Enric Balletbo i Serra Date: Wed, 12 Dec 2018 18:33:57 +0100 Subject: mfd / platform: cros_ec: Move lightbar attributes to its own driver The entire way how cros sysfs attibutes are created is broken. cros_ec_lightbar should be its own driver and its attributes should be associated with a lightbar driver not the mfd driver. In order to retain the path, the lightbar attributes are attached to the cros_class. The patch also adds the sysfs documentation. Signed-off-by: Enric Balletbo i Serra Reviewed-by: Guenter Roeck Signed-off-by: Lee Jones --- include/linux/mfd/cros_ec.h | 1 - 1 file changed, 1 deletion(-) (limited to 'include/linux') diff --git a/include/linux/mfd/cros_ec.h b/include/linux/mfd/cros_ec.h index 977ebaa78e99..1e9b569564ea 100644 --- a/include/linux/mfd/cros_ec.h +++ b/include/linux/mfd/cros_ec.h @@ -327,7 +327,6 @@ u32 cros_ec_get_host_event(struct cros_ec_device *ec_dev); /* sysfs stuff */ extern struct attribute_group cros_ec_attr_group; -extern struct attribute_group cros_ec_lightbar_attr_group; extern struct attribute_group cros_ec_vbc_attr_group; /* debugfs stuff */ -- cgit v1.2.3 From acb9900f9e8074858738f48bee9a705138961258 Mon Sep 17 00:00:00 2001 From: Enric Balletbo i Serra Date: Wed, 12 Dec 2018 18:33:58 +0100 Subject: mfd / platform: cros_ec: Move vbc attributes to its own driver The entire way how cros sysfs attibutes are created is broken. cros_ec_vbc should be its own driver and its attributes should be associated with a vbc driver not the mfd driver. In order to retain the path, the vbc attributes are attached to the cros_class. The patch also adds the sysfs documentation. Signed-off-by: Enric Balletbo i Serra Reviewed-by: Guenter Roeck Signed-off-by: Lee Jones --- include/linux/mfd/cros_ec.h | 1 - 1 file changed, 1 deletion(-) (limited to 'include/linux') diff --git a/include/linux/mfd/cros_ec.h b/include/linux/mfd/cros_ec.h index 1e9b569564ea..fdc3152cca1d 100644 --- a/include/linux/mfd/cros_ec.h +++ b/include/linux/mfd/cros_ec.h @@ -327,7 +327,6 @@ u32 cros_ec_get_host_event(struct cros_ec_device *ec_dev); /* sysfs stuff */ extern struct attribute_group cros_ec_attr_group; -extern struct attribute_group cros_ec_vbc_attr_group; /* debugfs stuff */ int cros_ec_debugfs_init(struct cros_ec_dev *ec); -- cgit v1.2.3 From 6fce0a2cf5a050e8a3326556d7d293e69be303be Mon Sep 17 00:00:00 2001 From: Enric Balletbo i Serra Date: Wed, 12 Dec 2018 18:33:59 +0100 Subject: mfd / platform: cros_ec: Move debugfs attributes to its own driver The entire way how cros debugfs attibutes are created is broken. cros_ec_debugfs should be its own driver and its attributes should be associated with a debugfs driver not the mfd driver. 
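The pattern these patches (and the two that follow) move towards is the usual one for a sub-driver that owns its own sysfs attributes. A minimal, purely illustrative sketch, with invented names rather than real cros-ec code:

#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/sysfs.h>

static ssize_t example_show(struct device *dev, struct device_attribute *attr,
			    char *buf)
{
	return sprintf(buf, "example\n");
}
static DEVICE_ATTR_RO(example);

static struct attribute *example_attrs[] = {
	&dev_attr_example.attr,
	NULL,
};
static const struct attribute_group example_attr_group = {
	.attrs = example_attrs,
};

static int example_probe(struct platform_device *pdev)
{
	/* the attribute group now lives and dies with this driver */
	return sysfs_create_group(&pdev->dev.kobj, &example_attr_group);
}

static int example_remove(struct platform_device *pdev)
{
	sysfs_remove_group(&pdev->dev.kobj, &example_attr_group);
	return 0;
}

static struct platform_driver example_driver = {
	.driver	= { .name = "cros-ec-example" },
	.probe	= example_probe,
	.remove	= example_remove,
};
module_platform_driver(example_driver);

MODULE_LICENSE("GPL");

The point is that the attribute group is created and removed by the child driver's own probe()/remove(), so the MFD header no longer needs to carry extern attribute_group declarations, matching the lines deleted above.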
Signed-off-by: Enric Balletbo i Serra Reviewed-by: Guenter Roeck Signed-off-by: Lee Jones --- include/linux/mfd/cros_ec.h | 6 ------ 1 file changed, 6 deletions(-) (limited to 'include/linux') diff --git a/include/linux/mfd/cros_ec.h b/include/linux/mfd/cros_ec.h index fdc3152cca1d..e50860d190db 100644 --- a/include/linux/mfd/cros_ec.h +++ b/include/linux/mfd/cros_ec.h @@ -328,10 +328,4 @@ u32 cros_ec_get_host_event(struct cros_ec_device *ec_dev); /* sysfs stuff */ extern struct attribute_group cros_ec_attr_group; -/* debugfs stuff */ -int cros_ec_debugfs_init(struct cros_ec_dev *ec); -void cros_ec_debugfs_remove(struct cros_ec_dev *ec); -void cros_ec_debugfs_suspend(struct cros_ec_dev *ec); -void cros_ec_debugfs_resume(struct cros_ec_dev *ec); - #endif /* __LINUX_MFD_CROS_EC_H */ -- cgit v1.2.3 From 6fd7f2bbd4422e7635bc771cd1ec440378158cb1 Mon Sep 17 00:00:00 2001 From: Enric Balletbo i Serra Date: Wed, 12 Dec 2018 18:34:00 +0100 Subject: mfd / platform: cros_ec: Move device sysfs attributes to its own driver The entire way how cros debugfs attibutes are created is broken. cros_ec_sysfs should be its own driver and its attributes should be associated with the sysfs driver not the mfd driver. The patch also adds the sysfs documentation. Signed-off-by: Enric Balletbo i Serra Reviewed-by: Guenter Roeck Signed-off-by: Lee Jones --- include/linux/mfd/cros_ec.h | 3 --- 1 file changed, 3 deletions(-) (limited to 'include/linux') diff --git a/include/linux/mfd/cros_ec.h b/include/linux/mfd/cros_ec.h index e50860d190db..8f2a8918bfa3 100644 --- a/include/linux/mfd/cros_ec.h +++ b/include/linux/mfd/cros_ec.h @@ -325,7 +325,4 @@ int cros_ec_get_next_event(struct cros_ec_device *ec_dev, bool *wake_event); */ u32 cros_ec_get_host_event(struct cros_ec_device *ec_dev); -/* sysfs stuff */ -extern struct attribute_group cros_ec_attr_group; - #endif /* __LINUX_MFD_CROS_EC_H */ -- cgit v1.2.3 From efb5a790dfc33b36bc64dd5a41ffc3ae5a709770 Mon Sep 17 00:00:00 2001 From: Paul Gortmaker Date: Sun, 13 Jan 2019 13:36:46 -0500 Subject: mfd: wm831x-core: Drop unused module infrastructure from non-modular code The Kconfig currently controlling compilation of this code is: drivers/mfd/Kconfig:config MFD_WM831X drivers/mfd/Kconfig: bool ...meaning that it currently is not being built as a module by anyone. Lets remove the couple traces of modular infrastructure use, so that when reading the driver there is no doubt it is builtin-only. We delete the MODULE_LICENSE tag etc. since all that information is already contained at the top of the file in the comments. Previous demodularizaion work has made wm831x_device_exit() no longer used, so it is also removed from the 831x core code. 
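The general shape of this kind of cleanup (here and in the wm8350 patch below) is sketched next; this is a hypothetical, generic driver, not the actual wm831x change:

/* Builtin-only code after the cleanup: module.h is replaced by init.h (plus
 * export.h when symbols are exported), the exit path and MODULE_* tags are
 * gone, and initialisation is registered as a plain initcall. */
#include <linux/init.h>
#include <linux/export.h>

static int __init example_core_init(void)
{
	/* one-time setup; no matching exit path exists any more */
	return 0;
}
device_initcall(example_core_init);

Because the Kconfig symbol is bool, the teardown path can never run, which is also why wm831x_device_exit() could be deleted outright.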
Signed-off-by: Paul Gortmaker Acked-by: Charles Keepax Signed-off-by: Lee Jones --- include/linux/mfd/wm831x/core.h | 1 - 1 file changed, 1 deletion(-) (limited to 'include/linux') diff --git a/include/linux/mfd/wm831x/core.h b/include/linux/mfd/wm831x/core.h index b49fa67612f1..6fcb8eb00282 100644 --- a/include/linux/mfd/wm831x/core.h +++ b/include/linux/mfd/wm831x/core.h @@ -418,7 +418,6 @@ int wm831x_bulk_read(struct wm831x *wm831x, unsigned short reg, int count, u16 *buf); int wm831x_device_init(struct wm831x *wm831x, int irq); -void wm831x_device_exit(struct wm831x *wm831x); int wm831x_device_suspend(struct wm831x *wm831x); void wm831x_device_shutdown(struct wm831x *wm831x); int wm831x_irq_init(struct wm831x *wm831x, int irq); -- cgit v1.2.3 From 0db88688e1bb0180d6348742bdba8927cd0e5670 Mon Sep 17 00:00:00 2001 From: Paul Gortmaker Date: Sun, 13 Jan 2019 13:36:48 -0500 Subject: mfd: wm8350-core: Drop unused module infrastructure from non-modular code The Kconfig currently controlling compilation of this code is: drivers/mfd/Kconfig:config MFD_WM8350 drivers/mfd/Kconfig: bool ...meaning that it currently is not being built as a module by anyone. Lets remove the couple traces of modular infrastructure use, so that when reading the driver there is no doubt it is builtin-only. We delete the MODULE_LICENSE tag etc. since all that information is already contained at the top of the file in the comments. We replace module.h with init.h and export.h ; the latter since the file does export some symbols. Previous demodularizaion work has made wm8350_device_exit() no longer used, so it is also removed from the 8350 core code. Signed-off-by: Paul Gortmaker Acked-by: Charles Keepax Signed-off-by: Lee Jones --- include/linux/mfd/wm8350/core.h | 1 - 1 file changed, 1 deletion(-) (limited to 'include/linux') diff --git a/include/linux/mfd/wm8350/core.h b/include/linux/mfd/wm8350/core.h index 509481d9cf19..202d9bde2c7c 100644 --- a/include/linux/mfd/wm8350/core.h +++ b/include/linux/mfd/wm8350/core.h @@ -643,7 +643,6 @@ struct wm8350_platform_data { */ int wm8350_device_init(struct wm8350 *wm8350, int irq, struct wm8350_platform_data *pdata); -void wm8350_device_exit(struct wm8350 *wm8350); /* * WM8350 device IO -- cgit v1.2.3 From d57f72875eed3f26afaca176c0f425f209bc99d7 Mon Sep 17 00:00:00 2001 From: Christian Hohnstaedt Date: Mon, 14 Jan 2019 09:16:34 +0100 Subject: mfd: tps65218.c: Add input voltage options These options apply to all regulators in this chip. 
ti,strict-supply-voltage-supervision: Set STRICT flag in CONFIG1 ti,under-voltage-limit-microvolt: Select 2.75, 2.95, 3.25 or 3.35 V UVLO in CONFIG1 ti,under-voltage-hyst-microvolt: Select 200mV or 400mV UVLOHYS in CONFIG2 Signed-off-by: Christian Hohnstaedt Tested-by: Keerthy Reviewed-by: Keerthy Signed-off-by: Lee Jones --- include/linux/mfd/tps65218.h | 4 ++++ 1 file changed, 4 insertions(+) (limited to 'include/linux') diff --git a/include/linux/mfd/tps65218.h b/include/linux/mfd/tps65218.h index c204d9a79436..3cbe103495ab 100644 --- a/include/linux/mfd/tps65218.h +++ b/include/linux/mfd/tps65218.h @@ -137,6 +137,10 @@ #define TPS65218_CONFIG1_PGDLY_MASK 0x18 #define TPS65218_CONFIG1_STRICT BIT(2) #define TPS65218_CONFIG1_UVLO_MASK 0x3 +#define TPS65218_CONFIG1_UVLO_2750000 0x0 +#define TPS65218_CONFIG1_UVLO_2950000 0x1 +#define TPS65218_CONFIG1_UVLO_3250000 0x2 +#define TPS65218_CONFIG1_UVLO_3350000 0x3 #define TPS65218_CONFIG2_DC12_RST BIT(7) #define TPS65218_CONFIG2_UVLOHYS BIT(6) -- cgit v1.2.3 From cfced786969c2a3e1bca45d7055a00311d93ae6c Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Fri, 4 Jan 2019 18:20:05 +0100 Subject: dma-mapping: remove the default map_resource implementation Instead provide a proper implementation in the direct mapping code, and also wire it up for arm and powerpc, leaving an error return for all the IOMMU or virtual mapping instances for which we'd have to wire up an actual implementation Signed-off-by: Christoph Hellwig Tested-by: Marek Szyprowski --- include/linux/dma-mapping.h | 12 +++++++----- 1 file changed, 7 insertions(+), 5 deletions(-) (limited to 'include/linux') diff --git a/include/linux/dma-mapping.h b/include/linux/dma-mapping.h index f6ded992c183..9842085e6774 100644 --- a/include/linux/dma-mapping.h +++ b/include/linux/dma-mapping.h @@ -208,6 +208,8 @@ dma_addr_t dma_direct_map_page(struct device *dev, struct page *page, unsigned long attrs); int dma_direct_map_sg(struct device *dev, struct scatterlist *sgl, int nents, enum dma_data_direction dir, unsigned long attrs); +dma_addr_t dma_direct_map_resource(struct device *dev, phys_addr_t paddr, + size_t size, enum dma_data_direction dir, unsigned long attrs); #if defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_DEVICE) || \ defined(CONFIG_SWIOTLB) @@ -346,19 +348,19 @@ static inline dma_addr_t dma_map_resource(struct device *dev, unsigned long attrs) { const struct dma_map_ops *ops = get_dma_ops(dev); - dma_addr_t addr; + dma_addr_t addr = DMA_MAPPING_ERROR; BUG_ON(!valid_dma_direction(dir)); /* Don't allow RAM to be mapped */ BUG_ON(pfn_valid(PHYS_PFN(phys_addr))); - addr = phys_addr; - if (ops && ops->map_resource) + if (dma_is_direct(ops)) + addr = dma_direct_map_resource(dev, phys_addr, size, dir, attrs); + else if (ops->map_resource) addr = ops->map_resource(dev, phys_addr, size, dir, attrs); debug_dma_map_resource(dev, phys_addr, size, dir, addr); - return addr; } @@ -369,7 +371,7 @@ static inline void dma_unmap_resource(struct device *dev, dma_addr_t addr, const struct dma_map_ops *ops = get_dma_ops(dev); BUG_ON(!valid_dma_direction(dir)); - if (ops && ops->unmap_resource) + if (!dma_is_direct(ops) && ops->unmap_resource) ops->unmap_resource(dev, addr, size, dir, attrs); debug_dma_unmap_resource(dev, addr, size, dir); } -- cgit v1.2.3 From 645386dfe6307dbb28f10a4513792a59beda0efa Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Fri, 4 Jan 2019 17:17:53 +0100 Subject: dma-mapping: don't BUG when calling dma_map_resource on RAM Use WARN_ON_ONCE to print a stack trace and return a 
proper error code instead. Signed-off-by: Christoph Hellwig Reviewed-by: Robin Murphy Tested-by: Marek Szyprowski --- include/linux/dma-mapping.h | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) (limited to 'include/linux') diff --git a/include/linux/dma-mapping.h b/include/linux/dma-mapping.h index 9842085e6774..b904d55247ab 100644 --- a/include/linux/dma-mapping.h +++ b/include/linux/dma-mapping.h @@ -353,7 +353,8 @@ static inline dma_addr_t dma_map_resource(struct device *dev, BUG_ON(!valid_dma_direction(dir)); /* Don't allow RAM to be mapped */ - BUG_ON(pfn_valid(PHYS_PFN(phys_addr))); + if (WARN_ON_ONCE(pfn_valid(PHYS_PFN(phys_addr)))) + return DMA_MAPPING_ERROR; if (dma_is_direct(ops)) addr = dma_direct_map_resource(dev, phys_addr, size, dir, attrs); -- cgit v1.2.3 From e2f3cd831a280fc226118d9369bf3f77aab58c56 Mon Sep 17 00:00:00 2001 From: "Rafael J. Wysocki" Date: Fri, 1 Feb 2019 01:49:14 +0100 Subject: driver core: Fix handling of runtime PM flags in device_link_add() After commit ead18c23c263 ("driver core: Introduce device links reference counting"), if there is a link between the given supplier and the given consumer already, device_link_add() will refcount it and return it unconditionally without updating its flags. It is possible, however, that the second (or any subsequent) caller of device_link_add() for the same consumer-supplier pair will pass DL_FLAG_PM_RUNTIME, possibly along with DL_FLAG_RPM_ACTIVE, in flags to it and the existing link may not behave as expected then. First, if DL_FLAG_PM_RUNTIME is not set in the existing link's flags at all, it needs to be set like during the original initialization of the link. Second, if DL_FLAG_RPM_ACTIVE is passed to device_link_add() in flags (in addition to DL_FLAG_PM_RUNTIME), the existing link should to be updated to reflect the "active" runtime PM configuration of the consumer-supplier pair and extra care must be taken here to avoid possible destructive races with runtime PM of the consumer. To that end, redefine the rpm_active field in struct device_link as a refcount, initialize it to 1 and make rpm_resume() (for the consumer) and device_link_add() increment it whenever they acquire a runtime PM reference on the supplier device. Accordingly, make rpm_suspend() (for the consumer) and pm_runtime_clean_up_links() decrement it and drop runtime PM references to the supplier device in a loop until rpm_active becones 1 again. Fixes: ead18c23c263 ("driver core: Introduce device links reference counting") Signed-off-by: Rafael J. Wysocki Signed-off-by: Greg Kroah-Hartman --- include/linux/device.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'include/linux') diff --git a/include/linux/device.h b/include/linux/device.h index d0e452fd0bff..5f49d2eff6ed 100644 --- a/include/linux/device.h +++ b/include/linux/device.h @@ -853,7 +853,7 @@ struct device_link { struct list_head c_node; enum device_link_state status; u32 flags; - bool rpm_active; + refcount_t rpm_active; struct kref kref; #ifdef CONFIG_SRCU struct rcu_head rcu_head; -- cgit v1.2.3 From e7dd40105aac9ba051e44ad711123bc53a5e4c71 Mon Sep 17 00:00:00 2001 From: "Rafael J. Wysocki" Date: Fri, 1 Feb 2019 01:59:42 +0100 Subject: driver core: Add device link flag DL_FLAG_AUTOPROBE_CONSUMER Add a new device link flag, DL_FLAG_AUTOPROBE_CONSUMER, to request the driver core to probe for a consumer driver automatically after binding a driver to the supplier device on a persistent managed device link. 
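A hedged sketch of how such links are created (placeholder devices, not taken from any real driver):

#include <linux/device.h>
#include <linux/errno.h>

/* Illustrative only: tie a consumer device to its supplier so their runtime
 * PM is coordinated.  DL_FLAG_RPM_ACTIVE means the supplier is counted as
 * runtime-active when the link is created, which is the path whose
 * rpm_active refcounting was reworked in the previous patch. */
static int example_link_to_supplier(struct device *consumer,
				    struct device *supplier)
{
	struct device_link *link;

	link = device_link_add(consumer, supplier,
			       DL_FLAG_PM_RUNTIME | DL_FLAG_RPM_ACTIVE);
	return link ? 0 : -ENODEV;
}

A caller that also passes the new DL_FLAG_AUTOPROBE_CONSUMER flag asks the driver core to probe the consumer automatically once the supplier's driver binds.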
As unbinding the supplier driver on a managed device link causes the consumer driver to be detached from its device automatically, this flag provides a complementary mechanism which is needed to address some "composite device" use cases. Signed-off-by: Rafael J. Wysocki Signed-off-by: Greg Kroah-Hartman --- include/linux/device.h | 3 +++ 1 file changed, 3 insertions(+) (limited to 'include/linux') diff --git a/include/linux/device.h b/include/linux/device.h index 5f49d2eff6ed..0ab0a3a80ec3 100644 --- a/include/linux/device.h +++ b/include/linux/device.h @@ -341,6 +341,7 @@ struct device *driver_find_device(struct device_driver *drv, struct device *start, void *data, int (*match)(struct device *dev, void *data)); +void driver_deferred_probe_add(struct device *dev); int driver_deferred_probe_check_state(struct device *dev); /** @@ -827,12 +828,14 @@ enum device_link_state { * PM_RUNTIME: If set, the runtime PM framework will use this link. * RPM_ACTIVE: Run pm_runtime_get_sync() on the supplier during link creation. * AUTOREMOVE_SUPPLIER: Remove the link automatically on supplier driver unbind. + * AUTOPROBE_CONSUMER: Probe consumer driver automatically after supplier binds. */ #define DL_FLAG_STATELESS BIT(0) #define DL_FLAG_AUTOREMOVE_CONSUMER BIT(1) #define DL_FLAG_PM_RUNTIME BIT(2) #define DL_FLAG_RPM_ACTIVE BIT(3) #define DL_FLAG_AUTOREMOVE_SUPPLIER BIT(4) +#define DL_FLAG_AUTOPROBE_CONSUMER BIT(5) /** * struct device_link - Device link representation. -- cgit v1.2.3 From 42bf4152d8a79f89f5456dee63a1f364fbce2dd6 Mon Sep 17 00:00:00 2001 From: Sumit Garg Date: Tue, 29 Jan 2019 11:19:36 +0530 Subject: tee: add supp_nowait flag in tee_context struct This flag indicates that requests in this context should not wait for tee-supplicant daemon to be started if not present and just return with an error code. It is needed for requests which should be non-blocking in nature like ones arising from TEE based kernel drivers or any in kernel api that uses TEE internal client interface. Signed-off-by: Sumit Garg Reviewed-by: Daniel Thompson Signed-off-by: Jens Wiklander --- include/linux/tee_drv.h | 6 ++++++ 1 file changed, 6 insertions(+) (limited to 'include/linux') diff --git a/include/linux/tee_drv.h b/include/linux/tee_drv.h index 6cfe05893a76..5076502c07d7 100644 --- a/include/linux/tee_drv.h +++ b/include/linux/tee_drv.h @@ -47,6 +47,11 @@ struct tee_shm_pool; * @releasing: flag that indicates if context is being released right now. * It is needed to break circular dependency on context during * shared memory release. + * @supp_nowait: flag that indicates that requests in this context should not + * wait for tee-supplicant daemon to be started if not present + * and just return with an error code. It is needed for requests + * that arises from TEE based kernel drivers that should be + * non-blocking in nature. */ struct tee_context { struct tee_device *teedev; @@ -54,6 +59,7 @@ struct tee_context { void *data; struct kref refcount; bool releasing; + bool supp_nowait; }; struct tee_param_memref { -- cgit v1.2.3 From 0fc1db9d105915021260eb241661b8e96f5c0f1a Mon Sep 17 00:00:00 2001 From: Sumit Garg Date: Tue, 29 Jan 2019 11:19:35 +0530 Subject: tee: add bus driver framework for TEE based devices Introduce a generic TEE bus driver concept for TEE based kernel drivers which would like to communicate with TEE based devices/services. Also add support in module device table for these new TEE based devices. 
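A hypothetical client driver sitting on this bus would be shaped roughly as below; the UUID, names and probe body are made up, while tee_client_device_id, tee_client_driver, to_tee_client_device() and tee_bus_type are the pieces added by the diff further down:

#include <linux/device.h>
#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/tee_drv.h>
#include <linux/uuid.h>

static int example_ta_probe(struct device *dev)
{
	struct tee_client_device *ta = to_tee_client_device(dev);

	dev_info(dev, "TEE device %pUb bound\n", &ta->id.uuid);
	return 0;
}

/* UUID is invented for the example */
static const struct tee_client_device_id example_ta_id_table[] = {
	{ UUID_INIT(0x12345678, 0x5b69, 0x11e4,
		    0x9d, 0xbb, 0x10, 0x1f, 0x74, 0xf0, 0x00, 0x99) },
	{ }
};
MODULE_DEVICE_TABLE(tee, example_ta_id_table);

static struct tee_client_driver example_ta_driver = {
	.id_table = example_ta_id_table,
	.driver = {
		.name	= "example-ta",
		.bus	= &tee_bus_type,
		.probe	= example_ta_probe,
	},
};

static int __init example_ta_init(void)
{
	return driver_register(&example_ta_driver.driver);
}
module_init(example_ta_init);

static void __exit example_ta_exit(void)
{
	driver_unregister(&example_ta_driver.driver);
}
module_exit(example_ta_exit);

MODULE_LICENSE("GPL");

When the bus match() hook (described next) finds this UUID among the devices enumerated by the underlying TEE implementation, example_ta_probe() is called, and the uevent() hook lets udev autoload the module through the device table entry.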
In this TEE bus concept, devices/services are identified via Universally Unique Identifier (UUID) and drivers register a table of device UUIDs which they can support. So this TEE bus framework registers following apis: - match(): Iterates over the driver UUID table to find a corresponding match for device UUID. If a match is found, then this particular device is probed via corresponding probe api registered by the driver. This process happens whenever a device or a driver is registered with TEE bus. - uevent(): Notifies user-space (udev) whenever a new device is registered on this bus for auto-loading of modularized drivers. Also this framework allows for device enumeration to be specific to corresponding TEE implementation like OP-TEE etc. Signed-off-by: Sumit Garg Reviewed-by: Daniel Thompson Reviewed-by: Bhupesh Sharma Signed-off-by: Jens Wiklander --- include/linux/mod_devicetable.h | 9 +++++++++ include/linux/tee_drv.h | 32 +++++++++++++++++++++++++++++++- 2 files changed, 40 insertions(+), 1 deletion(-) (limited to 'include/linux') diff --git a/include/linux/mod_devicetable.h b/include/linux/mod_devicetable.h index f9bd2f34b99f..14eaeeb46f41 100644 --- a/include/linux/mod_devicetable.h +++ b/include/linux/mod_devicetable.h @@ -779,4 +779,13 @@ struct typec_device_id { kernel_ulong_t driver_data; }; +/** + * struct tee_client_device_id - tee based device identifier + * @uuid: For TEE based client devices we use the device uuid as + * the identifier. + */ +struct tee_client_device_id { + uuid_t uuid; +}; + #endif /* LINUX_MOD_DEVICETABLE_H */ diff --git a/include/linux/tee_drv.h b/include/linux/tee_drv.h index 5076502c07d7..56d7f1b4516d 100644 --- a/include/linux/tee_drv.h +++ b/include/linux/tee_drv.h @@ -15,11 +15,14 @@ #ifndef __TEE_DRV_H #define __TEE_DRV_H -#include +#include #include #include #include +#include #include +#include +#include /* * The file describes the API provided by the generic TEE driver to the @@ -544,4 +547,31 @@ static inline bool tee_param_is_memref(struct tee_param *param) } } +extern struct bus_type tee_bus_type; + +/** + * struct tee_client_device - tee based device + * @id: device identifier + * @dev: device structure + */ +struct tee_client_device { + struct tee_client_device_id id; + struct device dev; +}; + +#define to_tee_client_device(d) container_of(d, struct tee_client_device, dev) + +/** + * struct tee_client_driver - tee client driver + * @id_table: device id table supported by this driver + * @driver: driver structure + */ +struct tee_client_driver { + const struct tee_client_device_id *id_table; + struct device_driver driver; +}; + +#define to_tee_client_driver(d) \ + container_of(d, struct tee_client_driver, driver) + #endif /*__TEE_DRV_H*/ -- cgit v1.2.3 From d83525ca62cf8ebe3271d14c36fb900c294274a2 Mon Sep 17 00:00:00 2001 From: Alexei Starovoitov Date: Thu, 31 Jan 2019 15:40:04 -0800 Subject: bpf: introduce bpf_spin_lock Introduce 'struct bpf_spin_lock' and bpf_spin_lock/unlock() helpers to let bpf program serialize access to other variables. Example: struct hash_elem { int cnt; struct bpf_spin_lock lock; }; struct hash_elem * val = bpf_map_lookup_elem(&hash_map, &key); if (val) { bpf_spin_lock(&val->lock); val->cnt++; bpf_spin_unlock(&val->lock); } Restrictions and safety checks: - bpf_spin_lock is only allowed inside HASH and ARRAY maps. - BTF description of the map is mandatory for safety analysis. - bpf program can take one bpf_spin_lock at a time, since two or more can cause dead locks. 
- only one 'struct bpf_spin_lock' is allowed per map element. It drastically simplifies implementation yet allows bpf program to use any number of bpf_spin_locks. - when bpf_spin_lock is taken the calls (either bpf2bpf or helpers) are not allowed. - bpf program must bpf_spin_unlock() before return. - bpf program can access 'struct bpf_spin_lock' only via bpf_spin_lock()/bpf_spin_unlock() helpers. - load/store into 'struct bpf_spin_lock lock;' field is not allowed. - to use bpf_spin_lock() helper the BTF description of map value must be a struct and have 'struct bpf_spin_lock anyname;' field at the top level. Nested lock inside another struct is not allowed. - syscall map_lookup doesn't copy bpf_spin_lock field to user space. - syscall map_update and program map_update do not update bpf_spin_lock field. - bpf_spin_lock cannot be on the stack or inside networking packet. bpf_spin_lock can only be inside HASH or ARRAY map value. - bpf_spin_lock is available to root only and to all program types. - bpf_spin_lock is not allowed in inner maps of map-in-map. - ld_abs is not allowed inside spin_lock-ed region. - tracing progs and socket filter progs cannot use bpf_spin_lock due to insufficient preemption checks Implementation details: - cgroup-bpf class of programs can nest with xdp/tc programs. Hence bpf_spin_lock is equivalent to spin_lock_irqsave. Other solutions to avoid nested bpf_spin_lock are possible. Like making sure that all networking progs run with softirq disabled. spin_lock_irqsave is the simplest and doesn't add overhead to the programs that don't use it. - arch_spinlock_t is used when its implemented as queued_spin_lock - archs can force their own arch_spinlock_t - on architectures where queued_spin_lock is not available and sizeof(arch_spinlock_t) != sizeof(__u32) trivial lock is used. - presence of bpf_spin_lock inside map value could have been indicated via extra flag during map_create, but specifying it via BTF is cleaner. It provides introspection for map key/value and reduces user mistakes. Next steps: - allow bpf_spin_lock in other map types (like cgroup local storage) - introduce BPF_F_LOCK flag for bpf_map_update() syscall and helper to request kernel to grab bpf_spin_lock before rewriting the value. That will serialize access to map elements. Acked-by: Peter Zijlstra (Intel) Signed-off-by: Alexei Starovoitov Signed-off-by: Daniel Borkmann --- include/linux/bpf.h | 37 ++++++++++++++++++++++++++++++++++--- include/linux/bpf_verifier.h | 1 + include/linux/btf.h | 1 + 3 files changed, 36 insertions(+), 3 deletions(-) (limited to 'include/linux') diff --git a/include/linux/bpf.h b/include/linux/bpf.h index 0394f1f9213b..2ae615b48bb8 100644 --- a/include/linux/bpf.h +++ b/include/linux/bpf.h @@ -72,14 +72,15 @@ struct bpf_map { u32 value_size; u32 max_entries; u32 map_flags; - u32 pages; + int spin_lock_off; /* >=0 valid offset, <0 error */ u32 id; int numa_node; u32 btf_key_type_id; u32 btf_value_type_id; struct btf *btf; + u32 pages; bool unpriv_array; - /* 55 bytes hole */ + /* 51 bytes hole */ /* The 3rd and 4th cacheline with misc members to avoid false sharing * particularly with refcounting. 
@@ -91,6 +92,34 @@ struct bpf_map { char name[BPF_OBJ_NAME_LEN]; }; +static inline bool map_value_has_spin_lock(const struct bpf_map *map) +{ + return map->spin_lock_off >= 0; +} + +static inline void check_and_init_map_lock(struct bpf_map *map, void *dst) +{ + if (likely(!map_value_has_spin_lock(map))) + return; + *(struct bpf_spin_lock *)(dst + map->spin_lock_off) = + (struct bpf_spin_lock){}; +} + +/* copy everything but bpf_spin_lock */ +static inline void copy_map_value(struct bpf_map *map, void *dst, void *src) +{ + if (unlikely(map_value_has_spin_lock(map))) { + u32 off = map->spin_lock_off; + + memcpy(dst, src, off); + memcpy(dst + off + sizeof(struct bpf_spin_lock), + src + off + sizeof(struct bpf_spin_lock), + map->value_size - off - sizeof(struct bpf_spin_lock)); + } else { + memcpy(dst, src, map->value_size); + } +} + struct bpf_offload_dev; struct bpf_offloaded_map; @@ -162,6 +191,7 @@ enum bpf_arg_type { ARG_PTR_TO_CTX, /* pointer to context */ ARG_ANYTHING, /* any (initialized) argument is ok */ ARG_PTR_TO_SOCKET, /* pointer to bpf_sock */ + ARG_PTR_TO_SPIN_LOCK, /* pointer to bpf_spin_lock */ }; /* type of values returned from helper functions */ @@ -879,7 +909,8 @@ extern const struct bpf_func_proto bpf_msg_redirect_hash_proto; extern const struct bpf_func_proto bpf_msg_redirect_map_proto; extern const struct bpf_func_proto bpf_sk_redirect_hash_proto; extern const struct bpf_func_proto bpf_sk_redirect_map_proto; - +extern const struct bpf_func_proto bpf_spin_lock_proto; +extern const struct bpf_func_proto bpf_spin_unlock_proto; extern const struct bpf_func_proto bpf_get_local_storage_proto; /* Shared helpers among cBPF and eBPF. */ diff --git a/include/linux/bpf_verifier.h b/include/linux/bpf_verifier.h index 0620e418dde5..69f7a3449eda 100644 --- a/include/linux/bpf_verifier.h +++ b/include/linux/bpf_verifier.h @@ -148,6 +148,7 @@ struct bpf_verifier_state { /* call stack tracking */ struct bpf_func_state *frame[MAX_CALL_FRAMES]; u32 curframe; + u32 active_spin_lock; bool speculative; }; diff --git a/include/linux/btf.h b/include/linux/btf.h index 12502e25e767..455d31b55828 100644 --- a/include/linux/btf.h +++ b/include/linux/btf.h @@ -50,6 +50,7 @@ u32 btf_id(const struct btf *btf); bool btf_member_is_reg_int(const struct btf *btf, const struct btf_type *s, const struct btf_member *m, u32 expected_offset, u32 expected_size); +int btf_find_spin_lock(const struct btf *btf, const struct btf_type *t); #ifdef CONFIG_BPF_SYSCALL const struct btf_type *btf_type_by_id(const struct btf *btf, u32 type_id); -- cgit v1.2.3 From 96049f3afd50fe8db69fa0068cdca822e747b1e4 Mon Sep 17 00:00:00 2001 From: Alexei Starovoitov Date: Thu, 31 Jan 2019 15:40:09 -0800 Subject: bpf: introduce BPF_F_LOCK flag Introduce BPF_F_LOCK flag for map_lookup and map_update syscall commands and for map_update() helper function. In all these cases take a lock of existing element (which was provided in BTF description) before copying (in or out) the rest of map value. Implementation details that are part of uapi: Array: The array map takes the element lock for lookup/update. Hash: hash map also takes the lock for lookup/update and tries to avoid the bucket lock. If old element exists it takes the element lock and updates the element in place. If element doesn't exist it allocates new one and inserts into hash table while holding the bucket lock. In rare case the hashmap has to take both the bucket lock and the element lock to update old value in place. Cgroup local storage: It is similar to array. 
update in place and lookup are done with lock taken. Signed-off-by: Alexei Starovoitov Signed-off-by: Daniel Borkmann --- include/linux/bpf.h | 2 ++ 1 file changed, 2 insertions(+) (limited to 'include/linux') diff --git a/include/linux/bpf.h b/include/linux/bpf.h index 2ae615b48bb8..bd169a7bcc93 100644 --- a/include/linux/bpf.h +++ b/include/linux/bpf.h @@ -119,6 +119,8 @@ static inline void copy_map_value(struct bpf_map *map, void *dst, void *src) memcpy(dst, src, map->value_size); } } +void copy_map_value_locked(struct bpf_map *map, void *dst, void *src, + bool lock_src); struct bpf_offload_dev; struct bpf_offloaded_map; -- cgit v1.2.3 From b8580e9de48bf32b884910d22330ef2fa027cf01 Mon Sep 17 00:00:00 2001 From: Shunyong Yang Date: Fri, 1 Feb 2019 17:11:14 -0600 Subject: PCI: Add HXT vendor ID Add the HXT vendor ID to pci_ids.h. Signed-off-by: Shunyong Yang [bhelgaas: split to separate patch] Signed-off-by: Bjorn Helgaas Reviewed-by: Sinan Kaya CC: Joey Zheng --- include/linux/pci_ids.h | 2 ++ 1 file changed, 2 insertions(+) (limited to 'include/linux') diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h index 5eaf39dbc388..26420e619dd7 100644 --- a/include/linux/pci_ids.h +++ b/include/linux/pci_ids.h @@ -2573,6 +2573,8 @@ #define PCI_VENDOR_ID_HYGON 0x1d94 +#define PCI_VENDOR_ID_HXT 0x1dbf + #define PCI_VENDOR_ID_TEKRAM 0x1de1 #define PCI_DEVICE_ID_TEKRAM_DC290 0xdc29 -- cgit v1.2.3 From 0ce26a1c31ca928df4dfc7504c8898b71ff9f5d5 Mon Sep 17 00:00:00 2001 From: Andy Shevchenko Date: Fri, 1 Feb 2019 17:24:52 -0600 Subject: PCI: Move Rohm Vendor ID to generic list Move the Rohm Vendor ID to pci_ids.h instead of defining it in several drivers. Signed-off-by: Andy Shevchenko Signed-off-by: Bjorn Helgaas Acked-by: Mark Brown Acked-by: Linus Walleij --- include/linux/pci_ids.h | 2 ++ 1 file changed, 2 insertions(+) (limited to 'include/linux') diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h index 26420e619dd7..70e86148cb1e 100644 --- a/include/linux/pci_ids.h +++ b/include/linux/pci_ids.h @@ -1140,6 +1140,8 @@ #define PCI_VENDOR_ID_TCONRAD 0x10da #define PCI_DEVICE_ID_TCONRAD_TOKENRING 0x0508 +#define PCI_VENDOR_ID_ROHM 0x10db + #define PCI_VENDOR_ID_NVIDIA 0x10de #define PCI_DEVICE_ID_NVIDIA_TNT 0x0020 #define PCI_DEVICE_ID_NVIDIA_TNT2 0x0028 -- cgit v1.2.3 From f38ab20b749da84e3df1f8c9240ddc791b0d5983 Mon Sep 17 00:00:00 2001 From: Daniel Drake Date: Thu, 20 Dec 2018 14:59:33 +0800 Subject: iio: st_accel: use ACPI orientation data Platform-specific ST accelerometer mount matrix information can be provided by returning a package of 6 integers from the ACPI _ONT method. This has been seen on Acer products such as Veriton Z4860G, Z6860G and A890, which include a ST SMO8840 sensor. We have also confirmed experimentally that the Windows driver uses such information. The _ONT data format was explained by a ST vendor contact. However, strangely enough, the _ONT transformations must be applied after first applying another mount matrix which we determined experimentally. ST have not commented on why this is the case, but we imagine that perhaps earlier devices (before _ONT was introduced) required this translation and hence it became 'standard.' Interpret the _ONT data and export the equivalent mount matrix to userspace. If no _ONT data is present, no mount matrix is exported. 
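For background, the exported mount matrix is meant to be applied by userspace: raw (x, y, z) readings are multiplied by the 3x3 matrix to rotate them from the chip's frame into the device's frame. A generic consumer-side sketch (matrix values are invented; nothing here is st_accel code):

#include <stdio.h>

/* Apply a 3x3 mount matrix (row-major) to a raw accelerometer sample.
 * A real consumer would parse the matrix from the sysfs attribute the
 * driver exports rather than hard-coding it. */
static void apply_mount_matrix(const double m[3][3], const double in[3],
			       double out[3])
{
	for (int row = 0; row < 3; row++)
		out[row] = m[row][0] * in[0] + m[row][1] * in[1] +
			   m[row][2] * in[2];
}

int main(void)
{
	/* example: sensor mounted rotated 90 degrees around the Z axis */
	const double m[3][3] = {
		{ 0, -1, 0 },
		{ 1,  0, 0 },
		{ 0,  0, 1 },
	};
	const double raw[3] = { 0.0, 9.8, 0.0 };
	double oriented[3];

	apply_mount_matrix(m, raw, oriented);
	printf("%.1f %.1f %.1f\n", oriented[0], oriented[1], oriented[2]);
	return 0;
}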
Signed-off-by: Daniel Drake Signed-off-by: Jonathan Cameron --- include/linux/iio/common/st_sensors.h | 1 + 1 file changed, 1 insertion(+) (limited to 'include/linux') diff --git a/include/linux/iio/common/st_sensors.h b/include/linux/iio/common/st_sensors.h index 8092b8e7f37e..45e9667f0a8c 100644 --- a/include/linux/iio/common/st_sensors.h +++ b/include/linux/iio/common/st_sensors.h @@ -260,6 +260,7 @@ struct st_sensor_settings { struct st_sensor_data { struct device *dev; struct iio_trigger *trig; + struct iio_mount_matrix *mount_matrix; struct st_sensor_settings *sensor_settings; struct st_sensor_fullscale_avl *current_fullscale; struct regulator *vdd; -- cgit v1.2.3 From d5d30d5a5c60628de5e77e3f292a8f9012d51350 Mon Sep 17 00:00:00 2001 From: Dan Williams Date: Sat, 2 Feb 2019 16:35:26 -0800 Subject: libnvdimm/dimm: Add a no-BLK quirk based on NVDIMM family As Dexuan reports the NVDIMM_FAMILY_HYPERV platform is incompatible with the existing Linux namespace implementation because it uses NSLABEL_FLAG_LOCAL for x1-width PMEM interleave sets. Quirk it as an platform / DIMM that does not provide BLK-aperture access. Allow the libnvdimm core to assume no potential for aliasing. In case other implementations make the same mistake, provide a "noblk" module parameter to force-enable the quirk. Link: https://lkml.kernel.org/r/PU1P153MB0169977604493B82B662A01CBF920@PU1P153MB0169.APCP153.PROD.OUTLOOK.COM Reported-by: Dexuan Cui Tested-by: Dexuan Cui Signed-off-by: Dan Williams --- include/linux/libnvdimm.h | 2 ++ 1 file changed, 2 insertions(+) (limited to 'include/linux') diff --git a/include/linux/libnvdimm.h b/include/linux/libnvdimm.h index 5440f11b0907..7da406ae3a2b 100644 --- a/include/linux/libnvdimm.h +++ b/include/linux/libnvdimm.h @@ -42,6 +42,8 @@ enum { NDD_SECURITY_OVERWRITE = 3, /* tracking whether or not there is a pending device reference */ NDD_WORK_PENDING = 4, + /* ignore / filter NSLABEL_FLAG_LOCAL for this DIMM, i.e. no aliasing */ + NDD_NOBLK = 5, /* need to set a limit somewhere, but yes, this is likely overkill */ ND_IOCTL_MAX_BUFLEN = SZ_4M, -- cgit v1.2.3 From 79a4e91d1bb2a411a4ce2baa93680fa707567003 Mon Sep 17 00:00:00 2001 From: Joe Perches Date: Sat, 2 Feb 2019 19:50:17 -0800 Subject: device.h: Add __cold to dev_ logging functions Add __cold to the dev_ logging functions similar to the use of __cold in the generic printk function. Using __cold moves all the dev_ logging functions out-of-line possibly improving code locality and runtime performance. 
Signed-off-by: Joe Perches Signed-off-by: Greg Kroah-Hartman --- include/linux/device.h | 20 ++++++++++---------- 1 file changed, 10 insertions(+), 10 deletions(-) (limited to 'include/linux') diff --git a/include/linux/device.h b/include/linux/device.h index 0ab0a3a80ec3..a36830e2d0e5 100644 --- a/include/linux/device.h +++ b/include/linux/device.h @@ -1384,28 +1384,28 @@ void device_link_remove(void *consumer, struct device *supplier); #ifdef CONFIG_PRINTK -__printf(3, 0) +__printf(3, 0) __cold int dev_vprintk_emit(int level, const struct device *dev, const char *fmt, va_list args); -__printf(3, 4) +__printf(3, 4) __cold int dev_printk_emit(int level, const struct device *dev, const char *fmt, ...); -__printf(3, 4) +__printf(3, 4) __cold void dev_printk(const char *level, const struct device *dev, const char *fmt, ...); -__printf(2, 3) +__printf(2, 3) __cold void _dev_emerg(const struct device *dev, const char *fmt, ...); -__printf(2, 3) +__printf(2, 3) __cold void _dev_alert(const struct device *dev, const char *fmt, ...); -__printf(2, 3) +__printf(2, 3) __cold void _dev_crit(const struct device *dev, const char *fmt, ...); -__printf(2, 3) +__printf(2, 3) __cold void _dev_err(const struct device *dev, const char *fmt, ...); -__printf(2, 3) +__printf(2, 3) __cold void _dev_warn(const struct device *dev, const char *fmt, ...); -__printf(2, 3) +__printf(2, 3) __cold void _dev_notice(const struct device *dev, const char *fmt, ...); -__printf(2, 3) +__printf(2, 3) __cold void _dev_info(const struct device *dev, const char *fmt, ...); #else -- cgit v1.2.3 From dda7a817f2873a0e0b1c7fde1265758f3623daa4 Mon Sep 17 00:00:00 2001 From: Moni Shoua Date: Tue, 22 Jan 2019 08:48:49 +0200 Subject: net/mlx5: Add XRC transport to ODP device capabilities layout The device capabilities for ODP structure was missing the field for XRC transport so add it here. Signed-off-by: Moni Shoua Reviewed-by: Majd Dibbiny Signed-off-by: Leon Romanovsky --- include/linux/mlx5/mlx5_ifc.h | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) (limited to 'include/linux') diff --git a/include/linux/mlx5/mlx5_ifc.h b/include/linux/mlx5/mlx5_ifc.h index 35fe5217b244..5407db8ba8e1 100644 --- a/include/linux/mlx5/mlx5_ifc.h +++ b/include/linux/mlx5/mlx5_ifc.h @@ -831,7 +831,9 @@ struct mlx5_ifc_odp_cap_bits { struct mlx5_ifc_odp_per_transport_service_cap_bits ud_odp_caps; - u8 reserved_at_e0[0x720]; + struct mlx5_ifc_odp_per_transport_service_cap_bits xrc_odp_caps; + + u8 reserved_at_100[0x700]; }; struct mlx5_ifc_calc_op { -- cgit v1.2.3 From 46861e3e88be18846971792b763eaf520a91a802 Mon Sep 17 00:00:00 2001 From: Moni Shoua Date: Tue, 22 Jan 2019 08:48:51 +0200 Subject: net/mlx5: Set ODP SRQ support in firmware To avoid compatibility issue with older kernels the firmware doesn't allow SRQ to work with ODP unless kernel asks for it. 
Signed-off-by: Moni Shoua Reviewed-by: Majd Dibbiny Signed-off-by: Leon Romanovsky --- include/linux/mlx5/device.h | 3 +++ include/linux/mlx5/mlx5_ifc.h | 1 + 2 files changed, 4 insertions(+) (limited to 'include/linux') diff --git a/include/linux/mlx5/device.h b/include/linux/mlx5/device.h index 8c4a820bd4c1..0845a227a7b2 100644 --- a/include/linux/mlx5/device.h +++ b/include/linux/mlx5/device.h @@ -1201,6 +1201,9 @@ enum mlx5_qcam_feature_groups { #define MLX5_CAP_ODP(mdev, cap)\ MLX5_GET(odp_cap, mdev->caps.hca_cur[MLX5_CAP_ODP], cap) +#define MLX5_CAP_ODP_MAX(mdev, cap)\ + MLX5_GET(odp_cap, mdev->caps.hca_max[MLX5_CAP_ODP], cap) + #define MLX5_CAP_VECTOR_CALC(mdev, cap) \ MLX5_GET(vector_calc_cap, \ mdev->caps.hca_cur[MLX5_CAP_VECTOR_CALC], cap) diff --git a/include/linux/mlx5/mlx5_ifc.h b/include/linux/mlx5/mlx5_ifc.h index 5407db8ba8e1..c5c679390fbd 100644 --- a/include/linux/mlx5/mlx5_ifc.h +++ b/include/linux/mlx5/mlx5_ifc.h @@ -72,6 +72,7 @@ enum { enum { MLX5_SET_HCA_CAP_OP_MOD_GENERAL_DEVICE = 0x0, + MLX5_SET_HCA_CAP_OP_MOD_ODP = 0x2, MLX5_SET_HCA_CAP_OP_MOD_ATOMIC = 0x3, }; -- cgit v1.2.3 From 13c6ee2a921683bae4bb4ba57b1f5b82f49e6b8a Mon Sep 17 00:00:00 2001 From: Deepa Dinamani Date: Sat, 2 Feb 2019 07:34:48 -0800 Subject: socket: Use old_timeval types for socket timestamps As part of y2038 solution, all internal uses of struct timeval are replaced by struct __kernel_old_timeval and struct compat_timeval by struct old_timeval32. Make socket timestamps use these new types. This is mainly to be able to verify that the kernel build is y2038 safe when such non y2038 safe types are not supported anymore. Signed-off-by: Deepa Dinamani Acked-by: Willem de Bruijn Cc: isdn@linux-pingi.de Signed-off-by: David S. Miller --- include/linux/skbuff.h | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) (limited to 'include/linux') diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h index c34595374e93..4001611a4c9f 100644 --- a/include/linux/skbuff.h +++ b/include/linux/skbuff.h @@ -3486,16 +3486,16 @@ static inline ktime_t skb_get_ktime(const struct sk_buff *skb) /** * skb_get_timestamp - get timestamp from a skb * @skb: skb to get stamp from - * @stamp: pointer to struct timeval to store stamp in + * @stamp: pointer to struct __kernel_old_timeval to store stamp in * * Timestamps are stored in the skb as offsets to a base timestamp. * This function converts the offset back to a struct timeval and stores * it in stamp. */ static inline void skb_get_timestamp(const struct sk_buff *skb, - struct timeval *stamp) + struct __kernel_old_timeval *stamp) { - *stamp = ktime_to_timeval(skb->tstamp); + *stamp = ns_to_kernel_old_timeval(skb->tstamp); } static inline void skb_get_timestampns(const struct sk_buff *skb, -- cgit v1.2.3 From 887feae36aee6c08e0dafcdaa5ba921abbb2c56b Mon Sep 17 00:00:00 2001 From: Deepa Dinamani Date: Sat, 2 Feb 2019 07:34:50 -0800 Subject: socket: Add SO_TIMESTAMP[NS]_NEW Add SO_TIMESTAMP_NEW and SO_TIMESTAMPNS_NEW variants of socket timestamp options. These are the y2038 safe versions of the SO_TIMESTAMP_OLD and SO_TIMESTAMPNS_OLD for all architectures. Note that the format of scm_timestamping.ts[0] is not changed in this patch. Signed-off-by: Deepa Dinamani Acked-by: Willem de Bruijn Cc: jejb@parisc-linux.org Cc: ralf@linux-mips.org Cc: rth@twiddle.net Cc: linux-alpha@vger.kernel.org Cc: linux-mips@linux-mips.org Cc: linux-parisc@vger.kernel.org Cc: linux-rdma@vger.kernel.org Cc: netdev@vger.kernel.org Cc: sparclinux@vger.kernel.org Signed-off-by: David S. 
Miller --- include/linux/skbuff.h | 18 ++++++++++++++++++ 1 file changed, 18 insertions(+) (limited to 'include/linux') diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h index 4001611a4c9f..831846617d07 100644 --- a/include/linux/skbuff.h +++ b/include/linux/skbuff.h @@ -3498,12 +3498,30 @@ static inline void skb_get_timestamp(const struct sk_buff *skb, *stamp = ns_to_kernel_old_timeval(skb->tstamp); } +static inline void skb_get_new_timestamp(const struct sk_buff *skb, + struct __kernel_sock_timeval *stamp) +{ + struct timespec64 ts = ktime_to_timespec64(skb->tstamp); + + stamp->tv_sec = ts.tv_sec; + stamp->tv_usec = ts.tv_nsec / 1000; +} + static inline void skb_get_timestampns(const struct sk_buff *skb, struct timespec *stamp) { *stamp = ktime_to_timespec(skb->tstamp); } +static inline void skb_get_new_timestampns(const struct sk_buff *skb, + struct __kernel_timespec *stamp) +{ + struct timespec64 ts = ktime_to_timespec64(skb->tstamp); + + stamp->tv_sec = ts.tv_sec; + stamp->tv_nsec = ts.tv_nsec; +} + static inline void __net_timestamp(struct sk_buff *skb) { skb->tstamp = ktime_get_real(); -- cgit v1.2.3 From 9718475e69084de15c3930ce35672a7dc6da866b Mon Sep 17 00:00:00 2001 From: Deepa Dinamani Date: Sat, 2 Feb 2019 07:34:51 -0800 Subject: socket: Add SO_TIMESTAMPING_NEW Add SO_TIMESTAMPING_NEW variant of socket timestamp options. This is the y2038 safe versions of the SO_TIMESTAMPING_OLD for all architectures. Signed-off-by: Deepa Dinamani Acked-by: Willem de Bruijn Cc: chris@zankel.net Cc: fenghua.yu@intel.com Cc: rth@twiddle.net Cc: tglx@linutronix.de Cc: ubraun@linux.ibm.com Cc: linux-alpha@vger.kernel.org Cc: linux-arch@vger.kernel.org Cc: linux-ia64@vger.kernel.org Cc: linux-mips@linux-mips.org Cc: linux-s390@vger.kernel.org Cc: linux-xtensa@linux-xtensa.org Cc: sparclinux@vger.kernel.org Signed-off-by: David S. Miller --- include/linux/socket.h | 8 ++++++++ 1 file changed, 8 insertions(+) (limited to 'include/linux') diff --git a/include/linux/socket.h b/include/linux/socket.h index ab2041a00e01..6016daeecee4 100644 --- a/include/linux/socket.h +++ b/include/linux/socket.h @@ -349,9 +349,17 @@ struct ucred { extern int move_addr_to_kernel(void __user *uaddr, int ulen, struct sockaddr_storage *kaddr); extern int put_cmsg(struct msghdr*, int level, int type, int len, void *data); +struct timespec64; struct __kernel_timespec; struct old_timespec32; +struct scm_timestamping_internal { + struct timespec64 ts[3]; +}; + +extern void put_cmsg_scm_timestamping64(struct msghdr *msg, struct scm_timestamping_internal *tss); +extern void put_cmsg_scm_timestamping(struct msghdr *msg, struct scm_timestamping_internal *tss); + /* The __sys_...msg variants allow MSG_CMSG_COMPAT iff * forbid_cmsg_compat==false */ -- cgit v1.2.3 From 9fb20801dab46238706267896df1b3938d977129 Mon Sep 17 00:00:00 2001 From: Florian Fainelli Date: Fri, 1 Feb 2019 20:20:52 -0800 Subject: net: Fix ip_mc_{dec,inc}_group allocation context After 4effd28c1245 ("bridge: join all-snoopers multicast address"), I started seeing the following sleep in atomic warnings: [ 26.763893] BUG: sleeping function called from invalid context at mm/slab.h:421 [ 26.771425] in_atomic(): 1, irqs_disabled(): 0, pid: 1658, name: sh [ 26.777855] INFO: lockdep is turned off. 
[ 26.781916] CPU: 0 PID: 1658 Comm: sh Not tainted 5.0.0-rc4 #20 [ 26.787943] Hardware name: BCM97278SV (DT) [ 26.792118] Call trace: [ 26.794645] dump_backtrace+0x0/0x170 [ 26.798391] show_stack+0x24/0x30 [ 26.801787] dump_stack+0xa4/0xe4 [ 26.805182] ___might_sleep+0x208/0x218 [ 26.809102] __might_sleep+0x78/0x88 [ 26.812762] kmem_cache_alloc_trace+0x64/0x28c [ 26.817301] igmp_group_dropped+0x150/0x230 [ 26.821573] ip_mc_dec_group+0x1b0/0x1f8 [ 26.825585] br_ip4_multicast_leave_snoopers.isra.11+0x174/0x190 [ 26.831704] br_multicast_toggle+0x78/0xcc [ 26.835887] store_bridge_parm+0xc4/0xfc [ 26.839894] multicast_snooping_store+0x3c/0x4c [ 26.844517] dev_attr_store+0x44/0x5c [ 26.848262] sysfs_kf_write+0x50/0x68 [ 26.852006] kernfs_fop_write+0x14c/0x1b4 [ 26.856102] __vfs_write+0x60/0x190 [ 26.859668] vfs_write+0xc8/0x168 [ 26.863059] ksys_write+0x70/0xc8 [ 26.866449] __arm64_sys_write+0x24/0x30 [ 26.870458] el0_svc_common+0xa0/0x11c [ 26.874291] el0_svc_handler+0x38/0x70 [ 26.878120] el0_svc+0x8/0xc while toggling the bridge's multicast_snooping attribute dynamically. Pass a gfp_t down to igmpv3_add_delrec(), introduce __igmp_group_dropped() and introduce __ip_mc_dec_group() to take a gfp_t argument. Similarly introduce ____ip_mc_inc_group() and __ip_mc_inc_group() to allow caller to specify gfp_t. IPv6 part of the patch appears fine. Fixes: 4effd28c1245 ("bridge: join all-snoopers multicast address") Signed-off-by: Florian Fainelli Signed-off-by: David S. Miller --- include/linux/igmp.h | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) (limited to 'include/linux') diff --git a/include/linux/igmp.h b/include/linux/igmp.h index 8b4348f69bc5..cc85f4524dbf 100644 --- a/include/linux/igmp.h +++ b/include/linux/igmp.h @@ -137,7 +137,13 @@ extern void ip_mc_up(struct in_device *); extern void ip_mc_down(struct in_device *); extern void ip_mc_unmap(struct in_device *); extern void ip_mc_remap(struct in_device *); -extern void ip_mc_dec_group(struct in_device *in_dev, __be32 addr); +extern void __ip_mc_dec_group(struct in_device *in_dev, __be32 addr, gfp_t gfp); +static inline void ip_mc_dec_group(struct in_device *in_dev, __be32 addr) +{ + return __ip_mc_dec_group(in_dev, addr, GFP_KERNEL); +} +extern void __ip_mc_inc_group(struct in_device *in_dev, __be32 addr, + gfp_t gfp); extern void ip_mc_inc_group(struct in_device *in_dev, __be32 addr); int ip_mc_check_igmp(struct sk_buff *skb); -- cgit v1.2.3 From 5f3d544f1671d214cd26e45bda326f921455256e Mon Sep 17 00:00:00 2001 From: Richard Guy Briggs Date: Fri, 1 Feb 2019 22:45:17 -0500 Subject: audit: remove audit_context when CONFIG_ AUDIT and not AUDITSYSCALL Remove audit_context from struct task_struct and struct audit_buffer when CONFIG_AUDIT is enabled but CONFIG_AUDITSYSCALL is not. Also, audit_log_name() (and supporting inode and fcaps functions) should have been put back in auditsc.c when soft and hard link logging was normalized since it is only used by syscall auditing. 
See github issue https://github.com/linux-audit/audit-kernel/issues/105 Signed-off-by: Richard Guy Briggs Signed-off-by: Paul Moore --- include/linux/sched.h | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) (limited to 'include/linux') diff --git a/include/linux/sched.h b/include/linux/sched.h index f9788bb122c5..765119df759a 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h @@ -885,8 +885,10 @@ struct task_struct { struct callback_head *task_works; - struct audit_context *audit_context; #ifdef CONFIG_AUDIT +#ifdef CONFIG_AUDITSYSCALL + struct audit_context *audit_context; +#endif kuid_t loginuid; unsigned int sessionid; #endif -- cgit v1.2.3 From ce3fdb697f684b0c018cda8af91f953b7936a9c2 Mon Sep 17 00:00:00 2001 From: Joe Perches Date: Sat, 2 Feb 2019 19:47:25 -0800 Subject: netdevice.h: Add __cold to netdev_ logging functions Add __cold to the netdev_ logging functions similar to the use of __cold in the generic printk function. Using __cold moves all the netdev_ logging functions out-of-line possibly improving code locality and runtime performance. Signed-off-by: Joe Perches Signed-off-by: David S. Miller --- include/linux/netdevice.h | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) (limited to 'include/linux') diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h index e675ef97a426..ba57d0ba425e 100644 --- a/include/linux/netdevice.h +++ b/include/linux/netdevice.h @@ -4663,22 +4663,22 @@ static inline const char *netdev_reg_state(const struct net_device *dev) return " (unknown)"; } -__printf(3, 4) +__printf(3, 4) __cold void netdev_printk(const char *level, const struct net_device *dev, const char *format, ...); -__printf(2, 3) +__printf(2, 3) __cold void netdev_emerg(const struct net_device *dev, const char *format, ...); -__printf(2, 3) +__printf(2, 3) __cold void netdev_alert(const struct net_device *dev, const char *format, ...); -__printf(2, 3) +__printf(2, 3) __cold void netdev_crit(const struct net_device *dev, const char *format, ...); -__printf(2, 3) +__printf(2, 3) __cold void netdev_err(const struct net_device *dev, const char *format, ...); -__printf(2, 3) +__printf(2, 3) __cold void netdev_warn(const struct net_device *dev, const char *format, ...); -__printf(2, 3) +__printf(2, 3) __cold void netdev_notice(const struct net_device *dev, const char *format, ...); -__printf(2, 3) +__printf(2, 3) __cold void netdev_info(const struct net_device *dev, const char *format, ...); #define netdev_level_once(level, dev, fmt, ...) \ -- cgit v1.2.3 From 494c704f9af0a0cddf593b381ea44320888733e6 Mon Sep 17 00:00:00 2001 From: Ard Biesheuvel Date: Sat, 2 Feb 2019 10:41:13 +0100 Subject: efi: Use 32-bit alignment for efi_guid_t The UEFI spec and EDK2 reference implementation both define EFI_GUID as struct { u32 a; u16; b; u16 c; u8 d[8]; }; and so the implied alignment is 32 bits not 8 bits like our guid_t. In some cases (i.e., on 32-bit ARM), this means that firmware services invoked by the kernel may assume that efi_guid_t* arguments are 32-bit aligned, and use memory accessors that do not tolerate misalignment. So let's set the minimum alignment to 32 bits. Note that the UEFI spec as well as some comments in the EDK2 code base suggest that EFI_GUID should be 64-bit aligned, but this appears to be a mistake, given that no code seems to exist that actually enforces that or relies on it. 
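The effect of raising the minimum alignment can be illustrated with a small userspace-style sketch (GCC/Clang attribute syntax; the struct below merely mimics the kernel's byte-array guid_t and is not the real definition):

#include <stdint.h>
#include <stdio.h>

/* stand-in for the kernel's guid_t: 16 opaque bytes, natural alignment 1 */
typedef struct { uint8_t b[16]; } guid_t;

/* old:  typedef guid_t efi_guid_t;                      -> alignment 1 */
/* new:  minimum alignment raised to that of a u32, like EDK2's EFI_GUID */
typedef guid_t efi_guid_t __attribute__((aligned(__alignof__(uint32_t))));

int main(void)
{
	printf("guid_t alignment:     %lu\n",
	       (unsigned long)__alignof__(guid_t));
	printf("efi_guid_t alignment: %lu\n",
	       (unsigned long)__alignof__(efi_guid_t));
	return 0;
}

Built with GCC this should report 1 for the plain struct and 4 once the attribute is applied, which is the property the firmware-side accessors rely on.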
Reported-by: Heinrich Schuchardt Signed-off-by: Ard Biesheuvel Reviewed-by: Leif Lindholm Cc: AKASHI Takahiro Cc: Alexander Graf Cc: Bjorn Andersson Cc: Borislav Petkov Cc: Jeffrey Hugo Cc: Lee Jones Cc: Linus Torvalds Cc: Matt Fleming Cc: Peter Jones Cc: Peter Zijlstra Cc: Sai Praneeth Prakhya Cc: Thomas Gleixner Cc: linux-efi@vger.kernel.org Link: http://lkml.kernel.org/r/20190202094119.13230-5-ard.biesheuvel@linaro.org Signed-off-by: Ingo Molnar --- include/linux/efi.h | 15 ++++++++++++++- 1 file changed, 14 insertions(+), 1 deletion(-) (limited to 'include/linux') diff --git a/include/linux/efi.h b/include/linux/efi.h index 45ff763fba76..be08518c2553 100644 --- a/include/linux/efi.h +++ b/include/linux/efi.h @@ -48,7 +48,20 @@ typedef u16 efi_char16_t; /* UNICODE character */ typedef u64 efi_physical_addr_t; typedef void *efi_handle_t; -typedef guid_t efi_guid_t; +/* + * The UEFI spec and EDK2 reference implementation both define EFI_GUID as + * struct { u32 a; u16; b; u16 c; u8 d[8]; }; and so the implied alignment + * is 32 bits not 8 bits like our guid_t. In some cases (i.e., on 32-bit ARM), + * this means that firmware services invoked by the kernel may assume that + * efi_guid_t* arguments are 32-bit aligned, and use memory accessors that + * do not tolerate misalignment. So let's set the minimum alignment to 32 bits. + * + * Note that the UEFI spec as well as some comments in the EDK2 code base + * suggest that EFI_GUID should be 64-bit aligned, but this appears to be + * a mistake, given that no code seems to exist that actually enforces that + * or relies on it. + */ +typedef guid_t efi_guid_t __aligned(__alignof__(u32)); #define EFI_GUID(a,b,c,d0,d1,d2,d3,d4,d5,d6,d7) \ GUID_INIT(a, b, c, d0, d1, d2, d3, d4, d5, d6, d7) -- cgit v1.2.3 From 8c94abbbe1ba24961278055434504b7dc3595415 Mon Sep 17 00:00:00 2001 From: Elena Reshetova Date: Mon, 28 Jan 2019 14:27:26 +0200 Subject: perf: Convert perf_event_context.refcount to refcount_t atomic_t variables are currently used to implement reference counters with the following properties: - counter is initialized to 1 using atomic_set() - a resource is freed upon counter reaching zero - once counter reaches zero, its further increments aren't allowed - counter schema uses basic atomic operations (set, inc, inc_not_zero, dec_and_test, etc.) Such atomic variables should be converted to a newly provided refcount_t type and API that prevents accidental counter overflows and underflows. This is important since overflows and underflows can lead to use-after-free situation and be exploitable. The variable perf_event_context.refcount is used as pure reference counter. Convert it to refcount_t and fix up the operations. ** Important note for maintainers: Some functions from refcount_t API defined in lib/refcount.c have different memory ordering guarantees than their atomic counterparts. Please check Documentation/core-api/refcount-vs-atomic.rst for more information. Normally the differences should not matter since refcount_t provides enough guarantees to satisfy the refcounting use cases, but in some rare cases it might matter. Please double check that you don't have some undocumented memory guarantees for this variable usage. For the perf_event_context.refcount it might make a difference in following places: - get_ctx(), perf_event_ctx_lock_nested(), perf_lock_task_context() and __perf_event_ctx_lock_double(): increment in refcount_inc_not_zero() only guarantees control dependency on success vs. 
fully ordered atomic counterpart - put_ctx(): decrement in refcount_dec_and_test() provides RELEASE ordering and ACQUIRE ordering + control dependency on success vs. fully ordered atomic counterpart Suggested-by: Kees Cook Signed-off-by: Elena Reshetova Signed-off-by: Peter Zijlstra (Intel) Reviewed-by: David Windsor Reviewed-by: Hans Liljestrand Cc: Alexander Shishkin Cc: Arnaldo Carvalho de Melo Cc: Jiri Olsa Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Thomas Gleixner Cc: acme@kernel.org Cc: namhyung@kernel.org Link: https://lkml.kernel.org/r/1548678448-24458-2-git-send-email-elena.reshetova@intel.com Signed-off-by: Ingo Molnar --- include/linux/perf_event.h | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) (limited to 'include/linux') diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h index a79e59fc3b7d..6cb5d483ab34 100644 --- a/include/linux/perf_event.h +++ b/include/linux/perf_event.h @@ -54,6 +54,7 @@ struct perf_guest_info_callbacks { #include #include #include +#include #include struct perf_callchain_entry { @@ -737,7 +738,7 @@ struct perf_event_context { int nr_stat; int nr_freq; int rotate_disable; - atomic_t refcount; + refcount_t refcount; struct task_struct *task; /* -- cgit v1.2.3 From d036bda7d0e7269c2982eb979acfef855f5d7977 Mon Sep 17 00:00:00 2001 From: Elena Reshetova Date: Fri, 18 Jan 2019 14:27:26 +0200 Subject: sched/core: Convert sighand_struct.count to refcount_t atomic_t variables are currently used to implement reference counters with the following properties: - counter is initialized to 1 using atomic_set() - a resource is freed upon counter reaching zero - once counter reaches zero, its further increments aren't allowed - counter schema uses basic atomic operations (set, inc, inc_not_zero, dec_and_test, etc.) Such atomic variables should be converted to a newly provided refcount_t type and API that prevents accidental counter overflows and underflows. This is important since overflows and underflows can lead to use-after-free situation and be exploitable. The variable sighand_struct.count is used as pure reference counter. Convert it to refcount_t and fix up the operations. ** Important note for maintainers: Some functions from refcount_t API defined in lib/refcount.c have different memory ordering guarantees than their atomic counterparts. The full comparison can be seen in https://lkml.org/lkml/2017/11/15/57 and it is hopefully soon in state to be merged to the documentation tree. Normally the differences should not matter since refcount_t provides enough guarantees to satisfy the refcounting use cases, but in some rare cases it might matter. Please double check that you don't have some undocumented memory guarantees for this variable usage. For the sighand_struct.count it might make a difference in following places: - __cleanup_sighand: decrement in refcount_dec_and_test() only provides RELEASE ordering and control dependency on success vs. 
fully ordered atomic counterpart Suggested-by: Kees Cook Signed-off-by: Elena Reshetova Signed-off-by: Peter Zijlstra (Intel) Reviewed-by: David Windsor Reviewed-by: Hans Liljestrand Reviewed-by: Andrea Parri Reviewed-by: Oleg Nesterov Cc: Linus Torvalds Cc: Mike Galbraith Cc: Peter Zijlstra Cc: Thomas Gleixner Cc: akpm@linux-foundation.org Cc: viro@zeniv.linux.org.uk Link: https://lkml.kernel.org/r/1547814450-18902-2-git-send-email-elena.reshetova@intel.com Signed-off-by: Ingo Molnar --- include/linux/sched/signal.h | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) (limited to 'include/linux') diff --git a/include/linux/sched/signal.h b/include/linux/sched/signal.h index 13789d10a50e..37eeb1a28eba 100644 --- a/include/linux/sched/signal.h +++ b/include/linux/sched/signal.h @@ -8,13 +8,14 @@ #include #include #include +#include /* * Types defining task->signal and task->sighand and APIs using them: */ struct sighand_struct { - atomic_t count; + refcount_t count; struct k_sigaction action[_NSIG]; spinlock_t siglock; wait_queue_head_t signalfd_wqh; -- cgit v1.2.3 From 60d4de3ff7f775509deba94b3db3c1abe55bf7a5 Mon Sep 17 00:00:00 2001 From: Elena Reshetova Date: Fri, 18 Jan 2019 14:27:27 +0200 Subject: sched/core: Convert signal_struct.sigcnt to refcount_t atomic_t variables are currently used to implement reference counters with the following properties: - counter is initialized to 1 using atomic_set() - a resource is freed upon counter reaching zero - once counter reaches zero, its further increments aren't allowed - counter schema uses basic atomic operations (set, inc, inc_not_zero, dec_and_test, etc.) Such atomic variables should be converted to a newly provided refcount_t type and API that prevents accidental counter overflows and underflows. This is important since overflows and underflows can lead to use-after-free situation and be exploitable. The variable signal_struct.sigcnt is used as pure reference counter. Convert it to refcount_t and fix up the operations. ** Important note for maintainers: Some functions from refcount_t API defined in lib/refcount.c have different memory ordering guarantees than their atomic counterparts. The full comparison can be seen in https://lkml.org/lkml/2017/11/15/57 and it is hopefully soon in state to be merged to the documentation tree. Normally the differences should not matter since refcount_t provides enough guarantees to satisfy the refcounting use cases, but in some rare cases it might matter. Please double check that you don't have some undocumented memory guarantees for this variable usage. For the signal_struct.sigcnt it might make a difference in following places: - put_signal_struct(): decrement in refcount_dec_and_test() only provides RELEASE ordering and control dependency on success vs. 
fully ordered atomic counterpart Suggested-by: Kees Cook Signed-off-by: Elena Reshetova Signed-off-by: Peter Zijlstra (Intel) Reviewed-by: David Windsor Reviewed-by: Hans Liljestrand Reviewed-by: Andrea Parri Reviewed-by: Oleg Nesterov Cc: Linus Torvalds Cc: Mike Galbraith Cc: Peter Zijlstra Cc: Thomas Gleixner Cc: akpm@linux-foundation.org Cc: viro@zeniv.linux.org.uk Link: https://lkml.kernel.org/r/1547814450-18902-3-git-send-email-elena.reshetova@intel.com Signed-off-by: Ingo Molnar --- include/linux/sched/signal.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'include/linux') diff --git a/include/linux/sched/signal.h b/include/linux/sched/signal.h index 37eeb1a28eba..ae5655197698 100644 --- a/include/linux/sched/signal.h +++ b/include/linux/sched/signal.h @@ -83,7 +83,7 @@ struct multiprocess_signals { * the locking of signal_struct. */ struct signal_struct { - atomic_t sigcnt; + refcount_t sigcnt; atomic_t live; int nr_threads; struct list_head thread_head; -- cgit v1.2.3 From ec1d281923cf81cc660343d0cb8ffc837ffb991d Mon Sep 17 00:00:00 2001 From: Elena Reshetova Date: Fri, 18 Jan 2019 14:27:29 +0200 Subject: sched/core: Convert task_struct.usage to refcount_t atomic_t variables are currently used to implement reference counters with the following properties: - counter is initialized to 1 using atomic_set() - a resource is freed upon counter reaching zero - once counter reaches zero, its further increments aren't allowed - counter schema uses basic atomic operations (set, inc, inc_not_zero, dec_and_test, etc.) Such atomic variables should be converted to a newly provided refcount_t type and API that prevents accidental counter overflows and underflows. This is important since overflows and underflows can lead to use-after-free situation and be exploitable. The variable task_struct.usage is used as pure reference counter. Convert it to refcount_t and fix up the operations. ** Important note for maintainers: Some functions from refcount_t API defined in lib/refcount.c have different memory ordering guarantees than their atomic counterparts. The full comparison can be seen in https://lkml.org/lkml/2017/11/15/57 and it is hopefully soon in state to be merged to the documentation tree. Normally the differences should not matter since refcount_t provides enough guarantees to satisfy the refcounting use cases, but in some rare cases it might matter. Please double check that you don't have some undocumented memory guarantees for this variable usage. For the task_struct.usage it might make a difference in following places: - put_task_struct(): decrement in refcount_dec_and_test() only provides RELEASE ordering and control dependency on success vs. 
fully ordered atomic counterpart Suggested-by: Kees Cook Signed-off-by: Elena Reshetova Signed-off-by: Peter Zijlstra (Intel) Reviewed-by: David Windsor Reviewed-by: Hans Liljestrand Reviewed-by: Andrea Parri Cc: Linus Torvalds Cc: Mike Galbraith Cc: Peter Zijlstra Cc: Thomas Gleixner Cc: akpm@linux-foundation.org Cc: viro@zeniv.linux.org.uk Link: https://lkml.kernel.org/r/1547814450-18902-5-git-send-email-elena.reshetova@intel.com Signed-off-by: Ingo Molnar --- include/linux/sched.h | 3 ++- include/linux/sched/task.h | 4 ++-- 2 files changed, 4 insertions(+), 3 deletions(-) (limited to 'include/linux') diff --git a/include/linux/sched.h b/include/linux/sched.h index e2bba022827d..9d14d6864ca6 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h @@ -21,6 +21,7 @@ #include #include #include +#include #include #include #include @@ -607,7 +608,7 @@ struct task_struct { randomized_struct_fields_start void *stack; - atomic_t usage; + refcount_t usage; /* Per task flags (PF_*), defined further below: */ unsigned int flags; unsigned int ptrace; diff --git a/include/linux/sched/task.h b/include/linux/sched/task.h index 44c6f15800ff..2e97a2227045 100644 --- a/include/linux/sched/task.h +++ b/include/linux/sched/task.h @@ -88,13 +88,13 @@ extern void sched_exec(void); #define sched_exec() {} #endif -#define get_task_struct(tsk) do { atomic_inc(&(tsk)->usage); } while(0) +#define get_task_struct(tsk) do { refcount_inc(&(tsk)->usage); } while(0) extern void __put_task_struct(struct task_struct *t); static inline void put_task_struct(struct task_struct *t) { - if (atomic_dec_and_test(&t->usage)) + if (refcount_dec_and_test(&t->usage)) __put_task_struct(t); } -- cgit v1.2.3 From f0b89d3958d73cd0785ec381f0ddf8efb6f183d8 Mon Sep 17 00:00:00 2001 From: Elena Reshetova Date: Fri, 18 Jan 2019 14:27:30 +0200 Subject: sched/core: Convert task_struct.stack_refcount to refcount_t atomic_t variables are currently used to implement reference counters with the following properties: - counter is initialized to 1 using atomic_set() - a resource is freed upon counter reaching zero - once counter reaches zero, its further increments aren't allowed - counter schema uses basic atomic operations (set, inc, inc_not_zero, dec_and_test, etc.) Such atomic variables should be converted to a newly provided refcount_t type and API that prevents accidental counter overflows and underflows. This is important since overflows and underflows can lead to use-after-free situation and be exploitable. The variable task_struct.stack_refcount is used as pure reference counter. Convert it to refcount_t and fix up the operations. ** Important note for maintainers: Some functions from refcount_t API defined in lib/refcount.c have different memory ordering guarantees than their atomic counterparts. The full comparison can be seen in https://lkml.org/lkml/2017/11/15/57 and it is hopefully soon in state to be merged to the documentation tree. Normally the differences should not matter since refcount_t provides enough guarantees to satisfy the refcounting use cases, but in some rare cases it might matter. Please double check that you don't have some undocumented memory guarantees for this variable usage. For the task_struct.stack_refcount it might make a difference in following places: - try_get_task_stack(): increment in refcount_inc_not_zero() only guarantees control dependency on success vs. 
fully ordered atomic counterpart - put_task_stack(): decrement in refcount_dec_and_test() only provides RELEASE ordering and control dependency on success vs. fully ordered atomic counterpart Suggested-by: Kees Cook Signed-off-by: Elena Reshetova Signed-off-by: Peter Zijlstra (Intel) Reviewed-by: David Windsor Reviewed-by: Hans Liljestrand Reviewed-by: Andrea Parri Cc: Linus Torvalds Cc: Mike Galbraith Cc: Peter Zijlstra Cc: Thomas Gleixner Cc: akpm@linux-foundation.org Cc: viro@zeniv.linux.org.uk Link: https://lkml.kernel.org/r/1547814450-18902-6-git-send-email-elena.reshetova@intel.com Signed-off-by: Ingo Molnar --- include/linux/init_task.h | 1 + include/linux/sched.h | 2 +- include/linux/sched/task_stack.h | 2 +- 3 files changed, 3 insertions(+), 2 deletions(-) (limited to 'include/linux') diff --git a/include/linux/init_task.h b/include/linux/init_task.h index a7083a45a26c..6049baa5b8bc 100644 --- a/include/linux/init_task.h +++ b/include/linux/init_task.h @@ -13,6 +13,7 @@ #include #include #include +#include #include #include #include diff --git a/include/linux/sched.h b/include/linux/sched.h index 9d14d6864ca6..628bf13cb5a5 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h @@ -1194,7 +1194,7 @@ struct task_struct { #endif #ifdef CONFIG_THREAD_INFO_IN_TASK /* A live task holds one reference: */ - atomic_t stack_refcount; + refcount_t stack_refcount; #endif #ifdef CONFIG_LIVEPATCH int patch_state; diff --git a/include/linux/sched/task_stack.h b/include/linux/sched/task_stack.h index 6a841929073f..2413427e439c 100644 --- a/include/linux/sched/task_stack.h +++ b/include/linux/sched/task_stack.h @@ -61,7 +61,7 @@ static inline unsigned long *end_of_stack(struct task_struct *p) #ifdef CONFIG_THREAD_INFO_IN_TASK static inline void *try_get_task_stack(struct task_struct *tsk) { - return atomic_inc_not_zero(&tsk->stack_refcount) ? + return refcount_inc_not_zero(&tsk->stack_refcount) ? task_stack_page(tsk) : NULL; } -- cgit v1.2.3 From 07879c6a3740fbbf3c8891a0ab484c20a12794d8 Mon Sep 17 00:00:00 2001 From: Davidlohr Bueso Date: Tue, 18 Dec 2018 11:53:52 -0800 Subject: sched/wake_q: Reduce reference counting for special users Some users, specifically futexes and rwsems, required fixes that allowed the callers to be safe when wakeups occur before they are expected by wake_up_q(). Such scenarios also play games and rely on reference counting, and until now were pivoting on wake_q doing it. With the wake_q_add() call being moved down, this can no longer be the case. As such we end up with a a double task refcounting overhead; and these callers care enough about this (being rather core-ish). This patch introduces a wake_q_add_safe() call that serves for callers that have already done refcounting and therefore the task is 'safe' from wake_q point of view (int that it requires reference throughout the entire queue/>wakeup cycle). In the one case it has internal reference counting, in the other case it consumes the reference counting. Signed-off-by: Davidlohr Bueso Signed-off-by: Peter Zijlstra (Intel) Cc: Andrew Morton Cc: Linus Torvalds Cc: Paul E. 
McKenney Cc: Peter Zijlstra Cc: Thomas Gleixner Cc: Waiman Long Cc: Will Deacon Cc: Xie Yongji Cc: Yongji Xie Cc: andrea.parri@amarulasolutions.com Cc: lilin24@baidu.com Cc: liuqi16@baidu.com Cc: nixun@baidu.com Cc: yuanlinsi01@baidu.com Cc: zhangyu31@baidu.com Link: https://lkml.kernel.org/r/20181218195352.7orq3upiwfdbrdne@linux-r8p5 Signed-off-by: Ingo Molnar --- include/linux/sched/wake_q.h | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'include/linux') diff --git a/include/linux/sched/wake_q.h b/include/linux/sched/wake_q.h index 545f37138057..ad826d2a4557 100644 --- a/include/linux/sched/wake_q.h +++ b/include/linux/sched/wake_q.h @@ -51,8 +51,8 @@ static inline void wake_q_init(struct wake_q_head *head) head->lastp = &head->first; } -extern void wake_q_add(struct wake_q_head *head, - struct task_struct *task); +extern void wake_q_add(struct wake_q_head *head, struct task_struct *task); +extern void wake_q_add_safe(struct wake_q_head *head, struct task_struct *task); extern void wake_up_q(struct wake_q_head *head); #endif /* _LINUX_SCHED_WAKE_Q_H */ -- cgit v1.2.3 From 23127296889fe84b0762b191b5d041e8ba6f2599 Mon Sep 17 00:00:00 2001 From: Vincent Guittot Date: Wed, 23 Jan 2019 16:26:53 +0100 Subject: sched/fair: Update scale invariance of PELT The current implementation of load tracking invariance scales the contribution with current frequency and uarch performance (only for utilization) of the CPU. One main result of this formula is that the figures are capped by current capacity of CPU. Another one is that the load_avg is not invariant because not scaled with uarch. The util_avg of a periodic task that runs r time slots every p time slots varies in the range : U * (1-y^r)/(1-y^p) * y^i < Utilization < U * (1-y^r)/(1-y^p) with U is the max util_avg value = SCHED_CAPACITY_SCALE At a lower capacity, the range becomes: U * C * (1-y^r')/(1-y^p) * y^i' < Utilization < U * C * (1-y^r')/(1-y^p) with C reflecting the compute capacity ratio between current capacity and max capacity. so C tries to compensate changes in (1-y^r') but it can't be accurate. Instead of scaling the contribution value of PELT algo, we should scale the running time. The PELT signal aims to track the amount of computation of tasks and/or rq so it seems more correct to scale the running time to reflect the effective amount of computation done since the last update. In order to be fully invariant, we need to apply the same amount of running time and idle time whatever the current capacity. Because running at lower capacity implies that the task will run longer, we have to ensure that the same amount of idle time will be applied when system becomes idle and no idle time has been "stolen". But reaching the maximum utilization value (SCHED_CAPACITY_SCALE) means that the task is seen as an always-running task whatever the capacity of the CPU (even at max compute capacity). In this case, we can discard this "stolen" idle times which becomes meaningless. In order to achieve this time scaling, a new clock_pelt is created per rq. The increase of this clock scales with current capacity when something is running on rq and synchronizes with clock_task when rq is idle. With this mechanism, we ensure the same running and idle time whatever the current capacity. This also enables to simplify the pelt algorithm by removing all references of uarch and frequency and applying the same contribution to utilization and loads. 
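To make the time-scaling idea concrete, here is a rough sketch (illustrative only; the helper name is made up, and the real logic lives in update_rq_clock_pelt(), which also handles the idle/"stolen" time case described above): the elapsed time is scaled by the current capacity before it is accumulated, so a CPU at half capacity accrues half the PELT time for the same wall-clock delta.

static inline u64 scale_pelt_delta(u64 delta, unsigned long capacity)
{
	/*
	 * capacity is in [0, SCHED_CAPACITY_SCALE]; at max capacity the
	 * delta passes through unchanged, at half capacity only half of
	 * the wall-clock delta is accounted as PELT running time.
	 */
	return (delta * capacity) >> SCHED_CAPACITY_SHIFT;
}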
Furthermore, the scaling is done only once per update of clock (update_rq_clock_task()) instead of during each update of sched_entities and cfs/rt/dl_rq of the rq like the current implementation. This is interesting when cgroup are involved as shown in the results below: On a hikey (octo Arm64 platform). Performance cpufreq governor and only shallowest c-state to remove variance generated by those power features so we only track the impact of pelt algo. each test runs 16 times: ./perf bench sched pipe (higher is better) kernel tip/sched/core + patch ops/seconds ops/seconds diff cgroup root 59652(+/- 0.18%) 59876(+/- 0.24%) +0.38% level1 55608(+/- 0.27%) 55923(+/- 0.24%) +0.57% level2 52115(+/- 0.29%) 52564(+/- 0.22%) +0.86% hackbench -l 1000 (lower is better) kernel tip/sched/core + patch duration(sec) duration(sec) diff cgroup root 4.453(+/- 2.37%) 4.383(+/- 2.88%) -1.57% level1 4.859(+/- 8.50%) 4.830(+/- 7.07%) -0.60% level2 5.063(+/- 9.83%) 4.928(+/- 9.66%) -2.66% Then, the responsiveness of PELT is improved when CPU is not running at max capacity with this new algorithm. I have put below some examples of duration to reach some typical load values according to the capacity of the CPU with current implementation and with this patch. These values has been computed based on the geometric series and the half period value: Util (%) max capacity half capacity(mainline) half capacity(w/ patch) 972 (95%) 138ms not reachable 276ms 486 (47.5%) 30ms 138ms 60ms 256 (25%) 13ms 32ms 26ms On my hikey (octo Arm64 platform) with schedutil governor, the time to reach max OPP when starting from a null utilization, decreases from 223ms with current scale invariance down to 121ms with the new algorithm. Signed-off-by: Vincent Guittot Signed-off-by: Peter Zijlstra (Intel) Cc: Linus Torvalds Cc: Mike Galbraith Cc: Morten.Rasmussen@arm.com Cc: Peter Zijlstra Cc: Thomas Gleixner Cc: bsegall@google.com Cc: dietmar.eggemann@arm.com Cc: patrick.bellasi@arm.com Cc: pjt@google.com Cc: pkondeti@codeaurora.org Cc: quentin.perret@arm.com Cc: rjw@rjwysocki.net Cc: srinivas.pandruvada@linux.intel.com Cc: thara.gopinath@linaro.org Link: https://lkml.kernel.org/r/1548257214-13745-3-git-send-email-vincent.guittot@linaro.org Signed-off-by: Ingo Molnar --- include/linux/sched.h | 23 +++++++---------------- 1 file changed, 7 insertions(+), 16 deletions(-) (limited to 'include/linux') diff --git a/include/linux/sched.h b/include/linux/sched.h index 628bf13cb5a5..351c0fe64c85 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h @@ -357,12 +357,6 @@ struct util_est { * For cfs_rq, it is the aggregated load_avg of all runnable and * blocked sched_entities. * - * load_avg may also take frequency scaling into account: - * - * load_avg = runnable% * scale_load_down(load) * freq% - * - * where freq% is the CPU frequency normalized to the highest frequency. - * * [util_avg definition] * * util_avg = running% * SCHED_CAPACITY_SCALE @@ -371,17 +365,14 @@ struct util_est { * a CPU. For cfs_rq, it is the aggregated util_avg of all runnable * and blocked sched_entities. * - * util_avg may also factor frequency scaling and CPU capacity scaling: - * - * util_avg = running% * SCHED_CAPACITY_SCALE * freq% * capacity% - * - * where freq% is the same as above, and capacity% is the CPU capacity - * normalized to the greatest capacity (due to uarch differences, etc). + * load_avg and util_avg don't direcly factor frequency scaling and CPU + * capacity scaling. 
The scaling is done through the rq_clock_pelt that + * is used for computing those signals (see update_rq_clock_pelt()) * - * N.B., the above ratios (runnable%, running%, freq%, and capacity%) - * themselves are in the range of [0, 1]. To do fixed point arithmetics, - * we therefore scale them to as large a range as necessary. This is for - * example reflected by util_avg's SCHED_CAPACITY_SCALE. + * N.B., the above ratios (runnable% and running%) themselves are in the + * range of [0, 1]. To do fixed point arithmetics, we therefore scale them + * to as large a range as necessary. This is for example reflected by + * util_avg's SCHED_CAPACITY_SCALE. * * [Overflow issue] * -- cgit v1.2.3 From c546951d9c9300065bad253ecdf1ac59ce9d06c8 Mon Sep 17 00:00:00 2001 From: Andrea Parri Date: Mon, 21 Jan 2019 16:52:40 +0100 Subject: sched/core: Use READ_ONCE()/WRITE_ONCE() in move_queued_task()/task_rq_lock() move_queued_task() synchronizes with task_rq_lock() as follows: move_queued_task() task_rq_lock() [S] ->on_rq = MIGRATING [L] rq = task_rq() WMB (__set_task_cpu()) ACQUIRE (rq->lock); [S] ->cpu = new_cpu [L] ->on_rq where "[L] rq = task_rq()" is ordered before "ACQUIRE (rq->lock)" by an address dependency and, in turn, "ACQUIRE (rq->lock)" is ordered before "[L] ->on_rq" by the ACQUIRE itself. Use READ_ONCE() to load ->cpu in task_rq() (c.f., task_cpu()) to honor this address dependency. Also, mark the accesses to ->cpu and ->on_rq with READ_ONCE()/WRITE_ONCE() to comply with the LKMM. Signed-off-by: Andrea Parri Signed-off-by: Peter Zijlstra (Intel) Cc: Alan Stern Cc: Linus Torvalds Cc: Mike Galbraith Cc: Paul E. McKenney Cc: Peter Zijlstra Cc: Thomas Gleixner Cc: Will Deacon Link: https://lkml.kernel.org/r/20190121155240.27173-1-andrea.parri@amarulasolutions.com Signed-off-by: Ingo Molnar --- include/linux/sched.h | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'include/linux') diff --git a/include/linux/sched.h b/include/linux/sched.h index 351c0fe64c85..4112639c2a85 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h @@ -1745,9 +1745,9 @@ static __always_inline bool need_resched(void) static inline unsigned int task_cpu(const struct task_struct *p) { #ifdef CONFIG_THREAD_INFO_IN_TASK - return p->cpu; + return READ_ONCE(p->cpu); #else - return task_thread_info(p)->cpu; + return READ_ONCE(task_thread_info(p)->cpu); #endif } -- cgit v1.2.3 From 77000bc43da17d5d6bc4ebfaf44d52d43bb69492 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Mon, 4 Feb 2019 16:31:04 +0100 Subject: uio: remove the unused iov_for_each macro Signed-off-by: Christoph Hellwig Signed-off-by: Al Viro --- include/linux/uio.h | 8 -------- 1 file changed, 8 deletions(-) (limited to 'include/linux') diff --git a/include/linux/uio.h b/include/linux/uio.h index ecf584f6b82d..87477e1640f9 100644 --- a/include/linux/uio.h +++ b/include/linux/uio.h @@ -110,14 +110,6 @@ static inline struct iovec iov_iter_iovec(const struct iov_iter *iter) }; } -#define iov_for_each(iov, iter, start) \ - if (iov_iter_type(start) == ITER_IOVEC || \ - iov_iter_type(start) == ITER_KVEC) \ - for (iter = (start); \ - (iter).count && \ - ((iov = iov_iter_iovec(&(iter))), 1); \ - iov_iter_advance(&(iter), (iov).iov_len)) - size_t iov_iter_copy_from_user_atomic(struct page *page, struct iov_iter *i, unsigned long offset, size_t bytes); void iov_iter_advance(struct iov_iter *i, size_t bytes); -- cgit v1.2.3 From 960587285a56ec3cafb4d1e6b25c19eced4d0bce Mon Sep 17 00:00:00 2001 From: Florian Westphal Date: Sat, 2 Feb 2019 
10:16:59 +0100 Subject: netfilter: nat: remove module dependency on ipv6 core nf_nat_ipv6 calls two ipv6 core functions, so add those to v6ops to avoid the module dependency. This is a prerequisite for merging ipv4 and ipv6 nat implementations. Add wrappers to avoid the indirection if ipv6 is builtin. Signed-off-by: Florian Westphal Signed-off-by: Pablo Neira Ayuso --- include/linux/netfilter_ipv6.h | 6 ++++++ 1 file changed, 6 insertions(+) (limited to 'include/linux') diff --git a/include/linux/netfilter_ipv6.h b/include/linux/netfilter_ipv6.h index c0dc4dd78887..ad4223c10488 100644 --- a/include/linux/netfilter_ipv6.h +++ b/include/linux/netfilter_ipv6.h @@ -33,6 +33,12 @@ struct nf_ipv6_ops { int (*route)(struct net *net, struct dst_entry **dst, struct flowi *fl, bool strict); int (*reroute)(struct sk_buff *skb, const struct nf_queue_entry *entry); +#if IS_MODULE(CONFIG_IPV6) + int (*route_me_harder)(struct net *net, struct sk_buff *skb); + int (*dev_get_saddr)(struct net *net, const struct net_device *dev, + const struct in6_addr *daddr, unsigned int srcprefs, + struct in6_addr *saddr); +#endif }; #ifdef CONFIG_NETFILTER -- cgit v1.2.3 From ac02bcf9cc1e4aefb0a7156a2ae26e8396b15f24 Mon Sep 17 00:00:00 2001 From: Florian Westphal Date: Sat, 2 Feb 2019 10:17:00 +0100 Subject: netfilter: ipv6: avoid indirect calls for IPV6=y case Indirect calls are only needed if ipv6 is a module. Add helpers to abstract the v6ops indirections and use them instead. fragment, reroute and route_input are kept as indirect calls. The first two are not used in the hot path and route_input is only used by bridge netfilter. Signed-off-by: Florian Westphal Signed-off-by: Pablo Neira Ayuso --- include/linux/netfilter_ipv6.h | 64 +++++++++++++++++++++++++++++++++--------- 1 file changed, 51 insertions(+), 13 deletions(-) (limited to 'include/linux') diff --git a/include/linux/netfilter_ipv6.h b/include/linux/netfilter_ipv6.h index ad4223c10488..471e9467105b 100644 --- a/include/linux/netfilter_ipv6.h +++ b/include/linux/netfilter_ipv6.h @@ -25,29 +25,24 @@ struct nf_queue_entry; * if IPv6 is a module. 
*/ struct nf_ipv6_ops { +#if IS_MODULE(CONFIG_IPV6) int (*chk_addr)(struct net *net, const struct in6_addr *addr, const struct net_device *dev, int strict); - void (*route_input)(struct sk_buff *skb); - int (*fragment)(struct net *net, struct sock *sk, struct sk_buff *skb, - int (*output)(struct net *, struct sock *, struct sk_buff *)); - int (*route)(struct net *net, struct dst_entry **dst, struct flowi *fl, - bool strict); - int (*reroute)(struct sk_buff *skb, const struct nf_queue_entry *entry); -#if IS_MODULE(CONFIG_IPV6) int (*route_me_harder)(struct net *net, struct sk_buff *skb); int (*dev_get_saddr)(struct net *net, const struct net_device *dev, const struct in6_addr *daddr, unsigned int srcprefs, struct in6_addr *saddr); + int (*route)(struct net *net, struct dst_entry **dst, struct flowi *fl, + bool strict); #endif + void (*route_input)(struct sk_buff *skb); + int (*fragment)(struct net *net, struct sock *sk, struct sk_buff *skb, + int (*output)(struct net *, struct sock *, struct sk_buff *)); + int (*reroute)(struct sk_buff *skb, const struct nf_queue_entry *entry); }; #ifdef CONFIG_NETFILTER -int ip6_route_me_harder(struct net *net, struct sk_buff *skb); -__sum16 nf_ip6_checksum(struct sk_buff *skb, unsigned int hook, - unsigned int dataoff, u_int8_t protocol); - -int ipv6_netfilter_init(void); -void ipv6_netfilter_fini(void); +#include extern const struct nf_ipv6_ops __rcu *nf_ipv6_ops; static inline const struct nf_ipv6_ops *nf_get_ipv6_ops(void) @@ -55,6 +50,49 @@ static inline const struct nf_ipv6_ops *nf_get_ipv6_ops(void) return rcu_dereference(nf_ipv6_ops); } +static inline int nf_ipv6_chk_addr(struct net *net, const struct in6_addr *addr, + const struct net_device *dev, int strict) +{ +#if IS_MODULE(CONFIG_IPV6) + const struct nf_ipv6_ops *v6_ops = nf_get_ipv6_ops(); + + if (!v6_ops) + return 1; + + return v6_ops->chk_addr(net, addr, dev, strict); +#else + return ipv6_chk_addr(net, addr, dev, strict); +#endif +} + +int __nf_ip6_route(struct net *net, struct dst_entry **dst, + struct flowi *fl, bool strict); + +static inline int nf_ip6_route(struct net *net, struct dst_entry **dst, + struct flowi *fl, bool strict) +{ +#if IS_MODULE(CONFIG_IPV6) + const struct nf_ipv6_ops *v6ops = nf_get_ipv6_ops(); + + if (v6ops) + return v6ops->route(net, dst, fl, strict); + + return -EHOSTUNREACH; +#endif +#if IS_BUILTIN(CONFIG_IPV6) + return __nf_ip6_route(net, dst, fl, strict); +#else + return -EHOSTUNREACH; +#endif +} + +int ip6_route_me_harder(struct net *net, struct sk_buff *skb); +__sum16 nf_ip6_checksum(struct sk_buff *skb, unsigned int hook, + unsigned int dataoff, u_int8_t protocol); + +int ipv6_netfilter_init(void); +void ipv6_netfilter_fini(void); + #else /* CONFIG_NETFILTER */ static inline int ipv6_netfilter_init(void) { return 0; } static inline void ipv6_netfilter_fini(void) { return; } -- cgit v1.2.3 From 278311e417be60f7caef6fcb12bda4da2711ceff Mon Sep 17 00:00:00 2001 From: Kairui Song Date: Mon, 21 Jan 2019 17:59:29 +0800 Subject: kexec, KEYS: Make use of platform keyring for signature verify This patch allows the kexec_file_load syscall to verify the PE signed kernel image signature based on the preboot keys stored in the .platform keyring, as fall back, if the signature verification failed due to not finding the public key in the secondary or builtin keyrings. This commit adds a VERIFY_USE_PLATFORM_KEYRING similar to previous VERIFY_USE_SECONDARY_KEYRING indicating that verify_pkcs7_signature should verify the signature using platform keyring. 
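In rough terms, the fallback order described here looks like the sketch below (a simplified illustration, not the actual kexec_file_load code path; the wrapper name is made up): the builtin/secondary keyrings are tried first, and the .platform keyring is only consulted when the key was not found there.

static int example_verify_with_platform_fallback(const void *data, size_t len,
						 const void *sig, size_t sig_len)
{
	int ret;

	/* Builtin and, if configured, secondary keyrings first. */
	ret = verify_pkcs7_signature(data, len, sig, sig_len,
				     VERIFY_USE_SECONDARY_KEYRING,
				     VERIFYING_KEXEC_PE_SIGNATURE, NULL, NULL);
	if (ret == -ENOKEY)
		/* Key not found there: retry with the preboot .platform keyring. */
		ret = verify_pkcs7_signature(data, len, sig, sig_len,
					     VERIFY_USE_PLATFORM_KEYRING,
					     VERIFYING_KEXEC_PE_SIGNATURE, NULL, NULL);

	return ret;
}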
Also, decrease the error message log level when verification fails with -ENOKEY, so that if it is called multiple times with different keyrings it won't generate extra noise. Signed-off-by: Kairui Song Cc: David Howells Acked-by: Dave Young (for kexec_file_load part) [zohar@linux.ibm.com: tweaked the first paragraph of the patch description, and fixed checkpatch warning.] Signed-off-by: Mimi Zohar --- include/linux/verification.h | 1 + 1 file changed, 1 insertion(+) (limited to 'include/linux') diff --git a/include/linux/verification.h b/include/linux/verification.h index cfa4730d607a..018fb5f13d44 100644 --- a/include/linux/verification.h +++ b/include/linux/verification.h @@ -17,6 +17,7 @@ * should be used. */ #define VERIFY_USE_SECONDARY_KEYRING ((struct key *)1UL) +#define VERIFY_USE_PLATFORM_KEYRING ((struct key *)2UL) /* * The use to which an asymmetric key is being put. -- cgit v1.2.3 From fdb2410f7702f25f82804a261f90ad03422bd2c3 Mon Sep 17 00:00:00 2001 From: Mimi Zohar Date: Tue, 22 Jan 2019 14:06:49 -0600 Subject: ima: define ima_post_create_tmpfile() hook and add missing call If tmpfiles can be made persistent, then newly created tmpfiles need to be treated like any other new files in policy. This patch indicates which newly created tmpfiles are in policy, causing the file hash to be calculated on __fput(). Reported-by: Ignaz Forster [rgoldwyn@suse.com: Call ima_post_create_tmpfile() in vfs_tmpfile() as opposed to do_tmpfile(). This will help the case for overlayfs where copy_up is denied while overwriting a file.] Signed-off-by: Goldwyn Rodrigues Signed-off-by: Mimi Zohar --- include/linux/ima.h | 5 +++++ 1 file changed, 5 insertions(+) (limited to 'include/linux') diff --git a/include/linux/ima.h b/include/linux/ima.h index b5e16b8c50b7..dc12fbcf484c 100644 --- a/include/linux/ima.h +++ b/include/linux/ima.h @@ -18,6 +18,7 @@ struct linux_binprm; #ifdef CONFIG_IMA extern int ima_bprm_check(struct linux_binprm *bprm); extern int ima_file_check(struct file *file, int mask); +extern void ima_post_create_tmpfile(struct inode *inode); extern void ima_file_free(struct file *file); extern int ima_file_mmap(struct file *file, unsigned long prot); extern int ima_load_data(enum kernel_load_data_id id); @@ -56,6 +57,10 @@ static inline int ima_file_check(struct file *file, int mask) return 0; } +static inline void ima_post_create_tmpfile(struct inode *inode) +{ +} + static inline void ima_file_free(struct file *file) { return; -- cgit v1.2.3 From 5468e82f7034f0ae175a3ce075441356099bdaa3 Mon Sep 17 00:00:00 2001 From: Linus Walleij Date: Mon, 4 Feb 2019 11:26:18 +0100 Subject: net: phy: fixed-phy: Drop GPIO from fixed_phy_add() All users of fixed_phy_add() pass -1 as GPIO number to the fixed phy driver, and all users of fixed_phy_register() pass -1 as GPIO number as well, except for the device tree MDIO bus. Any new users should create a proper device and pass the GPIO as a descriptor associated with the device, so delete the GPIO argument from the calls and drop the code requesting a GPIO in fixed_phy_add(). In fixed_phy_register(), investigate the "fixed-link" node and pick the GPIO descriptor from "link-gpios" if this property exists. Move the corresponding code out of of_mdio.c as the fixed phy code anyway requires OF to be in use. Tested-by: Andrew Lunn Signed-off-by: Linus Walleij Signed-off-by: David S. 
Miller --- include/linux/phy_fixed.h | 8 ++------ 1 file changed, 2 insertions(+), 6 deletions(-) (limited to 'include/linux') diff --git a/include/linux/phy_fixed.h b/include/linux/phy_fixed.h index 9525567b1951..c78fc203db43 100644 --- a/include/linux/phy_fixed.h +++ b/include/linux/phy_fixed.h @@ -15,11 +15,9 @@ struct device_node; #if IS_ENABLED(CONFIG_FIXED_PHY) extern int fixed_phy_change_carrier(struct net_device *dev, bool new_carrier); extern int fixed_phy_add(unsigned int irq, int phy_id, - struct fixed_phy_status *status, - int link_gpio); + struct fixed_phy_status *status); extern struct phy_device *fixed_phy_register(unsigned int irq, struct fixed_phy_status *status, - int link_gpio, struct device_node *np); extern void fixed_phy_unregister(struct phy_device *phydev); extern int fixed_phy_set_link_update(struct phy_device *phydev, @@ -27,14 +25,12 @@ extern int fixed_phy_set_link_update(struct phy_device *phydev, struct fixed_phy_status *)); #else static inline int fixed_phy_add(unsigned int irq, int phy_id, - struct fixed_phy_status *status, - int link_gpio) + struct fixed_phy_status *status) { return -ENODEV; } static inline struct phy_device *fixed_phy_register(unsigned int irq, struct fixed_phy_status *status, - int gpio_link, struct device_node *np) { return ERR_PTR(-ENODEV); -- cgit v1.2.3 From 809ab9371ca0a96b44d9866ad82849410759a45b Mon Sep 17 00:00:00 2001 From: Matthew Wilcox Date: Sat, 26 Jan 2019 00:52:26 -0500 Subject: XArray: Update xa_erase family descriptions xa_erase does not allocate memory and doesn't have a gfp parameter. Update the descriptions of all four variants to be more useful. Signed-off-by: Matthew Wilcox --- include/linux/xarray.h | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) (limited to 'include/linux') diff --git a/include/linux/xarray.h b/include/linux/xarray.h index 5d9d318bcf7a..e11841537631 100644 --- a/include/linux/xarray.h +++ b/include/linux/xarray.h @@ -526,9 +526,9 @@ static inline void *xa_store_irq(struct xarray *xa, unsigned long index, * @xa: XArray. * @index: Index of entry. * - * This function is the equivalent of calling xa_store() with %NULL as - * the third argument. The XArray does not need to allocate memory, so - * the user does not need to provide GFP flags. + * After this function returns, loading from @index will return %NULL. + * If the index is part of a multi-index entry, all indices will be erased + * and none of the entries will be part of a multi-index entry. * * Context: Any context. Takes and releases the xa_lock while * disabling softirqs. @@ -550,9 +550,9 @@ static inline void *xa_erase_bh(struct xarray *xa, unsigned long index) * @xa: XArray. * @index: Index of entry. * - * This function is the equivalent of calling xa_store() with %NULL as - * the third argument. The XArray does not need to allocate memory, so - * the user does not need to provide GFP flags. + * After this function returns, loading from @index will return %NULL. + * If the index is part of a multi-index entry, all indices will be erased + * and none of the entries will be part of a multi-index entry. * * Context: Process context. Takes and releases the xa_lock while * disabling interrupts. -- cgit v1.2.3 From fe6f42cf6eb3183ebd6ab6b0b7dcbee2600c2baa Mon Sep 17 00:00:00 2001 From: Nava kishore Manne Date: Wed, 6 Feb 2019 16:37:19 +0530 Subject: firmware: xilinx: Add zynqmp_pm_get_chipid() API This patch adds a new API to provide access to the hardware related data like soc revision, IDCODE... etc. 
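A purely hypothetical caller sketch (it assumes zynqmp_pm_get_eemi_ops() hands back a usable ops table and that the new op may be absent; the reporting function name is made up):

static void example_report_chipid(void)
{
	const struct zynqmp_eemi_ops *eemi_ops = zynqmp_pm_get_eemi_ops();
	u32 idcode, version;

	/* get_chipid() fills in the silicon IDCODE and version on success. */
	if (eemi_ops && eemi_ops->get_chipid &&
	    !eemi_ops->get_chipid(&idcode, &version))
		pr_info("zynqmp: idcode %#x, version %#x\n", idcode, version);
}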
Signed-off-by: Nava kishore Manne Signed-off-by: Michal Simek --- include/linux/firmware/xlnx-zynqmp.h | 2 ++ 1 file changed, 2 insertions(+) (limited to 'include/linux') diff --git a/include/linux/firmware/xlnx-zynqmp.h b/include/linux/firmware/xlnx-zynqmp.h index 07c587a0b06e..5a1f19848100 100644 --- a/include/linux/firmware/xlnx-zynqmp.h +++ b/include/linux/firmware/xlnx-zynqmp.h @@ -36,6 +36,7 @@ enum pm_api_id { PM_GET_API_VERSION = 1, PM_RESET_ASSERT = 17, PM_RESET_GET_STATUS, + PM_GET_CHIPID = 24, PM_IOCTL = 34, PM_QUERY_DATA, PM_CLOCK_ENABLE, @@ -224,6 +225,7 @@ struct zynqmp_pm_query_data { struct zynqmp_eemi_ops { int (*get_api_version)(u32 *version); + int (*get_chipid)(u32 *idcode, u32 *version); int (*query_data)(struct zynqmp_pm_query_data qdata, u32 *out); int (*clock_enable)(u32 clock_id); int (*clock_disable)(u32 clock_id); -- cgit v1.2.3 From 2292822e1576c89191a65c3d0da584d75d3c033f Mon Sep 17 00:00:00 2001 From: Wolfram Sang Date: Sat, 19 Jan 2019 13:16:53 +0100 Subject: i2c: algo-bit: include main i2c header We are using symbols from it, so we should include it directly. Found after sorting includes in a driver. Signed-off-by: Wolfram Sang Reviewed-by: Simon Horman Signed-off-by: Wolfram Sang --- include/linux/i2c-algo-bit.h | 2 ++ 1 file changed, 2 insertions(+) (limited to 'include/linux') diff --git a/include/linux/i2c-algo-bit.h b/include/linux/i2c-algo-bit.h index 63904ba6887e..d64cebc6e65a 100644 --- a/include/linux/i2c-algo-bit.h +++ b/include/linux/i2c-algo-bit.h @@ -25,6 +25,8 @@ #ifndef _LINUX_I2C_ALGO_BIT_H #define _LINUX_I2C_ALGO_BIT_H +#include + /* --- Defines for bit-adapters --------------------------------------- */ /* * This struct contains the hw-dependent functions of bit-style adapters to -- cgit v1.2.3 From 738ac0679b969776a638daf2cfb5011049d467da Mon Sep 17 00:00:00 2001 From: Wolfram Sang Date: Sat, 19 Jan 2019 13:16:54 +0100 Subject: i2c: algo-bit: convert to SPDX header And use kernel style for the remaining comments in the header. Signed-off-by: Wolfram Sang Reviewed-by: Simon Horman Signed-off-by: Wolfram Sang --- include/linux/i2c-algo-bit.h | 31 ++++++++----------------------- 1 file changed, 8 insertions(+), 23 deletions(-) (limited to 'include/linux') diff --git a/include/linux/i2c-algo-bit.h b/include/linux/i2c-algo-bit.h index d64cebc6e65a..69045df78e2d 100644 --- a/include/linux/i2c-algo-bit.h +++ b/include/linux/i2c-algo-bit.h @@ -1,26 +1,11 @@ -/* ------------------------------------------------------------------------- */ -/* i2c-algo-bit.h i2c driver algorithms for bit-shift adapters */ -/* ------------------------------------------------------------------------- */ -/* Copyright (C) 1995-99 Simon G. Vogl - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; either version 2 of the License, or - (at your option) any later version. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, - MA 02110-1301 USA. 
*/ -/* ------------------------------------------------------------------------- */ - -/* With some changes from Kyösti Mälkki and even - Frodo Looijaard */ +/* SPDX-License-Identifier: GPL-2.0+ */ +/* + * i2c-algo-bit.h: i2c driver algorithms for bit-shift adapters + * + * Copyright (C) 1995-99 Simon G. Vogl + * With some changes from Kyösti Mälkki and even + * Frodo Looijaard + */ #ifndef _LINUX_I2C_ALGO_BIT_H #define _LINUX_I2C_ALGO_BIT_H -- cgit v1.2.3 From b525903c254dab2491410f0f23707691b7c2c317 Mon Sep 17 00:00:00 2001 From: Julien Thierry Date: Thu, 31 Jan 2019 14:53:58 +0000 Subject: genirq: Provide basic NMI management for interrupt lines Add functionality to allocate interrupt lines that will deliver IRQs as Non-Maskable Interrupts. These allocations are only successful if the irqchip provides the necessary support and allows NMI delivery for the interrupt line. Interrupt lines allocated for NMI delivery must be enabled/disabled through enable_nmi/disable_nmi_nosync to keep their state consistent. To treat a PERCPU IRQ as NMI, the interrupt must not be shared nor threaded, the irqchip directly managing the IRQ must be the root irqchip and the irqchip cannot be behind a slow bus. Signed-off-by: Julien Thierry Reviewed-by: Marc Zyngier Cc: Thomas Gleixner Cc: Peter Zijlstra Cc: Ingo Molnar Cc: Marc Zyngier Signed-off-by: Marc Zyngier --- include/linux/interrupt.h | 9 +++++++++ include/linux/irq.h | 7 +++++++ 2 files changed, 16 insertions(+) (limited to 'include/linux') diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h index c672f34235e7..9941d1a8d83c 100644 --- a/include/linux/interrupt.h +++ b/include/linux/interrupt.h @@ -156,6 +156,10 @@ __request_percpu_irq(unsigned int irq, irq_handler_t handler, unsigned long flags, const char *devname, void __percpu *percpu_dev_id); +extern int __must_check +request_nmi(unsigned int irq, irq_handler_t handler, unsigned long flags, + const char *name, void *dev); + static inline int __must_check request_percpu_irq(unsigned int irq, irq_handler_t handler, const char *devname, void __percpu *percpu_dev_id) @@ -167,6 +171,8 @@ request_percpu_irq(unsigned int irq, irq_handler_t handler, extern const void *free_irq(unsigned int, void *); extern void free_percpu_irq(unsigned int, void __percpu *); +extern const void *free_nmi(unsigned int irq, void *dev_id); + struct device; extern int __must_check @@ -217,6 +223,9 @@ extern void enable_percpu_irq(unsigned int irq, unsigned int type); extern bool irq_percpu_is_enabled(unsigned int irq); extern void irq_wake_thread(unsigned int irq, void *dev_id); +extern void disable_nmi_nosync(unsigned int irq); +extern void enable_nmi(unsigned int irq); + /* The following three functions are for the core kernel use only. 
*/ extern void suspend_device_irqs(void); extern void resume_device_irqs(void); diff --git a/include/linux/irq.h b/include/linux/irq.h index def2b2aac8b1..a7298e4998c8 100644 --- a/include/linux/irq.h +++ b/include/linux/irq.h @@ -442,6 +442,8 @@ static inline irq_hw_number_t irqd_to_hwirq(struct irq_data *d) * @irq_set_vcpu_affinity: optional to target a vCPU in a virtual machine * @ipi_send_single: send a single IPI to destination cpus * @ipi_send_mask: send an IPI to destination cpus in cpumask + * @irq_nmi_setup: function called from core code before enabling an NMI + * @irq_nmi_teardown: function called from core code after disabling an NMI * @flags: chip specific flags */ struct irq_chip { @@ -490,6 +492,9 @@ struct irq_chip { void (*ipi_send_single)(struct irq_data *data, unsigned int cpu); void (*ipi_send_mask)(struct irq_data *data, const struct cpumask *dest); + int (*irq_nmi_setup)(struct irq_data *data); + void (*irq_nmi_teardown)(struct irq_data *data); + unsigned long flags; }; @@ -505,6 +510,7 @@ struct irq_chip { * IRQCHIP_ONESHOT_SAFE: One shot does not require mask/unmask * IRQCHIP_EOI_THREADED: Chip requires eoi() on unmask in threaded mode * IRQCHIP_SUPPORTS_LEVEL_MSI Chip can provide two doorbells for Level MSIs + * IRQCHIP_SUPPORTS_NMI: Chip can deliver NMIs, only for root irqchips */ enum { IRQCHIP_SET_TYPE_MASKED = (1 << 0), @@ -515,6 +521,7 @@ enum { IRQCHIP_ONESHOT_SAFE = (1 << 5), IRQCHIP_EOI_THREADED = (1 << 6), IRQCHIP_SUPPORTS_LEVEL_MSI = (1 << 7), + IRQCHIP_SUPPORTS_NMI = (1 << 8), }; #include -- cgit v1.2.3 From 4b078c3f1a26487c39363089ba0d5c6b09f2a89f Mon Sep 17 00:00:00 2001 From: Julien Thierry Date: Thu, 31 Jan 2019 14:53:59 +0000 Subject: genirq: Provide NMI management for percpu_devid interrupts Add support for percpu_devid interrupts treated as NMIs. Percpu_devid NMIs need to be setup/torn down on each CPU they target. The same restrictions as for global NMIs still apply for percpu_devid NMIs. Signed-off-by: Julien Thierry Cc: Thomas Gleixner Cc: Peter Zijlstra Cc: Ingo Molnar Cc: Marc Zyngier Signed-off-by: Marc Zyngier --- include/linux/interrupt.h | 9 +++++++++ 1 file changed, 9 insertions(+) (limited to 'include/linux') diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h index 9941d1a8d83c..831ddcdc5597 100644 --- a/include/linux/interrupt.h +++ b/include/linux/interrupt.h @@ -168,10 +168,15 @@ request_percpu_irq(unsigned int irq, irq_handler_t handler, devname, percpu_dev_id); } +extern int __must_check +request_percpu_nmi(unsigned int irq, irq_handler_t handler, + const char *devname, void __percpu *dev); + extern const void *free_irq(unsigned int, void *); extern void free_percpu_irq(unsigned int, void __percpu *); extern const void *free_nmi(unsigned int irq, void *dev_id); +extern void free_percpu_nmi(unsigned int irq, void __percpu *percpu_dev_id); struct device; @@ -224,7 +229,11 @@ extern bool irq_percpu_is_enabled(unsigned int irq); extern void irq_wake_thread(unsigned int irq, void *dev_id); extern void disable_nmi_nosync(unsigned int irq); +extern void disable_percpu_nmi(unsigned int irq); extern void enable_nmi(unsigned int irq); +extern void enable_percpu_nmi(unsigned int irq, unsigned int type); +extern int prepare_percpu_nmi(unsigned int irq); +extern void teardown_percpu_nmi(unsigned int irq); /* The following three functions are for the core kernel use only. 
*/ extern void suspend_device_irqs(void); -- cgit v1.2.3 From 2dcf1fbcad352baaa5f47b17e57c5743c8eedbad Mon Sep 17 00:00:00 2001 From: Julien Thierry Date: Thu, 31 Jan 2019 14:54:00 +0000 Subject: genirq: Provide NMI handlers Provide flow handlers that are NMI safe for interrupts and percpu_devid interrupts. Signed-off-by: Julien Thierry Acked-by: Marc Zyngier Cc: Thomas Gleixner Cc: Marc Zyngier Cc: Peter Zijlstra Signed-off-by: Marc Zyngier --- include/linux/irq.h | 3 +++ 1 file changed, 3 insertions(+) (limited to 'include/linux') diff --git a/include/linux/irq.h b/include/linux/irq.h index a7298e4998c8..5e91f6bcaacd 100644 --- a/include/linux/irq.h +++ b/include/linux/irq.h @@ -601,6 +601,9 @@ extern void handle_percpu_devid_irq(struct irq_desc *desc); extern void handle_bad_irq(struct irq_desc *desc); extern void handle_nested_irq(unsigned int irq); +extern void handle_fasteoi_nmi(struct irq_desc *desc); +extern void handle_percpu_devid_fasteoi_nmi(struct irq_desc *desc); + extern int irq_chip_compose_msi_msg(struct irq_data *data, struct msi_msg *msg); extern int irq_chip_pm_get(struct irq_data *data); extern int irq_chip_pm_put(struct irq_data *data); -- cgit v1.2.3 From 6e4933a006616343f66c4702dc4fc56bb25e7b02 Mon Sep 17 00:00:00 2001 From: Julien Thierry Date: Thu, 31 Jan 2019 14:54:01 +0000 Subject: irqdesc: Add domain handler for NMIs NMI handling code should be executed between calls to nmi_enter and nmi_exit. Add a separate domain handler to properly setup NMI context when handling an interrupt requested as NMI. Signed-off-by: Julien Thierry Acked-by: Marc Zyngier Cc: Thomas Gleixner Cc: Marc Zyngier Cc: Will Deacon Cc: Peter Zijlstra Signed-off-by: Marc Zyngier --- include/linux/irqdesc.h | 5 +++++ 1 file changed, 5 insertions(+) (limited to 'include/linux') diff --git a/include/linux/irqdesc.h b/include/linux/irqdesc.h index dd1e40ddac7d..ba05b0d6401a 100644 --- a/include/linux/irqdesc.h +++ b/include/linux/irqdesc.h @@ -171,6 +171,11 @@ static inline int handle_domain_irq(struct irq_domain *domain, { return __handle_domain_irq(domain, hwirq, true, regs); } + +#ifdef CONFIG_IRQ_DOMAIN +int handle_domain_nmi(struct irq_domain *domain, unsigned int hwirq, + struct pt_regs *regs); +#endif #endif /* Test to see if a driver has successfully requested an irq */ -- cgit v1.2.3 From 013e6292aaf5e4b083a50a0f9e17e93628616860 Mon Sep 17 00:00:00 2001 From: Boris Brezillon Date: Tue, 20 Nov 2018 11:57:20 +0100 Subject: mtd: rawnand: Simplify the locking nand_get_device() was complex for apparently no good reason. Let's replace this locking scheme with 2 mutexes: one attached to the controller and another one attached to the chip. Every time the core calls nand_get_device(), it will first lock the chip and if the chip is not suspended, will then lock the controller. nand_release_device() will release both lock in the reverse order. nand_get_device() can sleep, just like the previous implementation, which means you should never call that from an atomic context. We also get rid of - the chip->state field, since all it was used for was flagging the chip as suspended. We replace it by a field called chip->suspended and directly set it from nand_suspend/resume() - the controller->wq and controller->active fields which are no longer needed since the new controller->lock (now a mutex) guarantees that all operations are serialized at the controller level - panic_nand_get_device() which would anyway be a no-op. 
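In outline, the resulting get/release pair looks roughly like this (a sketch of the scheme just described, not a verbatim copy of the nand_base.c change):

static int nand_get_device(struct nand_chip *chip)
{
	/* Serialize against other users of this chip and honour suspend. */
	mutex_lock(&chip->lock);
	if (chip->suspended) {
		mutex_unlock(&chip->lock);
		return -EBUSY;
	}

	/* Then serialize all operations at the controller level. */
	mutex_lock(&chip->controller->lock);

	return 0;
}

static void nand_release_device(struct nand_chip *chip)
{
	/* Release the controller and then the chip, in reverse order. */
	mutex_unlock(&chip->controller->lock);
	mutex_unlock(&chip->lock);
}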
Talking about panic write, I keep thinking the rawnand implementation is unsafe because there's no negotiation with the controller to know when it's actually done with its previous operation. I don't intend to fix that here, but that's probably something we should look at, or maybe we should consider dropping the ->_panic_write() implementation. Last important change to mention: we now return -EBUSY when someone tries to access a device that has been suspended, and propagate this error to the upper layer. Signed-off-by: Boris Brezillon Signed-off-by: Miquel Raynal --- include/linux/mtd/rawnand.h | 24 +++++++++--------------- 1 file changed, 9 insertions(+), 15 deletions(-) (limited to 'include/linux') diff --git a/include/linux/mtd/rawnand.h b/include/linux/mtd/rawnand.h index 33e240acdc6d..17d2d9ae33bf 100644 --- a/include/linux/mtd/rawnand.h +++ b/include/linux/mtd/rawnand.h @@ -16,13 +16,12 @@ #ifndef __LINUX_MTD_RAWNAND_H #define __LINUX_MTD_RAWNAND_H -#include -#include #include #include #include #include #include +#include #include #include @@ -897,25 +896,17 @@ struct nand_controller_ops { /** * struct nand_controller - Structure used to describe a NAND controller * - * @lock: protection lock - * @active: the mtd device which holds the controller currently - * @wq: wait queue to sleep on if a NAND operation is in - * progress used instead of the per chip wait queue - * when a hw controller is available. + * @lock: lock used to serialize accesses to the NAND controller * @ops: NAND controller operations. */ struct nand_controller { - spinlock_t lock; - struct nand_chip *active; - wait_queue_head_t wq; + struct mutex lock; const struct nand_controller_ops *ops; }; static inline void nand_controller_init(struct nand_controller *nfc) { - nfc->active = NULL; - spin_lock_init(&nfc->lock); - init_waitqueue_head(&nfc->wq); + mutex_init(&nfc->lock); } /** @@ -983,7 +974,6 @@ struct nand_legacy { * setting the read-retry mode. Mostly needed for MLC NAND. * @ecc: [BOARDSPECIFIC] ECC control structure * @buf_align: minimum buffer alignment required by a platform - * @state: [INTERN] the current state of the NAND device * @oob_poi: "poison value buffer," used for laying out OOB data * before writing * @page_shift: [INTERN] number of address bits in a page (column @@ -1034,6 +1024,9 @@ struct nand_legacy { * cur_cs < numchips. NAND Controller drivers should not * modify this value, but they're allowed to read it. * @read_retries: [INTERN] the number of read retry modes supported + * @lock: lock protecting the suspended field. Also used to + * serialize accesses to the NAND device. + * @suspended: set to 1 when the device is suspended, 0 when it's not. * @bbt: [INTERN] bad block table pointer * @bbt_td: [REPLACEABLE] bad block table descriptor for flash * lookup. @@ -1088,7 +1081,8 @@ struct nand_chip { int read_retries; - flstate_t state; + struct mutex lock; + unsigned int suspended : 1; uint8_t *oob_poi; struct nand_controller *controller; -- cgit v1.2.3 From 2d73f3d66b7052c0175f9f33d271ae50826c222e Mon Sep 17 00:00:00 2001 From: Masahiro Yamada Date: Mon, 21 Jan 2019 15:32:07 +0900 Subject: mtd: rawnand: remove ->legacy.erase and single_erase() Now that the last user of this hook, denali.c, stopped using it, we can remove the erase hook from nand_legacy. I squashed single_erase() because only the difference between single_erase() and nand_erase_op() is the number of bit shifts. 
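For reference, the dropped helper amounted to little more than a shift in front of nand_erase_op(), roughly like this (an approximation, field names may differ slightly):

static int single_erase(struct nand_chip *chip, int page)
{
	unsigned int eraseblock;

	/* Convert the page number into an eraseblock number. */
	eraseblock = page >> (chip->phys_erase_shift - chip->page_shift);

	return nand_erase_op(chip, eraseblock);
}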
The status/ret conversion in nand_erase_nand() is unneeded since commit eb94555e9e97 ("mtd: nand: use usual return values for the ->erase() hook"). Cleaned it up now. Signed-off-by: Masahiro Yamada Reviewed-by: Boris Brezillon Signed-off-by: Miquel Raynal --- include/linux/mtd/rawnand.h | 2 -- 1 file changed, 2 deletions(-) (limited to 'include/linux') diff --git a/include/linux/mtd/rawnand.h b/include/linux/mtd/rawnand.h index 17d2d9ae33bf..b7445a44a814 100644 --- a/include/linux/mtd/rawnand.h +++ b/include/linux/mtd/rawnand.h @@ -927,7 +927,6 @@ static inline void nand_controller_init(struct nand_controller *nfc) * @waitfunc: hardware specific function for wait on ready. * @block_bad: check if a block is bad, using OOB markers * @block_markbad: mark a block bad - * @erase: erase function * @set_features: set the NAND chip features * @get_features: get the NAND chip features * @chip_delay: chip dependent delay for transferring data from array to read @@ -953,7 +952,6 @@ struct nand_legacy { int (*waitfunc)(struct nand_chip *chip); int (*block_bad)(struct nand_chip *chip, loff_t ofs); int (*block_markbad)(struct nand_chip *chip, loff_t ofs); - int (*erase)(struct nand_chip *chip, int page); int (*set_features)(struct nand_chip *chip, int feature_addr, u8 *subfeature_para); int (*get_features)(struct nand_chip *chip, int feature_addr, -- cgit v1.2.3 From 278bca7f318e6a29f482eabbca52db538dc5d4e6 Mon Sep 17 00:00:00 2001 From: Andy Shevchenko Date: Thu, 10 Jan 2019 21:00:27 +0200 Subject: vfio-mdev: Switch to use new generic UUID API There are new types and helpers that are supposed to be used in new code. As a preparation to get rid of legacy types and API functions do the conversion here. Cc: Kirti Wankhede Cc: Alex Williamson Signed-off-by: Andy Shevchenko Reviewed-by: Christoph Hellwig Signed-off-by: Alex Williamson --- include/linux/mdev.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'include/linux') diff --git a/include/linux/mdev.h b/include/linux/mdev.h index b6e048e1045f..d7aee90e5da5 100644 --- a/include/linux/mdev.h +++ b/include/linux/mdev.h @@ -120,7 +120,7 @@ struct mdev_driver { extern void *mdev_get_drvdata(struct mdev_device *mdev); extern void mdev_set_drvdata(struct mdev_device *mdev, void *data); -extern uuid_le mdev_uuid(struct mdev_device *mdev); +extern const guid_t *mdev_uuid(struct mdev_device *mdev); extern struct bus_type mdev_bus_type; -- cgit v1.2.3 From 972248e9111ee6fe9fb56c24ecfd7434f3d713ac Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Tue, 29 Jan 2019 09:32:03 +0100 Subject: scsi: bsg-lib: handle bidi requests without block layer help We can just stash away the second request in struct bsg_job instead of using the block layer req->next_rq field, allowing for the eventual removal of the latter. Signed-off-by: Christoph Hellwig Acked-by: Jens Axboe Signed-off-by: Martin K. 
Petersen --- include/linux/bsg-lib.h | 4 ++++ 1 file changed, 4 insertions(+) (limited to 'include/linux') diff --git a/include/linux/bsg-lib.h b/include/linux/bsg-lib.h index b356e0006731..7f14517a559b 100644 --- a/include/linux/bsg-lib.h +++ b/include/linux/bsg-lib.h @@ -69,6 +69,10 @@ struct bsg_job { int result; unsigned int reply_payload_rcv_len; + /* BIDI support */ + struct request *bidi_rq; + struct bio *bidi_bio; + void *dd_data; /* Used for driver-specific storage */ }; -- cgit v1.2.3 From 69ed175c195595c73901e18366cb0ebeaeb68b8a Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Fri, 9 Nov 2018 19:35:11 +0100 Subject: scsi: block: remove req->special No users left. Signed-off-by: Christoph Hellwig Acked-by: Jens Axboe Signed-off-by: Martin K. Petersen --- include/linux/blkdev.h | 2 -- 1 file changed, 2 deletions(-) (limited to 'include/linux') diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h index 338604dff7d0..fd1450d53f1c 100644 --- a/include/linux/blkdev.h +++ b/include/linux/blkdev.h @@ -216,8 +216,6 @@ struct request { unsigned short write_hint; unsigned short ioprio; - void *special; /* opaque pointer available for LLD use */ - unsigned int extra_len; /* length of alignment and padding */ enum mq_rq_state state; -- cgit v1.2.3 From 8b3238cabd50e2715b6544e724e74685209b190a Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Thu, 6 Dec 2018 08:01:10 -0800 Subject: scsi: block: remove bidi support Unused now, and another field in struct request bites the dust. Signed-off-by: Christoph Hellwig Acked-by: Jens Axboe Signed-off-by: Martin K. Petersen --- include/linux/blkdev.h | 6 ------ 1 file changed, 6 deletions(-) (limited to 'include/linux') diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h index fd1450d53f1c..21beb456b97a 100644 --- a/include/linux/blkdev.h +++ b/include/linux/blkdev.h @@ -234,9 +234,6 @@ struct request { */ rq_end_io_fn *end_io; void *end_io_data; - - /* for bidi */ - struct request *next_rq; }; static inline bool blk_op_is_scsi(unsigned int op) @@ -572,7 +569,6 @@ struct request_queue { #define QUEUE_FLAG_STOPPED 1 /* queue is stopped */ #define QUEUE_FLAG_DYING 2 /* queue being torn down */ -#define QUEUE_FLAG_BIDI 4 /* queue supports bidi requests */ #define QUEUE_FLAG_NOMERGES 5 /* disable merge attempts */ #define QUEUE_FLAG_SAME_COMP 6 /* complete on same CPU-group */ #define QUEUE_FLAG_FAIL_IO 7 /* fake timeout */ @@ -644,8 +640,6 @@ static inline bool blk_account_rq(struct request *rq) return (rq->rq_flags & RQF_STARTED) && !blk_rq_is_passthrough(rq); } -#define blk_bidi_rq(rq) ((rq)->next_rq != NULL) - #define list_entry_rq(ptr) list_entry((ptr), struct request, queuelist) #define rq_data_dir(rq) (op_is_write(req_op(rq)) ? WRITE : READ) -- cgit v1.2.3 From 5870970b9a828d8693aa6d15742573289d7dbcd0 Mon Sep 17 00:00:00 2001 From: Julien Thierry Date: Thu, 31 Jan 2019 14:58:39 +0000 Subject: arm64: Fix HCR.TGE status for NMI contexts When using VHE, the host needs to clear HCR_EL2.TGE bit in order to interact with guest TLBs, switching from EL2&0 translation regime to EL1&0. However, some non-maskable asynchronous event could happen while TGE is cleared like SDEI. Because of this address translation operations relying on EL2&0 translation regime could fail (tlb invalidation, userspace access, ...). Fix this by properly setting HCR_EL2.TGE when entering NMI context and clear it if necessary when returning to the interrupted context. 
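In outline, the architecture hooks this relies on can be pictured as below (a simplified sketch that ignores NMI nesting; the per-CPU variable is illustrative, the real arm64 code keeps a per-CPU nmi_ctx): save HCR_EL2 on NMI entry and force TGE, then restore the saved value on exit if TGE was not already set.

static DEFINE_PER_CPU(u64, saved_hcr_el2);	/* illustrative only */

#define arch_nmi_enter()						\
do {									\
	if (is_kernel_in_hyp_mode()) {					\
		u64 hcr = read_sysreg(hcr_el2);				\
									\
		__this_cpu_write(saved_hcr_el2, hcr);			\
		if (!(hcr & HCR_TGE)) {					\
			write_sysreg(hcr | HCR_TGE, hcr_el2);		\
			isb();						\
		}							\
	}								\
} while (0)

#define arch_nmi_exit()							\
do {									\
	if (is_kernel_in_hyp_mode() &&					\
	    !(__this_cpu_read(saved_hcr_el2) & HCR_TGE)) {		\
		write_sysreg(__this_cpu_read(saved_hcr_el2), hcr_el2);	\
		isb();							\
	}								\
} while (0)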
Signed-off-by: Julien Thierry Suggested-by: Marc Zyngier Reviewed-by: Marc Zyngier Reviewed-by: James Morse Cc: Arnd Bergmann Cc: Will Deacon Cc: Marc Zyngier Cc: James Morse Cc: linux-arch@vger.kernel.org Cc: stable@vger.kernel.org Signed-off-by: Catalin Marinas --- include/linux/hardirq.h | 7 +++++++ 1 file changed, 7 insertions(+) (limited to 'include/linux') diff --git a/include/linux/hardirq.h b/include/linux/hardirq.h index 0fbbcdf0c178..da0af631ded5 100644 --- a/include/linux/hardirq.h +++ b/include/linux/hardirq.h @@ -60,8 +60,14 @@ extern void irq_enter(void); */ extern void irq_exit(void); +#ifndef arch_nmi_enter +#define arch_nmi_enter() do { } while (0) +#define arch_nmi_exit() do { } while (0) +#endif + #define nmi_enter() \ do { \ + arch_nmi_enter(); \ printk_nmi_enter(); \ lockdep_off(); \ ftrace_nmi_enter(); \ @@ -80,6 +86,7 @@ extern void irq_exit(void); ftrace_nmi_exit(); \ lockdep_on(); \ printk_nmi_exit(); \ + arch_nmi_exit(); \ } while (0) #endif /* LINUX_HARDIRQ_H */ -- cgit v1.2.3 From 13b210ddf474d9f3368766008a89fe82a6f90b48 Mon Sep 17 00:00:00 2001 From: Julien Thierry Date: Thu, 31 Jan 2019 14:58:49 +0000 Subject: efi: Let architectures decide the flags that should be saved/restored Currently, irqflags are saved before calling runtime services and checked for mismatch on return. Provide a pair of overridable macros to save and restore (if needed) the state that needs to be preserved on return from a runtime service. This allows checking for flags that are not necessarily related to irqflags. Signed-off-by: Julien Thierry Acked-by: Catalin Marinas Acked-by: Ard Biesheuvel Acked-by: Marc Zyngier Cc: Ard Biesheuvel Cc: linux-efi@vger.kernel.org Signed-off-by: Catalin Marinas --- include/linux/efi.h | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) (limited to 'include/linux') diff --git a/include/linux/efi.h b/include/linux/efi.h index 45ff763fba76..bd80b7ec35db 100644 --- a/include/linux/efi.h +++ b/include/linux/efi.h @@ -1607,6 +1607,7 @@ efi_status_t efi_setup_gop(efi_system_table_t *sys_table_arg, bool efi_runtime_disabled(void); extern void efi_call_virt_check_flags(unsigned long flags, const char *call); +extern unsigned long efi_call_virt_save_flags(void); enum efi_secureboot_mode { efi_secureboot_mode_unset, @@ -1652,7 +1653,7 @@ void efi_retrieve_tpm2_eventlog(efi_system_table_t *sys_table); \ arch_efi_call_virt_setup(); \ \ - local_save_flags(__flags); \ + __flags = efi_call_virt_save_flags(); \ __s = arch_efi_call_virt(p, f, args); \ efi_call_virt_check_flags(__flags, __stringify(f)); \ \ @@ -1667,7 +1668,7 @@ void efi_retrieve_tpm2_eventlog(efi_system_table_t *sys_table); \ arch_efi_call_virt_setup(); \ \ - local_save_flags(__flags); \ + __flags = efi_call_virt_save_flags(); \ arch_efi_call_virt(p, f, args); \ efi_call_virt_check_flags(__flags, __stringify(f)); \ \ -- cgit v1.2.3 From 840018668ce2d96783356204ff282d6c9b0e5f66 Mon Sep 17 00:00:00 2001 From: Mathieu Poirier Date: Thu, 31 Jan 2019 11:47:08 -0700 Subject: perf/aux: Make perf_event accessible to setup_aux() When pmu::setup_aux() is called, the coresight PMU needs to know which sink to use for the session by looking up the information in the event's attr::config2 field. As such, simply replace the cpu information with the complete perf_event structure and change all affected customers. Signed-off-by: Mathieu Poirier Reviewed-by: Suzuki Poulouse Acked-by: Peter Zijlstra Cc: Adrian Hunter Cc: Alexander Shishkin Cc: Alexei Starovoitov Cc: Greg Kroah-Hartman Cc: H. 
Peter Anvin Cc: Heiko Carstens Cc: Jiri Olsa Cc: Mark Rutland Cc: Martin Schwidefsky Cc: Namhyung Kim Cc: Thomas Gleixner Cc: Will Deacon Cc: linux-arm-kernel@lists.infradead.org Cc: linux-s390@vger.kernel.org Link: http://lkml.kernel.org/r/20190131184714.20388-2-mathieu.poirier@linaro.org Signed-off-by: Arnaldo Carvalho de Melo --- include/linux/perf_event.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'include/linux') diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h index 6cb5d483ab34..d9c3610e0e25 100644 --- a/include/linux/perf_event.h +++ b/include/linux/perf_event.h @@ -410,7 +410,7 @@ struct pmu { /* * Set up pmu-private data structures for an AUX area */ - void *(*setup_aux) (int cpu, void **pages, + void *(*setup_aux) (struct perf_event *event, void **pages, int nr_pages, bool overwrite); /* optional */ -- cgit v1.2.3 From bb8e370bdc141ddff526e5e5ee74210c91fee0b8 Mon Sep 17 00:00:00 2001 From: Mathieu Poirier Date: Thu, 31 Jan 2019 11:47:09 -0700 Subject: coresight: perf: Add "sinks" group to PMU directory Add a "sinks" directory entry so that users can see all the sinks available in the system in a single place. Individual sink are added as they are registered with the coresight bus. Committer tests: Test built on a ubuntu 18.04 container with a cross build environment to arm64, the new field is there, need to find a machine with this feature to do further testing in the future. root@d15263e5734a:/git/perf# grep CORESIGHT /tmp/build/v5.0-rc2+/.config CONFIG_CORESIGHT=y CONFIG_CORESIGHT_LINKS_AND_SINKS=y CONFIG_CORESIGHT_LINK_AND_SINK_TMC=y CONFIG_CORESIGHT_CATU=y CONFIG_CORESIGHT_SINK_TPIU=y CONFIG_CORESIGHT_SINK_ETBV10=y CONFIG_CORESIGHT_SOURCE_ETM4X=y CONFIG_CORESIGHT_DYNAMIC_REPLICATOR=y CONFIG_CORESIGHT_STM=y CONFIG_CORESIGHT_CPU_DEBUG=m root@d15263e5734a:/git/perf# root@d15263e5734a:/git/perf# file /tmp/build/v5.0-rc2+/drivers/hwtracing/coresight/*.o .../coresight/coresight-catu.o: ELF 64-bit MSB relocatable, ARM aarch64, version 1 (SYSV), not stripped .../coresight/coresight-cpu-debug.mod.o: ELF 64-bit MSB relocatable, ARM aarch64, version 1 (SYSV), not stripped .../coresight/coresight-cpu-debug.o: ELF 64-bit MSB relocatable, ARM aarch64, version 1 (SYSV), not stripped .../coresight/coresight-dynamic-replicator.o: ELF 64-bit MSB relocatable, ARM aarch64, version 1 (SYSV), not stripped .../coresight/coresight-etb10.o: ELF 64-bit MSB relocatable, ARM aarch64, version 1 (SYSV), not stripped .../coresight/coresight-etm-perf.o: ELF 64-bit MSB relocatable, ARM aarch64, version 1 (SYSV), not stripped .../coresight/coresight-etm4x-sysfs.o: ELF 64-bit MSB relocatable, ARM aarch64, version 1 (SYSV), not stripped .../coresight/coresight-etm4x.o: ELF 64-bit MSB relocatable, ARM aarch64, version 1 (SYSV), not stripped .../coresight/coresight-funnel.o: ELF 64-bit MSB relocatable, ARM aarch64, version 1 (SYSV), not stripped .../coresight/coresight-replicator.o: ELF 64-bit MSB relocatable, ARM aarch64, version 1 (SYSV), not stripped .../coresight/coresight-stm.o: ELF 64-bit MSB relocatable, ARM aarch64, version 1 (SYSV), not stripped .../coresight/coresight-tmc-etf.o: ELF 64-bit MSB relocatable, ARM aarch64, version 1 (SYSV), not stripped .../coresight/coresight-tmc-etr.o: ELF 64-bit MSB relocatable, ARM aarch64, version 1 (SYSV), not stripped .../coresight/coresight-tmc.o: ELF 64-bit MSB relocatable, ARM aarch64, version 1 (SYSV), not stripped .../coresight/coresight-tpiu.o: ELF 64-bit MSB relocatable, ARM aarch64, version 1 (SYSV), not stripped 
.../coresight/coresight.o: ELF 64-bit MSB relocatable, ARM aarch64, version 1 (SYSV), not stripped .../coresight/of_coresight.o: ELF 64-bit MSB relocatable, ARM aarch64, version 1 (SYSV), not stripped root@d15263e5734a:/git/perf# root@d15263e5734a:/git/perf# pahole -C coresight_device /tmp/build/v5.0-rc2+/drivers/hwtracing/coresight/coresight.o struct coresight_device { struct coresight_connection * conns; /* 0 8 */ int nr_inport; /* 8 4 */ int nr_outport; /* 12 4 */ enum coresight_dev_type type; /* 16 4 */ union coresight_dev_subtype subtype; /* 20 8 */ /* XXX 4 bytes hole, try to pack */ const struct coresight_ops * ops; /* 32 8 */ struct device dev; /* 40 1408 */ /* XXX last struct has 7 bytes of padding */ /* --- cacheline 22 boundary (1408 bytes) was 40 bytes ago --- */ atomic_t * refcnt; /* 1448 8 */ bool orphan; /* 1456 1 */ bool enable; /* 1457 1 */ bool activated; /* 1458 1 */ /* XXX 5 bytes hole, try to pack */ struct dev_ext_attribute * ea; /* 1464 8 */ /* size: 1472, cachelines: 23, members: 12 */ /* sum members: 1463, holes: 2, sum holes: 9 */ /* paddings: 1, sum paddings: 7 */ }; root@d15263e5734a:/git/perf# Signed-off-by: Mathieu Poirier Reviewed-by: Suzuki K Poulose Acked-by: Peter Zijlstra Cc: Adrian Hunter Cc: Alexander Shishkin Cc: Alexei Starovoitov Cc: Greg Kroah-Hartman Cc: H. Peter Anvin Cc: Heiko Carstens Cc: Jiri Olsa Cc: Mark Rutland Cc: Martin Schwidefsky Cc: Namhyung Kim Cc: Peter Zijlstra Cc: Thomas Gleixner Cc: Will Deacon Cc: linux-arm-kernel@lists.infradead.org Cc: linux-s390@vger.kernel.org Link: http://lkml.kernel.org/r/20190131184714.20388-3-mathieu.poirier@linaro.org Signed-off-by: Arnaldo Carvalho de Melo --- include/linux/coresight.h | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) (limited to 'include/linux') diff --git a/include/linux/coresight.h b/include/linux/coresight.h index 46c67a764877..7b87965f7a65 100644 --- a/include/linux/coresight.h +++ b/include/linux/coresight.h @@ -154,8 +154,9 @@ struct coresight_connection { * @orphan: true if the component has connections that haven't been linked. * @enable: 'true' if component is currently part of an active path. * @activated: 'true' only if a _sink_ has been activated. A sink can be - activated but not yet enabled. Enabling for a _sink_ - happens when a source has been selected for that it. + * activated but not yet enabled. Enabling for a _sink_ + * appens when a source has been selected for that it. + * @ea: Device attribute for sink representation under PMU directory. */ struct coresight_device { struct coresight_connection *conns; @@ -168,7 +169,9 @@ struct coresight_device { atomic_t *refcnt; bool orphan; bool enable; /* true only if configured as part of a path */ + /* sink specific fields */ bool activated; /* true only if a sink is part of a path */ + struct dev_ext_attribute *ea; }; #define to_coresight_device(d) container_of(d, struct coresight_device, dev) -- cgit v1.2.3 From 5f02a877638472e83cb5e335f9eec27052b1c7c2 Mon Sep 17 00:00:00 2001 From: Amir Goldstein Date: Thu, 10 Jan 2019 19:04:28 +0200 Subject: fsnotify: annotate directory entry modification events "dirent" events are referring to events that modify directory entries, such as create,delete,rename. Those events should always be reported on a watched directory, regardless if FS_EVENT_ON_CHILD is set on the watch mask. fsnotify_nameremove() and fsnotify_move() were modified to no longer set the FS_EVENT_ON_CHILD event bit. This is a semantic change to align with the "dirent" event definition. 
It has no effect on any existing backend, because dnotify, inotify and audit always request the child events and fanotify does not get the delete,rename events. The fsnotify_dirent() helper is used instead of fsnotify_parent() to report a dirent event to dentry->d_parent without FS_EVENT_ON_CHILD and regardless of whether the parent has the FS_EVENT_ON_CHILD bit set. Unlike fsnotify_parent(), fsnotify_dirent() assumes that dentry->d_name and dentry->d_parent are stable. For fsnotify_create()/fsnotify_mkdir(), this assumption is obviously correct. For fsnotify_nameremove(), it is less trivial, so we use dget_parent() and take_dentry_name_snapshot() to grab stable references. Signed-off-by: Amir Goldstein Signed-off-by: Jan Kara --- include/linux/fsnotify.h | 49 +++++++++++++++++++++++++++++++++++++++--------- 1 file changed, 40 insertions(+), 9 deletions(-) (limited to 'include/linux') diff --git a/include/linux/fsnotify.h b/include/linux/fsnotify.h index 2ccb08cb5d6a..39b22e88423d 100644 --- a/include/linux/fsnotify.h +++ b/include/linux/fsnotify.h @@ -17,8 +17,22 @@ #include #include +/* + * Notify this @dir inode about a change in the directory entry @dentry. + * + * Unlike fsnotify_parent(), the event will be reported regardless of the + * FS_EVENT_ON_CHILD mask on the parent inode. + */ +static inline int fsnotify_dirent(struct inode *dir, struct dentry *dentry, + __u32 mask) +{ + return fsnotify(dir, mask, d_inode(dentry), FSNOTIFY_EVENT_INODE, + dentry->d_name.name, 0); +} + /* Notify this dentry's parent about a child's events. */ -static inline int fsnotify_parent(const struct path *path, struct dentry *dentry, __u32 mask) +static inline int fsnotify_parent(const struct path *path, + struct dentry *dentry, __u32 mask) { if (!dentry) dentry = path->dentry; @@ -85,8 +99,8 @@ static inline void fsnotify_move(struct inode *old_dir, struct inode *new_dir, { struct inode *source = moved->d_inode; u32 fs_cookie = fsnotify_get_cookie(); - __u32 old_dir_mask = (FS_EVENT_ON_CHILD | FS_MOVED_FROM); - __u32 new_dir_mask = (FS_EVENT_ON_CHILD | FS_MOVED_TO); + __u32 old_dir_mask = FS_MOVED_FROM; + __u32 new_dir_mask = FS_MOVED_TO; const unsigned char *new_name = moved->d_name.name; if (old_dir == new_dir) @@ -128,15 +142,35 @@ static inline void fsnotify_vfsmount_delete(struct vfsmount *mnt) /* * fsnotify_nameremove - a filename was removed from a directory + * + * This is mostly called under parent vfs inode lock so name and + * dentry->d_parent should be stable. However there are some corner cases where + * inode lock is not held. So to be on the safe side and be reselient to future + * callers and out of tree users of d_delete(), we do not assume that d_parent + * and d_name are stable and we use dget_parent() and + * take_dentry_name_snapshot() to grab stable references. */ static inline void fsnotify_nameremove(struct dentry *dentry, int isdir) { + struct dentry *parent; + struct name_snapshot name; __u32 mask = FS_DELETE; + /* d_delete() of pseudo inode? (e.g. 
__ns_get_path() playing tricks) */ + if (IS_ROOT(dentry)) + return; + if (isdir) mask |= FS_ISDIR; - fsnotify_parent(NULL, dentry, mask); + parent = dget_parent(dentry); + take_dentry_name_snapshot(&name, dentry); + + fsnotify(d_inode(parent), mask, d_inode(dentry), FSNOTIFY_EVENT_INODE, + name.name, 0); + + release_dentry_name_snapshot(&name); + dput(parent); } /* @@ -155,7 +189,7 @@ static inline void fsnotify_create(struct inode *inode, struct dentry *dentry) { audit_inode_child(inode, dentry, AUDIT_TYPE_CHILD_CREATE); - fsnotify(inode, FS_CREATE, dentry->d_inode, FSNOTIFY_EVENT_INODE, dentry->d_name.name, 0); + fsnotify_dirent(inode, dentry, FS_CREATE); } /* @@ -176,12 +210,9 @@ static inline void fsnotify_link(struct inode *dir, struct inode *inode, struct */ static inline void fsnotify_mkdir(struct inode *inode, struct dentry *dentry) { - __u32 mask = (FS_CREATE | FS_ISDIR); - struct inode *d_inode = dentry->d_inode; - audit_inode_child(inode, dentry, AUDIT_TYPE_CHILD_CREATE); - fsnotify(inode, mask, d_inode, FSNOTIFY_EVENT_INODE, dentry->d_name.name, 0); + fsnotify_dirent(inode, dentry, FS_CREATE | FS_ISDIR); } /* -- cgit v1.2.3 From e220140ff6241e180d0c2fc294e61ee6bbc6a18e Mon Sep 17 00:00:00 2001 From: Amir Goldstein Date: Thu, 10 Jan 2019 19:04:29 +0200 Subject: fsnotify: remove dirent events from FS_EVENTS_POSS_ON_CHILD mask "dirent" events are referring to events that modify directory entries, such as create,delete,rename. Those events are always reported on a watched directory, regardless of whether FS_EVENT_ON_CHILD is set on the watch mask. ALL_FSNOTIFY_DIRENT_EVENTS defines all the dirent event types and those event types are removed from FS_EVENTS_POSS_ON_CHILD. That means for a directory with an inotify watch and only dirent events in the mask (i.e. create,delete,move), all child dentries will no longer have the DCACHE_FSNOTIFY_PARENT_WATCHED flag set. This will allow all events that happen on children to be optimized away in __fsnotify_parent() without the need to dereference child->d_parent->d_inode->i_fsnotify_mask. Since the dirent events are never reported via __fsnotify_parent(), this results in no change of logic, but only an optimization. Signed-off-by: Amir Goldstein Signed-off-by: Jan Kara --- include/linux/fsnotify_backend.h | 36 +++++++++++++++++++++--------------- 1 file changed, 21 insertions(+), 15 deletions(-) (limited to 'include/linux') diff --git a/include/linux/fsnotify_backend.h b/include/linux/fsnotify_backend.h index 7639774e7475..7f195d43efaf 100644 --- a/include/linux/fsnotify_backend.h +++ b/include/linux/fsnotify_backend.h @@ -59,27 +59,33 @@ * dnotify and inotify. */ #define FS_EVENT_ON_CHILD 0x08000000 -/* This is a list of all events that may get sent to a parernt based on fs event - * happening to inodes inside that directory */ -#define FS_EVENTS_POSS_ON_CHILD (FS_ACCESS | FS_MODIFY | FS_ATTRIB |\ - FS_CLOSE_WRITE | FS_CLOSE_NOWRITE | FS_OPEN |\ - FS_MOVED_FROM | FS_MOVED_TO | FS_CREATE |\ - FS_DELETE | FS_OPEN_PERM | FS_ACCESS_PERM | \ - FS_OPEN_EXEC | FS_OPEN_EXEC_PERM) - #define FS_MOVE (FS_MOVED_FROM | FS_MOVED_TO) +/* + * Directory entry modification events - reported only to directory + * where entry is modified and not to a watching parent. + * The watching parent may get an FS_ATTRIB|FS_EVENT_ON_CHILD event + * when a directory entry inside a child subdir changes. 
+ */ #define ALL_FSNOTIFY_DIRENT_EVENTS (FS_CREATE | FS_DELETE | FS_MOVE) + #define ALL_FSNOTIFY_PERM_EVENTS (FS_OPEN_PERM | FS_ACCESS_PERM | \ FS_OPEN_EXEC_PERM) +/* + * This is a list of all events that may get sent to a parent based on fs event + * happening to inodes inside that directory. + */ +#define FS_EVENTS_POSS_ON_CHILD (ALL_FSNOTIFY_PERM_EVENTS | \ + FS_ACCESS | FS_MODIFY | FS_ATTRIB | \ + FS_CLOSE_WRITE | FS_CLOSE_NOWRITE | \ + FS_OPEN | FS_OPEN_EXEC) + /* Events that can be reported to backends */ -#define ALL_FSNOTIFY_EVENTS (FS_ACCESS | FS_MODIFY | FS_ATTRIB | \ - FS_CLOSE_WRITE | FS_CLOSE_NOWRITE | FS_OPEN | \ - FS_MOVED_FROM | FS_MOVED_TO | FS_CREATE | \ - FS_DELETE | FS_DELETE_SELF | FS_MOVE_SELF | \ - FS_UNMOUNT | FS_Q_OVERFLOW | FS_IN_IGNORED | \ - FS_OPEN_PERM | FS_ACCESS_PERM | FS_DN_RENAME | \ - FS_OPEN_EXEC | FS_OPEN_EXEC_PERM) +#define ALL_FSNOTIFY_EVENTS (ALL_FSNOTIFY_DIRENT_EVENTS | \ + FS_EVENTS_POSS_ON_CHILD | \ + FS_DELETE_SELF | FS_MOVE_SELF | FS_DN_RENAME | \ + FS_UNMOUNT | FS_Q_OVERFLOW | FS_IN_IGNORED) /* Extra flags that may be reported with event or control handling of events */ #define ALL_FSNOTIFY_FLAGS (FS_EXCL_UNLINK | FS_ISDIR | FS_IN_ONESHOT | \ -- cgit v1.2.3 From a0a92d261f2922f4b5d2c0a98d6c41a89c7f5edd Mon Sep 17 00:00:00 2001 From: Amir Goldstein Date: Thu, 10 Jan 2019 19:04:31 +0200 Subject: fsnotify: move mask out of struct fsnotify_event Common fsnotify_event helpers have no need for the mask field. It is only used by backend code, so move the field out of the abstract fsnotify_event struct and into the concrete backend event structs. This change packs struct inotify_event_info better on 64-bit machines and will allow us to cram some more fields into struct fanotify_event_info. Signed-off-by: Amir Goldstein Signed-off-by: Jan Kara --- include/linux/fsnotify_backend.h | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) (limited to 'include/linux') diff --git a/include/linux/fsnotify_backend.h b/include/linux/fsnotify_backend.h index 7f195d43efaf..1e4b88bd1443 100644 --- a/include/linux/fsnotify_backend.h +++ b/include/linux/fsnotify_backend.h @@ -135,7 +135,6 @@ struct fsnotify_event { struct list_head list; /* inode may ONLY be dereferenced during handle_event(). */ struct inode *inode; /* either the inode the event happened to or its parent */ - u32 mask; /* the type of access, bitwise OR for FS_* event types */ }; /* @@ -485,9 +484,12 @@ extern void fsnotify_put_mark(struct fsnotify_mark *mark); extern void fsnotify_finish_user_wait(struct fsnotify_iter_info *iter_info); extern bool fsnotify_prepare_user_wait(struct fsnotify_iter_info *iter_info); -/* put here because inotify does some weird stuff when destroying watches */ -extern void fsnotify_init_event(struct fsnotify_event *event, - struct inode *to_tell, u32 mask); +static inline void fsnotify_init_event(struct fsnotify_event *event, + struct inode *inode) +{ + INIT_LIST_HEAD(&event->list); + event->inode = inode; +} #else -- cgit v1.2.3 From d6cd33ad71029a3f77ba1686caf55d4dea58d916 Mon Sep 17 00:00:00 2001 From: Linus Walleij Date: Tue, 29 Jan 2019 11:31:52 +0100 Subject: regulator: gpio: Convert to use descriptors This converts the GPIO regulator driver to use descriptors only. We have to let go of the array gpio handling: the fetched descriptors are handled individually anyway, and the array retrieval function does not make it possible to retrieve each GPIO descriptor with unique flags. Instead get them one by one. 
We request the "enable" GPIO separately as before, and make sure that this line is requested as nonexclusive since enable lines can be shared and the regulator core expects this. Most users of the GPIO regulator are using device tree. There are two boards in the kernel using the gpio regulator from a non-devicetree path: PXA hx4700 and magician. Make sure to switch these over to use descriptors as well. Cc: Philipp Zabel # Magician Cc: Petr Cvek # Magician Cc: Robert Jarzmik # PXA Cc: Paul Parsons # hx4700 Cc: Kevin Hilman # Meson Cc: Neil Armstrong # Meson Tested-by: Marek Szyprowski Signed-off-by: Linus Walleij Signed-off-by: Mark Brown --- include/linux/regulator/gpio-regulator.h | 12 +++++++----- 1 file changed, 7 insertions(+), 5 deletions(-) (limited to 'include/linux') diff --git a/include/linux/regulator/gpio-regulator.h b/include/linux/regulator/gpio-regulator.h index 19fbd267406d..49c407afb944 100644 --- a/include/linux/regulator/gpio-regulator.h +++ b/include/linux/regulator/gpio-regulator.h @@ -21,6 +21,8 @@ #ifndef __REGULATOR_GPIO_H #define __REGULATOR_GPIO_H +#include + struct regulator_init_data; enum regulator_type; @@ -53,9 +55,9 @@ struct gpio_regulator_state { * This is used to keep the regulator at * the default state * @startup_delay: Start-up time in microseconds - * @gpios: Array containing the gpios needed to control - * the setting of the regulator - * @nr_gpios: Number of gpios + * @gflags: Array of GPIO configuration flags for initial + * states + * @ngpios: Number of GPIOs and configurations available * @states: Array of gpio_regulator_state entries describing * the gpio state for specific voltages * @nr_states: Number of states available @@ -74,8 +76,8 @@ struct gpio_regulator_config { unsigned enabled_at_boot:1; unsigned startup_delay; - struct gpio *gpios; - int nr_gpios; + enum gpiod_flags *gflags; + int ngpios; struct gpio_regulator_state *states; int nr_states; -- cgit v1.2.3 From 01dc79cd6fe7d25b0eba84009634f5435cbdb4e6 Mon Sep 17 00:00:00 2001 From: Linus Walleij Date: Tue, 29 Jan 2019 11:31:53 +0100 Subject: regulator: fixed/gpio: Pull inversion/OD into gpiolib This pushes the handling of inversion semantics and open drain settings to the GPIO descriptor and gpiolib. All affected board files are also augmented. This is especially nice since we don't have to have any confusing flags passed around to the left and right littering the fixed and GPIO regulator drivers and the regulator core. It is all just very straight-forward: the core asks the GPIO line to be asserted or deasserted and gpiolib deals with the rest depending on how the platform is configured: if the line is active low, it deals with that, if the line is open drain, it deals with that too. 
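To make the board-file side concrete, here is a hedged sketch (every name in it, the "gpiochip0" label, line 12 and the "reg-fixed-voltage.0" device, is invented) of how polarity and open-drain now live in a gpiod lookup table rather than in the regulator platform data:

#include <linux/gpio/machine.h>
#include <linux/init.h>

static struct gpiod_lookup_table fixed_reg_en_gpio_table = {
	.dev_id = "reg-fixed-voltage.0",
	.table = {
		/* enable line: active low and open drain, handled by gpiolib */
		GPIO_LOOKUP("gpiochip0", 12, NULL,
			    GPIO_ACTIVE_LOW | GPIO_OPEN_DRAIN),
		{ },
	},
};

static void __init board_regulator_init(void)
{
	gpiod_add_lookup_table(&fixed_reg_en_gpio_table);
	/* then register the fixed/gpio regulator platform device as before */
}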
Cc: Alexander Shiyan # i.MX boards user Cc: Haojian Zhuang # MMP2 maintainer Cc: Aaro Koskinen # OMAP1 maintainer Cc: Tony Lindgren # OMAP1,2,3 maintainer Cc: Mike Rapoport # EM-X270 maintainer Cc: Robert Jarzmik # EZX maintainer Cc: Philipp Zabel # Magician maintainer Cc: Petr Cvek # Magician Cc: Robert Jarzmik # PXA Cc: Paul Parsons # hx4700 Cc: Daniel Mack # Raumfeld maintainer Cc: Marc Zyngier # Zeus maintainer Cc: Geert Uytterhoeven # SuperH pinctrl/GPIO maintainer Cc: Russell King # SA1100 Tested-by: Marek Szyprowski Tested-by: Janusz Krzysztofik #OMAP1 Amstrad Delta Signed-off-by: Linus Walleij Signed-off-by: Mark Brown --- include/linux/regulator/fixed.h | 10 ---------- include/linux/regulator/gpio-regulator.h | 6 ------ 2 files changed, 16 deletions(-) (limited to 'include/linux') diff --git a/include/linux/regulator/fixed.h b/include/linux/regulator/fixed.h index 1a4340ed8e2b..f10140da7145 100644 --- a/include/linux/regulator/fixed.h +++ b/include/linux/regulator/fixed.h @@ -25,14 +25,6 @@ struct regulator_init_data; * @input_supply: Name of the input regulator supply * @microvolts: Output voltage of regulator * @startup_delay: Start-up time in microseconds - * @gpio_is_open_drain: Gpio pin is open drain or normal type. - * If it is open drain type then HIGH will be set - * through PULL-UP with setting gpio as input - * and low will be set as gpio-output with driven - * to low. For non-open-drain case, the gpio will - * will be in output and drive to low/high accordingly. - * @enable_high: Polarity of enable GPIO - * 1 = Active high, 0 = Active low * @enabled_at_boot: Whether regulator has been enabled at * boot or not. 1 = Yes, 0 = No * This is used to keep the regulator at @@ -48,8 +40,6 @@ struct fixed_voltage_config { const char *input_supply; int microvolts; unsigned startup_delay; - unsigned gpio_is_open_drain:1; - unsigned enable_high:1; unsigned enabled_at_boot:1; struct regulator_init_data *init_data; }; diff --git a/include/linux/regulator/gpio-regulator.h b/include/linux/regulator/gpio-regulator.h index 49c407afb944..11cd6375215d 100644 --- a/include/linux/regulator/gpio-regulator.h +++ b/include/linux/regulator/gpio-regulator.h @@ -46,10 +46,6 @@ struct gpio_regulator_state { /** * struct gpio_regulator_config - config structure * @supply_name: Name of the regulator supply - * @enable_gpio: GPIO to use for enable control - * set to -EINVAL if not used - * @enable_high: Polarity of enable GPIO - * 1 = Active high, 0 = Active low * @enabled_at_boot: Whether regulator has been enabled at * boot or not. 1 = Yes, 0 = No * This is used to keep the regulator at @@ -71,8 +67,6 @@ struct gpio_regulator_state { struct gpio_regulator_config { const char *supply_name; - int enable_gpio; - unsigned enable_high:1; unsigned enabled_at_boot:1; unsigned startup_delay; -- cgit v1.2.3 From 541d052d721506549774ab780a2709e4ff8ca79b Mon Sep 17 00:00:00 2001 From: Linus Walleij Date: Tue, 29 Jan 2019 11:31:56 +0100 Subject: regulator: core: Only support passing enable GPIO descriptors Now that we changed all providers to pass descriptors into the core for enable GPIOs instead of a global GPIO number, delete the support for passing GPIO numbers in, and we get a cleanup and size reduction in the core, and from a GPIO point of view we use the modern, cleaner interface. 
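A minimal sketch of what a regulator driver now passes to the core (the "my-reg" driver, the my_reg_desc descriptor and the "enable" function name are all made up): only a GPIO descriptor in config.ena_gpiod, with polarity and open-drain coming from the firmware or board tables via gpiolib:

#include <linux/err.h>
#include <linux/gpio/consumer.h>
#include <linux/platform_device.h>
#include <linux/regulator/driver.h>

/* regulator description; ops, name etc. omitted in this sketch */
static const struct regulator_desc my_reg_desc;

static int my_reg_probe(struct platform_device *pdev)
{
	struct regulator_config config = { .dev = &pdev->dev };
	struct regulator_dev *rdev;

	/* no GPIO number, no polarity flag, no open-drain flag */
	config.ena_gpiod = gpiod_get_optional(&pdev->dev, "enable", GPIOD_OUT_LOW);
	if (IS_ERR(config.ena_gpiod))
		return PTR_ERR(config.ena_gpiod);

	rdev = devm_regulator_register(&pdev->dev, &my_reg_desc, &config);
	return PTR_ERR_OR_ZERO(rdev);
}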
Tested-by: Marek Szyprowski Signed-off-by: Linus Walleij Signed-off-by: Mark Brown --- include/linux/regulator/driver.h | 12 +----------- 1 file changed, 1 insertion(+), 11 deletions(-) (limited to 'include/linux') diff --git a/include/linux/regulator/driver.h b/include/linux/regulator/driver.h index 795b38a06b6c..7f8345bff4e1 100644 --- a/include/linux/regulator/driver.h +++ b/include/linux/regulator/driver.h @@ -401,13 +401,7 @@ struct regulator_desc { * NULL). * @regmap: regmap to use for core regmap helpers if dev_get_regmap() is * insufficient. - * @ena_gpio_initialized: GPIO controlling regulator enable was properly - * initialized, meaning that >= 0 is a valid gpio - * identifier and < 0 is a non existent gpio. - * @ena_gpio: GPIO controlling regulator enable. - * @ena_gpiod: GPIO descriptor controlling regulator enable. - * @ena_gpio_invert: Sense for GPIO enable control. - * @ena_gpio_flags: Flags to use when calling gpio_request_one() + * @ena_gpiod: GPIO controlling regulator enable. */ struct regulator_config { struct device *dev; @@ -416,11 +410,7 @@ struct regulator_config { struct device_node *of_node; struct regmap *regmap; - bool ena_gpio_initialized; - int ena_gpio; struct gpio_desc *ena_gpiod; - unsigned int ena_gpio_invert:1; - unsigned int ena_gpio_flags; }; /* -- cgit v1.2.3 From d325c402964e7c63db94e9138c530832269a1297 Mon Sep 17 00:00:00 2001 From: Miroslav Benes Date: Fri, 28 Dec 2018 14:38:47 +0100 Subject: ring-buffer: Remove unused function ring_buffer_page_len() Commit 6b7e633fe9c2 ("tracing: Remove extra zeroing out of the ring buffer page") removed the only caller of ring_buffer_page_len(). The function is now unused and may be removed. Link: http://lkml.kernel.org/r/20181228133847.106177-1-mbenes@suse.cz Signed-off-by: Miroslav Benes Signed-off-by: Steven Rostedt (VMware) --- include/linux/ring_buffer.h | 2 -- 1 file changed, 2 deletions(-) (limited to 'include/linux') diff --git a/include/linux/ring_buffer.h b/include/linux/ring_buffer.h index 5b9ae62272bb..f1429675f252 100644 --- a/include/linux/ring_buffer.h +++ b/include/linux/ring_buffer.h @@ -187,8 +187,6 @@ void ring_buffer_set_clock(struct ring_buffer *buffer, void ring_buffer_set_time_stamp_abs(struct ring_buffer *buffer, bool abs); bool ring_buffer_time_stamp_abs(struct ring_buffer *buffer); -size_t ring_buffer_page_len(void *page); - size_t ring_buffer_nr_pages(struct ring_buffer *buffer, int cpu); size_t ring_buffer_nr_dirty_pages(struct ring_buffer *buffer, int cpu); -- cgit v1.2.3 From 1878f0dcbff0cd07f62602deb160a44d69a8f146 Mon Sep 17 00:00:00 2001 From: Nikita Yushchenko Date: Wed, 6 Feb 2019 07:36:40 +0100 Subject: net: phy: provide full set of accessor functions to MMD registers This adds full set of locked and unlocked accessor functions to read and write PHY MMD registers and/or bitfields. Set of functions exactly matches what is already available for PHY legacy registers. Signed-off-by: Nikita Yushchenko Signed-off-by: Andrew Lunn Signed-off-by: Heiner Kallweit Signed-off-by: David S. 
Miller --- include/linux/phy.h | 134 +++++++++++++++++++++++++++++++++++++++++++--------- 1 file changed, 111 insertions(+), 23 deletions(-) (limited to 'include/linux') diff --git a/include/linux/phy.h b/include/linux/phy.h index 70f83d0d7469..237dd035858a 100644 --- a/include/linux/phy.h +++ b/include/linux/phy.h @@ -692,17 +692,6 @@ static inline bool phy_is_started(struct phy_device *phydev) void phy_resolve_aneg_linkmode(struct phy_device *phydev); -/** - * phy_read_mmd - Convenience function for reading a register - * from an MMD on a given PHY. - * @phydev: The phy_device struct - * @devad: The MMD to read from - * @regnum: The register on the MMD to read - * - * Same rules as for phy_read(); - */ -int phy_read_mmd(struct phy_device *phydev, int devad, u32 regnum); - /** * phy_read - Convenience function for reading a given PHY register * @phydev: the phy_device struct @@ -758,9 +747,60 @@ static inline int __phy_write(struct phy_device *phydev, u32 regnum, u16 val) val); } +/** + * phy_read_mmd - Convenience function for reading a register + * from an MMD on a given PHY. + * @phydev: The phy_device struct + * @devad: The MMD to read from + * @regnum: The register on the MMD to read + * + * Same rules as for phy_read(); + */ +int phy_read_mmd(struct phy_device *phydev, int devad, u32 regnum); + +/** + * __phy_read_mmd - Convenience function for reading a register + * from an MMD on a given PHY. + * @phydev: The phy_device struct + * @devad: The MMD to read from + * @regnum: The register on the MMD to read + * + * Same rules as for __phy_read(); + */ +int __phy_read_mmd(struct phy_device *phydev, int devad, u32 regnum); + +/** + * phy_write_mmd - Convenience function for writing a register + * on an MMD on a given PHY. + * @phydev: The phy_device struct + * @devad: The MMD to write to + * @regnum: The register on the MMD to read + * @val: value to write to @regnum + * + * Same rules as for phy_write(); + */ +int phy_write_mmd(struct phy_device *phydev, int devad, u32 regnum, u16 val); + +/** + * __phy_write_mmd - Convenience function for writing a register + * on an MMD on a given PHY. + * @phydev: The phy_device struct + * @devad: The MMD to write to + * @regnum: The register on the MMD to read + * @val: value to write to @regnum + * + * Same rules as for __phy_write(); + */ +int __phy_write_mmd(struct phy_device *phydev, int devad, u32 regnum, u16 val); + int __phy_modify(struct phy_device *phydev, u32 regnum, u16 mask, u16 set); int phy_modify(struct phy_device *phydev, u32 regnum, u16 mask, u16 set); +int __phy_modify_mmd(struct phy_device *phydev, int devad, u32 regnum, + u16 mask, u16 set); +int phy_modify_mmd(struct phy_device *phydev, int devad, u32 regnum, + u16 mask, u16 set); + /** * __phy_set_bits - Convenience function for setting bits in a PHY register * @phydev: the phy_device struct @@ -810,6 +850,66 @@ static inline int phy_clear_bits(struct phy_device *phydev, u32 regnum, u16 val) return phy_modify(phydev, regnum, val, 0); } +/** + * __phy_set_bits_mmd - Convenience function for setting bits in a register + * on MMD + * @phydev: the phy_device struct + * @devad: the MMD containing register to modify + * @regnum: register number to modify + * @val: bits to set + * + * The caller must have taken the MDIO bus lock. 
+ */ +static inline int __phy_set_bits_mmd(struct phy_device *phydev, int devad, + u32 regnum, u16 val) +{ + return __phy_modify_mmd(phydev, devad, regnum, 0, val); +} + +/** + * __phy_clear_bits_mmd - Convenience function for clearing bits in a register + * on MMD + * @phydev: the phy_device struct + * @devad: the MMD containing register to modify + * @regnum: register number to modify + * @val: bits to clear + * + * The caller must have taken the MDIO bus lock. + */ +static inline int __phy_clear_bits_mmd(struct phy_device *phydev, int devad, + u32 regnum, u16 val) +{ + return __phy_modify_mmd(phydev, devad, regnum, val, 0); +} + +/** + * phy_set_bits_mmd - Convenience function for setting bits in a register + * on MMD + * @phydev: the phy_device struct + * @devad: the MMD containing register to modify + * @regnum: register number to modify + * @val: bits to set + */ +static inline int phy_set_bits_mmd(struct phy_device *phydev, int devad, + u32 regnum, u16 val) +{ + return phy_modify_mmd(phydev, devad, regnum, 0, val); +} + +/** + * phy_clear_bits_mmd - Convenience function for clearing bits in a register + * on MMD + * @phydev: the phy_device struct + * @devad: the MMD containing register to modify + * @regnum: register number to modify + * @val: bits to clear + */ +static inline int phy_clear_bits_mmd(struct phy_device *phydev, int devad, + u32 regnum, u16 val) +{ + return phy_modify_mmd(phydev, devad, regnum, val, 0); +} + /** * phy_interrupt_is_valid - Convenience function for testing a given PHY irq * @phydev: the phy_device struct @@ -886,18 +986,6 @@ static inline bool phy_is_pseudo_fixed_link(struct phy_device *phydev) return phydev->is_pseudo_fixed_link; } -/** - * phy_write_mmd - Convenience function for writing a register - * on an MMD on a given PHY. - * @phydev: The phy_device struct - * @devad: The MMD to read from - * @regnum: The register on the MMD to read - * @val: value to write to @regnum - * - * Same rules as for phy_write(); - */ -int phy_write_mmd(struct phy_device *phydev, int devad, u32 regnum, u16 val); - int phy_save_page(struct phy_device *phydev); int phy_select_page(struct phy_device *phydev, int page); int phy_restore_page(struct phy_device *phydev, int oldpage, int ret); -- cgit v1.2.3 From fd9dc93e36231fb6d520e0edd467058fad4fd12d Mon Sep 17 00:00:00 2001 From: Matthew Wilcox Date: Wed, 6 Feb 2019 13:07:11 -0500 Subject: XArray: Change xa_insert to return -EBUSY Userspace translates EEXIST to "File exists" which isn't a very good error message for the problem. "Device or resource busy" is a better indication of what went wrong. Signed-off-by: Matthew Wilcox --- include/linux/xarray.h | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) (limited to 'include/linux') diff --git a/include/linux/xarray.h b/include/linux/xarray.h index e11841537631..57cf35c4d094 100644 --- a/include/linux/xarray.h +++ b/include/linux/xarray.h @@ -664,7 +664,7 @@ static inline void *xa_cmpxchg_irq(struct xarray *xa, unsigned long index, * * Context: Any context. Takes and releases the xa_lock. May sleep if * the @gfp flags permit. - * Return: 0 if the store succeeded. -EEXIST if another entry was present. + * Return: 0 if the store succeeded. -EBUSY if another entry was present. * -ENOMEM if memory could not be allocated. */ static inline int xa_insert(struct xarray *xa, unsigned long index, @@ -693,7 +693,7 @@ static inline int xa_insert(struct xarray *xa, unsigned long index, * * Context: Any context. Takes and releases the xa_lock while * disabling softirqs. 
May sleep if the @gfp flags permit. - * Return: 0 if the store succeeded. -EEXIST if another entry was present. + * Return: 0 if the store succeeded. -EBUSY if another entry was present. * -ENOMEM if memory could not be allocated. */ static inline int xa_insert_bh(struct xarray *xa, unsigned long index, @@ -722,7 +722,7 @@ static inline int xa_insert_bh(struct xarray *xa, unsigned long index, * * Context: Process context. Takes and releases the xa_lock while * disabling interrupts. May sleep if the @gfp flags permit. - * Return: 0 if the store succeeded. -EEXIST if another entry was present. + * Return: 0 if the store succeeded. -EBUSY if another entry was present. * -ENOMEM if memory could not be allocated. */ static inline int xa_insert_irq(struct xarray *xa, unsigned long index, -- cgit v1.2.3 From 3ccaf57a6a63ad171a951dcaddffc453b2414c7b Mon Sep 17 00:00:00 2001 From: Matthew Wilcox Date: Fri, 26 Oct 2018 14:43:22 -0400 Subject: XArray: Add support for 1s-based allocation A lot of places want to allocate IDs starting at 1 instead of 0. While the xa_alloc() API supports this, it's not very efficient if lots of IDs are allocated, due to having to walk down to the bottom of the tree to see if ID 1 is available, then all the way over to the next non-allocated ID. This method marks ID 0 as being occupied which wastes one slot in the XArray, but preserves xa_empty() as working. Signed-off-by: Matthew Wilcox --- include/linux/xarray.h | 14 +++++++++++++- 1 file changed, 13 insertions(+), 1 deletion(-) (limited to 'include/linux') diff --git a/include/linux/xarray.h b/include/linux/xarray.h index 57cf35c4d094..99dd0838b4ba 100644 --- a/include/linux/xarray.h +++ b/include/linux/xarray.h @@ -220,10 +220,13 @@ enum xa_lock_type { #define XA_FLAGS_LOCK_IRQ ((__force gfp_t)XA_LOCK_IRQ) #define XA_FLAGS_LOCK_BH ((__force gfp_t)XA_LOCK_BH) #define XA_FLAGS_TRACK_FREE ((__force gfp_t)4U) +#define XA_FLAGS_ZERO_BUSY ((__force gfp_t)8U) #define XA_FLAGS_MARK(mark) ((__force gfp_t)((1U << __GFP_BITS_SHIFT) << \ (__force unsigned)(mark))) +/* ALLOC is for a normal 0-based alloc. ALLOC1 is for an 1-based alloc */ #define XA_FLAGS_ALLOC (XA_FLAGS_TRACK_FREE | XA_FLAGS_MARK(XA_FREE_MARK)) +#define XA_FLAGS_ALLOC1 (XA_FLAGS_TRACK_FREE | XA_FLAGS_ZERO_BUSY) /** * struct xarray - The anchor of the XArray. @@ -279,7 +282,7 @@ struct xarray { #define DEFINE_XARRAY(name) DEFINE_XARRAY_FLAGS(name, 0) /** - * DEFINE_XARRAY_ALLOC() - Define an XArray which can allocate IDs. + * DEFINE_XARRAY_ALLOC() - Define an XArray which allocates IDs starting at 0. * @name: A string that names your XArray. * * This is intended for file scope definitions of allocating XArrays. @@ -287,6 +290,15 @@ struct xarray { */ #define DEFINE_XARRAY_ALLOC(name) DEFINE_XARRAY_FLAGS(name, XA_FLAGS_ALLOC) +/** + * DEFINE_XARRAY_ALLOC1() - Define an XArray which allocates IDs starting at 1. + * @name: A string that names your XArray. + * + * This is intended for file scope definitions of allocating XArrays. + * See also DEFINE_XARRAY(). + */ +#define DEFINE_XARRAY_ALLOC1(name) DEFINE_XARRAY_FLAGS(name, XA_FLAGS_ALLOC1) + void *xa_load(struct xarray *, unsigned long index); void *xa_store(struct xarray *, unsigned long index, void *entry, gfp_t); void *xa_erase(struct xarray *, unsigned long index); -- cgit v1.2.3 From a3e4d3f97ec844de005a679585c04c5c03dfbdb6 Mon Sep 17 00:00:00 2001 From: Matthew Wilcox Date: Mon, 31 Dec 2018 10:41:01 -0500 Subject: XArray: Redesign xa_alloc API It was too easy to forget to initialise the start index. 
Add an xa_limit data structure which can be used to pass min & max, and define a couple of special values for common cases. Also add some more tests cribbed from the IDR test suite. Change the return value from -ENOSPC to -EBUSY to match xa_insert(). Signed-off-by: Matthew Wilcox --- include/linux/xarray.h | 80 ++++++++++++++++++++++++++++++++------------------ 1 file changed, 51 insertions(+), 29 deletions(-) (limited to 'include/linux') diff --git a/include/linux/xarray.h b/include/linux/xarray.h index 99dd0838b4ba..883bb958e462 100644 --- a/include/linux/xarray.h +++ b/include/linux/xarray.h @@ -200,6 +200,27 @@ static inline int xa_err(void *entry) return 0; } +/** + * struct xa_limit - Represents a range of IDs. + * @min: The lowest ID to allocate (inclusive). + * @max: The maximum ID to allocate (inclusive). + * + * This structure is used either directly or via the XA_LIMIT() macro + * to communicate the range of IDs that are valid for allocation. + * Two common ranges are predefined for you: + * * xa_limit_32b - [0 - UINT_MAX] + * * xa_limit_31b - [0 - INT_MAX] + */ +struct xa_limit { + u32 max; + u32 min; +}; + +#define XA_LIMIT(_min, _max) (struct xa_limit) { .min = _min, .max = _max } + +#define xa_limit_32b XA_LIMIT(0, UINT_MAX) +#define xa_limit_31b XA_LIMIT(0, INT_MAX) + typedef unsigned __bitwise xa_mark_t; #define XA_MARK_0 ((__force xa_mark_t)0U) #define XA_MARK_1 ((__force xa_mark_t)1U) @@ -476,7 +497,8 @@ void *__xa_store(struct xarray *, unsigned long index, void *entry, gfp_t); void *__xa_cmpxchg(struct xarray *, unsigned long index, void *old, void *entry, gfp_t); int __xa_insert(struct xarray *, unsigned long index, void *entry, gfp_t); -int __xa_alloc(struct xarray *, u32 *id, u32 max, void *entry, gfp_t); +int __must_check __xa_alloc(struct xarray *, u32 *id, void *entry, + struct xa_limit, gfp_t); int __xa_reserve(struct xarray *, unsigned long index, gfp_t); void __xa_set_mark(struct xarray *, unsigned long index, xa_mark_t); void __xa_clear_mark(struct xarray *, unsigned long index, xa_mark_t); @@ -753,26 +775,26 @@ static inline int xa_insert_irq(struct xarray *xa, unsigned long index, * xa_alloc() - Find somewhere to store this entry in the XArray. * @xa: XArray. * @id: Pointer to ID. - * @max: Maximum ID to allocate (inclusive). * @entry: New entry. + * @limit: Range of ID to allocate. * @gfp: Memory allocation flags. * - * Allocates an unused ID in the range specified by @id and @max. - * Updates the @id pointer with the index, then stores the entry at that - * index. A concurrent lookup will not see an uninitialised @id. + * Finds an empty entry in @xa between @limit.min and @limit.max, + * stores the index into the @id pointer, then stores the entry at + * that index. A concurrent lookup will not see an uninitialised @id. * - * Context: Process context. Takes and releases the xa_lock. May sleep if + * Context: Any context. Takes and releases the xa_lock. May sleep if * the @gfp flags permit. - * Return: 0 on success, -ENOMEM if memory allocation fails or -ENOSPC if - * there is no more space in the XArray. + * Return: 0 on success, -ENOMEM if memory could not be allocated or + * -EBUSY if there are no free entries in @limit. 
*/ -static inline int xa_alloc(struct xarray *xa, u32 *id, u32 max, void *entry, - gfp_t gfp) +static inline __must_check int xa_alloc(struct xarray *xa, u32 *id, + void *entry, struct xa_limit limit, gfp_t gfp) { int err; xa_lock(xa); - err = __xa_alloc(xa, id, max, entry, gfp); + err = __xa_alloc(xa, id, entry, limit, gfp); xa_unlock(xa); return err; @@ -782,26 +804,26 @@ static inline int xa_alloc(struct xarray *xa, u32 *id, u32 max, void *entry, * xa_alloc_bh() - Find somewhere to store this entry in the XArray. * @xa: XArray. * @id: Pointer to ID. - * @max: Maximum ID to allocate (inclusive). * @entry: New entry. + * @limit: Range of ID to allocate. * @gfp: Memory allocation flags. * - * Allocates an unused ID in the range specified by @id and @max. - * Updates the @id pointer with the index, then stores the entry at that - * index. A concurrent lookup will not see an uninitialised @id. + * Finds an empty entry in @xa between @limit.min and @limit.max, + * stores the index into the @id pointer, then stores the entry at + * that index. A concurrent lookup will not see an uninitialised @id. * * Context: Any context. Takes and releases the xa_lock while * disabling softirqs. May sleep if the @gfp flags permit. - * Return: 0 on success, -ENOMEM if memory allocation fails or -ENOSPC if - * there is no more space in the XArray. + * Return: 0 on success, -ENOMEM if memory could not be allocated or + * -EBUSY if there are no free entries in @limit. */ -static inline int xa_alloc_bh(struct xarray *xa, u32 *id, u32 max, void *entry, - gfp_t gfp) +static inline int __must_check xa_alloc_bh(struct xarray *xa, u32 *id, + void *entry, struct xa_limit limit, gfp_t gfp) { int err; xa_lock_bh(xa); - err = __xa_alloc(xa, id, max, entry, gfp); + err = __xa_alloc(xa, id, entry, limit, gfp); xa_unlock_bh(xa); return err; @@ -811,26 +833,26 @@ static inline int xa_alloc_bh(struct xarray *xa, u32 *id, u32 max, void *entry, * xa_alloc_irq() - Find somewhere to store this entry in the XArray. * @xa: XArray. * @id: Pointer to ID. - * @max: Maximum ID to allocate (inclusive). * @entry: New entry. + * @limit: Range of ID to allocate. * @gfp: Memory allocation flags. * - * Allocates an unused ID in the range specified by @id and @max. - * Updates the @id pointer with the index, then stores the entry at that - * index. A concurrent lookup will not see an uninitialised @id. + * Finds an empty entry in @xa between @limit.min and @limit.max, + * stores the index into the @id pointer, then stores the entry at + * that index. A concurrent lookup will not see an uninitialised @id. * * Context: Process context. Takes and releases the xa_lock while * disabling interrupts. May sleep if the @gfp flags permit. - * Return: 0 on success, -ENOMEM if memory allocation fails or -ENOSPC if - * there is no more space in the XArray. + * Return: 0 on success, -ENOMEM if memory could not be allocated or + * -EBUSY if there are no free entries in @limit. 
*/ -static inline int xa_alloc_irq(struct xarray *xa, u32 *id, u32 max, void *entry, - gfp_t gfp) +static inline int __must_check xa_alloc_irq(struct xarray *xa, u32 *id, + void *entry, struct xa_limit limit, gfp_t gfp) { int err; xa_lock_irq(xa); - err = __xa_alloc(xa, id, max, entry, gfp); + err = __xa_alloc(xa, id, entry, limit, gfp); xa_unlock_irq(xa); return err; -- cgit v1.2.3 From 2fa044e51a1f35d7b04cbde07ec513b0ba195e38 Mon Sep 17 00:00:00 2001 From: Matthew Wilcox Date: Tue, 6 Nov 2018 14:13:35 -0500 Subject: XArray: Add cyclic allocation This differs slightly from the IDR equivalent in five ways. 1. It can allocate up to UINT_MAX instead of being limited to INT_MAX, like xa_alloc(). Also like xa_alloc(), it will write to the 'id' pointer before placing the entry in the XArray. 2. The 'next' cursor is allocated separately from the XArray instead of being part of the IDR. This saves memory for all the users which do not use the cyclic allocation API and suits some users better. 3. It returns -EBUSY instead of -ENOSPC. 4. It will attempt to wrap back to the minimum value on memory allocation failure as well as on an -EBUSY error, assuming that a user would rather allocate a small ID than suffer an ID allocation failure. 5. It reports whether it has wrapped, which is important to some users. Signed-off-by: Matthew Wilcox --- include/linux/xarray.h | 102 +++++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 102 insertions(+) (limited to 'include/linux') diff --git a/include/linux/xarray.h b/include/linux/xarray.h index 883bb958e462..5ed6b462e754 100644 --- a/include/linux/xarray.h +++ b/include/linux/xarray.h @@ -242,6 +242,7 @@ enum xa_lock_type { #define XA_FLAGS_LOCK_BH ((__force gfp_t)XA_LOCK_BH) #define XA_FLAGS_TRACK_FREE ((__force gfp_t)4U) #define XA_FLAGS_ZERO_BUSY ((__force gfp_t)8U) +#define XA_FLAGS_ALLOC_WRAPPED ((__force gfp_t)16U) #define XA_FLAGS_MARK(mark) ((__force gfp_t)((1U << __GFP_BITS_SHIFT) << \ (__force unsigned)(mark))) @@ -499,6 +500,8 @@ void *__xa_cmpxchg(struct xarray *, unsigned long index, void *old, int __xa_insert(struct xarray *, unsigned long index, void *entry, gfp_t); int __must_check __xa_alloc(struct xarray *, u32 *id, void *entry, struct xa_limit, gfp_t); +int __must_check __xa_alloc_cyclic(struct xarray *, u32 *id, void *entry, + struct xa_limit, u32 *next, gfp_t); int __xa_reserve(struct xarray *, unsigned long index, gfp_t); void __xa_set_mark(struct xarray *, unsigned long index, xa_mark_t); void __xa_clear_mark(struct xarray *, unsigned long index, xa_mark_t); @@ -858,6 +861,105 @@ static inline int __must_check xa_alloc_irq(struct xarray *xa, u32 *id, return err; } +/** + * xa_alloc_cyclic() - Find somewhere to store this entry in the XArray. + * @xa: XArray. + * @id: Pointer to ID. + * @entry: New entry. + * @limit: Range of allocated ID. + * @next: Pointer to next ID to allocate. + * @gfp: Memory allocation flags. + * + * Finds an empty entry in @xa between @limit.min and @limit.max, + * stores the index into the @id pointer, then stores the entry at + * that index. A concurrent lookup will not see an uninitialised @id. + * The search for an empty entry will start at @next and will wrap + * around if necessary. + * + * Context: Any context. Takes and releases the xa_lock. May sleep if + * the @gfp flags permit. + * Return: 0 if the allocation succeeded without wrapping. 1 if the + * allocation succeeded after wrapping, -ENOMEM if memory could not be + * allocated or -EBUSY if there are no free entries in @limit. 
+ */ +static inline int xa_alloc_cyclic(struct xarray *xa, u32 *id, void *entry, + struct xa_limit limit, u32 *next, gfp_t gfp) +{ + int err; + + xa_lock(xa); + err = __xa_alloc_cyclic(xa, id, entry, limit, next, gfp); + xa_unlock(xa); + + return err; +} + +/** + * xa_alloc_cyclic_bh() - Find somewhere to store this entry in the XArray. + * @xa: XArray. + * @id: Pointer to ID. + * @entry: New entry. + * @limit: Range of allocated ID. + * @next: Pointer to next ID to allocate. + * @gfp: Memory allocation flags. + * + * Finds an empty entry in @xa between @limit.min and @limit.max, + * stores the index into the @id pointer, then stores the entry at + * that index. A concurrent lookup will not see an uninitialised @id. + * The search for an empty entry will start at @next and will wrap + * around if necessary. + * + * Context: Any context. Takes and releases the xa_lock while + * disabling softirqs. May sleep if the @gfp flags permit. + * Return: 0 if the allocation succeeded without wrapping. 1 if the + * allocation succeeded after wrapping, -ENOMEM if memory could not be + * allocated or -EBUSY if there are no free entries in @limit. + */ +static inline int xa_alloc_cyclic_bh(struct xarray *xa, u32 *id, void *entry, + struct xa_limit limit, u32 *next, gfp_t gfp) +{ + int err; + + xa_lock_bh(xa); + err = __xa_alloc_cyclic(xa, id, entry, limit, next, gfp); + xa_unlock_bh(xa); + + return err; +} + +/** + * xa_alloc_cyclic_irq() - Find somewhere to store this entry in the XArray. + * @xa: XArray. + * @id: Pointer to ID. + * @entry: New entry. + * @limit: Range of allocated ID. + * @next: Pointer to next ID to allocate. + * @gfp: Memory allocation flags. + * + * Finds an empty entry in @xa between @limit.min and @limit.max, + * stores the index into the @id pointer, then stores the entry at + * that index. A concurrent lookup will not see an uninitialised @id. + * The search for an empty entry will start at @next and will wrap + * around if necessary. + * + * Context: Process context. Takes and releases the xa_lock while + * disabling interrupts. May sleep if the @gfp flags permit. + * Return: 0 if the allocation succeeded without wrapping. 1 if the + * allocation succeeded after wrapping, -ENOMEM if memory could not be + * allocated or -EBUSY if there are no free entries in @limit. + */ +static inline int xa_alloc_cyclic_irq(struct xarray *xa, u32 *id, void *entry, + struct xa_limit limit, u32 *next, gfp_t gfp) +{ + int err; + + xa_lock_irq(xa); + err = __xa_alloc_cyclic(xa, id, entry, limit, next, gfp); + xa_unlock_irq(xa); + + return err; +} + /** * xa_reserve() - Reserve this index in the XArray. * @xa: XArray. -- cgit v1.2.3 From 60b8f0ddf1a927ef02141a6610fd52575134f821 Mon Sep 17 00:00:00 2001 From: Phil Edworthy Date: Mon, 3 Dec 2018 11:13:09 +0000 Subject: clk: Add (devm_)clk_get_optional() functions This adds clk_get_optional() and devm_clk_get_optional() functions to get optional clocks. They behave the same as (devm_)clk_get() except where there is no clock producer. In this case, instead of returning -ENOENT, the function returns NULL. This makes error checking simpler and allows clk_prepare_enable, etc to be called on the returned reference without additional checks. 
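A hedged usage sketch of the new helper (foo_probe() and the "bus" clock name are invented): a missing optional clock comes back as NULL, which the rest of the clk API already accepts as a no-op dummy clock, so no extra checks are needed:

#include <linux/clk.h>
#include <linux/err.h>
#include <linux/platform_device.h>

static int foo_probe(struct platform_device *pdev)
{
	struct clk *clk;
	int ret;

	clk = devm_clk_get_optional(&pdev->dev, "bus");
	if (IS_ERR(clk))
		return PTR_ERR(clk);	/* a real error, e.g. probe deferral */

	/* clk is NULL if no "bus" clock was described; that is fine here */
	ret = clk_prepare_enable(clk);
	if (ret)
		return ret;

	return 0;
}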
Signed-off-by: Phil Edworthy Reviewed-by: Andy Shevchenko Cc: Russell King [sboyd@kernel.org: Document in devres.txt] Signed-off-by: Stephen Boyd --- include/linux/clk.h | 36 ++++++++++++++++++++++++++++++++++++ 1 file changed, 36 insertions(+) (limited to 'include/linux') diff --git a/include/linux/clk.h b/include/linux/clk.h index a7773b5c0b9f..d8bc1a856b39 100644 --- a/include/linux/clk.h +++ b/include/linux/clk.h @@ -383,6 +383,17 @@ int __must_check devm_clk_bulk_get_all(struct device *dev, */ struct clk *devm_clk_get(struct device *dev, const char *id); +/** + * devm_clk_get_optional - lookup and obtain a managed reference to an optional + * clock producer. + * @dev: device for clock "consumer" + * @id: clock consumer ID + * + * Behaves the same as devm_clk_get() except where there is no clock producer. + * In this case, instead of returning -ENOENT, the function returns NULL. + */ +struct clk *devm_clk_get_optional(struct device *dev, const char *id); + /** * devm_get_clk_from_child - lookup and obtain a managed reference to a * clock producer from child node. @@ -718,6 +729,12 @@ static inline struct clk *devm_clk_get(struct device *dev, const char *id) return NULL; } +static inline struct clk *devm_clk_get_optional(struct device *dev, + const char *id) +{ + return NULL; +} + static inline int __must_check devm_clk_bulk_get(struct device *dev, int num_clks, struct clk_bulk_data *clks) { @@ -862,6 +879,25 @@ static inline void clk_bulk_disable_unprepare(int num_clks, clk_bulk_unprepare(num_clks, clks); } +/** + * clk_get_optional - lookup and obtain a reference to an optional clock + * producer. + * @dev: device for clock "consumer" + * @id: clock consumer ID + * + * Behaves the same as clk_get() except where there is no clock producer. In + * this case, instead of returning -ENOENT, the function returns NULL. + */ +static inline struct clk *clk_get_optional(struct device *dev, const char *id) +{ + struct clk *clk = clk_get(dev, id); + + if (clk == ERR_PTR(-ENOENT)) + return NULL; + + return clk; +} + #if defined(CONFIG_OF) && defined(CONFIG_COMMON_CLK) struct clk *of_clk_get(struct device_node *np, int index); struct clk *of_clk_get_by_name(struct device_node *np, const char *name); -- cgit v1.2.3 From 3eee6c7d119cd8563ad25898f94d6c1b514da548 Mon Sep 17 00:00:00 2001 From: Matti Vaittinen Date: Fri, 7 Dec 2018 13:09:39 +0200 Subject: clkdev: add managed clkdev lookup registration Clkdev registration lacks managed registration functions and it seems a few drivers do not drop clkdev lookups at exit. Add devm_clk_hw_register_clkdev and devm_clk_release_clkdev to ease lookup releasing at exit. 
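As a hedged example of the new managed lookup registration (the hw pointer and the "uart_ick"/"foo-uart.0" identifiers are invented), a provider can register a lookup whose lifetime follows the device, so nothing has to be dropped by hand on remove or error paths:

#include <linux/clkdev.h>
#include <linux/device.h>

static int foo_add_uart_lookup(struct device *dev, struct clk_hw *hw)
{
	/* released automatically when dev goes away */
	return devm_clk_hw_register_clkdev(dev, hw, "uart_ick", "foo-uart.0");
}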
Signed-off-by: Matti Vaittinen Signed-off-by: Stephen Boyd --- include/linux/clkdev.h | 4 ++++ 1 file changed, 4 insertions(+) (limited to 'include/linux') diff --git a/include/linux/clkdev.h b/include/linux/clkdev.h index 4890ff033220..ccb32af5848b 100644 --- a/include/linux/clkdev.h +++ b/include/linux/clkdev.h @@ -52,4 +52,8 @@ int clk_add_alias(const char *, const char *, const char *, struct device *); int clk_register_clkdev(struct clk *, const char *, const char *); int clk_hw_register_clkdev(struct clk_hw *, const char *, const char *); +int devm_clk_hw_register_clkdev(struct device *dev, struct clk_hw *hw, + const char *con_id, const char *dev_id); +void devm_clk_release_clkdev(struct device *dev, const char *con_id, + const char *dev_id); #endif -- cgit v1.2.3 From eca4205f9ec3bea2d5aad0493c19f5d2675a20fc Mon Sep 17 00:00:00 2001 From: Pablo Neira Ayuso Date: Sat, 2 Feb 2019 12:50:51 +0100 Subject: ethtool: add ethtool_rx_flow_spec to flow_rule structure translator This patch adds a function to translate the ethtool_rx_flow_spec structure to the flow_rule representation. This allows us to reuse code from the driver side given that both flower and ethtool_rx_flow interfaces use the same representation. This patch also includes support for the flow type flags FLOW_EXT, FLOW_MAC_EXT and FLOW_RSS. The ethtool_rx_flow_spec_input wrapper structure is used to convey the rss_context field, that is away from the ethtool_rx_flow_spec structure, and the ethtool_rx_flow_spec structure. Signed-off-by: Pablo Neira Ayuso Acked-by: Jiri Pirko Signed-off-by: David S. Miller --- include/linux/ethtool.h | 15 +++++++++++++++ 1 file changed, 15 insertions(+) (limited to 'include/linux') diff --git a/include/linux/ethtool.h b/include/linux/ethtool.h index afd9596ce636..19a8de5326fb 100644 --- a/include/linux/ethtool.h +++ b/include/linux/ethtool.h @@ -400,4 +400,19 @@ struct ethtool_ops { void (*get_ethtool_phy_stats)(struct net_device *, struct ethtool_stats *, u64 *); }; + +struct ethtool_rx_flow_rule { + struct flow_rule *rule; + unsigned long priv[0]; +}; + +struct ethtool_rx_flow_spec_input { + const struct ethtool_rx_flow_spec *fs; + u32 rss_ctx; +}; + +struct ethtool_rx_flow_rule * +ethtool_rx_flow_rule_create(const struct ethtool_rx_flow_spec_input *input); +void ethtool_rx_flow_rule_destroy(struct ethtool_rx_flow_rule *rule); + #endif /* _LINUX_ETHTOOL_H */ -- cgit v1.2.3 From d6abc5969463359c366d459247b90366fcd6f5c5 Mon Sep 17 00:00:00 2001 From: Florian Fainelli Date: Wed, 6 Feb 2019 09:45:35 -0800 Subject: net: Introduce ndo_get_port_parent_id() In preparation for getting rid of switchdev_ops, create a dedicated NDO operation for getting the port's parent identifier. There are essentially two classes of drivers that need to implement getting the port's parent ID which are VF/PF drivers with a built-in switch, and pure switchdev drivers such as mlxsw, ocelot, dsa etc. We introduce a helper function: dev_get_port_parent_id() which supports recursion into the lower devices to obtain the first port's parent ID. Convert the bridge, core and ipv4 multicast routing code to check for such ndo_get_port_parent_id() and call the helper function when valid before falling back to switchdev_port_attr_get(). This will allow us to convert all relevant drivers in one go instead of having to implement both switchdev_port_attr_get() and ndo_get_port_parent_id() operations, then get rid of switchdev_port_attr_get(). 
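A hedged sketch of a converted driver (the foo_* names and the switch_id field are invented): instead of implementing switchdev_port_attr_get(), the driver fills in a struct netdev_phys_item_id from the new NDO:

#include <linux/netdevice.h>
#include <linux/string.h>

struct foo_port {			/* hypothetical per-port private data */
	u8 switch_id[MAX_PHYS_ITEM_ID_LEN];
};

static int foo_get_port_parent_id(struct net_device *dev,
				  struct netdev_phys_item_id *ppid)
{
	struct foo_port *port = netdev_priv(dev);

	ppid->id_len = sizeof(port->switch_id);
	memcpy(ppid->id, port->switch_id, ppid->id_len);
	return 0;
}

static const struct net_device_ops foo_netdev_ops = {
	.ndo_get_port_parent_id	= foo_get_port_parent_id,
};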
Acked-by: Jiri Pirko Signed-off-by: Florian Fainelli Reviewed-by: Ido Schimmel Signed-off-by: David S. Miller --- include/linux/netdevice.h | 9 +++++++++ 1 file changed, 9 insertions(+) (limited to 'include/linux') diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h index ba57d0ba425e..1d95e634f3fe 100644 --- a/include/linux/netdevice.h +++ b/include/linux/netdevice.h @@ -1188,6 +1188,10 @@ struct dev_ifalias { * not implement this, it is assumed that the hw is not able to have * multiple net devices on single physical port. * + * int (*ndo_get_port_parent_id)(struct net_device *dev, + * struct netdev_phys_item_id *ppid) + * Called to get the parent ID of the physical port of this device. + * * void (*ndo_udp_tunnel_add)(struct net_device *dev, * struct udp_tunnel_info *ti); * Called by UDP tunnel to notify a driver about the UDP port and socket @@ -1412,6 +1416,8 @@ struct net_device_ops { bool new_carrier); int (*ndo_get_phys_port_id)(struct net_device *dev, struct netdev_phys_item_id *ppid); + int (*ndo_get_port_parent_id)(struct net_device *dev, + struct netdev_phys_item_id *ppid); int (*ndo_get_phys_port_name)(struct net_device *dev, char *name, size_t len); void (*ndo_udp_tunnel_add)(struct net_device *dev, @@ -3651,6 +3657,9 @@ int dev_get_phys_port_id(struct net_device *dev, struct netdev_phys_item_id *ppid); int dev_get_phys_port_name(struct net_device *dev, char *name, size_t len); +int dev_get_port_parent_id(struct net_device *dev, + struct netdev_phys_item_id *ppid, bool recurse); +bool netdev_port_same_parent_id(struct net_device *a, struct net_device *b); int dev_change_proto_down(struct net_device *dev, bool proto_down); struct sk_buff *validate_xmit_skb_list(struct sk_buff *skb, struct net_device *dev, bool *again); struct sk_buff *dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev, -- cgit v1.2.3 From 4d5f007eedb74d71a7bde2bff69b6a31ad8ab427 Mon Sep 17 00:00:00 2001 From: Arnd Bergmann Date: Wed, 2 Jan 2019 13:28:47 +0100 Subject: time: make adjtime compat handling available for 32 bit We want to reuse the compat_timex handling on 32-bit architectures the same way we are using the compat handling for timespec when moving to 64-bit time_t. Move all definitions related to compat_timex out of the compat code into the normal timekeeping code, along with a rename to old_timex32, corresponding to the timespec/timeval structures, and make it controlled by CONFIG_COMPAT_32BIT_TIME, which 32-bit architectures will then select. 
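With the structure and conversion helpers in generic code (see the time32.h hunk below), a 32-bit adjtimex entry point can be written once and shared. Roughly, as a sketch with the same shape as the existing compat handler:

#include <linux/compat.h>
#include <linux/syscalls.h>
#include <linux/time32.h>
#include <linux/timex.h>

COMPAT_SYSCALL_DEFINE1(adjtimex, struct old_timex32 __user *, utp)
{
	struct timex txc;
	int err, ret;

	/* Widen the 32-bit user layout into the kernel's struct timex. */
	err = get_old_timex32(&txc, utp);
	if (err)
		return err;

	ret = do_adjtimex(&txc);

	/* Copy the (possibly updated) fields back in the 32-bit layout. */
	err = put_old_timex32(utp, &txc);
	if (err)
		return err;

	return ret;	/* clock state: TIME_OK, TIME_INS, ... */
}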
Signed-off-by: Arnd Bergmann --- include/linux/compat.h | 35 ++--------------------------------- include/linux/time32.h | 32 +++++++++++++++++++++++++++++++- 2 files changed, 33 insertions(+), 34 deletions(-) (limited to 'include/linux') diff --git a/include/linux/compat.h b/include/linux/compat.h index 056be0d03722..657ca6abd855 100644 --- a/include/linux/compat.h +++ b/include/linux/compat.h @@ -132,37 +132,6 @@ struct compat_tms { compat_clock_t tms_cstime; }; -struct compat_timex { - compat_uint_t modes; - compat_long_t offset; - compat_long_t freq; - compat_long_t maxerror; - compat_long_t esterror; - compat_int_t status; - compat_long_t constant; - compat_long_t precision; - compat_long_t tolerance; - struct old_timeval32 time; - compat_long_t tick; - compat_long_t ppsfreq; - compat_long_t jitter; - compat_int_t shift; - compat_long_t stabil; - compat_long_t jitcnt; - compat_long_t calcnt; - compat_long_t errcnt; - compat_long_t stbcnt; - compat_int_t tai; - - compat_int_t:32; compat_int_t:32; compat_int_t:32; compat_int_t:32; - compat_int_t:32; compat_int_t:32; compat_int_t:32; compat_int_t:32; - compat_int_t:32; compat_int_t:32; compat_int_t:32; -}; - -struct timex; -int compat_get_timex(struct timex *, const struct compat_timex __user *); -int compat_put_timex(struct compat_timex __user *, const struct timex *); - #define _COMPAT_NSIG_WORDS (_COMPAT_NSIG / _COMPAT_NSIG_BPW) typedef struct { @@ -808,7 +777,7 @@ asmlinkage long compat_sys_gettimeofday(struct old_timeval32 __user *tv, struct timezone __user *tz); asmlinkage long compat_sys_settimeofday(struct old_timeval32 __user *tv, struct timezone __user *tz); -asmlinkage long compat_sys_adjtimex(struct compat_timex __user *utp); +asmlinkage long compat_sys_adjtimex(struct old_timex32 __user *utp); /* kernel/timer.c */ asmlinkage long compat_sys_sysinfo(struct compat_sysinfo __user *info); @@ -911,7 +880,7 @@ asmlinkage long compat_sys_open_by_handle_at(int mountdirfd, struct file_handle __user *handle, int flags); asmlinkage long compat_sys_clock_adjtime(clockid_t which_clock, - struct compat_timex __user *tp); + struct old_timex32 __user *tp); asmlinkage long compat_sys_sendmmsg(int fd, struct compat_mmsghdr __user *mmsg, unsigned vlen, unsigned int flags); asmlinkage ssize_t compat_sys_process_vm_readv(compat_pid_t pid, diff --git a/include/linux/time32.h b/include/linux/time32.h index 118b9977080c..820a22e2b98b 100644 --- a/include/linux/time32.h +++ b/include/linux/time32.h @@ -10,6 +10,7 @@ */ #include +#include #define TIME_T_MAX (time_t)((1UL << ((sizeof(time_t) << 3) - 1)) - 1) @@ -35,13 +36,42 @@ struct old_utimbuf32 { old_time32_t modtime; }; +struct old_timex32 { + u32 modes; + s32 offset; + s32 freq; + s32 maxerror; + s32 esterror; + s32 status; + s32 constant; + s32 precision; + s32 tolerance; + struct old_timeval32 time; + s32 tick; + s32 ppsfreq; + s32 jitter; + s32 shift; + s32 stabil; + s32 jitcnt; + s32 calcnt; + s32 errcnt; + s32 stbcnt; + s32 tai; + + s32:32; s32:32; s32:32; s32:32; + s32:32; s32:32; s32:32; s32:32; + s32:32; s32:32; s32:32; +}; + extern int get_old_timespec32(struct timespec64 *, const void __user *); extern int put_old_timespec32(const struct timespec64 *, void __user *); extern int get_old_itimerspec32(struct itimerspec64 *its, const struct old_itimerspec32 __user *uits); extern int put_old_itimerspec32(const struct itimerspec64 *its, struct old_itimerspec32 __user *uits); - +struct timex; +int get_old_timex32(struct timex *, const struct old_timex32 __user *); +int put_old_timex32(struct 
old_timex32 __user *, const struct timex *); #if __BITS_PER_LONG == 64 -- cgit v1.2.3 From 2c620ff93d9fbd5d644760d4c21d389078ec1080 Mon Sep 17 00:00:00 2001 From: Deepa Dinamani Date: Mon, 2 Jul 2018 22:44:20 -0700 Subject: time: Add struct __kernel_timex struct timex uses struct timeval internally. struct timeval is not y2038 safe. Introduce a new UAPI type struct __kernel_timex that is y2038 safe. struct __kernel_timex uses a timeval type that is similar to struct __kernel_timespec which preserves the same structure size across 32 bit and 64 bit ABIs. struct __kernel_timex also restructures other members of the structure to make the structure the same on 64 bit and 32 bit architectures. Note that struct __kernel_timex is the same as struct timex on a 64 bit architecture. The above solution is similar to other new y2038 syscalls that are being introduced: both 32 bit and 64 bit ABIs have a common entry, and the compat entry supports the old 32 bit syscall interface. Alternatives considered were: 1. Add new time type to struct timex that makes use of padded bits. This time type could be based on the struct __kernel_timespec. modes will use a flag to notify which time structure should be used internally. This needs some application level changes on both 64 bit and 32 bit architectures. Although 64 bit machines could continue to use the older timeval structure without any changes. 2. Add a new u8 type to struct timex that makes use of padded bits. This can be used to save higher order tv_sec bits. modes will use a flag to notify presence of such a type. This will need some application level changes on 32 bit architectures. 3. Add a new compat_timex structure that differs in only the size of the time type; keep rest of struct timex the same. This requires extra syscalls to manage all 3 cases on 64 bit architectures. This will not need any application level changes but will add more complexity from kernel side. Signed-off-by: Deepa Dinamani Signed-off-by: Arnd Bergmann --- include/linux/timex.h | 7 +++++++ 1 file changed, 7 insertions(+) (limited to 'include/linux') diff --git a/include/linux/timex.h b/include/linux/timex.h index 39c25dbebfe8..7f40e9e42ecc 100644 --- a/include/linux/timex.h +++ b/include/linux/timex.h @@ -53,6 +53,13 @@ #ifndef _LINUX_TIMEX_H #define _LINUX_TIMEX_H +/* CONFIG_64BIT_TIME enables new 64 bit time_t syscalls in the compat path + * and 32-bit emulation. + */ +#ifndef CONFIG_64BIT_TIME +#define __kernel_timex timex +#endif + #include #define ADJ_ADJTIME 0x8000 /* switch between adjtime/adjtimex modes */ -- cgit v1.2.3 From 50b93f30f6d8672f9ec80e90af94d733f11a20e0 Mon Sep 17 00:00:00 2001 From: Arnd Bergmann Date: Tue, 1 Jan 2019 17:34:39 +0100 Subject: time: fix sys_timer_settime prototype A small typo has crept into the y2038 conversion of the timer_settime system call. So far this was completely harmless, but once we start using the new version, this has to be fixed. 
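As an illustration of what the __kernel_timex patch above buys: the embedded time value can use 64-bit fields for both members, so the structure has the same size and layout on 32-bit and 64-bit ABIs. This is a sketch of the idea, not the exact UAPI definition:

/* Sketch only: a y2038-safe, ABI-invariant timeval as used inside __kernel_timex. */
struct kernel_timex_timeval_sketch {
	long long tv_sec;	/* 64-bit seconds, safe beyond 2038 */
	long long tv_usec;	/* widened so 32-bit and 64-bit layouts match */
};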
Fixes: 6ff847350702 ("time: Change types to new y2038 safe __kernel_itimerspec") Signed-off-by: Arnd Bergmann --- include/linux/syscalls.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'include/linux') diff --git a/include/linux/syscalls.h b/include/linux/syscalls.h index 938d8908b9e0..baa4b70b02d3 100644 --- a/include/linux/syscalls.h +++ b/include/linux/syscalls.h @@ -591,7 +591,7 @@ asmlinkage long sys_timer_gettime(timer_t timer_id, asmlinkage long sys_timer_getoverrun(timer_t timer_id); asmlinkage long sys_timer_settime(timer_t timer_id, int flags, const struct __kernel_itimerspec __user *new_setting, - struct itimerspec __user *old_setting); + struct __kernel_itimerspec __user *old_setting); asmlinkage long sys_timer_delete(timer_t timer_id); asmlinkage long sys_clock_settime(clockid_t which_clock, const struct __kernel_timespec __user *tp); -- cgit v1.2.3 From 1a596398a3d75f966b75f428e992cf1f242f9a5b Mon Sep 17 00:00:00 2001 From: Arnd Bergmann Date: Thu, 3 Jan 2019 21:12:39 +0100 Subject: sparc64: add custom adjtimex/clock_adjtime functions sparc64 is the only architecture on Linux that has a 'timeval' definition with a 32-bit tv_usec but a 64-bit tv_sec. This causes problems for sparc32 compat mode when we convert it to use the new __kernel_timex type that has the same layout as all other 64-bit architectures. To avoid adding sparc64 specific code into the generic adjtimex implementation, this adds a wrapper in the sparc64 system call handling that converts the sparc64 'timex' into the new '__kernel_timex'. At this point, the two structures are defined to be identical, but that will change in the next step once we convert sparc32. Signed-off-by: Arnd Bergmann --- include/linux/timex.h | 2 ++ 1 file changed, 2 insertions(+) (limited to 'include/linux') diff --git a/include/linux/timex.h b/include/linux/timex.h index 7f40e9e42ecc..a15e6aeb8d49 100644 --- a/include/linux/timex.h +++ b/include/linux/timex.h @@ -159,6 +159,8 @@ extern unsigned long tick_nsec; /* SHIFTED_HZ period (nsec) */ #define NTP_INTERVAL_LENGTH (NSEC_PER_SEC/NTP_INTERVAL_FREQ) extern int do_adjtimex(struct timex *); +extern int do_clock_adjtime(const clockid_t which_clock, struct timex * ktx); + extern void hardpps(const struct timespec64 *, const struct timespec64 *); int read_current_timer(unsigned long *timer_val); -- cgit v1.2.3 From ead25417f82ed7f8a21da4dcefc768169f7da884 Mon Sep 17 00:00:00 2001 From: Deepa Dinamani Date: Mon, 2 Jul 2018 22:44:21 -0700 Subject: timex: use __kernel_timex internally struct timex is not y2038 safe. Replace all uses of timex with y2038 safe __kernel_timex. Note that struct __kernel_timex is an ABI interface definition. We could define a new structure based on __kernel_timex that is only available internally instead. Right now, there isn't a strong motivation for this as the structure is isolated to a few defined struct timex interfaces and such a structure would be exactly the same as struct timex. 
The patch was generated by the following coccinelle script: virtual patch @depends on patch forall@ identifier ts; expression e; @@ ( - struct timex ts; + struct __kernel_timex ts; | - struct timex ts = {}; + struct __kernel_timex ts = {}; | - struct timex ts = e; + struct __kernel_timex ts = e; | - struct timex *ts; + struct __kernel_timex *ts; | (memset \| copy_from_user \| copy_to_user \)(..., - sizeof(struct timex)) + sizeof(struct __kernel_timex)) ) @depends on patch forall@ identifier ts; identifier fn; @@ fn(..., - struct timex *ts, + struct __kernel_timex *ts, ...) { ... } @depends on patch forall@ identifier ts; identifier fn; @@ fn(..., - struct timex *ts) { + struct __kernel_timex *ts) { ... } Signed-off-by: Deepa Dinamani Cc: linux-alpha@vger.kernel.org Cc: netdev@vger.kernel.org Signed-off-by: Arnd Bergmann --- include/linux/posix-clock.h | 2 +- include/linux/time32.h | 6 +++--- include/linux/timex.h | 4 ++-- 3 files changed, 6 insertions(+), 6 deletions(-) (limited to 'include/linux') diff --git a/include/linux/posix-clock.h b/include/linux/posix-clock.h index 3a3bc71017d5..18674d7d5b1c 100644 --- a/include/linux/posix-clock.h +++ b/include/linux/posix-clock.h @@ -51,7 +51,7 @@ struct posix_clock; struct posix_clock_operations { struct module *owner; - int (*clock_adjtime)(struct posix_clock *pc, struct timex *tx); + int (*clock_adjtime)(struct posix_clock *pc, struct __kernel_timex *tx); int (*clock_gettime)(struct posix_clock *pc, struct timespec64 *ts); diff --git a/include/linux/time32.h b/include/linux/time32.h index 820a22e2b98b..0a1f302a1753 100644 --- a/include/linux/time32.h +++ b/include/linux/time32.h @@ -69,9 +69,9 @@ extern int get_old_itimerspec32(struct itimerspec64 *its, const struct old_itimerspec32 __user *uits); extern int put_old_itimerspec32(const struct itimerspec64 *its, struct old_itimerspec32 __user *uits); -struct timex; -int get_old_timex32(struct timex *, const struct old_timex32 __user *); -int put_old_timex32(struct old_timex32 __user *, const struct timex *); +struct __kernel_timex; +int get_old_timex32(struct __kernel_timex *, const struct old_timex32 __user *); +int put_old_timex32(struct old_timex32 __user *, const struct __kernel_timex *); #if __BITS_PER_LONG == 64 diff --git a/include/linux/timex.h b/include/linux/timex.h index a15e6aeb8d49..4aff9f0d1367 100644 --- a/include/linux/timex.h +++ b/include/linux/timex.h @@ -158,8 +158,8 @@ extern unsigned long tick_nsec; /* SHIFTED_HZ period (nsec) */ #define NTP_INTERVAL_FREQ (HZ) #define NTP_INTERVAL_LENGTH (NSEC_PER_SEC/NTP_INTERVAL_FREQ) -extern int do_adjtimex(struct timex *); -extern int do_clock_adjtime(const clockid_t which_clock, struct timex * ktx); +extern int do_adjtimex(struct __kernel_timex *); +extern int do_clock_adjtime(const clockid_t which_clock, struct __kernel_timex * ktx); extern void hardpps(const struct timespec64 *, const struct timespec64 *); -- cgit v1.2.3 From 3876ced476c8ec17265d1739467e726ada88b660 Mon Sep 17 00:00:00 2001 From: Deepa Dinamani Date: Mon, 2 Jul 2018 22:44:22 -0700 Subject: timex: change syscalls to use struct __kernel_timex struct timex is not y2038 safe. Switch all the syscall apis to use y2038 safe __kernel_timex. Note that sys_adjtimex() does not have a y2038 safe solution. C libraries can implement it by calling clock_adjtime(CLOCK_REALTIME, ...). 
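For example, a C library could keep offering adjtimex() as a thin wrapper over the new call; a sketch (glibc's real implementation differs in detail):

#include <sys/timex.h>
#include <time.h>

/* Sketch of a libc shim: adjtimex() has no y2038-safe syscall of its own,
 * but the same operation is available as clock_adjtime() on CLOCK_REALTIME. */
int adjtimex(struct timex *buf)
{
	return clock_adjtime(CLOCK_REALTIME, buf);
}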
Signed-off-by: Deepa Dinamani Signed-off-by: Arnd Bergmann --- include/linux/syscalls.h | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) (limited to 'include/linux') diff --git a/include/linux/syscalls.h b/include/linux/syscalls.h index baa4b70b02d3..09330d5bda0c 100644 --- a/include/linux/syscalls.h +++ b/include/linux/syscalls.h @@ -54,7 +54,7 @@ struct __sysctl_args; struct sysinfo; struct timespec; struct timeval; -struct timex; +struct __kernel_timex; struct timezone; struct tms; struct utimbuf; @@ -695,7 +695,7 @@ asmlinkage long sys_gettimeofday(struct timeval __user *tv, struct timezone __user *tz); asmlinkage long sys_settimeofday(struct timeval __user *tv, struct timezone __user *tz); -asmlinkage long sys_adjtimex(struct timex __user *txc_p); +asmlinkage long sys_adjtimex(struct __kernel_timex __user *txc_p); /* kernel/timer.c */ asmlinkage long sys_getpid(void); @@ -870,7 +870,7 @@ asmlinkage long sys_open_by_handle_at(int mountdirfd, struct file_handle __user *handle, int flags); asmlinkage long sys_clock_adjtime(clockid_t which_clock, - struct timex __user *tx); + struct __kernel_timex __user *tx); asmlinkage long sys_syncfs(int fd); asmlinkage long sys_setns(int fd, int nstype); asmlinkage long sys_sendmmsg(int fd, struct mmsghdr __user *msg, -- cgit v1.2.3 From 8dabe7245bbc134f2cfcc12cde75c019dab924cc Mon Sep 17 00:00:00 2001 From: Arnd Bergmann Date: Mon, 7 Jan 2019 00:33:08 +0100 Subject: y2038: syscalls: rename y2038 compat syscalls A lot of system calls that pass a time_t somewhere have an implementation using a COMPAT_SYSCALL_DEFINEx() on 64-bit architectures, and have been reworked so that this implementation can now be used on 32-bit architectures as well. The missing step is to redefine them using the regular SYSCALL_DEFINEx() to get them out of the compat namespace and make it possible to build them on 32-bit architectures. Any system call that ends in 'time' gets a '32' suffix on its name for that version, while the others get a '_time32' suffix, to distinguish them from the normal version, which takes a 64-bit time argument in the future. In this step, only 64-bit architectures are changed, doing this rename first lets us avoid touching the 32-bit architectures twice. 
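For a concrete sense of the convention, here is roughly how one affected pair of prototypes ends up (sketch; the real declarations are in the syscalls.h hunk below):

#include <linux/linkage.h>
#include <linux/time32.h>
#include <linux/types.h>

/* Before: only built as part of a 64-bit kernel's compat layer. */
asmlinkage long compat_sys_clock_settime(clockid_t which_clock,
					 struct old_timespec32 __user *tp);

/* After: native declarations that 32-bit architectures can also build.
 * Names ending in "time" gain a plain "32" suffix, the rest gain "_time32". */
asmlinkage long sys_clock_settime32(clockid_t which_clock,
				    struct old_timespec32 __user *tp);
asmlinkage long sys_sched_rr_get_interval_time32(pid_t pid,
						 struct old_timespec32 __user *interval);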
Acked-by: Catalin Marinas Signed-off-by: Arnd Bergmann --- include/linux/compat.h | 73 +++--------------------------------------------- include/linux/syscalls.h | 57 +++++++++++++++++++++++++++++++++++++ 2 files changed, 61 insertions(+), 69 deletions(-) (limited to 'include/linux') diff --git a/include/linux/compat.h b/include/linux/compat.h index 657ca6abd855..ebddcb6cfcf8 100644 --- a/include/linux/compat.h +++ b/include/linux/compat.h @@ -520,11 +520,6 @@ int __compat_save_altstack(compat_stack_t __user *, unsigned long); asmlinkage long compat_sys_io_setup(unsigned nr_reqs, u32 __user *ctx32p); asmlinkage long compat_sys_io_submit(compat_aio_context_t ctx_id, int nr, u32 __user *iocb); -asmlinkage long compat_sys_io_getevents(compat_aio_context_t ctx_id, - compat_long_t min_nr, - compat_long_t nr, - struct io_event __user *events, - struct old_timespec32 __user *timeout); asmlinkage long compat_sys_io_pgetevents(compat_aio_context_t ctx_id, compat_long_t min_nr, compat_long_t nr, @@ -617,7 +612,7 @@ asmlinkage long compat_sys_sendfile64(int out_fd, int in_fd, compat_loff_t __user *offset, compat_size_t count); /* fs/select.c */ -asmlinkage long compat_sys_pselect6(int n, compat_ulong_t __user *inp, +asmlinkage long compat_sys_pselect6_time32(int n, compat_ulong_t __user *inp, compat_ulong_t __user *outp, compat_ulong_t __user *exp, struct old_timespec32 __user *tsp, @@ -627,7 +622,7 @@ asmlinkage long compat_sys_pselect6_time64(int n, compat_ulong_t __user *inp, compat_ulong_t __user *exp, struct __kernel_timespec __user *tsp, void __user *sig); -asmlinkage long compat_sys_ppoll(struct pollfd __user *ufds, +asmlinkage long compat_sys_ppoll_time32(struct pollfd __user *ufds, unsigned int nfds, struct old_timespec32 __user *tsp, const compat_sigset_t __user *sigmask, @@ -657,19 +652,6 @@ asmlinkage long compat_sys_newfstat(unsigned int fd, /* fs/sync.c: No generic prototype for sync_file_range and sync_file_range2 */ -/* fs/timerfd.c */ -asmlinkage long compat_sys_timerfd_gettime(int ufd, - struct old_itimerspec32 __user *otmr); -asmlinkage long compat_sys_timerfd_settime(int ufd, int flags, - const struct old_itimerspec32 __user *utmr, - struct old_itimerspec32 __user *otmr); - -/* fs/utimes.c */ -asmlinkage long compat_sys_utimensat(unsigned int dfd, - const char __user *filename, - struct old_timespec32 __user *t, - int flags); - /* kernel/exit.c */ asmlinkage long compat_sys_waitid(int, compat_pid_t, struct compat_siginfo __user *, int, @@ -678,9 +660,6 @@ asmlinkage long compat_sys_waitid(int, compat_pid_t, /* kernel/futex.c */ -asmlinkage long compat_sys_futex(u32 __user *uaddr, int op, u32 val, - struct old_timespec32 __user *utime, u32 __user *uaddr2, - u32 val3); asmlinkage long compat_sys_set_robust_list(struct compat_robust_list_head __user *head, compat_size_t len); @@ -688,10 +667,6 @@ asmlinkage long compat_sys_get_robust_list(int pid, compat_uptr_t __user *head_ptr, compat_size_t __user *len_ptr); -/* kernel/hrtimer.c */ -asmlinkage long compat_sys_nanosleep(struct old_timespec32 __user *rqtp, - struct old_timespec32 __user *rmtp); - /* kernel/itimer.c */ asmlinkage long compat_sys_getitimer(int which, struct compat_itimerval __user *it); @@ -709,20 +684,6 @@ asmlinkage long compat_sys_kexec_load(compat_ulong_t entry, asmlinkage long compat_sys_timer_create(clockid_t which_clock, struct compat_sigevent __user *timer_event_spec, timer_t __user *created_timer_id); -asmlinkage long compat_sys_timer_gettime(timer_t timer_id, - struct old_itimerspec32 __user *setting); 
-asmlinkage long compat_sys_timer_settime(timer_t timer_id, int flags, - struct old_itimerspec32 __user *new, - struct old_itimerspec32 __user *old); -asmlinkage long compat_sys_clock_settime(clockid_t which_clock, - struct old_timespec32 __user *tp); -asmlinkage long compat_sys_clock_gettime(clockid_t which_clock, - struct old_timespec32 __user *tp); -asmlinkage long compat_sys_clock_getres(clockid_t which_clock, - struct old_timespec32 __user *tp); -asmlinkage long compat_sys_clock_nanosleep(clockid_t which_clock, int flags, - struct old_timespec32 __user *rqtp, - struct old_timespec32 __user *rmtp); /* kernel/ptrace.c */ asmlinkage long compat_sys_ptrace(compat_long_t request, compat_long_t pid, @@ -735,8 +696,6 @@ asmlinkage long compat_sys_sched_setaffinity(compat_pid_t pid, asmlinkage long compat_sys_sched_getaffinity(compat_pid_t pid, unsigned int len, compat_ulong_t __user *user_mask_ptr); -asmlinkage long compat_sys_sched_rr_get_interval(compat_pid_t pid, - struct old_timespec32 __user *interval); /* kernel/signal.c */ asmlinkage long compat_sys_sigaltstack(const compat_stack_t __user *uss_ptr, @@ -754,7 +713,7 @@ asmlinkage long compat_sys_rt_sigprocmask(int how, compat_sigset_t __user *set, compat_size_t sigsetsize); asmlinkage long compat_sys_rt_sigpending(compat_sigset_t __user *uset, compat_size_t sigsetsize); -asmlinkage long compat_sys_rt_sigtimedwait(compat_sigset_t __user *uthese, +asmlinkage long compat_sys_rt_sigtimedwait_time32(compat_sigset_t __user *uthese, struct compat_siginfo __user *uinfo, struct old_timespec32 __user *uts, compat_size_t sigsetsize); asmlinkage long compat_sys_rt_sigtimedwait_time64(compat_sigset_t __user *uthese, @@ -777,7 +736,6 @@ asmlinkage long compat_sys_gettimeofday(struct old_timeval32 __user *tv, struct timezone __user *tz); asmlinkage long compat_sys_settimeofday(struct old_timeval32 __user *tv, struct timezone __user *tz); -asmlinkage long compat_sys_adjtimex(struct old_timex32 __user *utp); /* kernel/timer.c */ asmlinkage long compat_sys_sysinfo(struct compat_sysinfo __user *info); @@ -786,14 +744,6 @@ asmlinkage long compat_sys_sysinfo(struct compat_sysinfo __user *info); asmlinkage long compat_sys_mq_open(const char __user *u_name, int oflag, compat_mode_t mode, struct compat_mq_attr __user *u_attr); -asmlinkage long compat_sys_mq_timedsend(mqd_t mqdes, - const char __user *u_msg_ptr, - compat_size_t msg_len, unsigned int msg_prio, - const struct old_timespec32 __user *u_abs_timeout); -asmlinkage ssize_t compat_sys_mq_timedreceive(mqd_t mqdes, - char __user *u_msg_ptr, - compat_size_t msg_len, unsigned int __user *u_msg_prio, - const struct old_timespec32 __user *u_abs_timeout); asmlinkage long compat_sys_mq_notify(mqd_t mqdes, const struct compat_sigevent __user *u_notification); asmlinkage long compat_sys_mq_getsetattr(mqd_t mqdes, @@ -809,8 +759,6 @@ asmlinkage long compat_sys_msgsnd(int msqid, compat_uptr_t msgp, /* ipc/sem.c */ asmlinkage long compat_sys_semctl(int semid, int semnum, int cmd, int arg); -asmlinkage long compat_sys_semtimedop(int semid, struct sembuf __user *tsems, - unsigned nsems, const struct old_timespec32 __user *timeout); /* ipc/shm.c */ asmlinkage long compat_sys_shmctl(int first, int second, void __user *uptr); @@ -868,7 +816,7 @@ asmlinkage long compat_sys_rt_tgsigqueueinfo(compat_pid_t tgid, asmlinkage long compat_sys_recvmmsg_time64(int fd, struct compat_mmsghdr __user *mmsg, unsigned vlen, unsigned int flags, struct __kernel_timespec __user *timeout); -asmlinkage long compat_sys_recvmmsg(int fd, 
struct compat_mmsghdr __user *mmsg, +asmlinkage long compat_sys_recvmmsg_time32(int fd, struct compat_mmsghdr __user *mmsg, unsigned vlen, unsigned int flags, struct old_timespec32 __user *timeout); asmlinkage long compat_sys_wait4(compat_pid_t pid, @@ -879,8 +827,6 @@ asmlinkage long compat_sys_fanotify_mark(int, unsigned int, __u32, __u32, asmlinkage long compat_sys_open_by_handle_at(int mountdirfd, struct file_handle __user *handle, int flags); -asmlinkage long compat_sys_clock_adjtime(clockid_t which_clock, - struct old_timex32 __user *tp); asmlinkage long compat_sys_sendmmsg(int fd, struct compat_mmsghdr __user *mmsg, unsigned vlen, unsigned int flags); asmlinkage ssize_t compat_sys_process_vm_readv(compat_pid_t pid, @@ -921,8 +867,6 @@ asmlinkage long compat_sys_pwritev64v2(unsigned long fd, /* __ARCH_WANT_SYSCALL_NO_AT */ asmlinkage long compat_sys_open(const char __user *filename, int flags, umode_t mode); -asmlinkage long compat_sys_utimes(const char __user *filename, - struct old_timeval32 __user *t); /* __ARCH_WANT_SYSCALL_NO_FLAGS */ asmlinkage long compat_sys_signalfd(int ufd, @@ -936,12 +880,6 @@ asmlinkage long compat_sys_newlstat(const char __user *filename, struct compat_stat __user *statbuf); /* __ARCH_WANT_SYSCALL_DEPRECATED */ -asmlinkage long compat_sys_time(old_time32_t __user *tloc); -asmlinkage long compat_sys_utime(const char __user *filename, - struct old_utimbuf32 __user *t); -asmlinkage long compat_sys_futimesat(unsigned int dfd, - const char __user *filename, - struct old_timeval32 __user *t); asmlinkage long compat_sys_select(int n, compat_ulong_t __user *inp, compat_ulong_t __user *outp, compat_ulong_t __user *exp, struct old_timeval32 __user *tvp); @@ -976,9 +914,6 @@ asmlinkage long compat_sys_sigaction(int sig, struct compat_old_sigaction __user *oact); #endif -/* obsolete: kernel/time/time.c */ -asmlinkage long compat_sys_stime(old_time32_t __user *tptr); - /* obsolete: net/socket.c */ asmlinkage long compat_sys_socketcall(int call, u32 __user *args); diff --git a/include/linux/syscalls.h b/include/linux/syscalls.h index 09330d5bda0c..94369f5bd8e5 100644 --- a/include/linux/syscalls.h +++ b/include/linux/syscalls.h @@ -297,6 +297,11 @@ asmlinkage long sys_io_getevents(aio_context_t ctx_id, long nr, struct io_event __user *events, struct __kernel_timespec __user *timeout); +asmlinkage long sys_io_getevents_time32(__u32 ctx_id, + __s32 min_nr, + __s32 nr, + struct io_event __user *events, + struct old_timespec32 __user *timeout); asmlinkage long sys_io_pgetevents(aio_context_t ctx_id, long min_nr, long nr, @@ -522,11 +527,19 @@ asmlinkage long sys_timerfd_settime(int ufd, int flags, const struct __kernel_itimerspec __user *utmr, struct __kernel_itimerspec __user *otmr); asmlinkage long sys_timerfd_gettime(int ufd, struct __kernel_itimerspec __user *otmr); +asmlinkage long sys_timerfd_gettime32(int ufd, + struct old_itimerspec32 __user *otmr); +asmlinkage long sys_timerfd_settime32(int ufd, int flags, + const struct old_itimerspec32 __user *utmr, + struct old_itimerspec32 __user *otmr); /* fs/utimes.c */ asmlinkage long sys_utimensat(int dfd, const char __user *filename, struct __kernel_timespec __user *utimes, int flags); +asmlinkage long sys_utimensat_time32(unsigned int dfd, + const char __user *filename, + struct old_timespec32 __user *t, int flags); /* kernel/acct.c */ asmlinkage long sys_acct(const char __user *name); @@ -555,6 +568,9 @@ asmlinkage long sys_unshare(unsigned long unshare_flags); asmlinkage long sys_futex(u32 __user *uaddr, int op, u32 
val, struct __kernel_timespec __user *utime, u32 __user *uaddr2, u32 val3); +asmlinkage long sys_futex_time32(u32 __user *uaddr, int op, u32 val, + struct old_timespec32 __user *utime, u32 __user *uaddr2, + u32 val3); asmlinkage long sys_get_robust_list(int pid, struct robust_list_head __user * __user *head_ptr, size_t __user *len_ptr); @@ -564,6 +580,8 @@ asmlinkage long sys_set_robust_list(struct robust_list_head __user *head, /* kernel/hrtimer.c */ asmlinkage long sys_nanosleep(struct __kernel_timespec __user *rqtp, struct __kernel_timespec __user *rmtp); +asmlinkage long sys_nanosleep_time32(struct old_timespec32 __user *rqtp, + struct old_timespec32 __user *rmtp); /* kernel/itimer.c */ asmlinkage long sys_getitimer(int which, struct itimerval __user *value); @@ -602,6 +620,20 @@ asmlinkage long sys_clock_getres(clockid_t which_clock, asmlinkage long sys_clock_nanosleep(clockid_t which_clock, int flags, const struct __kernel_timespec __user *rqtp, struct __kernel_timespec __user *rmtp); +asmlinkage long sys_timer_gettime32(timer_t timer_id, + struct old_itimerspec32 __user *setting); +asmlinkage long sys_timer_settime32(timer_t timer_id, int flags, + struct old_itimerspec32 __user *new, + struct old_itimerspec32 __user *old); +asmlinkage long sys_clock_settime32(clockid_t which_clock, + struct old_timespec32 __user *tp); +asmlinkage long sys_clock_gettime32(clockid_t which_clock, + struct old_timespec32 __user *tp); +asmlinkage long sys_clock_getres_time32(clockid_t which_clock, + struct old_timespec32 __user *tp); +asmlinkage long sys_clock_nanosleep_time32(clockid_t which_clock, int flags, + struct old_timespec32 __user *rqtp, + struct old_timespec32 __user *rmtp); /* kernel/printk.c */ asmlinkage long sys_syslog(int type, char __user *buf, int len); @@ -627,6 +659,8 @@ asmlinkage long sys_sched_get_priority_max(int policy); asmlinkage long sys_sched_get_priority_min(int policy); asmlinkage long sys_sched_rr_get_interval(pid_t pid, struct __kernel_timespec __user *interval); +asmlinkage long sys_sched_rr_get_interval_time32(pid_t pid, + struct old_timespec32 __user *interval); /* kernel/signal.c */ asmlinkage long sys_restart_syscall(void); @@ -696,6 +730,7 @@ asmlinkage long sys_gettimeofday(struct timeval __user *tv, asmlinkage long sys_settimeofday(struct timeval __user *tv, struct timezone __user *tz); asmlinkage long sys_adjtimex(struct __kernel_timex __user *txc_p); +asmlinkage long sys_adjtimex_time32(struct old_timex32 __user *txc_p); /* kernel/timer.c */ asmlinkage long sys_getpid(void); @@ -714,6 +749,14 @@ asmlinkage long sys_mq_timedsend(mqd_t mqdes, const char __user *msg_ptr, size_t asmlinkage long sys_mq_timedreceive(mqd_t mqdes, char __user *msg_ptr, size_t msg_len, unsigned int __user *msg_prio, const struct __kernel_timespec __user *abs_timeout); asmlinkage long sys_mq_notify(mqd_t mqdes, const struct sigevent __user *notification); asmlinkage long sys_mq_getsetattr(mqd_t mqdes, const struct mq_attr __user *mqstat, struct mq_attr __user *omqstat); +asmlinkage long sys_mq_timedreceive_time32(mqd_t mqdes, + char __user *u_msg_ptr, + unsigned int msg_len, unsigned int __user *u_msg_prio, + const struct old_timespec32 __user *u_abs_timeout); +asmlinkage long sys_mq_timedsend_time32(mqd_t mqdes, + const char __user *u_msg_ptr, + unsigned int msg_len, unsigned int msg_prio, + const struct old_timespec32 __user *u_abs_timeout); /* ipc/msg.c */ asmlinkage long sys_msgget(key_t key, int msgflg); @@ -731,6 +774,9 @@ asmlinkage long sys_old_semctl(int semid, int semnum, int 
cmd, unsigned long arg asmlinkage long sys_semtimedop(int semid, struct sembuf __user *sops, unsigned nsops, const struct __kernel_timespec __user *timeout); +asmlinkage long sys_semtimedop_time32(int semid, struct sembuf __user *sops, + unsigned nsops, + const struct old_timespec32 __user *timeout); asmlinkage long sys_semop(int semid, struct sembuf __user *sops, unsigned nsops); @@ -871,6 +917,8 @@ asmlinkage long sys_open_by_handle_at(int mountdirfd, int flags); asmlinkage long sys_clock_adjtime(clockid_t which_clock, struct __kernel_timex __user *tx); +asmlinkage long sys_clock_adjtime32(clockid_t which_clock, + struct old_timex32 __user *tx); asmlinkage long sys_syncfs(int fd); asmlinkage long sys_setns(int fd, int nstype); asmlinkage long sys_sendmmsg(int fd, struct mmsghdr __user *msg, @@ -1006,6 +1054,7 @@ asmlinkage long sys_alarm(unsigned int seconds); asmlinkage long sys_getpgrp(void); asmlinkage long sys_pause(void); asmlinkage long sys_time(time_t __user *tloc); +asmlinkage long sys_time32(old_time32_t __user *tloc); #ifdef __ARCH_WANT_SYS_UTIME asmlinkage long sys_utime(char __user *filename, struct utimbuf __user *times); @@ -1014,6 +1063,13 @@ asmlinkage long sys_utimes(char __user *filename, asmlinkage long sys_futimesat(int dfd, const char __user *filename, struct timeval __user *utimes); #endif +asmlinkage long sys_futimesat_time32(unsigned int dfd, + const char __user *filename, + struct old_timeval32 __user *t); +asmlinkage long sys_utime32(const char __user *filename, + struct old_utimbuf32 __user *t); +asmlinkage long sys_utimes_time32(const char __user *filename, + struct old_timeval32 __user *t); asmlinkage long sys_creat(const char __user *pathname, umode_t mode); asmlinkage long sys_getdents(unsigned int fd, struct linux_dirent __user *dirent, @@ -1038,6 +1094,7 @@ asmlinkage long sys_fork(void); /* obsolete: kernel/time/time.c */ asmlinkage long sys_stime(time_t __user *tptr); +asmlinkage long sys_stime32(old_time32_t __user *tptr); /* obsolete: kernel/signal.c */ asmlinkage long sys_sigpending(old_sigset_t __user *uset); -- cgit v1.2.3 From c70a772fda11570ebddecbce1543a3fda008db4a Mon Sep 17 00:00:00 2001 From: Arnd Bergmann Date: Mon, 7 Jan 2019 00:00:34 +0100 Subject: y2038: remove struct definition redirects We now use 64-bit time_t on all architectures, so the __kernel_timex, __kernel_timeval and __kernel_timespec redirects can be removed after having served their purpose. This makes it all much less confusing, as the __kernel_* types now always refer to the same layout based on 64-bit time_t across all 32-bit and 64-bit architectures. Signed-off-by: Arnd Bergmann --- include/linux/time64.h | 8 -------- include/linux/timex.h | 7 ------- 2 files changed, 15 deletions(-) (limited to 'include/linux') diff --git a/include/linux/time64.h b/include/linux/time64.h index 05634afba0db..f38d382ffec1 100644 --- a/include/linux/time64.h +++ b/include/linux/time64.h @@ -7,14 +7,6 @@ typedef __s64 time64_t; typedef __u64 timeu64_t; -/* CONFIG_64BIT_TIME enables new 64 bit time_t syscalls in the compat path - * and 32-bit emulation. 
- */ -#ifndef CONFIG_64BIT_TIME -#define __kernel_timespec timespec -#define __kernel_itimerspec itimerspec -#endif - #include struct timespec64 { diff --git a/include/linux/timex.h b/include/linux/timex.h index 4aff9f0d1367..ce0859763670 100644 --- a/include/linux/timex.h +++ b/include/linux/timex.h @@ -53,13 +53,6 @@ #ifndef _LINUX_TIMEX_H #define _LINUX_TIMEX_H -/* CONFIG_64BIT_TIME enables new 64 bit time_t syscalls in the compat path - * and 32-bit emulation. - */ -#ifndef CONFIG_64BIT_TIME -#define __kernel_timex timex -#endif - #include #define ADJ_ADJTIME 0x8000 /* switch between adjtime/adjtimex modes */ -- cgit v1.2.3 From a4f342b9607d8c2034d3135cbbb11b4028be3678 Mon Sep 17 00:00:00 2001 From: Quentin Perret Date: Mon, 4 Feb 2019 11:09:48 +0000 Subject: PM / OPP: Introduce a power estimation helper The Energy Model (EM) framework provides an API to let drivers register the active power of CPUs. The drivers are expected to provide a callback method which estimates the power consumed by a CPU at each available performance levels. How exactly this should be implemented, however, depends on the platform. On some systems, PM_OPP knows the voltage and frequency at which CPUs can run. When coupled with the CPU 'capacitance' (as provided by the 'dynamic-power-coefficient' devicetree binding), it is possible to estimate the dynamic power consumption of a CPU as P = C * V^2 * f, with C its capacitance and V and f respectively the voltage and frequency of the OPP. The Intelligent Power Allocator (IPA) thermal governor already implements that estimation method, in the thermal framework. However, this power estimation method can be applied to any platform where all the parameters are known (C, V and f), and not only those suffering thermal issues. As such, the code implementing this feature can be re-used to also populate the EM framework now used by EAS. As a first step, introduce in PM_OPP a helper function which CPUFreq drivers can use to register into the EM framework. This duplicates the power estimation done in IPA until it can be migrated to using the EM framework. This will be done later, once the EM framework has support for at least all platforms currently supported by IPA. 
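The estimation itself is simple per-OPP arithmetic, and a CPUFreq driver opts in with a single call. A sketch, where the foo_* names and the unit scaling are illustrative and only dev_pm_opp_of_register_em() comes from this patch:

#include <linux/cpufreq.h>
#include <linux/math64.h>
#include <linux/pm_opp.h>

/* Illustrative dynamic-power estimate, P = C * V^2 * f, with the capacitance
 * in pF, the voltage in mV and the frequency in MHz; the result is in mW. */
static u64 foo_estimate_power_mw(u64 cap_pf, u64 volt_mv, u64 freq_mhz)
{
	u64 power = cap_pf * volt_mv * volt_mv * freq_mhz;

	do_div(power, 1000000000);
	return power;
}

/* A CPUFreq driver registers its CPUs with the Energy Model from ->init(). */
static int foo_cpufreq_init(struct cpufreq_policy *policy)
{
	/* ... clock/OPP setup elided ... */
	dev_pm_opp_of_register_em(policy->cpus);
	return 0;
}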
Signed-off-by: Quentin Perret Tested-by: Matthias Kaehlcke Reviewed-by: Matthias Kaehlcke Signed-off-by: Viresh Kumar --- include/linux/pm_opp.h | 6 ++++++ 1 file changed, 6 insertions(+) (limited to 'include/linux') diff --git a/include/linux/pm_opp.h b/include/linux/pm_opp.h index 0a2a88e5a383..1470c57933cf 100644 --- a/include/linux/pm_opp.h +++ b/include/linux/pm_opp.h @@ -322,6 +322,7 @@ int dev_pm_opp_of_get_sharing_cpus(struct device *cpu_dev, struct cpumask *cpuma struct device_node *dev_pm_opp_of_get_opp_desc_node(struct device *dev); struct device_node *dev_pm_opp_get_of_node(struct dev_pm_opp *opp); int of_get_required_opp_performance_state(struct device_node *np, int index); +void dev_pm_opp_of_register_em(struct cpumask *cpus); #else static inline int dev_pm_opp_of_add_table(struct device *dev) { @@ -360,6 +361,11 @@ static inline struct device_node *dev_pm_opp_get_of_node(struct dev_pm_opp *opp) { return NULL; } + +static inline void dev_pm_opp_of_register_em(struct cpumask *cpus) +{ +} + static inline int of_get_required_opp_performance_state(struct device_node *np, int index) { return -ENOTSUPP; -- cgit v1.2.3 From 752b5da2359fee342d5264e2c10352daf5b9a199 Mon Sep 17 00:00:00 2001 From: Maxime Ripard Date: Mon, 21 Jan 2019 16:45:46 +0100 Subject: phy: dphy: Remove unused header The videomode.h header inclusion is an artifact from the patches development, remove it. Suggested-by: Sakari Ailus Acked-by: Sakari Ailus Signed-off-by: Maxime Ripard Signed-off-by: Kishon Vijay Abraham I --- include/linux/phy/phy-mipi-dphy.h | 2 -- 1 file changed, 2 deletions(-) (limited to 'include/linux') diff --git a/include/linux/phy/phy-mipi-dphy.h b/include/linux/phy/phy-mipi-dphy.h index c08aacc0ac35..9cf97cd1d303 100644 --- a/include/linux/phy/phy-mipi-dphy.h +++ b/include/linux/phy/phy-mipi-dphy.h @@ -6,8 +6,6 @@ #ifndef __PHY_MIPI_DPHY_H_ #define __PHY_MIPI_DPHY_H_ -#include