From cf5868c8a22dc2854b96e9569064bb92365549ca Mon Sep 17 00:00:00 2001
From: Mathias Krause
Date: Fri, 8 Sep 2017 20:57:10 +0200
Subject: padata: ensure the reorder timer callback runs on the correct CPU

The reorder timer function runs on the CPU where the timer interrupt was
handled, which is not necessarily one of the CPUs of the 'pcpu' CPU mask
set.

Ensure the padata_reorder() callback runs on the correct CPU, which is
one in the 'pcpu' CPU mask set and, preferably, the next expected one.
Do so by comparing the current CPU with the expected target CPU. If they
match, call padata_reorder() right away. If they differ, schedule a work
item on the target CPU that does the padata_reorder() call for us.

Signed-off-by: Mathias Krause
Signed-off-by: Herbert Xu
---
 include/linux/padata.h | 2 ++
 1 file changed, 2 insertions(+)

(limited to 'include/linux')

diff --git a/include/linux/padata.h b/include/linux/padata.h
index 2f9c1f93b1ce..5c0175bbc179 100644
--- a/include/linux/padata.h
+++ b/include/linux/padata.h
@@ -85,6 +85,7 @@ struct padata_serial_queue {
  * @swork: work struct for serialization.
  * @pd: Backpointer to the internal control structure.
  * @work: work struct for parallelization.
+ * @reorder_work: work struct for reordering.
  * @num_obj: Number of objects that are processed by this cpu.
  * @cpu_index: Index of the cpu.
  */
@@ -93,6 +94,7 @@ struct padata_parallel_queue {
 	struct padata_list reorder;
 	struct parallel_data *pd;
 	struct work_struct work;
+	struct work_struct reorder_work;
 	atomic_t num_obj;
 	int cpu_index;
 };
--
cgit v1.2.3

From 350ef88e7e922354f82a931897ad4a4ce6c686ff Mon Sep 17 00:00:00 2001
From: Mathias Krause
Date: Fri, 8 Sep 2017 20:57:11 +0200
Subject: padata: ensure padata_do_serial() runs on the correct CPU

If the algorithm we're parallelizing is asynchronous, we might change
CPUs between padata_do_parallel() and padata_do_serial(). However, we
don't expect this to happen as we need to enqueue the padata object into
the per-cpu reorder queue we took it from, i.e. the same CPU's parallel
queue.

Ensure we're not switching CPUs for a given padata object by tracking
the CPU within the padata object. If the serial callback gets called on
the wrong CPU, defer invoking padata_reorder() via a kernel worker on
the CPU we're expected to run on.

Signed-off-by: Mathias Krause
Signed-off-by: Herbert Xu
---
 include/linux/padata.h | 2 ++
 1 file changed, 2 insertions(+)

(limited to 'include/linux')

diff --git a/include/linux/padata.h b/include/linux/padata.h
index 5c0175bbc179..5d13d25da2c8 100644
--- a/include/linux/padata.h
+++ b/include/linux/padata.h
@@ -37,6 +37,7 @@
  * @list: List entry, to attach to the padata lists.
  * @pd: Pointer to the internal control structure.
  * @cb_cpu: Callback cpu for serializatioon.
+ * @cpu: Cpu for parallelization.
  * @seq_nr: Sequence number of the parallelized data object.
  * @info: Used to pass information from the parallel to the serial function.
  * @parallel: Parallel execution function.
@@ -46,6 +47,7 @@ struct padata_priv {
 	struct list_head list;
 	struct parallel_data *pd;
 	int cb_cpu;
+	int cpu;
 	int info;
 	void (*parallel)(struct padata_priv *padata);
 	void (*serial)(struct padata_priv *padata);
--
cgit v1.2.3
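Both hunks above only add fields (reorder_work in struct padata_parallel_queue,
cpu in struct padata_priv); the dispatch logic the commit messages describe
lives in kernel/padata.c and is not shown here. Below is a minimal sketch of
that pattern, written as if it sat inside kernel/padata.c. It is illustrative
only: the helper name padata_reorder_on(), the pd->pqueue / pd->pinst->wq
field layout of this kernel release, and the assumption that reorder_work was
set up with INIT_WORK() to a handler that calls padata_reorder() are all
assumptions, not code from these patches.

#include <linux/padata.h>
#include <linux/percpu.h>
#include <linux/smp.h>
#include <linux/workqueue.h>

/*
 * Illustrative sketch only, not the code from kernel/padata.c: compare the
 * current CPU against the expected target CPU and either reorder in place
 * or defer to a worker bound to that CPU.
 */
static void padata_reorder_on(struct parallel_data *pd, int target_cpu)
{
	struct padata_parallel_queue *pqueue;
	int cpu = get_cpu();	/* pin this CPU while we compare */

	if (cpu == target_cpu) {
		/* Already on the expected CPU: reorder right away. */
		padata_reorder(pd);
	} else {
		/* Wrong CPU: hand off to a worker bound to target_cpu. */
		pqueue = per_cpu_ptr(pd->pqueue, target_cpu);
		queue_work_on(target_cpu, pd->pinst->wq,
			      &pqueue->reorder_work);
	}

	put_cpu();
}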
From ada69a1639eca54ff74d839a6513c43db8d57d70 Mon Sep 17 00:00:00 2001
From: Gilad Ben-Yossef
Date: Wed, 18 Oct 2017 08:00:38 +0100
Subject: crypto: introduce crypto wait for async op

Invoking a possibly asynchronous crypto op and waiting for completion
while correctly handling backlog processing is a common task in the
crypto API implementation and outside users of it.

This patch adds a generic implementation for doing so in preparation
for using it across the board instead of hand-rolled versions.

Signed-off-by: Gilad Ben-Yossef
CC: Eric Biggers
CC: Jonathan Cameron
Signed-off-by: Herbert Xu
---
 include/linux/crypto.h | 40 ++++++++++++++++++++++++++++++++++++++++
 1 file changed, 40 insertions(+)

(limited to 'include/linux')

diff --git a/include/linux/crypto.h b/include/linux/crypto.h
index 84da9978e951..78508ca4b108 100644
--- a/include/linux/crypto.h
+++ b/include/linux/crypto.h
@@ -24,6 +24,7 @@
 #include
 #include
 #include
+#include <linux/completion.h>
 
 /*
  * Autoloaded crypto modules should only use a prefixed name to avoid allowing
@@ -467,6 +468,45 @@ struct crypto_alg {
 	struct module *cra_module;
 } CRYPTO_MINALIGN_ATTR;
 
+/*
+ * A helper struct for waiting for completion of async crypto ops
+ */
+struct crypto_wait {
+	struct completion completion;
+	int err;
+};
+
+/*
+ * Macro for declaring a crypto op async wait object on stack
+ */
+#define DECLARE_CRYPTO_WAIT(_wait) \
+	struct crypto_wait _wait = { \
+		COMPLETION_INITIALIZER_ONSTACK((_wait).completion), 0 }
+
+/*
+ * Async ops completion helper functioons
+ */
+void crypto_req_done(struct crypto_async_request *req, int err);
+
+static inline int crypto_wait_req(int err, struct crypto_wait *wait)
+{
+	switch (err) {
+	case -EINPROGRESS:
+	case -EBUSY:
+		wait_for_completion(&wait->completion);
+		reinit_completion(&wait->completion);
+		err = wait->err;
+		break;
+	};
+
+	return err;
+}
+
+static inline void crypto_init_wait(struct crypto_wait *wait)
+{
+	init_completion(&wait->completion);
+}
+
 /*
  * Algorithm registration interface.
  */
--
cgit v1.2.3
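These helpers are meant to replace the hand-rolled completion and backlog
handling that callers of async crypto ops currently carry. A hypothetical
caller, sketched here for illustration (the function name, the pre-allocated
skcipher transform and the scatterlists are assumptions, not part of this
patch), would combine DECLARE_CRYPTO_WAIT(), crypto_req_done() and
crypto_wait_req() roughly like this:

#include <crypto/skcipher.h>
#include <linux/crypto.h>
#include <linux/scatterlist.h>

/*
 * Hypothetical caller, for illustration only: encrypt one buffer with an
 * already-allocated skcipher transform and wait synchronously, letting
 * crypto_wait_req() absorb the async and backlog cases.
 */
static int encrypt_and_wait(struct crypto_skcipher *tfm,
			    struct scatterlist *src, struct scatterlist *dst,
			    unsigned int len, u8 *iv)
{
	DECLARE_CRYPTO_WAIT(wait);
	struct skcipher_request *req;
	int err;

	req = skcipher_request_alloc(tfm, GFP_KERNEL);
	if (!req)
		return -ENOMEM;

	/* crypto_req_done() completes 'wait' and records the final status. */
	skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG |
				      CRYPTO_TFM_REQ_MAY_SLEEP,
				      crypto_req_done, &wait);
	skcipher_request_set_crypt(req, src, dst, len, iv);

	/* -EINPROGRESS and -EBUSY become a synchronous wait here. */
	err = crypto_wait_req(crypto_skcipher_encrypt(req), &wait);

	skcipher_request_free(req);
	return err;
}

crypto_req_done() completes the wait object and stores the final status, so
crypto_wait_req() turns the asynchronous -EINPROGRESS and -EBUSY cases into a
plain synchronous return value for the caller.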