From d6104733178293b40044525b06d6a26356934da3 Mon Sep 17 00:00:00 2001 From: Christian Marangi Date: Tue, 14 Jan 2025 13:36:34 +0100 Subject: spinlock: extend guard with spinlock_bh variants Extend guard APIs with missing raw/spinlock_bh variants. Signed-off-by: Christian Marangi Acked-by: Peter Zijlstra (Intel) Signed-off-by: Herbert Xu --- include/linux/spinlock.h | 13 +++++++++++++ 1 file changed, 13 insertions(+) (limited to 'include') diff --git a/include/linux/spinlock.h b/include/linux/spinlock.h index 63dd8cf3c3c2..d3561c4a080e 100644 --- a/include/linux/spinlock.h +++ b/include/linux/spinlock.h @@ -548,6 +548,12 @@ DEFINE_LOCK_GUARD_1(raw_spinlock_irq, raw_spinlock_t, DEFINE_LOCK_GUARD_1_COND(raw_spinlock_irq, _try, raw_spin_trylock_irq(_T->lock)) +DEFINE_LOCK_GUARD_1(raw_spinlock_bh, raw_spinlock_t, + raw_spin_lock_bh(_T->lock), + raw_spin_unlock_bh(_T->lock)) + +DEFINE_LOCK_GUARD_1_COND(raw_spinlock_bh, _try, raw_spin_trylock_bh(_T->lock)) + DEFINE_LOCK_GUARD_1(raw_spinlock_irqsave, raw_spinlock_t, raw_spin_lock_irqsave(_T->lock, _T->flags), raw_spin_unlock_irqrestore(_T->lock, _T->flags), @@ -569,6 +575,13 @@ DEFINE_LOCK_GUARD_1(spinlock_irq, spinlock_t, DEFINE_LOCK_GUARD_1_COND(spinlock_irq, _try, spin_trylock_irq(_T->lock)) +DEFINE_LOCK_GUARD_1(spinlock_bh, spinlock_t, + spin_lock_bh(_T->lock), + spin_unlock_bh(_T->lock)) + +DEFINE_LOCK_GUARD_1_COND(spinlock_bh, _try, + spin_trylock_bh(_T->lock)) + DEFINE_LOCK_GUARD_1(spinlock_irqsave, spinlock_t, spin_lock_irqsave(_T->lock, _T->flags), spin_unlock_irqrestore(_T->lock, _T->flags), -- cgit v1.2.3 From f4144b6bb74cc358054041e7b062bc9354c59e6c Mon Sep 17 00:00:00 2001 From: Lukas Wunner Date: Sun, 2 Feb 2025 20:00:51 +0100 Subject: crypto: sig - Prepare for algorithms with variable signature size The callers of crypto_sig_sign() assume that the signature size is always equivalent to the key size. This happens to be true for RSA, which is currently the only algorithm implementing the ->sign() callback. But it is false e.g. for X9.62 encoded ECDSA signatures because they have variable length. Prepare for addition of a ->sign() callback to such algorithms by letting the callback return the signature size (or a negative integer on error). When testing the ->sign() callback in test_sig_one(), use crypto_sig_maxsize() instead of crypto_sig_keysize() to verify that the test vector's signature does not exceed an algorithm's maximum signature size. There has been a relatively recent effort to upstream ECDSA signature generation support which may benefit from this change: https://lore.kernel.org/linux-crypto/20220908200036.2034-1-ignat@cloudflare.com/ However the main motivation for this commit is to reduce the number of crypto_sig_keysize() callers: This function is about to be changed to return the size in bits instead of bytes and that will require amending most callers to divide the return value by 8. Signed-off-by: Lukas Wunner Reviewed-by: Stefan Berger Cc: Ignat Korchagin Signed-off-by: Herbert Xu --- include/crypto/sig.h | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) (limited to 'include') diff --git a/include/crypto/sig.h b/include/crypto/sig.h index cff41ad93824..11024708c069 100644 --- a/include/crypto/sig.h +++ b/include/crypto/sig.h @@ -23,7 +23,8 @@ struct crypto_sig { * struct sig_alg - generic public key signature algorithm * * @sign: Function performs a sign operation as defined by public key - * algorithm. Optional. + * algorithm. On success, the signature size is returned. + * Optional. 
* @verify: Function performs a complete verify operation as defined by * public key algorithm, returning verification status. Optional. * @set_pub_key: Function invokes the algorithm specific set public key @@ -186,7 +187,7 @@ static inline unsigned int crypto_sig_maxsize(struct crypto_sig *tfm) * @dst: destination obuffer * @dlen: destination length * - * Return: zero on success; error code in case of error + * Return: signature size on success; error code in case of error */ static inline int crypto_sig_sign(struct crypto_sig *tfm, const void *src, unsigned int slen, -- cgit v1.2.3 From b16510a530d1e6ab9683f04f8fb34f2e0f538275 Mon Sep 17 00:00:00 2001 From: Lukas Wunner Date: Sun, 2 Feb 2025 20:00:52 +0100 Subject: crypto: ecdsa - Harden against integer overflows in DIV_ROUND_UP() Herbert notes that DIV_ROUND_UP() may overflow unnecessarily if an ecdsa implementation's ->key_size() callback returns an unusually large value. Herbert instead suggests (for a division by 8): X / 8 + !!(X & 7) Based on this formula, introduce a generic DIV_ROUND_UP_POW2() macro and use it in lieu of DIV_ROUND_UP() for ->key_size() return values. Additionally, use the macro in ecc_digits_from_bytes(), whose "nbytes" parameter is a ->key_size() return value in some instances, or a user-specified ASN.1 length in the case of ecdsa_get_signature_rs(). Link: https://lore.kernel.org/r/Z3iElsILmoSu6FuC@gondor.apana.org.au/ Signed-off-by: Lukas Wunner Signed-off-by: Lukas Wunner Signed-off-by: Herbert Xu --- include/linux/math.h | 12 ++++++++++++ 1 file changed, 12 insertions(+) (limited to 'include') diff --git a/include/linux/math.h b/include/linux/math.h index f5f18dc3616b..0198c92cbe3e 100644 --- a/include/linux/math.h +++ b/include/linux/math.h @@ -34,6 +34,18 @@ */ #define round_down(x, y) ((x) & ~__round_mask(x, y)) +/** + * DIV_ROUND_UP_POW2 - divide and round up + * @n: numerator + * @d: denominator (must be a power of 2) + * + * Divides @n by @d and rounds up to next multiple of @d (which must be a power + * of 2). Avoids integer overflows that may occur with __KERNEL_DIV_ROUND_UP(). + * Performance is roughly equivalent to __KERNEL_DIV_ROUND_UP(). + */ +#define DIV_ROUND_UP_POW2(n, d) \ + ((n) / (d) + !!((n) & ((d) - 1))) + #define DIV_ROUND_UP __KERNEL_DIV_ROUND_UP #define DIV_ROUND_DOWN_ULL(ll, d) \ -- cgit v1.2.3 From 849d9db170fc8a03ce9f64133a1d0cd46c135105 Mon Sep 17 00:00:00 2001 From: Nicolas Frattaroli Date: Tue, 4 Feb 2025 16:35:46 +0100 Subject: dt-bindings: reset: Add SCMI reset IDs for RK3588 When TF-A is used to assert/deassert the resets through SCMI, the IDs communicated to it are different than the ones mainline Linux uses. Import the list of SCMI reset IDs from mainline TF-A so that devicetrees can use these IDs more easily. Co-developed-by: XiaoDong Huang Signed-off-by: XiaoDong Huang Acked-by: Conor Dooley Signed-off-by: Nicolas Frattaroli Signed-off-by: Herbert Xu --- include/dt-bindings/reset/rockchip,rk3588-cru.h | 41 ++++++++++++++++++++++++- 1 file changed, 40 insertions(+), 1 deletion(-) (limited to 'include') diff --git a/include/dt-bindings/reset/rockchip,rk3588-cru.h b/include/dt-bindings/reset/rockchip,rk3588-cru.h index e2fe4bd5f7f0..878beae6dc3b 100644 --- a/include/dt-bindings/reset/rockchip,rk3588-cru.h +++ b/include/dt-bindings/reset/rockchip,rk3588-cru.h @@ -1,6 +1,6 @@ /* SPDX-License-Identifier: (GPL-2.0 OR MIT) */ /* - * Copyright (c) 2021 Rockchip Electronics Co. Ltd. + * Copyright (c) 2021, 2024 Rockchip Electronics Co. Ltd. * Copyright (c) 2022 Collabora Ltd. 
* * Author: Elaine Zhang @@ -753,4 +753,43 @@ #define SRST_A_HDMIRX_BIU 660 +/* SCMI Secure Resets */ + +/* Name=SECURE_SOFTRST_CON00,Offset=0xA00 */ +#define SCMI_SRST_A_SECURE_NS_BIU 10 +#define SCMI_SRST_H_SECURE_NS_BIU 11 +#define SCMI_SRST_A_SECURE_S_BIU 12 +#define SCMI_SRST_H_SECURE_S_BIU 13 +#define SCMI_SRST_P_SECURE_S_BIU 14 +#define SCMI_SRST_CRYPTO_CORE 15 +/* Name=SECURE_SOFTRST_CON01,Offset=0xA04 */ +#define SCMI_SRST_CRYPTO_PKA 16 +#define SCMI_SRST_CRYPTO_RNG 17 +#define SCMI_SRST_A_CRYPTO 18 +#define SCMI_SRST_H_CRYPTO 19 +#define SCMI_SRST_KEYLADDER_CORE 25 +#define SCMI_SRST_KEYLADDER_RNG 26 +#define SCMI_SRST_A_KEYLADDER 27 +#define SCMI_SRST_H_KEYLADDER 28 +#define SCMI_SRST_P_OTPC_S 29 +#define SCMI_SRST_OTPC_S 30 +#define SCMI_SRST_WDT_S 31 +/* Name=SECURE_SOFTRST_CON02,Offset=0xA08 */ +#define SCMI_SRST_T_WDT_S 32 +#define SCMI_SRST_H_BOOTROM 33 +#define SCMI_SRST_A_DCF 34 +#define SCMI_SRST_P_DCF 35 +#define SCMI_SRST_H_BOOTROM_NS 37 +#define SCMI_SRST_P_KEYLADDER 46 +#define SCMI_SRST_H_TRNG_S 47 +/* Name=SECURE_SOFTRST_CON03,Offset=0xA0C */ +#define SCMI_SRST_H_TRNG_NS 48 +#define SCMI_SRST_D_SDMMC_BUFFER 49 +#define SCMI_SRST_H_SDMMC 50 +#define SCMI_SRST_H_SDMMC_BUFFER 51 +#define SCMI_SRST_SDMMC 52 +#define SCMI_SRST_P_TRNG_CHK 53 +#define SCMI_SRST_TRNG_S 54 + + #endif -- cgit v1.2.3 From 12a2b40d49c1e11f35fa39e4363ef4f175b0330c Mon Sep 17 00:00:00 2001 From: Herbert Xu Date: Fri, 14 Feb 2025 14:02:08 +0800 Subject: crypto: skcipher - Set tfm in SYNC_SKCIPHER_REQUEST_ON_STACK Set the request tfm directly in SYNC_SKCIPHER_REQUEST_ON_STACK since the tfm is already available. Signed-off-by: Herbert Xu --- include/crypto/skcipher.h | 19 +++++++++++++------ 1 file changed, 13 insertions(+), 6 deletions(-) (limited to 'include') diff --git a/include/crypto/skcipher.h b/include/crypto/skcipher.h index 18a86e0af016..9e5853464345 100644 --- a/include/crypto/skcipher.h +++ b/include/crypto/skcipher.h @@ -214,16 +214,17 @@ struct lskcipher_alg { #define MAX_SYNC_SKCIPHER_REQSIZE 384 /* - * This performs a type-check against the "tfm" argument to make sure + * This performs a type-check against the "_tfm" argument to make sure * all users have the correct skcipher tfm for doing on-stack requests. 
*/ -#define SYNC_SKCIPHER_REQUEST_ON_STACK(name, tfm) \ +#define SYNC_SKCIPHER_REQUEST_ON_STACK(name, _tfm) \ char __##name##_desc[sizeof(struct skcipher_request) + \ - MAX_SYNC_SKCIPHER_REQSIZE + \ - (!(sizeof((struct crypto_sync_skcipher *)1 == \ - (typeof(tfm))1))) \ + MAX_SYNC_SKCIPHER_REQSIZE \ ] CRYPTO_MINALIGN_ATTR; \ - struct skcipher_request *name = (void *)__##name##_desc + struct skcipher_request *name = \ + (((struct skcipher_request *)__##name##_desc)->base.tfm = \ + crypto_sync_skcipher_tfm((_tfm)), \ + (void *)__##name##_desc) /** * DOC: Symmetric Key Cipher API @@ -311,6 +312,12 @@ static inline struct crypto_tfm *crypto_lskcipher_tfm( return &tfm->base; } +static inline struct crypto_tfm *crypto_sync_skcipher_tfm( + struct crypto_sync_skcipher *tfm) +{ + return crypto_skcipher_tfm(&tfm->base); +} + /** * crypto_free_skcipher() - zeroize and free cipher handle * @tfm: cipher handle to be freed -- cgit v1.2.3 From 075db21426b17609e2374274751b761e35d39664 Mon Sep 17 00:00:00 2001 From: Herbert Xu Date: Sun, 16 Feb 2025 11:07:12 +0800 Subject: crypto: ahash - Only save callback and data in ahash_save_req As unaligned operations are supported by the underlying algorithm, ahash_save_req and ahash_restore_req can be greatly simplified to only preserve the callback and data. Signed-off-by: Herbert Xu --- include/crypto/hash.h | 3 --- 1 file changed, 3 deletions(-) (limited to 'include') diff --git a/include/crypto/hash.h b/include/crypto/hash.h index 2d5ea9f9ff43..9c1f8ca59a77 100644 --- a/include/crypto/hash.h +++ b/include/crypto/hash.h @@ -55,9 +55,6 @@ struct ahash_request { struct scatterlist *src; u8 *result; - /* This field may only be used by the ahash API code. */ - void *priv; - void *__ctx[] CRYPTO_MINALIGN_ATTR; }; -- cgit v1.2.3 From f2ffe5a9183d22eec718edac03e8bfcedf4dee70 Mon Sep 17 00:00:00 2001 From: Herbert Xu Date: Sun, 16 Feb 2025 11:07:17 +0800 Subject: crypto: hash - Add request chaining API This adds request chaining to the ahash interface. Request chaining allows multiple requests to be submitted in one shot. An algorithm can elect to receive chained requests by setting the flag CRYPTO_ALG_REQ_CHAIN. If this bit is not set, the API will break up chained requests and submit them one-by-one. A new err field is added to struct crypto_async_request to record the return value for each individual request. 
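For illustration, submitting two requests in one shot might look roughly like the sketch below; the tfm, scatterlists, result buffers and the my_complete() callback are placeholders rather than code from this patch:

        struct ahash_request *r1 = ahash_request_alloc(tfm, GFP_KERNEL);
        struct ahash_request *r2 = ahash_request_alloc(tfm, GFP_KERNEL);
        int err;

        /* Setting the callback also initialises the chaining list head. */
        ahash_request_set_callback(r1, 0, my_complete, my_data);
        ahash_request_set_callback(r2, 0, NULL, NULL);
        ahash_request_set_crypt(r1, sg1, out1, len1);
        ahash_request_set_crypt(r2, sg2, out2, len2);

        /* Queue r2 behind the head request r1 and submit both at once. */
        ahash_request_chain(r2, r1);
        err = crypto_ahash_digest(r1);

        /* After completion, each request carries its own status. */
        err = ahash_request_err(r2);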
Signed-off-by: Herbert Xu --- include/crypto/algapi.h | 11 +++++++++++ include/crypto/hash.h | 28 ++++++++++++++++++---------- include/crypto/internal/hash.h | 10 ++++++++++ include/linux/crypto.h | 24 ++++++++++++++++++++++++ 4 files changed, 63 insertions(+), 10 deletions(-) (limited to 'include') diff --git a/include/crypto/algapi.h b/include/crypto/algapi.h index 156de41ca760..11065978d360 100644 --- a/include/crypto/algapi.h +++ b/include/crypto/algapi.h @@ -11,6 +11,7 @@ #include #include #include +#include #include #include @@ -271,4 +272,14 @@ static inline u32 crypto_tfm_alg_type(struct crypto_tfm *tfm) return tfm->__crt_alg->cra_flags & CRYPTO_ALG_TYPE_MASK; } +static inline bool crypto_request_chained(struct crypto_async_request *req) +{ + return !list_empty(&req->list); +} + +static inline bool crypto_tfm_req_chain(struct crypto_tfm *tfm) +{ + return tfm->__crt_alg->cra_flags & CRYPTO_ALG_REQ_CHAIN; +} + #endif /* _CRYPTO_ALGAPI_H */ diff --git a/include/crypto/hash.h b/include/crypto/hash.h index 9c1f8ca59a77..0a6f744ce4a1 100644 --- a/include/crypto/hash.h +++ b/include/crypto/hash.h @@ -572,16 +572,7 @@ static inline struct ahash_request *ahash_request_alloc_noprof( * ahash_request_free() - zeroize and free the request data structure * @req: request data structure cipher handle to be freed */ -static inline void ahash_request_free(struct ahash_request *req) -{ - kfree_sensitive(req); -} - -static inline void ahash_request_zero(struct ahash_request *req) -{ - memzero_explicit(req, sizeof(*req) + - crypto_ahash_reqsize(crypto_ahash_reqtfm(req))); -} +void ahash_request_free(struct ahash_request *req); static inline struct ahash_request *ahash_request_cast( struct crypto_async_request *req) @@ -622,6 +613,7 @@ static inline void ahash_request_set_callback(struct ahash_request *req, req->base.complete = compl; req->base.data = data; req->base.flags = flags; + crypto_reqchain_init(&req->base); } /** @@ -646,6 +638,12 @@ static inline void ahash_request_set_crypt(struct ahash_request *req, req->result = result; } +static inline void ahash_request_chain(struct ahash_request *req, + struct ahash_request *head) +{ + crypto_request_chain(&req->base, &head->base); +} + /** * DOC: Synchronous Message Digest API * @@ -947,4 +945,14 @@ static inline void shash_desc_zero(struct shash_desc *desc) sizeof(*desc) + crypto_shash_descsize(desc->tfm)); } +static inline int ahash_request_err(struct ahash_request *req) +{ + return req->base.err; +} + +static inline bool ahash_is_async(struct crypto_ahash *tfm) +{ + return crypto_tfm_is_async(&tfm->base); +} + #endif /* _CRYPTO_HASH_H */ diff --git a/include/crypto/internal/hash.h b/include/crypto/internal/hash.h index 84da3424decc..36425ecd2c37 100644 --- a/include/crypto/internal/hash.h +++ b/include/crypto/internal/hash.h @@ -247,5 +247,15 @@ static inline struct crypto_shash *__crypto_shash_cast(struct crypto_tfm *tfm) return container_of(tfm, struct crypto_shash, base); } +static inline bool ahash_request_chained(struct ahash_request *req) +{ + return crypto_request_chained(&req->base); +} + +static inline bool crypto_ahash_req_chain(struct crypto_ahash *tfm) +{ + return crypto_tfm_req_chain(&tfm->base); +} + #endif /* _CRYPTO_INTERNAL_HASH_H */ diff --git a/include/linux/crypto.h b/include/linux/crypto.h index b164da5e129e..1d2a6c515d58 100644 --- a/include/linux/crypto.h +++ b/include/linux/crypto.h @@ -13,6 +13,8 @@ #define _LINUX_CRYPTO_H #include +#include +#include #include #include #include @@ -124,6 +126,9 @@ */ #define 
CRYPTO_ALG_FIPS_INTERNAL 0x00020000 +/* Set if the algorithm supports request chains. */ +#define CRYPTO_ALG_REQ_CHAIN 0x00040000 + /* * Transform masks and values (for crt_flags). */ @@ -174,6 +179,7 @@ struct crypto_async_request { struct crypto_tfm *tfm; u32 flags; + int err; }; /** @@ -540,5 +546,23 @@ int crypto_comp_decompress(struct crypto_comp *tfm, const u8 *src, unsigned int slen, u8 *dst, unsigned int *dlen); +static inline void crypto_reqchain_init(struct crypto_async_request *req) +{ + req->err = -EINPROGRESS; + INIT_LIST_HEAD(&req->list); +} + +static inline void crypto_request_chain(struct crypto_async_request *req, + struct crypto_async_request *head) +{ + req->err = -EINPROGRESS; + list_add_tail(&req->list, &head->list); +} + +static inline bool crypto_tfm_is_async(struct crypto_tfm *tfm) +{ + return tfm->__crt_alg->cra_flags & CRYPTO_ALG_ASYNC; +} + #endif /* _LINUX_CRYPTO_H */ -- cgit v1.2.3 From 439963cdc3aa447dfd3b5abc05fcd25cef4d22dc Mon Sep 17 00:00:00 2001 From: Herbert Xu Date: Sun, 16 Feb 2025 11:07:22 +0800 Subject: crypto: ahash - Add virtual address support This patch adds virtual address support to ahash. Virtual addresses were previously only supported through shash. The user may choose to use virtual addresses with ahash by calling ahash_request_set_virt instead of ahash_request_set_crypt. The API will take care of translating this to an SG list if necessary, unless the algorithm declares that it supports chaining. Therefore in order for an ahash algorithm to support chaining, it must also support virtual addresses directly. Signed-off-by: Herbert Xu --- include/crypto/hash.h | 38 ++++++++++++++++++++++++++++++++++++-- include/crypto/internal/hash.h | 5 +++++ include/linux/crypto.h | 2 +- 3 files changed, 42 insertions(+), 3 deletions(-) (limited to 'include') diff --git a/include/crypto/hash.h b/include/crypto/hash.h index 0a6f744ce4a1..4e87e39679cb 100644 --- a/include/crypto/hash.h +++ b/include/crypto/hash.h @@ -12,6 +12,9 @@ #include #include +/* Set this bit for virtual address instead of SG list. */ +#define CRYPTO_AHASH_REQ_VIRT 0x00000001 + struct crypto_ahash; /** @@ -52,7 +55,10 @@ struct ahash_request { struct crypto_async_request base; unsigned int nbytes; - struct scatterlist *src; + union { + struct scatterlist *src; + const u8 *svirt; + }; u8 *result; void *__ctx[] CRYPTO_MINALIGN_ATTR; @@ -610,9 +616,13 @@ static inline void ahash_request_set_callback(struct ahash_request *req, crypto_completion_t compl, void *data) { + u32 keep = CRYPTO_AHASH_REQ_VIRT; + req->base.complete = compl; req->base.data = data; - req->base.flags = flags; + flags &= ~keep; + req->base.flags &= keep; + req->base.flags |= flags; crypto_reqchain_init(&req->base); } @@ -636,6 +646,30 @@ static inline void ahash_request_set_crypt(struct ahash_request *req, req->src = src; req->nbytes = nbytes; req->result = result; + req->base.flags &= ~CRYPTO_AHASH_REQ_VIRT; +} + +/** + * ahash_request_set_virt() - set virtual address data buffers + * @req: ahash_request handle to be updated + * @src: source virtual address + * @result: buffer that is filled with the message digest -- the caller must + * ensure that the buffer has sufficient space by, for example, calling + * crypto_ahash_digestsize() + * @nbytes: number of bytes to process from the source virtual address + * + * By using this call, the caller references the source virtual address. + * The source virtual address points to the data the message digest is to + * be calculated for. 
+ */ +static inline void ahash_request_set_virt(struct ahash_request *req, + const u8 *src, u8 *result, + unsigned int nbytes) +{ + req->svirt = src; + req->nbytes = nbytes; + req->result = result; + req->base.flags |= CRYPTO_AHASH_REQ_VIRT; } static inline void ahash_request_chain(struct ahash_request *req, diff --git a/include/crypto/internal/hash.h b/include/crypto/internal/hash.h index 36425ecd2c37..485e22cf517e 100644 --- a/include/crypto/internal/hash.h +++ b/include/crypto/internal/hash.h @@ -252,6 +252,11 @@ static inline bool ahash_request_chained(struct ahash_request *req) return crypto_request_chained(&req->base); } +static inline bool ahash_request_isvirt(struct ahash_request *req) +{ + return req->base.flags & CRYPTO_AHASH_REQ_VIRT; +} + static inline bool crypto_ahash_req_chain(struct crypto_ahash *tfm) { return crypto_tfm_req_chain(&tfm->base); diff --git a/include/linux/crypto.h b/include/linux/crypto.h index 1d2a6c515d58..61ac11226638 100644 --- a/include/linux/crypto.h +++ b/include/linux/crypto.h @@ -126,7 +126,7 @@ */ #define CRYPTO_ALG_FIPS_INTERNAL 0x00020000 -/* Set if the algorithm supports request chains. */ +/* Set if the algorithm supports request chains and virtual addresses. */ #define CRYPTO_ALG_REQ_CHAIN 0x00040000 /* -- cgit v1.2.3 From 9e01aaa1033d6e40f8d7cf4f20931a61ce9e3f04 Mon Sep 17 00:00:00 2001 From: Herbert Xu Date: Sun, 16 Feb 2025 11:07:24 +0800 Subject: crypto: ahash - Set default reqsize from ahash_alg Add a reqsize field to struct ahash_alg and use it to set the default reqsize so that algorithms with a static reqsize are not forced to create an init_tfm function. Signed-off-by: Herbert Xu --- include/crypto/hash.h | 3 +++ 1 file changed, 3 insertions(+) (limited to 'include') diff --git a/include/crypto/hash.h b/include/crypto/hash.h index 4e87e39679cb..2aa83ee0ec98 100644 --- a/include/crypto/hash.h +++ b/include/crypto/hash.h @@ -135,6 +135,7 @@ struct ahash_request { * This is a counterpart to @init_tfm, used to remove * various changes set in @init_tfm. * @clone_tfm: Copy transform into new object, may allocate memory. + * @reqsize: Size of the request context. * @halg: see struct hash_alg_common */ struct ahash_alg { @@ -151,6 +152,8 @@ struct ahash_alg { void (*exit_tfm)(struct crypto_ahash *tfm); int (*clone_tfm)(struct crypto_ahash *dst, struct crypto_ahash *src); + unsigned int reqsize; + struct hash_alg_common halg; }; -- cgit v1.2.3 From 3bd4b2c603fce29f6d26da1579d5a013b70b9453 Mon Sep 17 00:00:00 2001 From: Eric Biggers Date: Wed, 19 Feb 2025 10:23:23 -0800 Subject: crypto: scatterwalk - move to next sg entry just in time The scatterwalk_* functions are designed to advance to the next sg entry only when there is more data from the request to process. Compared to the alternative of advancing after each step if !sg_is_last(sg), this has the advantage that it doesn't cause problems if users accidentally don't terminate their scatterlist with the end marker (which is an easy mistake to make, and there are examples of this). Currently, the advance to the next sg entry happens in scatterwalk_done(), which is called after each "step" of the walk. It requires the caller to pass in a boolean 'more' that indicates whether there is more data. This works when the caller immediately knows whether there is more data, though it adds some complexity. However in the case of scatterwalk_copychunks() it's not immediately known whether there is more data, so the call to scatterwalk_done() has to happen higher up the stack. 
This is error-prone, and indeed the needed call to scatterwalk_done() is not always made, e.g. scatterwalk_copychunks() is sometimes called multiple times in a row. This causes a zero-length step to get added in some cases, which is unexpected and seems to work only by accident. This patch begins the switch to a less error-prone approach where the advance to the next sg entry happens just in time instead. For now, that means just doing the advance in scatterwalk_clamp() if it's needed there. Initially this is redundant, but it's needed to keep the tree in a working state as later patches change things to the final state. Later patches will similarly move the dcache flushing logic out of scatterwalk_done() and then remove scatterwalk_done() entirely. Signed-off-by: Eric Biggers Signed-off-by: Herbert Xu --- include/crypto/scatterwalk.h | 19 ++++++++++--------- 1 file changed, 10 insertions(+), 9 deletions(-) (limited to 'include') diff --git a/include/crypto/scatterwalk.h b/include/crypto/scatterwalk.h index 32fc4473175b..924efbaefe67 100644 --- a/include/crypto/scatterwalk.h +++ b/include/crypto/scatterwalk.h @@ -26,6 +26,13 @@ static inline void scatterwalk_crypto_chain(struct scatterlist *head, sg_mark_end(head); } +static inline void scatterwalk_start(struct scatter_walk *walk, + struct scatterlist *sg) +{ + walk->sg = sg; + walk->offset = sg->offset; +} + static inline unsigned int scatterwalk_pagelen(struct scatter_walk *walk) { unsigned int len = walk->sg->offset + walk->sg->length - walk->offset; @@ -36,8 +43,9 @@ static inline unsigned int scatterwalk_pagelen(struct scatter_walk *walk) static inline unsigned int scatterwalk_clamp(struct scatter_walk *walk, unsigned int nbytes) { - unsigned int len_this_page = scatterwalk_pagelen(walk); - return nbytes > len_this_page ? len_this_page : nbytes; + if (walk->offset >= walk->sg->offset + walk->sg->length) + scatterwalk_start(walk, sg_next(walk->sg)); + return min(nbytes, scatterwalk_pagelen(walk)); } static inline void scatterwalk_advance(struct scatter_walk *walk, @@ -56,13 +64,6 @@ static inline void scatterwalk_unmap(void *vaddr) kunmap_local(vaddr); } -static inline void scatterwalk_start(struct scatter_walk *walk, - struct scatterlist *sg) -{ - walk->sg = sg; - walk->offset = sg->offset; -} - static inline void *scatterwalk_map(struct scatter_walk *walk) { return kmap_local_page(scatterwalk_page(walk)) + -- cgit v1.2.3 From e21d01a2a3f56ee422cd155bf06c5e572523fcc1 Mon Sep 17 00:00:00 2001 From: Eric Biggers Date: Wed, 19 Feb 2025 10:23:24 -0800 Subject: crypto: scatterwalk - add new functions for skipping data Add scatterwalk_skip() to skip the given number of bytes in a scatter_walk. Previously support for skipping was provided through scatterwalk_copychunks(..., 2) followed by scatterwalk_done(), which was confusing and less efficient. Also add scatterwalk_start_at_pos() which starts a scatter_walk at the given position, equivalent to scatterwalk_start() + scatterwalk_skip(). This addresses another common need in a more streamlined way. Later patches will convert various users to use these functions. 
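For example (a minimal sketch; sg and the byte positions are assumed):

        struct scatter_walk walk;

        /* Start the walk 16 bytes into the scatterlist... */
        scatterwalk_start_at_pos(&walk, sg, 16);

        /* ...and later skip a further 4 bytes without copying them. */
        scatterwalk_skip(&walk, 4);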
Signed-off-by: Eric Biggers Signed-off-by: Herbert Xu --- include/crypto/scatterwalk.h | 18 ++++++++++++++++++ 1 file changed, 18 insertions(+) (limited to 'include') diff --git a/include/crypto/scatterwalk.h b/include/crypto/scatterwalk.h index 924efbaefe67..5c7765f601e0 100644 --- a/include/crypto/scatterwalk.h +++ b/include/crypto/scatterwalk.h @@ -33,6 +33,22 @@ static inline void scatterwalk_start(struct scatter_walk *walk, walk->offset = sg->offset; } +/* + * This is equivalent to scatterwalk_start(walk, sg) followed by + * scatterwalk_skip(walk, pos). + */ +static inline void scatterwalk_start_at_pos(struct scatter_walk *walk, + struct scatterlist *sg, + unsigned int pos) +{ + while (pos > sg->length) { + pos -= sg->length; + sg = sg_next(sg); + } + walk->sg = sg; + walk->offset = sg->offset + pos; +} + static inline unsigned int scatterwalk_pagelen(struct scatter_walk *walk) { unsigned int len = walk->sg->offset + walk->sg->length - walk->offset; @@ -92,6 +108,8 @@ static inline void scatterwalk_done(struct scatter_walk *walk, int out, scatterwalk_pagedone(walk, out, more); } +void scatterwalk_skip(struct scatter_walk *walk, unsigned int nbytes); + void scatterwalk_copychunks(void *buf, struct scatter_walk *walk, size_t nbytes, int out); -- cgit v1.2.3 From 31b00fe1e2854d19adc6f4f77b7e551673cd96f3 Mon Sep 17 00:00:00 2001 From: Eric Biggers Date: Wed, 19 Feb 2025 10:23:25 -0800 Subject: crypto: scatterwalk - add new functions for iterating through data Add scatterwalk_next() which consolidates scatterwalk_clamp() and scatterwalk_map(). Also add scatterwalk_done_src() and scatterwalk_done_dst() which consolidate scatterwalk_unmap(), scatterwalk_advance(), and scatterwalk_done() or scatterwalk_pagedone(). A later patch will remove scatterwalk_done() and scatterwalk_pagedone(). The new code eliminates the error-prone 'more' parameter. Advancing to the next sg entry now only happens just-in-time in scatterwalk_next(). The new code also pairs the dcache flush more closely with the actual write, similar to memcpy_to_page(). Previously it was paired with advancing to the next page. This is currently causing bugs where the dcache flush is incorrectly being skipped, usually due to scatterwalk_copychunks() being called without a following scatterwalk_done(). The dcache flush may have been placed where it was in order to not call flush_dcache_page() redundantly when visiting a page more than once. However, that case is rare in practice, and most architectures either do not implement flush_dcache_page() anyway or implement it lazily where it just clears a page flag. Another limitation of the old code was that by the time the flush happened, there was no way to tell if more than one page needed to be flushed. That has been sufficient because the code goes page by page, but I would like to optimize that on !HIGHMEM platforms. The new code makes this possible, and a later patch will implement this optimization. 
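The intended calling pattern for a source walk then looks roughly like this, where process() stands in for the caller's actual work on each segment:

        while (total) {
                unsigned int len;
                const void *addr = scatterwalk_next(&walk, total, &len);

                process(addr, len);
                scatterwalk_done_src(&walk, addr, len);
                total -= len;
        }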
Signed-off-by: Eric Biggers Signed-off-by: Herbert Xu --- include/crypto/scatterwalk.h | 69 ++++++++++++++++++++++++++++++++++++++++---- 1 file changed, 63 insertions(+), 6 deletions(-) (limited to 'include') diff --git a/include/crypto/scatterwalk.h b/include/crypto/scatterwalk.h index 5c7765f601e0..8e83c43016c9 100644 --- a/include/crypto/scatterwalk.h +++ b/include/crypto/scatterwalk.h @@ -64,12 +64,6 @@ static inline unsigned int scatterwalk_clamp(struct scatter_walk *walk, return min(nbytes, scatterwalk_pagelen(walk)); } -static inline void scatterwalk_advance(struct scatter_walk *walk, - unsigned int nbytes) -{ - walk->offset += nbytes; -} - static inline struct page *scatterwalk_page(struct scatter_walk *walk) { return sg_page(walk->sg) + (walk->offset >> PAGE_SHIFT); @@ -86,6 +80,24 @@ static inline void *scatterwalk_map(struct scatter_walk *walk) offset_in_page(walk->offset); } +/** + * scatterwalk_next() - Get the next data buffer in a scatterlist walk + * @walk: the scatter_walk + * @total: the total number of bytes remaining, > 0 + * @nbytes_ret: (out) the next number of bytes available, <= @total + * + * Return: A virtual address for the next segment of data from the scatterlist. + * The caller must call scatterwalk_done_src() or scatterwalk_done_dst() + * when it is done using this virtual address. + */ +static inline void *scatterwalk_next(struct scatter_walk *walk, + unsigned int total, + unsigned int *nbytes_ret) +{ + *nbytes_ret = scatterwalk_clamp(walk, total); + return scatterwalk_map(walk); +} + static inline void scatterwalk_pagedone(struct scatter_walk *walk, int out, unsigned int more) { @@ -108,6 +120,51 @@ static inline void scatterwalk_done(struct scatter_walk *walk, int out, scatterwalk_pagedone(walk, out, more); } +static inline void scatterwalk_advance(struct scatter_walk *walk, + unsigned int nbytes) +{ + walk->offset += nbytes; +} + +/** + * scatterwalk_done_src() - Finish one step of a walk of source scatterlist + * @walk: the scatter_walk + * @vaddr: the address returned by scatterwalk_next() + * @nbytes: the number of bytes processed this step, less than or equal to the + * number of bytes that scatterwalk_next() returned. + * + * Use this if the @vaddr was not written to, i.e. it is source data. + */ +static inline void scatterwalk_done_src(struct scatter_walk *walk, + const void *vaddr, unsigned int nbytes) +{ + scatterwalk_unmap((void *)vaddr); + scatterwalk_advance(walk, nbytes); +} + +/** + * scatterwalk_done_dst() - Finish one step of a walk of destination scatterlist + * @walk: the scatter_walk + * @vaddr: the address returned by scatterwalk_next() + * @nbytes: the number of bytes processed this step, less than or equal to the + * number of bytes that scatterwalk_next() returned. + * + * Use this if the @vaddr may have been written to, i.e. it is destination data. + */ +static inline void scatterwalk_done_dst(struct scatter_walk *walk, + void *vaddr, unsigned int nbytes) +{ + scatterwalk_unmap(vaddr); + /* + * Explicitly check ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE instead of just + * relying on flush_dcache_page() being a no-op when not implemented, + * since otherwise the BUG_ON in sg_page() does not get optimized out. 
+ */ + if (ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE) + flush_dcache_page(scatterwalk_page(walk)); + scatterwalk_advance(walk, nbytes); +} + void scatterwalk_skip(struct scatter_walk *walk, unsigned int nbytes); void scatterwalk_copychunks(void *buf, struct scatter_walk *walk, -- cgit v1.2.3 From bb699e724f3a6cc5c016dad0724e7ed12bc7278b Mon Sep 17 00:00:00 2001 From: Eric Biggers Date: Wed, 19 Feb 2025 10:23:26 -0800 Subject: crypto: scatterwalk - add new functions for copying data Add memcpy_from_sglist() and memcpy_to_sglist() which are more readable versions of scatterwalk_map_and_copy() with the 'out' argument 0 and 1 respectively. They follow the same argument order as memcpy_from_page() and memcpy_to_page() from . Note that in the case of memcpy_from_sglist(), this also happens to be the same argument order that scatterwalk_map_and_copy() uses. The new code is also faster, mainly because it builds the scatter_walk directly without creating a temporary scatterlist. E.g., a 20% performance improvement is seen for copying the AES-GCM auth tag. Make scatterwalk_map_and_copy() be a wrapper around memcpy_from_sglist() and memcpy_to_sglist(). Callers of scatterwalk_map_and_copy() should be updated to call memcpy_from_sglist() or memcpy_to_sglist() directly, but there are a lot of them so they aren't all being updated right away. Also add functions memcpy_from_scatterwalk() and memcpy_to_scatterwalk() which are similar but operate on a scatter_walk instead of a scatterlist. These will replace scatterwalk_copychunks() with the 'out' argument 0 and 1 respectively. Their behavior differs slightly from scatterwalk_copychunks() in that they automatically take care of flushing the dcache when needed, making them easier to use. scatterwalk_copychunks() itself is left unchanged for now. It will be removed after its callers are updated to use other functions instead. Signed-off-by: Eric Biggers Signed-off-by: Herbert Xu --- include/crypto/scatterwalk.h | 24 ++++++++++++++++++++++-- 1 file changed, 22 insertions(+), 2 deletions(-) (limited to 'include') diff --git a/include/crypto/scatterwalk.h b/include/crypto/scatterwalk.h index 8e83c43016c9..1689ecd7ddaf 100644 --- a/include/crypto/scatterwalk.h +++ b/include/crypto/scatterwalk.h @@ -170,8 +170,28 @@ void scatterwalk_skip(struct scatter_walk *walk, unsigned int nbytes); void scatterwalk_copychunks(void *buf, struct scatter_walk *walk, size_t nbytes, int out); -void scatterwalk_map_and_copy(void *buf, struct scatterlist *sg, - unsigned int start, unsigned int nbytes, int out); +void memcpy_from_scatterwalk(void *buf, struct scatter_walk *walk, + unsigned int nbytes); + +void memcpy_to_scatterwalk(struct scatter_walk *walk, const void *buf, + unsigned int nbytes); + +void memcpy_from_sglist(void *buf, struct scatterlist *sg, + unsigned int start, unsigned int nbytes); + +void memcpy_to_sglist(struct scatterlist *sg, unsigned int start, + const void *buf, unsigned int nbytes); + +/* In new code, please use memcpy_{from,to}_sglist() directly instead. 
*/ +static inline void scatterwalk_map_and_copy(void *buf, struct scatterlist *sg, + unsigned int start, + unsigned int nbytes, int out) +{ + if (out) + memcpy_to_sglist(sg, start, buf, nbytes); + else + memcpy_from_sglist(buf, sg, start, nbytes); +} struct scatterlist *scatterwalk_ffwd(struct scatterlist dst[2], struct scatterlist *src, -- cgit v1.2.3 From 84b1576355c41a935102da5d62bb28e74be3db45 Mon Sep 17 00:00:00 2001 From: Eric Biggers Date: Wed, 19 Feb 2025 10:23:27 -0800 Subject: crypto: scatterwalk - add scatterwalk_get_sglist() Add a function that creates a scatterlist that represents the remaining data in a walk. This will be used to replace chain_to_walk() in net/tls/tls_device_fallback.c so that it will no longer need to reach into the internals of struct scatter_walk. Cc: Boris Pismenny Cc: Jakub Kicinski Cc: John Fastabend Signed-off-by: Eric Biggers Signed-off-by: Herbert Xu --- include/crypto/scatterwalk.h | 17 +++++++++++++++++ 1 file changed, 17 insertions(+) (limited to 'include') diff --git a/include/crypto/scatterwalk.h b/include/crypto/scatterwalk.h index 1689ecd7ddaf..f6262d05a3c7 100644 --- a/include/crypto/scatterwalk.h +++ b/include/crypto/scatterwalk.h @@ -69,6 +69,23 @@ static inline struct page *scatterwalk_page(struct scatter_walk *walk) return sg_page(walk->sg) + (walk->offset >> PAGE_SHIFT); } +/* + * Create a scatterlist that represents the remaining data in a walk. Uses + * chaining to reference the original scatterlist, so this uses at most two + * entries in @sg_out regardless of the number of entries in the original list. + * Assumes that sg_init_table() was already done. + */ +static inline void scatterwalk_get_sglist(struct scatter_walk *walk, + struct scatterlist sg_out[2]) +{ + if (walk->offset >= walk->sg->offset + walk->sg->length) + scatterwalk_start(walk, sg_next(walk->sg)); + sg_set_page(sg_out, sg_page(walk->sg), + walk->sg->offset + walk->sg->length - walk->offset, + walk->offset); + scatterwalk_crypto_chain(sg_out, sg_next(walk->sg), 2); +} + static inline void scatterwalk_unmap(void *vaddr) { kunmap_local(vaddr); -- cgit v1.2.3 From fa94e45436c15421284c5cd24d497675b1f46433 Mon Sep 17 00:00:00 2001 From: Eric Biggers Date: Wed, 19 Feb 2025 10:23:40 -0800 Subject: crypto: scatterwalk - remove obsolete functions Remove various functions that are no longer used. 
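Callers of the removed scatterwalk_copychunks() migrate to the helpers added earlier in this series, roughly as follows (buf, walk and n assumed):

        /* was: scatterwalk_copychunks(buf, &walk, n, 0); */
        memcpy_from_scatterwalk(buf, &walk, n);

        /* was: scatterwalk_copychunks(buf, &walk, n, 1); */
        memcpy_to_scatterwalk(&walk, buf, n);

        /* was: scatterwalk_copychunks(NULL, &walk, n, 2); */
        scatterwalk_skip(&walk, n);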
Signed-off-by: Eric Biggers Signed-off-by: Herbert Xu --- include/crypto/scatterwalk.h | 25 ------------------------- 1 file changed, 25 deletions(-) (limited to 'include') diff --git a/include/crypto/scatterwalk.h b/include/crypto/scatterwalk.h index f6262d05a3c7..ac03fdf88b2a 100644 --- a/include/crypto/scatterwalk.h +++ b/include/crypto/scatterwalk.h @@ -115,28 +115,6 @@ static inline void *scatterwalk_next(struct scatter_walk *walk, return scatterwalk_map(walk); } -static inline void scatterwalk_pagedone(struct scatter_walk *walk, int out, - unsigned int more) -{ - if (out) { - struct page *page; - - page = sg_page(walk->sg) + ((walk->offset - 1) >> PAGE_SHIFT); - flush_dcache_page(page); - } - - if (more && walk->offset >= walk->sg->offset + walk->sg->length) - scatterwalk_start(walk, sg_next(walk->sg)); -} - -static inline void scatterwalk_done(struct scatter_walk *walk, int out, - int more) -{ - if (!more || walk->offset >= walk->sg->offset + walk->sg->length || - !(walk->offset & (PAGE_SIZE - 1))) - scatterwalk_pagedone(walk, out, more); -} - static inline void scatterwalk_advance(struct scatter_walk *walk, unsigned int nbytes) { @@ -184,9 +162,6 @@ static inline void scatterwalk_done_dst(struct scatter_walk *walk, void scatterwalk_skip(struct scatter_walk *walk, unsigned int nbytes); -void scatterwalk_copychunks(void *buf, struct scatter_walk *walk, - size_t nbytes, int out); - void memcpy_from_scatterwalk(void *buf, struct scatter_walk *walk, unsigned int nbytes); -- cgit v1.2.3 From 641938d3bba64287f199431fafd917bc7b7c9d1a Mon Sep 17 00:00:00 2001 From: Eric Biggers Date: Wed, 19 Feb 2025 10:23:41 -0800 Subject: crypto: scatterwalk - don't split at page boundaries when !HIGHMEM When !HIGHMEM, the kmap_local_page() in the scatterlist walker does not actually map anything, and the address it returns is just the address from the kernel's direct map, where each sg entry's data is virtually contiguous. To improve performance, stop unnecessarily clamping data segments to page boundaries in this case. For now, still limit segments to PAGE_SIZE. This is needed to prevent preemption from being disabled for too long when SIMD is used, and to support the alignmask case which still uses a page-sized bounce buffer. Even so, this change still helps a lot in cases where messages cross a page boundary. For example, testing IPsec with AES-GCM on x86_64, the messages are 1424 bytes which is less than PAGE_SIZE, but on the Rx side over a third cross a page boundary. These ended up being processed in three parts, with the middle part going through skcipher_next_slow which uses a 16-byte bounce buffer. That was causing a significant amount of overhead which unnecessarily reduced the performance benefit of the new x86_64 AES-GCM assembly code. This change solves the problem; all these messages now get passed to the assembly code in one part. 
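As an illustration with arbitrary numbers: for a 1424-byte segment starting 3000 bytes into a 4096-byte page, the HIGHMEM path clamps the first step to 4096 - 3000 = 1096 bytes and returns the remaining 328 bytes in a second step, whereas the !HIGHMEM path returns all 1424 bytes in a single step since only the PAGE_SIZE cap applies.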
Signed-off-by: Eric Biggers Signed-off-by: Herbert Xu --- include/crypto/scatterwalk.h | 79 ++++++++++++++++++++++++++++++++------------ 1 file changed, 57 insertions(+), 22 deletions(-) (limited to 'include') diff --git a/include/crypto/scatterwalk.h b/include/crypto/scatterwalk.h index ac03fdf88b2a..3024adbdd443 100644 --- a/include/crypto/scatterwalk.h +++ b/include/crypto/scatterwalk.h @@ -49,24 +49,35 @@ static inline void scatterwalk_start_at_pos(struct scatter_walk *walk, walk->offset = sg->offset + pos; } -static inline unsigned int scatterwalk_pagelen(struct scatter_walk *walk) -{ - unsigned int len = walk->sg->offset + walk->sg->length - walk->offset; - unsigned int len_this_page = offset_in_page(~walk->offset) + 1; - return len_this_page > len ? len : len_this_page; -} - static inline unsigned int scatterwalk_clamp(struct scatter_walk *walk, unsigned int nbytes) { + unsigned int len_this_sg; + unsigned int limit; + if (walk->offset >= walk->sg->offset + walk->sg->length) scatterwalk_start(walk, sg_next(walk->sg)); - return min(nbytes, scatterwalk_pagelen(walk)); -} + len_this_sg = walk->sg->offset + walk->sg->length - walk->offset; -static inline struct page *scatterwalk_page(struct scatter_walk *walk) -{ - return sg_page(walk->sg) + (walk->offset >> PAGE_SHIFT); + /* + * HIGHMEM case: the page may have to be mapped into memory. To avoid + * the complexity of having to map multiple pages at once per sg entry, + * clamp the returned length to not cross a page boundary. + * + * !HIGHMEM case: no mapping is needed; all pages of the sg entry are + * already mapped contiguously in the kernel's direct map. For improved + * performance, allow the walker to return data segments that cross a + * page boundary. Do still cap the length to PAGE_SIZE, since some + * users rely on that to avoid disabling preemption for too long when + * using SIMD. It's also needed for when skcipher_walk uses a bounce + * page due to the data not being aligned to the algorithm's alignmask. + */ + if (IS_ENABLED(CONFIG_HIGHMEM)) + limit = PAGE_SIZE - offset_in_page(walk->offset); + else + limit = PAGE_SIZE; + + return min3(nbytes, len_this_sg, limit); } /* @@ -86,15 +97,23 @@ static inline void scatterwalk_get_sglist(struct scatter_walk *walk, scatterwalk_crypto_chain(sg_out, sg_next(walk->sg), 2); } -static inline void scatterwalk_unmap(void *vaddr) -{ - kunmap_local(vaddr); -} - static inline void *scatterwalk_map(struct scatter_walk *walk) { - return kmap_local_page(scatterwalk_page(walk)) + - offset_in_page(walk->offset); + struct page *base_page = sg_page(walk->sg); + + if (IS_ENABLED(CONFIG_HIGHMEM)) + return kmap_local_page(base_page + (walk->offset >> PAGE_SHIFT)) + + offset_in_page(walk->offset); + /* + * When !HIGHMEM we allow the walker to return segments that span a page + * boundary; see scatterwalk_clamp(). To make it clear that in this + * case we're working in the linear buffer of the whole sg entry in the + * kernel's direct map rather than within the mapped buffer of a single + * page, compute the address as an offset from the page_address() of the + * first page of the sg entry. Either way the result is the address in + * the direct map, but this makes it clearer what is really going on. 
+ */ + return page_address(base_page) + walk->offset; } /** @@ -115,6 +134,12 @@ static inline void *scatterwalk_next(struct scatter_walk *walk, return scatterwalk_map(walk); } +static inline void scatterwalk_unmap(const void *vaddr) +{ + if (IS_ENABLED(CONFIG_HIGHMEM)) + kunmap_local(vaddr); +} + static inline void scatterwalk_advance(struct scatter_walk *walk, unsigned int nbytes) { @@ -133,7 +158,7 @@ static inline void scatterwalk_advance(struct scatter_walk *walk, static inline void scatterwalk_done_src(struct scatter_walk *walk, const void *vaddr, unsigned int nbytes) { - scatterwalk_unmap((void *)vaddr); + scatterwalk_unmap(vaddr); scatterwalk_advance(walk, nbytes); } @@ -154,9 +179,19 @@ static inline void scatterwalk_done_dst(struct scatter_walk *walk, * Explicitly check ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE instead of just * relying on flush_dcache_page() being a no-op when not implemented, * since otherwise the BUG_ON in sg_page() does not get optimized out. + * This also avoids having to consider whether the loop would get + * reliably optimized out or not. */ - if (ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE) - flush_dcache_page(scatterwalk_page(walk)); + if (ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE) { + struct page *base_page, *start_page, *end_page, *page; + + base_page = sg_page(walk->sg); + start_page = base_page + (walk->offset >> PAGE_SHIFT); + end_page = base_page + ((walk->offset + nbytes + + PAGE_SIZE - 1) >> PAGE_SHIFT); + for (page = start_page; page < end_page; page++) + flush_dcache_page(page); + } scatterwalk_advance(walk, nbytes); } -- cgit v1.2.3 From f79d2d2852facc72b91a78e5c423722c7dc53d72 Mon Sep 17 00:00:00 2001 From: Herbert Xu Date: Sun, 23 Feb 2025 14:27:51 +0800 Subject: crypto: skcipher - Use restrict rather than hand-rolling accesses Rather than accessing 'alg' directly to avoid the aliasing issue which leads to unnecessary reloads, use the __restrict keyword to explicitly tell the compiler that there is no aliasing. This generates equivalent if not superior code on x86 with gcc 12. Note that in skcipher_walk_virt the alg assignment is moved after might_sleep_if because that function is a compiler barrier and forces a reload. 
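A toy example of the underlying aliasing issue (not code from this patch): without restrict the compiler must assume the store below may alias *a and reload it for each use; with restrict a single load suffices:

        int sum_twice(const int *restrict a, int *restrict b)
        {
                *b = 1;                 /* guaranteed not to alias *a */
                return *a + *a;         /* *a may stay in a register */
        }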
Signed-off-by: Herbert Xu --- include/crypto/internal/skcipher.h | 14 ++++++++------ 1 file changed, 8 insertions(+), 6 deletions(-) (limited to 'include') diff --git a/include/crypto/internal/skcipher.h b/include/crypto/internal/skcipher.h index 4f49621d3eb6..d6ae7a86fed2 100644 --- a/include/crypto/internal/skcipher.h +++ b/include/crypto/internal/skcipher.h @@ -197,13 +197,15 @@ int lskcipher_register_instance(struct crypto_template *tmpl, struct lskcipher_instance *inst); int skcipher_walk_done(struct skcipher_walk *walk, int res); -int skcipher_walk_virt(struct skcipher_walk *walk, - struct skcipher_request *req, +int skcipher_walk_virt(struct skcipher_walk *__restrict walk, + struct skcipher_request *__restrict req, bool atomic); -int skcipher_walk_aead_encrypt(struct skcipher_walk *walk, - struct aead_request *req, bool atomic); -int skcipher_walk_aead_decrypt(struct skcipher_walk *walk, - struct aead_request *req, bool atomic); +int skcipher_walk_aead_encrypt(struct skcipher_walk *__restrict walk, + struct aead_request *__restrict req, + bool atomic); +int skcipher_walk_aead_decrypt(struct skcipher_walk *__restrict walk, + struct aead_request *__restrict req, + bool atomic); static inline void skcipher_walk_abort(struct skcipher_walk *walk) { -- cgit v1.2.3 From 2ac92fedb6369a1a17ff995198b8e84ec0cf7a4e Mon Sep 17 00:00:00 2001 From: David Howells Date: Tue, 10 Nov 2020 23:57:35 +0000 Subject: crypto/krb5: Add some constants out of sunrpc headers Add some constants from the sunrpc headers. Signed-off-by: David Howells cc: Herbert Xu cc: "David S. Miller" cc: Chuck Lever cc: Marc Dionne cc: Eric Dumazet cc: Jakub Kicinski cc: Paolo Abeni cc: Simon Horman cc: linux-afs@lists.infradead.org cc: linux-nfs@vger.kernel.org cc: linux-crypto@vger.kernel.org cc: netdev@vger.kernel.org --- include/crypto/krb5.h | 51 +++++++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 51 insertions(+) create mode 100644 include/crypto/krb5.h (limited to 'include') diff --git a/include/crypto/krb5.h b/include/crypto/krb5.h new file mode 100644 index 000000000000..44a6342471d7 --- /dev/null +++ b/include/crypto/krb5.h @@ -0,0 +1,51 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* Kerberos 5 crypto + * + * Copyright (C) 2025 Red Hat, Inc. All Rights Reserved. + * Written by David Howells (dhowells@redhat.com) + */ + +#ifndef _CRYPTO_KRB5_H +#define _CRYPTO_KRB5_H + +/* + * Per Kerberos v5 protocol spec crypto types from the wire. These get mapped + * to linux kernel crypto routines. + */ +#define KRB5_ENCTYPE_NULL 0x0000 +#define KRB5_ENCTYPE_DES_CBC_CRC 0x0001 /* DES cbc mode with CRC-32 */ +#define KRB5_ENCTYPE_DES_CBC_MD4 0x0002 /* DES cbc mode with RSA-MD4 */ +#define KRB5_ENCTYPE_DES_CBC_MD5 0x0003 /* DES cbc mode with RSA-MD5 */ +#define KRB5_ENCTYPE_DES_CBC_RAW 0x0004 /* DES cbc mode raw */ +/* XXX deprecated? 
*/ +#define KRB5_ENCTYPE_DES3_CBC_SHA 0x0005 /* DES-3 cbc mode with NIST-SHA */ +#define KRB5_ENCTYPE_DES3_CBC_RAW 0x0006 /* DES-3 cbc mode raw */ +#define KRB5_ENCTYPE_DES_HMAC_SHA1 0x0008 +#define KRB5_ENCTYPE_DES3_CBC_SHA1 0x0010 +#define KRB5_ENCTYPE_AES128_CTS_HMAC_SHA1_96 0x0011 +#define KRB5_ENCTYPE_AES256_CTS_HMAC_SHA1_96 0x0012 +#define KRB5_ENCTYPE_ARCFOUR_HMAC 0x0017 +#define KRB5_ENCTYPE_ARCFOUR_HMAC_EXP 0x0018 +#define KRB5_ENCTYPE_UNKNOWN 0x01ff + +#define KRB5_CKSUMTYPE_CRC32 0x0001 +#define KRB5_CKSUMTYPE_RSA_MD4 0x0002 +#define KRB5_CKSUMTYPE_RSA_MD4_DES 0x0003 +#define KRB5_CKSUMTYPE_DESCBC 0x0004 +#define KRB5_CKSUMTYPE_RSA_MD5 0x0007 +#define KRB5_CKSUMTYPE_RSA_MD5_DES 0x0008 +#define KRB5_CKSUMTYPE_NIST_SHA 0x0009 +#define KRB5_CKSUMTYPE_HMAC_SHA1_DES3 0x000c +#define KRB5_CKSUMTYPE_HMAC_SHA1_96_AES128 0x000f +#define KRB5_CKSUMTYPE_HMAC_SHA1_96_AES256 0x0010 +#define KRB5_CKSUMTYPE_HMAC_MD5_ARCFOUR -138 /* Microsoft md5 hmac cksumtype */ + +/* + * Constants used for key derivation + */ +/* from rfc3961 */ +#define KEY_USAGE_SEED_CHECKSUM (0x99) +#define KEY_USAGE_SEED_ENCRYPTION (0xAA) +#define KEY_USAGE_SEED_INTEGRITY (0x55) + +#endif /* _CRYPTO_KRB5_H */ -- cgit v1.2.3 From d1775a177f7f38156d541c8a3e3c91eaa6e69699 Mon Sep 17 00:00:00 2001 From: David Howells Date: Fri, 17 Jan 2025 11:46:23 +0000 Subject: crypto: Add 'krb5enc' hash and cipher AEAD algorithm Add an AEAD template that does hash-then-cipher (unlike authenc that does cipher-then-hash). This is required for a number of Kerberos 5 encoding types. [!] Note that the net/sunrpc/auth_gss/ implementation gets a pair of ciphers, one non-CTS and one CTS, using the former to do all the aligned blocks and the latter to do the last two blocks if they aren't also aligned. It may be necessary to do this here too for performance reasons - but there are considerations both ways: (1) firstly, there is an optimised assembly version of cts(cbc(aes)) on x86_64 that should be used instead of having two ciphers; (2) secondly, none of the hardware offload drivers seem to offer CTS support (Intel QAT does not, for instance). However, I don't know if it's possible to query the crypto API to find out whether there's an optimised CTS algorithm available. Signed-off-by: David Howells cc: Herbert Xu cc: "David S. Miller" cc: Chuck Lever cc: Marc Dionne cc: Eric Dumazet cc: Jakub Kicinski cc: Paolo Abeni cc: Simon Horman cc: linux-afs@lists.infradead.org cc: linux-nfs@vger.kernel.org cc: linux-crypto@vger.kernel.org cc: netdev@vger.kernel.org --- include/crypto/authenc.h | 2 ++ 1 file changed, 2 insertions(+) (limited to 'include') diff --git a/include/crypto/authenc.h b/include/crypto/authenc.h index 5f92a986083c..15a9caa2354a 100644 --- a/include/crypto/authenc.h +++ b/include/crypto/authenc.h @@ -28,5 +28,7 @@ struct crypto_authenc_keys { int crypto_authenc_extractkeys(struct crypto_authenc_keys *keys, const u8 *key, unsigned int keylen); +int crypto_krb5enc_extractkeys(struct crypto_authenc_keys *keys, const u8 *key, + unsigned int keylen); #endif /* _CRYPTO_AUTHENC_H */ -- cgit v1.2.3 From 3936f02bf2d3308a7359dd37dd96cd60603d8170 Mon Sep 17 00:00:00 2001 From: David Howells Date: Tue, 10 Nov 2020 17:00:54 +0000 Subject: crypto/krb5: Implement Kerberos crypto core Provide core structures, an encoding-type registry and basic module and config bits for a generic Kerberos crypto library. Signed-off-by: David Howells cc: Herbert Xu cc: "David S. 
Miller" cc: Chuck Lever cc: Marc Dionne cc: Eric Dumazet cc: Jakub Kicinski cc: Paolo Abeni cc: Simon Horman cc: linux-afs@lists.infradead.org cc: linux-nfs@vger.kernel.org cc: linux-crypto@vger.kernel.org cc: netdev@vger.kernel.org --- include/crypto/krb5.h | 54 +++++++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 54 insertions(+) (limited to 'include') diff --git a/include/crypto/krb5.h b/include/crypto/krb5.h index 44a6342471d7..8fa6715ab35b 100644 --- a/include/crypto/krb5.h +++ b/include/crypto/krb5.h @@ -8,6 +8,12 @@ #ifndef _CRYPTO_KRB5_H #define _CRYPTO_KRB5_H +#include +#include + +struct crypto_shash; +struct scatterlist; + /* * Per Kerberos v5 protocol spec crypto types from the wire. These get mapped * to linux kernel crypto routines. @@ -48,4 +54,52 @@ #define KEY_USAGE_SEED_ENCRYPTION (0xAA) #define KEY_USAGE_SEED_INTEGRITY (0x55) +/* + * Mode of operation. + */ +enum krb5_crypto_mode { + KRB5_CHECKSUM_MODE, /* Checksum only */ + KRB5_ENCRYPT_MODE, /* Fully encrypted, possibly with integrity checksum */ +}; + +struct krb5_buffer { + unsigned int len; + void *data; +}; + +/* + * Kerberos encoding type definition. + */ +struct krb5_enctype { + int etype; /* Encryption (key) type */ + int ctype; /* Checksum type */ + const char *name; /* "Friendly" name */ + const char *encrypt_name; /* Crypto encrypt+checksum name */ + const char *cksum_name; /* Crypto checksum name */ + const char *hash_name; /* Crypto hash name */ + const char *derivation_enc; /* Cipher used in key derivation */ + u16 block_len; /* Length of encryption block */ + u16 conf_len; /* Length of confounder (normally == block_len) */ + u16 cksum_len; /* Length of checksum */ + u16 key_bytes; /* Length of raw key, in bytes */ + u16 key_len; /* Length of final key, in bytes */ + u16 hash_len; /* Length of hash in bytes */ + u16 prf_len; /* Length of PRF() result in bytes */ + u16 Kc_len; /* Length of Kc in bytes */ + u16 Ke_len; /* Length of Ke in bytes */ + u16 Ki_len; /* Length of Ki in bytes */ + bool keyed_cksum; /* T if a keyed cksum */ + + const struct krb5_crypto_profile *profile; + + int (*random_to_key)(const struct krb5_enctype *krb5, + const struct krb5_buffer *in, + struct krb5_buffer *out); /* complete key generation */ +}; + +/* + * krb5_api.c + */ +const struct krb5_enctype *crypto_krb5_find_enctype(u32 enctype); + #endif /* _CRYPTO_KRB5_H */ -- cgit v1.2.3 From 025ac491f4eeb48c03353719f0de20a6db36b826 Mon Sep 17 00:00:00 2001 From: David Howells Date: Thu, 19 Nov 2020 09:46:48 +0000 Subject: crypto/krb5: Add an API to query the layout of the crypto section Provide some functions to allow the called to find out about the layout of the crypto section: (1) Calculate, for a given size of data, how big a buffer will be required to hold it and where the data will be within it. (2) Calculate, for an amount of buffer, what's the maximum size of data that will fit therein, and where it will start. (3) Determine where the data will be in a received message. Signed-off-by: David Howells cc: Herbert Xu cc: "David S. 
Miller" cc: Chuck Lever cc: Marc Dionne cc: Eric Dumazet cc: Jakub Kicinski cc: Paolo Abeni cc: Simon Horman cc: linux-afs@lists.infradead.org cc: linux-nfs@vger.kernel.org cc: linux-crypto@vger.kernel.org cc: netdev@vger.kernel.org --- include/crypto/krb5.h | 9 +++++++++ 1 file changed, 9 insertions(+) (limited to 'include') diff --git a/include/crypto/krb5.h b/include/crypto/krb5.h index 8fa6715ab35b..b414141b8b42 100644 --- a/include/crypto/krb5.h +++ b/include/crypto/krb5.h @@ -101,5 +101,14 @@ struct krb5_enctype { * krb5_api.c */ const struct krb5_enctype *crypto_krb5_find_enctype(u32 enctype); +size_t crypto_krb5_how_much_buffer(const struct krb5_enctype *krb5, + enum krb5_crypto_mode mode, + size_t data_size, size_t *_offset); +size_t crypto_krb5_how_much_data(const struct krb5_enctype *krb5, + enum krb5_crypto_mode mode, + size_t *_buffer_size, size_t *_offset); +void crypto_krb5_where_is_the_data(const struct krb5_enctype *krb5, + enum krb5_crypto_mode mode, + size_t *_offset, size_t *_len); #endif /* _CRYPTO_KRB5_H */ -- cgit v1.2.3 From a9c27d2d87a388433db100889262841afe771f7a Mon Sep 17 00:00:00 2001 From: David Howells Date: Fri, 17 Jan 2025 13:29:24 +0000 Subject: crypto/krb5: Add an API to alloc and prepare a crypto object Add an API by which users of the krb5 crypto library can get an allocated and keyed crypto object. For encryption-mode operation, an AEAD object is returned; for checksum-mode operation, a synchronous hash object is returned. Signed-off-by: David Howells cc: Herbert Xu cc: "David S. Miller" cc: Chuck Lever cc: Marc Dionne cc: Eric Dumazet cc: Jakub Kicinski cc: Paolo Abeni cc: Simon Horman cc: linux-afs@lists.infradead.org cc: linux-nfs@vger.kernel.org cc: linux-crypto@vger.kernel.org cc: netdev@vger.kernel.org --- include/crypto/krb5.h | 7 +++++++ 1 file changed, 7 insertions(+) (limited to 'include') diff --git a/include/crypto/krb5.h b/include/crypto/krb5.h index b414141b8b42..94af2c558fa1 100644 --- a/include/crypto/krb5.h +++ b/include/crypto/krb5.h @@ -10,6 +10,7 @@ #include #include +#include struct crypto_shash; struct scatterlist; @@ -110,5 +111,11 @@ size_t crypto_krb5_how_much_data(const struct krb5_enctype *krb5, void crypto_krb5_where_is_the_data(const struct krb5_enctype *krb5, enum krb5_crypto_mode mode, size_t *_offset, size_t *_len); +struct crypto_aead *crypto_krb5_prepare_encryption(const struct krb5_enctype *krb5, + const struct krb5_buffer *TK, + u32 usage, gfp_t gfp); +struct crypto_shash *crypto_krb5_prepare_checksum(const struct krb5_enctype *krb5, + const struct krb5_buffer *TK, + u32 usage, gfp_t gfp); #endif /* _CRYPTO_KRB5_H */ -- cgit v1.2.3 From 0392b110ccaf543b31842b04c8142f4f8ce7bdec Mon Sep 17 00:00:00 2001 From: David Howells Date: Thu, 9 Jan 2025 09:03:35 +0000 Subject: crypto/krb5: Add an API to perform requests Add an API by which users of the krb5 crypto library can perform crypto requests, such as encrypt, decrypt, get_mic and verify_mic. These functions take the previously prepared crypto objects to work on. Signed-off-by: David Howells cc: Herbert Xu cc: "David S. 
Miller" cc: Chuck Lever cc: Marc Dionne cc: Eric Dumazet cc: Jakub Kicinski cc: Paolo Abeni cc: Simon Horman cc: linux-afs@lists.infradead.org cc: linux-nfs@vger.kernel.org cc: linux-crypto@vger.kernel.org cc: netdev@vger.kernel.org --- include/crypto/krb5.h | 21 +++++++++++++++++++++ 1 file changed, 21 insertions(+) (limited to 'include') diff --git a/include/crypto/krb5.h b/include/crypto/krb5.h index 94af2c558fa1..81739e9828d3 100644 --- a/include/crypto/krb5.h +++ b/include/crypto/krb5.h @@ -117,5 +117,26 @@ struct crypto_aead *crypto_krb5_prepare_encryption(const struct krb5_enctype *kr struct crypto_shash *crypto_krb5_prepare_checksum(const struct krb5_enctype *krb5, const struct krb5_buffer *TK, u32 usage, gfp_t gfp); +ssize_t crypto_krb5_encrypt(const struct krb5_enctype *krb5, + struct crypto_aead *aead, + struct scatterlist *sg, unsigned int nr_sg, + size_t sg_len, + size_t data_offset, size_t data_len, + bool preconfounded); +int crypto_krb5_decrypt(const struct krb5_enctype *krb5, + struct crypto_aead *aead, + struct scatterlist *sg, unsigned int nr_sg, + size_t *_offset, size_t *_len); +ssize_t crypto_krb5_get_mic(const struct krb5_enctype *krb5, + struct crypto_shash *shash, + const struct krb5_buffer *metadata, + struct scatterlist *sg, unsigned int nr_sg, + size_t sg_len, + size_t data_offset, size_t data_len); +int crypto_krb5_verify_mic(const struct krb5_enctype *krb5, + struct crypto_shash *shash, + const struct krb5_buffer *metadata, + struct scatterlist *sg, unsigned int nr_sg, + size_t *_offset, size_t *_len); #endif /* _CRYPTO_KRB5_H */ -- cgit v1.2.3 From 41cf1d1e8a86c5c675982136f07c519c4b15b157 Mon Sep 17 00:00:00 2001 From: David Howells Date: Thu, 3 Sep 2020 12:05:04 +0100 Subject: crypto/krb5: Provide infrastructure and key derivation Provide key derivation interface functions and a helper to implement the PRF+ function from rfc4402. Signed-off-by: David Howells cc: Herbert Xu cc: "David S. Miller" cc: Chuck Lever cc: Marc Dionne cc: Eric Dumazet cc: Jakub Kicinski cc: Paolo Abeni cc: Simon Horman cc: linux-afs@lists.infradead.org cc: linux-nfs@vger.kernel.org cc: linux-crypto@vger.kernel.org cc: netdev@vger.kernel.org --- include/crypto/krb5.h | 10 ++++++++++ 1 file changed, 10 insertions(+) (limited to 'include') diff --git a/include/crypto/krb5.h b/include/crypto/krb5.h index 81739e9828d3..b12f012cf354 100644 --- a/include/crypto/krb5.h +++ b/include/crypto/krb5.h @@ -139,4 +139,14 @@ int crypto_krb5_verify_mic(const struct krb5_enctype *krb5, struct scatterlist *sg, unsigned int nr_sg, size_t *_offset, size_t *_len); +/* + * krb5_kdf.c + */ +int crypto_krb5_calc_PRFplus(const struct krb5_enctype *krb5, + const struct krb5_buffer *K, + unsigned int L, + const struct krb5_buffer *S, + struct krb5_buffer *result, + gfp_t gfp); + #endif /* _CRYPTO_KRB5_H */ -- cgit v1.2.3 From 6c3c0e86c2acf53bf67c095c67335a0bec2a16af Mon Sep 17 00:00:00 2001 From: David Howells Date: Mon, 3 Feb 2025 13:42:41 +0000 Subject: crypto/krb5: Implement the AES enctypes from rfc8009 Implement the aes128-cts-hmac-sha256-128 and aes256-cts-hmac-sha384-192 enctypes from rfc8009, overriding the rfc3961 kerberos 5 simplified crypto scheme. Signed-off-by: David Howells cc: Herbert Xu cc: "David S. 
Miller" cc: Chuck Lever cc: Marc Dionne cc: Eric Dumazet cc: Jakub Kicinski cc: Paolo Abeni cc: Simon Horman cc: linux-afs@lists.infradead.org cc: linux-nfs@vger.kernel.org cc: linux-crypto@vger.kernel.org cc: netdev@vger.kernel.org --- include/crypto/krb5.h | 4 ++++ 1 file changed, 4 insertions(+) (limited to 'include') diff --git a/include/crypto/krb5.h b/include/crypto/krb5.h index b12f012cf354..b8fda81379ab 100644 --- a/include/crypto/krb5.h +++ b/include/crypto/krb5.h @@ -31,6 +31,8 @@ struct scatterlist; #define KRB5_ENCTYPE_DES3_CBC_SHA1 0x0010 #define KRB5_ENCTYPE_AES128_CTS_HMAC_SHA1_96 0x0011 #define KRB5_ENCTYPE_AES256_CTS_HMAC_SHA1_96 0x0012 +#define KRB5_ENCTYPE_AES128_CTS_HMAC_SHA256_128 0x0013 +#define KRB5_ENCTYPE_AES256_CTS_HMAC_SHA384_192 0x0014 #define KRB5_ENCTYPE_ARCFOUR_HMAC 0x0017 #define KRB5_ENCTYPE_ARCFOUR_HMAC_EXP 0x0018 #define KRB5_ENCTYPE_UNKNOWN 0x01ff @@ -45,6 +47,8 @@ struct scatterlist; #define KRB5_CKSUMTYPE_HMAC_SHA1_DES3 0x000c #define KRB5_CKSUMTYPE_HMAC_SHA1_96_AES128 0x000f #define KRB5_CKSUMTYPE_HMAC_SHA1_96_AES256 0x0010 +#define KRB5_CKSUMTYPE_HMAC_SHA256_128_AES128 0x0013 +#define KRB5_CKSUMTYPE_HMAC_SHA384_192_AES256 0x0014 #define KRB5_CKSUMTYPE_HMAC_MD5_ARCFOUR -138 /* Microsoft md5 hmac cksumtype */ /* -- cgit v1.2.3 From 742e38d4d4033e7ff53178acf7edd2b1fe0142ef Mon Sep 17 00:00:00 2001 From: David Howells Date: Fri, 25 Sep 2020 12:24:50 +0100 Subject: crypto/krb5: Implement the Camellia enctypes from rfc6803 Implement the camellia128-cts-cmac and camellia256-cts-cmac enctypes from rfc6803. Note that the test vectors in rfc6803 for encryption are incomplete, lacking the key usage number needed to derive Ke and Ki, and there are errata for this: https://www.rfc-editor.org/errata_search.php?rfc=6803 Signed-off-by: David Howells cc: Herbert Xu cc: "David S. Miller" cc: Chuck Lever cc: Marc Dionne cc: Eric Dumazet cc: Jakub Kicinski cc: Paolo Abeni cc: Simon Horman cc: linux-afs@lists.infradead.org cc: linux-nfs@vger.kernel.org cc: linux-crypto@vger.kernel.org cc: netdev@vger.kernel.org --- include/crypto/krb5.h | 4 ++++ 1 file changed, 4 insertions(+) (limited to 'include') diff --git a/include/crypto/krb5.h b/include/crypto/krb5.h index b8fda81379ab..62d998e62f47 100644 --- a/include/crypto/krb5.h +++ b/include/crypto/krb5.h @@ -35,6 +35,8 @@ struct scatterlist; #define KRB5_ENCTYPE_AES256_CTS_HMAC_SHA384_192 0x0014 #define KRB5_ENCTYPE_ARCFOUR_HMAC 0x0017 #define KRB5_ENCTYPE_ARCFOUR_HMAC_EXP 0x0018 +#define KRB5_ENCTYPE_CAMELLIA128_CTS_CMAC 0x0019 +#define KRB5_ENCTYPE_CAMELLIA256_CTS_CMAC 0x001a #define KRB5_ENCTYPE_UNKNOWN 0x01ff #define KRB5_CKSUMTYPE_CRC32 0x0001 @@ -47,6 +49,8 @@ struct scatterlist; #define KRB5_CKSUMTYPE_HMAC_SHA1_DES3 0x000c #define KRB5_CKSUMTYPE_HMAC_SHA1_96_AES128 0x000f #define KRB5_CKSUMTYPE_HMAC_SHA1_96_AES256 0x0010 +#define KRB5_CKSUMTYPE_CMAC_CAMELLIA128 0x0011 +#define KRB5_CKSUMTYPE_CMAC_CAMELLIA256 0x0012 #define KRB5_CKSUMTYPE_HMAC_SHA256_128_AES128 0x0013 #define KRB5_CKSUMTYPE_HMAC_SHA384_192_AES256 0x0014 #define KRB5_CKSUMTYPE_HMAC_MD5_ARCFOUR -138 /* Microsoft md5 hmac cksumtype */ -- cgit v1.2.3 From c3e054dbdb08fef653ea3ef9e6dca449a214c976 Mon Sep 17 00:00:00 2001 From: Herbert Xu Date: Tue, 25 Feb 2025 13:03:26 +0800 Subject: crypto: api - Move struct crypto_type into internal.h Move the definition of struct crypto_type into internal.h as it is only used by API implementors and not algorithm implementors. 
Signed-off-by: Herbert Xu --- include/crypto/algapi.h | 14 -------------- 1 file changed, 14 deletions(-) (limited to 'include') diff --git a/include/crypto/algapi.h b/include/crypto/algapi.h index 11065978d360..94989b2e1350 100644 --- a/include/crypto/algapi.h +++ b/include/crypto/algapi.h @@ -55,20 +55,6 @@ struct scatterlist; struct seq_file; struct sk_buff; -struct crypto_type { - unsigned int (*ctxsize)(struct crypto_alg *alg, u32 type, u32 mask); - unsigned int (*extsize)(struct crypto_alg *alg); - int (*init_tfm)(struct crypto_tfm *tfm); - void (*show)(struct seq_file *m, struct crypto_alg *alg); - int (*report)(struct sk_buff *skb, struct crypto_alg *alg); - void (*free)(struct crypto_instance *inst); - - unsigned int type; - unsigned int maskclear; - unsigned int maskset; - unsigned int tfmsize; -}; - struct crypto_instance { struct crypto_alg alg; -- cgit v1.2.3 From cc47f07234f72cbd8e2c973cdbf2a6730660a463 Mon Sep 17 00:00:00 2001 From: Herbert Xu Date: Thu, 27 Feb 2025 17:04:46 +0800 Subject: crypto: lzo - Fix compression buffer overrun Unlike the decompression code, the compression code in LZO never checked for output overruns. It instead assumes that the caller always provides enough buffer space, disregarding the buffer length provided by the caller. Add a safe compression interface that checks for the end of buffer before each write. Use the safe interface in crypto/lzo. Signed-off-by: Herbert Xu Reviewed-by: David Sterba Signed-off-by: Herbert Xu --- include/linux/lzo.h | 8 ++++++++ 1 file changed, 8 insertions(+) (limited to 'include') diff --git a/include/linux/lzo.h b/include/linux/lzo.h index e95c7d1092b2..4d30e3624acd 100644 --- a/include/linux/lzo.h +++ b/include/linux/lzo.h @@ -24,10 +24,18 @@ int lzo1x_1_compress(const unsigned char *src, size_t src_len, unsigned char *dst, size_t *dst_len, void *wrkmem); +/* Same as above but does not write more than dst_len to dst. */ +int lzo1x_1_compress_safe(const unsigned char *src, size_t src_len, + unsigned char *dst, size_t *dst_len, void *wrkmem); + /* This requires 'wrkmem' of size LZO1X_1_MEM_COMPRESS */ int lzorle1x_1_compress(const unsigned char *src, size_t src_len, unsigned char *dst, size_t *dst_len, void *wrkmem); +/* Same as above but does not write more than dst_len to dst. */ +int lzorle1x_1_compress_safe(const unsigned char *src, size_t src_len, + unsigned char *dst, size_t *dst_len, void *wrkmem); + /* safe decompression with overrun testing */ int lzo1x_decompress_safe(const unsigned char *src, size_t src_len, unsigned char *dst, size_t *dst_len); -- cgit v1.2.3 From 8f3332eecdd420c4cfc8861c7b63508cac07e227 Mon Sep 17 00:00:00 2001 From: Herbert Xu Date: Thu, 27 Feb 2025 18:14:57 +0800 Subject: crypto: acomp - Remove acomp request flags The acomp request flags field duplicates the base request flags and is confusing. Remove it. 
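As a sketch of the resulting convention (illustrative, not part of the diff below), anything that used to be tested through the removed field goes through the embedded base request instead:

static inline bool example_wants_alloc_output(struct acomp_req *req)
{
	/* The one remaining flags word lives in the common base request. */
	return req->base.flags & CRYPTO_ACOMP_ALLOC_OUTPUT;
}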
Signed-off-by: Herbert Xu --- include/crypto/acompress.h | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) (limited to 'include') diff --git a/include/crypto/acompress.h b/include/crypto/acompress.h index 54937b615239..b6d5136e689d 100644 --- a/include/crypto/acompress.h +++ b/include/crypto/acompress.h @@ -24,7 +24,6 @@ * @dst: Destination data * @slen: Size of the input buffer * @dlen: Size of the output buffer and number of bytes produced - * @flags: Internal flags * @__ctx: Start of private context data */ struct acomp_req { @@ -33,7 +32,6 @@ struct acomp_req { struct scatterlist *dst; unsigned int slen; unsigned int dlen; - u32 flags; void *__ctx[] CRYPTO_MINALIGN_ATTR; }; @@ -232,9 +230,9 @@ static inline void acomp_request_set_params(struct acomp_req *req, req->slen = slen; req->dlen = dlen; - req->flags &= ~CRYPTO_ACOMP_ALLOC_OUTPUT; + req->base.flags &= ~CRYPTO_ACOMP_ALLOC_OUTPUT; if (!req->dst) - req->flags |= CRYPTO_ACOMP_ALLOC_OUTPUT; + req->base.flags |= CRYPTO_ACOMP_ALLOC_OUTPUT; } /** -- cgit v1.2.3 From 20238d49448cdb406da2b9bd3e50f892b26da318 Mon Sep 17 00:00:00 2001 From: "Dr. David Alan Gilbert" Date: Sun, 29 Sep 2024 14:21:48 +0100 Subject: async_xor: Remove unused 'async_xor_val' async_xor_val has been unused since commit a7c224a820c3 ("md/raid5: convert to new xor compution interface") Remove it. Signed-off-by: Dr. David Alan Gilbert Signed-off-by: Herbert Xu --- include/linux/async_tx.h | 5 ----- 1 file changed, 5 deletions(-) (limited to 'include') diff --git a/include/linux/async_tx.h b/include/linux/async_tx.h index 5cc73d7e5b52..1ca9f9e05f4f 100644 --- a/include/linux/async_tx.h +++ b/include/linux/async_tx.h @@ -167,11 +167,6 @@ async_xor_offs(struct page *dest, unsigned int offset, struct page **src_list, unsigned int *src_offset, int src_cnt, size_t len, struct async_submit_ctl *submit); -struct dma_async_tx_descriptor * -async_xor_val(struct page *dest, struct page **src_list, unsigned int offset, - int src_cnt, size_t len, enum sum_check_flags *result, - struct async_submit_ctl *submit); - struct dma_async_tx_descriptor * async_xor_val_offs(struct page *dest, unsigned int offset, struct page **src_list, unsigned int *src_offset, -- cgit v1.2.3 From b949f55644a6d1645c0a71f78afabf12aec7c33b Mon Sep 17 00:00:00 2001 From: Dionna Glaze Date: Sat, 8 Mar 2025 12:10:28 +1100 Subject: crypto: ccp - Fix uAPI definitions of PSP errors Additions to the error enum after the explicit 0x27 setting for SEV_RET_INVALID_KEY lead to incorrect value assignments. Use explicit values to match the manufacturer specifications more clearly.
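The underlying C hazard can be shown with a reduced, hypothetical enum (the values here are not the real SEV codes): enumerators after an explicit assignment keep counting implicitly, so a later insertion silently renumbers them:

enum example_status {
	EX_OK,				/* 0 */
	EX_INVALID_KEY = 0x27,		/* pinned explicitly */
	EX_INVALID_PAGE_SIZE,		/* implicitly 0x28 */
	/*
	 * Adding a new enumerator above EX_INVALID_PAGE_SIZE would move
	 * it to 0x29 with no compiler diagnostic; spelling out every
	 * value, as the patch below does, pins each constant to the
	 * code the firmware specification assigns it.
	 */
};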
Fixes: 3a45dc2b419e ("crypto: ccp: Define the SEV-SNP commands") CC: stable@vger.kernel.org Signed-off-by: Dionna Glaze Reviewed-by: Tom Lendacky Signed-off-by: Alexey Kardashevskiy Signed-off-by: Herbert Xu --- include/uapi/linux/psp-sev.h | 21 ++++++++++++++------- 1 file changed, 14 insertions(+), 7 deletions(-) (limited to 'include') diff --git a/include/uapi/linux/psp-sev.h b/include/uapi/linux/psp-sev.h index 832c15d9155b..eeb20dfb1fda 100644 --- a/include/uapi/linux/psp-sev.h +++ b/include/uapi/linux/psp-sev.h @@ -73,13 +73,20 @@ typedef enum { SEV_RET_INVALID_PARAM, SEV_RET_RESOURCE_LIMIT, SEV_RET_SECURE_DATA_INVALID, - SEV_RET_INVALID_KEY = 0x27, - SEV_RET_INVALID_PAGE_SIZE, - SEV_RET_INVALID_PAGE_STATE, - SEV_RET_INVALID_MDATA_ENTRY, - SEV_RET_INVALID_PAGE_OWNER, - SEV_RET_INVALID_PAGE_AEAD_OFLOW, - SEV_RET_RMP_INIT_REQUIRED, + SEV_RET_INVALID_PAGE_SIZE = 0x0019, + SEV_RET_INVALID_PAGE_STATE = 0x001A, + SEV_RET_INVALID_MDATA_ENTRY = 0x001B, + SEV_RET_INVALID_PAGE_OWNER = 0x001C, + SEV_RET_AEAD_OFLOW = 0x001D, + SEV_RET_EXIT_RING_BUFFER = 0x001F, + SEV_RET_RMP_INIT_REQUIRED = 0x0020, + SEV_RET_BAD_SVN = 0x0021, + SEV_RET_BAD_VERSION = 0x0022, + SEV_RET_SHUTDOWN_REQUIRED = 0x0023, + SEV_RET_UPDATE_FAILED = 0x0024, + SEV_RET_RESTORE_REQUIRED = 0x0025, + SEV_RET_RMP_INITIALIZATION_FAILED = 0x0026, + SEV_RET_INVALID_KEY = 0x0027, SEV_RET_MAX, } sev_ret_code; -- cgit v1.2.3 From 65775cf313987926e9746b0ca7f5519d297af2da Mon Sep 17 00:00:00 2001 From: Herbert Xu Date: Sat, 8 Mar 2025 20:45:21 +0800 Subject: crypto: scatterwalk - Change scatterwalk_next calling convention Rather than returning the address and storing the length into an argument pointer, add an address field to the walk struct and use that to store the address. The length is returned directly. Change the done functions to use this stored address instead of getting them from the caller. Split the address into two using a union. The user should only access the const version so that it is never changed. Signed-off-by: Herbert Xu --- include/crypto/algapi.h | 7 +++++++ include/crypto/scatterwalk.h | 35 ++++++++++++++++++----------------- 2 files changed, 25 insertions(+), 17 deletions(-) (limited to 'include') diff --git a/include/crypto/algapi.h b/include/crypto/algapi.h index 94989b2e1350..f92e22686a68 100644 --- a/include/crypto/algapi.h +++ b/include/crypto/algapi.h @@ -54,6 +54,7 @@ struct rtattr; struct scatterlist; struct seq_file; struct sk_buff; +union crypto_no_such_thing; struct crypto_instance { struct crypto_alg alg; @@ -108,6 +109,12 @@ struct crypto_queue { struct scatter_walk { struct scatterlist *sg; unsigned int offset; + union { + void *const addr; + + /* Private API field, do not touch. */ + union crypto_no_such_thing *__addr; + }; }; struct crypto_attr_alg { diff --git a/include/crypto/scatterwalk.h b/include/crypto/scatterwalk.h index 3024adbdd443..8523c7591d95 100644 --- a/include/crypto/scatterwalk.h +++ b/include/crypto/scatterwalk.h @@ -120,18 +120,20 @@ static inline void *scatterwalk_map(struct scatter_walk *walk) * scatterwalk_next() - Get the next data buffer in a scatterlist walk * @walk: the scatter_walk * @total: the total number of bytes remaining, > 0 - * @nbytes_ret: (out) the next number of bytes available, <= @total * - * Return: A virtual address for the next segment of data from the scatterlist. - * The caller must call scatterwalk_done_src() or scatterwalk_done_dst() - * when it is done using this virtual address. 
+ * A virtual address for the next segment of data from the scatterlist will + * be placed into @walk->addr. The caller must call scatterwalk_done_src() + * or scatterwalk_done_dst() when it is done using this virtual address. + * + * Returns: the next number of bytes available, <= @total */ -static inline void *scatterwalk_next(struct scatter_walk *walk, - unsigned int total, - unsigned int *nbytes_ret) +static inline unsigned int scatterwalk_next(struct scatter_walk *walk, + unsigned int total) { - *nbytes_ret = scatterwalk_clamp(walk, total); - return scatterwalk_map(walk); + unsigned int nbytes = scatterwalk_clamp(walk, total); + + walk->__addr = scatterwalk_map(walk); + return nbytes; } static inline void scatterwalk_unmap(const void *vaddr) @@ -149,32 +151,31 @@ static inline void scatterwalk_advance(struct scatter_walk *walk, /** * scatterwalk_done_src() - Finish one step of a walk of source scatterlist * @walk: the scatter_walk - * @vaddr: the address returned by scatterwalk_next() * @nbytes: the number of bytes processed this step, less than or equal to the * number of bytes that scatterwalk_next() returned. * - * Use this if the @vaddr was not written to, i.e. it is source data. + * Use this if the mapped address was not written to, i.e. it is source data. */ static inline void scatterwalk_done_src(struct scatter_walk *walk, - const void *vaddr, unsigned int nbytes) + unsigned int nbytes) { - scatterwalk_unmap(vaddr); + scatterwalk_unmap(walk->addr); scatterwalk_advance(walk, nbytes); } /** * scatterwalk_done_dst() - Finish one step of a walk of destination scatterlist * @walk: the scatter_walk - * @vaddr: the address returned by scatterwalk_next() * @nbytes: the number of bytes processed this step, less than or equal to the * number of bytes that scatterwalk_next() returned. * - * Use this if the @vaddr may have been written to, i.e. it is destination data. + * Use this if the mapped address may have been written to, i.e. it is + * destination data. */ static inline void scatterwalk_done_dst(struct scatter_walk *walk, - void *vaddr, unsigned int nbytes) + unsigned int nbytes) { - scatterwalk_unmap(vaddr); + scatterwalk_unmap(walk->addr); /* * Explicitly check ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE instead of just * relying on flush_dcache_page() being a no-op when not implemented, -- cgit v1.2.3 From 131bdceca1f0a2d9381270dc40f898458e5e184b Mon Sep 17 00:00:00 2001 From: Herbert Xu Date: Sat, 8 Mar 2025 20:45:23 +0800 Subject: crypto: scatterwalk - Add memcpy_sglist Add memcpy_sglist which copies one SG list to another. Signed-off-by: Herbert Xu --- include/crypto/scatterwalk.h | 3 +++ 1 file changed, 3 insertions(+) (limited to 'include') diff --git a/include/crypto/scatterwalk.h b/include/crypto/scatterwalk.h index 8523c7591d95..c62f47d04eb1 100644 --- a/include/crypto/scatterwalk.h +++ b/include/crypto/scatterwalk.h @@ -210,6 +210,9 @@ void memcpy_from_sglist(void *buf, struct scatterlist *sg, void memcpy_to_sglist(struct scatterlist *sg, unsigned int start, const void *buf, unsigned int nbytes); +void memcpy_sglist(struct scatterlist *dst, struct scatterlist *src, + unsigned int nbytes); + /* In new code, please use memcpy_{from,to}_sglist() directly instead. 
*/ static inline void scatterwalk_map_and_copy(void *buf, struct scatterlist *sg, unsigned int start, -- cgit v1.2.3 From db873be6f0549597f92c72986b1939643a7f9a75 Mon Sep 17 00:00:00 2001 From: Herbert Xu Date: Sat, 8 Mar 2025 20:45:25 +0800 Subject: crypto: skcipher - Eliminate duplicate virt.addr field Reuse the addr field from struct scatter_walk for skcipher_walk. Keep the existing virt.addr fields but make them const for the user to access the mapped address. Signed-off-by: Herbert Xu --- include/crypto/algapi.h | 5 +++-- include/crypto/internal/skcipher.h | 26 +++++++++++++++++++++----- 2 files changed, 24 insertions(+), 7 deletions(-) (limited to 'include') diff --git a/include/crypto/algapi.h b/include/crypto/algapi.h index f92e22686a68..6e07bbc04089 100644 --- a/include/crypto/algapi.h +++ b/include/crypto/algapi.h @@ -107,14 +107,15 @@ struct crypto_queue { }; struct scatter_walk { - struct scatterlist *sg; - unsigned int offset; + /* Must be the first member, see struct skcipher_walk. */ union { void *const addr; /* Private API field, do not touch. */ union crypto_no_such_thing *__addr; }; + struct scatterlist *sg; + unsigned int offset; }; struct crypto_attr_alg { diff --git a/include/crypto/internal/skcipher.h b/include/crypto/internal/skcipher.h index d6ae7a86fed2..c705124432c5 100644 --- a/include/crypto/internal/skcipher.h +++ b/include/crypto/internal/skcipher.h @@ -56,15 +56,31 @@ struct crypto_lskcipher_spawn { struct skcipher_walk { union { + /* Virtual address of the source. */ struct { - void *addr; - } virt; - } src, dst; + struct { + void *const addr; + } virt; + } src; + + /* Private field for the API, do not use. */ + struct scatter_walk in; + }; - struct scatter_walk in; unsigned int nbytes; - struct scatter_walk out; + union { + /* Virtual address of the destination. */ + struct { + struct { + void *const addr; + } virt; + } dst; + + /* Private field for the API, do not use. */ + struct scatter_walk out; + }; + unsigned int total; u8 *page; -- cgit v1.2.3 From 37d451809f572ec197d3c18d9638c8715274255f Mon Sep 17 00:00:00 2001 From: Herbert Xu Date: Sat, 8 Mar 2025 20:53:13 +0800 Subject: crypto: skcipher - Make skcipher_walk src.virt.addr const Mark the src.virt.addr field in struct skcipher_walk as a pointer to const data. This guarantees that the user won't modify the data which should be done through dst.virt.addr to ensure that flushing is done when necessary. Signed-off-by: Herbert Xu --- include/crypto/ctr.h | 2 +- include/crypto/internal/skcipher.h | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) (limited to 'include') diff --git a/include/crypto/ctr.h b/include/crypto/ctr.h index a1c66d1001af..da1ee73e9ce9 100644 --- a/include/crypto/ctr.h +++ b/include/crypto/ctr.h @@ -34,8 +34,8 @@ static inline int crypto_ctr_encrypt_walk(struct skcipher_request *req, err = skcipher_walk_virt(&walk, req, false); while (walk.nbytes > 0) { + const u8 *src = walk.src.virt.addr; u8 *dst = walk.dst.virt.addr; - u8 *src = walk.src.virt.addr; int nbytes = walk.nbytes; int tail = 0; diff --git a/include/crypto/internal/skcipher.h b/include/crypto/internal/skcipher.h index c705124432c5..a958ab0636ad 100644 --- a/include/crypto/internal/skcipher.h +++ b/include/crypto/internal/skcipher.h @@ -59,7 +59,7 @@ struct skcipher_walk { /* Virtual address of the source. 
*/ struct { struct { - void *const addr; + const void *const addr; } virt; } src; -- cgit v1.2.3 From 0af7304c0696ec5b3589af6973b7f27e014c2903 Mon Sep 17 00:00:00 2001 From: Herbert Xu Date: Sun, 9 Mar 2025 10:43:14 +0800 Subject: crypto: scomp - Remove tfm argument from alloc/free_ctx The tfm argument is completely unused and meaningless as the same stream object is identical over all transforms of a given algorithm. Remove it. Signed-off-by: Herbert Xu --- include/crypto/internal/scompress.h | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) (limited to 'include') diff --git a/include/crypto/internal/scompress.h b/include/crypto/internal/scompress.h index 07a10fd2d321..6ba9974df7d3 100644 --- a/include/crypto/internal/scompress.h +++ b/include/crypto/internal/scompress.h @@ -31,8 +31,8 @@ struct crypto_scomp { * @calg: Cmonn algorithm data structure shared with acomp */ struct scomp_alg { - void *(*alloc_ctx)(struct crypto_scomp *tfm); - void (*free_ctx)(struct crypto_scomp *tfm, void *ctx); + void *(*alloc_ctx)(void); + void (*free_ctx)(void *ctx); int (*compress)(struct crypto_scomp *tfm, const u8 *src, unsigned int slen, u8 *dst, unsigned int *dlen, void *ctx); @@ -73,13 +73,13 @@ static inline struct scomp_alg *crypto_scomp_alg(struct crypto_scomp *tfm) static inline void *crypto_scomp_alloc_ctx(struct crypto_scomp *tfm) { - return crypto_scomp_alg(tfm)->alloc_ctx(tfm); + return crypto_scomp_alg(tfm)->alloc_ctx(); } static inline void crypto_scomp_free_ctx(struct crypto_scomp *tfm, void *ctx) { - return crypto_scomp_alg(tfm)->free_ctx(tfm, ctx); + return crypto_scomp_alg(tfm)->free_ctx(ctx); } static inline int crypto_scomp_compress(struct crypto_scomp *tfm, -- cgit v1.2.3 From 3d72ad46a23ae42450d1f475bb472151dede5b93 Mon Sep 17 00:00:00 2001 From: Herbert Xu Date: Sun, 9 Mar 2025 10:43:17 +0800 Subject: crypto: acomp - Move stream management into scomp layer Rather than allocating the stream memory in the request object, move it into a per-cpu buffer managed by scomp. This takes the stress off the user from having to manage large request objects and setting up their own per-cpu buffers in order to do so. 
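A minimal sketch of how a per-CPU slot of the shape added below might be borrowed for one short, non-sleeping operation; the function and callback names here are illustrative, not from this patch:

static int example_run_on_stream(struct crypto_acomp_stream __percpu *streams,
				 int (*op)(void *stream_ctx))
{
	struct crypto_acomp_stream *s;
	int err;

	local_bh_disable();		/* stay on this CPU's slot */
	s = this_cpu_ptr(streams);
	spin_lock(&s->lock);
	err = op(s->ctx);		/* scratch memory is exclusive here */
	spin_unlock(&s->lock);
	local_bh_enable();
	return err;
}

The spinlock is what lets the buffer be shared by all transforms of the algorithm rather than being duplicated per request.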
Signed-off-by: Herbert Xu --- include/crypto/acompress.h | 26 ++++++++++++++++++++++++-- include/crypto/internal/acompress.h | 17 +---------------- include/crypto/internal/scompress.h | 12 +----------- 3 files changed, 26 insertions(+), 29 deletions(-) (limited to 'include') diff --git a/include/crypto/acompress.h b/include/crypto/acompress.h index b6d5136e689d..c4937709ad0e 100644 --- a/include/crypto/acompress.h +++ b/include/crypto/acompress.h @@ -10,8 +10,12 @@ #define _CRYPTO_ACOMP_H #include +#include #include #include +#include +#include +#include #define CRYPTO_ACOMP_ALLOC_OUTPUT 0x00000001 #define CRYPTO_ACOMP_DST_MAX 131072 @@ -54,8 +58,14 @@ struct crypto_acomp { struct crypto_tfm base; }; +struct crypto_acomp_stream { + spinlock_t lock; + void *ctx; +}; + #define COMP_ALG_COMMON { \ struct crypto_alg base; \ + struct crypto_acomp_stream __percpu *stream; \ } struct comp_alg_common COMP_ALG_COMMON; @@ -173,7 +183,16 @@ static inline int crypto_has_acomp(const char *alg_name, u32 type, u32 mask) * * Return: allocated handle in case of success or NULL in case of an error */ -struct acomp_req *acomp_request_alloc(struct crypto_acomp *tfm); +static inline struct acomp_req *acomp_request_alloc_noprof(struct crypto_acomp *tfm) +{ + struct acomp_req *req; + + req = kzalloc_noprof(sizeof(*req) + crypto_acomp_reqsize(tfm), GFP_KERNEL); + if (likely(req)) + acomp_request_set_tfm(req, tfm); + return req; +} +#define acomp_request_alloc(...) alloc_hooks(acomp_request_alloc_noprof(__VA_ARGS__)) /** * acomp_request_free() -- zeroize and free asynchronous (de)compression @@ -182,7 +201,10 @@ struct acomp_req *acomp_request_alloc(struct crypto_acomp *tfm); * * @req: request to free */ -void acomp_request_free(struct acomp_req *req); +static inline void acomp_request_free(struct acomp_req *req) +{ + kfree_sensitive(req); +} /** * acomp_request_set_callback() -- Sets an asynchronous callback diff --git a/include/crypto/internal/acompress.h b/include/crypto/internal/acompress.h index 8831edaafc05..4a8f7e3beaa1 100644 --- a/include/crypto/internal/acompress.h +++ b/include/crypto/internal/acompress.h @@ -32,6 +32,7 @@ * * @reqsize: Context size for (de)compression requests * @base: Common crypto API algorithm data structure + * @stream: Per-cpu memory for algorithm * @calg: Cmonn algorithm data structure shared with scomp */ struct acomp_alg { @@ -68,22 +69,6 @@ static inline void acomp_request_complete(struct acomp_req *req, crypto_request_complete(&req->base, err); } -static inline struct acomp_req *__acomp_request_alloc_noprof(struct crypto_acomp *tfm) -{ - struct acomp_req *req; - - req = kzalloc_noprof(sizeof(*req) + crypto_acomp_reqsize(tfm), GFP_KERNEL); - if (likely(req)) - acomp_request_set_tfm(req, tfm); - return req; -} -#define __acomp_request_alloc(...) 
alloc_hooks(__acomp_request_alloc_noprof(__VA_ARGS__)) - -static inline void __acomp_request_free(struct acomp_req *req) -{ - kfree_sensitive(req); -} - /** * crypto_register_acomp() -- Register asynchronous compression algorithm * diff --git a/include/crypto/internal/scompress.h b/include/crypto/internal/scompress.h index 6ba9974df7d3..88986ab8ce15 100644 --- a/include/crypto/internal/scompress.h +++ b/include/crypto/internal/scompress.h @@ -28,6 +28,7 @@ struct crypto_scomp { * @compress: Function performs a compress operation * @decompress: Function performs a de-compress operation * @base: Common crypto API algorithm data structure + * @stream: Per-cpu memory for algorithm * @calg: Cmonn algorithm data structure shared with acomp */ struct scomp_alg { @@ -71,17 +72,6 @@ static inline struct scomp_alg *crypto_scomp_alg(struct crypto_scomp *tfm) return __crypto_scomp_alg(crypto_scomp_tfm(tfm)->__crt_alg); } -static inline void *crypto_scomp_alloc_ctx(struct crypto_scomp *tfm) -{ - return crypto_scomp_alg(tfm)->alloc_ctx(); -} - -static inline void crypto_scomp_free_ctx(struct crypto_scomp *tfm, - void *ctx) -{ - return crypto_scomp_alg(tfm)->free_ctx(ctx); -} - static inline int crypto_scomp_compress(struct crypto_scomp *tfm, const u8 *src, unsigned int slen, u8 *dst, unsigned int *dlen, void *ctx) -- cgit v1.2.3 From b67a026003725a5d2496eba691c293694ab4847a Mon Sep 17 00:00:00 2001 From: Herbert Xu Date: Sun, 9 Mar 2025 10:43:21 +0800 Subject: crypto: acomp - Add request chaining and virtual addresses This adds request chaining and virtual address support to the acomp interface. It is identical to the ahash interface, except that a new flag CRYPTO_ACOMP_REQ_NONDMA has been added to indicate that the virtual addresses are not suitable for DMA. This is because all existing and potential acomp users can provide memory that is suitable for DMA so there is no need for a fall-back copy path. Signed-off-by: Herbert Xu --- include/crypto/acompress.h | 198 +++++++++++++++++++++++++++++++++--- include/crypto/internal/acompress.h | 42 ++++++++ 2 files changed, 227 insertions(+), 13 deletions(-) (limited to 'include') diff --git a/include/crypto/acompress.h b/include/crypto/acompress.h index c4937709ad0e..c4d8a29274c6 100644 --- a/include/crypto/acompress.h +++ b/include/crypto/acompress.h @@ -13,13 +13,42 @@ #include #include #include +#include #include #include #include #define CRYPTO_ACOMP_ALLOC_OUTPUT 0x00000001 + +/* Set this bit if source is virtual address instead of SG list. */ +#define CRYPTO_ACOMP_REQ_SRC_VIRT 0x00000002 + +/* Set this bit for if virtual address source cannot be used for DMA. */ +#define CRYPTO_ACOMP_REQ_SRC_NONDMA 0x00000004 + +/* Set this bit if destination is virtual address instead of SG list. */ +#define CRYPTO_ACOMP_REQ_DST_VIRT 0x00000008 + +/* Set this bit for if virtual address destination cannot be used for DMA. 
*/ +#define CRYPTO_ACOMP_REQ_DST_NONDMA 0x00000010 + #define CRYPTO_ACOMP_DST_MAX 131072 +struct acomp_req; + +struct acomp_req_chain { + struct list_head head; + struct acomp_req *req0; + struct acomp_req *cur; + int (*op)(struct acomp_req *req); + crypto_completion_t compl; + void *data; + struct scatterlist ssg; + struct scatterlist dsg; + const u8 *src; + u8 *dst; +}; + /** * struct acomp_req - asynchronous (de)compression request * @@ -28,14 +57,24 @@ * @dst: Destination data * @slen: Size of the input buffer * @dlen: Size of the output buffer and number of bytes produced + * @chain: Private API code data, do not use * @__ctx: Start of private context data */ struct acomp_req { struct crypto_async_request base; - struct scatterlist *src; - struct scatterlist *dst; + union { + struct scatterlist *src; + const u8 *svirt; + }; + union { + struct scatterlist *dst; + u8 *dvirt; + }; unsigned int slen; unsigned int dlen; + + struct acomp_req_chain chain; + void *__ctx[] CRYPTO_MINALIGN_ATTR; }; @@ -222,10 +261,16 @@ static inline void acomp_request_set_callback(struct acomp_req *req, crypto_completion_t cmpl, void *data) { + u32 keep = CRYPTO_ACOMP_ALLOC_OUTPUT | CRYPTO_ACOMP_REQ_SRC_VIRT | + CRYPTO_ACOMP_REQ_SRC_NONDMA | CRYPTO_ACOMP_REQ_DST_VIRT | + CRYPTO_ACOMP_REQ_DST_NONDMA; + req->base.complete = cmpl; req->base.data = data; - req->base.flags &= CRYPTO_ACOMP_ALLOC_OUTPUT; - req->base.flags |= flgs & ~CRYPTO_ACOMP_ALLOC_OUTPUT; + req->base.flags &= keep; + req->base.flags |= flgs & ~keep; + + crypto_reqchain_init(&req->base); } /** @@ -252,11 +297,144 @@ static inline void acomp_request_set_params(struct acomp_req *req, req->slen = slen; req->dlen = dlen; - req->base.flags &= ~CRYPTO_ACOMP_ALLOC_OUTPUT; + req->base.flags &= ~(CRYPTO_ACOMP_ALLOC_OUTPUT | + CRYPTO_ACOMP_REQ_SRC_VIRT | + CRYPTO_ACOMP_REQ_SRC_NONDMA | + CRYPTO_ACOMP_REQ_DST_VIRT | + CRYPTO_ACOMP_REQ_DST_NONDMA); if (!req->dst) req->base.flags |= CRYPTO_ACOMP_ALLOC_OUTPUT; } +/** + * acomp_request_set_src_sg() -- Sets source scatterlist + * + * Sets source scatterlist required by an acomp operation. + * + * @req: asynchronous compress request + * @src: pointer to input buffer scatterlist + * @slen: size of the input buffer + */ +static inline void acomp_request_set_src_sg(struct acomp_req *req, + struct scatterlist *src, + unsigned int slen) +{ + req->src = src; + req->slen = slen; + + req->base.flags &= ~CRYPTO_ACOMP_REQ_SRC_NONDMA; + req->base.flags &= ~CRYPTO_ACOMP_REQ_SRC_VIRT; +} + +/** + * acomp_request_set_src_dma() -- Sets DMA source virtual address + * + * Sets source virtual address required by an acomp operation. + * The address must be usable for DMA. + * + * @req: asynchronous compress request + * @src: virtual address pointer to input buffer + * @slen: size of the input buffer + */ +static inline void acomp_request_set_src_dma(struct acomp_req *req, + const u8 *src, unsigned int slen) +{ + req->svirt = src; + req->slen = slen; + + req->base.flags &= ~CRYPTO_ACOMP_REQ_SRC_NONDMA; + req->base.flags |= CRYPTO_ACOMP_REQ_SRC_VIRT; +} + +/** + * acomp_request_set_src_nondma() -- Sets non-DMA source virtual address + * + * Sets source virtual address required by an acomp operation. + * The address can not be used for DMA. 
+ * + * @req: asynchronous compress request + * @src: virtual address pointer to input buffer + * @slen: size of the input buffer + */ +static inline void acomp_request_set_src_nondma(struct acomp_req *req, + const u8 *src, + unsigned int slen) +{ + req->svirt = src; + req->slen = slen; + + req->base.flags |= CRYPTO_ACOMP_REQ_SRC_NONDMA; + req->base.flags |= CRYPTO_ACOMP_REQ_SRC_VIRT; +} + +/** + * acomp_request_set_dst_sg() -- Sets destination scatterlist + * + * Sets destination scatterlist required by an acomp operation. + * + * @req: asynchronous compress request + * @dst: pointer to output buffer scatterlist + * @dlen: size of the output buffer + */ +static inline void acomp_request_set_dst_sg(struct acomp_req *req, + struct scatterlist *dst, + unsigned int dlen) +{ + req->dst = dst; + req->dlen = dlen; + + req->base.flags &= ~CRYPTO_ACOMP_REQ_DST_NONDMA; + req->base.flags &= ~CRYPTO_ACOMP_REQ_DST_VIRT; +} + +/** + * acomp_request_set_dst_dma() -- Sets DMA destination virtual address + * + * Sets destination virtual address required by an acomp operation. + * The address must be usable for DMA. + * + * @req: asynchronous compress request + * @dst: virtual address pointer to output buffer + * @dlen: size of the output buffer + */ +static inline void acomp_request_set_dst_dma(struct acomp_req *req, + u8 *dst, unsigned int dlen) +{ + req->dvirt = dst; + req->dlen = dlen; + + req->base.flags &= ~CRYPTO_ACOMP_ALLOC_OUTPUT; + req->base.flags &= ~CRYPTO_ACOMP_REQ_DST_NONDMA; + req->base.flags |= CRYPTO_ACOMP_REQ_DST_VIRT; +} + +/** + * acomp_request_set_dst_nondma() -- Sets non-DMA destination virtual address + * + * Sets destination virtual address required by an acomp operation. + * The address can not be used for DMA. + * + * @req: asynchronous compress request + * @dst: virtual address pointer to output buffer + * @dlen: size of the output buffer + */ +static inline void acomp_request_set_dst_nondma(struct acomp_req *req, + u8 *dst, unsigned int dlen) +{ + req->dvirt = dst; + req->dlen = dlen; + + req->base.flags &= ~CRYPTO_ACOMP_ALLOC_OUTPUT; + req->base.flags |= CRYPTO_ACOMP_REQ_DST_NONDMA; + req->base.flags |= CRYPTO_ACOMP_REQ_DST_VIRT; +} + +static inline void acomp_request_chain(struct acomp_req *req, + struct acomp_req *head) +{ + crypto_request_chain(&req->base, &head->base); +} + /** * crypto_acomp_compress() -- Invoke asynchronous compress operation * @@ -266,10 +444,7 @@ static inline void acomp_request_set_params(struct acomp_req *req, * * Return: zero on success; error code in case of error */ -static inline int crypto_acomp_compress(struct acomp_req *req) -{ - return crypto_acomp_reqtfm(req)->compress(req); -} +int crypto_acomp_compress(struct acomp_req *req); /** * crypto_acomp_decompress() -- Invoke asynchronous decompress operation @@ -280,9 +455,6 @@ static inline int crypto_acomp_compress(struct acomp_req *req) * * Return: zero on success; error code in case of error */ -static inline int crypto_acomp_decompress(struct acomp_req *req) -{ - return crypto_acomp_reqtfm(req)->decompress(req); -} +int crypto_acomp_decompress(struct acomp_req *req); #endif diff --git a/include/crypto/internal/acompress.h b/include/crypto/internal/acompress.h index 4a8f7e3beaa1..957a5ed7c7f1 100644 --- a/include/crypto/internal/acompress.h +++ b/include/crypto/internal/acompress.h @@ -94,4 +94,46 @@ void crypto_unregister_acomp(struct acomp_alg *alg); int crypto_register_acomps(struct acomp_alg *algs, int count); void crypto_unregister_acomps(struct acomp_alg *algs, int count); +static inline 
bool acomp_request_chained(struct acomp_req *req) +{ + return crypto_request_chained(&req->base); +} + +static inline bool acomp_request_src_isvirt(struct acomp_req *req) +{ + return req->base.flags & CRYPTO_ACOMP_REQ_SRC_VIRT; +} + +static inline bool acomp_request_dst_isvirt(struct acomp_req *req) +{ + return req->base.flags & CRYPTO_ACOMP_REQ_DST_VIRT; +} + +static inline bool acomp_request_isvirt(struct acomp_req *req) +{ + return req->base.flags & (CRYPTO_ACOMP_REQ_SRC_VIRT | + CRYPTO_ACOMP_REQ_DST_VIRT); +} + +static inline bool acomp_request_src_isnondma(struct acomp_req *req) +{ + return req->base.flags & CRYPTO_ACOMP_REQ_SRC_NONDMA; +} + +static inline bool acomp_request_dst_isnondma(struct acomp_req *req) +{ + return req->base.flags & CRYPTO_ACOMP_REQ_DST_NONDMA; +} + +static inline bool acomp_request_isnondma(struct acomp_req *req) +{ + return req->base.flags & (CRYPTO_ACOMP_REQ_SRC_NONDMA | + CRYPTO_ACOMP_REQ_DST_NONDMA); +} + +static inline bool crypto_acomp_req_chain(struct crypto_acomp *tfm) +{ + return crypto_tfm_req_chain(&tfm->base); +} + #endif -- cgit v1.2.3 From 7450ebd29cd9b9745f005f2609badacea15fbe30 Mon Sep 17 00:00:00 2001 From: Eric Biggers Date: Mon, 10 Mar 2025 10:20:16 -0700 Subject: crypto: scatterwalk - simplify map and unmap calling convention Now that the address returned by scatterwalk_map() is always being stored into the same struct scatter_walk that is passed in, make scatterwalk_map() do so itself and return void. Similarly, now that scatterwalk_unmap() is always being passed the address field within a struct scatter_walk, make scatterwalk_unmap() take a pointer to struct scatter_walk instead of the address directly. Signed-off-by: Eric Biggers Signed-off-by: Herbert Xu --- include/crypto/scatterwalk.h | 43 ++++++++++++++++++++++++------------------- 1 file changed, 24 insertions(+), 19 deletions(-) (limited to 'include') diff --git a/include/crypto/scatterwalk.h b/include/crypto/scatterwalk.h index c62f47d04eb1..b7e617ae4442 100644 --- a/include/crypto/scatterwalk.h +++ b/include/crypto/scatterwalk.h @@ -97,23 +97,28 @@ static inline void scatterwalk_get_sglist(struct scatter_walk *walk, scatterwalk_crypto_chain(sg_out, sg_next(walk->sg), 2); } -static inline void *scatterwalk_map(struct scatter_walk *walk) +static inline void scatterwalk_map(struct scatter_walk *walk) { struct page *base_page = sg_page(walk->sg); - if (IS_ENABLED(CONFIG_HIGHMEM)) - return kmap_local_page(base_page + (walk->offset >> PAGE_SHIFT)) + - offset_in_page(walk->offset); - /* - * When !HIGHMEM we allow the walker to return segments that span a page - * boundary; see scatterwalk_clamp(). To make it clear that in this - * case we're working in the linear buffer of the whole sg entry in the - * kernel's direct map rather than within the mapped buffer of a single - * page, compute the address as an offset from the page_address() of the - * first page of the sg entry. Either way the result is the address in - * the direct map, but this makes it clearer what is really going on. - */ - return page_address(base_page) + walk->offset; + if (IS_ENABLED(CONFIG_HIGHMEM)) { + walk->__addr = kmap_local_page(base_page + + (walk->offset >> PAGE_SHIFT)) + + offset_in_page(walk->offset); + } else { + /* + * When !HIGHMEM we allow the walker to return segments that + * span a page boundary; see scatterwalk_clamp(). 
To make it + * clear that in this case we're working in the linear buffer of + * the whole sg entry in the kernel's direct map rather than + * within the mapped buffer of a single page, compute the + * address as an offset from the page_address() of the first + * page of the sg entry. Either way the result is the address + * in the direct map, but this makes it clearer what is really + * going on. + */ + walk->__addr = page_address(base_page) + walk->offset; + } } /** @@ -132,14 +137,14 @@ static inline unsigned int scatterwalk_next(struct scatter_walk *walk, { unsigned int nbytes = scatterwalk_clamp(walk, total); - walk->__addr = scatterwalk_map(walk); + scatterwalk_map(walk); return nbytes; } -static inline void scatterwalk_unmap(const void *vaddr) +static inline void scatterwalk_unmap(struct scatter_walk *walk) { if (IS_ENABLED(CONFIG_HIGHMEM)) - kunmap_local(vaddr); + kunmap_local(walk->__addr); } static inline void scatterwalk_advance(struct scatter_walk *walk, @@ -159,7 +164,7 @@ static inline void scatterwalk_advance(struct scatter_walk *walk, static inline void scatterwalk_done_src(struct scatter_walk *walk, unsigned int nbytes) { - scatterwalk_unmap(walk->addr); + scatterwalk_unmap(walk); scatterwalk_advance(walk, nbytes); } @@ -175,7 +180,7 @@ static inline void scatterwalk_done_src(struct scatter_walk *walk, static inline void scatterwalk_done_dst(struct scatter_walk *walk, unsigned int nbytes) { - scatterwalk_unmap(walk->addr); + scatterwalk_unmap(walk); /* * Explicitly check ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE instead of just * relying on flush_dcache_page() being a no-op when not implemented, -- cgit v1.2.3 From fc8d5bba61ad8087af9a56337a7a297af6b46129 Mon Sep 17 00:00:00 2001 From: Herbert Xu Date: Thu, 13 Mar 2025 13:14:53 +0800 Subject: lib/scatterlist: Add SG_MITER_LOCAL and use it Add kmap_local support to the scatterlist iterator. Use it for all the helper functions in lib/scatterlist. Signed-off-by: Herbert Xu --- include/linux/scatterlist.h | 1 + 1 file changed, 1 insertion(+) (limited to 'include') diff --git a/include/linux/scatterlist.h b/include/linux/scatterlist.h index d836e7440ee8..138e2f1bd08f 100644 --- a/include/linux/scatterlist.h +++ b/include/linux/scatterlist.h @@ -671,6 +671,7 @@ sg_page_iter_dma_address(struct sg_dma_page_iter *dma_iter) #define SG_MITER_ATOMIC (1 << 0) /* use kmap_atomic */ #define SG_MITER_TO_SG (1 << 1) /* flush back to phys on unmap */ #define SG_MITER_FROM_SG (1 << 2) /* nop */ +#define SG_MITER_LOCAL (1 << 3) /* use kmap_local */ struct sg_mapping_iter { /* the following three fields can be accessed directly */ -- cgit v1.2.3 From e9ed7aff2554176cac0c49907e14d55679d67f8a Mon Sep 17 00:00:00 2001 From: Herbert Xu Date: Fri, 14 Mar 2025 11:27:20 +0800 Subject: crypto: scatterwalk - Use nth_page instead of doing it by hand Curiously, the Crypto API scatterwalk incremented pages by hand rather than using nth_page. Possibly because scatterwalk predates nth_page (the following commit is from the history tree): commit 3957f2b34960d85b63e814262a8be7d5ad91444d Author: James Morris Date: Sun Feb 2 07:35:32 2003 -0800 [CRYPTO]: in/out scatterlist support for ciphers. Fix this by using nth_page. 
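The difference matters on SPARSEMEM configurations without vmemmap, where the struct pages backing one sg entry are not guaranteed to be contiguous in memory. An illustrative contrast (not part of the diff):

static struct page *example_page_at(struct page *base_page,
				    unsigned int offset)
{
	/* Safe: nth_page() handles a discontiguous mem_map. */
	return nth_page(base_page, offset >> PAGE_SHIFT);

	/*
	 * Unsafe on SPARSEMEM without SPARSEMEM_VMEMMAP:
	 *	return base_page + (offset >> PAGE_SHIFT);
	 * as it assumes adjacent pages have adjacent struct pages.
	 */
}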
Signed-off-by: Herbert Xu --- include/crypto/scatterwalk.h | 30 ++++++++++++++++++++---------- 1 file changed, 20 insertions(+), 10 deletions(-) (limited to 'include') diff --git a/include/crypto/scatterwalk.h b/include/crypto/scatterwalk.h index b7e617ae4442..94a8585f26b2 100644 --- a/include/crypto/scatterwalk.h +++ b/include/crypto/scatterwalk.h @@ -100,11 +100,15 @@ static inline void scatterwalk_get_sglist(struct scatter_walk *walk, static inline void scatterwalk_map(struct scatter_walk *walk) { struct page *base_page = sg_page(walk->sg); + unsigned int offset = walk->offset; + void *addr; if (IS_ENABLED(CONFIG_HIGHMEM)) { - walk->__addr = kmap_local_page(base_page + - (walk->offset >> PAGE_SHIFT)) + - offset_in_page(walk->offset); + struct page *page; + + page = nth_page(base_page, offset >> PAGE_SHIFT); + offset = offset_in_page(offset); + addr = kmap_local_page(page) + offset; } else { /* * When !HIGHMEM we allow the walker to return segments that @@ -117,8 +121,10 @@ static inline void scatterwalk_map(struct scatter_walk *walk) * in the direct map, but this makes it clearer what is really * going on. */ - walk->__addr = page_address(base_page) + walk->offset; + addr = page_address(base_page) + offset; } + + walk->__addr = addr; } /** @@ -189,14 +195,18 @@ static inline void scatterwalk_done_dst(struct scatter_walk *walk, * reliably optimized out or not. */ if (ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE) { - struct page *base_page, *start_page, *end_page, *page; + struct page *base_page; + unsigned int offset; + int start, end, i; base_page = sg_page(walk->sg); - start_page = base_page + (walk->offset >> PAGE_SHIFT); - end_page = base_page + ((walk->offset + nbytes + - PAGE_SIZE - 1) >> PAGE_SHIFT); - for (page = start_page; page < end_page; page++) - flush_dcache_page(page); + offset = walk->offset; + start = offset >> PAGE_SHIFT; + end = start + (nbytes >> PAGE_SHIFT); + end += (offset_in_page(offset) + offset_in_page(nbytes) + + PAGE_SIZE - 1) >> PAGE_SHIFT; + for (i = start; i < end; i++) + flush_dcache_page(nth_page(base_page, i)); } scatterwalk_advance(walk, nbytes); } -- cgit v1.2.3 From 2d3553ecb4e316a74571da253191c37fb90cb815 Mon Sep 17 00:00:00 2001 From: Herbert Xu Date: Sat, 15 Mar 2025 18:30:22 +0800 Subject: crypto: scomp - Remove support for some non-trivial SG lists As the only user of acomp/scomp uses a trivial single-page SG list, remove support for everything else in preparation for the addition of virtual address support. However, keep support for non-trivial source SG lists as that user is currently jumping through hoops in order to linearise the source data. Limit the source SG linearisation buffer to a single page as that user never goes over that. The only other potential user is also unlikely to exceed that (IPComp) and it can easily do its own linearisation if necessary. Also keep the destination SG linearisation for IPComp. Signed-off-by: Herbert Xu --- include/crypto/acompress.h | 17 +++--------------- include/crypto/internal/scompress.h | 2 -- 2 files changed, 3 insertions(+), 16 deletions(-) (limited to 'include') diff --git a/include/crypto/acompress.h b/include/crypto/acompress.h index c4d8a29274c6..53c9e632862b 100644 --- a/include/crypto/acompress.h +++ b/include/crypto/acompress.h @@ -18,8 +18,6 @@ #include #include -#define CRYPTO_ACOMP_ALLOC_OUTPUT 0x00000001 - /* Set this bit if source is virtual address instead of SG list.
*/ #define CRYPTO_ACOMP_REQ_SRC_VIRT 0x00000002 @@ -84,15 +82,12 @@ struct acomp_req { * * @compress: Function performs a compress operation * @decompress: Function performs a de-compress operation - * @dst_free: Frees destination buffer if allocated inside the - * algorithm * @reqsize: Context size for (de)compression requests * @base: Common crypto API algorithm data structure */ struct crypto_acomp { int (*compress)(struct acomp_req *req); int (*decompress)(struct acomp_req *req); - void (*dst_free)(struct scatterlist *dst); unsigned int reqsize; struct crypto_tfm base; }; @@ -261,9 +256,8 @@ static inline void acomp_request_set_callback(struct acomp_req *req, crypto_completion_t cmpl, void *data) { - u32 keep = CRYPTO_ACOMP_ALLOC_OUTPUT | CRYPTO_ACOMP_REQ_SRC_VIRT | - CRYPTO_ACOMP_REQ_SRC_NONDMA | CRYPTO_ACOMP_REQ_DST_VIRT | - CRYPTO_ACOMP_REQ_DST_NONDMA; + u32 keep = CRYPTO_ACOMP_REQ_SRC_VIRT | CRYPTO_ACOMP_REQ_SRC_NONDMA | + CRYPTO_ACOMP_REQ_DST_VIRT | CRYPTO_ACOMP_REQ_DST_NONDMA; req->base.complete = cmpl; req->base.data = data; @@ -297,13 +291,10 @@ static inline void acomp_request_set_params(struct acomp_req *req, req->slen = slen; req->dlen = dlen; - req->base.flags &= ~(CRYPTO_ACOMP_ALLOC_OUTPUT | - CRYPTO_ACOMP_REQ_SRC_VIRT | + req->base.flags &= ~(CRYPTO_ACOMP_REQ_SRC_VIRT | CRYPTO_ACOMP_REQ_SRC_NONDMA | CRYPTO_ACOMP_REQ_DST_VIRT | CRYPTO_ACOMP_REQ_DST_NONDMA); - if (!req->dst) - req->base.flags |= CRYPTO_ACOMP_ALLOC_OUTPUT; } /** @@ -403,7 +394,6 @@ static inline void acomp_request_set_dst_dma(struct acomp_req *req, req->dvirt = dst; req->dlen = dlen; - req->base.flags &= ~CRYPTO_ACOMP_ALLOC_OUTPUT; req->base.flags &= ~CRYPTO_ACOMP_REQ_DST_NONDMA; req->base.flags |= CRYPTO_ACOMP_REQ_DST_VIRT; } @@ -424,7 +414,6 @@ static inline void acomp_request_set_dst_nondma(struct acomp_req *req, req->dvirt = dst; req->dlen = dlen; - req->base.flags &= ~CRYPTO_ACOMP_ALLOC_OUTPUT; req->base.flags |= CRYPTO_ACOMP_REQ_DST_NONDMA; req->base.flags |= CRYPTO_ACOMP_REQ_DST_VIRT; } diff --git a/include/crypto/internal/scompress.h b/include/crypto/internal/scompress.h index 88986ab8ce15..f25aa2ea3b48 100644 --- a/include/crypto/internal/scompress.h +++ b/include/crypto/internal/scompress.h @@ -12,8 +12,6 @@ #include #include -#define SCOMP_SCRATCH_SIZE 131072 - struct acomp_req; struct crypto_scomp { -- cgit v1.2.3 From 7cf97a11743a66b67e8225545f0998fa1a3455d4 Mon Sep 17 00:00:00 2001 From: Herbert Xu Date: Sat, 15 Mar 2025 18:30:29 +0800 Subject: crypto: acomp - Remove dst_free Remove the unused dst_free hook. Signed-off-by: Herbert Xu --- include/crypto/internal/acompress.h | 2 -- 1 file changed, 2 deletions(-) (limited to 'include') diff --git a/include/crypto/internal/acompress.h b/include/crypto/internal/acompress.h index 957a5ed7c7f1..575dbcbc0df4 100644 --- a/include/crypto/internal/acompress.h +++ b/include/crypto/internal/acompress.h @@ -17,7 +17,6 @@ * * @compress: Function performs a compress operation * @decompress: Function performs a de-compress operation - * @dst_free: Frees destination buffer if allocated inside the algorithm * @init: Initialize the cryptographic transformation object. * This function is used to initialize the cryptographic * transformation object. 
This function is called only once at @@ -38,7 +37,6 @@ struct acomp_alg { int (*compress)(struct acomp_req *req); int (*decompress)(struct acomp_req *req); - void (*dst_free)(struct scatterlist *dst); int (*init)(struct crypto_acomp *tfm); void (*exit)(struct crypto_acomp *tfm); -- cgit v1.2.3 From 5416b8a741d6d09369b973cd9d4dacb1887c24df Mon Sep 17 00:00:00 2001 From: Herbert Xu Date: Sat, 15 Mar 2025 18:30:34 +0800 Subject: crypto: acomp - Add ACOMP_REQUEST_ALLOC and acomp_request_alloc_extra Add ACOMP_REQUEST_ALLOC which is a wrapper around acomp_request_alloc that falls back to a synchronous stack reqeust if the allocation fails. Also add ACOMP_REQUEST_ON_STACK which stores the request on the stack only. The request should be freed with acomp_request_free. Finally add acomp_request_alloc_extra which gives the user extra memory to use in conjunction with the request. Signed-off-by: Herbert Xu --- include/crypto/acompress.h | 80 +++++++++++++++++++++++++++++++++++-- include/crypto/internal/acompress.h | 6 +++ include/linux/crypto.h | 1 + 3 files changed, 84 insertions(+), 3 deletions(-) (limited to 'include') diff --git a/include/crypto/acompress.h b/include/crypto/acompress.h index 53c9e632862b..03cb381c2c54 100644 --- a/include/crypto/acompress.h +++ b/include/crypto/acompress.h @@ -10,9 +10,11 @@ #define _CRYPTO_ACOMP_H #include +#include #include #include #include +#include #include #include #include @@ -32,6 +34,14 @@ #define CRYPTO_ACOMP_DST_MAX 131072 +#define MAX_SYNC_COMP_REQSIZE 0 + +#define ACOMP_REQUEST_ALLOC(name, tfm, gfp) \ + char __##name##_req[sizeof(struct acomp_req) + \ + MAX_SYNC_COMP_REQSIZE] CRYPTO_MINALIGN_ATTR; \ + struct acomp_req *name = acomp_request_on_stack_init( \ + __##name##_req, (tfm), (gfp), false) + struct acomp_req; struct acomp_req_chain { @@ -83,12 +93,14 @@ struct acomp_req { * @compress: Function performs a compress operation * @decompress: Function performs a de-compress operation * @reqsize: Context size for (de)compression requests + * @fb: Synchronous fallback tfm * @base: Common crypto API algorithm data structure */ struct crypto_acomp { int (*compress)(struct acomp_req *req); int (*decompress)(struct acomp_req *req); unsigned int reqsize; + struct crypto_acomp *fb; struct crypto_tfm base; }; @@ -210,24 +222,68 @@ static inline int crypto_has_acomp(const char *alg_name, u32 type, u32 mask) return crypto_has_alg(alg_name, type, mask); } +static inline const char *crypto_acomp_alg_name(struct crypto_acomp *tfm) +{ + return crypto_tfm_alg_name(crypto_acomp_tfm(tfm)); +} + +static inline const char *crypto_acomp_driver_name(struct crypto_acomp *tfm) +{ + return crypto_tfm_alg_driver_name(crypto_acomp_tfm(tfm)); +} + /** * acomp_request_alloc() -- allocates asynchronous (de)compression request * * @tfm: ACOMPRESS tfm handle allocated with crypto_alloc_acomp() + * @gfp: gfp to pass to kzalloc (defaults to GFP_KERNEL) * * Return: allocated handle in case of success or NULL in case of an error */ -static inline struct acomp_req *acomp_request_alloc_noprof(struct crypto_acomp *tfm) +static inline struct acomp_req *acomp_request_alloc_extra_noprof( + struct crypto_acomp *tfm, size_t extra, gfp_t gfp) { struct acomp_req *req; + size_t len; + + len = ALIGN(sizeof(*req) + crypto_acomp_reqsize(tfm), CRYPTO_MINALIGN); + if (check_add_overflow(len, extra, &len)) + return NULL; - req = kzalloc_noprof(sizeof(*req) + crypto_acomp_reqsize(tfm), GFP_KERNEL); + req = kzalloc_noprof(len, gfp); if (likely(req)) acomp_request_set_tfm(req, tfm); return req; } +#define 
acomp_request_alloc_noprof(tfm, ...) \ + CONCATENATE(acomp_request_alloc_noprof_, COUNT_ARGS(__VA_ARGS__))( \ + tfm, ##__VA_ARGS__) +#define acomp_request_alloc_noprof_0(tfm) \ + acomp_request_alloc_noprof_1(tfm, GFP_KERNEL) +#define acomp_request_alloc_noprof_1(tfm, gfp) \ + acomp_request_alloc_extra_noprof(tfm, 0, gfp) #define acomp_request_alloc(...) alloc_hooks(acomp_request_alloc_noprof(__VA_ARGS__)) +/** + * acomp_request_alloc_extra() -- allocate acomp request with extra memory + * + * @tfm: ACOMPRESS tfm handle allocated with crypto_alloc_acomp() + * @extra: amount of extra memory + * @gfp: gfp to pass to kzalloc + * + * Return: allocated handle in case of success or NULL in case of an error + */ +#define acomp_request_alloc_extra(...) alloc_hooks(acomp_request_alloc_extra_noprof(__VA_ARGS__)) + +static inline void *acomp_request_extra(struct acomp_req *req) +{ + struct crypto_acomp *tfm = crypto_acomp_reqtfm(req); + size_t len; + + len = ALIGN(sizeof(*req) + crypto_acomp_reqsize(tfm), CRYPTO_MINALIGN); + return (void *)((char *)req + len); +} + /** * acomp_request_free() -- zeroize and free asynchronous (de)compression * request as well as the output buffer if allocated @@ -237,6 +293,8 @@ static inline struct acomp_req *acomp_request_alloc_noprof(struct crypto_acomp * */ static inline void acomp_request_free(struct acomp_req *req) { + if (!req || (req->base.flags & CRYPTO_TFM_REQ_ON_STACK)) + return; kfree_sensitive(req); } @@ -257,7 +315,8 @@ static inline void acomp_request_set_callback(struct acomp_req *req, void *data) { u32 keep = CRYPTO_ACOMP_REQ_SRC_VIRT | CRYPTO_ACOMP_REQ_SRC_NONDMA | - CRYPTO_ACOMP_REQ_DST_VIRT | CRYPTO_ACOMP_REQ_DST_NONDMA; + CRYPTO_ACOMP_REQ_DST_VIRT | CRYPTO_ACOMP_REQ_DST_NONDMA | + CRYPTO_TFM_REQ_ON_STACK; req->base.complete = cmpl; req->base.data = data; @@ -446,4 +505,19 @@ int crypto_acomp_compress(struct acomp_req *req); */ int crypto_acomp_decompress(struct acomp_req *req); +static inline struct acomp_req *acomp_request_on_stack_init( + char *buf, struct crypto_acomp *tfm, gfp_t gfp, bool stackonly) +{ + struct acomp_req *req; + + if (!stackonly && (req = acomp_request_alloc(tfm, gfp))) + return req; + + req = (void *)buf; + acomp_request_set_tfm(req, tfm->fb); + req->base.flags = CRYPTO_TFM_REQ_ON_STACK; + + return req; +} + #endif diff --git a/include/crypto/internal/acompress.h b/include/crypto/internal/acompress.h index 575dbcbc0df4..c1ed55a0e3bf 100644 --- a/include/crypto/internal/acompress.h +++ b/include/crypto/internal/acompress.h @@ -12,6 +12,12 @@ #include #include +#define ACOMP_REQUEST_ON_STACK(name, tfm) \ + char __##name##_req[sizeof(struct acomp_req) + \ + MAX_SYNC_COMP_REQSIZE] CRYPTO_MINALIGN_ATTR; \ + struct acomp_req *name = acomp_request_on_stack_init( \ + __##name##_req, (tfm), 0, true) + /** * struct acomp_alg - asynchronous compression algorithm * diff --git a/include/linux/crypto.h b/include/linux/crypto.h index 61ac11226638..ea3b95bdbde3 100644 --- a/include/linux/crypto.h +++ b/include/linux/crypto.h @@ -138,6 +138,7 @@ #define CRYPTO_TFM_REQ_FORBID_WEAK_KEYS 0x00000100 #define CRYPTO_TFM_REQ_MAY_SLEEP 0x00000200 #define CRYPTO_TFM_REQ_MAY_BACKLOG 0x00000400 +#define CRYPTO_TFM_REQ_ON_STACK 0x00000800 /* * Miscellaneous stuff. -- cgit v1.2.3 From 8a6771cda3f48a4d954647d69ff0094346db6191 Mon Sep 17 00:00:00 2001 From: Herbert Xu Date: Sat, 15 Mar 2025 18:30:40 +0800 Subject: crypto: acomp - Add support for folios For many users, it's easier to supply a folio rather than an SG list since they already have them. 
Add support for folios to the acomp interface.

Signed-off-by: Herbert Xu
---
 include/crypto/acompress.h          | 89 +++++++++++++++++++++++++++++++++++--
 include/crypto/internal/acompress.h | 18 ++++++++
 2 files changed, 103 insertions(+), 4 deletions(-)

(limited to 'include')

diff --git a/include/crypto/acompress.h b/include/crypto/acompress.h
index 03cb381c2c54..c497c73baf13 100644
--- a/include/crypto/acompress.h
+++ b/include/crypto/acompress.h
@@ -32,6 +32,12 @@
 /* Set this bit for if virtual address destination cannot be used for DMA. */
 #define CRYPTO_ACOMP_REQ_DST_NONDMA	0x00000010
 
+/* Set this bit if source is a folio. */
+#define CRYPTO_ACOMP_REQ_SRC_FOLIO	0x00000020
+
+/* Set this bit if destination is a folio. */
+#define CRYPTO_ACOMP_REQ_DST_FOLIO	0x00000040
+
 #define CRYPTO_ACOMP_DST_MAX		131072
 
 #define MAX_SYNC_COMP_REQSIZE		0
@@ -43,6 +49,7 @@
 		__##name##_req, (tfm), (gfp), false)
 
 struct acomp_req;
+struct folio;
 
 struct acomp_req_chain {
 	struct list_head head;
@@ -53,16 +60,31 @@ struct acomp_req_chain {
 	void *data;
 	struct scatterlist ssg;
 	struct scatterlist dsg;
-	const u8 *src;
-	u8 *dst;
+	union {
+		const u8 *src;
+		struct folio *sfolio;
+	};
+	union {
+		u8 *dst;
+		struct folio *dfolio;
+	};
+	size_t soff;
+	size_t doff;
+	u32 flags;
 };
 
 /**
  * struct acomp_req - asynchronous (de)compression request
  *
  * @base: Common attributes for asynchronous crypto requests
- * @src: Source Data
- * @dst: Destination data
+ * @src: Source scatterlist
+ * @dst: Destination scatterlist
+ * @svirt: Source virtual address
+ * @dvirt: Destination virtual address
+ * @sfolio: Source folio
+ * @soff: Source folio offset
+ * @dfolio: Destination folio
+ * @doff: Destination folio offset
  * @slen: Size of the input buffer
  * @dlen: Size of the output buffer and number of bytes produced
  * @chain: Private API code data, do not use
@@ -73,11 +95,15 @@ struct acomp_req {
 	union {
 		struct scatterlist *src;
 		const u8 *svirt;
+		struct folio *sfolio;
 	};
 	union {
 		struct scatterlist *dst;
 		u8 *dvirt;
+		struct folio *dfolio;
 	};
+	size_t soff;
+	size_t doff;
 	unsigned int slen;
 	unsigned int dlen;
 
@@ -316,6 +342,7 @@ static inline void acomp_request_set_callback(struct acomp_req *req,
 {
 	u32 keep = CRYPTO_ACOMP_REQ_SRC_VIRT | CRYPTO_ACOMP_REQ_SRC_NONDMA |
 		   CRYPTO_ACOMP_REQ_DST_VIRT | CRYPTO_ACOMP_REQ_DST_NONDMA |
+		   CRYPTO_ACOMP_REQ_SRC_FOLIO | CRYPTO_ACOMP_REQ_DST_FOLIO |
 		   CRYPTO_TFM_REQ_ON_STACK;
 
 	req->base.complete = cmpl;
	req->base.data = data;
@@ -352,6 +379,8 @@ static inline void acomp_request_set_params(struct acomp_req *req,
 
 	req->base.flags &= ~(CRYPTO_ACOMP_REQ_SRC_VIRT |
 			     CRYPTO_ACOMP_REQ_SRC_NONDMA |
+			     CRYPTO_ACOMP_REQ_SRC_FOLIO |
+			     CRYPTO_ACOMP_REQ_DST_FOLIO |
 			     CRYPTO_ACOMP_REQ_DST_VIRT |
 			     CRYPTO_ACOMP_REQ_DST_NONDMA);
 }
@@ -374,6 +403,7 @@ static inline void acomp_request_set_src_sg(struct acomp_req *req,
 
 	req->base.flags &= ~CRYPTO_ACOMP_REQ_SRC_NONDMA;
 	req->base.flags &= ~CRYPTO_ACOMP_REQ_SRC_VIRT;
+	req->base.flags &= ~CRYPTO_ACOMP_REQ_SRC_FOLIO;
 }
 
@@ -393,6 +423,7 @@ static inline void acomp_request_set_src_dma(struct acomp_req *req,
 	req->slen = slen;
 
 	req->base.flags &= ~CRYPTO_ACOMP_REQ_SRC_NONDMA;
+	req->base.flags &= ~CRYPTO_ACOMP_REQ_SRC_FOLIO;
 	req->base.flags |= CRYPTO_ACOMP_REQ_SRC_VIRT;
 }
 
@@ -413,10 +444,34 @@ static inline void acomp_request_set_src_nondma(struct acomp_req *req,
 	req->svirt = src;
 	req->slen = slen;
 
+	req->base.flags &= ~CRYPTO_ACOMP_REQ_SRC_FOLIO;
 	req->base.flags |= CRYPTO_ACOMP_REQ_SRC_NONDMA;
 	req->base.flags |= CRYPTO_ACOMP_REQ_SRC_VIRT;
 }
 
+/**
+ * acomp_request_set_src_folio() -- Sets source folio
+ *
+ * Sets source folio required by an acomp operation.
+ *
+ * @req: asynchronous compress request
+ * @folio: pointer to input folio
+ * @off: input folio offset
+ * @len: size of the input buffer
+ */
+static inline void acomp_request_set_src_folio(struct acomp_req *req,
+					       struct folio *folio, size_t off,
+					       unsigned int len)
+{
+	req->sfolio = folio;
+	req->soff = off;
+	req->slen = len;
+
+	req->base.flags &= ~CRYPTO_ACOMP_REQ_SRC_NONDMA;
+	req->base.flags &= ~CRYPTO_ACOMP_REQ_SRC_VIRT;
+	req->base.flags |= CRYPTO_ACOMP_REQ_SRC_FOLIO;
+}
+
 /**
  * acomp_request_set_dst_sg() -- Sets destination scatterlist
  *
@@ -435,6 +490,7 @@ static inline void acomp_request_set_dst_sg(struct acomp_req *req,
 
 	req->base.flags &= ~CRYPTO_ACOMP_REQ_DST_NONDMA;
 	req->base.flags &= ~CRYPTO_ACOMP_REQ_DST_VIRT;
+	req->base.flags &= ~CRYPTO_ACOMP_REQ_DST_FOLIO;
 }
 
@@ -454,6 +510,7 @@ static inline void acomp_request_set_dst_dma(struct acomp_req *req,
 	req->dlen = dlen;
 
 	req->base.flags &= ~CRYPTO_ACOMP_REQ_DST_NONDMA;
+	req->base.flags &= ~CRYPTO_ACOMP_REQ_DST_FOLIO;
 	req->base.flags |= CRYPTO_ACOMP_REQ_DST_VIRT;
 }
 
@@ -473,10 +530,34 @@ static inline void acomp_request_set_dst_nondma(struct acomp_req *req,
 	req->dvirt = dst;
 	req->dlen = dlen;
 
+	req->base.flags &= ~CRYPTO_ACOMP_REQ_DST_FOLIO;
 	req->base.flags |= CRYPTO_ACOMP_REQ_DST_NONDMA;
 	req->base.flags |= CRYPTO_ACOMP_REQ_DST_VIRT;
 }
 
+/**
+ * acomp_request_set_dst_folio() -- Sets destination folio
+ *
+ * Sets destination folio required by an acomp operation.
+ *
+ * @req: asynchronous compress request
+ * @folio: pointer to output folio
+ * @off: output folio offset
+ * @len: size of the output buffer
+ */
+static inline void acomp_request_set_dst_folio(struct acomp_req *req,
+					       struct folio *folio, size_t off,
+					       unsigned int len)
+{
+	req->dfolio = folio;
+	req->doff = off;
+	req->dlen = len;
+
+	req->base.flags &= ~CRYPTO_ACOMP_REQ_DST_NONDMA;
+	req->base.flags &= ~CRYPTO_ACOMP_REQ_DST_VIRT;
+	req->base.flags |= CRYPTO_ACOMP_REQ_DST_FOLIO;
+}
+
 static inline void acomp_request_chain(struct acomp_req *req,
 				       struct acomp_req *head)
 {
diff --git a/include/crypto/internal/acompress.h b/include/crypto/internal/acompress.h
index c1ed55a0e3bf..aaf59f3236fa 100644
--- a/include/crypto/internal/acompress.h
+++ b/include/crypto/internal/acompress.h
@@ -103,6 +103,14 @@ static inline bool acomp_request_chained(struct acomp_req *req)
 	return crypto_request_chained(&req->base);
 }
 
+static inline bool acomp_request_issg(struct acomp_req *req)
+{
+	return !(req->base.flags & (CRYPTO_ACOMP_REQ_SRC_VIRT |
+				    CRYPTO_ACOMP_REQ_DST_VIRT |
+				    CRYPTO_ACOMP_REQ_SRC_FOLIO |
+				    CRYPTO_ACOMP_REQ_DST_FOLIO));
+}
+
 static inline bool acomp_request_src_isvirt(struct acomp_req *req)
 {
 	return req->base.flags & CRYPTO_ACOMP_REQ_SRC_VIRT;
@@ -135,6 +143,16 @@ static inline bool acomp_request_isnondma(struct acomp_req *req)
 				      CRYPTO_ACOMP_REQ_DST_NONDMA);
 }
 
+static inline bool acomp_request_src_isfolio(struct acomp_req *req)
+{
+	return req->base.flags & CRYPTO_ACOMP_REQ_SRC_FOLIO;
+}
+
+static inline bool acomp_request_dst_isfolio(struct acomp_req *req)
+{
+	return req->base.flags & CRYPTO_ACOMP_REQ_DST_FOLIO;
+}
+
 static inline bool crypto_acomp_req_chain(struct crypto_acomp *tfm)
 {
 	return crypto_tfm_req_chain(&tfm->base);
-- cgit v1.2.3 

From eb2953d26971f3083bbf95de4bc997b5bedf0b6e Mon Sep 17 00:00:00 2001
From: Herbert Xu
Date: Sat, 15 Mar 2025 18:30:43 +0800
Subject: xfrm: ipcomp: Use crypto_acomp interface

Replace the legacy compression interface with the new acomp interface.
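The conversion follows the usual acomp calling pattern, roughly as in
the sketch below (illustrative only, not the actual ipcomp code; the
algorithm name "deflate", the completion callback ipcomp_done and the
scatterlists src_sg/dst_sg are placeholders):

	struct crypto_acomp *tfm;
	struct acomp_req *req;
	int err;

	tfm = crypto_alloc_acomp("deflate", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	req = acomp_request_alloc(tfm);
	if (!req) {
		crypto_free_acomp(tfm);
		return -ENOMEM;
	}

	acomp_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
				   ipcomp_done, skb);
	acomp_request_set_params(req, src_sg, dst_sg, slen, dlen);

	/*
	 * May return -EINPROGRESS; the request is then freed from the
	 * completion callback once the operation has finished.
	 */
	err = crypto_acomp_compress(req);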
This is the first user to make full use of the asynchronous nature of
acomp by plugging into the existing xfrm resume interface.

As a result of SG support by acomp, the linear scratch buffer in ipcomp
can be removed.

Signed-off-by: Herbert Xu
---
 include/net/ipcomp.h | 13 +------------
 1 file changed, 1 insertion(+), 12 deletions(-)

(limited to 'include')

diff --git a/include/net/ipcomp.h b/include/net/ipcomp.h
index 8660a2a6d1fc..51401f01e2a5 100644
--- a/include/net/ipcomp.h
+++ b/include/net/ipcomp.h
@@ -3,20 +3,9 @@
 #define _NET_IPCOMP_H
 
 #include
-#include
-
-#define IPCOMP_SCRATCH_SIZE	65400
-
-struct crypto_comp;
-struct ip_comp_hdr;
-
-struct ipcomp_data {
-	u16 threshold;
-	struct crypto_comp * __percpu *tfms;
-};
 
 struct ip_comp_hdr;
-struct sk_buff;
+struct netlink_ext_ack;
 struct xfrm_state;
 
 int ipcomp_input(struct xfrm_state *x, struct sk_buff *skb);
-- cgit v1.2.3 

From fce8b8d5986b76a4fdd062e3eec1bb6420fee6c5 Mon Sep 17 00:00:00 2001
From: Ard Biesheuvel
Date: Sun, 16 Mar 2025 09:21:27 +0800
Subject: crypto: remove obsolete 'comp' compression API

The 'comp' compression API has been superseded by the acomp API, which
is a bit more cumbersome to use, but ultimately more flexible when it
comes to hardware implementations.

Now that all the users and implementations have been removed, let's
remove the core plumbing of the 'comp' API as well.

Signed-off-by: Ard Biesheuvel
Signed-off-by: Herbert Xu
---
 include/linux/crypto.h | 76 +-------------------------------------------------
 1 file changed, 1 insertion(+), 75 deletions(-)

(limited to 'include')

diff --git a/include/linux/crypto.h b/include/linux/crypto.h
index ea3b95bdbde3..1e3809d28abd 100644
--- a/include/linux/crypto.h
+++ b/include/linux/crypto.h
@@ -24,7 +24,6 @@
  */
 #define CRYPTO_ALG_TYPE_MASK		0x0000000f
 #define CRYPTO_ALG_TYPE_CIPHER		0x00000001
-#define CRYPTO_ALG_TYPE_COMPRESS	0x00000002
 #define CRYPTO_ALG_TYPE_AEAD		0x00000003
 #define CRYPTO_ALG_TYPE_LSKCIPHER	0x00000004
 #define CRYPTO_ALG_TYPE_SKCIPHER	0x00000005
@@ -246,26 +245,7 @@ struct cipher_alg {
 	void (*cia_decrypt)(struct crypto_tfm *tfm, u8 *dst, const u8 *src);
 };
 
-/**
- * struct compress_alg - compression/decompression algorithm
- * @coa_compress: Compress a buffer of specified length, storing the resulting
- *		  data in the specified buffer. Return the length of the
- *		  compressed data in dlen.
- * @coa_decompress: Decompress the source buffer, storing the uncompressed
- *		    data in the specified buffer. The length of the data is
- *		    returned in dlen.
- *
- * All fields are mandatory.
- */
-struct compress_alg {
-	int (*coa_compress)(struct crypto_tfm *tfm, const u8 *src,
-			    unsigned int slen, u8 *dst, unsigned int *dlen);
-	int (*coa_decompress)(struct crypto_tfm *tfm, const u8 *src,
-			      unsigned int slen, u8 *dst, unsigned int *dlen);
-};
-
 #define cra_cipher	cra_u.cipher
-#define cra_compress	cra_u.compress
 
 /**
  * struct crypto_alg - definition of a cryptograpic cipher algorithm
@@ -316,7 +296,7 @@ struct compress_alg {
  *	      transformation types. There are multiple options, such as
  *	      &crypto_skcipher_type, &crypto_ahash_type, &crypto_rng_type.
  *	      This field might be empty. In that case, there are no common
- *	      callbacks. This is the case for: cipher, compress, shash.
+ *	      callbacks. This is the case for: cipher.
  * @cra_u: Callbacks implementing the transformation. This is a union of
  *	   multiple structures. Depending on the type of transformation selected
  *	   by @cra_type and @cra_flags above, the associated structure must be
@@ -335,8 +315,6 @@ struct compress_alg {
  *	      @cra_init.
  * @cra_u.cipher: Union member which contains a single-block symmetric cipher
  *		  definition. See @struct @cipher_alg.
- * @cra_u.compress: Union member which contains a (de)compression algorithm.
- *		    See @struct @compress_alg.
  * @cra_module: Owner of this transformation implementation. Set to THIS_MODULE
  * @cra_list: internally used
  * @cra_users: internally used
@@ -366,7 +344,6 @@ struct crypto_alg {
 
 	union {
 		struct cipher_alg cipher;
-		struct compress_alg compress;
 	} cra_u;
 
 	int (*cra_init)(struct crypto_tfm *tfm);
@@ -440,10 +417,6 @@ struct crypto_tfm {
 	void *__crt_ctx[] CRYPTO_MINALIGN_ATTR;
 };
 
-struct crypto_comp {
-	struct crypto_tfm base;
-};
-
 /*
  * Transform user interface.
  */
@@ -500,53 +473,6 @@ static inline unsigned int crypto_tfm_ctx_alignment(void)
 	return __alignof__(tfm->__crt_ctx);
 }
 
-static inline struct crypto_comp *__crypto_comp_cast(struct crypto_tfm *tfm)
-{
-	return (struct crypto_comp *)tfm;
-}
-
-static inline struct crypto_comp *crypto_alloc_comp(const char *alg_name,
-						    u32 type, u32 mask)
-{
-	type &= ~CRYPTO_ALG_TYPE_MASK;
-	type |= CRYPTO_ALG_TYPE_COMPRESS;
-	mask |= CRYPTO_ALG_TYPE_MASK;
-
-	return __crypto_comp_cast(crypto_alloc_base(alg_name, type, mask));
-}
-
-static inline struct crypto_tfm *crypto_comp_tfm(struct crypto_comp *tfm)
-{
-	return &tfm->base;
-}
-
-static inline void crypto_free_comp(struct crypto_comp *tfm)
-{
-	crypto_free_tfm(crypto_comp_tfm(tfm));
-}
-
-static inline int crypto_has_comp(const char *alg_name, u32 type, u32 mask)
-{
-	type &= ~CRYPTO_ALG_TYPE_MASK;
-	type |= CRYPTO_ALG_TYPE_COMPRESS;
-	mask |= CRYPTO_ALG_TYPE_MASK;
-
-	return crypto_has_alg(alg_name, type, mask);
-}
-
-static inline const char *crypto_comp_name(struct crypto_comp *tfm)
-{
-	return crypto_tfm_alg_name(crypto_comp_tfm(tfm));
-}
-
-int crypto_comp_compress(struct crypto_comp *tfm,
-			 const u8 *src, unsigned int slen,
-			 u8 *dst, unsigned int *dlen);
-
-int crypto_comp_decompress(struct crypto_comp *tfm,
-			   const u8 *src, unsigned int slen,
-			   u8 *dst, unsigned int *dlen);
-
 static inline void crypto_reqchain_init(struct crypto_async_request *req)
 {
 	req->err = -EINPROGRESS;
-- cgit v1.2.3 

From ca17aa664054a5b809dc823ff1c202370ef398ef Mon Sep 17 00:00:00 2001
From: Eric Biggers
Date: Sat, 15 Mar 2025 21:57:47 -0700
Subject: crypto: lib/chacha - remove unused arch-specific init support

All implementations of chacha_init_arch() just call
chacha_init_generic(), so it is pointless. Just delete it, and replace
chacha_init() with what was previously chacha_init_generic().
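With that, callers simply do, for example (a minimal sketch; key, iv,
src, dst and len are assumed to be set up by the caller):

	u32 state[16];

	chacha_init(state, key, iv);
	chacha_crypt_generic(state, dst, src, len, 20);	/* 20 rounds */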
Signed-off-by: Eric Biggers
Acked-by: Ard Biesheuvel
Signed-off-by: Herbert Xu
---
 include/crypto/chacha.h | 11 +----------
 1 file changed, 1 insertion(+), 10 deletions(-)

(limited to 'include')

diff --git a/include/crypto/chacha.h b/include/crypto/chacha.h
index 5bae6a55b333..f8cc073bba41 100644
--- a/include/crypto/chacha.h
+++ b/include/crypto/chacha.h
@@ -62,8 +62,7 @@ static inline void chacha_init_consts(u32 *state)
 	state[3] = CHACHA_CONSTANT_TE_K;
 }
 
-void chacha_init_arch(u32 *state, const u32 *key, const u8 *iv);
-static inline void chacha_init_generic(u32 *state, const u32 *key, const u8 *iv)
+static inline void chacha_init(u32 *state, const u32 *key, const u8 *iv)
 {
 	chacha_init_consts(state);
 	state[4] = key[0];
@@ -80,14 +79,6 @@ static inline void chacha_init_generic(u32 *state, const u32 *key, const u8 *iv)
 	state[15] = get_unaligned_le32(iv + 12);
 }
 
-static inline void chacha_init(u32 *state, const u32 *key, const u8 *iv)
-{
-	if (IS_ENABLED(CONFIG_CRYPTO_ARCH_HAVE_LIB_CHACHA))
-		chacha_init_arch(state, key, iv);
-	else
-		chacha_init_generic(state, key, iv);
-}
-
 void chacha_crypt_arch(u32 *state, u8 *dst, const u8 *src, unsigned int bytes,
 		       int nrounds);
 void chacha_crypt_generic(u32 *state, u8 *dst, const u8 *src,
-- cgit v1.2.3