Diffstat (limited to 'include')
-rw-r--r--  include/crypto/aead.h | 87
-rw-r--r--  include/crypto/algapi.h | 12
-rw-r--r--  include/crypto/df_sp80090a.h | 28
-rw-r--r--  include/crypto/drbg.h | 25
-rw-r--r--  include/crypto/internal/drbg.h | 54
-rw-r--r--  include/crypto/internal/skcipher.h | 48
-rw-r--r--  include/crypto/rng.h | 11
-rw-r--r--  include/crypto/scatterwalk.h | 117
-rw-r--r--  include/keys/asymmetric-type.h | 2
-rw-r--r--  include/linux/cgroup.h | 14
-rw-r--r--  include/linux/console.h | 68
-rw-r--r--  include/linux/context_tracking_state.h | 44
-rw-r--r--  include/linux/cpuset.h | 9
-rw-r--r--  include/linux/fault-inject.h | 8
-rw-r--r--  include/linux/gfp_types.h | 6
-rw-r--r--  include/linux/kdb.h | 16
-rw-r--r--  include/linux/kernel_read_file.h | 1
-rw-r--r--  include/linux/key-type.h | 9
-rw-r--r--  include/linux/lsm_hooks.h | 73
-rw-r--r--  include/linux/memfd.h | 2
-rw-r--r--  include/linux/mempool.h | 58
-rw-r--r--  include/linux/notifier.h | 2
-rw-r--r--  include/linux/page-flags.h | 16
-rw-r--r--  include/linux/prandom.h | 6
-rw-r--r--  include/linux/random.h | 15
-rw-r--r--  include/linux/rculist_nulls.h | 6
-rw-r--r--  include/linux/rhashtable.h | 70
-rw-r--r--  include/linux/sched.h | 5
-rw-r--r--  include/linux/sched/ext.h | 27
-rw-r--r--  include/linux/security.h | 3
-rw-r--r--  include/linux/srcu.h | 147
-rw-r--r--  include/linux/srcutiny.h | 31
-rw-r--r--  include/linux/srcutree.h | 99
-rw-r--r--  include/linux/tpm.h | 1
-rw-r--r--  include/soc/fsl/caam-blob.h | 26
-rw-r--r--  include/trace/events/sched_ext.h | 39
36 files changed, 872 insertions, 313 deletions
diff --git a/include/crypto/aead.h b/include/crypto/aead.h
index 0e8a41638678..8e66a1fa9c78 100644
--- a/include/crypto/aead.h
+++ b/include/crypto/aead.h
@@ -159,6 +159,21 @@ struct crypto_aead {
struct crypto_tfm base;
};
+struct crypto_sync_aead {
+ struct crypto_aead base;
+};
+
+#define MAX_SYNC_AEAD_REQSIZE 384
+
+#define SYNC_AEAD_REQUEST_ON_STACK(name, _tfm) \
+ char __##name##_desc[sizeof(struct aead_request) + \
+ MAX_SYNC_AEAD_REQSIZE \
+ ] CRYPTO_MINALIGN_ATTR; \
+ struct aead_request *name = \
+ (((struct aead_request *)__##name##_desc)->base.tfm = \
+ crypto_sync_aead_tfm((_tfm)), \
+ (void *)__##name##_desc)
+
static inline struct crypto_aead *__crypto_aead_cast(struct crypto_tfm *tfm)
{
return container_of(tfm, struct crypto_aead, base);
@@ -180,11 +195,18 @@ static inline struct crypto_aead *__crypto_aead_cast(struct crypto_tfm *tfm)
*/
struct crypto_aead *crypto_alloc_aead(const char *alg_name, u32 type, u32 mask);
+struct crypto_sync_aead *crypto_alloc_sync_aead(const char *alg_name, u32 type, u32 mask);
+
static inline struct crypto_tfm *crypto_aead_tfm(struct crypto_aead *tfm)
{
return &tfm->base;
}
+static inline struct crypto_tfm *crypto_sync_aead_tfm(struct crypto_sync_aead *tfm)
+{
+ return crypto_aead_tfm(&tfm->base);
+}
+
/**
* crypto_free_aead() - zeroize and free aead handle
* @tfm: cipher handle to be freed
@@ -196,6 +218,11 @@ static inline void crypto_free_aead(struct crypto_aead *tfm)
crypto_destroy_tfm(tfm, crypto_aead_tfm(tfm));
}
+static inline void crypto_free_sync_aead(struct crypto_sync_aead *tfm)
+{
+ crypto_free_aead(&tfm->base);
+}
+
/**
* crypto_has_aead() - Search for the availability of an aead.
* @alg_name: is the cra_name / name or cra_driver_name / driver name of the
@@ -238,6 +265,11 @@ static inline unsigned int crypto_aead_ivsize(struct crypto_aead *tfm)
return crypto_aead_alg_ivsize(crypto_aead_alg(tfm));
}
+static inline unsigned int crypto_sync_aead_ivsize(struct crypto_sync_aead *tfm)
+{
+ return crypto_aead_ivsize(&tfm->base);
+}
+
/**
* crypto_aead_authsize() - obtain maximum authentication data size
* @tfm: cipher handle
@@ -255,6 +287,11 @@ static inline unsigned int crypto_aead_authsize(struct crypto_aead *tfm)
return tfm->authsize;
}
+static inline unsigned int crypto_sync_aead_authsize(struct crypto_sync_aead *tfm)
+{
+ return crypto_aead_authsize(&tfm->base);
+}
+
static inline unsigned int crypto_aead_alg_maxauthsize(struct aead_alg *alg)
{
return alg->maxauthsize;
@@ -265,6 +302,11 @@ static inline unsigned int crypto_aead_maxauthsize(struct crypto_aead *aead)
return crypto_aead_alg_maxauthsize(crypto_aead_alg(aead));
}
+static inline unsigned int crypto_sync_aead_maxauthsize(struct crypto_sync_aead *tfm)
+{
+ return crypto_aead_maxauthsize(&tfm->base);
+}
+
/**
* crypto_aead_blocksize() - obtain block size of cipher
* @tfm: cipher handle
@@ -280,6 +322,11 @@ static inline unsigned int crypto_aead_blocksize(struct crypto_aead *tfm)
return crypto_tfm_alg_blocksize(crypto_aead_tfm(tfm));
}
+static inline unsigned int crypto_sync_aead_blocksize(struct crypto_sync_aead *tfm)
+{
+ return crypto_aead_blocksize(&tfm->base);
+}
+
static inline unsigned int crypto_aead_alignmask(struct crypto_aead *tfm)
{
return crypto_tfm_alg_alignmask(crypto_aead_tfm(tfm));
@@ -300,6 +347,21 @@ static inline void crypto_aead_clear_flags(struct crypto_aead *tfm, u32 flags)
crypto_tfm_clear_flags(crypto_aead_tfm(tfm), flags);
}
+static inline u32 crypto_sync_aead_get_flags(struct crypto_sync_aead *tfm)
+{
+ return crypto_aead_get_flags(&tfm->base);
+}
+
+static inline void crypto_sync_aead_set_flags(struct crypto_sync_aead *tfm, u32 flags)
+{
+ crypto_aead_set_flags(&tfm->base, flags);
+}
+
+static inline void crypto_sync_aead_clear_flags(struct crypto_sync_aead *tfm, u32 flags)
+{
+ crypto_aead_clear_flags(&tfm->base, flags);
+}
+
/**
* crypto_aead_setkey() - set key for cipher
* @tfm: cipher handle
@@ -319,6 +381,12 @@ static inline void crypto_aead_clear_flags(struct crypto_aead *tfm, u32 flags)
int crypto_aead_setkey(struct crypto_aead *tfm,
const u8 *key, unsigned int keylen);
+static inline int crypto_sync_aead_setkey(struct crypto_sync_aead *tfm,
+ const u8 *key, unsigned int keylen)
+{
+ return crypto_aead_setkey(&tfm->base, key, keylen);
+}
+
/**
* crypto_aead_setauthsize() - set authentication data size
* @tfm: cipher handle
@@ -331,11 +399,24 @@ int crypto_aead_setkey(struct crypto_aead *tfm,
*/
int crypto_aead_setauthsize(struct crypto_aead *tfm, unsigned int authsize);
+static inline int crypto_sync_aead_setauthsize(struct crypto_sync_aead *tfm,
+ unsigned int authsize)
+{
+ return crypto_aead_setauthsize(&tfm->base, authsize);
+}
+
static inline struct crypto_aead *crypto_aead_reqtfm(struct aead_request *req)
{
return __crypto_aead_cast(req->base.tfm);
}
+static inline struct crypto_sync_aead *crypto_sync_aead_reqtfm(struct aead_request *req)
+{
+ struct crypto_aead *tfm = crypto_aead_reqtfm(req);
+
+ return container_of(tfm, struct crypto_sync_aead, base);
+}
+
/**
* crypto_aead_encrypt() - encrypt plaintext
* @req: reference to the aead_request handle that holds all information
@@ -417,6 +498,12 @@ static inline void aead_request_set_tfm(struct aead_request *req,
req->base.tfm = crypto_aead_tfm(tfm);
}
+static inline void aead_request_set_sync_tfm(struct aead_request *req,
+ struct crypto_sync_aead *tfm)
+{
+ aead_request_set_tfm(req, &tfm->base);
+}
+
/**
* aead_request_alloc() - allocate request data structure
* @tfm: cipher handle to be registered with the request
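A usage sketch of the new sync AEAD helpers above (illustrative only, not taken from the patch: the "gcm(aes)" algorithm, the buffer arguments and the trimmed error handling are assumptions; SYNC_AEAD_REQUEST_ON_STACK() binds the on-stack request to the tfm itself):

static int example_sync_aead_seal(struct scatterlist *sg, unsigned int cryptlen,
				  u8 *iv, const u8 *key, unsigned int keylen)
{
	struct crypto_sync_aead *tfm;
	int err;

	tfm = crypto_alloc_sync_aead("gcm(aes)", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	err = crypto_sync_aead_setkey(tfm, key, keylen);
	if (!err) {
		/* Request lives on the stack, no aead_request_alloc() needed. */
		SYNC_AEAD_REQUEST_ON_STACK(req, tfm);

		aead_request_set_callback(req, 0, NULL, NULL);
		aead_request_set_ad(req, 0);
		aead_request_set_crypt(req, sg, sg, cryptlen, iv);
		err = crypto_aead_encrypt(req);
	}

	crypto_free_sync_aead(tfm);
	return err;
}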
diff --git a/include/crypto/algapi.h b/include/crypto/algapi.h
index fc4574940636..05deea9dac5e 100644
--- a/include/crypto/algapi.h
+++ b/include/crypto/algapi.h
@@ -107,6 +107,18 @@ struct crypto_queue {
unsigned int max_qlen;
};
+struct scatter_walk {
+ /* Must be the first member, see struct skcipher_walk. */
+ union {
+ void *const addr;
+
+ /* Private API field, do not touch. */
+ union crypto_no_such_thing *__addr;
+ };
+ struct scatterlist *sg;
+ unsigned int offset;
+};
+
struct crypto_attr_alg {
char name[CRYPTO_MAX_ALG_NAME];
};
diff --git a/include/crypto/df_sp80090a.h b/include/crypto/df_sp80090a.h
new file mode 100644
index 000000000000..6b25305fe611
--- /dev/null
+++ b/include/crypto/df_sp80090a.h
@@ -0,0 +1,28 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+/*
+ * Copyright Stephan Mueller <smueller@chronox.de>, 2014
+ */
+
+#ifndef _CRYPTO_DF80090A_H
+#define _CRYPTO_DF80090A_H
+
+#include <crypto/internal/cipher.h>
+#include <crypto/aes.h>
+
+static inline int crypto_drbg_ctr_df_datalen(u8 statelen, u8 blocklen)
+{
+ return statelen + /* df_data */
+ blocklen + /* pad */
+ blocklen + /* iv */
+ statelen + blocklen; /* temp */
+}
+
+int crypto_drbg_ctr_df(struct crypto_aes_ctx *aes,
+ unsigned char *df_data,
+ size_t bytes_to_return,
+ struct list_head *seedlist,
+ u8 blocklen_bytes,
+ u8 statelen);
+
+#endif /* _CRYPTO_DF80090A_H */
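For a sense of scale, plugging in the AES-256 CTR DRBG parameters (statelen = 48, blocklen = 16; these values are assumed from the existing drbg_cores table) gives:

crypto_drbg_ctr_df_datalen(48, 16)
	= 48		/* df_data */
	+ 16		/* pad */
	+ 16		/* iv */
	+ 48 + 16	/* temp */
	= 144 bytes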
diff --git a/include/crypto/drbg.h b/include/crypto/drbg.h
index af5ad51d3eef..2d42518cbdce 100644
--- a/include/crypto/drbg.h
+++ b/include/crypto/drbg.h
@@ -47,6 +47,7 @@
#include <linux/module.h>
#include <linux/crypto.h>
#include <linux/slab.h>
+#include <crypto/internal/drbg.h>
#include <crypto/internal/rng.h>
#include <crypto/rng.h>
#include <linux/fips.h>
@@ -54,30 +55,6 @@
#include <linux/list.h>
#include <linux/workqueue.h>
-/*
- * Concatenation Helper and string operation helper
- *
- * SP800-90A requires the concatenation of different data. To avoid copying
- * buffers around or allocate additional memory, the following data structure
- * is used to point to the original memory with its size. In addition, it
- * is used to build a linked list. The linked list defines the concatenation
- * of individual buffers. The order of memory block referenced in that
- * linked list determines the order of concatenation.
- */
-struct drbg_string {
- const unsigned char *buf;
- size_t len;
- struct list_head list;
-};
-
-static inline void drbg_string_fill(struct drbg_string *string,
- const unsigned char *buf, size_t len)
-{
- string->buf = buf;
- string->len = len;
- INIT_LIST_HEAD(&string->list);
-}
-
struct drbg_state;
typedef uint32_t drbg_flag_t;
diff --git a/include/crypto/internal/drbg.h b/include/crypto/internal/drbg.h
new file mode 100644
index 000000000000..371e52dcee6c
--- /dev/null
+++ b/include/crypto/internal/drbg.h
@@ -0,0 +1,54 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+/*
+ * NIST SP800-90A DRBG derivation function
+ *
+ * Copyright (C) 2014, Stephan Mueller <smueller@chronox.de>
+ */
+
+#ifndef _INTERNAL_DRBG_H
+#define _INTERNAL_DRBG_H
+
+/*
+ * Convert an integer into a byte representation of this integer.
+ * The byte representation is big-endian
+ *
+ * @val value to be converted
+ * @buf buffer holding the converted integer -- caller must ensure that
+ * buffer size is at least 32 bit
+ */
+static inline void drbg_cpu_to_be32(__u32 val, unsigned char *buf)
+{
+ struct s {
+ __be32 conv;
+ };
+ struct s *conversion = (struct s *)buf;
+
+ conversion->conv = cpu_to_be32(val);
+}
+
+/*
+ * Concatenation Helper and string operation helper
+ *
+ * SP800-90A requires the concatenation of different data. To avoid copying
+ * buffers around or allocate additional memory, the following data structure
+ * is used to point to the original memory with its size. In addition, it
+ * is used to build a linked list. The linked list defines the concatenation
+ * of individual buffers. The order of memory block referenced in that
+ * linked list determines the order of concatenation.
+ */
+struct drbg_string {
+ const unsigned char *buf;
+ size_t len;
+ struct list_head list;
+};
+
+static inline void drbg_string_fill(struct drbg_string *string,
+ const unsigned char *buf, size_t len)
+{
+ string->buf = buf;
+ string->len = len;
+ INIT_LIST_HEAD(&string->list);
+}
+
+#endif //_INTERNAL_DRBG_H
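The list convention documented above can be used as follows (an illustrative fragment; the buffers and lengths are the caller's, and the drbg_string objects must stay alive for as long as the list is consumed):

	struct drbg_string pers, nonce;
	LIST_HEAD(seedlist);

	/* Describe "pers_buf || nonce_buf" without copying either buffer. */
	drbg_string_fill(&pers, pers_buf, pers_len);
	list_add_tail(&pers.list, &seedlist);
	drbg_string_fill(&nonce, nonce_buf, nonce_len);
	list_add_tail(&nonce.list, &seedlist);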
diff --git a/include/crypto/internal/skcipher.h b/include/crypto/internal/skcipher.h
index d5aa535263f6..0cad8e7364c8 100644
--- a/include/crypto/internal/skcipher.h
+++ b/include/crypto/internal/skcipher.h
@@ -10,7 +10,6 @@
#include <crypto/algapi.h>
#include <crypto/internal/cipher.h>
-#include <crypto/scatterwalk.h>
#include <crypto/skcipher.h>
#include <linux/types.h>
@@ -55,6 +54,47 @@ struct crypto_lskcipher_spawn {
struct crypto_spawn base;
};
+struct skcipher_walk {
+ union {
+ /* Virtual address of the source. */
+ struct {
+ struct {
+ const void *const addr;
+ } virt;
+ } src;
+
+ /* Private field for the API, do not use. */
+ struct scatter_walk in;
+ };
+
+ union {
+ /* Virtual address of the destination. */
+ struct {
+ struct {
+ void *const addr;
+ } virt;
+ } dst;
+
+ /* Private field for the API, do not use. */
+ struct scatter_walk out;
+ };
+
+ unsigned int nbytes;
+ unsigned int total;
+
+ u8 *page;
+ u8 *buffer;
+ u8 *oiv;
+ void *iv;
+
+ unsigned int ivsize;
+
+ int flags;
+ unsigned int blocksize;
+ unsigned int stride;
+ unsigned int alignmask;
+};
+
static inline struct crypto_instance *skcipher_crypto_instance(
struct skcipher_instance *inst)
{
@@ -171,6 +211,7 @@ void crypto_unregister_lskciphers(struct lskcipher_alg *algs, int count);
int lskcipher_register_instance(struct crypto_template *tmpl,
struct lskcipher_instance *inst);
+int skcipher_walk_done(struct skcipher_walk *walk, int res);
int skcipher_walk_virt(struct skcipher_walk *__restrict walk,
struct skcipher_request *__restrict req,
bool atomic);
@@ -181,6 +222,11 @@ int skcipher_walk_aead_decrypt(struct skcipher_walk *__restrict walk,
struct aead_request *__restrict req,
bool atomic);
+static inline void skcipher_walk_abort(struct skcipher_walk *walk)
+{
+ skcipher_walk_done(walk, -ECANCELED);
+}
+
static inline void *crypto_skcipher_ctx(struct crypto_skcipher *tfm)
{
return crypto_tfm_ctx(&tfm->base);
diff --git a/include/crypto/rng.h b/include/crypto/rng.h
index f8224cc390f8..d451b54b322a 100644
--- a/include/crypto/rng.h
+++ b/include/crypto/rng.h
@@ -169,12 +169,11 @@ static inline int crypto_rng_get_bytes(struct crypto_rng *tfm,
*
* The reset function completely re-initializes the random number generator
* referenced by the cipher handle by clearing the current state. The new state
- * is initialized with the caller provided seed or automatically, depending
- * on the random number generator type (the ANSI X9.31 RNG requires
- * caller-provided seed, the SP800-90A DRBGs perform an automatic seeding).
- * The seed is provided as a parameter to this function call. The provided seed
- * should have the length of the seed size defined for the random number
- * generator as defined by crypto_rng_seedsize.
+ * is initialized with the caller provided seed or automatically, depending on
+ * the random number generator type. (The SP800-90A DRBGs perform an automatic
+ * seeding.) The seed is provided as a parameter to this function call. The
+ * provided seed should have the length of the seed size defined for the random
+ * number generator as defined by crypto_rng_seedsize.
*
* Return: 0 if the setting of the key was successful; < 0 if an error occurred
*/
diff --git a/include/crypto/scatterwalk.h b/include/crypto/scatterwalk.h
index 83d14376ff2b..624fab589c2c 100644
--- a/include/crypto/scatterwalk.h
+++ b/include/crypto/scatterwalk.h
@@ -11,64 +11,11 @@
#ifndef _CRYPTO_SCATTERWALK_H
#define _CRYPTO_SCATTERWALK_H
-#include <linux/errno.h>
+#include <crypto/algapi.h>
+
#include <linux/highmem.h>
#include <linux/mm.h>
#include <linux/scatterlist.h>
-#include <linux/types.h>
-
-struct scatter_walk {
- /* Must be the first member, see struct skcipher_walk. */
- union {
- void *const addr;
-
- /* Private API field, do not touch. */
- union crypto_no_such_thing *__addr;
- };
- struct scatterlist *sg;
- unsigned int offset;
-};
-
-struct skcipher_walk {
- union {
- /* Virtual address of the source. */
- struct {
- struct {
- const void *const addr;
- } virt;
- } src;
-
- /* Private field for the API, do not use. */
- struct scatter_walk in;
- };
-
- union {
- /* Virtual address of the destination. */
- struct {
- struct {
- void *const addr;
- } virt;
- } dst;
-
- /* Private field for the API, do not use. */
- struct scatter_walk out;
- };
-
- unsigned int nbytes;
- unsigned int total;
-
- u8 *page;
- u8 *buffer;
- u8 *oiv;
- void *iv;
-
- unsigned int ivsize;
-
- int flags;
- unsigned int blocksize;
- unsigned int stride;
- unsigned int alignmask;
-};
static inline void scatterwalk_crypto_chain(struct scatterlist *head,
struct scatterlist *sg, int num)
@@ -227,6 +174,34 @@ static inline void scatterwalk_done_src(struct scatter_walk *walk,
scatterwalk_advance(walk, nbytes);
}
+/*
+ * Flush the dcache of any pages that overlap the region
+ * [offset, offset + nbytes) relative to base_page.
+ *
+ * This should be called only when ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE, to ensure
+ * that all relevant code (including the call to sg_page() in the caller, if
+ * applicable) gets fully optimized out when !ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE.
+ */
+static inline void __scatterwalk_flush_dcache_pages(struct page *base_page,
+ unsigned int offset,
+ unsigned int nbytes)
+{
+ unsigned int num_pages;
+
+ base_page += offset / PAGE_SIZE;
+ offset %= PAGE_SIZE;
+
+ /*
+ * This is an overflow-safe version of
+ * num_pages = DIV_ROUND_UP(offset + nbytes, PAGE_SIZE).
+ */
+ num_pages = nbytes / PAGE_SIZE;
+ num_pages += DIV_ROUND_UP(offset + (nbytes % PAGE_SIZE), PAGE_SIZE);
+
+ for (unsigned int i = 0; i < num_pages; i++)
+ flush_dcache_page(base_page + i);
+}
+
/**
* scatterwalk_done_dst() - Finish one step of a walk of destination scatterlist
* @walk: the scatter_walk
@@ -240,27 +215,9 @@ static inline void scatterwalk_done_dst(struct scatter_walk *walk,
unsigned int nbytes)
{
scatterwalk_unmap(walk);
- /*
- * Explicitly check ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE instead of just
- * relying on flush_dcache_page() being a no-op when not implemented,
- * since otherwise the BUG_ON in sg_page() does not get optimized out.
- * This also avoids having to consider whether the loop would get
- * reliably optimized out or not.
- */
- if (ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE) {
- struct page *base_page;
- unsigned int offset;
- int start, end, i;
-
- base_page = sg_page(walk->sg);
- offset = walk->offset;
- start = offset >> PAGE_SHIFT;
- end = start + (nbytes >> PAGE_SHIFT);
- end += (offset_in_page(offset) + offset_in_page(nbytes) +
- PAGE_SIZE - 1) >> PAGE_SHIFT;
- for (i = start; i < end; i++)
- flush_dcache_page(base_page + i);
- }
+ if (ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE)
+ __scatterwalk_flush_dcache_pages(sg_page(walk->sg),
+ walk->offset, nbytes);
scatterwalk_advance(walk, nbytes);
}
@@ -296,12 +253,4 @@ struct scatterlist *scatterwalk_ffwd(struct scatterlist dst[2],
struct scatterlist *src,
unsigned int len);
-int skcipher_walk_first(struct skcipher_walk *walk, bool atomic);
-int skcipher_walk_done(struct skcipher_walk *walk, int res);
-
-static inline void skcipher_walk_abort(struct skcipher_walk *walk)
-{
- skcipher_walk_done(walk, -ECANCELED);
-}
-
#endif /* _CRYPTO_SCATTERWALK_H */
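A quick numeric check of the overflow-safe page count used in __scatterwalk_flush_dcache_pages() above, with PAGE_SIZE = 4096 and illustrative values offset = 4000, nbytes = 8292:

	naive: DIV_ROUND_UP(4000 + 8292, 4096) = DIV_ROUND_UP(12292, 4096) = 4
	split: 8292 / 4096 = 2
	       DIV_ROUND_UP(4000 + 8292 % 4096, 4096) = DIV_ROUND_UP(4100, 4096) = 2
	       num_pages = 2 + 2 = 4

The split form never adds the full nbytes to offset, and since offset has already been reduced modulo PAGE_SIZE the intermediate sum stays below 2 * PAGE_SIZE, so it cannot overflow even for nbytes near UINT_MAX.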
diff --git a/include/keys/asymmetric-type.h b/include/keys/asymmetric-type.h
index 69a13e1e5b2e..1b91c8f98688 100644
--- a/include/keys/asymmetric-type.h
+++ b/include/keys/asymmetric-type.h
@@ -49,7 +49,7 @@ enum asymmetric_payload_bits {
*/
struct asymmetric_key_id {
unsigned short len;
- unsigned char data[];
+ unsigned char data[] __counted_by(len);
};
struct asymmetric_key_ids {
diff --git a/include/linux/cgroup.h b/include/linux/cgroup.h
index 6ed477338b16..bc892e3b37ee 100644
--- a/include/linux/cgroup.h
+++ b/include/linux/cgroup.h
@@ -137,9 +137,10 @@ extern void cgroup_cancel_fork(struct task_struct *p,
struct kernel_clone_args *kargs);
extern void cgroup_post_fork(struct task_struct *p,
struct kernel_clone_args *kargs);
-void cgroup_exit(struct task_struct *p);
-void cgroup_release(struct task_struct *p);
-void cgroup_free(struct task_struct *p);
+void cgroup_task_exit(struct task_struct *p);
+void cgroup_task_dead(struct task_struct *p);
+void cgroup_task_release(struct task_struct *p);
+void cgroup_task_free(struct task_struct *p);
int cgroup_init_early(void);
int cgroup_init(void);
@@ -680,9 +681,10 @@ static inline void cgroup_cancel_fork(struct task_struct *p,
struct kernel_clone_args *kargs) {}
static inline void cgroup_post_fork(struct task_struct *p,
struct kernel_clone_args *kargs) {}
-static inline void cgroup_exit(struct task_struct *p) {}
-static inline void cgroup_release(struct task_struct *p) {}
-static inline void cgroup_free(struct task_struct *p) {}
+static inline void cgroup_task_exit(struct task_struct *p) {}
+static inline void cgroup_task_dead(struct task_struct *p) {}
+static inline void cgroup_task_release(struct task_struct *p) {}
+static inline void cgroup_task_free(struct task_struct *p) {}
static inline int cgroup_init_early(void) { return 0; }
static inline int cgroup_init(void) { return 0; }
diff --git a/include/linux/console.h b/include/linux/console.h
index 031a58dc2b91..fc9f5c5c1b04 100644
--- a/include/linux/console.h
+++ b/include/linux/console.h
@@ -19,6 +19,7 @@
#include <linux/irq_work.h>
#include <linux/rculist.h>
#include <linux/rcuwait.h>
+#include <linux/smp.h>
#include <linux/types.h>
#include <linux/vesa.h>
@@ -185,6 +186,8 @@ static inline void con_debug_leave(void) { }
* printing callbacks must not be called.
* @CON_NBCON: Console can operate outside of the legacy style console_lock
* constraints.
+ * @CON_NBCON_ATOMIC_UNSAFE: The write_atomic() callback is not safe and is
+ * therefore only used by nbcon_atomic_flush_unsafe().
*/
enum cons_flags {
CON_PRINTBUFFER = BIT(0),
@@ -196,6 +199,7 @@ enum cons_flags {
CON_EXTENDED = BIT(6),
CON_SUSPENDED = BIT(7),
CON_NBCON = BIT(8),
+ CON_NBCON_ATOMIC_UNSAFE = BIT(9),
};
/**
@@ -602,16 +606,80 @@ static inline bool console_is_registered(const struct console *con)
extern void nbcon_cpu_emergency_enter(void);
extern void nbcon_cpu_emergency_exit(void);
extern bool nbcon_can_proceed(struct nbcon_write_context *wctxt);
+extern void nbcon_write_context_set_buf(struct nbcon_write_context *wctxt,
+ char *buf, unsigned int len);
extern bool nbcon_enter_unsafe(struct nbcon_write_context *wctxt);
extern bool nbcon_exit_unsafe(struct nbcon_write_context *wctxt);
extern void nbcon_reacquire_nobuf(struct nbcon_write_context *wctxt);
+extern bool nbcon_allow_unsafe_takeover(void);
+extern bool nbcon_kdb_try_acquire(struct console *con,
+ struct nbcon_write_context *wctxt);
+extern void nbcon_kdb_release(struct nbcon_write_context *wctxt);
+
+/*
+ * Check if the given console is currently capable and allowed to print
+ * records. Note that this function does not consider the current context,
+ * which can also play a role in deciding if @con can be used to print
+ * records.
+ */
+static inline bool console_is_usable(struct console *con, short flags, bool use_atomic)
+{
+ if (!(flags & CON_ENABLED))
+ return false;
+
+ if ((flags & CON_SUSPENDED))
+ return false;
+
+ if (flags & CON_NBCON) {
+ if (use_atomic) {
+ /* The write_atomic() callback is optional. */
+ if (!con->write_atomic)
+ return false;
+
+ /*
+ * An unsafe write_atomic() callback is only usable
+ * when unsafe takeovers are allowed.
+ */
+ if ((flags & CON_NBCON_ATOMIC_UNSAFE) && !nbcon_allow_unsafe_takeover())
+ return false;
+ }
+
+ /*
+ * For the !use_atomic case, @printk_kthreads_running is not
+ * checked because the write_thread() callback is also used
+ * via the legacy loop when the printer threads are not
+ * available.
+ */
+ } else {
+ if (!con->write)
+ return false;
+ }
+
+ /*
+ * Console drivers may assume that per-cpu resources have been
+ * allocated. So unless they're explicitly marked as being able to
+ * cope (CON_ANYTIME) don't call them until this CPU is officially up.
+ */
+ if (!cpu_online(raw_smp_processor_id()) && !(flags & CON_ANYTIME))
+ return false;
+
+ return true;
+}
+
#else
static inline void nbcon_cpu_emergency_enter(void) { }
static inline void nbcon_cpu_emergency_exit(void) { }
static inline bool nbcon_can_proceed(struct nbcon_write_context *wctxt) { return false; }
+static inline void nbcon_write_context_set_buf(struct nbcon_write_context *wctxt,
+ char *buf, unsigned int len) { }
static inline bool nbcon_enter_unsafe(struct nbcon_write_context *wctxt) { return false; }
static inline bool nbcon_exit_unsafe(struct nbcon_write_context *wctxt) { return false; }
static inline void nbcon_reacquire_nobuf(struct nbcon_write_context *wctxt) { }
+static inline bool nbcon_kdb_try_acquire(struct console *con,
+ struct nbcon_write_context *wctxt) { return false; }
+static inline void nbcon_kdb_release(struct nbcon_write_context *wctxt) { }
+static inline bool console_is_usable(struct console *con, short flags,
+ bool use_atomic) { return false; }
#endif
extern int console_set_on_cmdline;
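A hedged sketch of how a printing loop might gate consoles on console_is_usable(); for_each_console_srcu(), console_srcu_read_lock()/unlock() and console_srcu_read_flags() are existing printk helpers, and the actual flush step is only indicated:

static void example_flush_atomic_consoles(void)
{
	struct console *con;
	int cookie;

	cookie = console_srcu_read_lock();
	for_each_console_srcu(con) {
		short flags = console_srcu_read_flags(con);

		if (!console_is_usable(con, flags, /* use_atomic */ true))
			continue;
		/* ... emit pending records through con->write_atomic() ... */
	}
	console_srcu_read_unlock(cookie);
}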
diff --git a/include/linux/context_tracking_state.h b/include/linux/context_tracking_state.h
index 7b8433d5a8ef..0b81248aa03e 100644
--- a/include/linux/context_tracking_state.h
+++ b/include/linux/context_tracking_state.h
@@ -18,12 +18,6 @@ enum ctx_state {
CT_STATE_MAX = 4,
};
-/* Odd value for watching, else even. */
-#define CT_RCU_WATCHING CT_STATE_MAX
-
-#define CT_STATE_MASK (CT_STATE_MAX - 1)
-#define CT_RCU_WATCHING_MASK (~CT_STATE_MASK)
-
struct context_tracking {
#ifdef CONFIG_CONTEXT_TRACKING_USER
/*
@@ -44,9 +38,45 @@ struct context_tracking {
#endif
};
+/*
+ * We cram two different things within the same atomic variable:
+ *
+ * CT_RCU_WATCHING_START CT_STATE_START
+ * | |
+ * v v
+ * MSB [ RCU watching counter ][ context_state ] LSB
+ * ^ ^
+ * | |
+ * CT_RCU_WATCHING_END CT_STATE_END
+ *
+ * Bits are used from the LSB upwards, so unused bits (if any) will always be in
+ * upper bits of the variable.
+ */
#ifdef CONFIG_CONTEXT_TRACKING
+#define CT_SIZE (sizeof(((struct context_tracking *)0)->state) * BITS_PER_BYTE)
+
+#define CT_STATE_WIDTH bits_per(CT_STATE_MAX - 1)
+#define CT_STATE_START 0
+#define CT_STATE_END (CT_STATE_START + CT_STATE_WIDTH - 1)
+
+#define CT_RCU_WATCHING_MAX_WIDTH (CT_SIZE - CT_STATE_WIDTH)
+#define CT_RCU_WATCHING_WIDTH (IS_ENABLED(CONFIG_RCU_DYNTICKS_TORTURE) ? 2 : CT_RCU_WATCHING_MAX_WIDTH)
+#define CT_RCU_WATCHING_START (CT_STATE_END + 1)
+#define CT_RCU_WATCHING_END (CT_RCU_WATCHING_START + CT_RCU_WATCHING_WIDTH - 1)
+#define CT_RCU_WATCHING BIT(CT_RCU_WATCHING_START)
+
+#define CT_STATE_MASK GENMASK(CT_STATE_END, CT_STATE_START)
+#define CT_RCU_WATCHING_MASK GENMASK(CT_RCU_WATCHING_END, CT_RCU_WATCHING_START)
+
+#define CT_UNUSED_WIDTH (CT_RCU_WATCHING_MAX_WIDTH - CT_RCU_WATCHING_WIDTH)
+
+static_assert(CT_STATE_WIDTH +
+ CT_RCU_WATCHING_WIDTH +
+ CT_UNUSED_WIDTH ==
+ CT_SIZE);
+
DECLARE_PER_CPU(struct context_tracking, context_tracking);
-#endif
+#endif /* CONFIG_CONTEXT_TRACKING */
#ifdef CONFIG_CONTEXT_TRACKING_USER
static __always_inline int __ct_state(void)
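For illustration only, the masks defined above split the packed word as follows (the helpers and the raw-value parameter are hypothetical):

static inline unsigned int example_ct_state_of(unsigned int raw)
{
	/* Low CT_STATE_WIDTH bits: the context_state value. */
	return raw & CT_STATE_MASK;
}

static inline unsigned int example_rcu_watching_of(unsigned int raw)
{
	/* Remaining bits: the RCU watching counter. */
	return (raw & CT_RCU_WATCHING_MASK) >> CT_RCU_WATCHING_START;
}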
diff --git a/include/linux/cpuset.h b/include/linux/cpuset.h
index 2ddb256187b5..a98d3330385c 100644
--- a/include/linux/cpuset.h
+++ b/include/linux/cpuset.h
@@ -74,6 +74,7 @@ extern void inc_dl_tasks_cs(struct task_struct *task);
extern void dec_dl_tasks_cs(struct task_struct *task);
extern void cpuset_lock(void);
extern void cpuset_unlock(void);
+extern void cpuset_cpus_allowed_locked(struct task_struct *p, struct cpumask *mask);
extern void cpuset_cpus_allowed(struct task_struct *p, struct cpumask *mask);
extern bool cpuset_cpus_allowed_fallback(struct task_struct *p);
extern bool cpuset_cpu_is_isolated(int cpu);
@@ -195,10 +196,16 @@ static inline void dec_dl_tasks_cs(struct task_struct *task) { }
static inline void cpuset_lock(void) { }
static inline void cpuset_unlock(void) { }
+static inline void cpuset_cpus_allowed_locked(struct task_struct *p,
+ struct cpumask *mask)
+{
+ cpumask_copy(mask, task_cpu_possible_mask(p));
+}
+
static inline void cpuset_cpus_allowed(struct task_struct *p,
struct cpumask *mask)
{
- cpumask_copy(mask, task_cpu_possible_mask(p));
+ cpuset_cpus_allowed_locked(p, mask);
}
static inline bool cpuset_cpus_allowed_fallback(struct task_struct *p)
diff --git a/include/linux/fault-inject.h b/include/linux/fault-inject.h
index 8c829d28dcf3..58fd14c82270 100644
--- a/include/linux/fault-inject.h
+++ b/include/linux/fault-inject.h
@@ -8,6 +8,10 @@
struct dentry;
struct kmem_cache;
+enum fault_flags {
+ FAULT_NOWARN = 1 << 0,
+};
+
#ifdef CONFIG_FAULT_INJECTION
#include <linux/atomic.h>
@@ -36,10 +40,6 @@ struct fault_attr {
struct dentry *dname;
};
-enum fault_flags {
- FAULT_NOWARN = 1 << 0,
-};
-
#define FAULT_ATTR_INITIALIZER { \
.interval = 1, \
.times = ATOMIC_INIT(1), \
diff --git a/include/linux/gfp_types.h b/include/linux/gfp_types.h
index 65db9349f905..3de43b12209e 100644
--- a/include/linux/gfp_types.h
+++ b/include/linux/gfp_types.h
@@ -55,9 +55,7 @@ enum {
#ifdef CONFIG_LOCKDEP
___GFP_NOLOCKDEP_BIT,
#endif
-#ifdef CONFIG_SLAB_OBJ_EXT
___GFP_NO_OBJ_EXT_BIT,
-#endif
___GFP_LAST_BIT
};
@@ -98,11 +96,7 @@ enum {
#else
#define ___GFP_NOLOCKDEP 0
#endif
-#ifdef CONFIG_SLAB_OBJ_EXT
#define ___GFP_NO_OBJ_EXT BIT(___GFP_NO_OBJ_EXT_BIT)
-#else
-#define ___GFP_NO_OBJ_EXT 0
-#endif
/*
* Physical address zone modifiers (see linux/mmzone.h - low four bits)
diff --git a/include/linux/kdb.h b/include/linux/kdb.h
index ecbf819deeca..741c58e86431 100644
--- a/include/linux/kdb.h
+++ b/include/linux/kdb.h
@@ -14,6 +14,7 @@
*/
#include <linux/list.h>
+#include <linux/smp.h>
/* Shifted versions of the command enable bits are be used if the command
* has no arguments (see kdb_check_flags). This allows commands, such as
@@ -207,11 +208,26 @@ static inline const char *kdb_walk_kallsyms(loff_t *pos)
/* Dynamic kdb shell command registration */
extern int kdb_register(kdbtab_t *cmd);
extern void kdb_unregister(kdbtab_t *cmd);
+
+/* Return true when KDB has locked this CPU for printing a message. */
+static inline
+bool kdb_printf_on_this_cpu(void)
+{
+ /*
+	 * We can use raw_smp_processor_id() here because the task cannot
+	 * be migrated while KDB has this CPU locked for printing.
+ */
+ return unlikely(READ_ONCE(kdb_printf_cpu) == raw_smp_processor_id());
+}
+
#else /* ! CONFIG_KGDB_KDB */
static inline __printf(1, 2) int kdb_printf(const char *fmt, ...) { return 0; }
static inline void kdb_init(int level) {}
static inline int kdb_register(kdbtab_t *cmd) { return 0; }
static inline void kdb_unregister(kdbtab_t *cmd) {}
+
+static inline bool kdb_printf_on_this_cpu(void) { return false; }
+
#endif /* CONFIG_KGDB_KDB */
enum {
KDB_NOT_INITIALIZED,
diff --git a/include/linux/kernel_read_file.h b/include/linux/kernel_read_file.h
index 90451e2e12bd..d613a7b4dd35 100644
--- a/include/linux/kernel_read_file.h
+++ b/include/linux/kernel_read_file.h
@@ -14,6 +14,7 @@
id(KEXEC_INITRAMFS, kexec-initramfs) \
id(POLICY, security-policy) \
id(X509_CERTIFICATE, x509-certificate) \
+ id(MODULE_COMPRESSED, kernel-module-compressed) \
id(MAX_ID, )
#define __fid_enumify(ENUM, dummy) READING_ ## ENUM,
diff --git a/include/linux/key-type.h b/include/linux/key-type.h
index 5caf3ce82373..bb97bd3e5af4 100644
--- a/include/linux/key-type.h
+++ b/include/linux/key-type.h
@@ -107,11 +107,14 @@ struct key_type {
*/
int (*match_preparse)(struct key_match_data *match_data);
- /* Free preparsed match data (optional). This should be supplied it
- * ->match_preparse() is supplied. */
+ /*
+ * Free preparsed match data (optional). This should be supplied if
+ * ->match_preparse() is supplied.
+ */
void (*match_free)(struct key_match_data *match_data);
- /* clear some of the data from a key on revokation (optional)
+ /*
+ * Clear some of the data from a key on revocation (optional).
* - the key's semaphore will be write-locked by the caller
*/
void (*revoke)(struct key *key);
diff --git a/include/linux/lsm_hooks.h b/include/linux/lsm_hooks.h
index 79ec5a2bdcca..b92008641242 100644
--- a/include/linux/lsm_hooks.h
+++ b/include/linux/lsm_hooks.h
@@ -102,23 +102,23 @@ struct security_hook_list {
* Security blob size or offset data.
*/
struct lsm_blob_sizes {
- int lbs_cred;
- int lbs_file;
- int lbs_ib;
- int lbs_inode;
- int lbs_sock;
- int lbs_superblock;
- int lbs_ipc;
- int lbs_key;
- int lbs_msg_msg;
- int lbs_perf_event;
- int lbs_task;
- int lbs_xattr_count; /* number of xattr slots in new_xattrs array */
- int lbs_tun_dev;
- int lbs_bdev;
- int lbs_bpf_map;
- int lbs_bpf_prog;
- int lbs_bpf_token;
+ unsigned int lbs_cred;
+ unsigned int lbs_file;
+ unsigned int lbs_ib;
+ unsigned int lbs_inode;
+ unsigned int lbs_sock;
+ unsigned int lbs_superblock;
+ unsigned int lbs_ipc;
+ unsigned int lbs_key;
+ unsigned int lbs_msg_msg;
+ unsigned int lbs_perf_event;
+ unsigned int lbs_task;
+ unsigned int lbs_xattr_count; /* num xattr slots in new_xattrs array */
+ unsigned int lbs_tun_dev;
+ unsigned int lbs_bdev;
+ unsigned int lbs_bpf_map;
+ unsigned int lbs_bpf_prog;
+ unsigned int lbs_bpf_token;
};
/*
@@ -151,13 +151,36 @@ enum lsm_order {
LSM_ORDER_LAST = 1, /* This is only for integrity. */
};
+/**
+ * struct lsm_info - Define an individual LSM for the LSM framework.
+ * @id: LSM name/ID info
+ * @order: ordering with respect to other LSMs, optional
+ * @flags: descriptive flags, optional
+ * @blobs: LSM blob sharing, optional
+ * @enabled: controlled by CONFIG_LSM, optional
+ * @init: LSM specific initialization routine
+ * @initcall_pure: LSM callback for initcall_pure() setup, optional
+ * @initcall_early: LSM callback for early_initcall setup, optional
+ * @initcall_core: LSM callback for core_initcall() setup, optional
+ * @initcall_subsys: LSM callback for subsys_initcall() setup, optional
+ * @initcall_fs: LSM callback for fs_initcall setup, optional
+ * @initcall_device: LSM callback for device_initcall() setup, optional
+ * @initcall_late: LSM callback for late_initcall() setup, optional
+ */
struct lsm_info {
- const char *name; /* Required. */
- enum lsm_order order; /* Optional: default is LSM_ORDER_MUTABLE */
- unsigned long flags; /* Optional: flags describing LSM */
- int *enabled; /* Optional: controlled by CONFIG_LSM */
- int (*init)(void); /* Required. */
- struct lsm_blob_sizes *blobs; /* Optional: for blob sharing. */
+ const struct lsm_id *id;
+ enum lsm_order order;
+ unsigned long flags;
+ struct lsm_blob_sizes *blobs;
+ int *enabled;
+ int (*init)(void);
+ int (*initcall_pure)(void);
+ int (*initcall_early)(void);
+ int (*initcall_core)(void);
+ int (*initcall_subsys)(void);
+ int (*initcall_fs)(void);
+ int (*initcall_device)(void);
+ int (*initcall_late)(void);
};
#define DEFINE_LSM(lsm) \
@@ -170,11 +193,9 @@ struct lsm_info {
__used __section(".early_lsm_info.init") \
__aligned(sizeof(unsigned long))
+
/* DO NOT tamper with these variables outside of the LSM framework */
-extern char *lsm_names;
extern struct lsm_static_calls_table static_calls_table __ro_after_init;
-extern struct lsm_info __start_lsm_info[], __end_lsm_info[];
-extern struct lsm_info __start_early_lsm_info[], __end_early_lsm_info[];
/**
* lsm_get_xattr_slot - Return the next available slot and increment the index
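A hedged sketch of what an LSM definition could look like against the reworked struct lsm_info; the "demo" LSM, its numeric LSM ID and its callbacks are all hypothetical:

static const struct lsm_id demo_lsmid = {
	.name	= "demo",
	.id	= 200,			/* hypothetical LSM_ID_* value */
};

static int __init demo_lsm_init(void)
{
	/* register hooks, reserve blob space, etc. */
	return 0;
}

static int __init demo_lsm_late(void)
{
	/* work deferred to late_initcall() time */
	return 0;
}

DEFINE_LSM(demo) = {
	.id		= &demo_lsmid,
	.init		= demo_lsm_init,
	.initcall_late	= demo_lsm_late,
};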
diff --git a/include/linux/memfd.h b/include/linux/memfd.h
index 6f606d9573c3..cc74de3dbcfe 100644
--- a/include/linux/memfd.h
+++ b/include/linux/memfd.h
@@ -4,6 +4,8 @@
#include <linux/file.h>
+#define MEMFD_ANON_NAME "[memfd]"
+
#ifdef CONFIG_MEMFD_CREATE
extern long memfd_fcntl(struct file *file, unsigned int cmd, unsigned int arg);
struct folio *memfd_alloc_folio(struct file *memfd, pgoff_t idx);
diff --git a/include/linux/mempool.h b/include/linux/mempool.h
index 34941a4b9026..e8e440e04a06 100644
--- a/include/linux/mempool.h
+++ b/include/linux/mempool.h
@@ -27,32 +27,31 @@ typedef struct mempool {
wait_queue_head_t wait;
} mempool_t;
-static inline bool mempool_initialized(mempool_t *pool)
+static inline bool mempool_initialized(struct mempool *pool)
{
return pool->elements != NULL;
}
-static inline bool mempool_is_saturated(mempool_t *pool)
+static inline bool mempool_is_saturated(struct mempool *pool)
{
return READ_ONCE(pool->curr_nr) >= pool->min_nr;
}
-void mempool_exit(mempool_t *pool);
-int mempool_init_node(mempool_t *pool, int min_nr, mempool_alloc_t *alloc_fn,
- mempool_free_t *free_fn, void *pool_data,
- gfp_t gfp_mask, int node_id);
-
-int mempool_init_noprof(mempool_t *pool, int min_nr, mempool_alloc_t *alloc_fn,
- mempool_free_t *free_fn, void *pool_data);
+void mempool_exit(struct mempool *pool);
+int mempool_init_node(struct mempool *pool, int min_nr,
+ mempool_alloc_t *alloc_fn, mempool_free_t *free_fn,
+ void *pool_data, gfp_t gfp_mask, int node_id);
+int mempool_init_noprof(struct mempool *pool, int min_nr,
+ mempool_alloc_t *alloc_fn, mempool_free_t *free_fn,
+ void *pool_data);
#define mempool_init(...) \
alloc_hooks(mempool_init_noprof(__VA_ARGS__))
-extern mempool_t *mempool_create(int min_nr, mempool_alloc_t *alloc_fn,
- mempool_free_t *free_fn, void *pool_data);
-
-extern mempool_t *mempool_create_node_noprof(int min_nr, mempool_alloc_t *alloc_fn,
- mempool_free_t *free_fn, void *pool_data,
- gfp_t gfp_mask, int nid);
+struct mempool *mempool_create(int min_nr, mempool_alloc_t *alloc_fn,
+ mempool_free_t *free_fn, void *pool_data);
+struct mempool *mempool_create_node_noprof(int min_nr,
+ mempool_alloc_t *alloc_fn, mempool_free_t *free_fn,
+ void *pool_data, gfp_t gfp_mask, int nid);
#define mempool_create_node(...) \
alloc_hooks(mempool_create_node_noprof(__VA_ARGS__))
@@ -60,15 +59,21 @@ extern mempool_t *mempool_create_node_noprof(int min_nr, mempool_alloc_t *alloc_
mempool_create_node(_min_nr, _alloc_fn, _free_fn, _pool_data, \
GFP_KERNEL, NUMA_NO_NODE)
-extern int mempool_resize(mempool_t *pool, int new_min_nr);
-extern void mempool_destroy(mempool_t *pool);
+int mempool_resize(struct mempool *pool, int new_min_nr);
+void mempool_destroy(struct mempool *pool);
-extern void *mempool_alloc_noprof(mempool_t *pool, gfp_t gfp_mask) __malloc;
+void *mempool_alloc_noprof(struct mempool *pool, gfp_t gfp_mask) __malloc;
#define mempool_alloc(...) \
alloc_hooks(mempool_alloc_noprof(__VA_ARGS__))
+int mempool_alloc_bulk_noprof(struct mempool *pool, void **elem,
+ unsigned int count, unsigned int allocated);
+#define mempool_alloc_bulk(...) \
+ alloc_hooks(mempool_alloc_bulk_noprof(__VA_ARGS__))
-extern void *mempool_alloc_preallocated(mempool_t *pool) __malloc;
-extern void mempool_free(void *element, mempool_t *pool);
+void *mempool_alloc_preallocated(struct mempool *pool) __malloc;
+void mempool_free(void *element, struct mempool *pool);
+unsigned int mempool_free_bulk(struct mempool *pool, void **elem,
+ unsigned int count);
/*
* A mempool_alloc_t and mempool_free_t that get the memory from
@@ -97,19 +102,6 @@ void mempool_kfree(void *element, void *pool_data);
mempool_create((_min_nr), mempool_kmalloc, mempool_kfree, \
(void *)(unsigned long)(_size))
-void *mempool_kvmalloc(gfp_t gfp_mask, void *pool_data);
-void mempool_kvfree(void *element, void *pool_data);
-
-static inline int mempool_init_kvmalloc_pool(mempool_t *pool, int min_nr, size_t size)
-{
- return mempool_init(pool, min_nr, mempool_kvmalloc, mempool_kvfree, (void *) size);
-}
-
-static inline mempool_t *mempool_create_kvmalloc_pool(int min_nr, size_t size)
-{
- return mempool_create(min_nr, mempool_kvmalloc, mempool_kvfree, (void *) size);
-}
-
/*
* A mempool_alloc_t and mempool_free_t for a simple page allocator that
* allocates pages of the order specified by pool_data
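A sketch of the new bulk interfaces declared above. Only the prototypes are visible here, so the calling convention (no elements pre-allocated, negative errno on failure) is an assumption:

static int example_bulk_use(struct mempool *pool, unsigned int n)
{
	void *elems[8];
	int ret;

	if (WARN_ON(n > ARRAY_SIZE(elems)))
		return -EINVAL;

	/* Assumed semantics: @allocated = 0, negative errno on failure. */
	ret = mempool_alloc_bulk(pool, elems, n, 0);
	if (ret < 0)
		return ret;

	/* ... use elems[0 .. n-1] ... */

	mempool_free_bulk(pool, elems, n);
	return 0;
}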
diff --git a/include/linux/notifier.h b/include/linux/notifier.h
index b42e64734968..01b6c9d9956f 100644
--- a/include/linux/notifier.h
+++ b/include/linux/notifier.h
@@ -109,7 +109,7 @@ extern void srcu_init_notifier_head(struct srcu_notifier_head *nh);
.mutex = __MUTEX_INITIALIZER(name.mutex), \
.head = NULL, \
.srcuu = __SRCU_USAGE_INIT(name.srcuu), \
- .srcu = __SRCU_STRUCT_INIT(name.srcu, name.srcuu, pcpu), \
+ .srcu = __SRCU_STRUCT_INIT(name.srcu, name.srcuu, pcpu, 0), \
}
#define ATOMIC_NOTIFIER_HEAD(name) \
diff --git a/include/linux/page-flags.h b/include/linux/page-flags.h
index 0091ad1986bf..f7a0e4af0c73 100644
--- a/include/linux/page-flags.h
+++ b/include/linux/page-flags.h
@@ -1048,19 +1048,7 @@ PAGE_TYPE_OPS(Table, table, pgtable)
*/
PAGE_TYPE_OPS(Guard, guard, guard)
-FOLIO_TYPE_OPS(slab, slab)
-
-/**
- * PageSlab - Determine if the page belongs to the slab allocator
- * @page: The page to test.
- *
- * Context: Any context.
- * Return: True for slab pages, false for any other kind of page.
- */
-static inline bool PageSlab(const struct page *page)
-{
- return folio_test_slab(page_folio(page));
-}
+PAGE_TYPE_OPS(Slab, slab, slab)
#ifdef CONFIG_HUGETLB_PAGE
FOLIO_TYPE_OPS(hugetlb, hugetlb)
@@ -1076,7 +1064,7 @@ PAGE_TYPE_OPS(Zsmalloc, zsmalloc, zsmalloc)
* Serialized with zone lock.
*/
PAGE_TYPE_OPS(Unaccepted, unaccepted, unaccepted)
-FOLIO_TYPE_OPS(large_kmalloc, large_kmalloc)
+PAGE_TYPE_OPS(LargeKmalloc, large_kmalloc, large_kmalloc)
/**
* PageHuge - Determine if the page belongs to hugetlbfs
diff --git a/include/linux/prandom.h b/include/linux/prandom.h
index f2ed5b72b3d6..ff7dcc3fa105 100644
--- a/include/linux/prandom.h
+++ b/include/linux/prandom.h
@@ -47,10 +47,4 @@ static inline void prandom_seed_state(struct rnd_state *state, u64 seed)
state->s4 = __seed(i, 128U);
}
-/* Pseudo random number generator from numerical recipes. */
-static inline u32 next_pseudo_random32(u32 seed)
-{
- return seed * 1664525 + 1013904223;
-}
-
#endif
diff --git a/include/linux/random.h b/include/linux/random.h
index 333cecfca93f..8a8064dc3970 100644
--- a/include/linux/random.h
+++ b/include/linux/random.h
@@ -130,21 +130,6 @@ static inline int get_random_bytes_wait(void *buf, size_t nbytes)
return ret;
}
-#define declare_get_random_var_wait(name, ret_type) \
- static inline int get_random_ ## name ## _wait(ret_type *out) { \
- int ret = wait_for_random_bytes(); \
- if (unlikely(ret)) \
- return ret; \
- *out = get_random_ ## name(); \
- return 0; \
- }
-declare_get_random_var_wait(u8, u8)
-declare_get_random_var_wait(u16, u16)
-declare_get_random_var_wait(u32, u32)
-declare_get_random_var_wait(u64, u32)
-declare_get_random_var_wait(long, unsigned long)
-#undef declare_get_random_var
-
#ifdef CONFIG_SMP
int random_prepare_cpu(unsigned int cpu);
int random_online_cpu(unsigned int cpu);
diff --git a/include/linux/rculist_nulls.h b/include/linux/rculist_nulls.h
index 89186c499dd4..d5a656cc4c6a 100644
--- a/include/linux/rculist_nulls.h
+++ b/include/linux/rculist_nulls.h
@@ -138,7 +138,7 @@ static inline void hlist_nulls_add_tail_rcu(struct hlist_nulls_node *n,
if (last) {
WRITE_ONCE(n->next, last->next);
- n->pprev = &last->next;
+ WRITE_ONCE(n->pprev, &last->next);
rcu_assign_pointer(hlist_nulls_next_rcu(last), n);
} else {
hlist_nulls_add_head_rcu(n, h);
@@ -148,8 +148,8 @@ static inline void hlist_nulls_add_tail_rcu(struct hlist_nulls_node *n,
/* after that hlist_nulls_del will work */
static inline void hlist_nulls_add_fake(struct hlist_nulls_node *n)
{
- n->pprev = &n->next;
- n->next = (struct hlist_nulls_node *)NULLS_MARKER(NULL);
+ WRITE_ONCE(n->pprev, &n->next);
+ WRITE_ONCE(n->next, (struct hlist_nulls_node *)NULLS_MARKER(NULL));
}
/**
diff --git a/include/linux/rhashtable.h b/include/linux/rhashtable.h
index 05a221ce79a6..08e664b21f5a 100644
--- a/include/linux/rhashtable.h
+++ b/include/linux/rhashtable.h
@@ -355,12 +355,25 @@ static inline void rht_unlock(struct bucket_table *tbl,
local_irq_restore(flags);
}
-static inline struct rhash_head *__rht_ptr(
- struct rhash_lock_head *p, struct rhash_lock_head __rcu *const *bkt)
+enum rht_lookup_freq {
+ RHT_LOOKUP_NORMAL,
+ RHT_LOOKUP_LIKELY,
+};
+
+static __always_inline struct rhash_head *__rht_ptr(
+ struct rhash_lock_head *p, struct rhash_lock_head __rcu *const *bkt,
+ const enum rht_lookup_freq freq)
{
- return (struct rhash_head *)
- ((unsigned long)p & ~BIT(0) ?:
- (unsigned long)RHT_NULLS_MARKER(bkt));
+ unsigned long p_val = (unsigned long)p & ~BIT(0);
+
+ BUILD_BUG_ON(!__builtin_constant_p(freq));
+
+ if (freq == RHT_LOOKUP_LIKELY)
+ return (struct rhash_head *)
+ (likely(p_val) ? p_val : (unsigned long)RHT_NULLS_MARKER(bkt));
+ else
+ return (struct rhash_head *)
+ (p_val ?: (unsigned long)RHT_NULLS_MARKER(bkt));
}
/*
@@ -370,10 +383,17 @@ static inline struct rhash_head *__rht_ptr(
* rht_ptr_exclusive() dereferences in a context where exclusive
* access is guaranteed, such as when destroying the table.
*/
+static __always_inline struct rhash_head *__rht_ptr_rcu(
+ struct rhash_lock_head __rcu *const *bkt,
+ const enum rht_lookup_freq freq)
+{
+ return __rht_ptr(rcu_dereference_all(*bkt), bkt, freq);
+}
+
static inline struct rhash_head *rht_ptr_rcu(
struct rhash_lock_head __rcu *const *bkt)
{
- return __rht_ptr(rcu_dereference_all(*bkt), bkt);
+ return __rht_ptr_rcu(bkt, RHT_LOOKUP_NORMAL);
}
static inline struct rhash_head *rht_ptr(
@@ -381,13 +401,15 @@ static inline struct rhash_head *rht_ptr(
struct bucket_table *tbl,
unsigned int hash)
{
- return __rht_ptr(rht_dereference_bucket(*bkt, tbl, hash), bkt);
+ return __rht_ptr(rht_dereference_bucket(*bkt, tbl, hash), bkt,
+ RHT_LOOKUP_NORMAL);
}
static inline struct rhash_head *rht_ptr_exclusive(
struct rhash_lock_head __rcu *const *bkt)
{
- return __rht_ptr(rcu_dereference_protected(*bkt, 1), bkt);
+ return __rht_ptr(rcu_dereference_protected(*bkt, 1), bkt,
+ RHT_LOOKUP_NORMAL);
}
static inline void rht_assign_locked(struct rhash_lock_head __rcu **bkt,
@@ -588,7 +610,8 @@ static inline int rhashtable_compare(struct rhashtable_compare_arg *arg,
/* Internal function, do not use. */
static __always_inline struct rhash_head *__rhashtable_lookup(
struct rhashtable *ht, const void *key,
- const struct rhashtable_params params)
+ const struct rhashtable_params params,
+ const enum rht_lookup_freq freq)
{
struct rhashtable_compare_arg arg = {
.ht = ht,
@@ -599,12 +622,13 @@ static __always_inline struct rhash_head *__rhashtable_lookup(
struct rhash_head *he;
unsigned int hash;
+ BUILD_BUG_ON(!__builtin_constant_p(freq));
tbl = rht_dereference_rcu(ht->tbl, ht);
restart:
hash = rht_key_hashfn(ht, tbl, key, params);
bkt = rht_bucket(tbl, hash);
do {
- rht_for_each_rcu_from(he, rht_ptr_rcu(bkt), tbl, hash) {
+ rht_for_each_rcu_from(he, __rht_ptr_rcu(bkt, freq), tbl, hash) {
if (params.obj_cmpfn ?
params.obj_cmpfn(&arg, rht_obj(ht, he)) :
rhashtable_compare(&arg, rht_obj(ht, he)))
@@ -643,11 +667,22 @@ static __always_inline void *rhashtable_lookup(
struct rhashtable *ht, const void *key,
const struct rhashtable_params params)
{
- struct rhash_head *he = __rhashtable_lookup(ht, key, params);
+ struct rhash_head *he = __rhashtable_lookup(ht, key, params,
+ RHT_LOOKUP_NORMAL);
return he ? rht_obj(ht, he) : NULL;
}
+static __always_inline void *rhashtable_lookup_likely(
+ struct rhashtable *ht, const void *key,
+ const struct rhashtable_params params)
+{
+ struct rhash_head *he = __rhashtable_lookup(ht, key, params,
+ RHT_LOOKUP_LIKELY);
+
+ return likely(he) ? rht_obj(ht, he) : NULL;
+}
+
/**
* rhashtable_lookup_fast - search hash table, without RCU read lock
* @ht: hash table
@@ -693,11 +728,22 @@ static __always_inline struct rhlist_head *rhltable_lookup(
struct rhltable *hlt, const void *key,
const struct rhashtable_params params)
{
- struct rhash_head *he = __rhashtable_lookup(&hlt->ht, key, params);
+ struct rhash_head *he = __rhashtable_lookup(&hlt->ht, key, params,
+ RHT_LOOKUP_NORMAL);
return he ? container_of(he, struct rhlist_head, rhead) : NULL;
}
+static __always_inline struct rhlist_head *rhltable_lookup_likely(
+ struct rhltable *hlt, const void *key,
+ const struct rhashtable_params params)
+{
+ struct rhash_head *he = __rhashtable_lookup(&hlt->ht, key, params,
+ RHT_LOOKUP_LIKELY);
+
+ return likely(he) ? container_of(he, struct rhlist_head, rhead) : NULL;
+}
+
/* Internal function, please use rhashtable_insert_fast() instead. This
* function returns the existing element already in hashes if there is a clash,
* otherwise it returns an error via ERR_PTR().
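A hedged illustration of the new _likely lookup variant; the "flow" structure and its parameters are made up, and the calling convention is the same as rhashtable_lookup() (RCU read lock held by the caller):

struct flow {
	u32 key;
	struct rhash_head node;
};

static const struct rhashtable_params flow_params = {
	.key_len	= sizeof(u32),
	.key_offset	= offsetof(struct flow, key),
	.head_offset	= offsetof(struct flow, node),
};

/* Hot-path lookup where a hit is the overwhelmingly common case. */
static struct flow *flow_find(struct rhashtable *ht, u32 key)
{
	return rhashtable_lookup_likely(ht, &key, flow_params);
}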
diff --git a/include/linux/sched.h b/include/linux/sched.h
index fac12bb7dbe4..d395f2810fac 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -1324,7 +1324,10 @@ struct task_struct {
struct css_set __rcu *cgroups;
/* cg_list protected by css_set_lock and tsk->alloc_lock: */
struct list_head cg_list;
-#endif
+#ifdef CONFIG_PREEMPT_RT
+ struct llist_node cg_dead_lnode;
+#endif /* CONFIG_PREEMPT_RT */
+#endif /* CONFIG_CGROUPS */
#ifdef CONFIG_X86_CPU_RESCTRL
u32 closid;
u32 rmid;
diff --git a/include/linux/sched/ext.h b/include/linux/sched/ext.h
index d82b7a9b0658..bcb962d5ee7d 100644
--- a/include/linux/sched/ext.h
+++ b/include/linux/sched/ext.h
@@ -17,7 +17,18 @@
enum scx_public_consts {
SCX_OPS_NAME_LEN = 128,
+ /*
+	 * %SCX_SLICE_DFL is used to refill slices when the BPF scheduler fails
+ * to set the slice for a task that is selected for execution.
+ * %SCX_EV_REFILL_SLICE_DFL counts the number of times the default slice
+ * refill has been triggered.
+ *
+ * %SCX_SLICE_BYPASS is used as the slice for all tasks in the bypass
+ * mode. As making forward progress for all tasks is the main goal of
+ * the bypass mode, a shorter slice is used.
+ */
SCX_SLICE_DFL = 20 * 1000000, /* 20ms */
+ SCX_SLICE_BYPASS = 5 * 1000000, /* 5ms */
SCX_SLICE_INF = U64_MAX, /* infinite, implies nohz */
};
@@ -46,6 +57,7 @@ enum scx_dsq_id_flags {
SCX_DSQ_INVALID = SCX_DSQ_FLAG_BUILTIN | 0,
SCX_DSQ_GLOBAL = SCX_DSQ_FLAG_BUILTIN | 1,
SCX_DSQ_LOCAL = SCX_DSQ_FLAG_BUILTIN | 2,
+ SCX_DSQ_BYPASS = SCX_DSQ_FLAG_BUILTIN | 3,
SCX_DSQ_LOCAL_ON = SCX_DSQ_FLAG_BUILTIN | SCX_DSQ_FLAG_LOCAL_ON,
SCX_DSQ_LOCAL_CPU_MASK = 0xffffffffLLU,
};
@@ -58,6 +70,7 @@ enum scx_dsq_id_flags {
*/
struct scx_dispatch_q {
raw_spinlock_t lock;
+ struct task_struct __rcu *first_task; /* lockless peek at head */
struct list_head list; /* tasks in dispatch order */
struct rb_root priq; /* used to order by p->scx.dsq_vtime */
u32 nr;
@@ -136,6 +149,13 @@ struct scx_dsq_list_node {
u32 priv; /* can be used by iter cursor */
};
+#define INIT_DSQ_LIST_CURSOR(__node, __flags, __priv) \
+ (struct scx_dsq_list_node) { \
+ .node = LIST_HEAD_INIT((__node).node), \
+ .flags = SCX_DSQ_LNODE_ITER_CURSOR | (__flags), \
+ .priv = (__priv), \
+ }
+
/*
* The following is embedded in task_struct and contains all fields necessary
* for a task to be scheduled by SCX.
@@ -207,16 +227,18 @@ struct sched_ext_entity {
struct list_head tasks_node;
};
-void sched_ext_free(struct task_struct *p);
+void sched_ext_dead(struct task_struct *p);
void print_scx_info(const char *log_lvl, struct task_struct *p);
void scx_softlockup(u32 dur_s);
+bool scx_hardlockup(int cpu);
bool scx_rcu_cpu_stall(void);
#else /* !CONFIG_SCHED_CLASS_EXT */
-static inline void sched_ext_free(struct task_struct *p) {}
+static inline void sched_ext_dead(struct task_struct *p) {}
static inline void print_scx_info(const char *log_lvl, struct task_struct *p) {}
static inline void scx_softlockup(u32 dur_s) {}
+static inline bool scx_hardlockup(int cpu) { return false; }
static inline bool scx_rcu_cpu_stall(void) { return false; }
#endif /* CONFIG_SCHED_CLASS_EXT */
@@ -228,6 +250,7 @@ struct scx_task_group {
u64 bw_period_us;
u64 bw_quota_us;
u64 bw_burst_us;
+ bool idle;
#endif
};
diff --git a/include/linux/security.h b/include/linux/security.h
index 92ac3f27b973..eb36451ce41f 100644
--- a/include/linux/security.h
+++ b/include/linux/security.h
@@ -85,6 +85,7 @@ struct timezone;
enum lsm_event {
LSM_POLICY_CHANGE,
+ LSM_STARTED_ALL,
};
struct dm_verity_digest {
@@ -167,8 +168,6 @@ struct lsm_prop {
};
extern const char *const lockdown_reasons[LOCKDOWN_CONFIDENTIALITY_MAX+1];
-extern u32 lsm_active_cnt;
-extern const struct lsm_id *lsm_idlist[];
/* These functions are in security/commoncap.c */
extern int cap_capable(const struct cred *cred, struct user_namespace *ns,
diff --git a/include/linux/srcu.h b/include/linux/srcu.h
index ada65b58bc4c..344ad51c8f6c 100644
--- a/include/linux/srcu.h
+++ b/include/linux/srcu.h
@@ -25,8 +25,12 @@ struct srcu_struct;
#ifdef CONFIG_DEBUG_LOCK_ALLOC
-int __init_srcu_struct(struct srcu_struct *ssp, const char *name,
- struct lock_class_key *key);
+int __init_srcu_struct(struct srcu_struct *ssp, const char *name, struct lock_class_key *key);
+#ifndef CONFIG_TINY_SRCU
+int __init_srcu_struct_fast(struct srcu_struct *ssp, const char *name, struct lock_class_key *key);
+int __init_srcu_struct_fast_updown(struct srcu_struct *ssp, const char *name,
+ struct lock_class_key *key);
+#endif // #ifndef CONFIG_TINY_SRCU
#define init_srcu_struct(ssp) \
({ \
@@ -35,22 +39,42 @@ int __init_srcu_struct(struct srcu_struct *ssp, const char *name,
__init_srcu_struct((ssp), #ssp, &__srcu_key); \
})
+#define init_srcu_struct_fast(ssp) \
+({ \
+ static struct lock_class_key __srcu_key; \
+ \
+ __init_srcu_struct_fast((ssp), #ssp, &__srcu_key); \
+})
+
+#define init_srcu_struct_fast_updown(ssp) \
+({ \
+ static struct lock_class_key __srcu_key; \
+ \
+ __init_srcu_struct_fast_updown((ssp), #ssp, &__srcu_key); \
+})
+
#define __SRCU_DEP_MAP_INIT(srcu_name) .dep_map = { .name = #srcu_name },
#else /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */
int init_srcu_struct(struct srcu_struct *ssp);
+#ifndef CONFIG_TINY_SRCU
+int init_srcu_struct_fast(struct srcu_struct *ssp);
+int init_srcu_struct_fast_updown(struct srcu_struct *ssp);
+#endif // #ifndef CONFIG_TINY_SRCU
#define __SRCU_DEP_MAP_INIT(srcu_name)
#endif /* #else #ifdef CONFIG_DEBUG_LOCK_ALLOC */
/* Values for SRCU Tree srcu_data ->srcu_reader_flavor, but also used by rcutorture. */
-#define SRCU_READ_FLAVOR_NORMAL 0x1 // srcu_read_lock().
-#define SRCU_READ_FLAVOR_NMI 0x2 // srcu_read_lock_nmisafe().
-// 0x4 // SRCU-lite is no longer with us.
-#define SRCU_READ_FLAVOR_FAST 0x8 // srcu_read_lock_fast().
-#define SRCU_READ_FLAVOR_ALL (SRCU_READ_FLAVOR_NORMAL | SRCU_READ_FLAVOR_NMI | \
- SRCU_READ_FLAVOR_FAST) // All of the above.
-#define SRCU_READ_FLAVOR_SLOWGP SRCU_READ_FLAVOR_FAST
+#define SRCU_READ_FLAVOR_NORMAL 0x1 // srcu_read_lock().
+#define SRCU_READ_FLAVOR_NMI 0x2 // srcu_read_lock_nmisafe().
+// 0x4 // SRCU-lite is no longer with us.
+#define SRCU_READ_FLAVOR_FAST 0x4 // srcu_read_lock_fast().
+#define SRCU_READ_FLAVOR_FAST_UPDOWN 0x8	// srcu_read_lock_fast_updown().
+#define SRCU_READ_FLAVOR_ALL (SRCU_READ_FLAVOR_NORMAL | SRCU_READ_FLAVOR_NMI | \
+ SRCU_READ_FLAVOR_FAST | SRCU_READ_FLAVOR_FAST_UPDOWN)
+ // All of the above.
+#define SRCU_READ_FLAVOR_SLOWGP (SRCU_READ_FLAVOR_FAST | SRCU_READ_FLAVOR_FAST_UPDOWN)
// Flavors requiring synchronize_rcu()
// instead of smp_mb().
void __srcu_read_unlock(struct srcu_struct *ssp, int idx) __releases(ssp);
@@ -259,29 +283,78 @@ static inline int srcu_read_lock(struct srcu_struct *ssp) __acquires(ssp)
* @ssp: srcu_struct in which to register the new reader.
*
* Enter an SRCU read-side critical section, but for a light-weight
- * smp_mb()-free reader. See srcu_read_lock() for more information.
- *
- * If srcu_read_lock_fast() is ever used on an srcu_struct structure,
- * then none of the other flavors may be used, whether before, during,
- * or after. Note that grace-period auto-expediting is disabled for _fast
- * srcu_struct structures because auto-expedited grace periods invoke
- * synchronize_rcu_expedited(), IPIs and all.
- *
- * Note that srcu_read_lock_fast() can be invoked only from those contexts
- * where RCU is watching, that is, from contexts where it would be legal
- * to invoke rcu_read_lock(). Otherwise, lockdep will complain.
+ * smp_mb()-free reader. See srcu_read_lock() for more information. This
+ * function is NMI-safe, in a manner similar to srcu_read_lock_nmisafe().
+ *
+ * For srcu_read_lock_fast() to be used on an srcu_struct structure,
+ * that structure must have been defined using either DEFINE_SRCU_FAST()
+ * or DEFINE_STATIC_SRCU_FAST() on the one hand or initialized with
+ * init_srcu_struct_fast() on the other. Such an srcu_struct structure
+ * cannot be passed to any non-fast variant of srcu_read_{,un}lock() or
+ * srcu_{down,up}_read(). In kernels built with CONFIG_PROVE_RCU=y,
+ * __srcu_check_read_flavor() will complain bitterly if you ignore this
+ * restriction.
+ *
+ * Grace-period auto-expediting is disabled for SRCU-fast srcu_struct
+ * structures because SRCU-fast expedited grace periods invoke
+ * synchronize_rcu_expedited(), IPIs and all. If you need expedited
+ * SRCU-fast grace periods, use synchronize_srcu_expedited().
+ *
+ * The srcu_read_lock_fast() function can be invoked only from those
+ * contexts where RCU is watching, that is, from contexts where it would
+ * be legal to invoke rcu_read_lock(). Otherwise, lockdep will complain.
*/
static inline struct srcu_ctr __percpu *srcu_read_lock_fast(struct srcu_struct *ssp) __acquires(ssp)
{
struct srcu_ctr __percpu *retval;
RCU_LOCKDEP_WARN(!rcu_is_watching(), "RCU must be watching srcu_read_lock_fast().");
- srcu_check_read_flavor_force(ssp, SRCU_READ_FLAVOR_FAST);
+ srcu_check_read_flavor(ssp, SRCU_READ_FLAVOR_FAST);
retval = __srcu_read_lock_fast(ssp);
rcu_try_lock_acquire(&ssp->dep_map);
return retval;
}
+/**
+ * srcu_read_lock_fast_updown - register a new reader for an SRCU-fast-updown structure.
+ * @ssp: srcu_struct in which to register the new reader.
+ *
+ * Enter an SRCU read-side critical section, but for a light-weight
+ * smp_mb()-free reader. See srcu_read_lock() for more information.
+ * This function is compatible with srcu_down_read_fast(), but is not
+ * NMI-safe.
+ *
+ * For srcu_read_lock_fast_updown() to be used on an srcu_struct
+ * structure, that structure must have been defined using either
+ * DEFINE_SRCU_FAST_UPDOWN() or DEFINE_STATIC_SRCU_FAST_UPDOWN() on the one
+ * hand or initialized with init_srcu_struct_fast_updown() on the other.
+ * Such an srcu_struct structure cannot be passed to any non-fast-updown
+ * variant of srcu_read_{,un}lock() or srcu_{down,up}_read(). In kernels
+ * built with CONFIG_PROVE_RCU=y, __srcu_check_read_flavor() will complain
+ * bitterly if you ignore this restriction.
+ *
+ * Grace-period auto-expediting is disabled for SRCU-fast-updown
+ * srcu_struct structures because SRCU-fast-updown expedited grace periods
+ * invoke synchronize_rcu_expedited(), IPIs and all. If you need expedited
+ * SRCU-fast-updown grace periods, use synchronize_srcu_expedited().
+ *
+ * The srcu_read_lock_fast_updown() function can be invoked only from
+ * those contexts where RCU is watching, that is, from contexts where
+ * it would be legal to invoke rcu_read_lock(). Otherwise, lockdep will
+ * complain.
+ */
+static inline struct srcu_ctr __percpu *srcu_read_lock_fast_updown(struct srcu_struct *ssp)
+__acquires(ssp)
+{
+ struct srcu_ctr __percpu *retval;
+
+ RCU_LOCKDEP_WARN(!rcu_is_watching(), "RCU must be watching srcu_read_lock_fast_updown().");
+ srcu_check_read_flavor(ssp, SRCU_READ_FLAVOR_FAST_UPDOWN);
+ retval = __srcu_read_lock_fast_updown(ssp);
+ rcu_try_lock_acquire(&ssp->dep_map);
+ return retval;
+}
+
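A corresponding sketch for the non-NMI-safe updown flavor (again illustrative; my_updown_srcu is a made-up name), pairing srcu_read_lock_fast_updown() with srcu_read_unlock_fast_updown() on a structure defined via DEFINE_STATIC_SRCU_FAST_UPDOWN():

	/* Illustrative sketch only. */
	DEFINE_STATIC_SRCU_FAST_UPDOWN(my_updown_srcu);

	static void updown_reader(void)
	{
		struct srcu_ctr __percpu *scp;

		scp = srcu_read_lock_fast_updown(&my_updown_srcu);
		/* Read-side critical section; unlock must occur in the same task. */
		srcu_read_unlock_fast_updown(&my_updown_srcu, scp);
	}
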
/*
* Used by tracing, cannot be traced and cannot call lockdep.
* See srcu_read_lock_fast() for more information.
@@ -291,7 +364,7 @@ static inline struct srcu_ctr __percpu *srcu_read_lock_fast_notrace(struct srcu_
{
struct srcu_ctr __percpu *retval;
- srcu_check_read_flavor_force(ssp, SRCU_READ_FLAVOR_FAST);
+ srcu_check_read_flavor(ssp, SRCU_READ_FLAVOR_FAST);
retval = __srcu_read_lock_fast(ssp);
return retval;
}
@@ -305,14 +378,15 @@ static inline struct srcu_ctr __percpu *srcu_read_lock_fast_notrace(struct srcu_
* srcu_down_read() for more information.
*
* The same srcu_struct may be used concurrently by srcu_down_read_fast()
- * and srcu_read_lock_fast().
+ * and srcu_read_lock_fast_updown(). However, the same definition/initialization
+ * requirements called out for srcu_read_lock_fast_updown() apply.
*/
static inline struct srcu_ctr __percpu *srcu_down_read_fast(struct srcu_struct *ssp) __acquires(ssp)
{
WARN_ON_ONCE(IS_ENABLED(CONFIG_PROVE_RCU) && in_nmi());
RCU_LOCKDEP_WARN(!rcu_is_watching(), "RCU must be watching srcu_down_read_fast().");
- srcu_check_read_flavor_force(ssp, SRCU_READ_FLAVOR_FAST);
- return __srcu_read_lock_fast(ssp);
+ srcu_check_read_flavor(ssp, SRCU_READ_FLAVOR_FAST_UPDOWN);
+ return __srcu_read_lock_fast_updown(ssp);
}
/**
@@ -408,6 +482,23 @@ static inline void srcu_read_unlock_fast(struct srcu_struct *ssp, struct srcu_ct
RCU_LOCKDEP_WARN(!rcu_is_watching(), "RCU must be watching srcu_read_unlock_fast().");
}
+/**
+ * srcu_read_unlock_fast_updown - unregister an old reader from an SRCU-fast-updown structure.
+ * @ssp: srcu_struct in which to unregister the old reader.
+ * @scp: return value from corresponding srcu_read_lock_fast_updown().
+ *
+ * Exit an SRCU-fast-updown read-side critical section.
+ */
+static inline void
+srcu_read_unlock_fast_updown(struct srcu_struct *ssp, struct srcu_ctr __percpu *scp) __releases(ssp)
+{
+ srcu_check_read_flavor(ssp, SRCU_READ_FLAVOR_FAST_UPDOWN);
+ srcu_lock_release(&ssp->dep_map);
+ __srcu_read_unlock_fast_updown(ssp, scp);
+ RCU_LOCKDEP_WARN(!rcu_is_watching(),
+ "RCU must be watching srcu_read_unlock_fast_updown().");
+}
+
/*
* Used by tracing, cannot be traced and cannot call lockdep.
* See srcu_read_unlock_fast() for more information.
@@ -431,9 +522,9 @@ static inline void srcu_up_read_fast(struct srcu_struct *ssp, struct srcu_ctr __
__releases(ssp)
{
WARN_ON_ONCE(IS_ENABLED(CONFIG_PROVE_RCU) && in_nmi());
- srcu_check_read_flavor(ssp, SRCU_READ_FLAVOR_FAST);
- __srcu_read_unlock_fast(ssp, scp);
- RCU_LOCKDEP_WARN(!rcu_is_watching(), "RCU must be watching srcu_up_read_fast().");
+ srcu_check_read_flavor(ssp, SRCU_READ_FLAVOR_FAST_UPDOWN);
+ __srcu_read_unlock_fast_updown(ssp, scp);
+ RCU_LOCKDEP_WARN(!rcu_is_watching(), "RCU must be watching srcu_up_read_fast().");
}
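
As an illustration of the down/up pairing (hypothetical names; assumes the same cross-context handoff rules as srcu_down_read()/srcu_up_read()), the reference returned by srcu_down_read_fast() can be released by srcu_up_read_fast() from, say, a workqueue callback:

	/* Illustrative sketch only; needs <linux/slab.h> and <linux/workqueue.h>. */
	DEFINE_STATIC_SRCU_FAST_UPDOWN(my_io_srcu);

	struct my_request {
		struct srcu_ctr __percpu *scp;
		struct work_struct work;
	};

	static void my_request_done(struct work_struct *w)
	{
		struct my_request *req = container_of(w, struct my_request, work);

		srcu_up_read_fast(&my_io_srcu, req->scp);	/* End reader begun in my_request_start(). */
		kfree(req);
	}

	static void my_request_start(struct my_request *req)
	{
		req->scp = srcu_down_read_fast(&my_io_srcu);	/* No lockdep read-side pairing. */
		INIT_WORK(&req->work, my_request_done);
		schedule_work(&req->work);
	}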
/**
diff --git a/include/linux/srcutiny.h b/include/linux/srcutiny.h
index 51ce25f07930..e0698024667a 100644
--- a/include/linux/srcutiny.h
+++ b/include/linux/srcutiny.h
@@ -31,7 +31,7 @@ struct srcu_struct {
void srcu_drive_gp(struct work_struct *wp);
-#define __SRCU_STRUCT_INIT(name, __ignored, ___ignored) \
+#define __SRCU_STRUCT_INIT(name, __ignored, ___ignored, ____ignored) \
{ \
.srcu_wq = __SWAIT_QUEUE_HEAD_INITIALIZER(name.srcu_wq), \
.srcu_cb_tail = &name.srcu_cb_head, \
@@ -44,13 +44,25 @@ void srcu_drive_gp(struct work_struct *wp);
* Tree SRCU, which needs some per-CPU data.
*/
#define DEFINE_SRCU(name) \
- struct srcu_struct name = __SRCU_STRUCT_INIT(name, name, name)
+ struct srcu_struct name = __SRCU_STRUCT_INIT(name, name, name, name)
#define DEFINE_STATIC_SRCU(name) \
- static struct srcu_struct name = __SRCU_STRUCT_INIT(name, name, name)
+ static struct srcu_struct name = __SRCU_STRUCT_INIT(name, name, name, name)
+#define DEFINE_SRCU_FAST(name) DEFINE_SRCU(name)
+#define DEFINE_STATIC_SRCU_FAST(name) \
+ static struct srcu_struct name = __SRCU_STRUCT_INIT(name, name, name, name)
+#define DEFINE_SRCU_FAST_UPDOWN(name) DEFINE_SRCU(name)
+#define DEFINE_STATIC_SRCU_FAST_UPDOWN(name) \
+ static struct srcu_struct name = __SRCU_STRUCT_INIT(name, name, name, name)
// Dummy structure for srcu_notifier_head.
struct srcu_usage { };
#define __SRCU_USAGE_INIT(name) { }
+#define __init_srcu_struct_fast __init_srcu_struct
+#define __init_srcu_struct_fast_updown __init_srcu_struct
+#ifndef CONFIG_DEBUG_LOCK_ALLOC
+#define init_srcu_struct_fast init_srcu_struct
+#define init_srcu_struct_fast_updown init_srcu_struct
+#endif // #ifndef CONFIG_DEBUG_LOCK_ALLOC
void synchronize_srcu(struct srcu_struct *ssp);
@@ -93,6 +105,17 @@ static inline void __srcu_read_unlock_fast(struct srcu_struct *ssp, struct srcu_
__srcu_read_unlock(ssp, __srcu_ptr_to_ctr(ssp, scp));
}
+static inline struct srcu_ctr __percpu *__srcu_read_lock_fast_updown(struct srcu_struct *ssp)
+{
+ return __srcu_ctr_to_ptr(ssp, __srcu_read_lock(ssp));
+}
+
+static inline
+void __srcu_read_unlock_fast_updown(struct srcu_struct *ssp, struct srcu_ctr __percpu *scp)
+{
+ __srcu_read_unlock(ssp, __srcu_ptr_to_ctr(ssp, scp));
+}
+
static inline void synchronize_srcu_expedited(struct srcu_struct *ssp)
{
synchronize_srcu(ssp);
@@ -103,8 +126,8 @@ static inline void srcu_barrier(struct srcu_struct *ssp)
synchronize_srcu(ssp);
}
+static inline void srcu_expedite_current(struct srcu_struct *ssp) { }
#define srcu_check_read_flavor(ssp, read_flavor) do { } while (0)
-#define srcu_check_read_flavor_force(ssp, read_flavor) do { } while (0)
/* Defined here to avoid size increase for non-torture kernels. */
static inline void srcu_torture_stats_print(struct srcu_struct *ssp,
diff --git a/include/linux/srcutree.h b/include/linux/srcutree.h
index 42098e0fa0b7..d6f978b50472 100644
--- a/include/linux/srcutree.h
+++ b/include/linux/srcutree.h
@@ -42,6 +42,8 @@ struct srcu_data {
struct timer_list delay_work; /* Delay for CB invoking */
struct work_struct work; /* Context for CB invoking. */
struct rcu_head srcu_barrier_head; /* For srcu_barrier() use. */
+ struct rcu_head srcu_ec_head; /* For srcu_expedite_current() use. */
+ int srcu_ec_state; /* State for srcu_expedite_current(). */
struct srcu_node *mynode; /* Leaf srcu_node. */
unsigned long grpmask; /* Mask for leaf srcu_node */
/* ->srcu_data_have_cbs[]. */
@@ -102,6 +104,7 @@ struct srcu_usage {
struct srcu_struct {
struct srcu_ctr __percpu *srcu_ctrp;
struct srcu_data __percpu *sda; /* Per-CPU srcu_data array. */
+ u8 srcu_reader_flavor;
struct lockdep_map dep_map;
struct srcu_usage *srcu_sup; /* Update-side data. */
};
@@ -135,6 +138,11 @@ struct srcu_struct {
#define SRCU_STATE_SCAN1 1
#define SRCU_STATE_SCAN2 2
+/* Values for srcu_expedite_current() state (->srcu_ec_state). */
+#define SRCU_EC_IDLE 0
+#define SRCU_EC_PENDING 1
+#define SRCU_EC_REPOST 2
+
/*
* Values for initializing gp sequence fields. Higher values allow wrap arounds to
* occur earlier.
@@ -155,20 +163,21 @@ struct srcu_struct {
.work = __DELAYED_WORK_INITIALIZER(name.work, NULL, 0), \
}
-#define __SRCU_STRUCT_INIT_COMMON(name, usage_name) \
+#define __SRCU_STRUCT_INIT_COMMON(name, usage_name, fast) \
.srcu_sup = &usage_name, \
+ .srcu_reader_flavor = fast, \
__SRCU_DEP_MAP_INIT(name)
-#define __SRCU_STRUCT_INIT_MODULE(name, usage_name) \
+#define __SRCU_STRUCT_INIT_MODULE(name, usage_name, fast) \
{ \
- __SRCU_STRUCT_INIT_COMMON(name, usage_name) \
+ __SRCU_STRUCT_INIT_COMMON(name, usage_name, fast) \
}
-#define __SRCU_STRUCT_INIT(name, usage_name, pcpu_name) \
+#define __SRCU_STRUCT_INIT(name, usage_name, pcpu_name, fast) \
{ \
.sda = &pcpu_name, \
.srcu_ctrp = &pcpu_name.srcu_ctrs[0], \
- __SRCU_STRUCT_INIT_COMMON(name, usage_name) \
+ __SRCU_STRUCT_INIT_COMMON(name, usage_name, fast) \
}
/*
@@ -189,27 +198,45 @@ struct srcu_struct {
* init_srcu_struct(&my_srcu);
*
* See include/linux/percpu-defs.h for the rules on per-CPU variables.
+ *
+ * DEFINE_SRCU_FAST() and DEFINE_STATIC_SRCU_FAST() create an srcu_struct
+ * and associated structures whose readers must be of the SRCU-fast variety.
+ * DEFINE_SRCU_FAST_UPDOWN() and DEFINE_STATIC_SRCU_FAST_UPDOWN() create
+ * an srcu_struct and associated structures whose readers must be of the
+ * SRCU-fast-updown variety. The key point (aside from error checking) with
+ * both varieties is that the grace periods must use synchronize_rcu()
+ * instead of smp_mb(), and given that the first srcu_read_lock_fast()
+ * (for example) might race with the first synchronize_srcu(), this
+ * difference must be specified at initialization time.
*/
#ifdef MODULE
-# define __DEFINE_SRCU(name, is_static) \
+# define __DEFINE_SRCU(name, fast, is_static) \
static struct srcu_usage name##_srcu_usage = __SRCU_USAGE_INIT(name##_srcu_usage); \
- is_static struct srcu_struct name = __SRCU_STRUCT_INIT_MODULE(name, name##_srcu_usage); \
+ is_static struct srcu_struct name = __SRCU_STRUCT_INIT_MODULE(name, name##_srcu_usage, \
+ fast); \
extern struct srcu_struct * const __srcu_struct_##name; \
struct srcu_struct * const __srcu_struct_##name \
__section("___srcu_struct_ptrs") = &name
#else
-# define __DEFINE_SRCU(name, is_static) \
+# define __DEFINE_SRCU(name, fast, is_static) \
static DEFINE_PER_CPU(struct srcu_data, name##_srcu_data); \
static struct srcu_usage name##_srcu_usage = __SRCU_USAGE_INIT(name##_srcu_usage); \
is_static struct srcu_struct name = \
- __SRCU_STRUCT_INIT(name, name##_srcu_usage, name##_srcu_data)
+ __SRCU_STRUCT_INIT(name, name##_srcu_usage, name##_srcu_data, fast)
#endif
-#define DEFINE_SRCU(name) __DEFINE_SRCU(name, /* not static */)
-#define DEFINE_STATIC_SRCU(name) __DEFINE_SRCU(name, static)
+#define DEFINE_SRCU(name) __DEFINE_SRCU(name, 0, /* not static */)
+#define DEFINE_STATIC_SRCU(name) __DEFINE_SRCU(name, 0, static)
+#define DEFINE_SRCU_FAST(name) __DEFINE_SRCU(name, SRCU_READ_FLAVOR_FAST, /* not static */)
+#define DEFINE_STATIC_SRCU_FAST(name) __DEFINE_SRCU(name, SRCU_READ_FLAVOR_FAST, static)
+#define DEFINE_SRCU_FAST_UPDOWN(name) __DEFINE_SRCU(name, SRCU_READ_FLAVOR_FAST_UPDOWN, \
+ /* not static */)
+#define DEFINE_STATIC_SRCU_FAST_UPDOWN(name) \
+ __DEFINE_SRCU(name, SRCU_READ_FLAVOR_FAST_UPDOWN, static)
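For dynamically allocated objects, the kernel-doc above points at init_srcu_struct_fast()/init_srcu_struct_fast_updown() rather than the DEFINE_* macros. A sketch (hypothetical struct my_obj; assumes the same zero-on-success convention as init_srcu_struct()):

	/* Illustrative sketch only; needs <linux/slab.h>. */
	struct my_obj {
		struct srcu_struct srcu;
		/* ... payload ... */
	};

	static struct my_obj *my_obj_alloc(void)
	{
		struct my_obj *obj = kzalloc(sizeof(*obj), GFP_KERNEL);

		if (!obj)
			return NULL;
		if (init_srcu_struct_fast(&obj->srcu)) {	/* Marks readers as SRCU-fast. */
			kfree(obj);
			return NULL;
		}
		return obj;
	}

	static void my_obj_free(struct my_obj *obj)
	{
		cleanup_srcu_struct(&obj->srcu);	/* All readers and grace periods must be done. */
		kfree(obj);
	}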
int __srcu_read_lock(struct srcu_struct *ssp) __acquires(ssp);
void synchronize_srcu_expedited(struct srcu_struct *ssp);
void srcu_barrier(struct srcu_struct *ssp);
+void srcu_expedite_current(struct srcu_struct *ssp);
void srcu_torture_stats_print(struct srcu_struct *ssp, char *tt, char *tf);
// Converts a per-CPU pointer to an ->srcu_ctrs[] array element to that
@@ -289,23 +316,49 @@ __srcu_read_unlock_fast(struct srcu_struct *ssp, struct srcu_ctr __percpu *scp)
atomic_long_inc(raw_cpu_ptr(&scp->srcu_unlocks)); // Z, and implicit RCU reader.
}
-void __srcu_check_read_flavor(struct srcu_struct *ssp, int read_flavor);
-
-// Record reader usage even for CONFIG_PROVE_RCU=n kernels. This is
-// needed only for flavors that require grace-period smp_mb() calls to be
-// promoted to synchronize_rcu().
-static inline void srcu_check_read_flavor_force(struct srcu_struct *ssp, int read_flavor)
+/*
+ * Counts the new reader in the appropriate per-CPU element of the
+ * srcu_struct. Returns a pointer that must be passed to the matching
+ * srcu_read_unlock_fast_updown(). This type of reader is compatible
+ * with srcu_down_read_fast() and srcu_up_read_fast().
+ *
+ * See the __srcu_read_lock_fast() comment for more details.
+ */
+static inline
+struct srcu_ctr __percpu notrace *__srcu_read_lock_fast_updown(struct srcu_struct *ssp)
{
- struct srcu_data *sdp = raw_cpu_ptr(ssp->sda);
+ struct srcu_ctr __percpu *scp = READ_ONCE(ssp->srcu_ctrp);
- if (likely(READ_ONCE(sdp->srcu_reader_flavor) & read_flavor))
- return;
+ if (!IS_ENABLED(CONFIG_NEED_SRCU_NMI_SAFE))
+ this_cpu_inc(scp->srcu_locks.counter); // Y, and implicit RCU reader.
+ else
+ atomic_long_inc(raw_cpu_ptr(&scp->srcu_locks)); // Y, and implicit RCU reader.
+ barrier(); /* Avoid leaking the critical section. */
+ return scp;
+}
- // Note that the cmpxchg() in __srcu_check_read_flavor() is fully ordered.
- __srcu_check_read_flavor(ssp, read_flavor);
+/*
+ * Removes the count for the old reader from the appropriate
+ * per-CPU element of the srcu_struct. Note that this may well be a
+ * different CPU than that which was incremented by the corresponding
+ * srcu_read_lock_fast_updown(), but it must be within the same task.
+ *
+ * Please see the __srcu_read_lock_fast() function's header comment for
+ * information on implicit RCU readers and NMI safety.
+ */
+static inline void notrace
+__srcu_read_unlock_fast_updown(struct srcu_struct *ssp, struct srcu_ctr __percpu *scp)
+{
+ barrier(); /* Avoid leaking the critical section. */
+ if (!IS_ENABLED(CONFIG_NEED_SRCU_NMI_SAFE))
+ this_cpu_inc(scp->srcu_unlocks.counter); // Z, and implicit RCU reader.
+ else
+ atomic_long_inc(raw_cpu_ptr(&scp->srcu_unlocks)); // Z, and implicit RCU reader.
}
-// Record non-_lite() usage only for CONFIG_PROVE_RCU=y kernels.
+void __srcu_check_read_flavor(struct srcu_struct *ssp, int read_flavor);
+
+// Record SRCU-reader usage type only for CONFIG_PROVE_RCU=y kernels.
static inline void srcu_check_read_flavor(struct srcu_struct *ssp, int read_flavor)
{
if (IS_ENABLED(CONFIG_PROVE_RCU))
diff --git a/include/linux/tpm.h b/include/linux/tpm.h
index dc0338a783f3..b15360ff78d7 100644
--- a/include/linux/tpm.h
+++ b/include/linux/tpm.h
@@ -473,6 +473,7 @@ extern int tpm_pcr_extend(struct tpm_chip *chip, u32 pcr_idx,
extern int tpm_get_random(struct tpm_chip *chip, u8 *data, size_t max);
extern struct tpm_chip *tpm_default_chip(void);
void tpm2_flush_context(struct tpm_chip *chip, u32 handle);
+int tpm2_find_hash_alg(unsigned int crypto_id);
static inline void tpm_buf_append_empty_auth(struct tpm_buf *buf, u32 handle)
{
diff --git a/include/soc/fsl/caam-blob.h b/include/soc/fsl/caam-blob.h
index 937cac52f36d..922f7ec3e231 100644
--- a/include/soc/fsl/caam-blob.h
+++ b/include/soc/fsl/caam-blob.h
@@ -1,6 +1,7 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (C) 2020 Pengutronix, Ahmad Fatoum <kernel@pengutronix.de>
+ * Copyright 2024-2025 NXP
*/
#ifndef __CAAM_BLOB_GEN
@@ -12,11 +13,34 @@
#define CAAM_BLOB_KEYMOD_LENGTH 16
#define CAAM_BLOB_OVERHEAD (32 + 16)
#define CAAM_BLOB_MAX_LEN 4096
+#define CAAM_ENC_ALGO_CCM 0x1
+#define CAAM_ENC_ALGO_ECB 0x2
+#define CAAM_NONCE_SIZE 6
+#define CAAM_ICV_SIZE 6
+#define CAAM_CCM_OVERHEAD (CAAM_NONCE_SIZE + CAAM_ICV_SIZE)
struct caam_blob_priv;
/**
+ * struct caam_pkey_info - information for CAAM protected key
+ * @is_pkey: flag identifying whether the key is protected.
+ * @key_enc_algo: key encryption algorithm, %CAAM_ENC_ALGO_CCM or %CAAM_ENC_ALGO_ECB.
+ * @plain_key_sz: size of the plain key.
+ * @key_buf: key data.
+ */
+struct caam_pkey_info {
+ u8 is_pkey;
+ u8 key_enc_algo;
+ u16 plain_key_sz;
+ u8 key_buf[];
+} __packed;
+
+/* sizeof struct caam_pkey_info */
+#define CAAM_PKEY_HEADER 4
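A sketch of how this header might be populated in front of the key payload (illustrative only; my_fill_pkey_header() is a made-up helper, and the exact blob layout expected by the CAAM driver is an assumption not confirmed by this header alone):

	/* Illustrative sketch only. */
	static size_t my_fill_pkey_header(void *buf, u16 plain_key_sz, bool use_ccm)
	{
		struct caam_pkey_info *pki = buf;

		pki->is_pkey = 1;
		pki->key_enc_algo = use_ccm ? CAAM_ENC_ALGO_CCM : CAAM_ENC_ALGO_ECB;
		pki->plain_key_sz = plain_key_sz;
		return CAAM_PKEY_HEADER;	/* Fixed header bytes preceding key_buf[]. */
	}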
+
+/**
* struct caam_blob_info - information for CAAM blobbing
+ * @pkey_info: protected key information
* @input: pointer to input buffer (must be DMAable)
* @input_len: length of @input buffer in bytes.
* @output: pointer to output buffer (must be DMAable)
@@ -26,6 +50,8 @@ struct caam_blob_priv;
* May not exceed %CAAM_BLOB_KEYMOD_LENGTH
*/
struct caam_blob_info {
+ struct caam_pkey_info pkey_info;
+
void *input;
size_t input_len;
diff --git a/include/trace/events/sched_ext.h b/include/trace/events/sched_ext.h
index 50e4b712735a..d1bf5acd59c5 100644
--- a/include/trace/events/sched_ext.h
+++ b/include/trace/events/sched_ext.h
@@ -45,6 +45,45 @@ TRACE_EVENT(sched_ext_event,
)
);
+TRACE_EVENT(sched_ext_bypass_lb,
+
+ TP_PROTO(__u32 node, __u32 nr_cpus, __u32 nr_tasks, __u32 nr_balanced,
+ __u32 before_min, __u32 before_max,
+ __u32 after_min, __u32 after_max),
+
+ TP_ARGS(node, nr_cpus, nr_tasks, nr_balanced,
+ before_min, before_max, after_min, after_max),
+
+ TP_STRUCT__entry(
+ __field( __u32, node )
+ __field( __u32, nr_cpus )
+ __field( __u32, nr_tasks )
+ __field( __u32, nr_balanced )
+ __field( __u32, before_min )
+ __field( __u32, before_max )
+ __field( __u32, after_min )
+ __field( __u32, after_max )
+ ),
+
+ TP_fast_assign(
+ __entry->node = node;
+ __entry->nr_cpus = nr_cpus;
+ __entry->nr_tasks = nr_tasks;
+ __entry->nr_balanced = nr_balanced;
+ __entry->before_min = before_min;
+ __entry->before_max = before_max;
+ __entry->after_min = after_min;
+ __entry->after_max = after_max;
+ ),
+
+ TP_printk("node %u: nr_cpus=%u nr_tasks=%u nr_balanced=%u min=%u->%u max=%u->%u",
+ __entry->node, __entry->nr_cpus,
+ __entry->nr_tasks, __entry->nr_balanced,
+ __entry->before_min, __entry->after_min,
+ __entry->before_max, __entry->after_max
+ )
+);
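
The TRACE_EVENT() above generates a trace_sched_ext_bypass_lb() hook; a hypothetical call site in the bypass load-balancing path (argument names are illustrative) would look like:

	/* Illustrative sketch only. */
	trace_sched_ext_bypass_lb(node_id, nr_node_cpus, nr_tasks, nr_balanced,
				  before_min, before_max, after_min, after_max);

Per the TP_printk() format, each record prints as "node N: nr_cpus=... nr_tasks=... nr_balanced=... min=before->after max=before->after".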
+
#endif /* _TRACE_SCHED_EXT_H */
/* This part must be outside protection */