author     Arnaldo Carvalho de Melo <acme@redhat.com>  2021-03-08 10:11:33 -0300
committer  Arnaldo Carvalho de Melo <acme@redhat.com>  2021-03-08 10:11:33 -0300
commit     009ef05f98129aa91c62c3baab859ba593a15bb2 (patch)
tree       f3414f08d636a597545b1e4f443b373b9d6d8f4b /include/linux/kasan.h
parent     2777b81b379df772defd654bc4d3fa82dca17a4b (diff)
parent     144c79ef33536b4ecb4951e07dbc1f2b7fa99d32 (diff)
Merge remote-tracking branch 'torvalds/master' into perf/core
To pick up the fixes sent for v5.12 and continue development based on
v5.12-rc2, i.e. without the swap on file bug.

This also gets a slightly newer and better tools/perf/arch/arm/util/cs-etm.c
patch version, using the BIT() macro, that had already been slated to v5.13
but ended up going to v5.12-rc1 on an older version.

Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
Diffstat (limited to 'include/linux/kasan.h')
-rw-r--r--  include/linux/kasan.h  48
1 file changed, 36 insertions(+), 12 deletions(-)
diff --git a/include/linux/kasan.h b/include/linux/kasan.h
index 0aea9e2a2a01..b91732bd05d7 100644
--- a/include/linux/kasan.h
+++ b/include/linux/kasan.h
@@ -83,6 +83,7 @@ static inline void kasan_disable_current(void) {}
struct kasan_cache {
int alloc_meta_offset;
int free_meta_offset;
+ bool is_kmalloc;
};
#ifdef CONFIG_KASAN_HW_TAGS
@@ -143,6 +144,13 @@ static __always_inline void kasan_cache_create(struct kmem_cache *cache,
__kasan_cache_create(cache, size, flags);
}
+void __kasan_cache_create_kmalloc(struct kmem_cache *cache);
+static __always_inline void kasan_cache_create_kmalloc(struct kmem_cache *cache)
+{
+ if (kasan_enabled())
+ __kasan_cache_create_kmalloc(cache);
+}
+
size_t __kasan_metadata_size(struct kmem_cache *cache);
static __always_inline size_t kasan_metadata_size(struct kmem_cache *cache)
{
@@ -185,19 +193,25 @@ static __always_inline void * __must_check kasan_init_slab_obj(
}
bool __kasan_slab_free(struct kmem_cache *s, void *object, unsigned long ip);
-static __always_inline bool kasan_slab_free(struct kmem_cache *s, void *object,
- unsigned long ip)
+static __always_inline bool kasan_slab_free(struct kmem_cache *s, void *object)
{
if (kasan_enabled())
- return __kasan_slab_free(s, object, ip);
+ return __kasan_slab_free(s, object, _RET_IP_);
return false;
}
+void __kasan_kfree_large(void *ptr, unsigned long ip);
+static __always_inline void kasan_kfree_large(void *ptr)
+{
+ if (kasan_enabled())
+ __kasan_kfree_large(ptr, _RET_IP_);
+}
+
void __kasan_slab_free_mempool(void *ptr, unsigned long ip);
-static __always_inline void kasan_slab_free_mempool(void *ptr, unsigned long ip)
+static __always_inline void kasan_slab_free_mempool(void *ptr)
{
if (kasan_enabled())
- __kasan_slab_free_mempool(ptr, ip);
+ __kasan_slab_free_mempool(ptr, _RET_IP_);
}
void * __must_check __kasan_slab_alloc(struct kmem_cache *s,
@@ -240,13 +254,19 @@ static __always_inline void * __must_check kasan_krealloc(const void *object,
return (void *)object;
}
-void __kasan_kfree_large(void *ptr, unsigned long ip);
-static __always_inline void kasan_kfree_large(void *ptr, unsigned long ip)
+/*
+ * Unlike kasan_check_read/write(), kasan_check_byte() is performed even for
+ * the hardware tag-based mode that doesn't rely on compiler instrumentation.
+ */
+bool __kasan_check_byte(const void *addr, unsigned long ip);
+static __always_inline bool kasan_check_byte(const void *addr)
{
if (kasan_enabled())
- __kasan_kfree_large(ptr, ip);
+ return __kasan_check_byte(addr, _RET_IP_);
+ return true;
}
+
bool kasan_save_enable_multi_shot(void);
void kasan_restore_multi_shot(bool enabled);
@@ -266,6 +286,7 @@ static inline void kasan_free_pages(struct page *page, unsigned int order) {}
static inline void kasan_cache_create(struct kmem_cache *cache,
unsigned int *size,
slab_flags_t *flags) {}
+static inline void kasan_cache_create_kmalloc(struct kmem_cache *cache) {}
static inline size_t kasan_metadata_size(struct kmem_cache *cache) { return 0; }
static inline void kasan_poison_slab(struct page *page) {}
static inline void kasan_unpoison_object_data(struct kmem_cache *cache,
@@ -277,12 +298,12 @@ static inline void *kasan_init_slab_obj(struct kmem_cache *cache,
{
return (void *)object;
}
-static inline bool kasan_slab_free(struct kmem_cache *s, void *object,
- unsigned long ip)
+static inline bool kasan_slab_free(struct kmem_cache *s, void *object)
{
return false;
}
-static inline void kasan_slab_free_mempool(void *ptr, unsigned long ip) {}
+static inline void kasan_kfree_large(void *ptr) {}
+static inline void kasan_slab_free_mempool(void *ptr) {}
static inline void *kasan_slab_alloc(struct kmem_cache *s, void *object,
gfp_t flags)
{
@@ -302,7 +323,10 @@ static inline void *kasan_krealloc(const void *object, size_t new_size,
{
return (void *)object;
}
-static inline void kasan_kfree_large(void *ptr, unsigned long ip) {}
+static inline bool kasan_check_byte(const void *address)
+{
+ return true;
+}
#endif /* CONFIG_KASAN */
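
The first two hunks add kasan_cache_create_kmalloc() and the new is_kmalloc field so the slab allocator can tell KASAN which caches back kmalloc(). A minimal sketch, under assumed names, of how cache-setup code could use the hook; the helper, cache parameters and flags below are illustrative and not taken from this commit:

#include <linux/kasan.h>
#include <linux/slab.h>

/* Hypothetical helper; real kmalloc caches are set up by the slab core. */
static struct kmem_cache *example_kmalloc_cache(const char *name,
                                                unsigned int size)
{
        struct kmem_cache *s;

        s = kmem_cache_create(name, size, ARCH_KMALLOC_MINALIGN,
                              SLAB_HWCACHE_ALIGN, NULL);
        if (!s)
                return NULL;

        /*
         * Mark the cache as a kmalloc cache. With KASAN enabled this sets
         * the new is_kmalloc flag in the cache's struct kasan_cache; with
         * KASAN disabled it falls back to the empty stub added above.
         */
        kasan_cache_create_kmalloc(s);
        return s;
}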
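
The kasan_slab_free(), kasan_kfree_large() and kasan_slab_free_mempool() changes move the _RET_IP_ capture into the __always_inline wrappers: call sites stop passing an ip argument, and because the wrapper is inlined, _RET_IP_ still resolves inside the caller. A before/after sketch of a call site; the surrounding function is made up for illustration:

#include <linux/kasan.h>
#include <linux/slab.h>

/* Hypothetical free-path hook, not taken from mm/. */
static bool example_free_hook(struct kmem_cache *s, void *object)
{
        /*
         * Old interface:  return kasan_slab_free(s, object, _RET_IP_);
         * New interface:  the inlined wrapper records _RET_IP_ itself,
         * so a KASAN report still points at this caller.
         */
        return kasan_slab_free(s, object);
}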
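
The remaining hunks add kasan_check_byte(), an explicit one-byte validity check that, per the new comment, also works in the hardware tag-based mode where there is no compiler instrumentation; the wrapper returns true when KASAN is disabled (as does the !CONFIG_KASAN stub) and otherwise forwards to __kasan_check_byte() with _RET_IP_. A short sketch of a caller that bails out on an inaccessible pointer; the helper is an assumption, not code from this commit:

#include <linux/errno.h>
#include <linux/kasan.h>

/* Hypothetical validation helper. */
static int example_peek_byte(const void *objp)
{
        /* Explicitly check one byte before dereferencing the pointer. */
        if (!kasan_check_byte(objp))
                return -EINVAL;

        return *(const unsigned char *)objp;
}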