author:    Jann Horn <jannh@google.com>          2024-08-09 17:36:56 +0200
committer: Vlastimil Babka <vbabka@suse.cz>      2024-08-27 14:12:51 +0200
commit:    b8c8ba73c68bb3c3e9dad22f488b86c540c839f9
tree:      f2deac9d7c013e58efa585debb67409ed5ec20bb /mm/kasan
parent:    b3c34245756adada8a50bdaedbb3965b071c7b0a

slub: Introduce CONFIG_SLUB_RCU_DEBUG
Currently, KASAN is unable to catch use-after-free in SLAB_TYPESAFE_BY_RCU
slabs because use-after-free is allowed within the RCU grace period by
design.
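For illustration (hypothetical foo_lookup()/foo_table_find(), not code from this patch), the access pattern that SLAB_TYPESAFE_BY_RCU deliberately permits looks roughly like the sketch below: a reader may keep dereferencing an object after kmem_cache_free() as long as it stays inside its RCU read-side critical section and revalidates the object before trusting it.

#include <linux/rcupdate.h>
#include <linux/refcount.h>

struct foo {
	refcount_t ref;
	int key;
};

/* Hypothetical lockless table lookup returning an RCU-protected pointer. */
static struct foo *foo_table_find(int key);

static struct foo *foo_lookup(int key)
{
	struct foo *f;

	rcu_read_lock();
	f = foo_table_find(key);
	/*
	 * f may already have been kmem_cache_free()d by a concurrent writer.
	 * Because the cache is SLAB_TYPESAFE_BY_RCU, the memory stays a valid
	 * struct foo until all current readers are done, so this dereference
	 * is legal; callers still have to revalidate (e.g. recheck f->key)
	 * before trusting the object.
	 */
	if (f && !refcount_inc_not_zero(&f->ref))
		f = NULL;	/* lost the race: the object is being freed */
	rcu_read_unlock();

	return f;
}

This deliberate window is exactly why KASAN currently never poisons freed objects in such caches.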
Add a SLUB debugging feature which RCU-delays every individual
kmem_cache_free() before either actually freeing the object or handing it
off to KASAN, and change KASAN to poison freed objects as normal when this
option is enabled.
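The mechanism itself lives on the SLUB side (mm/slub.c), outside this mm/kasan-limited diffstat. As a rough, hypothetical sketch of the idea (helper names invented here, not taken from the patch): the free path stashes the object behind call_rcu(), and the real free, together with the KASAN poisoning, only happens once the grace period has ended.

#include <linux/container_of.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>

/* Hypothetical bookkeeping for one RCU-delayed free. */
struct rcu_delayed_free {
	struct rcu_head head;
	void *object;
};

/* Hypothetical "really free it now" helper: KASAN poison + allocator free. */
static void really_free_object(void *object);

static void delayed_free_rcu_cb(struct rcu_head *head)
{
	struct rcu_delayed_free *df =
		container_of(head, struct rcu_delayed_free, head);

	/* The grace period is over: any later access is a genuine UAF. */
	really_free_object(df->object);
	kfree(df);
}

/* Called instead of freeing immediately for SLAB_TYPESAFE_BY_RCU caches. */
static bool delay_free_for_rcu_debug(void *object)
{
	struct rcu_delayed_free *df = kmalloc(sizeof(*df), GFP_NOWAIT);

	if (!df)
		return false;	/* caller falls back to the normal free path */

	df->object = object;
	call_rcu(&df->head, delayed_free_rcu_cb);
	return true;
}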
For now I've configured Kconfig.debug to default-enable this feature in the
KASAN GENERIC and SW_TAGS modes; I'm not enabling it by default in HW_TAGS
mode because I'm not sure if it might have unwanted performance degradation
effects there.
Note that this is mostly useful with KASAN in the quarantine-based GENERIC
mode; SLAB_TYPESAFE_BY_RCU slabs are basically always also slabs with a
->ctor, and KASAN's assign_tag() currently has to assign fixed tags for
those, reducing the effectiveness of SW_TAGS/HW_TAGS mode.
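Paraphrased as a sketch (hypothetical choose_object_tag(), not the real assign_tag(); it assumes KASAN's internal get_tag()/kasan_random_tag() helpers): for such caches the tag embedded in the pointer has to stay stable across free and reallocation, because users may legitimately keep dereferencing an old pointer, so a stale pointer can never produce a tag mismatch in the tag-based modes.

static u8 choose_object_tag(struct kmem_cache *cache, const void *object)
{
	if (cache->ctor || (cache->flags & SLAB_TYPESAFE_BY_RCU))
		/*
		 * The tag must not change between lifetimes of this slot, so
		 * a use-after-free through a stale pointer still "matches".
		 */
		return get_tag(object);

	return kasan_random_tag();	/* fresh tag for every allocation */
}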
(A possible future extension of this work would be to also let SLUB call
the ->ctor() on every allocation instead of only when the slab page is
allocated; then tag-based modes would be able to assign new tags on every
reallocation.)
Tested-by: syzbot+263726e59eab6b442723@syzkaller.appspotmail.com
Reviewed-by: Andrey Konovalov <andreyknvl@gmail.com>
Acked-by: Marco Elver <elver@google.com>
Acked-by: Vlastimil Babka <vbabka@suse.cz> #slab
Signed-off-by: Jann Horn <jannh@google.com>
Signed-off-by: Vlastimil Babka <vbabka@suse.cz>
Diffstat (limited to 'mm/kasan')
-rw-r--r--	mm/kasan/common.c	| 11
-rw-r--r--	mm/kasan/kasan_test.c	| 46
2 files changed, 52 insertions, 5 deletions
diff --git a/mm/kasan/common.c b/mm/kasan/common.c
index f26bbc087b3b..ed4873e18c75 100644
--- a/mm/kasan/common.c
+++ b/mm/kasan/common.c
@@ -230,14 +230,14 @@ static bool check_slab_allocation(struct kmem_cache *cache, void *object,
 }
 
 static inline void poison_slab_object(struct kmem_cache *cache, void *object,
-				      bool init)
+				      bool init, bool still_accessible)
 {
 	void *tagged_object = object;
 
 	object = kasan_reset_tag(object);
 
 	/* RCU slabs could be legally used after free within the RCU period. */
-	if (unlikely(cache->flags & SLAB_TYPESAFE_BY_RCU))
+	if (unlikely(still_accessible))
 		return;
 
 	kasan_poison(object, round_up(cache->object_size, KASAN_GRANULE_SIZE),
@@ -255,12 +255,13 @@ bool __kasan_slab_pre_free(struct kmem_cache *cache, void *object,
 	return check_slab_allocation(cache, object, ip);
 }
 
-bool __kasan_slab_free(struct kmem_cache *cache, void *object, bool init)
+bool __kasan_slab_free(struct kmem_cache *cache, void *object, bool init,
+		       bool still_accessible)
 {
 	if (!kasan_arch_is_ready() || is_kfence_address(object))
 		return false;
 
-	poison_slab_object(cache, object, init);
+	poison_slab_object(cache, object, init, still_accessible);
 
 	/*
 	 * If the object is put into quarantine, do not let slab put the object
@@ -518,7 +519,7 @@ bool __kasan_mempool_poison_object(void *ptr, unsigned long ip)
 	if (check_slab_allocation(slab->slab_cache, ptr, ip))
 		return false;
 
-	poison_slab_object(slab->slab_cache, ptr, false);
+	poison_slab_object(slab->slab_cache, ptr, false, false);
 	return true;
 }
 
diff --git a/mm/kasan/kasan_test.c b/mm/kasan/kasan_test.c
index 7b32be2a3cf0..567d33b493e2 100644
--- a/mm/kasan/kasan_test.c
+++ b/mm/kasan/kasan_test.c
@@ -996,6 +996,51 @@ static void kmem_cache_invalid_free(struct kunit *test)
 	kmem_cache_destroy(cache);
 }
 
+static void kmem_cache_rcu_uaf(struct kunit *test)
+{
+	char *p;
+	size_t size = 200;
+	struct kmem_cache *cache;
+
+	KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_SLUB_RCU_DEBUG);
+
+	cache = kmem_cache_create("test_cache", size, 0, SLAB_TYPESAFE_BY_RCU,
+				  NULL);
+	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, cache);
+
+	p = kmem_cache_alloc(cache, GFP_KERNEL);
+	if (!p) {
+		kunit_err(test, "Allocation failed: %s\n", __func__);
+		kmem_cache_destroy(cache);
+		return;
+	}
+	*p = 1;
+
+	rcu_read_lock();
+
+	/* Free the object - this will internally schedule an RCU callback. */
+	kmem_cache_free(cache, p);
+
+	/*
+	 * We should still be allowed to access the object at this point because
+	 * the cache is SLAB_TYPESAFE_BY_RCU and we've been in an RCU read-side
+	 * critical section since before the kmem_cache_free().
+	 */
+	READ_ONCE(*p);
+
+	rcu_read_unlock();
+
+	/*
+	 * Wait for the RCU callback to execute; after this, the object should
+	 * have actually been freed from KASAN's perspective.
+	 */
+	rcu_barrier();
+
+	KUNIT_EXPECT_KASAN_FAIL(test, READ_ONCE(*p));
+
+	kmem_cache_destroy(cache);
+}
+
 static void empty_cache_ctor(void *object) { }
 
 static void kmem_cache_double_destroy(struct kunit *test)
@@ -1937,6 +1982,7 @@ static struct kunit_case kasan_kunit_test_cases[] = {
 	KUNIT_CASE(kmem_cache_oob),
 	KUNIT_CASE(kmem_cache_double_free),
 	KUNIT_CASE(kmem_cache_invalid_free),
+	KUNIT_CASE(kmem_cache_rcu_uaf),
 	KUNIT_CASE(kmem_cache_double_destroy),
 	KUNIT_CASE(kmem_cache_accounted),
 	KUNIT_CASE(kmem_cache_bulk),