Diffstat (limited to 'mm/slab_common.c')
-rw-r--r--	mm/slab_common.c	83
1 file changed, 52 insertions, 31 deletions
diff --git a/mm/slab_common.c b/mm/slab_common.c
index 932d13ada36c..eed7ea556cb1 100644
--- a/mm/slab_common.c
+++ b/mm/slab_common.c
@@ -259,7 +259,7 @@ out:
  * @object_size: The size of objects to be created in this cache.
  * @args: Additional arguments for the cache creation (see
  *        &struct kmem_cache_args).
- * @flags: See the desriptions of individual flags. The common ones are listed
+ * @flags: See the descriptions of individual flags. The common ones are listed
  *         in the description below.
  *
  * Not to be called directly, use the kmem_cache_create() wrapper with the same
@@ -492,7 +492,7 @@ void kmem_cache_destroy(struct kmem_cache *s)
 		return;
 
 	/* in-flight kfree_rcu()'s may include objects from our cache */
-	kvfree_rcu_barrier();
+	kvfree_rcu_barrier_on_cache(s);
 
 	if (IS_ENABLED(CONFIG_SLUB_RCU_DEBUG) &&
 	    (s->flags & SLAB_TYPESAFE_BY_RCU)) {
@@ -997,26 +997,27 @@ void __init create_kmalloc_caches(void)
  */
 size_t __ksize(const void *object)
 {
-	struct folio *folio;
+	const struct page *page;
+	const struct slab *slab;
 
 	if (unlikely(object == ZERO_SIZE_PTR))
 		return 0;
 
-	folio = virt_to_folio(object);
+	page = virt_to_page(object);
 
-	if (unlikely(!folio_test_slab(folio))) {
-		if (WARN_ON(folio_size(folio) <= KMALLOC_MAX_CACHE_SIZE))
-			return 0;
-		if (WARN_ON(object != folio_address(folio)))
-			return 0;
-		return folio_size(folio);
-	}
+	if (unlikely(PageLargeKmalloc(page)))
+		return large_kmalloc_size(page);
+
+	slab = page_slab(page);
+	/* Delete this after we're sure there are no users */
+	if (WARN_ON(!slab))
+		return page_size(page);
 
 #ifdef CONFIG_SLUB_DEBUG
-	skip_orig_size_check(folio_slab(folio)->slab_cache, object);
+	skip_orig_size_check(slab->slab_cache, object);
 #endif
 
-	return slab_ksize(folio_slab(folio)->slab_cache);
+	return slab_ksize(slab->slab_cache);
 }
 
 gfp_t kmalloc_fix_flags(gfp_t flags)
@@ -1614,17 +1615,15 @@ static void kfree_rcu_work(struct work_struct *work)
 static bool kfree_rcu_sheaf(void *obj)
 {
 	struct kmem_cache *s;
-	struct folio *folio;
 	struct slab *slab;
 
 	if (is_vmalloc_addr(obj))
 		return false;
 
-	folio = virt_to_folio(obj);
-	if (unlikely(!folio_test_slab(folio)))
+	slab = virt_to_slab(obj);
+	if (unlikely(!slab))
 		return false;
 
-	slab = folio_slab(folio);
 	s = slab->slab_cache;
 	if (s->cpu_sheaves) {
 		if (likely(!IS_ENABLED(CONFIG_NUMA) ||
@@ -2039,25 +2038,13 @@ unlock_return:
 }
 EXPORT_SYMBOL_GPL(kvfree_call_rcu);
 
-/**
- * kvfree_rcu_barrier - Wait until all in-flight kvfree_rcu() complete.
- *
- * Note that a single argument of kvfree_rcu() call has a slow path that
- * triggers synchronize_rcu() following by freeing a pointer. It is done
- * before the return from the function. Therefore for any single-argument
- * call that will result in a kfree() to a cache that is to be destroyed
- * during module exit, it is developer's responsibility to ensure that all
- * such calls have returned before the call to kmem_cache_destroy().
- */
-void kvfree_rcu_barrier(void)
+static inline void __kvfree_rcu_barrier(void)
 {
 	struct kfree_rcu_cpu_work *krwp;
 	struct kfree_rcu_cpu *krcp;
 	bool queued;
 	int i, cpu;
 
-	flush_all_rcu_sheaves();
-
 	/*
 	 * Firstly we detach objects and queue them over an RCU-batch
 	 * for all CPUs. Finally queued works are flushed for each CPU.
@@ -2119,8 +2106,43 @@ void kvfree_rcu_barrier(void)
 		}
 	}
 }
+
+/**
+ * kvfree_rcu_barrier - Wait until all in-flight kvfree_rcu() complete.
+ *
+ * Note that a single argument of kvfree_rcu() call has a slow path that
+ * triggers synchronize_rcu() following by freeing a pointer. It is done
+ * before the return from the function. Therefore for any single-argument
+ * call that will result in a kfree() to a cache that is to be destroyed
+ * during module exit, it is developer's responsibility to ensure that all
+ * such calls have returned before the call to kmem_cache_destroy().
+ */
+void kvfree_rcu_barrier(void)
+{
+	flush_all_rcu_sheaves();
+	__kvfree_rcu_barrier();
+}
 EXPORT_SYMBOL_GPL(kvfree_rcu_barrier);
 
+/**
+ * kvfree_rcu_barrier_on_cache - Wait for in-flight kvfree_rcu() calls on a
+ * specific slab cache.
+ * @s: slab cache to wait for
+ *
+ * See the description of kvfree_rcu_barrier() for details.
+ */
+void kvfree_rcu_barrier_on_cache(struct kmem_cache *s)
+{
+	if (s->cpu_sheaves)
+		flush_rcu_sheaves_on_cache(s);
+	/*
+	 * TODO: Introduce a version of __kvfree_rcu_barrier() that works
+	 * on a specific slab cache.
+	 */
+	__kvfree_rcu_barrier();
+}
+EXPORT_SYMBOL_GPL(kvfree_rcu_barrier_on_cache);
+
 static unsigned long
 kfree_rcu_shrink_count(struct shrinker *shrink, struct shrink_control *sc)
 {
@@ -2216,4 +2238,3 @@ void __init kvfree_rcu_init(void)
 }
 
 #endif /* CONFIG_KVFREE_RCU_BATCHED */
-
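
A note on the kmem_cache_destroy() hunk: the barrier exists for the common module-exit pattern where objects from a cache are still queued in kfree_rcu() batches when the cache is torn down. Below is a minimal sketch of that pattern (a hypothetical module, not part of this patch); with this change, kmem_cache_destroy() drains only kvfree_rcu() work that can involve the cache being destroyed, via kvfree_rcu_barrier_on_cache(), instead of waiting for every pending kvfree_rcu() system-wide.

#include <linux/module.h>
#include <linux/slab.h>
#include <linux/rcupdate.h>

/* Hypothetical object type; any cache whose objects go through
 * kfree_rcu() fits this pattern. */
struct foo {
	long data;
	struct rcu_head rcu;
};

static struct kmem_cache *foo_cache;

static void __maybe_unused foo_release(struct foo *f)
{
	/* Queued for freeing after a grace period; may still be in
	 * flight when the module exits. */
	kfree_rcu(f, rcu);
}

static int __init foo_init(void)
{
	foo_cache = kmem_cache_create("foo", sizeof(struct foo), 0, 0, NULL);
	return foo_cache ? 0 : -ENOMEM;
}

static void __exit foo_exit(void)
{
	/*
	 * With this patch, kmem_cache_destroy() issues
	 * kvfree_rcu_barrier_on_cache(s) internally, so only pending
	 * kvfree_rcu() work that may touch foo_cache is drained here.
	 */
	kmem_cache_destroy(foo_cache);
}

module_init(foo_init);
module_exit(foo_exit);
MODULE_LICENSE("GPL");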

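The kernel-doc kept on kvfree_rcu_barrier() draws a distinction worth spelling out: the two-argument kvfree_rcu() is batched and is what the barriers flush, while the single-argument slow path can free the pointer before returning. A sketch with hypothetical call sites, using the existing kvfree_rcu() and kvfree_rcu_mightsleep() helpers:

#include <linux/slab.h>
#include <linux/rcupdate.h>

struct bar {
	struct rcu_head rcu;
	char payload[64];
};

static void __maybe_unused bar_free(struct bar *b)
{
	/*
	 * Two-argument form: queued into a per-CPU batch and freed after
	 * a grace period. This is the in-flight work that
	 * kvfree_rcu_barrier() and kvfree_rcu_barrier_on_cache() wait for.
	 */
	kvfree_rcu(b, rcu);
}

static void __maybe_unused bar_free_mightsleep(struct bar *b)
{
	/*
	 * Single-argument form: has a slow path that does
	 * synchronize_rcu() and frees the pointer before returning,
	 * hence the doc comment's note that callers must ensure such
	 * calls have returned before kmem_cache_destroy().
	 */
	kvfree_rcu_mightsleep(b);
}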