| author | Joonas Lahtinen <joonas.lahtinen@linux.intel.com> | 2021-10-22 13:10:02 +0300 |
|---|---|---|
| committer | Joonas Lahtinen <joonas.lahtinen@linux.intel.com> | 2021-10-22 13:10:02 +0300 |
| commit | ef3e619221248a7ea5fc711a2bf9017c94d2f644 | |
| tree | b69232290b6a3e3d2a26560052c7183ab0ba3c52 /mm | |
| parent | 777226dac058d119286b4081953cb5aa2cb7394b | |
| parent | 6f2f7c83303d2227f47551423e507d77d9ea01c7 | |
Merge drm/drm-next into drm-intel-gt-next
Backmerging to pull in the new dma_resv iterators requested by
Maarten and Matt.
Signed-off-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
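
For context, the sketch below shows how a consumer might use the dma_resv iterators this backmerge brings in (dma_resv_iter_begin(), dma_resv_for_each_fence_unlocked(), dma_resv_iter_end()). It is a minimal sketch against the API as merged here; the helper name, the decision to walk all fences, and the blocking wait are illustrative assumptions, not part of this merge.

```c
#include <linux/dma-resv.h>
#include <linux/dma-fence.h>

/*
 * Hypothetical helper: wait for every fence currently attached to @resv.
 * Passing "true" as the third argument to dma_resv_iter_begin() asks the
 * cursor to walk the shared fences as well as the exclusive one.
 */
static long wait_for_all_fences(struct dma_resv *resv)
{
	struct dma_resv_iter cursor;
	struct dma_fence *fence;
	long ret = 0;

	dma_resv_iter_begin(&cursor, resv, true);
	dma_resv_for_each_fence_unlocked(&cursor, fence) {
		ret = dma_fence_wait(fence, true);
		if (ret < 0)
			break;
	}
	dma_resv_iter_end(&cursor);

	return ret;
}
```

The _unlocked variant is meant for callers that do not hold the reservation lock; a locked dma_resv_for_each_fence() form also exists for paths that do.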
Diffstat (limited to 'mm')
| -rw-r--r-- | mm/ksm.c | 2 |
| -rw-r--r-- | mm/memblock.c | 16 |
| -rw-r--r-- | mm/vmscan.c | 15 |
3 files changed, 30 insertions, 3 deletions
@@ -651,10 +651,8 @@ static void remove_node_from_stable_tree(struct stable_node *stable_node)
 	 * from &migrate_nodes. This will verify that future list.h changes
 	 * don't break STABLE_NODE_DUP_HEAD. Only recent gcc can handle it.
 	 */
-#if defined(GCC_VERSION) && GCC_VERSION >= 40903
 	BUILD_BUG_ON(STABLE_NODE_DUP_HEAD <= &migrate_nodes);
 	BUILD_BUG_ON(STABLE_NODE_DUP_HEAD >= &migrate_nodes + 1);
-#endif
 
 	if (stable_node->head == &migrate_nodes)
 		list_del(&stable_node->list);
diff --git a/mm/memblock.c b/mm/memblock.c
index 0ab5a749bfa6..184dcd2e5d99 100644
--- a/mm/memblock.c
+++ b/mm/memblock.c
@@ -472,7 +472,7 @@ static int __init_memblock memblock_double_array(struct memblock_type *type,
 		kfree(old_array);
 	else if (old_array != memblock_memory_init_regions &&
 		 old_array != memblock_reserved_init_regions)
-		memblock_free(__pa(old_array), old_alloc_size);
+		memblock_free_ptr(old_array, old_alloc_size);
 
 	/*
 	 * Reserve the new array if that comes from the memblock. Otherwise, we
@@ -796,6 +796,20 @@ int __init_memblock memblock_remove(phys_addr_t base, phys_addr_t size)
 }
 
 /**
+ * memblock_free_ptr - free boot memory allocation
+ * @ptr: starting address of the boot memory allocation
+ * @size: size of the boot memory block in bytes
+ *
+ * Free boot memory block previously allocated by memblock_alloc_xx() API.
+ * The freeing memory will not be released to the buddy allocator.
+ */
+void __init_memblock memblock_free_ptr(void *ptr, size_t size)
+{
+	if (ptr)
+		memblock_free(__pa(ptr), size);
+}
+
+/**
  * memblock_free - free boot memory block
  * @base: phys starting address of the boot memory block
  * @size: size of the boot memory block in bytes
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 74296c2d1fed..b1512cefcffa 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -687,6 +687,21 @@ void unregister_shrinker(struct shrinker *shrinker)
 }
 EXPORT_SYMBOL(unregister_shrinker);
 
+/**
+ * synchronize_shrinkers - Wait for all running shrinkers to complete.
+ *
+ * This is equivalent to calling unregister_shrink() and register_shrinker(),
+ * but atomically and with less overhead. This is useful to guarantee that all
+ * shrinker invocations have seen an update, before freeing memory, similar to
+ * rcu.
+ */
+void synchronize_shrinkers(void)
+{
+	down_write(&shrinker_rwsem);
+	up_write(&shrinker_rwsem);
+}
+EXPORT_SYMBOL(synchronize_shrinkers);
+
 #define SHRINK_BATCH 128
 
 static unsigned long do_shrink_slab(struct shrink_control *shrinkctl,
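
As a usage note on the memblock change: memblock_free_ptr() takes the virtual address returned by memblock_alloc() and does the __pa() conversion itself, and it tolerates a NULL pointer. The boot-time caller below is a hypothetical sketch; early_buf, early_buf_size and the two functions are illustrative names, not part of this patch.

```c
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/memblock.h>
#include <linux/sizes.h>
#include <linux/cache.h>

static void *early_buf __initdata;
static size_t early_buf_size __initdata;

static int __init early_buf_alloc(void)
{
	early_buf_size = SZ_64K;
	early_buf = memblock_alloc(early_buf_size, SMP_CACHE_BYTES);
	return early_buf ? 0 : -ENOMEM;
}

static void __init early_buf_release(void)
{
	/* Before this patch: memblock_free(__pa(early_buf), early_buf_size); */
	memblock_free_ptr(early_buf, early_buf_size);
	early_buf = NULL;
}
```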

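Similarly, a hypothetical use of the new synchronize_shrinkers(): like synchronize_rcu(), it lets a caller publish new state that a registered shrinker reads, wait until no shrinker invocation can still be looking at the old state, and only then free it. Everything below apart from synchronize_shrinkers() itself (my_table, active_table, the replace helper) is an illustrative assumption.

```c
#include <linux/compiler.h>
#include <linux/shrinker.h>
#include <linux/slab.h>

/* Hypothetical table that a registered shrinker scans for reclaimable entries. */
struct my_table {
	unsigned long nr_entries;
	/* ... entry storage ... */
};

static struct my_table *active_table;

static void my_cache_replace_table(struct my_table *new_table)
{
	struct my_table *old = active_table;

	/* Publish the new table; an in-flight shrinker may still read @old. */
	WRITE_ONCE(active_table, new_table);

	/*
	 * synchronize_shrinkers() returns once every shrinker invocation
	 * that started before this point has finished, so @old can no
	 * longer be observed and may be freed.
	 */
	synchronize_shrinkers();
	kfree(old);
}
```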