From 01e377c539ca52a6c753d0fdbe93b3b8fcd66a1c Mon Sep 17 00:00:00 2001
From: Sebastian Andrzej Siewior
Date: Thu, 18 Jun 2020 21:08:10 +0200
Subject: sched/core: Remove mmdrop() definition

Commit bf2c59fce4074 ("sched/core: Fix illegal RCU from offline CPUs")
introduced a definition for mmdrop(), but mmdrop() is already defined
as a static inline a few lines above.

Remove the newly introduced mmdrop() definition.

Signed-off-by: Sebastian Andrzej Siewior
Signed-off-by: Peter Zijlstra (Intel)
Link: https://lkml.kernel.org/r/20200618190810.790211-1-bigeasy@linutronix.de
---
 include/linux/sched/mm.h | 2 --
 1 file changed, 2 deletions(-)

(limited to 'include/linux/sched/mm.h')

diff --git a/include/linux/sched/mm.h b/include/linux/sched/mm.h
index 480a4d1b7dd8..a98604ea76f1 100644
--- a/include/linux/sched/mm.h
+++ b/include/linux/sched/mm.h
@@ -49,8 +49,6 @@ static inline void mmdrop(struct mm_struct *mm)
 		__mmdrop(mm);
 }
 
-void mmdrop(struct mm_struct *mm);
-
 /*
  * This has to be called after a get_task_mm()/mmget_not_zero()
  * followed by taking the mmap_lock for writing before modifying the
-- cgit

From e0078e2eb8620079d988f150ba02a4ce9b5a946a Mon Sep 17 00:00:00 2001
From: Randy Dunlap
Date: Wed, 15 Jul 2020 18:30:31 -0700
Subject: linux/sched/mm.h: drop duplicated words in comments

Drop doubled words "to" and "that".

Signed-off-by: Randy Dunlap
Signed-off-by: Peter Zijlstra (Intel)
Link: https://lkml.kernel.org/r/927ea8d8-3f6c-9b65-4c2b-63ab4bd59ef1@infradead.org
---
 include/linux/sched/mm.h | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

(limited to 'include/linux/sched/mm.h')

diff --git a/include/linux/sched/mm.h b/include/linux/sched/mm.h
index a98604ea76f1..6be66f52a2ad 100644
--- a/include/linux/sched/mm.h
+++ b/include/linux/sched/mm.h
@@ -23,7 +23,7 @@ extern struct mm_struct *mm_alloc(void);
  * will still exist later on and mmget_not_zero() has to be used before
  * accessing it.
  *
- * This is a preferred way to to pin @mm for a longer/unbounded amount
+ * This is a preferred way to pin @mm for a longer/unbounded amount
  * of time.
  *
  * Use mmdrop() to release the reference acquired by mmgrab().
@@ -232,7 +232,7 @@ static inline unsigned int memalloc_noio_save(void)
  * @flags: Flags to restore.
  *
  * Ends the implicit GFP_NOIO scope started by memalloc_noio_save function.
- * Always make sure that that the given flags is the return value from the
+ * Always make sure that the given flags is the return value from the
  * pairing memalloc_noio_save call.
  */
 static inline void memalloc_noio_restore(unsigned int flags)
@@ -263,7 +263,7 @@ static inline unsigned int memalloc_nofs_save(void)
  * @flags: Flags to restore.
  *
  * Ends the implicit GFP_NOFS scope started by memalloc_nofs_save function.
- * Always make sure that that the given flags is the return value from the
+ * Always make sure that the given flags is the return value from the
  * pairing memalloc_nofs_save call.
  */
 static inline void memalloc_nofs_restore(unsigned int flags)
-- cgit

From 8510e69c8efef82f2b37ea3e8ea19a27122c533e Mon Sep 17 00:00:00 2001
From: Joonsoo Kim
Date: Thu, 6 Aug 2020 23:26:04 -0700
Subject: mm/page_alloc: fix memalloc_nocma_{save/restore} APIs

Currently, the memalloc_nocma_{save/restore} API that prevents CMA memory
from being used for page allocation is implemented using
current_gfp_context(). However, there are two problems with this
implementation.

First, it doesn't work for the allocation fastpath.
In the fastpath, the original gfp_mask is used, since current_gfp_context()
was introduced to control reclaim and is only applied on the slowpath. So
CMA memory can still be allocated through the allocation fastpath even if
the memalloc_nocma_{save/restore} APIs are used. Currently there is just
one user of these APIs, and it has a fallback method to prevent an actual
problem.

Second, clearing __GFP_MOVABLE in current_gfp_context() has the side
effect of also excluding ZONE_MOVABLE memory from the allocation target.

To fix these problems, this patch changes the implementation to exclude
the CMA area in page allocation by using alloc_flags. alloc_flags is
mainly used to control allocation, so it is a good fit for excluding the
CMA area during allocation.

Fixes: d7fefcc8de91 ("mm/cma: add PF flag to force non cma alloc")
Signed-off-by: Joonsoo Kim
Signed-off-by: Andrew Morton
Reviewed-by: Vlastimil Babka
Cc: Christoph Hellwig
Cc: Roman Gushchin
Cc: Mike Kravetz
Cc: Naoya Horiguchi
Cc: Michal Hocko
Cc: "Aneesh Kumar K . V"
Link: http://lkml.kernel.org/r/1595468942-29687-1-git-send-email-iamjoonsoo.kim@lge.com
Signed-off-by: Linus Torvalds
---
 include/linux/sched/mm.h | 8 +-------
 1 file changed, 1 insertion(+), 7 deletions(-)

(limited to 'include/linux/sched/mm.h')

diff --git a/include/linux/sched/mm.h b/include/linux/sched/mm.h
index 6be66f52a2ad..85023ddc2dc2 100644
--- a/include/linux/sched/mm.h
+++ b/include/linux/sched/mm.h
@@ -175,12 +175,10 @@ static inline bool in_vfork(struct task_struct *tsk)
  * Applies per-task gfp context to the given allocation flags.
  * PF_MEMALLOC_NOIO implies GFP_NOIO
  * PF_MEMALLOC_NOFS implies GFP_NOFS
- * PF_MEMALLOC_NOCMA implies no allocation from CMA region.
  */
 static inline gfp_t current_gfp_context(gfp_t flags)
 {
-	if (unlikely(current->flags &
-		     (PF_MEMALLOC_NOIO | PF_MEMALLOC_NOFS | PF_MEMALLOC_NOCMA))) {
+	if (unlikely(current->flags & (PF_MEMALLOC_NOIO | PF_MEMALLOC_NOFS))) {
 		/*
 		 * NOIO implies both NOIO and NOFS and it is a weaker context
 		 * so always make sure it makes precedence
@@ -189,10 +187,6 @@ static inline gfp_t current_gfp_context(gfp_t flags)
 			flags &= ~(__GFP_IO | __GFP_FS);
 		else if (current->flags & PF_MEMALLOC_NOFS)
 			flags &= ~__GFP_FS;
-#ifdef CONFIG_CMA
-		if (current->flags & PF_MEMALLOC_NOCMA)
-			flags &= ~__GFP_MOVABLE;
-#endif
 	}
 	return flags;
 }
-- cgit

From af161bee93332a1ff10ba029f41936d21850ae82 Mon Sep 17 00:00:00 2001
From: Waiman Long
Date: Tue, 11 Aug 2020 18:32:06 -0700
Subject: include/linux/sched/mm.h: optimize current_gfp_context()

The current_gfp_context() function converts a number of PF_MEMALLOC_*
per-process flags into the corresponding GFP_* flags for memory
allocation. In that function, current->flags is accessed 3 times, which
may lead to duplicated accesses of the same memory location.

This is not usually a problem with minimal debug config options on, as
the compiler can optimize away the duplicated memory accesses. With most
of the debug config options on, however, that may not be the case. For
example, the x86-64 object size of __need_fs_reclaim() in a debug kernel
that calls current_gfp_context() was 309 bytes. With this patch applied,
the object size is reduced to 202 bytes. This is a saving of 107 bytes
and will probably be slightly faster too.

Use READ_ONCE() to access current->flags to prevent the compiler from
possibly accessing current->flags multiple times.

Signed-off-by: Waiman Long
Signed-off-by: Andrew Morton
Cc: Peter Zijlstra
Cc: Ingo Molnar
Cc: Mathieu Desnoyers
Cc: Michel Lespinasse
Link: http://lkml.kernel.org/r/20200618212936.9776-1-longman@redhat.com
Signed-off-by: Linus Torvalds
---
 include/linux/sched/mm.h | 8 +++++---
 1 file changed, 5 insertions(+), 3 deletions(-)

(limited to 'include/linux/sched/mm.h')

diff --git a/include/linux/sched/mm.h b/include/linux/sched/mm.h
index 85023ddc2dc2..f889e332912f 100644
--- a/include/linux/sched/mm.h
+++ b/include/linux/sched/mm.h
@@ -178,14 +178,16 @@ static inline bool in_vfork(struct task_struct *tsk)
  */
 static inline gfp_t current_gfp_context(gfp_t flags)
 {
-	if (unlikely(current->flags & (PF_MEMALLOC_NOIO | PF_MEMALLOC_NOFS))) {
+	unsigned int pflags = READ_ONCE(current->flags);
+
+	if (unlikely(pflags & (PF_MEMALLOC_NOIO | PF_MEMALLOC_NOFS))) {
 		/*
 		 * NOIO implies both NOIO and NOFS and it is a weaker context
 		 * so always make sure it makes precedence
 		 */
-		if (current->flags & PF_MEMALLOC_NOIO)
+		if (pflags & PF_MEMALLOC_NOIO)
 			flags &= ~(__GFP_IO | __GFP_FS);
-		else if (current->flags & PF_MEMALLOC_NOFS)
+		else if (pflags & PF_MEMALLOC_NOFS)
 			flags &= ~__GFP_FS;
 	}
 	return flags;
-- cgit
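
A few usage notes on the interfaces these patches touch. mmdrop(), whose
duplicate declaration the first patch removes, pairs with mmgrab(): together
they pin the mm_struct itself via mm_count, not the address space via
mm_users, which is what the mmgrab() kerneldoc touched by the second patch
describes. A minimal sketch, with the surrounding helper invented purely for
illustration:

#include <linux/sched/mm.h>

/* Hypothetical helper, for illustration only: keep a task's mm descriptor
 * valid across a window where the owning task may exit. */
static void hold_mm_example(struct mm_struct *mm)
{
	mmgrab(mm);	/* bump mm->mm_count; the mm_struct cannot be freed */

	/*
	 * The owning task may exit here and its mappings may be torn down,
	 * but the mm_struct itself stays valid until mmdrop().
	 */

	mmdrop(mm);	/* drop mm_count; frees the mm_struct once it hits zero */
}

When the mappings themselves must stay usable, mmget()/mmput() (or
mmget_not_zero(), as the same comment notes) is the appropriate pair instead.
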
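The memalloc_noio/nofs helpers whose kerneldoc the second patch tidies up form
a scoped API: the save call sets the corresponding PF_MEMALLOC_* flag on
current, and reclaim done for allocations inside the scope has __GFP_IO or
__GFP_FS filtered out by current_gfp_context(). A minimal sketch of the NOFS
pairing that the fixed comment insists on; the helper name and the kmalloc()
call are illustrative placeholders, not code from the patches:

#include <linux/sched/mm.h>
#include <linux/slab.h>

/* Hypothetical caller, for illustration only: an allocation that must not
 * recurse into filesystem reclaim. */
static void *nofs_alloc_example(size_t len)
{
	unsigned int nofs_flags;
	void *buf;

	nofs_flags = memalloc_nofs_save();	/* sets PF_MEMALLOC_NOFS on current */

	/*
	 * Reclaim triggered by this allocation sees the gfp_mask filtered
	 * through current_gfp_context(), i.e. with __GFP_FS cleared.
	 */
	buf = kmalloc(len, GFP_KERNEL);

	/* Only ever pass the value returned by the pairing save call. */
	memalloc_nofs_restore(nofs_flags);

	return buf;
}

Passing anything other than the value returned by the matching save call would
corrupt an enclosing scope, which is exactly what the corrected comment warns
about.
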
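The third patch only removes the PF_MEMALLOC_NOCMA handling from this header;
the replacement it describes keys the CMA decision off the allocator-internal
alloc_flags and lands in mm/page_alloc.c, outside this file-limited view. A
rough sketch of that approach, assuming the allocator-internal ALLOC_CMA flag
and not claiming to reproduce the exact upstream hunk:

/*
 * Sketch of an alloc_flags-based CMA filter along the lines the changelog
 * describes: grant ALLOC_CMA only to movable requests made outside a
 * memalloc_nocma scope. Illustrative only.
 */
static inline unsigned int current_alloc_flags(gfp_t gfp_mask,
					       unsigned int alloc_flags)
{
#ifdef CONFIG_CMA
	unsigned int pflags = current->flags;

	if (!(pflags & PF_MEMALLOC_NOCMA) &&
	    gfp_migratetype(gfp_mask) == MIGRATE_MOVABLE)
		alloc_flags |= ALLOC_CMA;
#endif
	return alloc_flags;
}

Because alloc_flags is consulted on the fastpath as well as the slowpath, the
nocma scope then covers fastpath allocations too, addressing the first problem
the changelog calls out without touching __GFP_MOVABLE or ZONE_MOVABLE.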