path: root/mm/memcontrol.c
diff options
authorJohannes Weiner <>2021-04-29 22:55:32 -0700
committerLinus Torvalds <>2021-04-30 11:20:37 -0700
commit1c824a680b1b67ad43c0908f11a70bcf37af56d5 (patch)
tree4e732f16ab91e6bf8a0e31ac8417342f9fb693dc /mm/memcontrol.c
parent79e3094c53c56d0d4da23f578de271e7602ba5ed (diff)
mm: page-writeback: simplify memcg handling in test_clear_page_writeback()
Page writeback doesn't hold a page reference, which allows truncate to free a page the second PageWriteback is cleared. This used to require special attention in test_clear_page_writeback(), where we had to be careful not to rely on the unstable page->memcg binding and look up all the necessary information before clearing the writeback flag. Since commit 073861ed77b6 ("mm: fix VM_BUG_ON(PageTail) and BUG_ON(PageWriteback)") test_clear_page_writeback() is called with an explicit reference on the page, and this dance is no longer needed. Use unlock_page_memcg() and dec_lruvec_page_state() directly. This removes the last user of the lock_page_memcg() return value, change it to void. Touch up the comments in there as well. This also removes the last extern user of __unlock_page_memcg(), make it static. Further, it removes the last user of dec_lruvec_state(), delete it, along with a few other unused helpers. Link: Signed-off-by: Johannes Weiner <> Acked-by: Hugh Dickins <> Reviewed-by: Shakeel Butt <> Acked-by: Michal Hocko <> Cc: Roman Gushchin <> Signed-off-by: Andrew Morton <> Signed-off-by: Linus Torvalds <>
Diffstat (limited to 'mm/memcontrol.c')
1 file changed, 11 insertions, 25 deletions
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index e064ac0d850a..06caac775abb 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -2118,11 +2118,10 @@ void mem_cgroup_print_oom_group(struct mem_cgroup *memcg)
* This function protects unlocked LRU pages from being moved to
* another cgroup.
- * It ensures lifetime of the returned memcg. Caller is responsible
- * for the lifetime of the page; __unlock_page_memcg() is available
- * when @page might get freed inside the locked section.
+ * It ensures lifetime of the locked memcg. Caller is responsible
+ * for the lifetime of the page.
-struct mem_cgroup *lock_page_memcg(struct page *page)
+void lock_page_memcg(struct page *page)
struct page *head = compound_head(page); /* rmap on tail pages */
struct mem_cgroup *memcg;
@@ -2132,21 +2131,15 @@ struct mem_cgroup *lock_page_memcg(struct page *page)
* The RCU lock is held throughout the transaction. The fast
* path can get away without acquiring the memcg->move_lock
* because page moving starts with an RCU grace period.
- *
- * The RCU lock also protects the memcg from being freed when
- * the page state that is going to change is the only thing
- * preventing the page itself from being freed. E.g. writeback
- * doesn't hold a page reference and relies on PG_writeback to
- * keep off truncation, migration and so forth.
if (mem_cgroup_disabled())
- return NULL;
+ return;
memcg = page_memcg(head);
if (unlikely(!memcg))
- return NULL;
+ return;
@@ -2155,7 +2148,7 @@ again:
if (atomic_read(&memcg->moving_account) <= 0)
- return memcg;
+ return;
spin_lock_irqsave(&memcg->move_lock, flags);
if (memcg != page_memcg(head)) {
@@ -2164,24 +2157,17 @@ again:
- * When charge migration first begins, we can have locked and
- * unlocked page stat updates happening concurrently. Track
- * the task who has the lock for unlock_page_memcg().
+ * When charge migration first begins, we can have multiple
+ * critical sections holding the fast-path RCU lock and one
+ * holding the slowpath move_lock. Track the task who has the
+ * move_lock for unlock_page_memcg().
memcg->move_lock_task = current;
memcg->move_lock_flags = flags;
- return memcg;
- * __unlock_page_memcg - unlock and unpin a memcg
- * @memcg: the memcg
- *
- * Unlock and unpin a memcg returned by lock_page_memcg().
- */
-void __unlock_page_memcg(struct mem_cgroup *memcg)
+static void __unlock_page_memcg(struct mem_cgroup *memcg)
if (memcg && memcg->move_lock_task == current) {
unsigned long flags = memcg->move_lock_flags;