From 6d7cd8c1373746a93dc868ee9d38a82df78b38aa Mon Sep 17 00:00:00 2001
From: Matthew Wilcox
Date: Tue, 6 Nov 2018 13:11:57 -0500
Subject: dax: Remove optimisation from dax_lock_mapping_entry

Skipping some of the revalidation after we sleep can lead to returning
a mapping which has already been freed. Just drop this optimisation.

Reported-by: Dan Williams
Fixes: 9f32d221301c ("dax: Convert dax_lock_mapping_entry to XArray")
Signed-off-by: Matthew Wilcox
---
 fs/dax.c | 7 ++-----
 1 file changed, 2 insertions(+), 5 deletions(-)

diff --git a/fs/dax.c b/fs/dax.c
index 616e36ea6aaa..529ac9d7c10a 100644
--- a/fs/dax.c
+++ b/fs/dax.c
@@ -383,11 +383,8 @@ bool dax_lock_mapping_entry(struct page *page)
 		entry = xas_load(&xas);
 		if (dax_is_locked(entry)) {
 			entry = get_unlocked_entry(&xas);
-			/* Did the page move while we slept? */
-			if (dax_to_pfn(entry) != page_to_pfn(page)) {
-				xas_unlock_irq(&xas);
-				continue;
-			}
+			xas_unlock_irq(&xas);
+			continue;
 		}
 		dax_lock_entry(&xas, entry);
 		xas_unlock_irq(&xas);

From 7ae2ea7dc45e8250a74cfaaecdce578427669ae5 Mon Sep 17 00:00:00 2001
From: Matthew Wilcox
Date: Fri, 9 Nov 2018 20:09:37 -0500
Subject: dax: Make sure the unlocking entry isn't locked

I wrote the semantics in the commit message, but didn't document them
in the source code. Use a BUG_ON instead (if any code does do this,
it's really buggy; we can't recover, and it's worth taking the machine
down).

Signed-off-by: Matthew Wilcox
---
 fs/dax.c | 1 +
 1 file changed, 1 insertion(+)

diff --git a/fs/dax.c b/fs/dax.c
index 529ac9d7c10a..7944417f5a71 100644
--- a/fs/dax.c
+++ b/fs/dax.c
@@ -255,6 +255,7 @@ static void dax_unlock_entry(struct xa_state *xas, void *entry)
 {
 	void *old;
 
+	BUG_ON(dax_is_locked(entry));
 	xas_reset(xas);
 	xas_lock_irq(xas);
 	old = xas_store(xas, entry);
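[Editor's note -- illustration, not part of the series. The two patches
above tighten one protocol: an entry is locked by storing a sentinel
value into its XArray slot and unlocked by storing the plain value back,
so an unlocker must never pass in the locked form, and anything learned
before sleeping on a locked entry must be re-derived after waking. The
user-space C sketch below mirrors only that control flow; every name in
it (slot, SLOT_LOCKED, get_unlocked, lock_slot, unlock_slot) is invented
for the example, and it spins where the kernel sleeps on a wait queue.

    #include <assert.h>
    #include <stdatomic.h>
    #include <stdio.h>

    #define SLOT_LOCKED 1UL  /* low bit as the lock marker, like DAX_LOCKED */

    static _Atomic unsigned long slot = 0x40UL;  /* an unlocked "entry" */

    /* Rough analogue of get_unlocked_entry(): wait for the bit to clear. */
    static unsigned long get_unlocked(void)
    {
        unsigned long v;

        while ((v = atomic_load(&slot)) & SLOT_LOCKED)
            ;  /* the kernel sleeps on a waitqueue here */
        return v;
    }

    /* Rough analogue of dax_lock_entry(): swap in the locked form. */
    static unsigned long lock_slot(void)
    {
        for (;;) {
            unsigned long v = get_unlocked();

            /*
             * The compare-exchange fails if the slot changed while we
             * waited; restarting from scratch is the rule the first
             * patch restores by dropping its partial revalidation.
             */
            if (atomic_compare_exchange_strong(&slot, &v, v | SLOT_LOCKED))
                return v;
        }
    }

    /* Rough analogue of dax_unlock_entry(): store the plain value back. */
    static void unlock_slot(unsigned long entry)
    {
        assert(!(entry & SLOT_LOCKED));  /* the new BUG_ON's precondition */
        atomic_store(&slot, entry);
    }

    int main(void)
    {
        unsigned long entry = lock_slot();

        printf("locked entry %#lx\n", entry);
        unlock_slot(entry);  /* passing entry | SLOT_LOCKED would trip it */
        return 0;
    }

Compile with -std=c11. The assert stands in for the BUG_ON: storing a
still-locked value back would leave the slot locked forever, which is
why the second patch prefers to crash loudly. End of note.]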
From c5bbd4515a05f8acb7e6ab6297044a529762cbf5 Mon Sep 17 00:00:00 2001
From: Matthew Wilcox
Date: Fri, 16 Nov 2018 14:37:06 -0500
Subject: dax: Reinstate RCU protection of inode

For the device-dax case, it is possible that the inode can go away
underneath us. The rcu_read_lock() was there to prevent it from being
freed, and not (as I thought) to protect the tree. Bring back the
rcu_read_lock() protection.

Also add a little kernel-doc; while this function is not exported to
modules, it is used from outside dax.c.

Reported-by: Dan Williams
Fixes: 9f32d221301c ("dax: Convert dax_lock_mapping_entry to XArray")
Signed-off-by: Matthew Wilcox
---
 fs/dax.c | 22 +++++++++++++++++++---
 1 file changed, 19 insertions(+), 3 deletions(-)

diff --git a/fs/dax.c b/fs/dax.c
index 7944417f5a71..ce87d21b3805 100644
--- a/fs/dax.c
+++ b/fs/dax.c
@@ -353,16 +353,27 @@ static struct page *dax_busy_page(void *entry)
 	return NULL;
 }
 
+/*
+ * dax_lock_mapping_entry - Lock the DAX entry corresponding to a page
+ * @page: The page whose entry we want to lock
+ *
+ * Context: Process context.
+ * Return: %true if the entry was locked or does not need to be locked.
+ */
 bool dax_lock_mapping_entry(struct page *page)
 {
 	XA_STATE(xas, NULL, 0);
 	void *entry;
+	bool locked;
 
+	/* Ensure page->mapping isn't freed while we look at it */
+	rcu_read_lock();
 	for (;;) {
 		struct address_space *mapping = READ_ONCE(page->mapping);
 
+		locked = false;
 		if (!dax_mapping(mapping))
-			return false;
+			break;
 
 		/*
 		 * In the device-dax case there's no need to lock, a
@@ -371,8 +382,9 @@ bool dax_lock_mapping_entry(struct page *page)
 		 * otherwise we would not have a valid pfn_to_page()
 		 * translation.
 		 */
+		locked = true;
 		if (S_ISCHR(mapping->host->i_mode))
-			return true;
+			break;
 
 		xas.xa = &mapping->i_pages;
 		xas_lock_irq(&xas);
@@ -383,14 +395,18 @@ bool dax_lock_mapping_entry(struct page *page)
 		xas_set(&xas, page->index);
 		entry = xas_load(&xas);
 		if (dax_is_locked(entry)) {
+			rcu_read_unlock();
 			entry = get_unlocked_entry(&xas);
 			xas_unlock_irq(&xas);
+			rcu_read_lock();
 			continue;
 		}
 		dax_lock_entry(&xas, entry);
 		xas_unlock_irq(&xas);
-		return true;
+		break;
 	}
+	rcu_read_unlock();
+	return locked;
 }
 
 void dax_unlock_mapping_entry(struct page *page)

From fda490d39fc0668d92e170d95c11e35a010019aa Mon Sep 17 00:00:00 2001
From: Matthew Wilcox
Date: Fri, 16 Nov 2018 15:07:31 -0500
Subject: dax: Fix dax_unlock_mapping_entry for PMD pages

Device DAX PMD pages do not set the PageHead bit for compound pages.
Fix for now by retrieving the PMD bit from the entry, but eventually we
will be passed the page size by the caller.

Reported-by: Dan Williams
Fixes: 9f32d221301c ("dax: Convert dax_lock_mapping_entry to XArray")
Signed-off-by: Matthew Wilcox
---
 fs/dax.c | 17 ++++++++---------
 1 file changed, 8 insertions(+), 9 deletions(-)

diff --git a/fs/dax.c b/fs/dax.c
index ce87d21b3805..5426252375f6 100644
--- a/fs/dax.c
+++ b/fs/dax.c
@@ -98,12 +98,6 @@ static void *dax_make_entry(pfn_t pfn, unsigned long flags)
 	return xa_mk_value(flags | (pfn_t_to_pfn(pfn) << DAX_SHIFT));
 }
 
-static void *dax_make_page_entry(struct page *page)
-{
-	pfn_t pfn = page_to_pfn_t(page);
-	return dax_make_entry(pfn, PageHead(page) ? DAX_PMD : 0);
-}
-
 static bool dax_is_locked(void *entry)
 {
 	return xa_to_value(entry) & DAX_LOCKED;
@@ -116,12 +110,12 @@ static unsigned int dax_entry_order(void *entry)
 	return 0;
 }
 
-static int dax_is_pmd_entry(void *entry)
+static unsigned long dax_is_pmd_entry(void *entry)
 {
 	return xa_to_value(entry) & DAX_PMD;
 }
 
-static int dax_is_pte_entry(void *entry)
+static bool dax_is_pte_entry(void *entry)
 {
 	return !(xa_to_value(entry) & DAX_PMD);
 }
@@ -413,11 +407,16 @@ void dax_unlock_mapping_entry(struct page *page)
 {
 	struct address_space *mapping = page->mapping;
 	XA_STATE(xas, &mapping->i_pages, page->index);
+	void *entry;
 
 	if (S_ISCHR(mapping->host->i_mode))
 		return;
 
-	dax_unlock_entry(&xas, dax_make_page_entry(page));
+	rcu_read_lock();
+	entry = xas_load(&xas);
+	rcu_read_unlock();
+	entry = dax_make_entry(page_to_pfn_t(page), dax_is_pmd_entry(entry));
+	dax_unlock_entry(&xas, entry);
 }
 
 /*
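[Editor's note -- illustration, not part of the series. The PMD fix
above works because a DAX entry already records its own size:
dax_make_entry() packs the pfn above a few flag bits, one of which is
DAX_PMD. Asking the page (via PageHead()) can disagree with what was
stored; asking the entry cannot. The sketch below replays that encoding
in plain user-space C. The constants mirror fs/dax.c's layout in spirit
only, and the real entries are additionally tagged as XArray value
entries via xa_mk_value(), which this sketch omits -- treat it as
illustrative, not as the kernel's ABI.

    #include <stdio.h>

    #define DAX_SHIFT   4
    #define DAX_LOCKED  (1UL << 0)
    #define DAX_PMD     (1UL << 1)

    /* Rough analogue of dax_make_entry(): flags live below the pfn. */
    static unsigned long make_entry(unsigned long pfn, unsigned long flags)
    {
        return flags | (pfn << DAX_SHIFT);
    }

    static unsigned long is_pmd_entry(unsigned long entry)
    {
        return entry & DAX_PMD;
    }

    int main(void)
    {
        unsigned long pte = make_entry(0x1234, 0);
        unsigned long pmd = make_entry(0x1200, DAX_PMD);

        /*
         * The removed dax_make_page_entry() guessed the size from
         * PageHead(page), which device-DAX PMD pages never set.
         * Reading the flag back from the stored entry, as the fixed
         * dax_unlock_mapping_entry() does, reproduces exactly what
         * was stored:
         */
        printf("pte entry %#lx: PMD bit %lu\n", pte, is_pmd_entry(pte));
        printf("pmd entry %#lx: PMD bit %lu\n", pmd, is_pmd_entry(pmd));
        return 0;
    }

End of note.]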
From 0e40de0338d005f73d46898a21544cd26f01b4ce Mon Sep 17 00:00:00 2001
From: Matthew Wilcox
Date: Fri, 16 Nov 2018 15:19:13 -0500
Subject: dax: Fix huge page faults

Using xas_load() with a PMD-sized xa_state would work if either a
PMD-sized entry was present or a PTE-sized entry was present in the
first 64 entries (of the 512 PTEs in a PMD on x86). If there was no PTE
in the first 64 entries, grab_mapping_entry() would believe there were
no entries present, allocate a PMD-sized entry and overwrite the PTE in
the page cache.

Use xas_find_conflict() instead, which turns out to simplify both
get_unlocked_entry() and grab_mapping_entry(). Also remove a
WARN_ON_ONCE from grab_mapping_entry() as it will have already
triggered in get_unlocked_entry().

Fixes: cfc93c6c6c96 ("dax: Convert dax_insert_pfn_mkwrite to XArray")
Signed-off-by: Matthew Wilcox
---
 fs/dax.c | 12 ++++--------
 1 file changed, 4 insertions(+), 8 deletions(-)

diff --git a/fs/dax.c b/fs/dax.c
index 5426252375f6..cf2394e2bf4b 100644
--- a/fs/dax.c
+++ b/fs/dax.c
@@ -216,9 +216,8 @@ static void *get_unlocked_entry(struct xa_state *xas)
 	ewait.wait.func = wake_exceptional_entry_func;
 
 	for (;;) {
-		entry = xas_load(xas);
-		if (!entry || xa_is_internal(entry) ||
-				WARN_ON_ONCE(!xa_is_value(entry)) ||
+		entry = xas_find_conflict(xas);
+		if (!entry || WARN_ON_ONCE(!xa_is_value(entry)) ||
 				!dax_is_locked(entry))
 			return entry;
 
@@ -458,11 +457,9 @@ static void *grab_mapping_entry(struct xa_state *xas,
 retry:
 	xas_lock_irq(xas);
 	entry = get_unlocked_entry(xas);
-	if (xa_is_internal(entry))
-		goto fallback;
 
 	if (entry) {
-		if (WARN_ON_ONCE(!xa_is_value(entry))) {
+		if (!xa_is_value(entry)) {
 			xas_set_err(xas, EIO);
 			goto out_unlock;
 		}
@@ -1641,8 +1638,7 @@ dax_insert_pfn_mkwrite(struct vm_fault *vmf, pfn_t pfn, unsigned int order)
 	/* Did we race with someone splitting entry or so? */
 	if (!entry ||
 	    (order == 0 && !dax_is_pte_entry(entry)) ||
-	    (order == PMD_ORDER && (xa_is_internal(entry) ||
-				    !dax_is_pmd_entry(entry)))) {
+	    (order == PMD_ORDER && !dax_is_pmd_entry(entry))) {
 		put_unlocked_entry(&xas, entry);
 		xas_unlock_irq(&xas);
 		trace_dax_insert_pfn_mkwrite_no_entry(mapping->host, vmf,

From 25bbe21bf427a81b8e3ccd480ea0e1d940256156 Mon Sep 17 00:00:00 2001
From: Matthew Wilcox
Date: Fri, 16 Nov 2018 15:50:02 -0500
Subject: dax: Avoid losing wakeup in dax_lock_mapping_entry

After calling get_unlocked_entry(), you have to call
put_unlocked_entry() to avoid subsequent waiters losing wakeups.

Fixes: c2a7d2a11552 ("filesystem-dax: Introduce dax_lock_mapping_entry()")
Cc: stable@vger.kernel.org
Signed-off-by: Matthew Wilcox
---
 fs/dax.c | 1 +
 1 file changed, 1 insertion(+)

diff --git a/fs/dax.c b/fs/dax.c
index cf2394e2bf4b..9bcce89ea18e 100644
--- a/fs/dax.c
+++ b/fs/dax.c
@@ -391,6 +391,7 @@ bool dax_lock_mapping_entry(struct page *page)
 			rcu_read_unlock();
 			entry = get_unlocked_entry(&xas);
 			xas_unlock_irq(&xas);
+			put_unlocked_entry(&xas, entry);
 			rcu_read_lock();
 			continue;
 		}
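[Editor's note -- illustration, not part of the series. The wakeup patch
above encodes a hand-off rule: get_unlocked_entry() waits exclusively,
so an unlock on this path wakes exactly one waiter, and a waiter that is
woken but then declines the entry has consumed a wakeup that belonged to
someone else -- it must pass it on with put_unlocked_entry(). The
pthreads sketch below demonstrates the rule with invented names
(get_unlocked, put_unlocked, waiter); pthread_cond_signal() plays the
part of the exclusive wake.

    #include <pthread.h>
    #include <stdbool.h>
    #include <stdio.h>
    #include <unistd.h>

    static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
    static pthread_cond_t waitq = PTHREAD_COND_INITIALIZER;
    static bool entry_locked = true;

    /* Rough analogue of get_unlocked_entry(): consume one wakeup. */
    static void get_unlocked(void)
    {
        pthread_mutex_lock(&lock);
        while (entry_locked)
            pthread_cond_wait(&waitq, &lock);
        pthread_mutex_unlock(&lock);
    }

    /* Rough analogue of put_unlocked_entry(): pass the wakeup on. */
    static void put_unlocked(void)
    {
        pthread_cond_signal(&waitq);
    }

    static void *waiter(void *arg)
    {
        get_unlocked();
        /*
         * This thread decided not to take the entry. Without the next
         * line the other waiter sleeps forever, because the single
         * wakeup from the unlocker was consumed here -- the exact
         * hang the patch fixes in dax_lock_mapping_entry().
         */
        put_unlocked();
        printf("waiter %ld done\n", (long)arg);
        return NULL;
    }

    int main(void)
    {
        pthread_t a, b;

        pthread_create(&a, NULL, waiter, (void *)1L);
        pthread_create(&b, NULL, waiter, (void *)2L);
        sleep(1);                      /* let both block in cond_wait */

        pthread_mutex_lock(&lock);
        entry_locked = false;          /* the holder unlocks the entry */
        pthread_mutex_unlock(&lock);
        pthread_cond_signal(&waitq);   /* ...and wakes a single waiter */

        pthread_join(a, NULL);
        pthread_join(b, NULL);
        return 0;
    }

Build with -pthread. Comment out put_unlocked() in waiter() and the
second join never returns. End of note.]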