commit    bcf7e34a71afe24dc210b7825f6f139774cb905c
tree      d81a8f89eb103290ae70008fde185c068d9f7a4f /mm/memory.c
parent    eda3c029899cbf435d76fea43b7e1404439ccec9
parent    688d191821de7893043f5a37970472627aaffa4e
author    Steve French <sfrench@us.ibm.com>  2005-08-02 21:20:47 -0700
committer Steve French <sfrench@us.ibm.com>  2005-08-02 21:20:47 -0700

Merge with /pub/scm/linux/kernel/git/torvalds/linux-2.6.git
Diffstat (limited to 'mm/memory.c'):
-rw-r--r--  mm/memory.c | 46
1 file changed, 21 insertions(+), 25 deletions(-)
diff --git a/mm/memory.c b/mm/memory.c
index beabdefa6254..2405289dfdf8 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -776,8 +776,8 @@ unsigned long zap_page_range(struct vm_area_struct *vma, unsigned long address,
* Do a quick page-table lookup for a single page.
* mm->page_table_lock must be held.
*/
-static struct page *
-__follow_page(struct mm_struct *mm, unsigned long address, int read, int write)
+static struct page *__follow_page(struct mm_struct *mm, unsigned long address,
+ int read, int write, int accessed)
{
pgd_t *pgd;
pud_t *pud;
@@ -811,16 +811,15 @@ __follow_page(struct mm_struct *mm, unsigned long address, int read, int write)
pte = *ptep;
pte_unmap(ptep);
if (pte_present(pte)) {
- if (write && !pte_write(pte))
+ if (write && !pte_dirty(pte))
goto out;
if (read && !pte_read(pte))
goto out;
pfn = pte_pfn(pte);
if (pfn_valid(pfn)) {
page = pfn_to_page(pfn);
- if (write && !pte_dirty(pte) && !PageDirty(page))
- set_page_dirty(page);
- mark_page_accessed(page);
+ if (accessed)
+ mark_page_accessed(page);
return page;
}
}
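
The hunk above swaps the pte_write() test for pte_dirty() on write lookups and gates mark_page_accessed() behind the new accessed flag. A minimal sketch of the property the dirty-bit test relies on, assuming the 2.6-era fault path (write_fault_done is an illustrative helper, not a kernel symbol): do_wp_page() builds the replacement pte with pte_mkdirty(), while maybe_mkwrite() sets the write bit only for VM_WRITE vmas, so after a forced write fault the pte ends up dirty but not necessarily writable.

	/*
	 * Sketch only: a dirty pte means any COW break was already
	 * resolved by the fault path, so the page can be handed out
	 * for writing even though pte_write() may still be clear
	 * (e.g. a forced write into a read-only private mapping).
	 */
	static inline int write_fault_done(pte_t pte)
	{
		return pte_dirty(pte);
	}
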
@@ -829,16 +828,19 @@ out:
return NULL;
}
-struct page *
+inline struct page *
follow_page(struct mm_struct *mm, unsigned long address, int write)
{
- return __follow_page(mm, address, /*read*/0, write);
+ return __follow_page(mm, address, 0, write, 1);
}
-int
-check_user_page_readable(struct mm_struct *mm, unsigned long address)
+/*
+ * check_user_page_readable() can be called from interrupt context by oprofile,
+ * so we need to avoid taking any non-irq-safe locks
+ */
+int check_user_page_readable(struct mm_struct *mm, unsigned long address)
{
- return __follow_page(mm, address, /*read*/1, /*write*/0) != NULL;
+ return __follow_page(mm, address, 1, 0, 0) != NULL;
}
EXPORT_SYMBOL(check_user_page_readable);
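
The accessed flag exists for the interrupt-context case the comment describes: mark_page_accessed() can end up in activate_page(), which takes LRU locks that are not irq-safe, so check_user_page_readable() passes accessed=0 and skips it. A minimal sketch of that sort of caller, loosely modeled on oprofile's user-stack walker (frame_head and frame_readable are illustrative names, not the kernel's):

	struct frame_head {
		struct frame_head *ebp;	/* saved frame pointer */
		unsigned long ret;	/* return address */
	};

	/*
	 * Illustrative only: runs in NMI/interrupt context, so it may
	 * only ask "is this user address mapped and readable?"; it must
	 * never fault the page in or touch LRU state.
	 */
	static int frame_readable(struct mm_struct *mm, struct frame_head *head)
	{
		return check_user_page_readable(mm, (unsigned long)head) &&
		       check_user_page_readable(mm, (unsigned long)&head->ret);
	}
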
@@ -908,9 +910,13 @@ int get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
pud = pud_offset(pgd, pg);
BUG_ON(pud_none(*pud));
pmd = pmd_offset(pud, pg);
- BUG_ON(pmd_none(*pmd));
+ if (pmd_none(*pmd))
+ return i ? : -EFAULT;
pte = pte_offset_map(pmd, pg);
- BUG_ON(pte_none(*pte));
+ if (pte_none(*pte)) {
+ pte_unmap(pte);
+ return i ? : -EFAULT;
+ }
if (pages) {
pages[i] = pte_page(*pte);
get_page(pages[i]);
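
The two BUG_ON()s in this path become soft failures. "return i ? : -EFAULT;" uses the GNU C conditional with an omitted middle operand: "x ? : y" means "x ? x : y" with x evaluated only once, so the call returns however many pages were already pinned, or -EFAULT when the failure hit the very first one. Spelled out without the extension:

	if (pte_none(*pte)) {
		pte_unmap(pte);
		return i ? i : -EFAULT;	/* same as "i ? : -EFAULT" */
	}
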
@@ -936,10 +942,9 @@ int get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
spin_lock(&mm->page_table_lock);
do {
struct page *page;
- int lookup_write = write;
cond_resched_lock(&mm->page_table_lock);
- while (!(page = follow_page(mm, start, lookup_write))) {
+ while (!(page = follow_page(mm, start, write))) {
/*
* Shortcut for anonymous pages. We don't want
* to force the creation of pages tables for
@@ -947,8 +952,7 @@ int get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
* nobody touched so far. This is important
* for doing a core dump for these mappings.
*/
- if (!lookup_write &&
- untouched_anonymous_page(mm,vma,start)) {
+ if (!write && untouched_anonymous_page(mm,vma,start)) {
page = ZERO_PAGE(start);
break;
}
@@ -967,14 +971,6 @@ int get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
default:
BUG();
}
- /*
- * Now that we have performed a write fault
- * and surely no longer have a shared page we
- * shouldn't write, we shouldn't ignore an
- * unwritable page in the page table if
- * we are forcing write access.
- */
- lookup_write = write && !force;
spin_lock(&mm->page_table_lock);
}
if (pages) {
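
For context, a minimal sketch of a typical caller of this era's eight-argument get_user_pages() (the buffer size and variable names are illustrative). The caller holds mmap_sem across the call and owns one reference on every page returned:

	struct page *pages[16];
	int i, got;

	down_read(&current->mm->mmap_sem);
	got = get_user_pages(current, current->mm, start, 16,
			     1 /* write */, 0 /* force */, pages, NULL);
	up_read(&current->mm->mmap_sem);

	if (got < 0)
		return got;		/* e.g. -EFAULT before the first page */

	/* ... use pages[0..got-1] ... */

	for (i = 0; i < got; i++)
		page_cache_release(pages[i]);	/* drop get_user_pages' reference */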