author | Radim Krčmář <rkrcmar@redhat.com> | 2018-02-01 15:04:17 +0100
---|---|---
committer | Radim Krčmář <rkrcmar@redhat.com> | 2018-02-01 15:04:17 +0100
commit | 7bf14c28ee776be567855bd39ed8ff795ea19f55 (patch) |
tree | 6113748c673e85fccc2c56c050697789c00c6bc2 /mm/page_alloc.c |
parent | 87cedc6be55954c6efd6eca2e694132513f65a2a (diff) |
parent | 5fa4ec9cb2e6679e2f828033726f758ea314b9c5 (diff) |
Merge branch 'x86/hyperv' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Topic branch for stable KVM clocksource under Hyper-V.
Thanks to Christoffer Dall for resolving the ARM conflict.
Diffstat (limited to 'mm/page_alloc.c')
-rw-r--r-- | mm/page_alloc.c | 13 |
1 file changed, 13 insertions, 0 deletions
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 73f5d4556b3d..76c9688b6a0a 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -2684,6 +2684,7 @@ void free_unref_page_list(struct list_head *list)
 {
        struct page *page, *next;
        unsigned long flags, pfn;
+       int batch_count = 0;

        /* Prepare pages for freeing */
        list_for_each_entry_safe(page, next, list, lru) {
@@ -2700,6 +2701,16 @@ void free_unref_page_list(struct list_head *list)
                set_page_private(page, 0);
                trace_mm_page_free_batched(page);
                free_unref_page_commit(page, pfn);
+
+               /*
+                * Guard against excessive IRQ disabled times when we get
+                * a large list of pages to free.
+                */
+               if (++batch_count == SWAP_CLUSTER_MAX) {
+                       local_irq_restore(flags);
+                       batch_count = 0;
+                       local_irq_save(flags);
+               }
        }
        local_irq_restore(flags);
 }
@@ -6249,6 +6260,8 @@ void __paginginit zero_resv_unavail(void)
        pgcnt = 0;
        for_each_resv_unavail_range(i, &start, &end) {
                for (pfn = PFN_DOWN(start); pfn < PFN_UP(end); pfn++) {
+                       if (!pfn_valid(ALIGN_DOWN(pfn, pageblock_nr_pages)))
+                               continue;
                        mm_zero_struct_page(pfn_to_page(pfn));
                        pgcnt++;
                }
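The first two hunks bound how long interrupts stay disabled in free_unref_page_list(): every SWAP_CLUSTER_MAX pages, IRQs are briefly re-enabled and then disabled again, so a very long page list cannot keep interrupts off for an unbounded stretch. Below is a minimal userspace sketch of the same batching pattern, using a pthread mutex in place of local_irq_save()/local_irq_restore(); BATCH_MAX, struct item, and free_one() are illustrative names, not kernel identifiers.

```c
/*
 * Sketch of the batching pattern above: hold the lock (standing in
 * for disabled IRQs) for at most BATCH_MAX items at a time, so a long
 * list never keeps the lock held for an unbounded period.
 */
#include <pthread.h>
#include <stdlib.h>

#define BATCH_MAX 32                    /* plays the role of SWAP_CLUSTER_MAX */

struct item {
        struct item *next;
};

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

static void free_one(struct item *it)
{
        free(it);                       /* work done while the lock is held */
}

static void free_item_list(struct item *head)
{
        int batch_count = 0;

        pthread_mutex_lock(&lock);
        while (head) {
                struct item *next = head->next;

                free_one(head);
                head = next;

                /* Periodically drop the lock so waiters get a chance to run. */
                if (++batch_count == BATCH_MAX) {
                        pthread_mutex_unlock(&lock);
                        batch_count = 0;
                        pthread_mutex_lock(&lock);
                }
        }
        pthread_mutex_unlock(&lock);
}
```

Dropping and immediately retaking the lock at each batch boundary gives waiters (pending interrupts, in the kernel case) a bounded window to run, at the cost of one extra unlock/lock pair per BATCH_MAX items.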