summary | refs | log | tree | commit | diff
path: root/mm/vmstat.c
diff options
context:
space:
mode:
Diffstat (limited to 'mm/vmstat.c')
-rw-r--r--  mm/vmstat.c | 53
1 files changed, 31 insertions, 22 deletions
diff --git a/mm/vmstat.c b/mm/vmstat.c
index bb09c032eecf..65de88cdf40e 100644
--- a/mm/vmstat.c
+++ b/mm/vmstat.c
@@ -392,7 +392,7 @@ void __mod_node_page_state(struct pglist_data *pgdat, enum node_stat_item item,
delta >>= PAGE_SHIFT;
}
- /* See __mod_node_page_state */
+ /* See __mod_zone_page_state() */
preempt_disable_nested();
x = delta + __this_cpu_read(*p);
@@ -438,7 +438,7 @@ void __inc_zone_state(struct zone *zone, enum zone_stat_item item)
s8 __percpu *p = pcp->vm_stat_diff + item;
s8 v, t;
- /* See __mod_node_page_state */
+ /* See __mod_zone_page_state() */
preempt_disable_nested();
v = __this_cpu_inc_return(*p);
@@ -461,7 +461,7 @@ void __inc_node_state(struct pglist_data *pgdat, enum node_stat_item item)
VM_WARN_ON_ONCE(vmstat_item_in_bytes(item));
- /* See __mod_node_page_state */
+ /* See __mod_zone_page_state() */
preempt_disable_nested();
v = __this_cpu_inc_return(*p);
@@ -494,7 +494,7 @@ void __dec_zone_state(struct zone *zone, enum zone_stat_item item)
s8 __percpu *p = pcp->vm_stat_diff + item;
s8 v, t;
- /* See __mod_node_page_state */
+ /* See __mod_zone_page_state() */
preempt_disable_nested();
v = __this_cpu_dec_return(*p);
@@ -517,7 +517,7 @@ void __dec_node_state(struct pglist_data *pgdat, enum node_stat_item item)
VM_WARN_ON_ONCE(vmstat_item_in_bytes(item));
- /* See __mod_node_page_state */
+ /* See __mod_zone_page_state() */
preempt_disable_nested();
v = __this_cpu_dec_return(*p);
@@ -771,25 +771,28 @@ EXPORT_SYMBOL(dec_node_page_state);
/*
* Fold a differential into the global counters.
- * Returns the number of counters updated.
+ * Returns whether counters were updated.
*/
static int fold_diff(int *zone_diff, int *node_diff)
{
int i;
- int changes = 0;
+ bool changed = false;
- for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++)
+ for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++) {
if (zone_diff[i]) {
atomic_long_add(zone_diff[i], &vm_zone_stat[i]);
- changes++;
+ changed = true;
+ }
}
- for (i = 0; i < NR_VM_NODE_STAT_ITEMS; i++)
+ for (i = 0; i < NR_VM_NODE_STAT_ITEMS; i++) {
if (node_diff[i]) {
atomic_long_add(node_diff[i], &vm_node_stat[i]);
- changes++;
+ changed = true;
+ }
}
- return changes;
+
+ return changed;
}
/*
@@ -806,16 +809,16 @@ static int fold_diff(int *zone_diff, int *node_diff)
* with the global counters. These could cause remote node cache line
* bouncing and will have to be only done when necessary.
*
- * The function returns the number of global counters updated.
+ * The function returns whether global counters were updated.
*/
-static int refresh_cpu_vm_stats(bool do_pagesets)
+static bool refresh_cpu_vm_stats(bool do_pagesets)
{
struct pglist_data *pgdat;
struct zone *zone;
int i;
int global_zone_diff[NR_VM_ZONE_STAT_ITEMS] = { 0, };
int global_node_diff[NR_VM_NODE_STAT_ITEMS] = { 0, };
- int changes = 0;
+ bool changed = false;
for_each_populated_zone(zone) {
struct per_cpu_zonestat __percpu *pzstats = zone->per_cpu_zonestats;
@@ -839,7 +842,8 @@ static int refresh_cpu_vm_stats(bool do_pagesets)
if (do_pagesets) {
cond_resched();
- changes += decay_pcp_high(zone, this_cpu_ptr(pcp));
+ if (decay_pcp_high(zone, this_cpu_ptr(pcp)))
+ changed = true;
#ifdef CONFIG_NUMA
/*
* Deal with draining the remote pageset of this
@@ -861,13 +865,13 @@ static int refresh_cpu_vm_stats(bool do_pagesets)
}
if (__this_cpu_dec_return(pcp->expire)) {
- changes++;
+ changed = true;
continue;
}
if (__this_cpu_read(pcp->count)) {
drain_zone_pages(zone, this_cpu_ptr(pcp));
- changes++;
+ changed = true;
}
#endif
}
@@ -887,8 +891,9 @@ static int refresh_cpu_vm_stats(bool do_pagesets)
}
}
- changes += fold_diff(global_zone_diff, global_node_diff);
- return changes;
+ if (fold_diff(global_zone_diff, global_node_diff))
+ changed = true;
+ return changed;
}
/*
@@ -1847,9 +1852,13 @@ static void zoneinfo_show_print(struct seq_file *m, pg_data_t *pgdat,
}
seq_printf(m,
"\n node_unreclaimable: %u"
- "\n start_pfn: %lu",
+ "\n start_pfn: %lu"
+ "\n reserved_highatomic: %lu"
+ "\n free_highatomic: %lu",
atomic_read(&pgdat->kswapd_failures) >= MAX_RECLAIM_RETRIES,
- zone->zone_start_pfn);
+ zone->zone_start_pfn,
+ zone->nr_reserved_highatomic,
+ zone->nr_free_highatomic);
seq_putc(m, '\n');
}