Merge tag 'rtc-4.8' of git://git.kernel.org/pub/scm/linux/kernel/git/abelloni/linux
diff --git a/mm/vmstat.c b/mm/vmstat.c
index 02e7406..89cec42 100644
--- a/mm/vmstat.c
+++ b/mm/vmstat.c
@@ -921,25 +921,18 @@ int fragmentation_index(struct zone *zone, unsigned int order)
 const char * const vmstat_text[] = {
        /* enum zone_stat_item counters */
        "nr_free_pages",
-       "nr_alloc_batch",
-       "nr_zone_anon_lru",
-       "nr_zone_file_lru",
+       "nr_zone_inactive_anon",
+       "nr_zone_active_anon",
+       "nr_zone_inactive_file",
+       "nr_zone_active_file",
+       "nr_zone_unevictable",
+       "nr_zone_write_pending",
        "nr_mlock",
-       "nr_file_pages",
-       "nr_dirty",
-       "nr_writeback",
        "nr_slab_reclaimable",
        "nr_slab_unreclaimable",
        "nr_page_table_pages",
        "nr_kernel_stack",
-       "nr_unstable",
        "nr_bounce",
-       "nr_vmscan_write",
-       "nr_vmscan_immediate_reclaim",
-       "nr_writeback_temp",
-       "nr_shmem",
-       "nr_dirtied",
-       "nr_written",
 #if IS_ENABLED(CONFIG_ZSMALLOC)
        "nr_zspages",
 #endif
@@ -951,9 +944,6 @@ const char * const vmstat_text[] = {
        "numa_local",
        "numa_other",
 #endif
-       "nr_anon_transparent_hugepages",
-       "nr_shmem_hugepages",
-       "nr_shmem_pmdmapped",
        "nr_free_cma",
 
        /* Node-based counters */
@@ -970,6 +960,19 @@ const char * const vmstat_text[] = {
        "workingset_nodereclaim",
        "nr_anon_pages",
        "nr_mapped",
+       "nr_file_pages",
+       "nr_dirty",
+       "nr_writeback",
+       "nr_writeback_temp",
+       "nr_shmem",
+       "nr_shmem_hugepages",
+       "nr_shmem_pmdmapped",
+       "nr_anon_transparent_hugepages",
+       "nr_unstable",
+       "nr_vmscan_write",
+       "nr_vmscan_immediate_reclaim",
+       "nr_dirtied",
+       "nr_written",
 
        /* enum writeback_stat_item counters */
        "nr_dirty_threshold",
@@ -983,6 +986,8 @@ const char * const vmstat_text[] = {
        "pswpout",
 
        TEXTS_FOR_ZONES("pgalloc")
+       TEXTS_FOR_ZONES("allocstall")
+       TEXTS_FOR_ZONES("pgskip")
 
        "pgfree",
        "pgactivate",
@@ -1008,7 +1013,6 @@ const char * const vmstat_text[] = {
        "kswapd_low_wmark_hit_quickly",
        "kswapd_high_wmark_hit_quickly",
        "pageoutrun",
-       "allocstall",
 
        "pgrotated",
 
@@ -1414,11 +1418,35 @@ static const struct file_operations pagetypeinfo_file_ops = {
        .release        = seq_release,
 };
 
+static bool is_zone_first_populated(pg_data_t *pgdat, struct zone *zone)
+{
+       int zid;
+
+       for (zid = 0; zid < MAX_NR_ZONES; zid++) {
+               struct zone *compare = &pgdat->node_zones[zid];
+
+               if (populated_zone(compare))
+                       return zone == compare;
+       }
+
+       /* The zone must be somewhere! */
+       WARN_ON_ONCE(1);
+       return false;
+}
+
 static void zoneinfo_show_print(struct seq_file *m, pg_data_t *pgdat,
                                                        struct zone *zone)
 {
        int i;
        seq_printf(m, "Node %d, zone %8s", pgdat->node_id, zone->name);
+       if (is_zone_first_populated(pgdat, zone)) {
+               seq_printf(m, "\n  per-node stats");
+               for (i = 0; i < NR_VM_NODE_STAT_ITEMS; i++) {
+                       seq_printf(m, "\n      %-12s %lu",
+                               vmstat_text[i + NR_VM_ZONE_STAT_ITEMS],
+                               node_page_state(pgdat, i));
+               }
+       }
        seq_printf(m,
                   "\n  pages free     %lu"
                   "\n        min      %lu"
@@ -1631,10 +1659,9 @@ int vmstat_refresh(struct ctl_table *table, int write,
                val = atomic_long_read(&vm_zone_stat[i]);
                if (val < 0) {
                        switch (i) {
-                       case NR_ALLOC_BATCH:
                        case NR_PAGES_SCANNED:
                                /*
-                                * These are often seen to go negative in
+                                * This is often seen to go negative in
                                 * recent kernels, but not to go permanently
                                 * negative.  Whilst it would be nicer not to
                                 * have exceptions, rooting them out would be