Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net

diff --git a/mm/page-writeback.c b/mm/page-writeback.c
index 0bca237..f4cd7d8 100644
--- a/mm/page-writeback.c
+++ b/mm/page-writeback.c
@@ -299,9 +299,6 @@ static unsigned long node_dirtyable_memory(struct pglist_data *pgdat)
 
        return nr_pages;
 }
-#ifdef CONFIG_HIGHMEM
-atomic_t highmem_file_pages;
-#endif
 
 static unsigned long highmem_dirtyable_memory(unsigned long total)
 {
@@ -309,22 +306,25 @@ static unsigned long highmem_dirtyable_memory(unsigned long total)
        int node;
        unsigned long x = 0;
        int i;
-       unsigned long dirtyable = atomic_read(&highmem_file_pages);
 
        for_each_node_state(node, N_HIGH_MEMORY) {
                for (i = ZONE_NORMAL + 1; i < MAX_NR_ZONES; i++) {
                        struct zone *z;
+                       unsigned long nr_pages;
 
                        if (!is_highmem_idx(i))
                                continue;
 
                        z = &NODE_DATA(node)->node_zones[i];
-                       dirtyable += zone_page_state(z, NR_FREE_PAGES);
+                       if (!populated_zone(z))
+                               continue;
 
+                       nr_pages = zone_page_state(z, NR_FREE_PAGES);
                        /* watch for underflows */
-                       dirtyable -= min(dirtyable, high_wmark_pages(z));
-
-                       x += dirtyable;
+                       nr_pages -= min(nr_pages, high_wmark_pages(z));
+                       nr_pages += zone_page_state(z, NR_ZONE_INACTIVE_FILE);
+                       nr_pages += zone_page_state(z, NR_ZONE_ACTIVE_FILE);
+                       x += nr_pages;
                }
        }
 
@@ -2462,6 +2462,7 @@ void account_page_dirtied(struct page *page, struct address_space *mapping)
 
                mem_cgroup_inc_page_stat(page, MEM_CGROUP_STAT_DIRTY);
                __inc_node_page_state(page, NR_FILE_DIRTY);
+               __inc_zone_page_state(page, NR_ZONE_WRITE_PENDING);
                __inc_node_page_state(page, NR_DIRTIED);
                __inc_wb_stat(wb, WB_RECLAIMABLE);
                __inc_wb_stat(wb, WB_DIRTIED);
@@ -2483,6 +2484,7 @@ void account_page_cleaned(struct page *page, struct address_space *mapping,
        if (mapping_cap_account_dirty(mapping)) {
                mem_cgroup_dec_page_stat(page, MEM_CGROUP_STAT_DIRTY);
                dec_node_page_state(page, NR_FILE_DIRTY);
+               dec_zone_page_state(page, NR_ZONE_WRITE_PENDING);
                dec_wb_stat(wb, WB_RECLAIMABLE);
                task_io_account_cancelled_write(PAGE_SIZE);
        }
@@ -2739,6 +2741,7 @@ int clear_page_dirty_for_io(struct page *page)
                if (TestClearPageDirty(page)) {
                        mem_cgroup_dec_page_stat(page, MEM_CGROUP_STAT_DIRTY);
                        dec_node_page_state(page, NR_FILE_DIRTY);
+                       dec_zone_page_state(page, NR_ZONE_WRITE_PENDING);
                        dec_wb_stat(wb, WB_RECLAIMABLE);
                        ret = 1;
                }
@@ -2785,6 +2788,7 @@ int test_clear_page_writeback(struct page *page)
        if (ret) {
                mem_cgroup_dec_page_stat(page, MEM_CGROUP_STAT_WRITEBACK);
                dec_node_page_state(page, NR_WRITEBACK);
+               dec_zone_page_state(page, NR_ZONE_WRITE_PENDING);
                inc_node_page_state(page, NR_WRITTEN);
        }
        unlock_page_memcg(page);
@@ -2839,6 +2843,7 @@ int __test_set_page_writeback(struct page *page, bool keep_write)
        if (!ret) {
                mem_cgroup_inc_page_stat(page, MEM_CGROUP_STAT_WRITEBACK);
                inc_node_page_state(page, NR_WRITEBACK);
+               inc_zone_page_state(page, NR_ZONE_WRITE_PENDING);
        }
        unlock_page_memcg(page);
        return ret;