/*
 *  linux/mm/swap_state.c
 *
 *  Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
 *  Swap reorganised 29.12.95, Stephen Tweedie
 *
 *  Rewritten to use page cache, (C) 1998 Stephen Tweedie
 */
#include <linux/mm.h>
#include <linux/gfp.h>
#include <linux/kernel_stat.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/init.h>
#include <linux/pagemap.h>
#include <linux/backing-dev.h>
#include <linux/blkdev.h>
#include <linux/pagevec.h>
#include <linux/migrate.h>
#include <linux/page_cgroup.h>

#include <asm/pgtable.h>

/*
 * swapper_space is a fiction, retained to simplify the path through
 * vmscan's shrink_page_list.
 */
static const struct address_space_operations swap_aops = {
        .writepage      = swap_writepage,
        .set_page_dirty = swap_set_page_dirty,
        .migratepage    = migrate_page,
};

static struct backing_dev_info swap_backing_dev_info = {
        .name           = "swap",
        .capabilities   = BDI_CAP_NO_ACCT_AND_WRITEBACK | BDI_CAP_SWAP_BACKED,
};

struct address_space swapper_spaces[MAX_SWAPFILES] = {
        [0 ... MAX_SWAPFILES - 1] = {
                .page_tree      = RADIX_TREE_INIT(GFP_ATOMIC|__GFP_NOWARN),
                .i_mmap_writable = ATOMIC_INIT(0),
                .a_ops          = &swap_aops,
                .backing_dev_info = &swap_backing_dev_info,
        }
};
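
/*
 * Each swap type gets its own address_space above, which spreads
 * tree_lock contention across swap areas.  A minimal sketch of how a
 * swap entry maps to its space, assuming the swap_address_space()
 * helper from <linux/swap.h> that the functions below rely on:
 *
 *        swp_entry_t entry = { .val = page_private(page) };
 *        struct address_space *space = swap_address_space(entry);
 *        // equivalent to &swapper_spaces[swp_type(entry)]
 */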

#define INC_CACHE_INFO(x)       do { swap_cache_info.x++; } while (0)

static struct {
        unsigned long add_total;
        unsigned long del_total;
        unsigned long find_success;
        unsigned long find_total;
} swap_cache_info;

unsigned long total_swapcache_pages(void)
{
        int i;
        unsigned long ret = 0;

        for (i = 0; i < MAX_SWAPFILES; i++)
                ret += swapper_spaces[i].nrpages;
        return ret;
}

static atomic_t swapin_readahead_hits = ATOMIC_INIT(4);

void show_swap_cache_info(void)
{
        printk("%lu pages in swap cache\n", total_swapcache_pages());
        printk("Swap cache stats: add %lu, delete %lu, find %lu/%lu\n",
                swap_cache_info.add_total, swap_cache_info.del_total,
                swap_cache_info.find_success, swap_cache_info.find_total);
        printk("Free swap  = %ldkB\n",
                get_nr_swap_pages() << (PAGE_SHIFT - 10));
        printk("Total swap = %lukB\n", total_swap_pages << (PAGE_SHIFT - 10));
}

/*
 * __add_to_swap_cache resembles add_to_page_cache_locked on swapper_space,
 * but sets SwapCache flag and private instead of mapping and index.
 */
int __add_to_swap_cache(struct page *page, swp_entry_t entry)
{
        int error;
        struct address_space *address_space;

        VM_BUG_ON_PAGE(!PageLocked(page), page);
        VM_BUG_ON_PAGE(PageSwapCache(page), page);
        VM_BUG_ON_PAGE(!PageSwapBacked(page), page);

        page_cache_get(page);
        SetPageSwapCache(page);
        set_page_private(page, entry.val);

        address_space = swap_address_space(entry);
        spin_lock_irq(&address_space->tree_lock);
        error = radix_tree_insert(&address_space->page_tree,
                                        entry.val, page);
        if (likely(!error)) {
                address_space->nrpages++;
                __inc_zone_page_state(page, NR_FILE_PAGES);
                INC_CACHE_INFO(add_total);
        }
        spin_unlock_irq(&address_space->tree_lock);

        if (unlikely(error)) {
                /*
                 * Only the context which has set the SWAP_HAS_CACHE flag
                 * would call add_to_swap_cache().
                 * So add_to_swap_cache() doesn't return -EEXIST.
                 */
                VM_BUG_ON(error == -EEXIST);
                set_page_private(page, 0UL);
                ClearPageSwapCache(page);
                page_cache_release(page);
        }

        return error;
}
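
/*
 * A hedged sketch of the state a successful __add_to_swap_cache()
 * leaves behind; the assertions below are illustrative, not part of
 * the original code:
 *
 *        VM_BUG_ON_PAGE(!PageSwapCache(page), page);
 *        VM_BUG_ON_PAGE(page_private(page) != entry.val, page);
 *        // the swap cache holds one extra page reference, and the
 *        // page is accounted to the zone's NR_FILE_PAGES
 */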

int add_to_swap_cache(struct page *page, swp_entry_t entry, gfp_t gfp_mask)
{
        int error;

        error = radix_tree_maybe_preload(gfp_mask);
        if (!error) {
                error = __add_to_swap_cache(page, entry);
                radix_tree_preload_end();
        }
        return error;
}

/*
 * This must be called only on pages that have
 * been verified to be in the swap cache.
 */
void __delete_from_swap_cache(struct page *page)
{
        swp_entry_t entry;
        struct address_space *address_space;

        VM_BUG_ON_PAGE(!PageLocked(page), page);
        VM_BUG_ON_PAGE(!PageSwapCache(page), page);
        VM_BUG_ON_PAGE(PageWriteback(page), page);

        entry.val = page_private(page);
        address_space = swap_address_space(entry);
        radix_tree_delete(&address_space->page_tree, page_private(page));
        set_page_private(page, 0);
        ClearPageSwapCache(page);
        address_space->nrpages--;
        __dec_zone_page_state(page, NR_FILE_PAGES);
        INC_CACHE_INFO(del_total);
}

/**
 * add_to_swap - allocate swap space for a page
 * @page: page we want to move to swap
 * @list: list the tail pages are added to if a transparent huge page
 *        must be split
 *
 * Allocate swap space for the page and add the page to the
 * swap cache.  Caller needs to hold the page lock.
 *
 * Returns 1 on success, 0 on failure.
 */
int add_to_swap(struct page *page, struct list_head *list)
{
        swp_entry_t entry;
        int err;

        VM_BUG_ON_PAGE(!PageLocked(page), page);
        VM_BUG_ON_PAGE(!PageUptodate(page), page);

        entry = get_swap_page();
        if (!entry.val)
                return 0;

        if (unlikely(PageTransHuge(page)))
                if (unlikely(split_huge_page_to_list(page, list))) {
                        swapcache_free(entry);
                        return 0;
                }

        /*
         * Radix-tree node allocations from PF_MEMALLOC contexts could
         * completely exhaust the page allocator. __GFP_NOMEMALLOC
         * stops emergency reserves from being allocated.
         *
         * TODO: this could cause a theoretical memory reclaim
         * deadlock in the swap out path.
         */
        /*
         * Add it to the swap cache and mark it dirty
         */
        err = add_to_swap_cache(page, entry,
                        __GFP_HIGH|__GFP_NOMEMALLOC|__GFP_NOWARN);

        if (!err) {     /* Success */
                SetPageDirty(page);
                return 1;
        } else {        /* -ENOMEM radix-tree allocation failure */
                /*
                 * add_to_swap_cache() doesn't return -EEXIST, so we can safely
                 * clear SWAP_HAS_CACHE flag.
                 */
                swapcache_free(entry);
                return 0;
        }
}
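
/*
 * A hedged sketch of the typical caller, modelled on vmscan's
 * shrink_page_list(); page_list and activate_locked are illustrative
 * names, not definitions from this file:
 *
 *        if (PageAnon(page) && !PageSwapCache(page)) {
 *                if (!add_to_swap(page, page_list))
 *                        goto activate_locked; // no slot or split failed
 *        }
 *        // on success the page is dirty and in the swap cache, so
 *        // pageout() can write it through swap_writepage()
 */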

/*
 * This must be called only on pages that have
 * been verified to be in the swap cache and locked.
 * It will never put the page into the free list,
 * the caller has a reference on the page.
 */
void delete_from_swap_cache(struct page *page)
{
        swp_entry_t entry;
        struct address_space *address_space;

        entry.val = page_private(page);

        address_space = swap_address_space(entry);
        spin_lock_irq(&address_space->tree_lock);
        __delete_from_swap_cache(page);
        spin_unlock_irq(&address_space->tree_lock);

        swapcache_free(entry);
        page_cache_release(page);
}

/*
 * If we are the only user, then try to free up the swap cache.
 *
 * It's ok to check for PageSwapCache without the page lock
 * here because we are going to recheck again inside
 * try_to_free_swap() _with_ the lock.
 *                                      - Marcelo
 */
static inline void free_swap_cache(struct page *page)
{
        if (PageSwapCache(page) && !page_mapped(page) && trylock_page(page)) {
                try_to_free_swap(page);
                unlock_page(page);
        }
}

/*
 * Perform a free_page(), also freeing any swap cache associated with
 * this page if it is the last user of the page.
 */
void free_page_and_swap_cache(struct page *page)
{
        free_swap_cache(page);
        page_cache_release(page);
}

/*
 * Passed an array of pages, drop them all from swapcache and then release
 * them.  They are removed from the LRU and freed if this is their last use.
 */
void free_pages_and_swap_cache(struct page **pages, int nr)
{
        struct page **pagep = pages;

        lru_add_drain();
        while (nr) {
                int todo = min(nr, PAGEVEC_SIZE);
                int i;

                for (i = 0; i < todo; i++)
                        free_swap_cache(pagep[i]);
                release_pages(pagep, todo, false);
                pagep += todo;
                nr -= todo;
        }
}
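
/*
 * A hedged usage sketch: teardown paths such as the mmu_gather code
 * batch pages into an array and release them in one call.  The
 * collect_unmapped_pages() helper below is hypothetical:
 *
 *        struct page *batch[PAGEVEC_SIZE];
 *        int n = collect_unmapped_pages(batch, PAGEVEC_SIZE);
 *        free_pages_and_swap_cache(batch, n);
 */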

/*
 * Lookup a swap entry in the swap cache. A found page will be returned
 * unlocked and with its refcount incremented - we rely on the kernel
 * lock getting page table operations atomic even if we drop the page
 * lock before returning.
 */
struct page *lookup_swap_cache(swp_entry_t entry)
{
        struct page *page;

        page = find_get_page(swap_address_space(entry), entry.val);

        if (page) {
                INC_CACHE_INFO(find_success);
                if (TestClearPageReadahead(page))
                        atomic_inc(&swapin_readahead_hits);
        }

        INC_CACHE_INFO(find_total);
        return page;
}
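
/*
 * A minimal sketch of how a fault handler uses this lookup, in the
 * style of do_swap_page() (locking and error handling omitted):
 *
 *        entry = pte_to_swp_entry(orig_pte);
 *        page = lookup_swap_cache(entry);
 *        if (!page)
 *                page = swapin_readahead(entry, GFP_HIGHUSER_MOVABLE,
 *                                        vma, address);
 */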

/*
 * Locate a page of swap in physical memory, reserving swap cache space
 * and reading the disk if it is not already cached.
 * A failure return means that either the page allocation failed or that
 * the swap entry is no longer in use.
 */
struct page *read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask,
                        struct vm_area_struct *vma, unsigned long addr)
{
        struct page *found_page, *new_page = NULL;
        int err;

        do {
                /*
                 * First check the swap cache.  Since this is normally
                 * called after lookup_swap_cache() failed, re-calling
                 * that would confuse statistics.
                 */
                found_page = find_get_page(swap_address_space(entry),
                                        entry.val);
                if (found_page)
                        break;

                /*
                 * Get a new page to read into from swap.
                 */
                if (!new_page) {
                        new_page = alloc_page_vma(gfp_mask, vma, addr);
                        if (!new_page)
                                break;          /* Out of memory */
                }

                /*
                 * call radix_tree_preload() while we can wait.
                 */
                err = radix_tree_maybe_preload(gfp_mask & GFP_KERNEL);
                if (err)
                        break;

                /*
                 * Swap entry may have been freed since our caller observed it.
                 */
                err = swapcache_prepare(entry);
                if (err == -EEXIST) {
                        radix_tree_preload_end();
                        /*
                         * We might race against get_swap_page() and stumble
                         * across a SWAP_HAS_CACHE swap_map entry whose page
                         * has not been brought into the swapcache yet, while
                         * the other end is scheduled away waiting on discard
                         * I/O completion at scan_swap_map().
                         *
                         * In order to avoid turning this transitory state
                         * into a permanent loop around this -EEXIST case
                         * if !CONFIG_PREEMPT and the I/O completion happens
                         * to be waiting on the CPU waitqueue where we are now
                         * busy looping, we just conditionally invoke the
                         * scheduler here, if there are some more important
                         * tasks to run.
                         */
                        cond_resched();
                        continue;
                }
                if (err) {              /* swp entry is obsolete ? */
                        radix_tree_preload_end();
                        break;
                }

                /* May fail (-ENOMEM) if radix-tree node allocation failed. */
                __set_page_locked(new_page);
                SetPageSwapBacked(new_page);
                err = __add_to_swap_cache(new_page, entry);
                if (likely(!err)) {
                        radix_tree_preload_end();
                        /*
                         * Initiate read into locked page and return.
                         */
                        lru_cache_add_anon(new_page);
                        swap_readpage(new_page);
                        return new_page;
                }
                radix_tree_preload_end();
                ClearPageSwapBacked(new_page);
                __clear_page_locked(new_page);
                /*
                 * add_to_swap_cache() doesn't return -EEXIST, so we can safely
                 * clear SWAP_HAS_CACHE flag.
                 */
                swapcache_free(entry);
        } while (err != -ENOMEM);

        if (new_page)
                page_cache_release(new_page);
        return found_page;
}

static unsigned long swapin_nr_pages(unsigned long offset)
{
        static unsigned long prev_offset;
        unsigned int pages, max_pages, last_ra;
        static atomic_t last_readahead_pages;

        max_pages = 1 << ACCESS_ONCE(page_cluster);
        if (max_pages <= 1)
                return 1;

        /*
         * This heuristic has been found to work well on both sequential and
         * random loads, swapping to hard disk or to SSD: please don't ask
         * what the "+ 2" means, it just happens to work well, that's all.
         */
        pages = atomic_xchg(&swapin_readahead_hits, 0) + 2;
        if (pages == 2) {
                /*
                 * We can have no readahead hits to judge by: but must not get
                 * stuck here forever, so check for an adjacent offset instead
                 * (and don't even bother to check whether swap type is same).
                 */
                if (offset != prev_offset + 1 && offset != prev_offset - 1)
                        pages = 1;
                prev_offset = offset;
        } else {
                unsigned int roundup = 4;
                while (roundup < pages)
                        roundup <<= 1;
                pages = roundup;
        }

        if (pages > max_pages)
                pages = max_pages;

        /* Don't shrink readahead too fast */
        last_ra = atomic_read(&last_readahead_pages) / 2;
        if (pages < last_ra)
                pages = last_ra;
        atomic_set(&last_readahead_pages, pages);

        return pages;
}
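
/*
 * A worked example of the heuristic above, assuming page_cluster == 3
 * (so max_pages == 8): with 3 readahead hits since the last fault,
 * pages = 3 + 2 = 5, which rounds up to the next power of two, 8.
 * With no hits and a non-adjacent offset, pages = 1, but the
 * "don't shrink too fast" clamp keeps at least half of the previous
 * window, e.g. 8 / 2 = 4.
 */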
441
442 /**
443  * swapin_readahead - swap in pages in hope we need them soon
444  * @entry: swap entry of this memory
445  * @gfp_mask: memory allocation flags
446  * @vma: user vma this address belongs to
447  * @addr: target address for mempolicy
448  *
449  * Returns the struct page for entry and addr, after queueing swapin.
450  *
451  * Primitive swap readahead code. We simply read an aligned block of
452  * (1 << page_cluster) entries in the swap area. This method is chosen
453  * because it doesn't cost us any seek time.  We also make sure to queue
454  * the 'original' request together with the readahead ones...
455  *
456  * This has been extended to use the NUMA policies from the mm triggering
457  * the readahead.
458  *
459  * Caller must hold down_read on the vma->vm_mm if vma is not NULL.
460  */
461 struct page *swapin_readahead(swp_entry_t entry, gfp_t gfp_mask,
462                         struct vm_area_struct *vma, unsigned long addr)
463 {
464         struct page *page;
465         unsigned long entry_offset = swp_offset(entry);
466         unsigned long offset = entry_offset;
467         unsigned long start_offset, end_offset;
468         unsigned long mask;
469         struct blk_plug plug;
470
471         mask = swapin_nr_pages(offset) - 1;
472         if (!mask)
473                 goto skip;
474
475         /* Read a page_cluster sized and aligned cluster around offset. */
476         start_offset = offset & ~mask;
477         end_offset = offset | mask;
478         if (!start_offset)      /* First page is swap header. */
479                 start_offset++;
480
481         blk_start_plug(&plug);
482         for (offset = start_offset; offset <= end_offset ; offset++) {
483                 /* Ok, do the async read-ahead now */
484                 page = read_swap_cache_async(swp_entry(swp_type(entry), offset),
485                                                 gfp_mask, vma, addr);
486                 if (!page)
487                         continue;
488                 if (offset != entry_offset)
489                         SetPageReadahead(page);
490                 page_cache_release(page);
491         }
492         blk_finish_plug(&plug);
493
494         lru_add_drain();        /* Push any new pages onto the LRU now */
495 skip:
496         return read_swap_cache_async(entry, gfp_mask, vma, addr);
497 }
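
/*
 * A worked example of the cluster arithmetic above: for a fault at
 * swap offset 0x1234 with swapin_nr_pages() returning 8, mask == 7,
 * so start_offset == 0x1230 and end_offset == 0x1237: eight naturally
 * aligned slots are read around the target, and the faulting entry
 * itself is re-requested (and returned) by the final call.
 */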