/*
 *  linux/mm/swap_state.c
 *
 *  Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
 *  Swap reorganised 29.12.95, Stephen Tweedie
 *
 *  Rewritten to use page cache, (C) 1998 Stephen Tweedie
 */
#include <linux/mm.h>
#include <linux/gfp.h>
#include <linux/kernel_stat.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/init.h>
#include <linux/pagemap.h>
#include <linux/backing-dev.h>
#include <linux/blkdev.h>
#include <linux/pagevec.h>
#include <linux/migrate.h>

#include <asm/pgtable.h>

/*
 * swapper_space is a fiction, retained to simplify the path through
 * vmscan's shrink_page_list.
 */
static const struct address_space_operations swap_aops = {
	.writepage	= swap_writepage,
	.set_page_dirty	= swap_set_page_dirty,
#ifdef CONFIG_MIGRATION
	.migratepage	= migrate_page,
#endif
};

static struct backing_dev_info swap_backing_dev_info = {
	.name		= "swap",
	.capabilities	= BDI_CAP_NO_ACCT_AND_WRITEBACK | BDI_CAP_SWAP_BACKED,
};

struct address_space swapper_spaces[MAX_SWAPFILES] = {
	[0 ... MAX_SWAPFILES - 1] = {
		.page_tree	= RADIX_TREE_INIT(GFP_ATOMIC|__GFP_NOWARN),
		.i_mmap_writable = ATOMIC_INIT(0),
		.a_ops		= &swap_aops,
		.backing_dev_info = &swap_backing_dev_info,
	}
};
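
/*
 * There is one swapper address_space per swap type; the swap_address_space()
 * helper (in <linux/swap.h>) picks the right one by swp_type(), and the
 * radix tree in each space is indexed by the full swp_entry_t value.
 */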

#define INC_CACHE_INFO(x)	do { swap_cache_info.x++; } while (0)

static struct {
	unsigned long add_total;
	unsigned long del_total;
	unsigned long find_success;
	unsigned long find_total;
} swap_cache_info;

unsigned long total_swapcache_pages(void)
{
	int i;
	unsigned long ret = 0;

	for (i = 0; i < MAX_SWAPFILES; i++)
		ret += swapper_spaces[i].nrpages;
	return ret;
}

static atomic_t swapin_readahead_hits = ATOMIC_INIT(4);

void show_swap_cache_info(void)
{
	printk("%lu pages in swap cache\n", total_swapcache_pages());
	printk("Swap cache stats: add %lu, delete %lu, find %lu/%lu\n",
		swap_cache_info.add_total, swap_cache_info.del_total,
		swap_cache_info.find_success, swap_cache_info.find_total);
	printk("Free swap  = %ldkB\n",
		get_nr_swap_pages() << (PAGE_SHIFT - 10));
	printk("Total swap = %lukB\n", total_swap_pages << (PAGE_SHIFT - 10));
}

/*
 * __add_to_swap_cache resembles add_to_page_cache_locked on swapper_space,
 * but sets SwapCache flag and private instead of mapping and index.
 */
int __add_to_swap_cache(struct page *page, swp_entry_t entry)
{
	int error;
	struct address_space *address_space;

	VM_BUG_ON_PAGE(!PageLocked(page), page);
	VM_BUG_ON_PAGE(PageSwapCache(page), page);
	VM_BUG_ON_PAGE(!PageSwapBacked(page), page);

	page_cache_get(page);
	SetPageSwapCache(page);
	set_page_private(page, entry.val);

	address_space = swap_address_space(entry);
	spin_lock_irq(&address_space->tree_lock);
	error = radix_tree_insert(&address_space->page_tree,
					entry.val, page);
	if (likely(!error)) {
		address_space->nrpages++;
		__inc_zone_page_state(page, NR_FILE_PAGES);
		INC_CACHE_INFO(add_total);
	}
	spin_unlock_irq(&address_space->tree_lock);

	if (unlikely(error)) {
		/*
		 * Only a context that has set the SWAP_HAS_CACHE flag
		 * calls add_to_swap_cache(), so it never returns -EEXIST.
		 */
		VM_BUG_ON(error == -EEXIST);
		set_page_private(page, 0UL);
		ClearPageSwapCache(page);
		page_cache_release(page);
	}

	return error;
}
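
/*
 * Note: __add_to_swap_cache() inserts into the radix tree under tree_lock,
 * relying on the tree's GFP_ATOMIC|__GFP_NOWARN mask, so callers normally
 * call radix_tree_maybe_preload() first (as add_to_swap_cache() and
 * read_swap_cache_async() below do) to avoid atomic allocation failures.
 */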

int add_to_swap_cache(struct page *page, swp_entry_t entry, gfp_t gfp_mask)
{
	int error;

	error = radix_tree_maybe_preload(gfp_mask);
	if (!error) {
		error = __add_to_swap_cache(page, entry);
		radix_tree_preload_end();
	}
	return error;
}

/*
 * This must be called only on pages that have
 * been verified to be in the swap cache.
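 * The swapper address_space's tree_lock must also be held;
 * delete_from_swap_cache() below takes it before calling in here.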
 */
void __delete_from_swap_cache(struct page *page)
{
	swp_entry_t entry;
	struct address_space *address_space;

	VM_BUG_ON_PAGE(!PageLocked(page), page);
	VM_BUG_ON_PAGE(!PageSwapCache(page), page);
	VM_BUG_ON_PAGE(PageWriteback(page), page);

	entry.val = page_private(page);
	address_space = swap_address_space(entry);
	radix_tree_delete(&address_space->page_tree, page_private(page));
	set_page_private(page, 0);
	ClearPageSwapCache(page);
	address_space->nrpages--;
	__dec_zone_page_state(page, NR_FILE_PAGES);
	INC_CACHE_INFO(del_total);
}

/**
 * add_to_swap - allocate swap space for a page
 * @page: page we want to move to swap
 * @list: list to which any tail pages are added if the huge page has to
 *	  be split before it can be swapped
 *
 * Allocate swap space for the page and add the page to the
 * swap cache.  Caller needs to hold the page lock.
 */
int add_to_swap(struct page *page, struct list_head *list)
{
	swp_entry_t entry;
	int err;

	VM_BUG_ON_PAGE(!PageLocked(page), page);
	VM_BUG_ON_PAGE(!PageUptodate(page), page);

	entry = get_swap_page();
	if (!entry.val)
		return 0;

	if (unlikely(PageTransHuge(page)))
		if (unlikely(split_huge_page_to_list(page, list))) {
			swapcache_free(entry);
			return 0;
		}

	/*
	 * Radix-tree node allocations from PF_MEMALLOC contexts could
	 * completely exhaust the page allocator. __GFP_NOMEMALLOC
	 * stops emergency reserves from being allocated.
	 *
	 * TODO: this could cause a theoretical memory reclaim
	 * deadlock in the swap out path.
	 */
	/*
	 * Add it to the swap cache and mark it dirty
	 */
	err = add_to_swap_cache(page, entry,
			__GFP_HIGH|__GFP_NOMEMALLOC|__GFP_NOWARN);

	if (!err) {	/* Success */
		SetPageDirty(page);
		return 1;
	} else {	/* -ENOMEM radix-tree allocation failure */
		/*
		 * add_to_swap_cache() doesn't return -EEXIST, so we can safely
		 * clear SWAP_HAS_CACHE flag.
		 */
		swapcache_free(entry);
		return 0;
	}
}
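
/*
 * The expected caller of add_to_swap() is vmscan's shrink_page_list(),
 * which passes its private page list so that tail pages of a huge page
 * split above rejoin the list being reclaimed.
 */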

/*
 * This must be called only on pages that have
 * been verified to be in the swap cache and locked.
 * It will never put the page onto the free list:
 * the caller still holds a reference on the page.
 */
void delete_from_swap_cache(struct page *page)
{
	swp_entry_t entry;
	struct address_space *address_space;

	entry.val = page_private(page);

	address_space = swap_address_space(entry);
	spin_lock_irq(&address_space->tree_lock);
	__delete_from_swap_cache(page);
	spin_unlock_irq(&address_space->tree_lock);

	swapcache_free(entry);
	page_cache_release(page);
}

/*
 * If we are the only user, then try to free up the swap cache.
 *
 * It's OK to check for PageSwapCache without the page lock
 * here because we are going to recheck again inside
 * try_to_free_swap() _with_ the lock.
 *					- Marcelo
 */
static inline void free_swap_cache(struct page *page)
{
	if (PageSwapCache(page) && !page_mapped(page) && trylock_page(page)) {
		try_to_free_swap(page);
		unlock_page(page);
	}
}

/*
 * Perform a free_page(), also freeing any swap cache associated with
 * this page if it is the last user of the page.
 */
void free_page_and_swap_cache(struct page *page)
{
	free_swap_cache(page);
	page_cache_release(page);
}

/*
 * Passed an array of pages, drop them all from swapcache and then release
 * them.  They are removed from the LRU and freed if this is their last use.
 */
void free_pages_and_swap_cache(struct page **pages, int nr)
{
	struct page **pagep = pages;
	int i;

	lru_add_drain();
	for (i = 0; i < nr; i++)
		free_swap_cache(pagep[i]);
	release_pages(pagep, nr, false);
}
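
/*
 * The lru_add_drain() above flushes the per-cpu LRU-add pagevecs first,
 * presumably so that pages still sitting there (and thus still holding an
 * extra reference) can actually be freed by release_pages() here.
 */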

/*
 * Lookup a swap entry in the swap cache. A found page will be returned
 * unlocked and with its refcount incremented - we rely on the kernel
 * lock getting page table operations atomic even if we drop the page
 * lock before returning.
 */
struct page *lookup_swap_cache(swp_entry_t entry)
{
	struct page *page;

	page = find_get_page(swap_address_space(entry), entry.val);

	if (page) {
		INC_CACHE_INFO(find_success);
		if (TestClearPageReadahead(page))
			atomic_inc(&swapin_readahead_hits);
	}

	INC_CACHE_INFO(find_total);
	return page;
}
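
/*
 * find_success/find_total feed the hit rate printed by
 * show_swap_cache_info(), and clearing PageReadahead here bumps
 * swapin_readahead_hits, which swapin_nr_pages() below uses to grow or
 * shrink the readahead window.
 */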

/*
 * Locate a page of swap in physical memory, reserving swap cache space
 * and reading the disk if it is not already cached.
 * A failure return means that either the page allocation failed or that
 * the swap entry is no longer in use.
 */
struct page *read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask,
			struct vm_area_struct *vma, unsigned long addr)
{
	struct page *found_page, *new_page = NULL;
	int err;

	do {
		/*
		 * First check the swap cache.  Since this is normally
		 * called after lookup_swap_cache() failed, re-calling
		 * that would confuse statistics.
		 */
		found_page = find_get_page(swap_address_space(entry),
					entry.val);
		if (found_page)
			break;

		/*
		 * Get a new page to read into from swap.
		 */
		if (!new_page) {
			new_page = alloc_page_vma(gfp_mask, vma, addr);
			if (!new_page)
				break;		/* Out of memory */
		}

		/*
		 * Call radix_tree_maybe_preload() while we can wait.
		 */
		err = radix_tree_maybe_preload(gfp_mask & GFP_KERNEL);
		if (err)
			break;

		/*
		 * Swap entry may have been freed since our caller observed it.
		 */
		err = swapcache_prepare(entry);
		if (err == -EEXIST) {
			radix_tree_preload_end();
			/*
			 * We might race against get_swap_page() and stumble
			 * across a SWAP_HAS_CACHE swap_map entry whose page
			 * has not been brought into the swapcache yet, while
			 * the other end is scheduled away waiting on discard
			 * I/O completion at scan_swap_map().
			 *
			 * In order to avoid turning this transitory state
			 * into a permanent loop around this -EEXIST case
			 * if !CONFIG_PREEMPT and the I/O completion happens
			 * to be waiting on the CPU waitqueue where we are now
			 * busy looping, we just conditionally invoke the
			 * scheduler here, if there are some more important
			 * tasks to run.
			 */
			cond_resched();
			continue;
		}
		if (err) {		/* swp entry is obsolete ? */
			radix_tree_preload_end();
			break;
		}

		/* May fail (-ENOMEM) if radix-tree node allocation failed. */
		__set_page_locked(new_page);
		SetPageSwapBacked(new_page);
		err = __add_to_swap_cache(new_page, entry);
		if (likely(!err)) {
			radix_tree_preload_end();
			/*
			 * Initiate read into locked page and return.
			 */
			lru_cache_add_anon(new_page);
			swap_readpage(new_page);
			return new_page;
		}
		radix_tree_preload_end();
		ClearPageSwapBacked(new_page);
		__clear_page_locked(new_page);
		/*
		 * add_to_swap_cache() doesn't return -EEXIST, so we can safely
		 * clear SWAP_HAS_CACHE flag.
		 */
		swapcache_free(entry);
	} while (err != -ENOMEM);

	if (new_page)
		page_cache_release(new_page);
	return found_page;
}
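
/*
 * Note on the return value: a newly allocated page comes back locked with
 * the read already submitted via swap_readpage(), whereas a swap-cache hit
 * comes back unlocked and may still be under I/O from another thread.
 * Callers such as do_swap_page() lock the page and check PageUptodate()
 * before using its contents.
 */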

static unsigned long swapin_nr_pages(unsigned long offset)
{
	static unsigned long prev_offset;
	unsigned int pages, max_pages, last_ra;
	static atomic_t last_readahead_pages;

	max_pages = 1 << ACCESS_ONCE(page_cluster);
	if (max_pages <= 1)
		return 1;

	/*
	 * This heuristic has been found to work well on both sequential and
	 * random loads, swapping to hard disk or to SSD: please don't ask
	 * what the "+ 2" means, it just happens to work well, that's all.
	 */
	pages = atomic_xchg(&swapin_readahead_hits, 0) + 2;
	if (pages == 2) {
		/*
		 * We can have no readahead hits to judge by: but must not get
		 * stuck here forever, so check for an adjacent offset instead
		 * (and don't even bother to check whether swap type is same).
		 */
		if (offset != prev_offset + 1 && offset != prev_offset - 1)
			pages = 1;
		prev_offset = offset;
	} else {
		unsigned int roundup = 4;
		while (roundup < pages)
			roundup <<= 1;
		pages = roundup;
	}

	if (pages > max_pages)
		pages = max_pages;

	/* Don't shrink readahead too fast */
	last_ra = atomic_read(&last_readahead_pages) / 2;
	if (pages < last_ra)
		pages = last_ra;
	atomic_set(&last_readahead_pages, pages);

	return pages;
}
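
/*
 * Worked example of the heuristic above: with 3 readahead hits recorded,
 * pages = 3 + 2 = 5, rounded up to the next power of two (8) and capped at
 * 1 << page_cluster.  With no hits and an offset that is not adjacent to
 * the previous fault, pages stays at 1, i.e. readahead is effectively off
 * for a random access pattern.
 */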

/**
 * swapin_readahead - swap in pages in hope we need them soon
 * @entry: swap entry of this memory
 * @gfp_mask: memory allocation flags
 * @vma: user vma this address belongs to
 * @addr: target address for mempolicy
 *
 * Returns the struct page for entry and addr, after queueing swapin.
 *
 * Primitive swap readahead code. We simply read an aligned block of
 * (1 << page_cluster) entries in the swap area. This method is chosen
 * because it doesn't cost us any seek time.  We also make sure to queue
 * the 'original' request together with the readahead ones...
 *
 * This has been extended to use the NUMA policies from the mm triggering
 * the readahead.
 *
 * Caller must hold down_read on the vma->vm_mm if vma is not NULL.
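 *
 * Example: with page_cluster == 3 the readahead window is 8 pages, so a
 * fault at swap offset 0x12345 reads offsets 0x12340 through 0x12347;
 * offset 0 is skipped whenever the window starts there, since the first
 * page of a swap area holds the swap header.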
 */
struct page *swapin_readahead(swp_entry_t entry, gfp_t gfp_mask,
			struct vm_area_struct *vma, unsigned long addr)
{
	struct page *page;
	unsigned long entry_offset = swp_offset(entry);
	unsigned long offset = entry_offset;
	unsigned long start_offset, end_offset;
	unsigned long mask;
	struct blk_plug plug;

	mask = swapin_nr_pages(offset) - 1;
	if (!mask)
		goto skip;

	/* Read a page_cluster sized and aligned cluster around offset. */
	start_offset = offset & ~mask;
	end_offset = offset | mask;
	if (!start_offset)	/* First page is swap header. */
		start_offset++;

	blk_start_plug(&plug);
	for (offset = start_offset; offset <= end_offset; offset++) {
		/* Ok, do the async read-ahead now */
		page = read_swap_cache_async(swp_entry(swp_type(entry), offset),
						gfp_mask, vma, addr);
		if (!page)
			continue;
		if (offset != entry_offset)
			SetPageReadahead(page);
		page_cache_release(page);
	}
	blk_finish_plug(&plug);

	lru_add_drain();	/* Push any new pages onto the LRU now */
skip:
	return read_swap_cache_async(entry, gfp_mask, vma, addr);
}