1 /*
2  *      linux/mm/filemap.c
3  *
4  * Copyright (C) 1994-1999  Linus Torvalds
5  */
6
7 /*
8  * This file handles the generic file mmap semantics used by
9  * most "normal" filesystems (but you don't /have/ to use this:
10  * the NFS filesystem used to do this differently, for example)
11  */
12 #include <linux/export.h>
13 #include <linux/compiler.h>
14 #include <linux/fs.h>
15 #include <linux/uaccess.h>
16 #include <linux/aio.h>
17 #include <linux/capability.h>
18 #include <linux/kernel_stat.h>
19 #include <linux/gfp.h>
20 #include <linux/mm.h>
21 #include <linux/swap.h>
22 #include <linux/mman.h>
23 #include <linux/pagemap.h>
24 #include <linux/file.h>
25 #include <linux/uio.h>
26 #include <linux/hash.h>
27 #include <linux/writeback.h>
28 #include <linux/backing-dev.h>
29 #include <linux/pagevec.h>
30 #include <linux/blkdev.h>
31 #include <linux/security.h>
32 #include <linux/cpuset.h>
33 #include <linux/hardirq.h> /* for BUG_ON(!in_atomic()) only */
34 #include <linux/hugetlb.h>
35 #include <linux/memcontrol.h>
36 #include <linux/cleancache.h>
37 #include <linux/rmap.h>
38 #include "internal.h"
39
40 #define CREATE_TRACE_POINTS
41 #include <trace/events/filemap.h>
42
43 /*
44  * FIXME: remove all knowledge of the buffer layer from the core VM
45  */
46 #include <linux/buffer_head.h> /* for try_to_free_buffers */
47
48 #include <asm/mman.h>
49
50 /*
51  * Shared mappings implemented 30.11.1994. It's not fully working yet,
52  * though.
53  *
54  * Shared mappings now work. 15.8.1995  Bruno.
55  *
56  * finished 'unifying' the page and buffer cache and SMP-threaded the
57  * page-cache, 21.05.1999, Ingo Molnar <mingo@redhat.com>
58  *
59  * SMP-threaded pagemap-LRU 1999, Andrea Arcangeli <andrea@suse.de>
60  */
61
62 /*
63  * Lock ordering:
64  *
65  *  ->i_mmap_rwsem              (truncate_pagecache)
66  *    ->private_lock            (__free_pte->__set_page_dirty_buffers)
67  *      ->swap_lock             (exclusive_swap_page, others)
68  *        ->mapping->tree_lock
69  *
70  *  ->i_mutex
71  *    ->i_mmap_rwsem            (truncate->unmap_mapping_range)
72  *
73  *  ->mmap_sem
74  *    ->i_mmap_rwsem
75  *      ->page_table_lock or pte_lock   (various, mainly in memory.c)
76  *        ->mapping->tree_lock  (arch-dependent flush_dcache_mmap_lock)
77  *
78  *  ->mmap_sem
79  *    ->lock_page               (access_process_vm)
80  *
81  *  ->i_mutex                   (generic_perform_write)
82  *    ->mmap_sem                (fault_in_pages_readable->do_page_fault)
83  *
84  *  bdi->wb.list_lock
85  *    sb_lock                   (fs/fs-writeback.c)
86  *    ->mapping->tree_lock      (__sync_single_inode)
87  *
88  *  ->i_mmap_rwsem
89  *    ->anon_vma.lock           (vma_adjust)
90  *
91  *  ->anon_vma.lock
92  *    ->page_table_lock or pte_lock     (anon_vma_prepare and various)
93  *
94  *  ->page_table_lock or pte_lock
95  *    ->swap_lock               (try_to_unmap_one)
96  *    ->private_lock            (try_to_unmap_one)
97  *    ->tree_lock               (try_to_unmap_one)
98  *    ->zone.lru_lock           (follow_page->mark_page_accessed)
99  *    ->zone.lru_lock           (check_pte_range->isolate_lru_page)
100  *    ->private_lock            (page_remove_rmap->set_page_dirty)
101  *    ->tree_lock               (page_remove_rmap->set_page_dirty)
102  *    bdi.wb->list_lock         (page_remove_rmap->set_page_dirty)
103  *    ->inode->i_lock           (page_remove_rmap->set_page_dirty)
104  *    bdi.wb->list_lock         (zap_pte_range->set_page_dirty)
105  *    ->inode->i_lock           (zap_pte_range->set_page_dirty)
106  *    ->private_lock            (zap_pte_range->__set_page_dirty_buffers)
107  *
108  * ->i_mmap_rwsem
109  *   ->tasklist_lock            (memory_failure, collect_procs_ao)
110  */
111
112 static void page_cache_tree_delete(struct address_space *mapping,
113                                    struct page *page, void *shadow)
114 {
115         struct radix_tree_node *node;
116         unsigned long index;
117         unsigned int offset;
118         unsigned int tag;
119         void **slot;
120
121         VM_BUG_ON(!PageLocked(page));
122
123         __radix_tree_lookup(&mapping->page_tree, page->index, &node, &slot);
124
125         if (shadow) {
126                 mapping->nrshadows++;
127                 /*
128                  * Make sure the nrshadows update is committed before
129                  * the nrpages update so that final truncate racing
130                  * with reclaim does not see both counters 0 at the
131                  * same time and miss a shadow entry.
132                  */
133                 smp_wmb();
134         }
135         mapping->nrpages--;
136
137         if (!node) {
138                 /* Clear direct pointer tags in root node */
139                 mapping->page_tree.gfp_mask &= __GFP_BITS_MASK;
140                 radix_tree_replace_slot(slot, shadow);
141                 return;
142         }
143
144         /* Clear tree tags for the removed page */
145         index = page->index;
146         offset = index & RADIX_TREE_MAP_MASK;
147         for (tag = 0; tag < RADIX_TREE_MAX_TAGS; tag++) {
148                 if (test_bit(offset, node->tags[tag]))
149                         radix_tree_tag_clear(&mapping->page_tree, index, tag);
150         }
151
152         /* Delete page, swap shadow entry */
153         radix_tree_replace_slot(slot, shadow);
154         workingset_node_pages_dec(node);
155         if (shadow)
156                 workingset_node_shadows_inc(node);
157         else
158                 if (__radix_tree_delete_node(&mapping->page_tree, node))
159                         return;
160
161         /*
162          * Track node that only contains shadow entries.
163          *
164          * Avoid acquiring the list_lru lock if already tracked.  The
165          * list_empty() test is safe as node->private_list is
166          * protected by mapping->tree_lock.
167          */
168         if (!workingset_node_pages(node) &&
169             list_empty(&node->private_list)) {
170                 node->private_data = mapping;
171                 list_lru_add(&workingset_shadow_nodes, &node->private_list);
172         }
173 }
174
175 /*
176  * Delete a page from the page cache and free it. Caller has to make
177  * sure the page is locked and that nobody else uses it - or that usage
178  * is safe.  The caller must hold the mapping's tree_lock.
179  */
180 void __delete_from_page_cache(struct page *page, void *shadow)
181 {
182         struct address_space *mapping = page->mapping;
183
184         trace_mm_filemap_delete_from_page_cache(page);
185         /*
186          * if we're uptodate, flush out into the cleancache, otherwise
187          * invalidate any existing cleancache entries.  We can't leave
188          * stale data around in the cleancache once our page is gone
189          */
190         if (PageUptodate(page) && PageMappedToDisk(page))
191                 cleancache_put_page(page);
192         else
193                 cleancache_invalidate_page(mapping, page);
194
195         page_cache_tree_delete(mapping, page, shadow);
196
197         page->mapping = NULL;
198         /* Leave page->index set: truncation lookup relies upon it */
199
200         __dec_zone_page_state(page, NR_FILE_PAGES);
201         if (PageSwapBacked(page))
202                 __dec_zone_page_state(page, NR_SHMEM);
203         BUG_ON(page_mapped(page));
204
205         /*
206          * At this point page must be either written or cleaned by truncate.
207          * Dirty page here signals a bug and loss of unwritten data.
208          *
209          * This fixes dirty accounting after removing the page entirely but
210          * leaves PageDirty set: it has no effect for truncated page and
211          * anyway will be cleared before returning page into buddy allocator.
212          */
213         if (WARN_ON_ONCE(PageDirty(page)))
214                 account_page_cleaned(page, mapping);
215 }
216
217 /**
218  * delete_from_page_cache - delete page from page cache
219  * @page: the page which the kernel is trying to remove from page cache
220  *
221  * This must be called only on pages that have been verified to be in the page
222  * cache and locked.  It will never put the page into the free list, the caller
223  * has a reference on the page.
224  */
225 void delete_from_page_cache(struct page *page)
226 {
227         struct address_space *mapping = page->mapping;
228         void (*freepage)(struct page *);
229
230         BUG_ON(!PageLocked(page));
231
232         freepage = mapping->a_ops->freepage;
233         spin_lock_irq(&mapping->tree_lock);
234         __delete_from_page_cache(page, NULL);
235         spin_unlock_irq(&mapping->tree_lock);
236
237         if (freepage)
238                 freepage(page);
239         page_cache_release(page);
240 }
241 EXPORT_SYMBOL(delete_from_page_cache);
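/*
 * Sketch of the calling convention described above: the caller holds the
 * page lock and its own reference, and has made sure the page is no longer
 * mapped.  The helper name is illustrative, not a real filemap.c entry point.
 */
static void example_remove_from_cache(struct page *page)
{
	lock_page(page);
	if (page->mapping)
		delete_from_page_cache(page);
	unlock_page(page);
	page_cache_release(page);	/* drop the caller's own reference */
}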
242
243 static int filemap_check_errors(struct address_space *mapping)
244 {
245         int ret = 0;
246         /* Check for outstanding write errors */
247         if (test_bit(AS_ENOSPC, &mapping->flags) &&
248             test_and_clear_bit(AS_ENOSPC, &mapping->flags))
249                 ret = -ENOSPC;
250         if (test_bit(AS_EIO, &mapping->flags) &&
251             test_and_clear_bit(AS_EIO, &mapping->flags))
252                 ret = -EIO;
253         return ret;
254 }
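/*
 * Sketch: the AS_ENOSPC/AS_EIO bits tested above are normally set via
 * mapping_set_error() when writeback fails, e.g. from an end_io path.
 * The helper below is illustrative only.
 */
static void example_note_writeback_error(struct address_space *mapping, int err)
{
	if (err)
		mapping_set_error(mapping, err);	/* sets AS_ENOSPC or AS_EIO */
}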
255
256 /**
257  * __filemap_fdatawrite_range - start writeback on mapping dirty pages in range
258  * @mapping:    address space structure to write
259  * @start:      offset in bytes where the range starts
260  * @end:        offset in bytes where the range ends (inclusive)
261  * @sync_mode:  enable synchronous operation
262  *
263  * Start writeback against all of a mapping's dirty pages that lie
264  * within the byte offsets <start, end> inclusive.
265  *
266  * If sync_mode is WB_SYNC_ALL then this is a "data integrity" operation, as
267  * opposed to a regular memory cleansing writeback.  The difference between
268  * these two operations is that if a dirty page/buffer is encountered, it must
269  * be waited upon, and not just skipped over.
270  */
271 int __filemap_fdatawrite_range(struct address_space *mapping, loff_t start,
272                                 loff_t end, int sync_mode)
273 {
274         int ret;
275         struct writeback_control wbc = {
276                 .sync_mode = sync_mode,
277                 .nr_to_write = LONG_MAX,
278                 .range_start = start,
279                 .range_end = end,
280         };
281
282         if (!mapping_cap_writeback_dirty(mapping))
283                 return 0;
284
285         ret = do_writepages(mapping, &wbc);
286         return ret;
287 }
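/*
 * Sketch: the two sync modes described above.  A background flush passes
 * WB_SYNC_NONE and may skip busy pages; a data-integrity (fsync-style)
 * flush passes WB_SYNC_ALL and must wait on them.  Illustrative only.
 */
static int example_start_writeback(struct address_space *mapping, bool integrity)
{
	return __filemap_fdatawrite_range(mapping, 0, LLONG_MAX,
			integrity ? WB_SYNC_ALL : WB_SYNC_NONE);
}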
288
289 static inline int __filemap_fdatawrite(struct address_space *mapping,
290         int sync_mode)
291 {
292         return __filemap_fdatawrite_range(mapping, 0, LLONG_MAX, sync_mode);
293 }
294
295 int filemap_fdatawrite(struct address_space *mapping)
296 {
297         return __filemap_fdatawrite(mapping, WB_SYNC_ALL);
298 }
299 EXPORT_SYMBOL(filemap_fdatawrite);
300
301 int filemap_fdatawrite_range(struct address_space *mapping, loff_t start,
302                                 loff_t end)
303 {
304         return __filemap_fdatawrite_range(mapping, start, end, WB_SYNC_ALL);
305 }
306 EXPORT_SYMBOL(filemap_fdatawrite_range);
307
308 /**
309  * filemap_flush - mostly a non-blocking flush
310  * @mapping:    target address_space
311  *
312  * This is a mostly non-blocking flush.  Not suitable for data-integrity
313  * purposes - I/O may not be started against all dirty pages.
314  */
315 int filemap_flush(struct address_space *mapping)
316 {
317         return __filemap_fdatawrite(mapping, WB_SYNC_NONE);
318 }
319 EXPORT_SYMBOL(filemap_flush);
320
321 /**
322  * filemap_fdatawait_range - wait for writeback to complete
323  * @mapping:            address space structure to wait for
324  * @start_byte:         offset in bytes where the range starts
325  * @end_byte:           offset in bytes where the range ends (inclusive)
326  *
327  * Walk the list of under-writeback pages of the given address space
328  * in the given range and wait for all of them.
329  */
330 int filemap_fdatawait_range(struct address_space *mapping, loff_t start_byte,
331                             loff_t end_byte)
332 {
333         pgoff_t index = start_byte >> PAGE_CACHE_SHIFT;
334         pgoff_t end = end_byte >> PAGE_CACHE_SHIFT;
335         struct pagevec pvec;
336         int nr_pages;
337         int ret2, ret = 0;
338
339         if (end_byte < start_byte)
340                 goto out;
341
342         pagevec_init(&pvec, 0);
343         while ((index <= end) &&
344                         (nr_pages = pagevec_lookup_tag(&pvec, mapping, &index,
345                         PAGECACHE_TAG_WRITEBACK,
346                         min(end - index, (pgoff_t)PAGEVEC_SIZE-1) + 1)) != 0) {
347                 unsigned i;
348
349                 for (i = 0; i < nr_pages; i++) {
350                         struct page *page = pvec.pages[i];
351
352                         /* until radix tree lookup accepts end_index */
353                         if (page->index > end)
354                                 continue;
355
356                         wait_on_page_writeback(page);
357                         if (TestClearPageError(page))
358                                 ret = -EIO;
359                 }
360                 pagevec_release(&pvec);
361                 cond_resched();
362         }
363 out:
364         ret2 = filemap_check_errors(mapping);
365         if (!ret)
366                 ret = ret2;
367
368         return ret;
369 }
370 EXPORT_SYMBOL(filemap_fdatawait_range);
371
372 /**
373  * filemap_fdatawait - wait for all under-writeback pages to complete
374  * @mapping: address space structure to wait for
375  *
376  * Walk the list of under-writeback pages of the given address space
377  * and wait for all of them.
378  */
379 int filemap_fdatawait(struct address_space *mapping)
380 {
381         loff_t i_size = i_size_read(mapping->host);
382
383         if (i_size == 0)
384                 return 0;
385
386         return filemap_fdatawait_range(mapping, 0, i_size - 1);
387 }
388 EXPORT_SYMBOL(filemap_fdatawait);
389
390 int filemap_write_and_wait(struct address_space *mapping)
391 {
392         int err = 0;
393
394         if (mapping->nrpages) {
395                 err = filemap_fdatawrite(mapping);
396                 /*
397                  * Even if the above returned error, the pages may be
398                  * written partially (e.g. -ENOSPC), so we wait for it.
399                  * But the -EIO is special case, it may indicate the worst
400                  * thing (e.g. bug) happened, so we avoid waiting for it.
401                  */
402                 if (err != -EIO) {
403                         int err2 = filemap_fdatawait(mapping);
404                         if (!err)
405                                 err = err2;
406                 }
407         } else {
408                 err = filemap_check_errors(mapping);
409         }
410         return err;
411 }
412 EXPORT_SYMBOL(filemap_write_and_wait);
413
414 /**
415  * filemap_write_and_wait_range - write out & wait on a file range
416  * @mapping:    the address_space for the pages
417  * @lstart:     offset in bytes where the range starts
418  * @lend:       offset in bytes where the range ends (inclusive)
419  *
420  * Write out and wait upon file offsets lstart->lend, inclusive.
421  *
422  * Note that `lend' is inclusive (describes the last byte to be written) so
423  * that this function can be used to write to the very end-of-file (end = -1).
424  */
425 int filemap_write_and_wait_range(struct address_space *mapping,
426                                  loff_t lstart, loff_t lend)
427 {
428         int err = 0;
429
430         if (mapping->nrpages) {
431                 err = __filemap_fdatawrite_range(mapping, lstart, lend,
432                                                  WB_SYNC_ALL);
433                 /* See comment of filemap_write_and_wait() */
434                 if (err != -EIO) {
435                         int err2 = filemap_fdatawait_range(mapping,
436                                                 lstart, lend);
437                         if (!err)
438                                 err = err2;
439                 }
440         } else {
441                 err = filemap_check_errors(mapping);
442         }
443         return err;
444 }
445 EXPORT_SYMBOL(filemap_write_and_wait_range);
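/*
 * Sketch of the whole-file case mentioned above: pass the maximum end
 * offset to write out and wait on every dirty page, the pattern fsync
 * implementations typically use.  The helper name is illustrative.
 */
static int example_flush_whole_file(struct address_space *mapping)
{
	return filemap_write_and_wait_range(mapping, 0, LLONG_MAX);
}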
446
447 /**
448  * replace_page_cache_page - replace a pagecache page with a new one
449  * @old:        page to be replaced
450  * @new:        page to replace with
451  * @gfp_mask:   allocation mode
452  *
453  * This function replaces a page in the pagecache with a new one.  On
454  * success it acquires the pagecache reference for the new page and
455  * drops it for the old page.  Both the old and new pages must be
456  * locked.  This function does not add the new page to the LRU, the
457  * caller must do that.
458  *
459  * The remove + add is atomic.  The only way this function can fail is
460  * memory allocation failure.
461  */
462 int replace_page_cache_page(struct page *old, struct page *new, gfp_t gfp_mask)
463 {
464         int error;
465
466         VM_BUG_ON_PAGE(!PageLocked(old), old);
467         VM_BUG_ON_PAGE(!PageLocked(new), new);
468         VM_BUG_ON_PAGE(new->mapping, new);
469
470         error = radix_tree_preload(gfp_mask & ~__GFP_HIGHMEM);
471         if (!error) {
472                 struct address_space *mapping = old->mapping;
473                 void (*freepage)(struct page *);
474
475                 pgoff_t offset = old->index;
476                 freepage = mapping->a_ops->freepage;
477
478                 page_cache_get(new);
479                 new->mapping = mapping;
480                 new->index = offset;
481
482                 spin_lock_irq(&mapping->tree_lock);
483                 __delete_from_page_cache(old, NULL);
484                 error = radix_tree_insert(&mapping->page_tree, offset, new);
485                 BUG_ON(error);
486                 mapping->nrpages++;
487                 __inc_zone_page_state(new, NR_FILE_PAGES);
488                 if (PageSwapBacked(new))
489                         __inc_zone_page_state(new, NR_SHMEM);
490                 spin_unlock_irq(&mapping->tree_lock);
491                 mem_cgroup_migrate(old, new, true);
492                 radix_tree_preload_end();
493                 if (freepage)
494                         freepage(old);
495                 page_cache_release(old);
496         }
497
498         return error;
499 }
500 EXPORT_SYMBOL_GPL(replace_page_cache_page);
501
502 static int page_cache_tree_insert(struct address_space *mapping,
503                                   struct page *page, void **shadowp)
504 {
505         struct radix_tree_node *node;
506         void **slot;
507         int error;
508
509         error = __radix_tree_create(&mapping->page_tree, page->index,
510                                     &node, &slot);
511         if (error)
512                 return error;
513         if (*slot) {
514                 void *p;
515
516                 p = radix_tree_deref_slot_protected(slot, &mapping->tree_lock);
517                 if (!radix_tree_exceptional_entry(p))
518                         return -EEXIST;
519                 if (shadowp)
520                         *shadowp = p;
521                 mapping->nrshadows--;
522                 if (node)
523                         workingset_node_shadows_dec(node);
524         }
525         radix_tree_replace_slot(slot, page);
526         mapping->nrpages++;
527         if (node) {
528                 workingset_node_pages_inc(node);
529                 /*
530                  * Don't track node that contains actual pages.
531                  *
532                  * Avoid acquiring the list_lru lock if already
533                  * untracked.  The list_empty() test is safe as
534                  * node->private_list is protected by
535                  * mapping->tree_lock.
536                  */
537                 if (!list_empty(&node->private_list))
538                         list_lru_del(&workingset_shadow_nodes,
539                                      &node->private_list);
540         }
541         return 0;
542 }
543
544 static int __add_to_page_cache_locked(struct page *page,
545                                       struct address_space *mapping,
546                                       pgoff_t offset, gfp_t gfp_mask,
547                                       void **shadowp)
548 {
549         int huge = PageHuge(page);
550         struct mem_cgroup *memcg;
551         int error;
552
553         VM_BUG_ON_PAGE(!PageLocked(page), page);
554         VM_BUG_ON_PAGE(PageSwapBacked(page), page);
555
556         if (!huge) {
557                 error = mem_cgroup_try_charge(page, current->mm,
558                                               gfp_mask, &memcg);
559                 if (error)
560                         return error;
561         }
562
563         error = radix_tree_maybe_preload(gfp_mask & ~__GFP_HIGHMEM);
564         if (error) {
565                 if (!huge)
566                         mem_cgroup_cancel_charge(page, memcg);
567                 return error;
568         }
569
570         page_cache_get(page);
571         page->mapping = mapping;
572         page->index = offset;
573
574         spin_lock_irq(&mapping->tree_lock);
575         error = page_cache_tree_insert(mapping, page, shadowp);
576         radix_tree_preload_end();
577         if (unlikely(error))
578                 goto err_insert;
579         __inc_zone_page_state(page, NR_FILE_PAGES);
580         spin_unlock_irq(&mapping->tree_lock);
581         if (!huge)
582                 mem_cgroup_commit_charge(page, memcg, false);
583         trace_mm_filemap_add_to_page_cache(page);
584         return 0;
585 err_insert:
586         page->mapping = NULL;
587         /* Leave page->index set: truncation relies upon it */
588         spin_unlock_irq(&mapping->tree_lock);
589         if (!huge)
590                 mem_cgroup_cancel_charge(page, memcg);
591         page_cache_release(page);
592         return error;
593 }
594
595 /**
596  * add_to_page_cache_locked - add a locked page to the pagecache
597  * @page:       page to add
598  * @mapping:    the page's address_space
599  * @offset:     page index
600  * @gfp_mask:   page allocation mode
601  *
602  * This function is used to add a page to the pagecache. It must be locked.
603  * This function does not add the page to the LRU.  The caller must do that.
604  */
605 int add_to_page_cache_locked(struct page *page, struct address_space *mapping,
606                 pgoff_t offset, gfp_t gfp_mask)
607 {
608         return __add_to_page_cache_locked(page, mapping, offset,
609                                           gfp_mask, NULL);
610 }
611 EXPORT_SYMBOL(add_to_page_cache_locked);
612
613 int add_to_page_cache_lru(struct page *page, struct address_space *mapping,
614                                 pgoff_t offset, gfp_t gfp_mask)
615 {
616         void *shadow = NULL;
617         int ret;
618
619         __set_page_locked(page);
620         ret = __add_to_page_cache_locked(page, mapping, offset,
621                                          gfp_mask, &shadow);
622         if (unlikely(ret))
623                 __clear_page_locked(page);
624         else {
625                 /*
626                  * The page might have been evicted from cache only
627                  * recently, in which case it should be activated like
628                  * any other repeatedly accessed page.
629                  */
630                 if (shadow && workingset_refault(shadow)) {
631                         SetPageActive(page);
632                         workingset_activation(page);
633                 } else
634                         ClearPageActive(page);
635                 lru_cache_add(page);
636         }
637         return ret;
638 }
639 EXPORT_SYMBOL_GPL(add_to_page_cache_lru);
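/*
 * Sketch: the usual way the two helpers above are consumed when populating
 * the cache - allocate a page, insert it locked and on the LRU, then let
 * the filesystem fill it.  The helper name and gfp handling are illustrative.
 */
static int example_start_readpage(struct file *file,
				  struct address_space *mapping, pgoff_t index)
{
	gfp_t gfp = mapping_gfp_mask(mapping);
	struct page *page = __page_cache_alloc(gfp);
	int err;

	if (!page)
		return -ENOMEM;
	err = add_to_page_cache_lru(page, mapping, index, gfp);
	if (err) {
		page_cache_release(page);
		return err;
	}
	/* The page is locked here; ->readpage() unlocks it on completion. */
	err = mapping->a_ops->readpage(file, page);
	page_cache_release(page);
	return err;
}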
640
641 #ifdef CONFIG_NUMA
642 struct page *__page_cache_alloc(gfp_t gfp)
643 {
644         int n;
645         struct page *page;
646
647         if (cpuset_do_page_mem_spread()) {
648                 unsigned int cpuset_mems_cookie;
649                 do {
650                         cpuset_mems_cookie = read_mems_allowed_begin();
651                         n = cpuset_mem_spread_node();
652                         page = alloc_pages_exact_node(n, gfp, 0);
653                 } while (!page && read_mems_allowed_retry(cpuset_mems_cookie));
654
655                 return page;
656         }
657         return alloc_pages(gfp, 0);
658 }
659 EXPORT_SYMBOL(__page_cache_alloc);
660 #endif
661
662 /*
663  * In order to wait for pages to become available there must be
664  * waitqueues associated with pages. By using a hash table of
665  * waitqueues where the bucket discipline is to maintain all
666  * waiters on the same queue and wake all when any of the pages
667  * become available, and for the woken contexts to check to be
668  * sure the appropriate page became available, this saves space
669  * at a cost of "thundering herd" phenomena during rare hash
670  * collisions.
671  */
672 wait_queue_head_t *page_waitqueue(struct page *page)
673 {
674         const struct zone *zone = page_zone(page);
675
676         return &zone->wait_table[hash_ptr(page, zone->wait_table_bits)];
677 }
678 EXPORT_SYMBOL(page_waitqueue);
679
680 void wait_on_page_bit(struct page *page, int bit_nr)
681 {
682         DEFINE_WAIT_BIT(wait, &page->flags, bit_nr);
683
684         if (test_bit(bit_nr, &page->flags))
685                 __wait_on_bit(page_waitqueue(page), &wait, bit_wait_io,
686                                                         TASK_UNINTERRUPTIBLE);
687 }
688 EXPORT_SYMBOL(wait_on_page_bit);
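/*
 * Sketch: the hashed wait queues above are what wait_on_page_locked() and
 * wait_on_page_writeback() build on.  A minimal equivalent of the latter:
 */
static void example_wait_for_writeback(struct page *page)
{
	if (PageWriteback(page))
		wait_on_page_bit(page, PG_writeback);
}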
689
690 int wait_on_page_bit_killable(struct page *page, int bit_nr)
691 {
692         DEFINE_WAIT_BIT(wait, &page->flags, bit_nr);
693
694         if (!test_bit(bit_nr, &page->flags))
695                 return 0;
696
697         return __wait_on_bit(page_waitqueue(page), &wait,
698                              bit_wait_io, TASK_KILLABLE);
699 }
700
701 int wait_on_page_bit_killable_timeout(struct page *page,
702                                        int bit_nr, unsigned long timeout)
703 {
704         DEFINE_WAIT_BIT(wait, &page->flags, bit_nr);
705
706         wait.key.timeout = jiffies + timeout;
707         if (!test_bit(bit_nr, &page->flags))
708                 return 0;
709         return __wait_on_bit(page_waitqueue(page), &wait,
710                              bit_wait_io_timeout, TASK_KILLABLE);
711 }
712 EXPORT_SYMBOL_GPL(wait_on_page_bit_killable_timeout);
713
714 /**
715  * add_page_wait_queue - Add an arbitrary waiter to a page's wait queue
716  * @page: Page defining the wait queue of interest
717  * @waiter: Waiter to add to the queue
718  *
719  * Add an arbitrary @waiter to the wait queue for the nominated @page.
720  */
721 void add_page_wait_queue(struct page *page, wait_queue_t *waiter)
722 {
723         wait_queue_head_t *q = page_waitqueue(page);
724         unsigned long flags;
725
726         spin_lock_irqsave(&q->lock, flags);
727         __add_wait_queue(q, waiter);
728         spin_unlock_irqrestore(&q->lock, flags);
729 }
730 EXPORT_SYMBOL_GPL(add_page_wait_queue);
731
732 /**
733  * unlock_page - unlock a locked page
734  * @page: the page
735  *
736  * Unlocks the page and wakes up sleepers in ___wait_on_page_locked().
737  * Also wakes sleepers in wait_on_page_writeback() because the wakeup
738  * mechanism between PageLocked pages and PageWriteback pages is shared.
739  * But that's OK - sleepers in wait_on_page_writeback() just go back to sleep.
740  *
741  * The mb is necessary to enforce ordering between the clear_bit and the read
742  * of the waitqueue (to avoid SMP races with a parallel wait_on_page_locked()).
743  */
744 void unlock_page(struct page *page)
745 {
746         VM_BUG_ON_PAGE(!PageLocked(page), page);
747         clear_bit_unlock(PG_locked, &page->flags);
748         smp_mb__after_atomic();
749         wake_up_page(page, PG_locked);
750 }
751 EXPORT_SYMBOL(unlock_page);
752
753 /**
754  * end_page_writeback - end writeback against a page
755  * @page: the page
756  */
757 void end_page_writeback(struct page *page)
758 {
759         /*
760          * TestClearPageReclaim could be used here but it is an atomic
761          * operation and overkill in this particular case. Failing to
762          * shuffle a page marked for immediate reclaim is too mild to
763          * justify taking an atomic operation penalty at the end of
764          * every page writeback.
765          */
766         if (PageReclaim(page)) {
767                 ClearPageReclaim(page);
768                 rotate_reclaimable_page(page);
769         }
770
771         if (!test_clear_page_writeback(page))
772                 BUG();
773
774         smp_mb__after_atomic();
775         wake_up_page(page, PG_writeback);
776 }
777 EXPORT_SYMBOL(end_page_writeback);
778
779 /*
780  * After completing I/O on a page, call this routine to update the page
781  * flags appropriately
782  */
783 void page_endio(struct page *page, int rw, int err)
784 {
785         if (rw == READ) {
786                 if (!err) {
787                         SetPageUptodate(page);
788                 } else {
789                         ClearPageUptodate(page);
790                         SetPageError(page);
791                 }
792                 unlock_page(page);
793         } else { /* rw == WRITE */
794                 if (err) {
795                         SetPageError(page);
796                         if (page->mapping)
797                                 mapping_set_error(page->mapping, err);
798                 }
799                 end_page_writeback(page);
800         }
801 }
802 EXPORT_SYMBOL_GPL(page_endio);
803
804 /**
805  * __lock_page - get a lock on the page, assuming we need to sleep to get it
806  * @page: the page to lock
807  */
808 void __lock_page(struct page *page)
809 {
810         DEFINE_WAIT_BIT(wait, &page->flags, PG_locked);
811
812         __wait_on_bit_lock(page_waitqueue(page), &wait, bit_wait_io,
813                                                         TASK_UNINTERRUPTIBLE);
814 }
815 EXPORT_SYMBOL(__lock_page);
816
817 int __lock_page_killable(struct page *page)
818 {
819         DEFINE_WAIT_BIT(wait, &page->flags, PG_locked);
820
821         return __wait_on_bit_lock(page_waitqueue(page), &wait,
822                                         bit_wait_io, TASK_KILLABLE);
823 }
824 EXPORT_SYMBOL_GPL(__lock_page_killable);
825
826 /*
827  * Return values:
828  * 1 - page is locked; mmap_sem is still held.
829  * 0 - page is not locked.
830  *     mmap_sem has been released (up_read()), unless flags had both
831  *     FAULT_FLAG_ALLOW_RETRY and FAULT_FLAG_RETRY_NOWAIT set, in
832  *     which case mmap_sem is still held.
833  *
834  * If neither ALLOW_RETRY nor KILLABLE are set, will always return 1
835  * with the page locked and the mmap_sem unperturbed.
836  */
837 int __lock_page_or_retry(struct page *page, struct mm_struct *mm,
838                          unsigned int flags)
839 {
840         if (flags & FAULT_FLAG_ALLOW_RETRY) {
841                 /*
842                  * CAUTION! In this case, mmap_sem is not released
843                  * even though we return 0.
844                  */
845                 if (flags & FAULT_FLAG_RETRY_NOWAIT)
846                         return 0;
847
848                 up_read(&mm->mmap_sem);
849                 if (flags & FAULT_FLAG_KILLABLE)
850                         wait_on_page_locked_killable(page);
851                 else
852                         wait_on_page_locked(page);
853                 return 0;
854         } else {
855                 if (flags & FAULT_FLAG_KILLABLE) {
856                         int ret;
857
858                         ret = __lock_page_killable(page);
859                         if (ret) {
860                                 up_read(&mm->mmap_sem);
861                                 return 0;
862                         }
863                 } else
864                         __lock_page(page);
865                 return 1;
866         }
867 }
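/*
 * Sketch of how a fault handler consumes the protocol above (this mirrors
 * the filemap_fault() pattern): a zero return means the page is not locked
 * and, unless FAULT_FLAG_RETRY_NOWAIT was set, mmap_sem has been dropped,
 * so the fault must be retried.  The helper name is illustrative.
 */
static int example_fault_lock_page(struct page *page, struct mm_struct *mm,
				   unsigned int flags)
{
	if (!lock_page_or_retry(page, mm, flags)) {
		page_cache_release(page);
		return VM_FAULT_RETRY;
	}
	return 0;	/* page is locked and mmap_sem is still held */
}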
868
869 /**
870  * page_cache_next_hole - find the next hole (not-present entry)
871  * @mapping: mapping
872  * @index: index
873  * @max_scan: maximum range to search
874  *
875  * Search the set [index, min(index+max_scan-1, MAX_INDEX)] for the
876  * lowest indexed hole.
877  *
878  * Returns: the index of the hole if found, otherwise returns an index
879  * outside of the set specified (in which case 'return - index >=
880  * max_scan' will be true). In rare cases of index wrap-around, 0 will
881  * be returned.
882  *
883  * page_cache_next_hole may be called under rcu_read_lock. However,
884  * like radix_tree_gang_lookup, this will not atomically search a
885  * snapshot of the tree at a single point in time. For example, if a
886  * hole is created at index 5, then subsequently a hole is created at
887  * index 10, page_cache_next_hole covering both indexes may return 10
888  * if called under rcu_read_lock.
889  */
890 pgoff_t page_cache_next_hole(struct address_space *mapping,
891                              pgoff_t index, unsigned long max_scan)
892 {
893         unsigned long i;
894
895         for (i = 0; i < max_scan; i++) {
896                 struct page *page;
897
898                 page = radix_tree_lookup(&mapping->page_tree, index);
899                 if (!page || radix_tree_exceptional_entry(page))
900                         break;
901                 index++;
902                 if (index == 0)
903                         break;
904         }
905
906         return index;
907 }
908 EXPORT_SYMBOL(page_cache_next_hole);
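/*
 * Sketch: using the lookup above to test whether a cache range is fully
 * populated, relying on the "return - index >= max_scan" convention from
 * the comment.  Illustrative only.
 */
static bool example_range_populated(struct address_space *mapping,
				    pgoff_t index, unsigned long count)
{
	pgoff_t hole;

	rcu_read_lock();
	hole = page_cache_next_hole(mapping, index, count);
	rcu_read_unlock();

	return hole - index >= count;
}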
909
910 /**
911  * page_cache_prev_hole - find the prev hole (not-present entry)
912  * @mapping: mapping
913  * @index: index
914  * @max_scan: maximum range to search
915  *
916  * Search backwards in the range [max(index-max_scan+1, 0), index] for
917  * the first hole.
918  *
919  * Returns: the index of the hole if found, otherwise returns an index
920  * outside of the set specified (in which case 'index - return >=
921  * max_scan' will be true). In rare cases of wrap-around, ULONG_MAX
922  * will be returned.
923  *
924  * page_cache_prev_hole may be called under rcu_read_lock. However,
925  * like radix_tree_gang_lookup, this will not atomically search a
926  * snapshot of the tree at a single point in time. For example, if a
927  * hole is created at index 10, then subsequently a hole is created at
928  * index 5, page_cache_prev_hole covering both indexes may return 5 if
929  * called under rcu_read_lock.
930  */
931 pgoff_t page_cache_prev_hole(struct address_space *mapping,
932                              pgoff_t index, unsigned long max_scan)
933 {
934         unsigned long i;
935
936         for (i = 0; i < max_scan; i++) {
937                 struct page *page;
938
939                 page = radix_tree_lookup(&mapping->page_tree, index);
940                 if (!page || radix_tree_exceptional_entry(page))
941                         break;
942                 index--;
943                 if (index == ULONG_MAX)
944                         break;
945         }
946
947         return index;
948 }
949 EXPORT_SYMBOL(page_cache_prev_hole);
950
951 /**
952  * find_get_entry - find and get a page cache entry
953  * @mapping: the address_space to search
954  * @offset: the page cache index
955  *
956  * Looks up the page cache slot at @mapping & @offset.  If there is a
957  * page cache page, it is returned with an increased refcount.
958  *
959  * If the slot holds a shadow entry of a previously evicted page, or a
960  * swap entry from shmem/tmpfs, it is returned.
961  *
962  * Otherwise, %NULL is returned.
963  */
964 struct page *find_get_entry(struct address_space *mapping, pgoff_t offset)
965 {
966         void **pagep;
967         struct page *page;
968
969         rcu_read_lock();
970 repeat:
971         page = NULL;
972         pagep = radix_tree_lookup_slot(&mapping->page_tree, offset);
973         if (pagep) {
974                 page = radix_tree_deref_slot(pagep);
975                 if (unlikely(!page))
976                         goto out;
977                 if (radix_tree_exception(page)) {
978                         if (radix_tree_deref_retry(page))
979                                 goto repeat;
980                         /*
981                          * A shadow entry of a recently evicted page,
982                          * or a swap entry from shmem/tmpfs.  Return
983                          * it without attempting to raise page count.
984                          */
985                         goto out;
986                 }
987                 if (!page_cache_get_speculative(page))
988                         goto repeat;
989
990                 /*
991                  * Has the page moved?
992                  * This is part of the lockless pagecache protocol. See
993                  * include/linux/pagemap.h for details.
994                  */
995                 if (unlikely(page != *pagep)) {
996                         page_cache_release(page);
997                         goto repeat;
998                 }
999         }
1000 out:
1001         rcu_read_unlock();
1002
1003         return page;
1004 }
1005 EXPORT_SYMBOL(find_get_entry);
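/*
 * Sketch: callers of find_get_entry() must be prepared for exceptional
 * (shadow or swap) entries, which carry no page reference.  This is
 * essentially what find_get_page() does; shown here for illustration.
 */
static struct page *example_lookup_page_only(struct address_space *mapping,
					     pgoff_t offset)
{
	struct page *page = find_get_entry(mapping, offset);

	if (radix_tree_exceptional_entry(page))
		return NULL;	/* shadow/swap entry - nothing to release */
	return page;		/* NULL, or a page with an elevated refcount */
}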
1006
1007 /**
1008  * find_lock_entry - locate, pin and lock a page cache entry
1009  * @mapping: the address_space to search
1010  * @offset: the page cache index
1011  *
1012  * Looks up the page cache slot at @mapping & @offset.  If there is a
1013  * page cache page, it is returned locked and with an increased
1014  * refcount.
1015  *
1016  * If the slot holds a shadow entry of a previously evicted page, or a
1017  * swap entry from shmem/tmpfs, it is returned.
1018  *
1019  * Otherwise, %NULL is returned.
1020  *
1021  * find_lock_entry() may sleep.
1022  */
1023 struct page *find_lock_entry(struct address_space *mapping, pgoff_t offset)
1024 {
1025         struct page *page;
1026
1027 repeat:
1028         page = find_get_entry(mapping, offset);
1029         if (page && !radix_tree_exception(page)) {
1030                 lock_page(page);
1031                 /* Has the page been truncated? */
1032                 if (unlikely(page->mapping != mapping)) {
1033                         unlock_page(page);
1034                         page_cache_release(page);
1035                         goto repeat;
1036                 }
1037                 VM_BUG_ON_PAGE(page->index != offset, page);
1038         }
1039         return page;
1040 }
1041 EXPORT_SYMBOL(find_lock_entry);
1042
1043 /**
1044  * pagecache_get_page - find and get a page reference
1045  * @mapping: the address_space to search
1046  * @offset: the page index
1047  * @fgp_flags: FGP flags
1048  * @gfp_mask: gfp mask to use for the page cache data page allocation
1049  *
1050  * Looks up the page cache slot at @mapping & @offset.
1051  *
1052  * FGP flags modify how the page is returned.
1053  *
1054  * FGP_ACCESSED: the page will be marked accessed
1055  * FGP_LOCK: Page is returned locked
1056  * FGP_CREAT: If page is not present then a new page is allocated using
1057  *              @gfp_mask and added to the page cache and the VM's LRU
1058  *              list. The page is returned locked and with an increased
1059  *              refcount. Otherwise, %NULL is returned.
1060  *
1061  * If FGP_LOCK or FGP_CREAT are specified then the function may sleep even
1062  * if the GFP flags specified for FGP_CREAT are atomic.
1063  *
1064  * If there is a page cache page, it is returned with an increased refcount.
1065  */
1066 struct page *pagecache_get_page(struct address_space *mapping, pgoff_t offset,
1067         int fgp_flags, gfp_t gfp_mask)
1068 {
1069         struct page *page;
1070
1071 repeat:
1072         page = find_get_entry(mapping, offset);
1073         if (radix_tree_exceptional_entry(page))
1074                 page = NULL;
1075         if (!page)
1076                 goto no_page;
1077
1078         if (fgp_flags & FGP_LOCK) {
1079                 if (fgp_flags & FGP_NOWAIT) {
1080                         if (!trylock_page(page)) {
1081                                 page_cache_release(page);
1082                                 return NULL;
1083                         }
1084                 } else {
1085                         lock_page(page);
1086                 }
1087
1088                 /* Has the page been truncated? */
1089                 if (unlikely(page->mapping != mapping)) {
1090                         unlock_page(page);
1091                         page_cache_release(page);
1092                         goto repeat;
1093                 }
1094                 VM_BUG_ON_PAGE(page->index != offset, page);
1095         }
1096
1097         if (page && (fgp_flags & FGP_ACCESSED))
1098                 mark_page_accessed(page);
1099
1100 no_page:
1101         if (!page && (fgp_flags & FGP_CREAT)) {
1102                 int err;
1103                 if ((fgp_flags & FGP_WRITE) && mapping_cap_account_dirty(mapping))
1104                         gfp_mask |= __GFP_WRITE;
1105                 if (fgp_flags & FGP_NOFS)
1106                         gfp_mask &= ~__GFP_FS;
1107
1108                 page = __page_cache_alloc(gfp_mask);
1109                 if (!page)
1110                         return NULL;
1111
1112                 if (WARN_ON_ONCE(!(fgp_flags & FGP_LOCK)))
1113                         fgp_flags |= FGP_LOCK;
1114
1115                 /* Init accessed so we avoid an atomic mark_page_accessed() later */
1116                 if (fgp_flags & FGP_ACCESSED)
1117                         __SetPageReferenced(page);
1118
1119                 err = add_to_page_cache_lru(page, mapping, offset,
1120                                 gfp_mask & GFP_RECLAIM_MASK);
1121                 if (unlikely(err)) {
1122                         page_cache_release(page);
1123                         page = NULL;
1124                         if (err == -EEXIST)
1125                                 goto repeat;
1126                 }
1127         }
1128
1129         return page;
1130 }
1131 EXPORT_SYMBOL(pagecache_get_page);
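/*
 * Sketch: a typical FGP combination, as used on the buffered write path -
 * find or create the page and return it locked and marked accessed.  The
 * helper name and gfp choice are illustrative.
 */
static struct page *example_grab_write_page(struct address_space *mapping,
					    pgoff_t index)
{
	return pagecache_get_page(mapping, index,
			FGP_LOCK | FGP_WRITE | FGP_CREAT | FGP_ACCESSED,
			mapping_gfp_mask(mapping));
}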
1132
1133 /**
1134  * find_get_entries - gang pagecache lookup
1135  * @mapping:    The address_space to search
1136  * @start:      The starting page cache index
1137  * @nr_entries: The maximum number of entries
1138  * @entries:    Where the resulting entries are placed
1139  * @indices:    The cache indices corresponding to the entries in @entries
1140  *
1141  * find_get_entries() will search for and return a group of up to
1142  * @nr_entries entries in the mapping.  The entries are placed at
1143  * @entries.  find_get_entries() takes a reference against any actual
1144  * pages it returns.
1145  *
1146  * The search returns a group of mapping-contiguous page cache entries
1147  * with ascending indexes.  There may be holes in the indices due to
1148  * not-present pages.
1149  *
1150  * Any shadow entries of evicted pages, or swap entries from
1151  * shmem/tmpfs, are included in the returned array.
1152  *
1153  * find_get_entries() returns the number of pages and shadow entries
1154  * which were found.
1155  */
1156 unsigned find_get_entries(struct address_space *mapping,
1157                           pgoff_t start, unsigned int nr_entries,
1158                           struct page **entries, pgoff_t *indices)
1159 {
1160         void **slot;
1161         unsigned int ret = 0;
1162         struct radix_tree_iter iter;
1163
1164         if (!nr_entries)
1165                 return 0;
1166
1167         rcu_read_lock();
1168 restart:
1169         radix_tree_for_each_slot(slot, &mapping->page_tree, &iter, start) {
1170                 struct page *page;
1171 repeat:
1172                 page = radix_tree_deref_slot(slot);
1173                 if (unlikely(!page))
1174                         continue;
1175                 if (radix_tree_exception(page)) {
1176                         if (radix_tree_deref_retry(page))
1177                                 goto restart;
1178                         /*
1179                          * A shadow entry of a recently evicted page,
1180                          * or a swap entry from shmem/tmpfs.  Return
1181                          * it without attempting to raise page count.
1182                          */
1183                         goto export;
1184                 }
1185                 if (!page_cache_get_speculative(page))
1186                         goto repeat;
1187
1188                 /* Has the page moved? */
1189                 if (unlikely(page != *slot)) {
1190                         page_cache_release(page);
1191                         goto repeat;
1192                 }
1193 export:
1194                 indices[ret] = iter.index;
1195                 entries[ret] = page;
1196                 if (++ret == nr_entries)
1197                         break;
1198         }
1199         rcu_read_unlock();
1200         return ret;
1201 }
1202
1203 /**
1204  * find_get_pages - gang pagecache lookup
1205  * @mapping:    The address_space to search
1206  * @start:      The starting page index
1207  * @nr_pages:   The maximum number of pages
1208  * @pages:      Where the resulting pages are placed
1209  *
1210  * find_get_pages() will search for and return a group of up to
1211  * @nr_pages pages in the mapping.  The pages are placed at @pages.
1212  * find_get_pages() takes a reference against the returned pages.
1213  *
1214  * The search returns a group of mapping-contiguous pages with ascending
1215  * indexes.  There may be holes in the indices due to not-present pages.
1216  *
1217  * find_get_pages() returns the number of pages which were found.
1218  */
1219 unsigned find_get_pages(struct address_space *mapping, pgoff_t start,
1220                             unsigned int nr_pages, struct page **pages)
1221 {
1222         struct radix_tree_iter iter;
1223         void **slot;
1224         unsigned ret = 0;
1225
1226         if (unlikely(!nr_pages))
1227                 return 0;
1228
1229         rcu_read_lock();
1230 restart:
1231         radix_tree_for_each_slot(slot, &mapping->page_tree, &iter, start) {
1232                 struct page *page;
1233 repeat:
1234                 page = radix_tree_deref_slot(slot);
1235                 if (unlikely(!page))
1236                         continue;
1237
1238                 if (radix_tree_exception(page)) {
1239                         if (radix_tree_deref_retry(page)) {
1240                                 /*
1241                                  * Transient condition which can only trigger
1242                                  * when entry at index 0 moves out of or back
1243                                  * to root: none yet gotten, safe to restart.
1244                                  */
1245                                 WARN_ON(iter.index);
1246                                 goto restart;
1247                         }
1248                         /*
1249                          * A shadow entry of a recently evicted page,
1250                          * or a swap entry from shmem/tmpfs.  Skip
1251                          * over it.
1252                          */
1253                         continue;
1254                 }
1255
1256                 if (!page_cache_get_speculative(page))
1257                         goto repeat;
1258
1259                 /* Has the page moved? */
1260                 if (unlikely(page != *slot)) {
1261                         page_cache_release(page);
1262                         goto repeat;
1263                 }
1264
1265                 pages[ret] = page;
1266                 if (++ret == nr_pages)
1267                         break;
1268         }
1269
1270         rcu_read_unlock();
1271         return ret;
1272 }
1273
1274 /**
1275  * find_get_pages_contig - gang contiguous pagecache lookup
1276  * @mapping:    The address_space to search
1277  * @index:      The starting page index
1278  * @nr_pages:   The maximum number of pages
1279  * @pages:      Where the resulting pages are placed
1280  *
1281  * find_get_pages_contig() works exactly like find_get_pages(), except
1282  * that the returned number of pages are guaranteed to be contiguous.
1283  *
1284  * find_get_pages_contig() returns the number of pages which were found.
1285  */
1286 unsigned find_get_pages_contig(struct address_space *mapping, pgoff_t index,
1287                                unsigned int nr_pages, struct page **pages)
1288 {
1289         struct radix_tree_iter iter;
1290         void **slot;
1291         unsigned int ret = 0;
1292
1293         if (unlikely(!nr_pages))
1294                 return 0;
1295
1296         rcu_read_lock();
1297 restart:
1298         radix_tree_for_each_contig(slot, &mapping->page_tree, &iter, index) {
1299                 struct page *page;
1300 repeat:
1301                 page = radix_tree_deref_slot(slot);
1302                 /* A hole - there is no reason to continue */
1303                 if (unlikely(!page))
1304                         break;
1305
1306                 if (radix_tree_exception(page)) {
1307                         if (radix_tree_deref_retry(page)) {
1308                                 /*
1309                                  * Transient condition which can only trigger
1310                                  * when entry at index 0 moves out of or back
1311                                  * to root: none yet gotten, safe to restart.
1312                                  */
1313                                 goto restart;
1314                         }
1315                         /*
1316                          * A shadow entry of a recently evicted page,
1317                          * or a swap entry from shmem/tmpfs.  Stop
1318                          * looking for contiguous pages.
1319                          */
1320                         break;
1321                 }
1322
1323                 if (!page_cache_get_speculative(page))
1324                         goto repeat;
1325
1326                 /* Has the page moved? */
1327                 if (unlikely(page != *slot)) {
1328                         page_cache_release(page);
1329                         goto repeat;
1330                 }
1331
1332                 /*
1333                  * We must check mapping and index after taking the ref;
1334                  * otherwise we can get both false positives and false
1335                  * negatives, which is just confusing to the caller.
1336                  */
1337                 if (page->mapping == NULL || page->index != iter.index) {
1338                         page_cache_release(page);
1339                         break;
1340                 }
1341
1342                 pages[ret] = page;
1343                 if (++ret == nr_pages)
1344                         break;
1345         }
1346         rcu_read_unlock();
1347         return ret;
1348 }
1349 EXPORT_SYMBOL(find_get_pages_contig);
1350
1351 /**
1352  * find_get_pages_tag - find and return pages that match @tag
1353  * @mapping:    the address_space to search
1354  * @index:      the starting page index
1355  * @tag:        the tag index
1356  * @nr_pages:   the maximum number of pages
1357  * @pages:      where the resulting pages are placed
1358  *
1359  * Like find_get_pages, except we only return pages which are tagged with
1360  * @tag.   We update @index to index the next page for the traversal.
1361  */
1362 unsigned find_get_pages_tag(struct address_space *mapping, pgoff_t *index,
1363                         int tag, unsigned int nr_pages, struct page **pages)
1364 {
1365         struct radix_tree_iter iter;
1366         void **slot;
1367         unsigned ret = 0;
1368
1369         if (unlikely(!nr_pages))
1370                 return 0;
1371
1372         rcu_read_lock();
1373 restart:
1374         radix_tree_for_each_tagged(slot, &mapping->page_tree,
1375                                    &iter, *index, tag) {
1376                 struct page *page;
1377 repeat:
1378                 page = radix_tree_deref_slot(slot);
1379                 if (unlikely(!page))
1380                         continue;
1381
1382                 if (radix_tree_exception(page)) {
1383                         if (radix_tree_deref_retry(page)) {
1384                                 /*
1385                                  * Transient condition which can only trigger
1386                                  * when entry at index 0 moves out of or back
1387                                  * to root: none yet gotten, safe to restart.
1388                                  */
1389                                 goto restart;
1390                         }
1391                         /*
1392                          * A shadow entry of a recently evicted page.
1393                          *
1394                          * Those entries should never be tagged, but
1395                          * this tree walk is lockless and the tags are
1396                          * looked up in bulk, one radix tree node at a
1397                          * time, so there is a sizable window for page
1398                          * reclaim to evict a page we saw tagged.
1399                          *
1400                          * Skip over it.
1401                          */
1402                         continue;
1403                 }
1404
1405                 if (!page_cache_get_speculative(page))
1406                         goto repeat;
1407
1408                 /* Has the page moved? */
1409                 if (unlikely(page != *slot)) {
1410                         page_cache_release(page);
1411                         goto repeat;
1412                 }
1413
1414                 pages[ret] = page;
1415                 if (++ret == nr_pages)
1416                         break;
1417         }
1418
1419         rcu_read_unlock();
1420
1421         if (ret)
1422                 *index = pages[ret - 1]->index + 1;
1423
1424         return ret;
1425 }
1426 EXPORT_SYMBOL(find_get_pages_tag);
1427
1428 /*
1429  * CD/DVDs are error prone. When a medium error occurs, the driver may fail
1430  * a _large_ part of the i/o request. Imagine the worst scenario:
1431  *
1432  *      ---R__________________________________________B__________
1433  *         ^ reading here                             ^ bad block(assume 4k)
1434  *
1435  * read(R) => miss => readahead(R...B) => media error => frustrating retries
1436  * => failing the whole request => read(R) => read(R+1) =>
1437  * readahead(R+1...B+1) => bang => read(R+2) => read(R+3) =>
1438  * readahead(R+3...B+2) => bang => read(R+3) => read(R+4) =>
1439  * readahead(R+4...B+3) => bang => read(R+4) => read(R+5) => ......
1440  *
1441  * It is going insane. Fix it by quickly scaling down the readahead size.
1442  */
1443 static void shrink_readahead_size_eio(struct file *filp,
1444                                         struct file_ra_state *ra)
1445 {
1446         ra->ra_pages /= 4;
1447 }
1448
1449 /**
1450  * do_generic_file_read - generic file read routine
1451  * @filp:       the file to read
1452  * @ppos:       current file position
1453  * @iter:       data destination
1454  * @written:    already copied
1455  *
1456  * This is a generic file read routine, and uses the
1457  * mapping->a_ops->readpage() function for the actual low-level stuff.
1458  *
1459  * This is really ugly. But the goto's actually try to clarify some
1460  * of the logic when it comes to error handling etc.
1461  */
1462 static ssize_t do_generic_file_read(struct file *filp, loff_t *ppos,
1463                 struct iov_iter *iter, ssize_t written)
1464 {
1465         struct address_space *mapping = filp->f_mapping;
1466         struct inode *inode = mapping->host;
1467         struct file_ra_state *ra = &filp->f_ra;
1468         pgoff_t index;
1469         pgoff_t last_index;
1470         pgoff_t prev_index;
1471         unsigned long offset;      /* offset into pagecache page */
1472         unsigned int prev_offset;
1473         int error = 0;
1474
1475         index = *ppos >> PAGE_CACHE_SHIFT;
1476         prev_index = ra->prev_pos >> PAGE_CACHE_SHIFT;
1477         prev_offset = ra->prev_pos & (PAGE_CACHE_SIZE-1);
1478         last_index = (*ppos + iter->count + PAGE_CACHE_SIZE-1) >> PAGE_CACHE_SHIFT;
1479         offset = *ppos & ~PAGE_CACHE_MASK;
1480
1481         for (;;) {
1482                 struct page *page;
1483                 pgoff_t end_index;
1484                 loff_t isize;
1485                 unsigned long nr, ret;
1486
1487                 cond_resched();
1488 find_page:
1489                 page = find_get_page(mapping, index);
1490                 if (!page) {
1491                         page_cache_sync_readahead(mapping,
1492                                         ra, filp,
1493                                         index, last_index - index);
1494                         page = find_get_page(mapping, index);
1495                         if (unlikely(page == NULL))
1496                                 goto no_cached_page;
1497                 }
1498                 if (PageReadahead(page)) {
1499                         page_cache_async_readahead(mapping,
1500                                         ra, filp, page,
1501                                         index, last_index - index);
1502                 }
1503                 if (!PageUptodate(page)) {
1504                         if (inode->i_blkbits == PAGE_CACHE_SHIFT ||
1505                                         !mapping->a_ops->is_partially_uptodate)
1506                                 goto page_not_up_to_date;
1507                         if (!trylock_page(page))
1508                                 goto page_not_up_to_date;
1509                         /* Did it get truncated before we got the lock? */
1510                         if (!page->mapping)
1511                                 goto page_not_up_to_date_locked;
1512                         if (!mapping->a_ops->is_partially_uptodate(page,
1513                                                         offset, iter->count))
1514                                 goto page_not_up_to_date_locked;
1515                         unlock_page(page);
1516                 }
1517 page_ok:
1518                 /*
1519                  * i_size must be checked after we know the page is Uptodate.
1520                  *
1521                  * Checking i_size after the Uptodate check allows us to calculate
1522                  * the correct value for "nr", which means the zero-filled
1523                  * part of the page is not copied back to userspace (unless
1524                  * another truncate extends the file - this is desired though).
1525                  */
1526
1527                 isize = i_size_read(inode);
1528                 end_index = (isize - 1) >> PAGE_CACHE_SHIFT;
1529                 if (unlikely(!isize || index > end_index)) {
1530                         page_cache_release(page);
1531                         goto out;
1532                 }
1533
1534                 /* nr is the maximum number of bytes to copy from this page */
1535                 nr = PAGE_CACHE_SIZE;
1536                 if (index == end_index) {
1537                         nr = ((isize - 1) & ~PAGE_CACHE_MASK) + 1;
1538                         if (nr <= offset) {
1539                                 page_cache_release(page);
1540                                 goto out;
1541                         }
1542                 }
1543                 nr = nr - offset;
1544
1545                 /* If users can be writing to this page using arbitrary
1546                  * virtual addresses, take care about potential aliasing
1547                  * before reading the page on the kernel side.
1548                  */
1549                 if (mapping_writably_mapped(mapping))
1550                         flush_dcache_page(page);
1551
1552                 /*
1553                  * When a sequential read accesses a page several times,
1554                  * only mark it as accessed the first time.
1555                  */
1556                 if (prev_index != index || offset != prev_offset)
1557                         mark_page_accessed(page);
1558                 prev_index = index;
1559
1560                 /*
1561                  * Ok, we have the page, and it's up-to-date, so
1562                  * now we can copy it to user space...
1563                  */
1564
1565                 ret = copy_page_to_iter(page, offset, nr, iter);
1566                 offset += ret;
1567                 index += offset >> PAGE_CACHE_SHIFT;
1568                 offset &= ~PAGE_CACHE_MASK;
1569                 prev_offset = offset;
1570
1571                 page_cache_release(page);
1572                 written += ret;
1573                 if (!iov_iter_count(iter))
1574                         goto out;
1575                 if (ret < nr) {
1576                         error = -EFAULT;
1577                         goto out;
1578                 }
1579                 continue;
1580
1581 page_not_up_to_date:
1582                 /* Get exclusive access to the page ... */
1583                 error = lock_page_killable(page);
1584                 if (unlikely(error))
1585                         goto readpage_error;
1586
1587 page_not_up_to_date_locked:
1588                 /* Did it get truncated before we got the lock? */
1589                 if (!page->mapping) {
1590                         unlock_page(page);
1591                         page_cache_release(page);
1592                         continue;
1593                 }
1594
1595                 /* Did somebody else fill it already? */
1596                 if (PageUptodate(page)) {
1597                         unlock_page(page);
1598                         goto page_ok;
1599                 }
1600
1601 readpage:
1602                 /*
1603                  * A previous I/O error may have been due to temporary
1604                  * failures, e.g. multipath errors.
1605                  * PG_error will be set again if readpage fails.
1606                  */
1607                 ClearPageError(page);
1608                 /* Start the actual read. The read will unlock the page. */
1609                 error = mapping->a_ops->readpage(filp, page);
1610
1611                 if (unlikely(error)) {
1612                         if (error == AOP_TRUNCATED_PAGE) {
1613                                 page_cache_release(page);
1614                                 error = 0;
1615                                 goto find_page;
1616                         }
1617                         goto readpage_error;
1618                 }
1619
1620                 if (!PageUptodate(page)) {
1621                         error = lock_page_killable(page);
1622                         if (unlikely(error))
1623                                 goto readpage_error;
1624                         if (!PageUptodate(page)) {
1625                                 if (page->mapping == NULL) {
1626                                         /*
1627                                          * invalidate_mapping_pages got it
1628                                          */
1629                                         unlock_page(page);
1630                                         page_cache_release(page);
1631                                         goto find_page;
1632                                 }
1633                                 unlock_page(page);
1634                                 shrink_readahead_size_eio(filp, ra);
1635                                 error = -EIO;
1636                                 goto readpage_error;
1637                         }
1638                         unlock_page(page);
1639                 }
1640
1641                 goto page_ok;
1642
1643 readpage_error:
1644                 /* UHHUH! A synchronous read error occurred. Report it */
1645                 page_cache_release(page);
1646                 goto out;
1647
1648 no_cached_page:
1649                 /*
1650                  * Ok, it wasn't cached, so we need to create a new
1651                  * page..
1652                  */
1653                 page = page_cache_alloc_cold(mapping);
1654                 if (!page) {
1655                         error = -ENOMEM;
1656                         goto out;
1657                 }
1658                 error = add_to_page_cache_lru(page, mapping,
1659                                                 index, GFP_KERNEL);
1660                 if (error) {
1661                         page_cache_release(page);
1662                         if (error == -EEXIST) {
1663                                 error = 0;
1664                                 goto find_page;
1665                         }
1666                         goto out;
1667                 }
1668                 goto readpage;
1669         }
1670
1671 out:
1672         ra->prev_pos = prev_index;
1673         ra->prev_pos <<= PAGE_CACHE_SHIFT;
1674         ra->prev_pos |= prev_offset;
1675
1676         *ppos = ((loff_t)index << PAGE_CACHE_SHIFT) + offset;
1677         file_accessed(filp);
1678         return written ? written : error;
1679 }
1680
1681 /**
1682  * generic_file_read_iter - generic filesystem read routine
1683  * @iocb:       kernel I/O control block
1684  * @iter:       destination for the data read
1685  *
1686  * This is the "read_iter()" routine for all filesystems
1687  * that can use the page cache directly.
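 *
 * Filesystems typically hook this up directly in their file_operations,
 * for example (illustrative only; foo_file_operations is not a structure
 * in this file):
 *
 *	const struct file_operations foo_file_operations = {
 *		.read_iter	= generic_file_read_iter,
 *		...
 *	};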
1688  */
1689 ssize_t
1690 generic_file_read_iter(struct kiocb *iocb, struct iov_iter *iter)
1691 {
1692         struct file *file = iocb->ki_filp;
1693         ssize_t retval = 0;
1694         loff_t *ppos = &iocb->ki_pos;
1695         loff_t pos = *ppos;
1696
1697         if (io_is_direct(file)) {
1698                 struct address_space *mapping = file->f_mapping;
1699                 struct inode *inode = mapping->host;
1700                 size_t count = iov_iter_count(iter);
1701                 loff_t size;
1702
1703                 if (!count)
1704                         goto out; /* skip atime */
1705                 size = i_size_read(inode);
1706                 retval = filemap_write_and_wait_range(mapping, pos,
1707                                         pos + count - 1);
1708                 if (!retval) {
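                        /*
                         * Give ->direct_IO() a copy of the iterator so that a
                         * short direct read leaves @iter itself untouched; it
                         * is advanced below only by the amount actually read,
                         * which keeps it positioned correctly for the buffered
                         * fallback further down.
                         */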
1709                         struct iov_iter data = *iter;
1710                         retval = mapping->a_ops->direct_IO(READ, iocb, &data, pos);
1711                 }
1712
1713                 if (retval > 0) {
1714                         *ppos = pos + retval;
1715                         iov_iter_advance(iter, retval);
1716                 }
1717
1718                 /*
1719                  * Btrfs can have a short DIO read if we encounter
1720                  * compressed extents, so if there was an error, or if
1721                  * we've already read everything we wanted to, or if
1722                  * there was a short read because we hit EOF, go ahead
1723                  * and return.  Otherwise fall through to buffered I/O for
1724                  * the rest of the read.  Buffered reads will not work for
1725                  * DAX files, so don't bother trying.
1726                  */
1727                 if (retval < 0 || !iov_iter_count(iter) || *ppos >= size ||
1728                     IS_DAX(inode)) {
1729                         file_accessed(file);
1730                         goto out;
1731                 }
1732         }
1733
1734         retval = do_generic_file_read(file, ppos, iter, retval);
1735 out:
1736         return retval;
1737 }
1738 EXPORT_SYMBOL(generic_file_read_iter);
1739
1740 #ifdef CONFIG_MMU
1741 /**
1742  * page_cache_read - adds requested page to the page cache if not already there
1743  * @file:       file to read
1744  * @offset:     page index
1745  *
1746  * This adds the requested page to the page cache if it isn't already there,
1747  * and schedules an I/O to read in its contents from disk.
1748  */
1749 static int page_cache_read(struct file *file, pgoff_t offset)
1750 {
1751         struct address_space *mapping = file->f_mapping;
1752         struct page *page;
1753         int ret;
1754
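        /*
         * ->readpage() may return AOP_TRUNCATED_PAGE if the page was
         * truncated and dropped from the cache while it was being read;
         * in that case just retry with a freshly allocated page.
         */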
1755         do {
1756                 page = page_cache_alloc_cold(mapping);
1757                 if (!page)
1758                         return -ENOMEM;
1759
1760                 ret = add_to_page_cache_lru(page, mapping, offset, GFP_KERNEL);
1761                 if (ret == 0)
1762                         ret = mapping->a_ops->readpage(file, page);
1763                 else if (ret == -EEXIST)
1764                         ret = 0; /* losing race to add is OK */
1765
1766                 page_cache_release(page);
1767
1768         } while (ret == AOP_TRUNCATED_PAGE);
1769
1770         return ret;
1771 }
1772
1773 #define MMAP_LOTSAMISS  (100)
1774
1775 /*
1776  * Synchronous readahead happens when we don't even find
1777  * a page in the page cache at all.
1778  */
1779 static void do_sync_mmap_readahead(struct vm_area_struct *vma,
1780                                    struct file_ra_state *ra,
1781                                    struct file *file,
1782                                    pgoff_t offset)
1783 {
1784         unsigned long ra_pages;
1785         struct address_space *mapping = file->f_mapping;
1786
1787         /* If we don't want any read-ahead, don't bother */
1788         if (vma->vm_flags & VM_RAND_READ)
1789                 return;
1790         if (!ra->ra_pages)
1791                 return;
1792
1793         if (vma->vm_flags & VM_SEQ_READ) {
1794                 page_cache_sync_readahead(mapping, ra, file, offset,
1795                                           ra->ra_pages);
1796                 return;
1797         }
1798
1799         /* Avoid banging the cache line if not needed */
1800         if (ra->mmap_miss < MMAP_LOTSAMISS * 10)
1801                 ra->mmap_miss++;
1802
1803         /*
1804          * Do we miss much more than hit in this file? If so,
1805          * stop bothering with read-ahead. It will only hurt.
1806          */
1807         if (ra->mmap_miss > MMAP_LOTSAMISS)
1808                 return;
1809
1810         /*
1811          * mmap read-around: read a window of ra_pages pages centred on the fault
1812          */
1813         ra_pages = max_sane_readahead(ra->ra_pages);
1814         ra->start = max_t(long, 0, offset - ra_pages / 2);
1815         ra->size = ra_pages;
1816         ra->async_size = ra_pages / 4;
1817         ra_submit(ra, mapping, file);
1818 }
1819
1820 /*
1821  * Asynchronous readahead happens when we find the page with PG_readahead set,
1822  * so we want to possibly extend the readahead further.
1823  */
1824 static void do_async_mmap_readahead(struct vm_area_struct *vma,
1825                                     struct file_ra_state *ra,
1826                                     struct file *file,
1827                                     struct page *page,
1828                                     pgoff_t offset)
1829 {
1830         struct address_space *mapping = file->f_mapping;
1831
1832         /* If we don't want any read-ahead, don't bother */
1833         if (vma->vm_flags & VM_RAND_READ)
1834                 return;
1835         if (ra->mmap_miss > 0)
1836                 ra->mmap_miss--;
1837         if (PageReadahead(page))
1838                 page_cache_async_readahead(mapping, ra, file,
1839                                            page, offset, ra->ra_pages);
1840 }
1841
1842 /**
1843  * filemap_fault - read in file data for page fault handling
1844  * @vma:        vma in which the fault was taken
1845  * @vmf:        struct vm_fault containing details of the fault
1846  *
1847  * filemap_fault() is invoked via the vma operations vector for a
1848  * mapped memory region to read in file data during a page fault.
1849  *
1850  * The gotos are kind of ugly, but this streamlines the normal case of having
1851  * it in the page cache, and handles the special cases reasonably without
1852  * having a lot of duplicated code.
1853  *
1854  * vma->vm_mm->mmap_sem must be held on entry.
1855  *
1856  * If our return value has VM_FAULT_RETRY set, it's because
1857  * lock_page_or_retry() returned 0.
1858  * The mmap_sem has usually been released in this case.
1859  * See __lock_page_or_retry() for the exception.
1860  *
1861  * If our return value does not have VM_FAULT_RETRY set, the mmap_sem
1862  * has not been released.
1863  *
1864  * We never return with VM_FAULT_RETRY and a bit from VM_FAULT_ERROR set.
1865  */
1866 int filemap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
1867 {
1868         int error;
1869         struct file *file = vma->vm_file;
1870         struct address_space *mapping = file->f_mapping;
1871         struct file_ra_state *ra = &file->f_ra;
1872         struct inode *inode = mapping->host;
1873         pgoff_t offset = vmf->pgoff;
1874         struct page *page;
1875         loff_t size;
1876         int ret = 0;
1877
1878         size = round_up(i_size_read(inode), PAGE_CACHE_SIZE);
1879         if (offset >= size >> PAGE_CACHE_SHIFT)
1880                 return VM_FAULT_SIGBUS;
1881
1882         /*
1883          * Do we have something in the page cache already?
1884          */
1885         page = find_get_page(mapping, offset);
1886         if (likely(page) && !(vmf->flags & FAULT_FLAG_TRIED)) {
1887                 /*
1888                  * We found the page, so try async readahead before
1889                  * waiting for the lock.
1890                  */
1891                 do_async_mmap_readahead(vma, ra, file, page, offset);
1892         } else if (!page) {
1893                 /* No page in the page cache at all */
1894                 do_sync_mmap_readahead(vma, ra, file, offset);
1895                 count_vm_event(PGMAJFAULT);
1896                 mem_cgroup_count_vm_event(vma->vm_mm, PGMAJFAULT);
1897                 ret = VM_FAULT_MAJOR;
1898 retry_find:
1899                 page = find_get_page(mapping, offset);
1900                 if (!page)
1901                         goto no_cached_page;
1902         }
1903
1904         if (!lock_page_or_retry(page, vma->vm_mm, vmf->flags)) {
1905                 page_cache_release(page);
1906                 return ret | VM_FAULT_RETRY;
1907         }
1908
1909         /* Did it get truncated? */
1910         if (unlikely(page->mapping != mapping)) {
1911                 unlock_page(page);
1912                 put_page(page);
1913                 goto retry_find;
1914         }
1915         VM_BUG_ON_PAGE(page->index != offset, page);
1916
1917         /*
1918          * We have a locked page in the page cache, now we need to check
1919          * that it's up-to-date. If not, it is going to be due to an error.
1920          */
1921         if (unlikely(!PageUptodate(page)))
1922                 goto page_not_uptodate;
1923
1924         /*
1925          * Found the page and have a reference on it.
1926          * We must recheck i_size under page lock.
1927          */
1928         size = round_up(i_size_read(inode), PAGE_CACHE_SIZE);
1929         if (unlikely(offset >= size >> PAGE_CACHE_SHIFT)) {
1930                 unlock_page(page);
1931                 page_cache_release(page);
1932                 return VM_FAULT_SIGBUS;
1933         }
1934
1935         vmf->page = page;
1936         return ret | VM_FAULT_LOCKED;
1937
1938 no_cached_page:
1939         /*
1940          * We're only likely to ever get here if MADV_RANDOM is in
1941          * effect.
1942          */
1943         error = page_cache_read(file, offset);
1944
1945         /*
1946          * The page we want has now been added to the page cache.
1947          * In the unlikely event that someone removed it in the
1948          * meantime, we'll just come back here and read it again.
1949          */
1950         if (error >= 0)
1951                 goto retry_find;
1952
1953         /*
1954          * An error return from page_cache_read can result if the
1955          * system is low on memory, or a problem occurs while trying
1956          * to schedule I/O.
1957          */
1958         if (error == -ENOMEM)
1959                 return VM_FAULT_OOM;
1960         return VM_FAULT_SIGBUS;
1961
1962 page_not_uptodate:
1963         /*
1964          * Umm, take care of errors if the page isn't up-to-date.
1965          * Try to re-read it _once_. We do this synchronously,
1966          * because there really aren't any performance issues here
1967          * and we need to check for errors.
1968          */
1969         ClearPageError(page);
1970         error = mapping->a_ops->readpage(file, page);
1971         if (!error) {
1972                 wait_on_page_locked(page);
1973                 if (!PageUptodate(page))
1974                         error = -EIO;
1975         }
1976         page_cache_release(page);
1977
1978         if (!error || error == AOP_TRUNCATED_PAGE)
1979                 goto retry_find;
1980
1981         /* Things didn't work out. Return SIGBUS to tell the mm layer so. */
1982         shrink_readahead_size_eio(file, ra);
1983         return VM_FAULT_SIGBUS;
1984 }
1985 EXPORT_SYMBOL(filemap_fault);
1986
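/*
 * filemap_map_pages - map already-cached pages around a fault
 *
 * Walk the page cache from vmf->pgoff to vmf->max_pgoff and, for every page
 * that is present, uptodate and neither under readahead nor hwpoisoned,
 * install it directly into the page table (vmf->pte points at the pte slot
 * for vmf->pgoff).  Anything that cannot be mapped cheaply is skipped and
 * left for filemap_fault() to handle on a later fault.
 */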
1987 void filemap_map_pages(struct vm_area_struct *vma, struct vm_fault *vmf)
1988 {
1989         struct radix_tree_iter iter;
1990         void **slot;
1991         struct file *file = vma->vm_file;
1992         struct address_space *mapping = file->f_mapping;
1993         loff_t size;
1994         struct page *page;
1995         unsigned long address = (unsigned long) vmf->virtual_address;
1996         unsigned long addr;
1997         pte_t *pte;
1998
1999         rcu_read_lock();
2000         radix_tree_for_each_slot(slot, &mapping->page_tree, &iter, vmf->pgoff) {
2001                 if (iter.index > vmf->max_pgoff)
2002                         break;
2003 repeat:
2004                 page = radix_tree_deref_slot(slot);
2005                 if (unlikely(!page))
2006                         goto next;
2007                 if (radix_tree_exception(page)) {
2008                         if (radix_tree_deref_retry(page))
2009                                 break;
2010                         else
2011                                 goto next;
2012                 }
2013
2014                 if (!page_cache_get_speculative(page))
2015                         goto repeat;
2016
2017                 /* Has the page moved? */
2018                 if (unlikely(page != *slot)) {
2019                         page_cache_release(page);
2020                         goto repeat;
2021                 }
2022
2023                 if (!PageUptodate(page) ||
2024                                 PageReadahead(page) ||
2025                                 PageHWPoison(page))
2026                         goto skip;
2027                 if (!trylock_page(page))
2028                         goto skip;
2029
2030                 if (page->mapping != mapping || !PageUptodate(page))
2031                         goto unlock;
2032
2033                 size = round_up(i_size_read(mapping->host), PAGE_CACHE_SIZE);
2034                 if (page->index >= size >> PAGE_CACHE_SHIFT)
2035                         goto unlock;
2036
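                /*
                 * vmf->pte is the pte slot for vmf->pgoff; index this page's
                 * slot relative to it and only populate it if it is still
                 * empty.
                 */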
2037                 pte = vmf->pte + page->index - vmf->pgoff;
2038                 if (!pte_none(*pte))
2039                         goto unlock;
2040
2041                 if (file->f_ra.mmap_miss > 0)
2042                         file->f_ra.mmap_miss--;
2043                 addr = address + (page->index - vmf->pgoff) * PAGE_SIZE;
2044                 do_set_pte(vma, addr, page, pte, false, false);
2045                 unlock_page(page);
2046                 goto next;
2047 unlock:
2048                 unlock_page(page);
2049 skip:
2050                 page_cache_release(page);
2051 next:
2052                 if (iter.index == vmf->max_pgoff)
2053                         break;
2054         }
2055         rcu_read_unlock();
2056 }
2057 EXPORT_SYMBOL(filemap_map_pages);
2058
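/*
 * filemap_page_mkwrite - prepare a page cache page for a write fault
 *
 * Update the file times, then lock and dirty the page and wait for it to
 * become stable, so that writeback during a filesystem freeze sees the dirty
 * page and write-protects it again.  Returns VM_FAULT_LOCKED (with the page
 * left locked) on success, or VM_FAULT_NOPAGE if the page was truncated away
 * in the meantime.
 */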
2059 int filemap_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
2060 {
2061         struct page *page = vmf->page;
2062         struct inode *inode = file_inode(vma->vm_file);
2063         int ret = VM_FAULT_LOCKED;
2064
2065         sb_start_pagefault(inode->i_sb);
2066         file_update_time(vma->vm_file);
2067         lock_page(page);
2068         if (page->mapping != inode->i_mapping) {
2069                 unlock_page(page);
2070                 ret = VM_FAULT_NOPAGE;
2071                 goto out;
2072         }
2073         /*
2074          * We mark the page dirty already here so that when freeze is in
2075          * progress, we are guaranteed that writeback during freezing will
2076          * see the dirty page and writeprotect it again.
2077          */
2078         set_page_dirty(page);
2079         wait_for_stable_page(page);
2080 out:
2081         sb_end_pagefault(inode->i_sb);
2082         return ret;
2083 }
2084 EXPORT_SYMBOL(filemap_page_mkwrite);
2085
2086 const struct vm_operations_struct generic_file_vm_ops = {
2087         .fault          = filemap_fault,
2088         .map_pages      = filemap_map_pages,
2089         .page_mkwrite   = filemap_page_mkwrite,
2090 };
2091
2092 /* This is used for a general mmap of a disk file */
2093
2094 int generic_file_mmap(struct file * file, struct vm_area_struct * vma)
2095 {
2096         struct address_space *mapping = file->f_mapping;
2097
2098         if (!mapping->a_ops->readpage)
2099                 return -ENOEXEC;
2100         file_accessed(file);
2101         vma->vm_ops = &generic_file_vm_ops;
2102         return 0;
2103 }
2104
2105 /*
2106  * This is for filesystems which do not implement ->writepage.
2107  */
2108 int generic_file_readonly_mmap(struct file *file, struct vm_area_struct *vma)
2109 {
2110         if ((vma->vm_flags & VM_SHARED) && (vma->vm_flags & VM_MAYWRITE))
2111                 return -EINVAL;
2112         return generic_file_mmap(file, vma);
2113 }
2114 #else
2115 int generic_file_mmap(struct file * file, struct vm_area_struct * vma)
2116 {
2117         return -ENOSYS;
2118 }
2119 int generic_file_readonly_mmap(struct file * file, struct vm_area_struct * vma)
2120 {
2121         return -ENOSYS;
2122 }
2123 #endif /* CONFIG_MMU */
2124
2125 EXPORT_SYMBOL(generic_file_mmap);
2126 EXPORT_SYMBOL(generic_file_readonly_mmap);
2127
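/*
 * Wait for a page that a filler has been started on to finish being read.
 * Returns the page, still referenced, on success, or ERR_PTR(-EIO) (with the
 * reference dropped) if the read did not bring the page uptodate.
 */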
2128 static struct page *wait_on_page_read(struct page *page)
2129 {
2130         if (!IS_ERR(page)) {
2131                 wait_on_page_locked(page);
2132                 if (!PageUptodate(page)) {
2133                         page_cache_release(page);
2134                         page = ERR_PTR(-EIO);
2135                 }
2136         }
2137         return page;
2138 }
2139
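/*
 * Find the page at @index or, if it is not cached, allocate it, add it to the
 * page cache and LRU and run @filler on it.  The page is returned with a
 * reference held but is not guaranteed to be uptodate; do_read_cache_page()
 * checks that and reruns the filler if necessary.
 */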
2140 static struct page *__read_cache_page(struct address_space *mapping,
2141                                 pgoff_t index,
2142                                 int (*filler)(void *, struct page *),
2143                                 void *data,
2144                                 gfp_t gfp)
2145 {
2146         struct page *page;
2147         int err;
2148 repeat:
2149         page = find_get_page(mapping, index);
2150         if (!page) {
2151                 page = __page_cache_alloc(gfp | __GFP_COLD);
2152                 if (!page)
2153                         return ERR_PTR(-ENOMEM);
2154                 err = add_to_page_cache_lru(page, mapping, index, gfp);
2155                 if (unlikely(err)) {
2156                         page_cache_release(page);
2157                         if (err == -EEXIST)
2158                                 goto repeat;
2159                         /* Presumably ENOMEM for radix tree node */
2160                         return ERR_PTR(err);
2161                 }
2162                 err = filler(data, page);
2163                 if (err < 0) {
2164                         page_cache_release(page);
2165                         page = ERR_PTR(err);
2166                 } else {
2167                         page = wait_on_page_read(page);
2168                 }
2169         }
2170         return page;
2171 }
2172
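/*
 * Common helper for read_cache_page() and read_cache_page_gfp(): look the
 * page up (filling it if it was absent) and, if it is still not uptodate,
 * lock it and run the filler again, retrying if the page was truncated
 * underneath us.  The page is marked accessed before it is returned.
 */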
2173 static struct page *do_read_cache_page(struct address_space *mapping,
2174                                 pgoff_t index,
2175                                 int (*filler)(void *, struct page *),
2176                                 void *data,
2177                                 gfp_t gfp)
2178
2179 {
2180         struct page *page;
2181         int err;
2182
2183 retry:
2184         page = __read_cache_page(mapping, index, filler, data, gfp);
2185         if (IS_ERR(page))
2186                 return page;
2187         if (PageUptodate(page))
2188                 goto out;
2189
2190         lock_page(page);
2191         if (!page->mapping) {
2192                 unlock_page(page);
2193                 page_cache_release(page);
2194                 goto retry;
2195         }
2196         if (PageUptodate(page)) {
2197                 unlock_page(page);
2198                 goto out;
2199         }
2200         err = filler(data, page);
2201         if (err < 0) {
2202                 page_cache_release(page);
2203                 return ERR_PTR(err);
2204         } else {
2205                 page = wait_on_page_read(page);
2206                 if (IS_ERR(page))
2207                         return page;
2208         }
2209 out:
2210         mark_page_accessed(page);
2211         return page;
2212 }
2213
2214 /**
2215  * read_cache_page - read into page cache, fill it if needed
2216  * @mapping:    the page's address_space
2217  * @index:      the page index
2218  * @filler:     function to perform the read
2219  * @data:       first arg to filler(data, page) function, often left as NULL
2220  *
2221  * Read into the page cache. If a page already exists, and PageUptodate() is
2222  * not set, try to fill the page and wait for it to become unlocked.
2223  *
2224  * If the page does not get brought uptodate, return -EIO.
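 *
 * A minimal usage sketch (the filler callback, its private data and the
 * error handling here are illustrative only):
 *
 *	page = read_cache_page(mapping, index, my_filler, my_data);
 *	if (IS_ERR(page))
 *		return PTR_ERR(page);
 *	... use the page, then drop the reference with page_cache_release() ...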
2225  */
2226 struct page *read_cache_page(struct address_space *mapping,
2227                                 pgoff_t index,
2228                                 int (*filler)(void *, struct page *),
2229                                 void *data)
2230 {
2231         return do_read_cache_page(mapping, index, filler, data, mapping_gfp_mask(mapping));
2232 }
2233 EXPORT_SYMBOL(read_cache_page);
2234
2235 /**
2236  * read_cache_page_gfp - read into page cache, using specified page allocation flags.
2237  * @mapping:    the page's address_space
2238  * @index:      the page index
2239  * @gfp:        the page allocator flags to use if allocating
2240  *
2241  * This is the same as "read_mapping_page(mapping, index, NULL)", but with
2242  * any new page allocations done using the specified allocation flags.
2243  *
2244  * If the page does not get brought uptodate, return -EIO.
2245  */
2246 struct page *read_cache_page_gfp(struct address_space *mapping,
2247                                 pgoff_t index,
2248                                 gfp_t gfp)
2249 {
2250         filler_t *filler = (filler_t *)mapping->a_ops->readpage;
2251
2252         return do_read_cache_page(mapping, index, filler, NULL, gfp);
2253 }
2254 EXPORT_SYMBOL(read_cache_page_gfp);
2255
2256 /*
2257  * Performs necessary checks before doing a write
2258  *
2259  * Can adjust writing position or amount of bytes to write.
2260  * Returns appropriate error code that caller should return or
2261  * zero in case that write should be allowed.
2262  */
2263 inline int generic_write_checks(struct file *file, loff_t *pos, size_t *count, int isblk)
2264 {
2265         struct inode *inode = file->f_mapping->host;
2266         unsigned long limit = rlimit(RLIMIT_FSIZE);
2267
2268         if (unlikely(*pos < 0))
2269                 return -EINVAL;
2270
2271         if (!isblk) {
2272                 /* FIXME: this is for backwards compatibility with 2.4 */
2273                 if (file->f_flags & O_APPEND)
2274                         *pos = i_size_read(inode);
2275
2276                 if (limit != RLIM_INFINITY) {
2277                         if (*pos >= limit) {
2278                                 send_sig(SIGXFSZ, current, 0);
2279                                 return -EFBIG;
2280                         }
2281                         if (*count > limit - (typeof(limit))*pos) {
2282                                 *count = limit - (typeof(limit))*pos;
2283                         }
2284                 }
2285         }
2286
2287         /*
2288          * LFS rule: without O_LARGEFILE the file may not grow past MAX_NON_LFS
2289          */
2290         if (unlikely(*pos + *count > MAX_NON_LFS &&
2291                                 !(file->f_flags & O_LARGEFILE))) {
2292                 if (*pos >= MAX_NON_LFS) {
2293                         return -EFBIG;
2294                 }
2295                 if (*count > MAX_NON_LFS - (unsigned long)*pos) {
2296                         *count = MAX_NON_LFS - (unsigned long)*pos;
2297                 }
2298         }
2299
2300         /*
2301          * Are we about to exceed the fs block limit?
2302          *
2303          * If we have written data it becomes a short write.  If we have
2304          * exceeded without writing data we send a signal and return EFBIG.
2305          * Linus frestrict idea will clean these up nicely..
2306          */
2307         if (likely(!isblk)) {
2308                 if (unlikely(*pos >= inode->i_sb->s_maxbytes)) {
2309                         if (*count || *pos > inode->i_sb->s_maxbytes) {
2310                                 return -EFBIG;
2311                         }
2312                         /* zero-length writes at ->s_maxbytes are OK */
2313                 }
2314
2315                 if (unlikely(*pos + *count > inode->i_sb->s_maxbytes))
2316                         *count = inode->i_sb->s_maxbytes - *pos;
2317         } else {
2318 #ifdef CONFIG_BLOCK
2319                 loff_t isize;
2320                 if (bdev_read_only(I_BDEV(inode)))
2321                         return -EPERM;
2322                 isize = i_size_read(inode);
2323                 if (*pos >= isize) {
2324                         if (*count || *pos > isize)
2325                                 return -ENOSPC;
2326                 }
2327
2328                 if (*pos + *count > isize)
2329                         *count = isize - *pos;
2330 #else
2331                 return -EPERM;
2332 #endif
2333         }
2334         return 0;
2335 }
2336 EXPORT_SYMBOL(generic_write_checks);
2337
2338 int pagecache_write_begin(struct file *file, struct address_space *mapping,
2339                                 loff_t pos, unsigned len, unsigned flags,
2340                                 struct page **pagep, void **fsdata)
2341 {
2342         const struct address_space_operations *aops = mapping->a_ops;
2343
2344         return aops->write_begin(file, mapping, pos, len, flags,
2345                                                         pagep, fsdata);
2346 }
2347 EXPORT_SYMBOL(pagecache_write_begin);
2348
2349 int pagecache_write_end(struct file *file, struct address_space *mapping,
2350                                 loff_t pos, unsigned len, unsigned copied,
2351                                 struct page *page, void *fsdata)
2352 {
2353         const struct address_space_operations *aops = mapping->a_ops;
2354
2355         return aops->write_end(file, mapping, pos, len, copied, page, fsdata);
2356 }
2357 EXPORT_SYMBOL(pagecache_write_end);
2358
2359 ssize_t
2360 generic_file_direct_write(struct kiocb *iocb, struct iov_iter *from, loff_t pos)
2361 {
2362         struct file     *file = iocb->ki_filp;
2363         struct address_space *mapping = file->f_mapping;
2364         struct inode    *inode = mapping->host;
2365         ssize_t         written;
2366         size_t          write_len;
2367         pgoff_t         end;
2368         struct iov_iter data;
2369
2370         write_len = iov_iter_count(from);
2371         end = (pos + write_len - 1) >> PAGE_CACHE_SHIFT;
2372
2373         written = filemap_write_and_wait_range(mapping, pos, pos + write_len - 1);
2374         if (written)
2375                 goto out;
2376
2377         /*
2378          * After a write we want buffered reads to be sure to go to disk to get
2379          * the new data.  We invalidate clean cached pages from the region we're
2380          * about to write.  We do this *before* the write so that we can return
2381          * without clobbering -EIOCBQUEUED from ->direct_IO().
2382          */
2383         if (mapping->nrpages) {
2384                 written = invalidate_inode_pages2_range(mapping,
2385                                         pos >> PAGE_CACHE_SHIFT, end);
2386                 /*
2387                  * If a page cannot be invalidated, return 0 to fall back
2388                  * to buffered write.
2389                  */
2390                 if (written) {
2391                         if (written == -EBUSY)
2392                                 return 0;
2393                         goto out;
2394                 }
2395         }
2396
2397         data = *from;
2398         written = mapping->a_ops->direct_IO(WRITE, iocb, &data, pos);
2399
2400         /*
2401          * Finally, try again to invalidate clean pages which might have been
2402          * cached by non-direct readahead, or faulted in by get_user_pages()
2403          * if the source of the write was an mmap'ed region of the file
2404          * we're writing.  Either one is a pretty crazy thing to do,
2405          * so we don't support it 100%.  If this invalidation
2406          * fails, tough, the write still worked...
2407          */
2408         if (mapping->nrpages) {
2409                 invalidate_inode_pages2_range(mapping,
2410                                               pos >> PAGE_CACHE_SHIFT, end);
2411         }
2412
2413         if (written > 0) {
2414                 pos += written;
2415                 iov_iter_advance(from, written);
2416                 if (pos > i_size_read(inode) && !S_ISBLK(inode->i_mode)) {
2417                         i_size_write(inode, pos);
2418                         mark_inode_dirty(inode);
2419                 }
2420                 iocb->ki_pos = pos;
2421         }
2422 out:
2423         return written;
2424 }
2425 EXPORT_SYMBOL(generic_file_direct_write);
2426
2427 /*
2428  * Find or create a page at the given pagecache position. Return the locked
2429  * page. This function is specifically for buffered writes.
2430  */
2431 struct page *grab_cache_page_write_begin(struct address_space *mapping,
2432                                         pgoff_t index, unsigned flags)
2433 {
2434         struct page *page;
2435         int fgp_flags = FGP_LOCK|FGP_ACCESSED|FGP_WRITE|FGP_CREAT;
2436
2437         if (flags & AOP_FLAG_NOFS)
2438                 fgp_flags |= FGP_NOFS;
2439
2440         page = pagecache_get_page(mapping, index, fgp_flags,
2441                         mapping_gfp_mask(mapping));
2442         if (page)
2443                 wait_for_stable_page(page);
2444
2445         return page;
2446 }
2447 EXPORT_SYMBOL(grab_cache_page_write_begin);
2448
2449 ssize_t generic_perform_write(struct file *file,
2450                                 struct iov_iter *i, loff_t pos)
2451 {
2452         struct address_space *mapping = file->f_mapping;
2453         const struct address_space_operations *a_ops = mapping->a_ops;
2454         long status = 0;
2455         ssize_t written = 0;
2456         unsigned int flags = 0;
2457
2458         /*
2459          * Copies from kernel address space cannot fail (NFSD is a big user).
2460          */
2461         if (!iter_is_iovec(i))
2462                 flags |= AOP_FLAG_UNINTERRUPTIBLE;
2463
2464         do {
2465                 struct page *page;
2466                 unsigned long offset;   /* Offset into pagecache page */
2467                 unsigned long bytes;    /* Bytes to write to page */
2468                 size_t copied;          /* Bytes copied from user */
2469                 void *fsdata;
2470
2471                 offset = (pos & (PAGE_CACHE_SIZE - 1));
2472                 bytes = min_t(unsigned long, PAGE_CACHE_SIZE - offset,
2473                                                 iov_iter_count(i));
2474
2475 again:
2476                 /*
2477                  * Bring in the user page that we will copy from _first_.
2478                  * Otherwise there's a nasty deadlock on copying from the
2479                  * same page as we're writing to, without it being marked
2480                  * up-to-date.
2481                  *
2482                  * Not only is this an optimisation, but it is also required
2483                  * to check that the address is actually valid, when atomic
2484                  * usercopies are used, below.
2485                  */
2486                 if (unlikely(iov_iter_fault_in_readable(i, bytes))) {
2487                         status = -EFAULT;
2488                         break;
2489                 }
2490
2491                 status = a_ops->write_begin(file, mapping, pos, bytes, flags,
2492                                                 &page, &fsdata);
2493                 if (unlikely(status < 0))
2494                         break;
2495
2496                 if (mapping_writably_mapped(mapping))
2497                         flush_dcache_page(page);
2498
2499                 copied = iov_iter_copy_from_user_atomic(page, i, offset, bytes);
2500                 flush_dcache_page(page);
2501
2502                 status = a_ops->write_end(file, mapping, pos, bytes, copied,
2503                                                 page, fsdata);
2504                 if (unlikely(status < 0))
2505                         break;
2506                 copied = status;
2507
2508                 cond_resched();
2509
2510                 iov_iter_advance(i, copied);
2511                 if (unlikely(copied == 0)) {
2512                         /*
2513                          * If we were unable to copy any data at all, we must
2514                          * fall back to a single segment length write.
2515                          *
2516                          * If we didn't fallback here, we could livelock
2517                          * because not all segments in the iov can be copied at
2518                          * once without a pagefault.
2519                          */
2520                         bytes = min_t(unsigned long, PAGE_CACHE_SIZE - offset,
2521                                                 iov_iter_single_seg_count(i));
2522                         goto again;
2523                 }
2524                 pos += copied;
2525                 written += copied;
2526
2527                 balance_dirty_pages_ratelimited(mapping);
2528                 if (fatal_signal_pending(current)) {
2529                         status = -EINTR;
2530                         break;
2531                 }
2532         } while (iov_iter_count(i));
2533
2534         return written ? written : status;
2535 }
2536 EXPORT_SYMBOL(generic_perform_write);
2537
2538 /**
2539  * __generic_file_write_iter - write data to a file
2540  * @iocb:       IO state structure (file, offset, etc.)
2541  * @from:       iov_iter with data to write
2542  *
2543  * This function does all the work needed for actually writing data to a
2544  * file. It does all basic checks, removes SUID from the file, updates
2545  * modification times and calls proper subroutines depending on whether we
2546  * do direct IO or a standard buffered write.
2547  *
2548  * It expects i_mutex to be grabbed unless we work on a block device or similar
2549  * object which does not need locking at all.
2550  *
2551  * This function does *not* take care of syncing data in case of O_SYNC write.
2552  * The caller has to handle it, mainly because we want to avoid syncing
2553  * under i_mutex.
2554  */
2555 ssize_t __generic_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
2556 {
2557         struct file *file = iocb->ki_filp;
2558         struct address_space * mapping = file->f_mapping;
2559         struct inode    *inode = mapping->host;
2560         loff_t          pos = iocb->ki_pos;
2561         ssize_t         written = 0;
2562         ssize_t         err;
2563         ssize_t         status;
2564         size_t          count = iov_iter_count(from);
2565
2566         /* We can write back this queue in page reclaim */
2567         current->backing_dev_info = inode_to_bdi(inode);
2568         err = generic_write_checks(file, &pos, &count, S_ISBLK(inode->i_mode));
2569         if (err)
2570                 goto out;
2571
2572         if (count == 0)
2573                 goto out;
2574
2575         iov_iter_truncate(from, count);
2576
2577         err = file_remove_suid(file);
2578         if (err)
2579                 goto out;
2580
2581         err = file_update_time(file);
2582         if (err)
2583                 goto out;
2584
2585         if (io_is_direct(file)) {
2586                 loff_t endbyte;
2587
2588                 written = generic_file_direct_write(iocb, from, pos);
2589                 /*
2590                  * If the write stopped short of completing, fall back to
2591                  * buffered writes.  Some filesystems do this for writes to
2592                  * holes, for example.  For DAX files, a buffered write will
2593                  * not succeed (even if it did, DAX does not handle dirty
2594                  * page-cache pages correctly).
2595                  */
2596                 if (written < 0 || written == count || IS_DAX(inode))
2597                         goto out;
2598
2599                 pos += written;
2600                 count -= written;
2601
2602                 status = generic_perform_write(file, from, pos);
2603                 /*
2604                  * If generic_perform_write() returned a synchronous error
2605                  * then we want to return the number of bytes which were
2606                  * direct-written, or the error code if that was zero.  Note
2607                  * that this differs from normal direct-io semantics, which
2608                  * will return -EFOO even if some bytes were written.
2609                  */
2610                 if (unlikely(status < 0)) {
2611                         err = status;
2612                         goto out;
2613                 }
2614                 iocb->ki_pos = pos + status;
2615                 /*
2616                  * We need to ensure that the page cache pages are written to
2617                  * disk and invalidated to preserve the expected O_DIRECT
2618                  * semantics.
2619                  */
2620                 endbyte = pos + status - 1;
2621                 err = filemap_write_and_wait_range(file->f_mapping, pos, endbyte);
2622                 if (err == 0) {
2623                         written += status;
2624                         invalidate_mapping_pages(mapping,
2625                                                  pos >> PAGE_CACHE_SHIFT,
2626                                                  endbyte >> PAGE_CACHE_SHIFT);
2627                 } else {
2628                         /*
2629                          * We don't know how much we wrote, so just return
2630                          * the number of bytes which were direct-written
2631                          */
2632                 }
2633         } else {
2634                 written = generic_perform_write(file, from, pos);
2635                 if (likely(written >= 0))
2636                         iocb->ki_pos = pos + written;
2637         }
2638 out:
2639         current->backing_dev_info = NULL;
2640         return written ? written : err;
2641 }
2642 EXPORT_SYMBOL(__generic_file_write_iter);
2643
2644 /**
2645  * generic_file_write_iter - write data to a file
2646  * @iocb:       IO state structure
2647  * @from:       iov_iter with data to write
2648  *
2649  * This is a wrapper around __generic_file_write_iter() to be used by most
2650  * filesystems. It takes care of syncing the file in case of an O_SYNC write
2651  * and acquires i_mutex as needed.
2652  */
2653 ssize_t generic_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
2654 {
2655         struct file *file = iocb->ki_filp;
2656         struct inode *inode = file->f_mapping->host;
2657         ssize_t ret;
2658
2659         mutex_lock(&inode->i_mutex);
2660         ret = __generic_file_write_iter(iocb, from);
2661         mutex_unlock(&inode->i_mutex);
2662
2663         if (ret > 0) {
2664                 ssize_t err;
2665
2666                 err = generic_write_sync(file, iocb->ki_pos - ret, ret);
2667                 if (err < 0)
2668                         ret = err;
2669         }
2670         return ret;
2671 }
2672 EXPORT_SYMBOL(generic_file_write_iter);
2673
2674 /**
2675  * try_to_release_page() - release old fs-specific metadata on a page
2676  *
2677  * @page: the page which the kernel is trying to free
2678  * @gfp_mask: memory allocation flags (and I/O mode)
2679  *
2680  * The address_space is to try to release any data against the page
2681  * The address_space is asked to try to release any data held against the page
2682  * Otherwise return zero.
2683  *
2684  * This may also be called if PG_fscache is set on a page, indicating that the
2685  * page is known to the local caching routines.
2686  *
2687  * The @gfp_mask argument specifies whether I/O may be performed to release
2688  * this page (__GFP_IO), and whether the call may block (__GFP_WAIT & __GFP_FS).
2689  *
2690  */
2691 int try_to_release_page(struct page *page, gfp_t gfp_mask)
2692 {
2693         struct address_space * const mapping = page->mapping;
2694
2695         BUG_ON(!PageLocked(page));
2696         if (PageWriteback(page))
2697                 return 0;
2698
2699         if (mapping && mapping->a_ops->releasepage)
2700                 return mapping->a_ops->releasepage(page, gfp_mask);
2701         return try_to_free_buffers(page);
2702 }
2703
2704 EXPORT_SYMBOL(try_to_release_page);