/* Storage object read/write
 *
 * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public Licence
 * as published by the Free Software Foundation; either version
 * 2 of the Licence, or (at your option) any later version.
 */

#include <linux/mount.h>
#include <linux/slab.h>
#include <linux/file.h>
#include <linux/swap.h>
#include "internal.h"

/*
 * detect wake up events generated by the unlocking of pages in which we're
 * interested
 * - we use this to detect read completion of backing pages
 * - the caller holds the waitqueue lock
 */
static int cachefiles_read_waiter(wait_queue_t *wait, unsigned mode,
                                  int sync, void *_key)
{
        struct cachefiles_one_read *monitor =
                container_of(wait, struct cachefiles_one_read, monitor);
        struct cachefiles_object *object;
        struct wait_bit_key *key = _key;
        struct page *page = wait->private;

        ASSERT(key);

        _enter("{%lu},%u,%d,{%p,%u}",
               monitor->netfs_page->index, mode, sync,
               key->flags, key->bit_nr);

        if (key->flags != &page->flags ||
            key->bit_nr != PG_locked)
                return 0;

        _debug("--- monitor %p %lx ---", page, page->flags);

        if (!PageUptodate(page) && !PageError(page)) {
                /* unlocked, not uptodate and not erroneous? */
                _debug("page probably truncated");
        }

        /* remove from the waitqueue */
        list_del(&wait->task_list);

        /* move onto the action list and queue for FS-Cache thread pool */
        ASSERT(monitor->op);

        object = container_of(monitor->op->op.object,
                              struct cachefiles_object, fscache);

        spin_lock(&object->work_lock);
        list_add_tail(&monitor->op_link, &monitor->op->to_do);
        spin_unlock(&object->work_lock);

        fscache_enqueue_retrieval(monitor->op);
        return 0;
}
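
/* Note on context: cachefiles_read_waiter() runs as a waitqueue callback
 * with the page waitqueue lock held, so it must not sleep or take sleeping
 * locks; that is why it only moves the monitor onto op->to_do under the
 * work_lock spinlock and defers the actual data copy to the FS-Cache
 * thread pool via fscache_enqueue_retrieval().
 */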

/*
 * handle a probably truncated page
 * - check to see if the page is still relevant and reissue the read if
 *   possible
 * - return -EIO on error, -ENODATA if the page is gone, -EINPROGRESS if we
 *   must wait again and 0 if successful
 */
static int cachefiles_read_reissue(struct cachefiles_object *object,
                                   struct cachefiles_one_read *monitor)
{
        struct address_space *bmapping = object->backer->d_inode->i_mapping;
        struct page *backpage = monitor->back_page, *backpage2;
        int ret;

        _enter("{ino=%lx},{%lx,%lx}",
               object->backer->d_inode->i_ino,
               backpage->index, backpage->flags);

        /* skip if the page was truncated away completely */
        if (backpage->mapping != bmapping) {
                _leave(" = -ENODATA [mapping]");
                return -ENODATA;
        }

        backpage2 = find_get_page(bmapping, backpage->index);
        if (!backpage2) {
                _leave(" = -ENODATA [gone]");
                return -ENODATA;
        }

        if (backpage != backpage2) {
                put_page(backpage2);
                _leave(" = -ENODATA [different]");
                return -ENODATA;
        }

        /* the page is still there and we already have a ref on it, so we don't
         * need a second */
        put_page(backpage2);

        INIT_LIST_HEAD(&monitor->op_link);
        add_page_wait_queue(backpage, &monitor->monitor);

        if (trylock_page(backpage)) {
                ret = -EIO;
                if (PageError(backpage))
                        goto unlock_discard;
                ret = 0;
                if (PageUptodate(backpage))
                        goto unlock_discard;

                _debug("reissue read");
                ret = bmapping->a_ops->readpage(NULL, backpage);
                if (ret < 0)
                        goto unlock_discard;
        }

        /* but the page may have been read before the monitor was installed, so
         * the monitor may miss the event - so we have to ensure that we do get
         * one in such a case */
        if (trylock_page(backpage)) {
                _debug("jumpstart %p {%lx}", backpage, backpage->flags);
                unlock_page(backpage);
        }

        /* it'll reappear on the todo list */
        _leave(" = -EINPROGRESS");
        return -EINPROGRESS;

unlock_discard:
        unlock_page(backpage);
        spin_lock_irq(&object->work_lock);
        list_del(&monitor->op_link);
        spin_unlock_irq(&object->work_lock);
        _leave(" = %d", ret);
        return ret;
}
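
/* Note on the trylock "jumpstart" above: add_page_wait_queue() only hooks
 * unlock events that occur after the monitor is installed.  If the read
 * completed (or failed) in the window before installation, no further
 * unlock would ever arrive, so a successful trylock_page()/unlock_page()
 * pair manufactures one, guaranteeing the monitor eventually fires.
 */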

/*
 * copy data from backing pages to netfs pages to complete a read operation
 * - driven by FS-Cache's thread pool
 */
static void cachefiles_read_copier(struct fscache_operation *_op)
{
        struct cachefiles_one_read *monitor;
        struct cachefiles_object *object;
        struct fscache_retrieval *op;
        struct pagevec pagevec;
        int error, max;

        op = container_of(_op, struct fscache_retrieval, op);
        object = container_of(op->op.object,
                              struct cachefiles_object, fscache);

        _enter("{ino=%lu}", object->backer->d_inode->i_ino);

        pagevec_init(&pagevec, 0);

        max = 8;
        spin_lock_irq(&object->work_lock);

        while (!list_empty(&op->to_do)) {
                monitor = list_entry(op->to_do.next,
                                     struct cachefiles_one_read, op_link);
                list_del(&monitor->op_link);

                spin_unlock_irq(&object->work_lock);

                _debug("- copy {%lu}", monitor->back_page->index);

        recheck:
                if (test_bit(FSCACHE_COOKIE_INVALIDATING,
                             &object->fscache.cookie->flags)) {
                        error = -ESTALE;
                } else if (PageUptodate(monitor->back_page)) {
                        copy_highpage(monitor->netfs_page, monitor->back_page);
                        fscache_mark_page_cached(monitor->op,
                                                 monitor->netfs_page);
                        error = 0;
                } else if (!PageError(monitor->back_page)) {
                        /* the page has probably been truncated */
                        error = cachefiles_read_reissue(object, monitor);
                        if (error == -EINPROGRESS)
                                goto next;
                        goto recheck;
                } else {
                        cachefiles_io_error_obj(
                                object,
                                "Readpage failed on backing file %lx",
                                (unsigned long) monitor->back_page->flags);
                        error = -EIO;
                }

                page_cache_release(monitor->back_page);

                fscache_end_io(op, monitor->netfs_page, error);
                page_cache_release(monitor->netfs_page);
                fscache_retrieval_complete(op, 1);
                fscache_put_retrieval(op);
                kfree(monitor);

        next:
                /* let the thread pool have some air occasionally */
                max--;
                if (max < 0 || need_resched()) {
                        if (!list_empty(&op->to_do))
                                fscache_enqueue_retrieval(op);
                        _leave(" [maxed out]");
                        return;
                }

                spin_lock_irq(&object->work_lock);
        }

        spin_unlock_irq(&object->work_lock);
        _leave("");
}
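
/* Note: the copier bounds the work done per pass (the max countdown above)
 * and also yields early if need_resched() is set; any work still left on
 * op->to_do is pushed back to the FS-Cache thread pool with
 * fscache_enqueue_retrieval() rather than hogging this worker.
 */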

/*
 * read the page corresponding to the given netfs page from the backing file
 * - an uncertain page is simply discarded, to be tried again another time
 */
static int cachefiles_read_backing_file_one(struct cachefiles_object *object,
                                            struct fscache_retrieval *op,
                                            struct page *netpage)
{
        struct cachefiles_one_read *monitor;
        struct address_space *bmapping;
        struct page *newpage, *backpage;
        int ret;

        _enter("");

        _debug("read back %p{%lu,%d}",
               netpage, netpage->index, page_count(netpage));

        monitor = kzalloc(sizeof(*monitor), cachefiles_gfp);
        if (!monitor)
                goto nomem;

        monitor->netfs_page = netpage;
        monitor->op = fscache_get_retrieval(op);

        init_waitqueue_func_entry(&monitor->monitor, cachefiles_read_waiter);

        /* attempt to get hold of the backing page */
        bmapping = object->backer->d_inode->i_mapping;
        newpage = NULL;

        for (;;) {
                backpage = find_get_page(bmapping, netpage->index);
                if (backpage)
                        goto backing_page_already_present;

                if (!newpage) {
                        newpage = __page_cache_alloc(cachefiles_gfp |
                                                     __GFP_COLD);
                        if (!newpage)
                                goto nomem_monitor;
                }

                ret = add_to_page_cache_lru(newpage, bmapping,
                                            netpage->index, cachefiles_gfp);
                if (ret == 0)
                        goto installed_new_backing_page;
                if (ret != -EEXIST)
                        goto nomem_page;
        }

        /* we've installed a new backing page, so now we need to start
         * it reading */
installed_new_backing_page:
        _debug("- new %p", newpage);

        backpage = newpage;
        newpage = NULL;

read_backing_page:
        ret = bmapping->a_ops->readpage(NULL, backpage);
        if (ret < 0)
                goto read_error;

        /* set the monitor to transfer the data across */
monitor_backing_page:
        _debug("- monitor add");

        /* install the monitor */
        page_cache_get(monitor->netfs_page);
        page_cache_get(backpage);
        monitor->back_page = backpage;
        monitor->monitor.private = backpage;
        add_page_wait_queue(backpage, &monitor->monitor);
        monitor = NULL;

        /* but the page may have been read before the monitor was installed, so
         * the monitor may miss the event - so we have to ensure that we do get
         * one in such a case */
        if (trylock_page(backpage)) {
                _debug("jumpstart %p {%lx}", backpage, backpage->flags);
                unlock_page(backpage);
        }
        goto success;

        /* if the backing page is already present, it can be in one of
         * three states: read in progress, read failed or read okay */
backing_page_already_present:
        _debug("- present");

        if (newpage) {
                page_cache_release(newpage);
                newpage = NULL;
        }

        if (PageError(backpage))
                goto io_error;

        if (PageUptodate(backpage))
                goto backing_page_already_uptodate;

        if (!trylock_page(backpage))
                goto monitor_backing_page;
        _debug("read %p {%lx}", backpage, backpage->flags);
        goto read_backing_page;

        /* the backing page is already up to date, attach the netfs
         * page to the pagecache and LRU and copy the data across */
backing_page_already_uptodate:
        _debug("- uptodate");

        fscache_mark_page_cached(op, netpage);

        copy_highpage(netpage, backpage);
        fscache_end_io(op, netpage, 0);
        fscache_retrieval_complete(op, 1);

success:
        _debug("success");
        ret = 0;

out:
        if (backpage)
                page_cache_release(backpage);
        if (monitor) {
                fscache_put_retrieval(monitor->op);
                kfree(monitor);
        }
        _leave(" = %d", ret);
        return ret;

read_error:
        _debug("read error %d", ret);
        if (ret == -ENOMEM) {
                fscache_retrieval_complete(op, 1);
                goto out;
        }
io_error:
        cachefiles_io_error_obj(object, "Page read error on backing file");
        fscache_retrieval_complete(op, 1);
        ret = -ENOBUFS;
        goto out;

nomem_page:
        page_cache_release(newpage);
nomem_monitor:
        fscache_put_retrieval(monitor->op);
        kfree(monitor);
nomem:
        fscache_retrieval_complete(op, 1);
        _leave(" = -ENOMEM");
        return -ENOMEM;
}
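
/* Reference-counting summary for the single-page path above: an installed
 * monitor holds a ref on the netfs page, a ref on the backing page and a
 * ref on the retrieval op (taken with fscache_get_retrieval() at
 * allocation time).  All three are released by cachefiles_read_copier()
 * once the data has been copied and fscache_end_io() has been called.
 */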

/*
 * read a page from the cache or allocate a block in which to store it
 * - cache withdrawal is prevented by the caller
 * - returns -EINTR if interrupted
 * - returns -ENOMEM if we ran out of memory
 * - returns -ENOBUFS if no buffers can be made available
 * - returns -ENOBUFS if the page is beyond EOF
 * - if the page is backed by a block in the cache:
 *   - a read will be started which will call the callback on completion
 *   - 0 will be returned
 * - else if the page is unbacked:
 *   - the metadata will be retained
 *   - -ENODATA will be returned
 */
int cachefiles_read_or_alloc_page(struct fscache_retrieval *op,
                                  struct page *page,
                                  gfp_t gfp)
{
        struct cachefiles_object *object;
        struct cachefiles_cache *cache;
        struct pagevec pagevec;
        struct inode *inode;
        sector_t block0, block;
        unsigned shift;
        int ret;

        object = container_of(op->op.object,
                              struct cachefiles_object, fscache);
        cache = container_of(object->fscache.cache,
                             struct cachefiles_cache, cache);

        _enter("{%p},{%lx},,,", object, page->index);

        if (!object->backer)
                goto enobufs;

        inode = object->backer->d_inode;
        ASSERT(S_ISREG(inode->i_mode));
        ASSERT(inode->i_mapping->a_ops->bmap);
        ASSERT(inode->i_mapping->a_ops->readpages);

        /* calculate the shift required to use bmap */
        if (inode->i_sb->s_blocksize > PAGE_SIZE)
                goto enobufs;

        shift = PAGE_SHIFT - inode->i_sb->s_blocksize_bits;

        op->op.flags &= FSCACHE_OP_KEEP_FLAGS;
        op->op.flags |= FSCACHE_OP_ASYNC;
        op->op.processor = cachefiles_read_copier;

        pagevec_init(&pagevec, 0);

        /* we assume the absence or presence of the first block is a good
         * enough indication for the page as a whole
         * - TODO: don't use bmap() for this as it is _not_ actually good
         *   enough: it doesn't indicate errors, but it's all we've got
         *   for the moment
         */
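        /* worked example of the shift calculation (illustrative values):
         * with PAGE_SHIFT = 12 (4KB pages) and a 1KB-block filesystem
         * (s_blocksize_bits = 10), shift = 2, so page index 3 maps to
         * block0 = 3 << 2 = 12, the first 1KB block covered by that page */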
        block0 = page->index;
        block0 <<= shift;

        block = inode->i_mapping->a_ops->bmap(inode->i_mapping, block0);
        _debug("%llx -> %llx",
               (unsigned long long) block0,
               (unsigned long long) block);

        if (block) {
                /* submit the apparently valid page to the backing fs to be
                 * read from disk */
                ret = cachefiles_read_backing_file_one(object, op, page);
        } else if (cachefiles_has_space(cache, 0, 1) == 0) {
                /* there's space in the cache we can use */
                fscache_mark_page_cached(op, page);
                fscache_retrieval_complete(op, 1);
                ret = -ENODATA;
        } else {
                goto enobufs;
        }

        _leave(" = %d", ret);
        return ret;

enobufs:
        fscache_retrieval_complete(op, 1);
        _leave(" = -ENOBUFS");
        return -ENOBUFS;
}
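
/* Note on the return-code contract here: 0 means a backing-page read has
 * been started and fscache_end_io() will run on completion; -ENODATA
 * means no cached data exists yet, though the page has been marked for
 * later caching; -ENOBUFS tells the netfs the cache cannot help and the
 * page should be fetched from the server directly.
 */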

/*
 * read the pages corresponding to the given set of netfs pages from the
 * backing file
 * - any uncertain pages are simply discarded, to be tried again another time
 */
static int cachefiles_read_backing_file(struct cachefiles_object *object,
                                        struct fscache_retrieval *op,
                                        struct list_head *list)
{
        struct cachefiles_one_read *monitor = NULL;
        struct address_space *bmapping = object->backer->d_inode->i_mapping;
        struct page *newpage = NULL, *netpage, *_n, *backpage = NULL;
        int ret = 0;

        _enter("");

        list_for_each_entry_safe(netpage, _n, list, lru) {
                list_del(&netpage->lru);

                _debug("read back %p{%lu,%d}",
                       netpage, netpage->index, page_count(netpage));

                if (!monitor) {
                        monitor = kzalloc(sizeof(*monitor), cachefiles_gfp);
                        if (!monitor)
                                goto nomem;

                        monitor->op = fscache_get_retrieval(op);
                        init_waitqueue_func_entry(&monitor->monitor,
                                                  cachefiles_read_waiter);
                }

                for (;;) {
                        backpage = find_get_page(bmapping, netpage->index);
                        if (backpage)
                                goto backing_page_already_present;

                        if (!newpage) {
                                newpage = __page_cache_alloc(cachefiles_gfp |
                                                             __GFP_COLD);
                                if (!newpage)
                                        goto nomem;
                        }

                        ret = add_to_page_cache_lru(newpage, bmapping,
                                                    netpage->index,
                                                    cachefiles_gfp);
                        if (ret == 0)
                                goto installed_new_backing_page;
                        if (ret != -EEXIST)
                                goto nomem;
                }

                /* we've installed a new backing page, so now we need
                 * to start it reading */
        installed_new_backing_page:
                _debug("- new %p", newpage);

                backpage = newpage;
                newpage = NULL;

        reread_backing_page:
                ret = bmapping->a_ops->readpage(NULL, backpage);
                if (ret < 0)
                        goto read_error;

                /* add the netfs page to the pagecache and LRU, and set the
                 * monitor to transfer the data across */
        monitor_backing_page:
                _debug("- monitor add");

                ret = add_to_page_cache_lru(netpage, op->mapping,
                                            netpage->index, cachefiles_gfp);
                if (ret < 0) {
                        if (ret == -EEXIST) {
                                page_cache_release(netpage);
                                fscache_retrieval_complete(op, 1);
                                continue;
                        }
                        goto nomem;
                }

                /* install a monitor */
                page_cache_get(netpage);
                monitor->netfs_page = netpage;

                page_cache_get(backpage);
                monitor->back_page = backpage;
                monitor->monitor.private = backpage;
                add_page_wait_queue(backpage, &monitor->monitor);
                monitor = NULL;

                /* but the page may have been read before the monitor was
                 * installed, so the monitor may miss the event - so we have to
                 * ensure that we do get one in such a case */
                if (trylock_page(backpage)) {
                        _debug("2unlock %p {%lx}", backpage, backpage->flags);
                        unlock_page(backpage);
                }

                page_cache_release(backpage);
                backpage = NULL;

                page_cache_release(netpage);
                netpage = NULL;
                continue;

                /* if the backing page is already present, it can be in one of
                 * three states: read in progress, read failed or read okay */
        backing_page_already_present:
                _debug("- present %p", backpage);

                if (PageError(backpage))
                        goto io_error;

                if (PageUptodate(backpage))
                        goto backing_page_already_uptodate;

                _debug("- not ready %p{%lx}", backpage, backpage->flags);

                if (!trylock_page(backpage))
                        goto monitor_backing_page;

                if (PageError(backpage)) {
                        _debug("error %lx", backpage->flags);
                        unlock_page(backpage);
                        goto io_error;
                }

                if (PageUptodate(backpage))
                        goto backing_page_already_uptodate_unlock;

                /* we've locked a page that's neither up to date nor erroneous,
                 * so we need to attempt to read it again */
                goto reread_backing_page;

                /* the backing page is already up to date, attach the netfs
                 * page to the pagecache and LRU and copy the data across */
        backing_page_already_uptodate_unlock:
                _debug("uptodate %lx", backpage->flags);
                unlock_page(backpage);
        backing_page_already_uptodate:
                _debug("- uptodate");

                ret = add_to_page_cache_lru(netpage, op->mapping,
                                            netpage->index, cachefiles_gfp);
                if (ret < 0) {
                        if (ret == -EEXIST) {
                                page_cache_release(netpage);
                                fscache_retrieval_complete(op, 1);
                                continue;
                        }
                        goto nomem;
                }

                copy_highpage(netpage, backpage);

                page_cache_release(backpage);
                backpage = NULL;

                fscache_mark_page_cached(op, netpage);

                /* the netpage is unlocked and marked up to date here */
                fscache_end_io(op, netpage, 0);
                page_cache_release(netpage);
                netpage = NULL;
                fscache_retrieval_complete(op, 1);
                continue;
        }

        netpage = NULL;

        _debug("out");

out:
        /* tidy up */
        if (newpage)
                page_cache_release(newpage);
        if (netpage)
                page_cache_release(netpage);
        if (backpage)
                page_cache_release(backpage);
        if (monitor) {
                fscache_put_retrieval(op);
                kfree(monitor);
        }

        list_for_each_entry_safe(netpage, _n, list, lru) {
                list_del(&netpage->lru);
                page_cache_release(netpage);
                fscache_retrieval_complete(op, 1);
        }

        _leave(" = %d", ret);
        return ret;

nomem:
        _debug("nomem");
        ret = -ENOMEM;
        goto record_page_complete;

read_error:
        _debug("read error %d", ret);
        if (ret == -ENOMEM)
                goto record_page_complete;
io_error:
        cachefiles_io_error_obj(object, "Page read error on backing file");
        ret = -ENOBUFS;
record_page_complete:
        fscache_retrieval_complete(op, 1);
        goto out;
}
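
/* Note: the monitor allocated at the top of the loop above is reused from
 * one iteration to the next - it is only consumed (reset to NULL) once it
 * has been attached to a backing page, so netfs pages whose backing page
 * is already up to date cost no extra allocation.
 */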

/*
 * read a list of pages from the cache or allocate blocks in which to store
 * them
 */
int cachefiles_read_or_alloc_pages(struct fscache_retrieval *op,
                                   struct list_head *pages,
                                   unsigned *nr_pages,
                                   gfp_t gfp)
{
        struct cachefiles_object *object;
        struct cachefiles_cache *cache;
        struct list_head backpages;
        struct pagevec pagevec;
        struct inode *inode;
        struct page *page, *_n;
        unsigned shift, nrbackpages;
        int ret, ret2, space;

        object = container_of(op->op.object,
                              struct cachefiles_object, fscache);
        cache = container_of(object->fscache.cache,
                             struct cachefiles_cache, cache);

        _enter("{OBJ%x,%d},,%d,,",
               object->fscache.debug_id, atomic_read(&op->op.usage),
               *nr_pages);

        if (!object->backer)
                goto all_enobufs;

        space = 1;
        if (cachefiles_has_space(cache, 0, *nr_pages) < 0)
                space = 0;

        inode = object->backer->d_inode;
        ASSERT(S_ISREG(inode->i_mode));
        ASSERT(inode->i_mapping->a_ops->bmap);
        ASSERT(inode->i_mapping->a_ops->readpages);

        /* calculate the shift required to use bmap */
        if (inode->i_sb->s_blocksize > PAGE_SIZE)
                goto all_enobufs;

        shift = PAGE_SHIFT - inode->i_sb->s_blocksize_bits;

        pagevec_init(&pagevec, 0);

        op->op.flags &= FSCACHE_OP_KEEP_FLAGS;
        op->op.flags |= FSCACHE_OP_ASYNC;
        op->op.processor = cachefiles_read_copier;

        INIT_LIST_HEAD(&backpages);
        nrbackpages = 0;

        ret = space ? -ENODATA : -ENOBUFS;
        list_for_each_entry_safe(page, _n, pages, lru) {
                sector_t block0, block;

                /* we assume the absence or presence of the first block is a
                 * good enough indication for the page as a whole
                 * - TODO: don't use bmap() for this as it is _not_ actually
                 *   good enough: it doesn't indicate errors, but it's all
                 *   we've got for the moment
                 */
                block0 = page->index;
                block0 <<= shift;

                block = inode->i_mapping->a_ops->bmap(inode->i_mapping,
                                                      block0);
                _debug("%llx -> %llx",
                       (unsigned long long) block0,
                       (unsigned long long) block);

                if (block) {
                        /* we have data - add it to the list to give to the
                         * backing fs */
                        list_move(&page->lru, &backpages);
                        (*nr_pages)--;
                        nrbackpages++;
                } else if (space && pagevec_add(&pagevec, page) == 0) {
                        fscache_mark_pages_cached(op, &pagevec);
                        fscache_retrieval_complete(op, 1);
                        ret = -ENODATA;
                } else {
                        fscache_retrieval_complete(op, 1);
                }
        }

        if (pagevec_count(&pagevec) > 0)
                fscache_mark_pages_cached(op, &pagevec);

        if (list_empty(pages))
                ret = 0;

        /* submit the apparently valid pages to the backing fs to be read from
         * disk */
        if (nrbackpages > 0) {
                ret2 = cachefiles_read_backing_file(object, op, &backpages);
                if (ret2 == -ENOMEM || ret2 == -EINTR)
                        ret = ret2;
        }

        _leave(" = %d [nr=%u%s]",
               ret, *nr_pages, list_empty(pages) ? " empty" : "");
        return ret;

all_enobufs:
        fscache_retrieval_complete(op, *nr_pages);
        return -ENOBUFS;
}
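
/* Note: cachefiles_read_or_alloc_pages() partitions the request - pages
 * with a backing block are moved onto a private list and handed to
 * cachefiles_read_backing_file(), while unbacked pages are merely marked
 * cacheable (space permitting) and left on the caller's list; *nr_pages
 * is decremented only for the pages actually being read.
 */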

/*
 * allocate a block in the cache in which to store a page
 * - cache withdrawal is prevented by the caller
 * - returns -EINTR if interrupted
 * - returns -ENOMEM if we ran out of memory
 * - returns -ENOBUFS if no buffers can be made available
 * - returns -ENOBUFS if the page is beyond EOF
 * - otherwise:
 *   - the metadata will be retained
 *   - 0 will be returned
 */
int cachefiles_allocate_page(struct fscache_retrieval *op,
                             struct page *page,
                             gfp_t gfp)
{
        struct cachefiles_object *object;
        struct cachefiles_cache *cache;
        int ret;

        object = container_of(op->op.object,
                              struct cachefiles_object, fscache);
        cache = container_of(object->fscache.cache,
                             struct cachefiles_cache, cache);

        _enter("%p,{%lx},", object, page->index);

        ret = cachefiles_has_space(cache, 0, 1);
        if (ret == 0)
                fscache_mark_page_cached(op, page);
        else
                ret = -ENOBUFS;

        fscache_retrieval_complete(op, 1);
        _leave(" = %d", ret);
        return ret;
}
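
/* Note: cachefiles_has_space(cache, 0, 1) only checks that one more page
 * fits within the cache's configured space limits; no backing block is
 * reserved at this point.  The block is expected to materialise when the
 * netfs later writes the page out via cachefiles_write_page().
 */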

/*
 * allocate blocks in the cache in which to store a set of pages
 * - cache withdrawal is prevented by the caller
 * - returns -EINTR if interrupted
 * - returns -ENOMEM if we ran out of memory
 * - returns -ENOBUFS if some buffers couldn't be made available
 * - returns -ENOBUFS if some pages are beyond EOF
 * - otherwise:
 *   - -ENODATA will be returned
 * - metadata will be retained for any page marked
 */
int cachefiles_allocate_pages(struct fscache_retrieval *op,
                              struct list_head *pages,
                              unsigned *nr_pages,
                              gfp_t gfp)
{
        struct cachefiles_object *object;
        struct cachefiles_cache *cache;
        struct pagevec pagevec;
        struct page *page;
        int ret;

        object = container_of(op->op.object,
                              struct cachefiles_object, fscache);
        cache = container_of(object->fscache.cache,
                             struct cachefiles_cache, cache);

        _enter("%p,,,%d,", object, *nr_pages);

        ret = cachefiles_has_space(cache, 0, *nr_pages);
        if (ret == 0) {
                pagevec_init(&pagevec, 0);

                list_for_each_entry(page, pages, lru) {
                        if (pagevec_add(&pagevec, page) == 0)
                                fscache_mark_pages_cached(op, &pagevec);
                }

                if (pagevec_count(&pagevec) > 0)
                        fscache_mark_pages_cached(op, &pagevec);
                ret = -ENODATA;
        } else {
                ret = -ENOBUFS;
        }

        fscache_retrieval_complete(op, *nr_pages);
        _leave(" = %d", ret);
        return ret;
}
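
/* Note on the pagevec batching above: pagevec_add() returns the number of
 * slots still free, so a return of 0 means the pagevec just became full
 * and needs flushing.  fscache_mark_pages_cached() marks each page in the
 * batch and reinitialises the pagevec ready for reuse.
 */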

/*
 * request a page be stored in the cache
 * - cache withdrawal is prevented by the caller
 * - this request may be ignored if there's no cache block available, in which
 *   case -ENOBUFS will be returned
 * - if the op is in progress, 0 will be returned
 */
int cachefiles_write_page(struct fscache_storage *op, struct page *page)
{
        struct cachefiles_object *object;
        struct cachefiles_cache *cache;
        mm_segment_t old_fs;
        struct file *file;
        struct path path;
        loff_t pos, eof;
        size_t len;
        void *data;
        int ret;

        ASSERT(op != NULL);
        ASSERT(page != NULL);

        object = container_of(op->op.object,
                              struct cachefiles_object, fscache);

        _enter("%p,%p{%lx},,,", object, page, page->index);

        if (!object->backer) {
                _leave(" = -ENOBUFS");
                return -ENOBUFS;
        }

        ASSERT(S_ISREG(object->backer->d_inode->i_mode));

        cache = container_of(object->fscache.cache,
                             struct cachefiles_cache, cache);

        /* write the page to the backing filesystem and let it store it in its
         * own time */
        path.mnt = cache->mnt;
        path.dentry = object->backer;
        file = dentry_open(&path, O_RDWR | O_LARGEFILE, cache->cache_cred);
        if (IS_ERR(file)) {
                ret = PTR_ERR(file);
        } else {
                ret = -EIO;
                if (file->f_op->write) {
                        pos = (loff_t) page->index << PAGE_SHIFT;

                        /* we mustn't write more data than we have, so we have
                         * to beware of a partial page at EOF */
                        eof = object->fscache.store_limit_l;
                        len = PAGE_SIZE;
                        if (eof & ~PAGE_MASK) {
                                ASSERTCMP(pos, <, eof);
                                if (eof - pos < PAGE_SIZE) {
                                        _debug("cut short %llx to %llx",
                                               pos, eof);
                                        len = eof - pos;
                                        ASSERTCMP(pos + len, ==, eof);
                                }
                        }
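
                        /* worked example of the clamp (illustrative
                         * values): with PAGE_SIZE 0x1000, store limit
                         * eof = 0x1800 and pos = 0x1000, eof has a
                         * partial-page tail (eof & ~PAGE_MASK == 0x800),
                         * and eof - pos = 0x800 < PAGE_SIZE, so len is
                         * cut from 0x1000 to 0x800 and pos + len == eof
                         * as asserted */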

                        data = kmap(page);
                        file_start_write(file);
                        old_fs = get_fs();
                        set_fs(KERNEL_DS);
                        ret = file->f_op->write(
                                file, (const void __user *) data, len, &pos);
                        set_fs(old_fs);
                        kunmap(page);
                        file_end_write(file);
                        if (ret != len)
                                ret = -EIO;
                }
                fput(file);
        }

        if (ret < 0) {
                if (ret == -EIO)
                        cachefiles_io_error_obj(
                                object, "Write page to backing file failed");
                ret = -ENOBUFS;
        }

        _leave(" = %d", ret);
        return ret;
}
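
/* Note on the get_fs()/set_fs(KERNEL_DS) dance above: f_op->write()
 * expects a __user pointer, but the kmap()ed page buffer lives in kernel
 * address space.  Temporarily widening the addressing limit lets the
 * backing filesystem's write path accept the kernel buffer; the previous
 * limit is restored immediately after the call.
 */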

/*
 * detach a backing block from a page
 * - cache withdrawal is prevented by the caller
 */
void cachefiles_uncache_page(struct fscache_object *_object, struct page *page)
{
        struct cachefiles_object *object;
        struct cachefiles_cache *cache;

        object = container_of(_object, struct cachefiles_object, fscache);
        cache = container_of(object->fscache.cache,
                             struct cachefiles_cache, cache);

        _enter("%p,{%lu}", object, page->index);

        spin_unlock(&object->fscache.cookie->lock);
}
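
/* Note: the seemingly unmatched spin_unlock() above is intentional - the
 * caller (__fscache_uncache_page()) acquires the cookie lock before
 * invoking the backend's uncache_page operation and relies on the backend
 * to drop it.
 */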