/*
 * GPL HEADER START
 *
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 * GPL HEADER END
 */
/*
 * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
 * Use is subject to license terms.
 *
 * Copyright (c) 2011, 2015, Intel Corporation.
 */
/*
 * This file is part of Lustre, http://www.lustre.org/
 * Lustre is a trademark of Sun Microsystems, Inc.
 *
 * Implementation of cl_page for VVP layer.
 *
 *   Author: Nikita Danilov <nikita.danilov@sun.com>
 *   Author: Jinshan Xiong <jinshan.xiong@whamcloud.com>
 */

#define DEBUG_SUBSYSTEM S_LLITE

#include <linux/atomic.h>
#include <linux/bitops.h>
#include <linux/mm.h>
#include <linux/mutex.h>
#include <linux/page-flags.h>
#include <linux/pagemap.h>

#include "../include/lustre_lite.h"

#include "llite_internal.h"
#include "vvp_internal.h"

/*****************************************************************************
 *
 * Page operations.
 *
 */

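/**
 * Release the reference on the vmpage that vvp_page_init() took; called on
 * the way out from both vvp_page_fini() and vvp_transient_page_fini().
 */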
static void vvp_page_fini_common(struct vvp_page *vpg)
{
        struct page *vmpage = vpg->vpg_page;

        LASSERT(vmpage);
        put_page(vmpage);
}

static void vvp_page_fini(const struct lu_env *env,
                          struct cl_page_slice *slice)
{
        struct vvp_page *vpg    = cl2vvp_page(slice);
        struct page     *vmpage = vpg->vpg_page;

        /*
         * vmpage->private was already cleared when page was moved into
         * VPG_FREEING state.
         */
        LASSERT((struct cl_page *)vmpage->private != slice->cpl_page);
        vvp_page_fini_common(vpg);
}

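/**
 * Implements cl_page_operations::cpo_own(): lock the vmpage and wait for
 * any writeback in progress. In nonblock mode, return -EAGAIN instead of
 * sleeping on either the page lock or the writeback bit.
 */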
static int vvp_page_own(const struct lu_env *env,
                        const struct cl_page_slice *slice, struct cl_io *io,
                        int nonblock)
{
        struct vvp_page *vpg    = cl2vvp_page(slice);
        struct page     *vmpage = vpg->vpg_page;

        LASSERT(vmpage);
        if (nonblock) {
                if (!trylock_page(vmpage))
                        return -EAGAIN;

                if (unlikely(PageWriteback(vmpage))) {
                        unlock_page(vmpage);
                        return -EAGAIN;
                }

                return 0;
        }

        lock_page(vmpage);
        wait_on_page_writeback(vmpage);

        return 0;
}

static void vvp_page_assume(const struct lu_env *env,
                            const struct cl_page_slice *slice,
                            struct cl_io *unused)
{
        struct page *vmpage = cl2vm_page(slice);

        LASSERT(vmpage);
        LASSERT(PageLocked(vmpage));
        wait_on_page_writeback(vmpage);
}

static void vvp_page_unassume(const struct lu_env *env,
                              const struct cl_page_slice *slice,
                              struct cl_io *unused)
{
        struct page *vmpage = cl2vm_page(slice);

        LASSERT(vmpage);
        LASSERT(PageLocked(vmpage));
}

static void vvp_page_disown(const struct lu_env *env,
                            const struct cl_page_slice *slice, struct cl_io *io)
{
        struct page *vmpage = cl2vm_page(slice);

        LASSERT(vmpage);
        LASSERT(PageLocked(vmpage));

        unlock_page(vmpage);
}

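/**
 * Implements cl_page_operations::cpo_discard(): drop a page from the page
 * cache. A page that was read ahead but never used is accounted in the
 * read-ahead statistics before it is invalidated.
 */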
static void vvp_page_discard(const struct lu_env *env,
                             const struct cl_page_slice *slice,
                             struct cl_io *unused)
{
        struct page     *vmpage = cl2vm_page(slice);
        struct vvp_page *vpg    = cl2vvp_page(slice);

        LASSERT(vmpage);
        LASSERT(PageLocked(vmpage));

        if (vpg->vpg_defer_uptodate && !vpg->vpg_ra_used)
                ll_ra_stats_inc(vmpage->mapping->host, RA_STAT_DISCARDED);

        ll_invalidate_page(vmpage);
}

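/**
 * Implements cl_page_operations::cpo_delete(): sever the link from the
 * vmpage back to the cl_page and drop the cl_page reference taken in
 * vvp_page_init(). The reference from the cl_page to the vmpage is dropped
 * later, in vvp_page_fini().
 */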
static void vvp_page_delete(const struct lu_env *env,
                            const struct cl_page_slice *slice)
{
        struct page      *vmpage = cl2vm_page(slice);
        struct inode     *inode  = vmpage->mapping->host;
        struct cl_object *obj    = slice->cpl_obj;
        struct cl_page   *page   = slice->cpl_page;
        int refc;

        LASSERT(PageLocked(vmpage));
        LASSERT((struct cl_page *)vmpage->private == page);
        LASSERT(inode == vvp_object_inode(obj));

        vvp_write_complete(cl2vvp(obj), cl2vvp_page(slice));

        /* Drop the reference count held in vvp_page_init */
        refc = atomic_dec_return(&page->cp_ref);
        LASSERTF(refc >= 1, "page = %p, refc = %d\n", page, refc);

        ClearPageUptodate(vmpage);
        ClearPagePrivate(vmpage);
        vmpage->private = 0;
        /*
         * Reference from vmpage to cl_page is removed, but the reference back
         * is still here. It is removed later in vvp_page_fini().
         */
}

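/**
 * Implements cl_page_operations::cpo_export(): set or clear PG_uptodate on
 * the vmpage so that the VM-visible state matches the cl_page state.
 */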
static void vvp_page_export(const struct lu_env *env,
                            const struct cl_page_slice *slice,
                            int uptodate)
{
        struct page *vmpage = cl2vm_page(slice);

        LASSERT(vmpage);
        LASSERT(PageLocked(vmpage));
        if (uptodate)
                SetPageUptodate(vmpage);
        else
                ClearPageUptodate(vmpage);
}

static int vvp_page_is_vmlocked(const struct lu_env *env,
                                const struct cl_page_slice *slice)
{
        return PageLocked(cl2vm_page(slice)) ? -EBUSY : -ENODATA;
}

static int vvp_page_prep_read(const struct lu_env *env,
                              const struct cl_page_slice *slice,
                              struct cl_io *unused)
{
        /* Skip the page already marked as PG_uptodate. */
        return PageUptodate(cl2vm_page(slice)) ? -EALREADY : 0;
}

static int vvp_page_prep_write(const struct lu_env *env,
                               const struct cl_page_slice *slice,
                               struct cl_io *unused)
{
        struct page *vmpage = cl2vm_page(slice);
        struct cl_page *pg = slice->cpl_page;

        LASSERT(PageLocked(vmpage));
        LASSERT(!PageDirty(vmpage));

        /* The ll_writepage path is not a sync write, so we need to set the
         * page writeback flag.
         */
        if (!pg->cp_sync_io)
                set_page_writeback(vmpage);

        vvp_write_pending(cl2vvp(slice->cpl_obj), cl2vvp_page(slice));

        return 0;
}

/**
 * Handles page transfer errors at the VM level.
 *
 * This takes the inode as a separate argument, because the inode on which
 * the error is to be set can be different from the \a vmpage inode in the
 * case of direct I/O.
 */
static void vvp_vmpage_error(struct inode *inode, struct page *vmpage, int ioret)
{
        struct vvp_object *obj = cl_inode2vvp(inode);

        if (ioret == 0) {
                ClearPageError(vmpage);
                obj->vob_discard_page_warned = 0;
        } else {
                SetPageError(vmpage);
                if (ioret == -ENOSPC)
                        set_bit(AS_ENOSPC, &inode->i_mapping->flags);
                else
                        set_bit(AS_EIO, &inode->i_mapping->flags);

                if ((ioret == -ESHUTDOWN || ioret == -EINTR) &&
                     obj->vob_discard_page_warned == 0) {
                        obj->vob_discard_page_warned = 1;
                        ll_dirty_page_discard_warn(vmpage, ioret);
                }
        }
}

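/**
 * Implements cl_page_operations::cpo_completion() for reads: on success,
 * export the page as up to date (unless read-ahead deferred that); on
 * failure, clear the deferred-uptodate state. For async reads, the page
 * lock is also released here.
 */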
static void vvp_page_completion_read(const struct lu_env *env,
                                     const struct cl_page_slice *slice,
                                     int ioret)
{
        struct vvp_page *vpg    = cl2vvp_page(slice);
        struct page     *vmpage = vpg->vpg_page;
        struct cl_page  *page   = slice->cpl_page;
        struct inode    *inode  = vvp_object_inode(page->cp_obj);

        LASSERT(PageLocked(vmpage));
        CL_PAGE_HEADER(D_PAGE, env, page, "completing READ with %d\n", ioret);

        if (vpg->vpg_defer_uptodate)
                ll_ra_count_put(ll_i2sbi(inode), 1);

        if (ioret == 0) {
                if (!vpg->vpg_defer_uptodate)
                        cl_page_export(env, page, 1);
        } else {
                vpg->vpg_defer_uptodate = 0;
        }

        if (!page->cp_sync_io)
                unlock_page(vmpage);
}

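/**
 * Implements cl_page_operations::cpo_completion() for writes: take the page
 * off the write-pending list and, for async writes, record any I/O error on
 * the vmpage and end writeback.
 */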
static void vvp_page_completion_write(const struct lu_env *env,
                                      const struct cl_page_slice *slice,
                                      int ioret)
{
        struct vvp_page *vpg    = cl2vvp_page(slice);
        struct cl_page  *pg     = slice->cpl_page;
        struct page     *vmpage = vpg->vpg_page;

        CL_PAGE_HEADER(D_PAGE, env, pg, "completing WRITE with %d\n", ioret);

        /*
         * TODO: It actually makes sense to add the page back into the oap
         * pending list, so that we don't need to take the page off the SoM
         * write pending list when we merely hit a recoverable error
         * (-ENOMEM, etc.).
         * To implement this, we just need to return a non-zero value from
         * the ->cpo_completion method. The underlying transfer would be
         * notified and would then re-add the page to the pending transfer
         * queue.  -jay
         */

        vpg->vpg_write_queued = 0;
        vvp_write_complete(cl2vvp(slice->cpl_obj), vpg);

        if (pg->cp_sync_io) {
                LASSERT(PageLocked(vmpage));
                LASSERT(!PageWriteback(vmpage));
        } else {
                LASSERT(PageWriteback(vmpage));
                /*
                 * Only mark the page in error when it's an async write,
                 * because applications won't wait for the IO to finish.
                 */
                vvp_vmpage_error(vvp_object_inode(pg->cp_obj), vmpage, ioret);

                end_page_writeback(vmpage);
        }
}

/**
 * Implements cl_page_operations::cpo_make_ready() method.
 *
 * This is called to yank a page from the transfer cache and send it out as
 * part of a transfer. This function try-locks the page. If the try-lock
 * fails, the page is owned by some concurrent IO and should be skipped
 * (this is bad, but hopefully a rare situation, as it usually results in
 * the transfer being shorter than possible).
 *
 * \retval 0       success, page can be placed into transfer
 *
 * \retval -EAGAIN page is either used by concurrent IO or has been
 * truncated. Skip it.
 */
static int vvp_page_make_ready(const struct lu_env *env,
                               const struct cl_page_slice *slice)
{
        struct page *vmpage = cl2vm_page(slice);
        struct cl_page *pg = slice->cpl_page;
        int result = 0;

        lock_page(vmpage);
        if (clear_page_dirty_for_io(vmpage)) {
                LASSERT(pg->cp_state == CPS_CACHED);
                /* This actually clears the dirty bit in the radix tree. */
                set_page_writeback(vmpage);
                vvp_write_pending(cl2vvp(slice->cpl_obj), cl2vvp_page(slice));
                CL_PAGE_HEADER(D_PAGE, env, pg, "readied\n");
        } else if (pg->cp_state == CPS_PAGEOUT) {
                /* is it possible for osc_flush_async_page() to already
                 * make it ready?
                 */
                result = -EALREADY;
        } else {
                CL_PAGE_DEBUG(D_ERROR, env, pg, "Unexpected page state %d.\n",
                              pg->cp_state);
                LBUG();
        }
        unlock_page(vmpage);
        return result;
}

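/**
 * Implements cl_page_operations::cpo_is_under_lock(): with a group lock the
 * whole file is locked, so \a max_index is extended to the end of the file
 * for READ, WRITE and FAULT IO.
 */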
static int vvp_page_is_under_lock(const struct lu_env *env,
                                  const struct cl_page_slice *slice,
                                  struct cl_io *io, pgoff_t *max_index)
{
        if (io->ci_type == CIT_READ || io->ci_type == CIT_WRITE ||
            io->ci_type == CIT_FAULT) {
                struct vvp_io *vio = vvp_env_io(env);

                if (unlikely(vio->vui_fd->fd_flags & LL_FILE_GROUP_LOCKED))
                        *max_index = CL_PAGE_EOF;
        }
        return 0;
}

static int vvp_page_print(const struct lu_env *env,
                          const struct cl_page_slice *slice,
                          void *cookie, lu_printer_t printer)
{
        struct vvp_page *vpg    = cl2vvp_page(slice);
        struct page     *vmpage = vpg->vpg_page;

        (*printer)(env, cookie, LUSTRE_VVP_NAME "-page@%p(%d:%d:%d) vm@%p ",
                   vpg, vpg->vpg_defer_uptodate, vpg->vpg_ra_used,
                   vpg->vpg_write_queued, vmpage);
        if (vmpage) {
                (*printer)(env, cookie, "%lx %d:%d %lx %lu %slru",
                           (long)vmpage->flags, page_count(vmpage),
                           page_mapcount(vmpage), vmpage->private,
                           vmpage->index,
                           list_empty(&vmpage->lru) ? "not-" : "");
        }

        (*printer)(env, cookie, "\n");

        return 0;
}

static int vvp_page_fail(const struct lu_env *env,
                         const struct cl_page_slice *slice)
{
        /*
         * Cached read? This is wired up as ->cpo_make_ready for CRT_READ,
         * which must never be called: pages are never "made ready" for a
         * read transfer out of the cache.
         */
        LBUG();

        return 0;
}

static const struct cl_page_operations vvp_page_ops = {
        .cpo_own           = vvp_page_own,
        .cpo_assume        = vvp_page_assume,
        .cpo_unassume      = vvp_page_unassume,
        .cpo_disown        = vvp_page_disown,
        .cpo_discard       = vvp_page_discard,
        .cpo_delete        = vvp_page_delete,
        .cpo_export        = vvp_page_export,
        .cpo_is_vmlocked   = vvp_page_is_vmlocked,
        .cpo_fini          = vvp_page_fini,
        .cpo_print         = vvp_page_print,
        .cpo_is_under_lock = vvp_page_is_under_lock,
        .io = {
                [CRT_READ] = {
                        .cpo_prep       = vvp_page_prep_read,
                        .cpo_completion = vvp_page_completion_read,
                        .cpo_make_ready = vvp_page_fail,
                },
                [CRT_WRITE] = {
                        .cpo_prep       = vvp_page_prep_write,
                        .cpo_completion = vvp_page_completion_write,
                        .cpo_make_ready = vvp_page_make_ready,
                },
        },
};

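/*
 * Transient pages (CPT_TRANSIENT, e.g. pages used for direct I/O) live
 * outside the page cache and are not serialized by the vmpage lock.
 * Instead, the operations below rely on the caller holding the inode
 * mutex, which vvp_transient_page_verify() asserts.
 */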
static int vvp_transient_page_prep(const struct lu_env *env,
                                   const struct cl_page_slice *slice,
                                   struct cl_io *unused)
{
        /* transient page should always be sent. */
        return 0;
}

static void vvp_transient_page_verify(const struct cl_page *page)
{
        struct inode *inode = vvp_object_inode(page->cp_obj);

        LASSERT(!inode_trylock(inode));
}

static int vvp_transient_page_own(const struct lu_env *env,
                                  const struct cl_page_slice *slice,
                                  struct cl_io *unused, int nonblock)
{
        vvp_transient_page_verify(slice->cpl_page);
        return 0;
}

static void vvp_transient_page_assume(const struct lu_env *env,
                                      const struct cl_page_slice *slice,
                                      struct cl_io *unused)
{
        vvp_transient_page_verify(slice->cpl_page);
}

static void vvp_transient_page_unassume(const struct lu_env *env,
                                        const struct cl_page_slice *slice,
                                        struct cl_io *unused)
{
        vvp_transient_page_verify(slice->cpl_page);
}

static void vvp_transient_page_disown(const struct lu_env *env,
                                      const struct cl_page_slice *slice,
                                      struct cl_io *unused)
{
        vvp_transient_page_verify(slice->cpl_page);
}

static void vvp_transient_page_discard(const struct lu_env *env,
                                       const struct cl_page_slice *slice,
                                       struct cl_io *unused)
{
        struct cl_page *page = slice->cpl_page;

        vvp_transient_page_verify(slice->cpl_page);

        /*
         * For transient pages, remove the page from the radix tree.
         */
        cl_page_delete(env, page);
}

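/*
 * Transient analogue of vvp_page_is_vmlocked(): probe the inode mutex with
 * a trylock, since transient pages are serialized by the mutex rather than
 * by the vmpage lock.
 */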
static int vvp_transient_page_is_vmlocked(const struct lu_env *env,
                                          const struct cl_page_slice *slice)
{
        struct inode *inode = vvp_object_inode(slice->cpl_obj);
        int locked;

        locked = !inode_trylock(inode);
        if (!locked)
                inode_unlock(inode);
        return locked ? -EBUSY : -ENODATA;
}

static void
vvp_transient_page_completion(const struct lu_env *env,
                              const struct cl_page_slice *slice,
                              int ioret)
{
        vvp_transient_page_verify(slice->cpl_page);
}

static void vvp_transient_page_fini(const struct lu_env *env,
                                    struct cl_page_slice *slice)
{
        struct vvp_page *vpg = cl2vvp_page(slice);
        struct cl_page *clp = slice->cpl_page;
        struct vvp_object *clobj = cl2vvp(clp->cp_obj);

        vvp_page_fini_common(vpg);
        LASSERT(!inode_trylock(clobj->vob_inode));
        clobj->vob_transient_pages--;
}

static const struct cl_page_operations vvp_transient_page_ops = {
        .cpo_own           = vvp_transient_page_own,
        .cpo_assume        = vvp_transient_page_assume,
        .cpo_unassume      = vvp_transient_page_unassume,
        .cpo_disown        = vvp_transient_page_disown,
        .cpo_discard       = vvp_transient_page_discard,
        .cpo_fini          = vvp_transient_page_fini,
        .cpo_is_vmlocked   = vvp_transient_page_is_vmlocked,
        .cpo_print         = vvp_page_print,
        .cpo_is_under_lock = vvp_page_is_under_lock,
        .io = {
                [CRT_READ] = {
                        .cpo_prep       = vvp_transient_page_prep,
                        .cpo_completion = vvp_transient_page_completion,
                },
                [CRT_WRITE] = {
                        .cpo_prep       = vvp_transient_page_prep,
                        .cpo_completion = vvp_transient_page_completion,
                }
        }
};

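/**
 * Attach the VVP page slice to a newly initialized cl_page. For cacheable
 * pages this links vmpage->private back to the cl_page and takes an extra
 * cl_page reference (dropped in vvp_page_delete()); transient pages instead
 * require the inode mutex to be held and are counted in
 * vob_transient_pages.
 */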
int vvp_page_init(const struct lu_env *env, struct cl_object *obj,
                  struct cl_page *page, pgoff_t index)
{
        struct vvp_page *vpg    = cl_object_page_slice(obj, page);
        struct page     *vmpage = page->cp_vmpage;

        CLOBINVRNT(env, obj, vvp_object_invariant(obj));

        vpg->vpg_page = vmpage;
        get_page(vmpage);

        INIT_LIST_HEAD(&vpg->vpg_pending_linkage);
        if (page->cp_type == CPT_CACHEABLE) {
                /* in cache, decref in vvp_page_delete */
                atomic_inc(&page->cp_ref);
                SetPagePrivate(vmpage);
                vmpage->private = (unsigned long)page;
                cl_page_slice_add(page, &vpg->vpg_cl, obj, index,
                                  &vvp_page_ops);
        } else {
                struct vvp_object *clobj = cl2vvp(obj);

                LASSERT(!inode_trylock(clobj->vob_inode));
                cl_page_slice_add(page, &vpg->vpg_cl, obj, index,
                                  &vvp_transient_page_ops);
                clobj->vob_transient_pages++;
        }
        return 0;
}