staging: lustre: remove lustre_lite.h
drivers/staging/lustre/lustre/llite/vvp_page.c
/*
 * GPL HEADER START
 *
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.gnu.org/licenses/gpl-2.0.html
 *
 * GPL HEADER END
 */
/*
 * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
 * Use is subject to license terms.
 *
 * Copyright (c) 2011, 2015, Intel Corporation.
 */
/*
 * This file is part of Lustre, http://www.lustre.org/
 * Lustre is a trademark of Sun Microsystems, Inc.
 *
 * Implementation of cl_page for the VVP layer.
 *
 *   Author: Nikita Danilov <nikita.danilov@sun.com>
 *   Author: Jinshan Xiong <jinshan.xiong@whamcloud.com>
 */

#define DEBUG_SUBSYSTEM S_LLITE

#include <linux/atomic.h>
#include <linux/bitops.h>
#include <linux/mm.h>
#include <linux/mutex.h>
#include <linux/page-flags.h>
#include <linux/pagemap.h>

#include "llite_internal.h"
#include "vvp_internal.h"

/*****************************************************************************
 *
 * Page operations.
 *
 */

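/*
 * Drop the vmpage reference that vvp_page_init() took with get_page().
 */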
static void vvp_page_fini_common(struct vvp_page *vpg)
{
        struct page *vmpage = vpg->vpg_page;

        LASSERT(vmpage);
        put_page(vmpage);
}

static void vvp_page_fini(const struct lu_env *env,
                          struct cl_page_slice *slice)
{
        struct vvp_page *vpg    = cl2vvp_page(slice);
        struct page     *vmpage = vpg->vpg_page;

        /*
         * vmpage->private was already cleared when the page was moved into
         * the VPG_FREEING state.
         */
        LASSERT((struct cl_page *)vmpage->private != slice->cpl_page);
        vvp_page_fini_common(vpg);
}

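/*
 * Implements cl_page_operations::cpo_own(): take the VM page lock and wait
 * for any writeback to finish.  In non-blocking mode, give up with -EAGAIN
 * if the page cannot be locked immediately or is under writeback.
 */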
static int vvp_page_own(const struct lu_env *env,
                        const struct cl_page_slice *slice, struct cl_io *io,
                        int nonblock)
{
        struct vvp_page *vpg    = cl2vvp_page(slice);
        struct page     *vmpage = vpg->vpg_page;

        LASSERT(vmpage);
        if (nonblock) {
                if (!trylock_page(vmpage))
                        return -EAGAIN;

                if (unlikely(PageWriteback(vmpage))) {
                        unlock_page(vmpage);
                        return -EAGAIN;
                }

                return 0;
        }

        lock_page(vmpage);
        wait_on_page_writeback(vmpage);

        return 0;
}

static void vvp_page_assume(const struct lu_env *env,
                            const struct cl_page_slice *slice,
                            struct cl_io *unused)
{
        struct page *vmpage = cl2vm_page(slice);

        LASSERT(vmpage);
        LASSERT(PageLocked(vmpage));
        wait_on_page_writeback(vmpage);
}

static void vvp_page_unassume(const struct lu_env *env,
                              const struct cl_page_slice *slice,
                              struct cl_io *unused)
{
        struct page *vmpage = cl2vm_page(slice);

        LASSERT(vmpage);
        LASSERT(PageLocked(vmpage));
}

static void vvp_page_disown(const struct lu_env *env,
                            const struct cl_page_slice *slice, struct cl_io *io)
{
        struct page *vmpage = cl2vm_page(slice);

        LASSERT(vmpage);
        LASSERT(PageLocked(vmpage));

        unlock_page(vmpage);
}

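/*
 * Implements cl_page_operations::cpo_discard(): count a discarded read-ahead
 * page in the statistics, then drop the page from the page cache.
 */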
static void vvp_page_discard(const struct lu_env *env,
                             const struct cl_page_slice *slice,
                             struct cl_io *unused)
{
        struct page     *vmpage = cl2vm_page(slice);
        struct vvp_page *vpg    = cl2vvp_page(slice);

        LASSERT(vmpage);
        LASSERT(PageLocked(vmpage));

        if (vpg->vpg_defer_uptodate && !vpg->vpg_ra_used)
                ll_ra_stats_inc(vmpage->mapping->host, RA_STAT_DISCARDED);

        ll_invalidate_page(vmpage);
}

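/*
 * Implements cl_page_operations::cpo_delete(): sever the link from the VM
 * page back to the cl_page and drop the reference taken in vvp_page_init().
 */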
static void vvp_page_delete(const struct lu_env *env,
                            const struct cl_page_slice *slice)
{
        struct page      *vmpage = cl2vm_page(slice);
        struct inode     *inode  = vmpage->mapping->host;
        struct cl_object *obj    = slice->cpl_obj;
        struct cl_page   *page   = slice->cpl_page;
        int refc;

        LASSERT(PageLocked(vmpage));
        LASSERT((struct cl_page *)vmpage->private == page);
        LASSERT(inode == vvp_object_inode(obj));

        vvp_write_complete(cl2vvp(obj), cl2vvp_page(slice));

        /* Drop the reference count held in vvp_page_init */
        refc = atomic_dec_return(&page->cp_ref);
        LASSERTF(refc >= 1, "page = %p, refc = %d\n", page, refc);

        ClearPageUptodate(vmpage);
        ClearPagePrivate(vmpage);
        vmpage->private = 0;
        /*
         * The reference from vmpage to cl_page is removed, but the reverse
         * reference is still here; it is dropped later, in vvp_page_fini().
         */
}

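/*
 * Implements cl_page_operations::cpo_export(): propagate the cl_page
 * uptodate state to the VM page's PG_uptodate flag.
 */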
static void vvp_page_export(const struct lu_env *env,
                            const struct cl_page_slice *slice,
                            int uptodate)
{
        struct page *vmpage = cl2vm_page(slice);

        LASSERT(vmpage);
        LASSERT(PageLocked(vmpage));
        if (uptodate)
                SetPageUptodate(vmpage);
        else
                ClearPageUptodate(vmpage);
}

static int vvp_page_is_vmlocked(const struct lu_env *env,
                                const struct cl_page_slice *slice)
{
        return PageLocked(cl2vm_page(slice)) ? -EBUSY : -ENODATA;
}

static int vvp_page_prep_read(const struct lu_env *env,
                              const struct cl_page_slice *slice,
                              struct cl_io *unused)
{
        /* Skip a page that is already marked PG_uptodate. */
        return PageUptodate(cl2vm_page(slice)) ? -EALREADY : 0;
}

static int vvp_page_prep_write(const struct lu_env *env,
                               const struct cl_page_slice *slice,
                               struct cl_io *unused)
{
        struct page *vmpage = cl2vm_page(slice);
        struct cl_page *pg = slice->cpl_page;

        LASSERT(PageLocked(vmpage));
        LASSERT(!PageDirty(vmpage));

        /* The ll_writepage path is not a sync write, so we need to set the
         * page writeback flag.
         */
        if (!pg->cp_sync_io)
                set_page_writeback(vmpage);

        vvp_write_pending(cl2vvp(slice->cpl_obj), cl2vvp_page(slice));

        return 0;
}

/**
 * Handles page transfer errors at the VM level.
 *
 * This takes the inode as a separate argument, because the inode on which
 * the error is to be set can differ from the \a vmpage inode in the case of
 * direct I/O.
 */
static void vvp_vmpage_error(struct inode *inode, struct page *vmpage, int ioret)
{
        struct vvp_object *obj = cl_inode2vvp(inode);

        if (ioret == 0) {
                ClearPageError(vmpage);
                obj->vob_discard_page_warned = 0;
        } else {
                SetPageError(vmpage);
                if (ioret == -ENOSPC)
                        set_bit(AS_ENOSPC, &inode->i_mapping->flags);
                else
                        set_bit(AS_EIO, &inode->i_mapping->flags);

                if ((ioret == -ESHUTDOWN || ioret == -EINTR) &&
                    obj->vob_discard_page_warned == 0) {
                        obj->vob_discard_page_warned = 1;
                        ll_dirty_page_discard_warn(vmpage, ioret);
                }
        }
}

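/*
 * Implements cl_page_operations::cpo_completion() for reads: return the
 * read-ahead credit if one was held, mark the page uptodate on success
 * (unless read-ahead defers that), and unlock the VM page when the read
 * was asynchronous.
 */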
static void vvp_page_completion_read(const struct lu_env *env,
                                     const struct cl_page_slice *slice,
                                     int ioret)
{
        struct vvp_page *vpg    = cl2vvp_page(slice);
        struct page     *vmpage = vpg->vpg_page;
        struct cl_page  *page   = slice->cpl_page;
        struct inode    *inode  = vvp_object_inode(page->cp_obj);

        LASSERT(PageLocked(vmpage));
        CL_PAGE_HEADER(D_PAGE, env, page, "completing READ with %d\n", ioret);

        if (vpg->vpg_defer_uptodate)
                ll_ra_count_put(ll_i2sbi(inode), 1);

        if (ioret == 0) {
                if (!vpg->vpg_defer_uptodate)
                        cl_page_export(env, page, 1);
        } else {
                vpg->vpg_defer_uptodate = 0;
        }

        if (!page->cp_sync_io)
                unlock_page(vmpage);
}

static void vvp_page_completion_write(const struct lu_env *env,
                                      const struct cl_page_slice *slice,
                                      int ioret)
{
        struct vvp_page *vpg    = cl2vvp_page(slice);
        struct cl_page  *pg     = slice->cpl_page;
        struct page     *vmpage = vpg->vpg_page;

        CL_PAGE_HEADER(D_PAGE, env, pg, "completing WRITE with %d\n", ioret);

        /*
         * TODO: It would actually make sense to re-add the page to the oap
         * pending list, rather than take it off the SoM write pending list,
         * when we merely hit a recoverable error such as -ENOMEM.
         * To implement this, we just need to return a non-zero value from
         * the ->cpo_completion method; the underlying transfer would be
         * notified and would then re-add the page to the pending transfer
         * queue.  -jay
         */

        vpg->vpg_write_queued = 0;
        vvp_write_complete(cl2vvp(slice->cpl_obj), vpg);

        if (pg->cp_sync_io) {
                LASSERT(PageLocked(vmpage));
                LASSERT(!PageWriteback(vmpage));
        } else {
                LASSERT(PageWriteback(vmpage));
                /*
                 * Only mark the page in error when it's an async write,
                 * because applications won't wait for the IO to finish.
                 */
                vvp_vmpage_error(vvp_object_inode(pg->cp_obj), vmpage, ioret);

                end_page_writeback(vmpage);
        }
}

/**
 * Implements cl_page_operations::cpo_make_ready() method.
 *
 * This is called to yank a page from the transfer cache and send it out as
 * part of a transfer. This function try-locks the page. If the try-lock
 * fails, the page is owned by some concurrent IO and should be skipped (a
 * bad, but hopefully rare, situation, as it usually results in the transfer
 * being shorter than possible).
 *
 * \retval 0       success, the page can be placed into a transfer
 *
 * \retval -EAGAIN the page is either used by concurrent IO or has been
 * truncated. Skip it.
 */
static int vvp_page_make_ready(const struct lu_env *env,
                               const struct cl_page_slice *slice)
{
        struct page *vmpage = cl2vm_page(slice);
        struct cl_page *pg = slice->cpl_page;
        int result = 0;

        lock_page(vmpage);
        if (clear_page_dirty_for_io(vmpage)) {
                LASSERT(pg->cp_state == CPS_CACHED);
                /* This actually clears the dirty bit in the radix tree. */
                set_page_writeback(vmpage);
                vvp_write_pending(cl2vvp(slice->cpl_obj), cl2vvp_page(slice));
                CL_PAGE_HEADER(D_PAGE, env, pg, "readied\n");
        } else if (pg->cp_state == CPS_PAGEOUT) {
                /* Is it possible for osc_flush_async_page() to have already
                 * made it ready?
                 */
                result = -EALREADY;
        } else {
                CL_PAGE_DEBUG(D_ERROR, env, pg, "Unexpected page state %d.\n",
                              pg->cp_state);
                LBUG();
        }
        unlock_page(vmpage);
        return result;
}

static int vvp_page_is_under_lock(const struct lu_env *env,
                                  const struct cl_page_slice *slice,
                                  struct cl_io *io, pgoff_t *max_index)
{
        if (io->ci_type == CIT_READ || io->ci_type == CIT_WRITE ||
            io->ci_type == CIT_FAULT) {
                struct vvp_io *vio = vvp_env_io(env);

                if (unlikely(vio->vui_fd->fd_flags & LL_FILE_GROUP_LOCKED))
                        *max_index = CL_PAGE_EOF;
        }
        return 0;
}

static int vvp_page_print(const struct lu_env *env,
                          const struct cl_page_slice *slice,
                          void *cookie, lu_printer_t printer)
{
        struct vvp_page *vpg    = cl2vvp_page(slice);
        struct page     *vmpage = vpg->vpg_page;

        (*printer)(env, cookie, LUSTRE_VVP_NAME "-page@%p(%d:%d:%d) vm@%p ",
                   vpg, vpg->vpg_defer_uptodate, vpg->vpg_ra_used,
                   vpg->vpg_write_queued, vmpage);
        if (vmpage) {
                (*printer)(env, cookie, "%lx %d:%d %lx %lu %slru",
                           (long)vmpage->flags, page_count(vmpage),
                           page_mapcount(vmpage), vmpage->private,
                           vmpage->index,
                           list_empty(&vmpage->lru) ? "not-" : "");
        }

        (*printer)(env, cookie, "\n");

        return 0;
}

static int vvp_page_fail(const struct lu_env *env,
                         const struct cl_page_slice *slice)
{
        /*
         * Cached read?
         */
        LBUG();

        return 0;
}

static const struct cl_page_operations vvp_page_ops = {
        .cpo_own           = vvp_page_own,
        .cpo_assume        = vvp_page_assume,
        .cpo_unassume      = vvp_page_unassume,
        .cpo_disown        = vvp_page_disown,
        .cpo_discard       = vvp_page_discard,
        .cpo_delete        = vvp_page_delete,
        .cpo_export        = vvp_page_export,
        .cpo_is_vmlocked   = vvp_page_is_vmlocked,
        .cpo_fini          = vvp_page_fini,
        .cpo_print         = vvp_page_print,
        .cpo_is_under_lock = vvp_page_is_under_lock,
        .io = {
                [CRT_READ] = {
                        .cpo_prep       = vvp_page_prep_read,
                        .cpo_completion = vvp_page_completion_read,
                        .cpo_make_ready = vvp_page_fail,
                },
                [CRT_WRITE] = {
                        .cpo_prep       = vvp_page_prep_write,
                        .cpo_completion = vvp_page_completion_write,
                        .cpo_make_ready = vvp_page_make_ready,
                },
        },
};
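
/*
 * A minimal sketch (illustrative only, not part of this file) of how the
 * generic cl_page code is expected to dispatch into this method table,
 * assuming the per-layer slice list from cl_object.h:
 *
 *      list_for_each_entry(slice, &page->cp_layers, cpl_linkage) {
 *              if (!slice->cpl_ops->cpo_own)
 *                      continue;
 *              result = slice->cpl_ops->cpo_own(env, slice, io, nonblock);
 *              if (result != 0)
 *                      break;
 *      }
 */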

static int vvp_transient_page_prep(const struct lu_env *env,
                                   const struct cl_page_slice *slice,
                                   struct cl_io *unused)
{
        /* A transient page should always be sent. */
        return 0;
}

static int vvp_transient_page_own(const struct lu_env *env,
                                  const struct cl_page_slice *slice,
                                  struct cl_io *unused, int nonblock)
{
        return 0;
}

static void vvp_transient_page_assume(const struct lu_env *env,
                                      const struct cl_page_slice *slice,
                                      struct cl_io *unused)
{
}

static void vvp_transient_page_unassume(const struct lu_env *env,
                                        const struct cl_page_slice *slice,
                                        struct cl_io *unused)
{
}

static void vvp_transient_page_disown(const struct lu_env *env,
                                      const struct cl_page_slice *slice,
                                      struct cl_io *unused)
{
}

static void vvp_transient_page_discard(const struct lu_env *env,
                                       const struct cl_page_slice *slice,
                                       struct cl_io *unused)
{
        struct cl_page *page = slice->cpl_page;

        /*
         * For a transient page, simply remove it from the radix tree.
         */
        cl_page_delete(env, page);
}

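/*
 * For a transient page, the inode mutex stands in for the VM page lock:
 * report -EBUSY when another thread holds it, -ENODATA otherwise.
 */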
static int vvp_transient_page_is_vmlocked(const struct lu_env *env,
                                          const struct cl_page_slice *slice)
{
        struct inode *inode = vvp_object_inode(slice->cpl_obj);
        int locked;

        locked = !inode_trylock(inode);
        if (!locked)
                inode_unlock(inode);
        return locked ? -EBUSY : -ENODATA;
}

static void
vvp_transient_page_completion(const struct lu_env *env,
                              const struct cl_page_slice *slice,
                              int ioret)
{
}

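/*
 * Release the vmpage reference and decrement the owning object's count of
 * outstanding transient pages (incremented in vvp_page_init()).
 */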
static void vvp_transient_page_fini(const struct lu_env *env,
                                    struct cl_page_slice *slice)
{
        struct vvp_page *vpg = cl2vvp_page(slice);
        struct cl_page *clp = slice->cpl_page;
        struct vvp_object *clobj = cl2vvp(clp->cp_obj);

        vvp_page_fini_common(vpg);
        clobj->vob_transient_pages--;
}

static const struct cl_page_operations vvp_transient_page_ops = {
        .cpo_own           = vvp_transient_page_own,
        .cpo_assume        = vvp_transient_page_assume,
        .cpo_unassume      = vvp_transient_page_unassume,
        .cpo_disown        = vvp_transient_page_disown,
        .cpo_discard       = vvp_transient_page_discard,
        .cpo_fini          = vvp_transient_page_fini,
        .cpo_is_vmlocked   = vvp_transient_page_is_vmlocked,
        .cpo_print         = vvp_page_print,
        .cpo_is_under_lock = vvp_page_is_under_lock,
        .io = {
                [CRT_READ] = {
                        .cpo_prep       = vvp_transient_page_prep,
                        .cpo_completion = vvp_transient_page_completion,
                },
                [CRT_WRITE] = {
                        .cpo_prep       = vvp_transient_page_prep,
                        .cpo_completion = vvp_transient_page_completion,
                }
        }
};

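/*
 * Initialize the VVP slice of a freshly created cl_page: take a reference
 * on the backing VM page, and wire up either the cacheable or the transient
 * operations vector depending on the page type.
 */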
int vvp_page_init(const struct lu_env *env, struct cl_object *obj,
                  struct cl_page *page, pgoff_t index)
{
        struct vvp_page *vpg    = cl_object_page_slice(obj, page);
        struct page     *vmpage = page->cp_vmpage;

        CLOBINVRNT(env, obj, vvp_object_invariant(obj));

        vpg->vpg_page = vmpage;
        get_page(vmpage);

        INIT_LIST_HEAD(&vpg->vpg_pending_linkage);
        if (page->cp_type == CPT_CACHEABLE) {
                /* in cache, decref in vvp_page_delete */
                atomic_inc(&page->cp_ref);
                SetPagePrivate(vmpage);
                vmpage->private = (unsigned long)page;
                cl_page_slice_add(page, &vpg->vpg_cl, obj, index,
                                  &vvp_page_ops);
        } else {
                struct vvp_object *clobj = cl2vvp(obj);

                cl_page_slice_add(page, &vpg->vpg_cl, obj, index,
                                  &vvp_transient_page_ops);
                clobj->vob_transient_pages++;
        }
        return 0;
}