/*
 * GPL HEADER START
 *
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 * GPL HEADER END
 */
/*
 * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
 * Use is subject to license terms.
 *
 * Copyright (c) 2011, 2015, Intel Corporation.
 */
/*
 * This file is part of Lustre, http://www.lustre.org/
 * Lustre is a trademark of Sun Microsystems, Inc.
 *
 * Implementation of cl_page for VVP layer.
 *
 *   Author: Nikita Danilov <nikita.danilov@sun.com>
 *   Author: Jinshan Xiong <jinshan.xiong@whamcloud.com>
 */

#define DEBUG_SUBSYSTEM S_LLITE

#include <linux/atomic.h>
#include <linux/bitops.h>
#include <linux/mm.h>
#include <linux/mutex.h>
#include <linux/page-flags.h>
#include <linux/pagemap.h>

#include "../include/lustre_lite.h"

#include "llite_internal.h"
#include "vvp_internal.h"

/*****************************************************************************
 *
 * Page operations.
 *
 */

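/*
 * A vvp_page is the VVP-layer slice of a cl_page: it pairs the cl_page with
 * the kernel's struct page ("vmpage") that holds the data. Two flavours are
 * implemented below: cacheable pages, which live in the inode's page cache
 * and use vvp_page_ops, and transient pages (CPT_TRANSIENT, e.g. for the
 * direct I/O path), which bypass the page cache and use
 * vvp_transient_page_ops.
 */
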
static void vvp_page_fini_common(struct vvp_page *vpg)
{
	struct page *vmpage = vpg->vpg_page;

	LASSERT(vmpage);
	put_page(vmpage);	/* drop the reference taken in vvp_page_init() */
}

static void vvp_page_fini(const struct lu_env *env,
			  struct cl_page_slice *slice)
{
	struct vvp_page *vpg = cl2vvp_page(slice);
	struct page *vmpage = vpg->vpg_page;

	/*
	 * vmpage->private was already cleared when the page was moved into
	 * the CPS_FREEING state.
	 */
	LASSERT((struct cl_page *)vmpage->private != slice->cpl_page);
	vvp_page_fini_common(vpg);
}

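/*
 * Implements cl_page_operations::cpo_own(): take the VM page lock (and wait
 * out any writeback) so that the caller becomes the exclusive owner of the
 * page. In nonblock mode the lock is only tried, and -EAGAIN tells the
 * caller to skip the page rather than sleep on it.
 */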
static int vvp_page_own(const struct lu_env *env,
			const struct cl_page_slice *slice, struct cl_io *io,
			int nonblock)
{
	struct vvp_page *vpg = cl2vvp_page(slice);
	struct page *vmpage = vpg->vpg_page;

	LASSERT(vmpage);
	if (nonblock) {
		if (!trylock_page(vmpage))
			return -EAGAIN;

		if (unlikely(PageWriteback(vmpage))) {
			unlock_page(vmpage);
			return -EAGAIN;
		}

		return 0;
	}

	lock_page(vmpage);
	wait_on_page_writeback(vmpage);

	return 0;
}

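/*
 * cpo_assume()/cpo_unassume()/cpo_disown() map the cl_page ownership
 * protocol onto the VM page lock: assume asserts that the lock is already
 * held (and waits out writeback), unassume only checks it, and disown
 * drops it.
 */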
static void vvp_page_assume(const struct lu_env *env,
			    const struct cl_page_slice *slice,
			    struct cl_io *unused)
{
	struct page *vmpage = cl2vm_page(slice);

	LASSERT(vmpage);
	LASSERT(PageLocked(vmpage));
	wait_on_page_writeback(vmpage);
}

static void vvp_page_unassume(const struct lu_env *env,
			      const struct cl_page_slice *slice,
			      struct cl_io *unused)
{
	struct page *vmpage = cl2vm_page(slice);

	LASSERT(vmpage);
	LASSERT(PageLocked(vmpage));
}

static void vvp_page_disown(const struct lu_env *env,
			    const struct cl_page_slice *slice, struct cl_io *io)
{
	struct page *vmpage = cl2vm_page(slice);

	LASSERT(vmpage);
	LASSERT(PageLocked(vmpage));

	unlock_page(cl2vm_page(slice));
}

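/*
 * Implements cl_page_operations::cpo_discard(): the page is dropped without
 * being written back. If read-ahead brought the page in but nobody ever
 * used it, account that against the read-ahead statistics before
 * invalidating it.
 */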
static void vvp_page_discard(const struct lu_env *env,
			     const struct cl_page_slice *slice,
			     struct cl_io *unused)
{
	struct page *vmpage = cl2vm_page(slice);
	struct vvp_page *vpg = cl2vvp_page(slice);

	LASSERT(vmpage);
	LASSERT(PageLocked(vmpage));

	if (vpg->vpg_defer_uptodate && !vpg->vpg_ra_used)
		ll_ra_stats_inc(vmpage->mapping->host, RA_STAT_DISCARDED);

	ll_invalidate_page(vmpage);
}

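/*
 * Implements cl_page_operations::cpo_delete(): breaks the vmpage -> cl_page
 * link and drops the reference that vvp_page_init() took on the cl_page.
 * The back reference from the cl_page to the vmpage survives until
 * vvp_page_fini().
 */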
static void vvp_page_delete(const struct lu_env *env,
			    const struct cl_page_slice *slice)
{
	struct page *vmpage = cl2vm_page(slice);
	struct inode *inode = vmpage->mapping->host;
	struct cl_object *obj = slice->cpl_obj;
	struct cl_page *page = slice->cpl_page;
	int refc;

	LASSERT(PageLocked(vmpage));
	LASSERT((struct cl_page *)vmpage->private == page);
	LASSERT(inode == vvp_object_inode(obj));

	vvp_write_complete(cl2vvp(obj), cl2vvp_page(slice));

	/* Drop the reference count held in vvp_page_init */
	refc = atomic_dec_return(&page->cp_ref);
	LASSERTF(refc >= 1, "page = %p, refc = %d\n", page, refc);

	ClearPageUptodate(vmpage);
	ClearPagePrivate(vmpage);
	vmpage->private = 0;
	/*
	 * The reference from the vmpage to the cl_page is removed, but the
	 * reference back is still here. It is removed later in
	 * vvp_page_fini().
	 */
}

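/*
 * Implements cl_page_operations::cpo_export(): propagate the cl_page
 * uptodate state to the VM page's PG_uptodate flag.
 */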
static void vvp_page_export(const struct lu_env *env,
			    const struct cl_page_slice *slice,
			    int uptodate)
{
	struct page *vmpage = cl2vm_page(slice);

	LASSERT(vmpage);
	LASSERT(PageLocked(vmpage));
	if (uptodate)
		SetPageUptodate(vmpage);
	else
		ClearPageUptodate(vmpage);
}

static int vvp_page_is_vmlocked(const struct lu_env *env,
				const struct cl_page_slice *slice)
{
	return PageLocked(cl2vm_page(slice)) ? -EBUSY : -ENODATA;
}

static int vvp_page_prep_read(const struct lu_env *env,
			      const struct cl_page_slice *slice,
			      struct cl_io *unused)
{
	/* Skip a page that is already marked PG_uptodate. */
	return PageUptodate(cl2vm_page(slice)) ? -EALREADY : 0;
}

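/*
 * Implements cl_page_operations::cpo_prep() for writes: marks the page as
 * under writeback (only for the async ll_writepage path) and tracks it as a
 * pending write on the object via vvp_write_pending().
 */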
static int vvp_page_prep_write(const struct lu_env *env,
			       const struct cl_page_slice *slice,
			       struct cl_io *unused)
{
	struct page *vmpage = cl2vm_page(slice);
	struct cl_page *pg = slice->cpl_page;

	LASSERT(PageLocked(vmpage));
	LASSERT(!PageDirty(vmpage));

	/* The ll_writepage path is not a sync write, so the page needs its
	 * writeback flag set.
	 */
	if (!pg->cp_sync_io)
		set_page_writeback(vmpage);

	vvp_write_pending(cl2vvp(slice->cpl_obj), cl2vvp_page(slice));

	return 0;
}

/**
 * Handles page transfer errors at the VM level.
 *
 * This takes the inode as a separate argument, because the inode on which
 * the error is to be set can differ from the \a vmpage inode in the case of
 * direct I/O.
 */
static void vvp_vmpage_error(struct inode *inode, struct page *vmpage, int ioret)
{
	struct vvp_object *obj = cl_inode2vvp(inode);

	if (ioret == 0) {
		ClearPageError(vmpage);
		obj->vob_discard_page_warned = 0;
	} else {
		SetPageError(vmpage);
		if (ioret == -ENOSPC)
			set_bit(AS_ENOSPC, &inode->i_mapping->flags);
		else
			set_bit(AS_EIO, &inode->i_mapping->flags);

		if ((ioret == -ESHUTDOWN || ioret == -EINTR) &&
		    obj->vob_discard_page_warned == 0) {
			obj->vob_discard_page_warned = 1;
			ll_dirty_page_discard_warn(vmpage, ioret);
		}
	}
}

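/*
 * Read completion: on success, export the page as uptodate unless
 * read-ahead deferred that decision (vpg_defer_uptodate); on error, forget
 * the deferred state. A deferred-uptodate page also returns its read-ahead
 * credit via ll_ra_count_put() here. A page of an async read is unlocked
 * once completion is done.
 */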
static void vvp_page_completion_read(const struct lu_env *env,
				     const struct cl_page_slice *slice,
				     int ioret)
{
	struct vvp_page *vpg = cl2vvp_page(slice);
	struct page *vmpage = vpg->vpg_page;
	struct cl_page *page = slice->cpl_page;
	struct inode *inode = vvp_object_inode(page->cp_obj);

	LASSERT(PageLocked(vmpage));
	CL_PAGE_HEADER(D_PAGE, env, page, "completing READ with %d\n", ioret);

	if (vpg->vpg_defer_uptodate)
		ll_ra_count_put(ll_i2sbi(inode), 1);

	if (ioret == 0) {
		if (!vpg->vpg_defer_uptodate)
			cl_page_export(env, page, 1);
	} else {
		vpg->vpg_defer_uptodate = 0;
	}

	if (!page->cp_sync_io)
		unlock_page(vmpage);
}

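/*
 * Write completion: take the page off the object's pending-write list, then
 * finish according to how it was sent. A sync write still holds the page
 * lock and never set PG_writeback; an async write must record any error on
 * the page and end writeback, since no application is waiting on the IO.
 */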
static void vvp_page_completion_write(const struct lu_env *env,
				      const struct cl_page_slice *slice,
				      int ioret)
{
	struct vvp_page *vpg = cl2vvp_page(slice);
	struct cl_page *pg = slice->cpl_page;
	struct page *vmpage = vpg->vpg_page;

	CL_PAGE_HEADER(D_PAGE, env, pg, "completing WRITE with %d\n", ioret);

	/*
	 * TODO: It actually makes sense to add the page back into the oap
	 * pending list, so that we don't need to take the page off the SoM
	 * write pending list when we merely hit a recoverable error such as
	 * -ENOMEM. To implement this, we just need to return a non-zero
	 * value from the ->cpo_completion method. The underlying transfer
	 * should be notified and then re-add the page into the pending
	 * transfer queue. -jay
	 */

	vpg->vpg_write_queued = 0;
	vvp_write_complete(cl2vvp(slice->cpl_obj), vpg);

	if (pg->cp_sync_io) {
		LASSERT(PageLocked(vmpage));
		LASSERT(!PageWriteback(vmpage));
	} else {
		LASSERT(PageWriteback(vmpage));
		/*
		 * Only mark the page in error for async writes, because
		 * applications won't wait for the IO to finish.
		 */
		vvp_vmpage_error(vvp_object_inode(pg->cp_obj), vmpage, ioret);

		end_page_writeback(vmpage);
	}
}

/**
 * Implements cl_page_operations::cpo_make_ready() method.
 *
 * This is called to yank a page from the transfer cache and send it out as
 * part of a transfer. This function try-locks the page. If the try-lock
 * fails, the page is owned by some concurrent IO and should be skipped
 * (this is bad, but hopefully a rare situation, as it usually results in a
 * transfer being shorter than possible).
 *
 * \retval 0	   success, the page can be placed into a transfer
 *
 * \retval -EAGAIN the page is either in use by a concurrent IO or has been
 *		   truncated. Skip it.
 */
static int vvp_page_make_ready(const struct lu_env *env,
			       const struct cl_page_slice *slice)
{
	struct page *vmpage = cl2vm_page(slice);
	struct cl_page *pg = slice->cpl_page;
	int result = 0;

	lock_page(vmpage);
	if (clear_page_dirty_for_io(vmpage)) {
		LASSERT(pg->cp_state == CPS_CACHED);
		/* This actually clears the dirty bit in the radix tree. */
		set_page_writeback(vmpage);
		vvp_write_pending(cl2vvp(slice->cpl_obj), cl2vvp_page(slice));
		CL_PAGE_HEADER(D_PAGE, env, pg, "readied\n");
	} else if (pg->cp_state == CPS_PAGEOUT) {
		/* Is it possible for osc_flush_async_page() to already
		 * make it ready?
		 */
		result = -EALREADY;
	} else {
		CL_PAGE_DEBUG(D_ERROR, env, pg, "Unexpected page state %d.\n",
			      pg->cp_state);
		LBUG();
	}
	unlock_page(vmpage);
	return result;
}

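/*
 * Implements cl_page_operations::cpo_is_under_lock(): for a group-locked
 * file the whole file is covered, so CL_PAGE_EOF is reported as the last
 * page index known to be covered by the lock.
 */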
static int vvp_page_is_under_lock(const struct lu_env *env,
				  const struct cl_page_slice *slice,
				  struct cl_io *io, pgoff_t *max_index)
{
	if (io->ci_type == CIT_READ || io->ci_type == CIT_WRITE ||
	    io->ci_type == CIT_FAULT) {
		struct vvp_io *vio = vvp_env_io(env);

		if (unlikely(vio->vui_fd->fd_flags & LL_FILE_GROUP_LOCKED))
			*max_index = CL_PAGE_EOF;
	}

	return 0;
}

static int vvp_page_print(const struct lu_env *env,
			  const struct cl_page_slice *slice,
			  void *cookie, lu_printer_t printer)
{
	struct vvp_page *vpg = cl2vvp_page(slice);
	struct page *vmpage = vpg->vpg_page;

	(*printer)(env, cookie, LUSTRE_VVP_NAME "-page@%p(%d:%d:%d) vm@%p ",
		   vpg, vpg->vpg_defer_uptodate, vpg->vpg_ra_used,
		   vpg->vpg_write_queued, vmpage);
	if (vmpage) {
		(*printer)(env, cookie, "%lx %d:%d %lx %lu %slru",
			   (long)vmpage->flags, page_count(vmpage),
			   page_mapcount(vmpage), vmpage->private,
			   page_index(vmpage),
			   list_empty(&vmpage->lru) ? "not-" : "");
	}

	(*printer)(env, cookie, "\n");

	return 0;
}

static int vvp_page_fail(const struct lu_env *env,
			 const struct cl_page_slice *slice)
{
	/*
	 * Cached read?
	 */
	LBUG();

	return 0;
}

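/*
 * vvp_page_fail() is wired in as cpo_make_ready for reads below: making a
 * page "ready" only applies to writeout, so reaching it for a read is a
 * bug.
 */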
static const struct cl_page_operations vvp_page_ops = {
	.cpo_own           = vvp_page_own,
	.cpo_assume        = vvp_page_assume,
	.cpo_unassume      = vvp_page_unassume,
	.cpo_disown        = vvp_page_disown,
	.cpo_discard       = vvp_page_discard,
	.cpo_delete        = vvp_page_delete,
	.cpo_export        = vvp_page_export,
	.cpo_is_vmlocked   = vvp_page_is_vmlocked,
	.cpo_fini          = vvp_page_fini,
	.cpo_print         = vvp_page_print,
	.cpo_is_under_lock = vvp_page_is_under_lock,
	.io = {
		[CRT_READ] = {
			.cpo_prep       = vvp_page_prep_read,
			.cpo_completion = vvp_page_completion_read,
			.cpo_make_ready = vvp_page_fail,
		},
		[CRT_WRITE] = {
			.cpo_prep       = vvp_page_prep_write,
			.cpo_completion = vvp_page_completion_write,
			.cpo_make_ready = vvp_page_make_ready,
		},
	},
};

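/*
 * Transient pages wrap pages that are not in the inode's page cache
 * (CPT_TRANSIENT, e.g. the direct I/O path). They are serialized by the
 * inode lock rather than the VM page lock, which is why every operation
 * below asserts that inode_trylock() fails, i.e. that the lock is already
 * held by the caller.
 */
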
static int vvp_transient_page_prep(const struct lu_env *env,
				   const struct cl_page_slice *slice,
				   struct cl_io *unused)
{
	/* A transient page should always be sent. */
	return 0;
}

static void vvp_transient_page_verify(const struct cl_page *page)
{
	struct inode *inode = vvp_object_inode(page->cp_obj);

	LASSERT(!inode_trylock(inode));
}

static int vvp_transient_page_own(const struct lu_env *env,
				  const struct cl_page_slice *slice,
				  struct cl_io *unused, int nonblock)
{
	vvp_transient_page_verify(slice->cpl_page);
	return 0;
}

static void vvp_transient_page_assume(const struct lu_env *env,
				      const struct cl_page_slice *slice,
				      struct cl_io *unused)
{
	vvp_transient_page_verify(slice->cpl_page);
}

static void vvp_transient_page_unassume(const struct lu_env *env,
					const struct cl_page_slice *slice,
					struct cl_io *unused)
{
	vvp_transient_page_verify(slice->cpl_page);
}

static void vvp_transient_page_disown(const struct lu_env *env,
				      const struct cl_page_slice *slice,
				      struct cl_io *unused)
{
	vvp_transient_page_verify(slice->cpl_page);
}

static void vvp_transient_page_discard(const struct lu_env *env,
				       const struct cl_page_slice *slice,
				       struct cl_io *unused)
{
	struct cl_page *page = slice->cpl_page;

	vvp_transient_page_verify(slice->cpl_page);

	/*
	 * For a transient page, remove it from the radix tree.
	 */
	cl_page_delete(env, page);
}

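/*
 * A transient page has no VM page lock; its "vmlocked" state is the inode
 * lock. Probe it with inode_trylock(), undoing the probe if it succeeded.
 */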
static int vvp_transient_page_is_vmlocked(const struct lu_env *env,
					  const struct cl_page_slice *slice)
{
	struct inode *inode = vvp_object_inode(slice->cpl_obj);
	int locked;

	locked = !inode_trylock(inode);
	if (!locked)
		inode_unlock(inode);

	return locked ? -EBUSY : -ENODATA;
}

static void
vvp_transient_page_completion(const struct lu_env *env,
			      const struct cl_page_slice *slice,
			      int ioret)
{
	vvp_transient_page_verify(slice->cpl_page);
}

static void vvp_transient_page_fini(const struct lu_env *env,
				    struct cl_page_slice *slice)
{
	struct vvp_page *vpg = cl2vvp_page(slice);
	struct cl_page *clp = slice->cpl_page;
	struct vvp_object *clobj = cl2vvp(clp->cp_obj);

	vvp_page_fini_common(vpg);
	LASSERT(!inode_trylock(clobj->vob_inode));
	clobj->vob_transient_pages--;
}

static const struct cl_page_operations vvp_transient_page_ops = {
	.cpo_own           = vvp_transient_page_own,
	.cpo_assume        = vvp_transient_page_assume,
	.cpo_unassume      = vvp_transient_page_unassume,
	.cpo_disown        = vvp_transient_page_disown,
	.cpo_discard       = vvp_transient_page_discard,
	.cpo_fini          = vvp_transient_page_fini,
	.cpo_is_vmlocked   = vvp_transient_page_is_vmlocked,
	.cpo_print         = vvp_page_print,
	.cpo_is_under_lock = vvp_page_is_under_lock,
	.io = {
		[CRT_READ] = {
			.cpo_prep       = vvp_transient_page_prep,
			.cpo_completion = vvp_transient_page_completion,
		},
		[CRT_WRITE] = {
			.cpo_prep       = vvp_transient_page_prep,
			.cpo_completion = vvp_transient_page_completion,
		},
	},
};

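/*
 * vvp_page_init() attaches the VVP slice to a newly created cl_page. A
 * cacheable page is tied to its vmpage (an extra cl_page reference is
 * taken, dropped again in vvp_page_delete()) and gets vvp_page_ops; a
 * transient page gets vvp_transient_page_ops and is counted on the object
 * while the inode lock is held.
 */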
int vvp_page_init(const struct lu_env *env, struct cl_object *obj,
		  struct cl_page *page, pgoff_t index)
{
	struct vvp_page *vpg = cl_object_page_slice(obj, page);
	struct page *vmpage = page->cp_vmpage;

	CLOBINVRNT(env, obj, vvp_object_invariant(obj));

	vpg->vpg_page = vmpage;
	get_page(vmpage);

	INIT_LIST_HEAD(&vpg->vpg_pending_linkage);
	if (page->cp_type == CPT_CACHEABLE) {
		/* in cache, decref in vvp_page_delete() */
		atomic_inc(&page->cp_ref);
		SetPagePrivate(vmpage);
		vmpage->private = (unsigned long)page;
		cl_page_slice_add(page, &vpg->vpg_cl, obj, index,
				  &vvp_page_ops);
	} else {
		struct vvp_object *clobj = cl2vvp(obj);

		LASSERT(!inode_trylock(clobj->vob_inode));
		cl_page_slice_add(page, &vpg->vpg_cl, obj, index,
				  &vvp_transient_page_ops);
		clobj->vob_transient_pages++;
	}

	return 0;
}