4 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 only,
8 * as published by the Free Software Foundation.
10 * This program is distributed in the hope that it will be useful, but
11 * WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 * General Public License version 2 for more details (a copy is included
14 * in the LICENSE file that accompanied this code).
16 * You should have received a copy of the GNU General Public License
17 * version 2 along with this program; If not, see
18 * http://www.gnu.org/licenses/gpl-2.0.html
23 * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
24 * Use is subject to license terms.
26 * Copyright (c) 2011, 2015, Intel Corporation.
29 * This file is part of Lustre, http://www.lustre.org/
30 * Lustre is a trademark of Sun Microsystems, Inc.
34 #include <linux/sched.h>
36 #include <linux/highmem.h>
37 #include <linux/pagemap.h>
39 #define DEBUG_SUBSYSTEM S_LLITE
41 #include "../include/obd_support.h"
42 #include "../include/lustre_lite.h"
43 #include "../include/lustre_dlm.h"
44 #include "llite_internal.h"
46 #define SA_OMITTED_ENTRY_MAX 8ULL
49 /** negative values are for error cases */
50 SA_ENTRY_INIT = 0, /** init entry */
51 SA_ENTRY_SUCC = 1, /** stat succeeded */
52 SA_ENTRY_INVA = 2, /** invalid entry */
53 SA_ENTRY_DEST = 3, /** entry to be destroyed */
57 /* link into sai->sai_entries */
58 struct list_head se_link;
59 /* link into sai->sai_entries_{received,stated} */
60 struct list_head se_list;
61 /* link into sai hash table locally */
62 struct list_head se_hash;
63 /* entry reference count */
65 /* entry index in the sai */
67 /* low layer ldlm lock handle */
71 /* entry size, contains name */
73 /* pointer to async getattr enqueue info */
74 struct md_enqueue_info *se_minfo;
75 /* pointer to the async getattr request */
76 struct ptlrpc_request *se_req;
77 /* pointer to the target inode */
78 struct inode *se_inode;
83 static unsigned int sai_generation;
84 static DEFINE_SPINLOCK(sai_generation_lock);
87 * The entry can only be released by the caller; it is necessary to hold the lock.
89 static inline int ll_sa_entry_stated(struct ll_sa_entry *entry)
92 return (entry->se_stat != SA_ENTRY_INIT);
95 static inline int ll_sa_entry_hash(int val)
97 return val & LL_SA_CACHE_MASK;
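/*
 * Note: this simply masks the qstr hash into one of the SA cache buckets;
 * LL_SA_CACHE_MASK is presumably LL_SA_CACHE_SIZE - 1 (defined elsewhere),
 * matching the sai_cache[]/sai_cache_lock[] arrays set up in ll_sai_alloc().
 */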
101 * Insert an entry into the SA hash table.
104 ll_sa_entry_enhash(struct ll_statahead_info *sai, struct ll_sa_entry *entry)
106 int i = ll_sa_entry_hash(entry->se_qstr.hash);
108 spin_lock(&sai->sai_cache_lock[i]);
109 list_add_tail(&entry->se_hash, &sai->sai_cache[i]);
110 spin_unlock(&sai->sai_cache_lock[i]);
114 * Remove an entry from the SA hash table.
117 ll_sa_entry_unhash(struct ll_statahead_info *sai, struct ll_sa_entry *entry)
119 int i = ll_sa_entry_hash(entry->se_qstr.hash);
121 spin_lock(&sai->sai_cache_lock[i]);
122 list_del_init(&entry->se_hash);
123 spin_unlock(&sai->sai_cache_lock[i]);
126 static inline int agl_should_run(struct ll_statahead_info *sai,
129 return (inode && S_ISREG(inode->i_mode) && sai->sai_agl_valid);
132 static inline int sa_sent_full(struct ll_statahead_info *sai)
134 return atomic_read(&sai->sai_cache_count) >= sai->sai_max;
137 static inline int sa_received_empty(struct ll_statahead_info *sai)
139 return list_empty(&sai->sai_entries_received);
142 static inline int agl_list_empty(struct ll_statahead_info *sai)
144 return list_empty(&sai->sai_entries_agl);
148 * (1) the hit ratio is less than 80%, or
150 * (2) there have been more than 8 consecutive misses,
151 * then the hit rate is considered low.
153 static inline int sa_low_hit(struct ll_statahead_info *sai)
155 return ((sai->sai_hit > 7 && sai->sai_hit < 4 * sai->sai_miss) ||
156 (sai->sai_consecutive_miss > 8));
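/*
 * Note: the first clause above encodes the "hit ratio below 80%" rule, since
 * hit / (hit + miss) < 0.8 is equivalent to hit < 4 * miss; the "sai_hit > 7"
 * guard presumably keeps the check from firing on a very small sample.
 */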
160 * If the given index lags behind the statahead window by more than
161 * SA_OMITTED_ENTRY_MAX, then it is considered old.
163 static inline int is_omitted_entry(struct ll_statahead_info *sai, __u64 index)
165 return ((__u64)sai->sai_max + index + SA_OMITTED_ENTRY_MAX <
170 * Insert it at the tail of sai_entries on initialization.
172 static struct ll_sa_entry *
173 ll_sa_entry_alloc(struct ll_statahead_info *sai, __u64 index,
174 const char *name, int len)
176 struct ll_inode_info *lli;
177 struct ll_sa_entry *entry;
181 entry_size = sizeof(struct ll_sa_entry) + (len & ~3) + 4;
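/*
 * Note: (len & ~3) + 4 equals len + 1 rounded up to a 4-byte boundary, i.e.
 * room after the struct for the name plus a terminating NUL, kept aligned.
 */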
182 entry = kzalloc(entry_size, GFP_NOFS);
183 if (unlikely(!entry))
184 return ERR_PTR(-ENOMEM);
186 CDEBUG(D_READA, "alloc sa entry %.*s(%p) index %llu\n",
187 len, name, entry, index);
189 entry->se_index = index;
192 * Statahead entry reference rules:
194 * 1) When a statahead entry is initialized, its reference count is set to 2.
195 * One reference is used by the directory scanner. When the scanner
196 * searches the statahead cache for a given name, it can perform a
197 * lockless hash lookup (only the scanner can remove an entry from the
198 * hash list), and once the entry is found, it does not need to call
199 * "atomic_inc()" on it, which improves performance. After using the
200 * statahead entry, the scanner calls "atomic_dec()" to drop the
201 * reference taken at initialization. If that is the last reference,
202 * the statahead entry is freed.
204 * 2) All other threads, including the statahead thread and ptlrpcd threads,
205 * must hold a reference on the entry while they process it, to
206 * guarantee that it will not be released by the directory scanner.
207 * After processing the entry, these threads drop their reference;
208 * if it is the last reference, the entry is freed as well.
211 * The second reference taken when the statahead entry is initialized is
212 * used by the statahead thread, following rule 2).
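 *
 * In short: ll_sa_entry_alloc() starts an entry with refcount 2 (one for the
 * scanner, one for the statahead thread); other threads take an extra
 * reference before touching an entry; every user drops its reference with
 * ll_sa_entry_put(), and the last put frees the entry.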
214 atomic_set(&entry->se_refcount, 2);
215 entry->se_stat = SA_ENTRY_INIT;
216 entry->se_size = entry_size;
217 dname = (char *)entry + sizeof(struct ll_sa_entry);
218 memcpy(dname, name, len);
220 entry->se_qstr.hash = full_name_hash(name, len);
221 entry->se_qstr.len = len;
222 entry->se_qstr.name = dname;
224 lli = ll_i2info(sai->sai_inode);
225 spin_lock(&lli->lli_sa_lock);
226 list_add_tail(&entry->se_link, &sai->sai_entries);
227 INIT_LIST_HEAD(&entry->se_list);
228 ll_sa_entry_enhash(sai, entry);
229 spin_unlock(&lli->lli_sa_lock);
231 atomic_inc(&sai->sai_cache_count);
237 * Used by the directory scanner to look up an entry by name.
239 * Only the caller can remove the entry from the hash, so it is unnecessary to
240 * hold the hash lock. It is the caller's duty to release the initial refcount
241 * on the entry, so it is also unnecessary to increase the refcount here.
243 static struct ll_sa_entry *
244 ll_sa_entry_get_byname(struct ll_statahead_info *sai, const struct qstr *qstr)
246 struct ll_sa_entry *entry;
247 int i = ll_sa_entry_hash(qstr->hash);
249 list_for_each_entry(entry, &sai->sai_cache[i], se_hash) {
250 if (entry->se_qstr.hash == qstr->hash &&
251 entry->se_qstr.len == qstr->len &&
252 memcmp(entry->se_qstr.name, qstr->name, qstr->len) == 0)
259 * Used by the async getattr request callback to find an entry by index.
261 * Called with lli_sa_lock held to prevent others from changing the list during
262 * the search. The entry refcount is increased before returning, to guarantee
263 * that the entry cannot be freed by others.
265 static struct ll_sa_entry *
266 ll_sa_entry_get_byindex(struct ll_statahead_info *sai, __u64 index)
268 struct ll_sa_entry *entry;
270 list_for_each_entry(entry, &sai->sai_entries, se_link) {
271 if (entry->se_index == index) {
272 LASSERT(atomic_read(&entry->se_refcount) > 0);
273 atomic_inc(&entry->se_refcount);
276 if (entry->se_index > index)
282 static void ll_sa_entry_cleanup(struct ll_statahead_info *sai,
283 struct ll_sa_entry *entry)
285 struct md_enqueue_info *minfo = entry->se_minfo;
286 struct ptlrpc_request *req = entry->se_req;
289 entry->se_minfo = NULL;
290 ll_intent_release(&minfo->mi_it);
296 entry->se_req = NULL;
297 ptlrpc_req_finished(req);
301 static void ll_sa_entry_put(struct ll_statahead_info *sai,
302 struct ll_sa_entry *entry)
304 if (atomic_dec_and_test(&entry->se_refcount)) {
305 CDEBUG(D_READA, "free sa entry %.*s(%p) index %llu\n",
306 entry->se_qstr.len, entry->se_qstr.name, entry,
309 LASSERT(list_empty(&entry->se_link));
310 LASSERT(list_empty(&entry->se_list));
311 LASSERT(list_empty(&entry->se_hash));
313 ll_sa_entry_cleanup(sai, entry);
314 iput(entry->se_inode);
317 atomic_dec(&sai->sai_cache_count);
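/*
 * Note: dropping sai_cache_count here reduces the number of outstanding
 * entries that sa_sent_full() compares against sai_max, reopening the
 * statahead window.
 */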
322 do_sa_entry_fini(struct ll_statahead_info *sai, struct ll_sa_entry *entry)
324 struct ll_inode_info *lli = ll_i2info(sai->sai_inode);
326 LASSERT(!list_empty(&entry->se_hash));
327 LASSERT(!list_empty(&entry->se_link));
329 ll_sa_entry_unhash(sai, entry);
331 spin_lock(&lli->lli_sa_lock);
332 entry->se_stat = SA_ENTRY_DEST;
333 list_del_init(&entry->se_link);
334 if (likely(!list_empty(&entry->se_list)))
335 list_del_init(&entry->se_list);
336 spin_unlock(&lli->lli_sa_lock);
338 ll_sa_entry_put(sai, entry);
342 * Delete it from the sai_entries_stated list when finalizing.
345 ll_sa_entry_fini(struct ll_statahead_info *sai, struct ll_sa_entry *entry)
347 struct ll_sa_entry *pos, *next;
350 do_sa_entry_fini(sai, entry);
352 /* drop old entry, only 'scanner' process does this, no need to lock */
353 list_for_each_entry_safe(pos, next, &sai->sai_entries, se_link) {
354 if (!is_omitted_entry(sai, pos->se_index))
356 do_sa_entry_fini(sai, pos);
361 * Inside lli_sa_lock.
364 do_sa_entry_to_stated(struct ll_statahead_info *sai,
365 struct ll_sa_entry *entry, enum se_stat stat)
367 struct ll_sa_entry *se;
368 struct list_head *pos = &sai->sai_entries_stated;
370 if (!list_empty(&entry->se_list))
371 list_del_init(&entry->se_list);
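/*
 * Walk sai_entries_stated backwards to find the last entry with a smaller
 * index, so the list stays sorted by se_index after the list_add() below.
 */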
373 list_for_each_entry_reverse(se, &sai->sai_entries_stated, se_list) {
374 if (se->se_index < entry->se_index) {
380 list_add(&entry->se_list, pos);
381 entry->se_stat = stat;
385 * Move the entry to sai_entries_stated, keeping the list sorted by index.
386 * \retval 1 -- entry to be destroyed.
387 * \retval 0 -- entry is inserted into stated list.
390 ll_sa_entry_to_stated(struct ll_statahead_info *sai,
391 struct ll_sa_entry *entry, enum se_stat stat)
393 struct ll_inode_info *lli = ll_i2info(sai->sai_inode);
396 ll_sa_entry_cleanup(sai, entry);
398 spin_lock(&lli->lli_sa_lock);
399 if (likely(entry->se_stat != SA_ENTRY_DEST)) {
400 do_sa_entry_to_stated(sai, entry, stat);
403 spin_unlock(&lli->lli_sa_lock);
409 * Insert inode into the list of sai_entries_agl.
411 static void ll_agl_add(struct ll_statahead_info *sai,
412 struct inode *inode, int index)
414 struct ll_inode_info *child = ll_i2info(inode);
415 struct ll_inode_info *parent = ll_i2info(sai->sai_inode);
418 spin_lock(&child->lli_agl_lock);
419 if (child->lli_agl_index == 0) {
420 child->lli_agl_index = index;
421 spin_unlock(&child->lli_agl_lock);
423 LASSERT(list_empty(&child->lli_agl_list));
426 spin_lock(&parent->lli_agl_lock);
427 if (list_empty(&sai->sai_entries_agl))
429 list_add_tail(&child->lli_agl_list, &sai->sai_entries_agl);
430 spin_unlock(&parent->lli_agl_lock);
432 spin_unlock(&child->lli_agl_lock);
436 wake_up(&sai->sai_agl_thread.t_ctl_waitq);
439 static struct ll_statahead_info *ll_sai_alloc(void)
441 struct ll_statahead_info *sai;
444 sai = kzalloc(sizeof(*sai), GFP_NOFS);
448 atomic_set(&sai->sai_refcount, 1);
450 spin_lock(&sai_generation_lock);
451 sai->sai_generation = ++sai_generation;
452 if (unlikely(sai_generation == 0))
453 sai->sai_generation = ++sai_generation;
454 spin_unlock(&sai_generation_lock);
456 sai->sai_max = LL_SA_RPC_MIN;
458 init_waitqueue_head(&sai->sai_waitq);
459 init_waitqueue_head(&sai->sai_thread.t_ctl_waitq);
460 init_waitqueue_head(&sai->sai_agl_thread.t_ctl_waitq);
462 INIT_LIST_HEAD(&sai->sai_entries);
463 INIT_LIST_HEAD(&sai->sai_entries_received);
464 INIT_LIST_HEAD(&sai->sai_entries_stated);
465 INIT_LIST_HEAD(&sai->sai_entries_agl);
467 for (i = 0; i < LL_SA_CACHE_SIZE; i++) {
468 INIT_LIST_HEAD(&sai->sai_cache[i]);
469 spin_lock_init(&sai->sai_cache_lock[i]);
471 atomic_set(&sai->sai_cache_count, 0);
476 static inline struct ll_statahead_info *
477 ll_sai_get(struct ll_statahead_info *sai)
479 atomic_inc(&sai->sai_refcount);
483 static void ll_sai_put(struct ll_statahead_info *sai)
485 struct inode *inode = sai->sai_inode;
486 struct ll_inode_info *lli = ll_i2info(inode);
488 if (atomic_dec_and_lock(&sai->sai_refcount, &lli->lli_sa_lock)) {
489 struct ll_sa_entry *entry, *next;
491 if (unlikely(atomic_read(&sai->sai_refcount) > 0)) {
492 /* Race: the interpret callback has just taken a reference. */
495 spin_unlock(&lli->lli_sa_lock);
499 LASSERT(!lli->lli_opendir_key);
500 LASSERT(thread_is_stopped(&sai->sai_thread));
501 LASSERT(thread_is_stopped(&sai->sai_agl_thread));
504 lli->lli_opendir_pid = 0;
505 spin_unlock(&lli->lli_sa_lock);
507 if (sai->sai_sent > sai->sai_replied)
508 CDEBUG(D_READA, "statahead for dir "DFID
509 " does not finish: [sent:%llu] [replied:%llu]\n",
511 sai->sai_sent, sai->sai_replied);
513 list_for_each_entry_safe(entry, next, &sai->sai_entries,
515 do_sa_entry_fini(sai, entry);
517 LASSERT(list_empty(&sai->sai_entries));
518 LASSERT(list_empty(&sai->sai_entries_received));
519 LASSERT(list_empty(&sai->sai_entries_stated));
521 LASSERT(atomic_read(&sai->sai_cache_count) == 0);
522 LASSERT(list_empty(&sai->sai_entries_agl));
529 /* Do NOT forget to drop the inode refcount for an inode placed on sai_entries_agl. */
530 static void ll_agl_trigger(struct inode *inode, struct ll_statahead_info *sai)
532 struct ll_inode_info *lli = ll_i2info(inode);
533 __u64 index = lli->lli_agl_index;
536 LASSERT(list_empty(&lli->lli_agl_list));
538 /* AGL may fall one entry behind statahead */
539 if (is_omitted_entry(sai, index + 1)) {
540 lli->lli_agl_index = 0;
545 /* Someone is in glimpse (sync or async), do nothing. */
546 rc = down_write_trylock(&lli->lli_glimpse_sem);
548 lli->lli_agl_index = 0;
554 * Someone triggered a glimpse within the last second.
555 * 1) The former glimpse succeeded with a glimpse lock granted by the OST; if
556 * the lock is still cached on the client, AGL needs to do nothing. If it
557 * has been cancelled by another client, AGL may be unable to obtain a new
558 * lock, because no glimpse callback is triggered by AGL.
559 * 2) The former glimpse succeeded, but the OST did not grant a glimpse lock.
560 * In that case, it is quite possible that the OST will not grant a
561 * glimpse lock for AGL either.
562 * 3) The former glimpse failed. Compared with the other two cases, this is
563 * relatively rare. AGL can ignore such a case, and it will not noticeably
564 * affect performance.
566 if (lli->lli_glimpse_time != 0 &&
567 time_before(cfs_time_shift(-1), lli->lli_glimpse_time)) {
568 up_write(&lli->lli_glimpse_sem);
569 lli->lli_agl_index = 0;
574 CDEBUG(D_READA, "Handling (init) async glimpse: inode = "
575 DFID", idx = %llu\n", PFID(&lli->lli_fid), index);
578 lli->lli_agl_index = 0;
579 lli->lli_glimpse_time = cfs_time_current();
580 up_write(&lli->lli_glimpse_sem);
582 CDEBUG(D_READA, "Handled (init) async glimpse: inode= "
583 DFID", idx = %llu, rc = %d\n",
584 PFID(&lli->lli_fid), index, rc);
589 static void ll_post_statahead(struct ll_statahead_info *sai)
591 struct inode *dir = sai->sai_inode;
593 struct ll_inode_info *lli = ll_i2info(dir);
594 struct ll_sa_entry *entry;
595 struct md_enqueue_info *minfo;
596 struct lookup_intent *it;
597 struct ptlrpc_request *req;
598 struct mdt_body *body;
601 spin_lock(&lli->lli_sa_lock);
602 if (unlikely(list_empty(&sai->sai_entries_received))) {
603 spin_unlock(&lli->lli_sa_lock);
606 entry = list_entry(sai->sai_entries_received.next,
607 struct ll_sa_entry, se_list);
608 atomic_inc(&entry->se_refcount);
609 list_del_init(&entry->se_list);
610 spin_unlock(&lli->lli_sa_lock);
612 LASSERT(entry->se_handle != 0);
614 minfo = entry->se_minfo;
617 body = req_capsule_server_get(&req->rq_pill, &RMF_MDT_BODY);
623 child = entry->se_inode;
628 LASSERT(fid_is_zero(&minfo->mi_data.op_fid2));
630 /* XXX: No fid in reply, this is probably cross-ref case.
631 * SA can't handle it yet.
633 if (body->valid & OBD_MD_MDS) {
641 /* unlinked and re-created with the same name */
642 if (unlikely(!lu_fid_eq(&minfo->mi_data.op_fid2, &body->fid1))) {
643 entry->se_inode = NULL;
649 it->it_lock_handle = entry->se_handle;
650 rc = md_revalidate_lock(ll_i2mdexp(dir), it, ll_inode2fid(dir), NULL);
656 rc = ll_prep_inode(&child, req, dir->i_sb, it);
660 CDEBUG(D_DLMTRACE, "%s: setting l_data to inode "DFID"%p\n",
661 ll_get_fsname(child->i_sb, NULL, 0),
662 PFID(ll_inode2fid(child)), child);
663 ll_set_lock_data(ll_i2sbi(dir)->ll_md_exp, child, it, NULL);
665 entry->se_inode = child;
667 if (agl_should_run(sai, child))
668 ll_agl_add(sai, child, entry->se_index);
671 /* "ll_sa_entry_to_stated()" drops the related ldlm ibits lock
672 * reference by calling "ll_intent_drop_lock()", regardless of whether the
673 * operations above failed or not. Do not worry about calling
674 * "ll_intent_drop_lock()" more than once.
676 rc = ll_sa_entry_to_stated(sai, entry,
677 rc < 0 ? SA_ENTRY_INVA : SA_ENTRY_SUCC);
678 if (rc == 0 && entry->se_index == sai->sai_index_wait)
679 wake_up(&sai->sai_waitq);
680 ll_sa_entry_put(sai, entry);
683 static int ll_statahead_interpret(struct ptlrpc_request *req,
684 struct md_enqueue_info *minfo, int rc)
686 struct lookup_intent *it = &minfo->mi_it;
687 struct inode *dir = minfo->mi_dir;
688 struct ll_inode_info *lli = ll_i2info(dir);
689 struct ll_statahead_info *sai = NULL;
690 struct ll_sa_entry *entry;
694 if (it_disposition(it, DISP_LOOKUP_NEG))
698 /* release ibits lock ASAP to avoid deadlock when the statahead
699 * thread enqueues a lock on the parent in readdir and another
700 * process enqueues a lock on the child with the parent lock held, e.g. unlink.
703 handle = it->it_lock_handle;
704 ll_intent_drop_lock(it);
707 spin_lock(&lli->lli_sa_lock);
709 if (unlikely(!lli->lli_sai ||
710 lli->lli_sai->sai_generation != minfo->mi_generation)) {
711 spin_unlock(&lli->lli_sa_lock);
715 sai = ll_sai_get(lli->lli_sai);
716 if (unlikely(!thread_is_running(&sai->sai_thread))) {
718 spin_unlock(&lli->lli_sa_lock);
723 entry = ll_sa_entry_get_byindex(sai, minfo->mi_cbdata);
726 spin_unlock(&lli->lli_sa_lock);
732 do_sa_entry_to_stated(sai, entry, SA_ENTRY_INVA);
733 wakeup = (entry->se_index == sai->sai_index_wait);
735 entry->se_minfo = minfo;
736 entry->se_req = ptlrpc_request_addref(req);
737 /* Release the async ibits lock ASAP to avoid deadlock
738 * when the statahead thread tries to enqueue a lock on the parent
739 * for readpage and another thread tries to enqueue a lock on the child
740 * with the parent's lock held, for example: unlink.
742 entry->se_handle = handle;
743 wakeup = list_empty(&sai->sai_entries_received);
744 list_add_tail(&entry->se_list,
745 &sai->sai_entries_received);
748 spin_unlock(&lli->lli_sa_lock);
750 ll_sa_entry_put(sai, entry);
752 wake_up(&sai->sai_thread.t_ctl_waitq);
757 ll_intent_release(it);
766 static void sa_args_fini(struct md_enqueue_info *minfo,
767 struct ldlm_enqueue_info *einfo)
769 LASSERT(minfo && einfo);
776 * Prepare arguments for an async stat RPC.
778 static int sa_args_init(struct inode *dir, struct inode *child,
779 struct ll_sa_entry *entry, struct md_enqueue_info **pmi,
780 struct ldlm_enqueue_info **pei)
782 struct qstr *qstr = &entry->se_qstr;
783 struct ll_inode_info *lli = ll_i2info(dir);
784 struct md_enqueue_info *minfo;
785 struct ldlm_enqueue_info *einfo;
786 struct md_op_data *op_data;
788 einfo = kzalloc(sizeof(*einfo), GFP_NOFS);
792 minfo = kzalloc(sizeof(*minfo), GFP_NOFS);
798 op_data = ll_prep_md_op_data(&minfo->mi_data, dir, child, qstr->name,
799 qstr->len, 0, LUSTRE_OPC_ANY, NULL);
800 if (IS_ERR(op_data)) {
803 return PTR_ERR(op_data);
806 minfo->mi_it.it_op = IT_GETATTR;
807 minfo->mi_dir = igrab(dir);
808 minfo->mi_cb = ll_statahead_interpret;
809 minfo->mi_generation = lli->lli_sai->sai_generation;
810 minfo->mi_cbdata = entry->se_index;
812 einfo->ei_type = LDLM_IBITS;
813 einfo->ei_mode = it_to_lock_mode(&minfo->mi_it);
814 einfo->ei_cb_bl = ll_md_blocking_ast;
815 einfo->ei_cb_cp = ldlm_completion_ast;
816 einfo->ei_cb_gl = NULL;
817 einfo->ei_cbdata = NULL;
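/*
 * Note: the enqueue info filled in above describes the MDS ibits lock that
 * the async getattr will request: an IBITS lock whose mode is derived from
 * the getattr intent, using llite's standard blocking and completion
 * callbacks.
 */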
825 static int do_sa_lookup(struct inode *dir, struct ll_sa_entry *entry)
827 struct md_enqueue_info *minfo;
828 struct ldlm_enqueue_info *einfo;
831 rc = sa_args_init(dir, NULL, entry, &minfo, &einfo);
835 rc = md_intent_getattr_async(ll_i2mdexp(dir), minfo, einfo);
837 sa_args_fini(minfo, einfo);
843 * similar to ll_revalidate_it().
844 * \retval 1 -- dentry valid
845 * \retval 0 -- will send stat-ahead request
846 * \retval others -- prepare stat-ahead request failed
848 static int do_sa_revalidate(struct inode *dir, struct ll_sa_entry *entry,
849 struct dentry *dentry)
851 struct inode *inode = d_inode(dentry);
852 struct lookup_intent it = { .it_op = IT_GETATTR,
853 .it_lock_handle = 0 };
854 struct md_enqueue_info *minfo;
855 struct ldlm_enqueue_info *einfo;
858 if (unlikely(!inode))
861 if (d_mountpoint(dentry))
864 entry->se_inode = igrab(inode);
865 rc = md_revalidate_lock(ll_i2mdexp(dir), &it, ll_inode2fid(inode),
868 entry->se_handle = it.it_lock_handle;
869 ll_intent_release(&it);
873 rc = sa_args_init(dir, inode, entry, &minfo, &einfo);
875 entry->se_inode = NULL;
880 rc = md_intent_getattr_async(ll_i2mdexp(dir), minfo, einfo);
882 entry->se_inode = NULL;
884 sa_args_fini(minfo, einfo);
890 static void ll_statahead_one(struct dentry *parent, const char *entry_name,
893 struct inode *dir = d_inode(parent);
894 struct ll_inode_info *lli = ll_i2info(dir);
895 struct ll_statahead_info *sai = lli->lli_sai;
896 struct dentry *dentry = NULL;
897 struct ll_sa_entry *entry;
901 entry = ll_sa_entry_alloc(sai, sai->sai_index, entry_name,
906 dentry = d_lookup(parent, &entry->se_qstr);
908 rc = do_sa_lookup(dir, entry);
910 rc = do_sa_revalidate(dir, entry, dentry);
911 if (rc == 1 && agl_should_run(sai, d_inode(dentry)))
912 ll_agl_add(sai, d_inode(dentry), entry->se_index);
918 rc1 = ll_sa_entry_to_stated(sai, entry,
919 rc < 0 ? SA_ENTRY_INVA : SA_ENTRY_SUCC);
920 if (rc1 == 0 && entry->se_index == sai->sai_index_wait)
921 wake_up(&sai->sai_waitq);
927 /* drop the refcount on the entry taken by ll_sa_entry_alloc() */
928 ll_sa_entry_put(sai, entry);
931 static int ll_agl_thread(void *arg)
933 struct dentry *parent = arg;
934 struct inode *dir = d_inode(parent);
935 struct ll_inode_info *plli = ll_i2info(dir);
936 struct ll_inode_info *clli;
937 struct ll_sb_info *sbi = ll_i2sbi(dir);
938 struct ll_statahead_info *sai = ll_sai_get(plli->lli_sai);
939 struct ptlrpc_thread *thread = &sai->sai_agl_thread;
940 struct l_wait_info lwi = { 0 };
942 thread->t_pid = current_pid();
943 CDEBUG(D_READA, "agl thread started: sai %p, parent %pd\n",
946 atomic_inc(&sbi->ll_agl_total);
947 spin_lock(&plli->lli_agl_lock);
948 sai->sai_agl_valid = 1;
949 if (thread_is_init(thread))
950 /* If someone else has changed the thread state
951 * (e.g. already changed to SVC_STOPPING), we can't just
952 * blindly overwrite that setting.
954 thread_set_flags(thread, SVC_RUNNING);
955 spin_unlock(&plli->lli_agl_lock);
956 wake_up(&thread->t_ctl_waitq);
959 l_wait_event(thread->t_ctl_waitq,
960 !list_empty(&sai->sai_entries_agl) ||
961 !thread_is_running(thread),
964 if (!thread_is_running(thread))
967 spin_lock(&plli->lli_agl_lock);
968 /* The statahead thread may have helped to process AGL entries,
969 * so check again whether the list is empty.
971 if (!list_empty(&sai->sai_entries_agl)) {
972 clli = list_entry(sai->sai_entries_agl.next,
973 struct ll_inode_info, lli_agl_list);
974 list_del_init(&clli->lli_agl_list);
975 spin_unlock(&plli->lli_agl_lock);
976 ll_agl_trigger(&clli->lli_vfs_inode, sai);
978 spin_unlock(&plli->lli_agl_lock);
982 spin_lock(&plli->lli_agl_lock);
983 sai->sai_agl_valid = 0;
984 while (!list_empty(&sai->sai_entries_agl)) {
985 clli = list_entry(sai->sai_entries_agl.next,
986 struct ll_inode_info, lli_agl_list);
987 list_del_init(&clli->lli_agl_list);
988 spin_unlock(&plli->lli_agl_lock);
989 clli->lli_agl_index = 0;
990 iput(&clli->lli_vfs_inode);
991 spin_lock(&plli->lli_agl_lock);
993 thread_set_flags(thread, SVC_STOPPED);
994 spin_unlock(&plli->lli_agl_lock);
995 wake_up(&thread->t_ctl_waitq);
997 CDEBUG(D_READA, "agl thread stopped: sai %p, parent %pd\n",
1002 static void ll_start_agl(struct dentry *parent, struct ll_statahead_info *sai)
1004 struct ptlrpc_thread *thread = &sai->sai_agl_thread;
1005 struct l_wait_info lwi = { 0 };
1006 struct ll_inode_info *plli;
1007 struct task_struct *task;
1009 CDEBUG(D_READA, "start agl thread: sai %p, parent %pd\n",
1012 plli = ll_i2info(d_inode(parent));
1013 task = kthread_run(ll_agl_thread, parent, "ll_agl_%u",
1014 plli->lli_opendir_pid);
1016 CERROR("can't start ll_agl thread, rc: %ld\n", PTR_ERR(task));
1017 thread_set_flags(thread, SVC_STOPPED);
1021 l_wait_event(thread->t_ctl_waitq,
1022 thread_is_running(thread) || thread_is_stopped(thread),
1026 static int ll_statahead_thread(void *arg)
1028 struct dentry *parent = arg;
1029 struct inode *dir = d_inode(parent);
1030 struct ll_inode_info *plli = ll_i2info(dir);
1031 struct ll_inode_info *clli;
1032 struct ll_sb_info *sbi = ll_i2sbi(dir);
1033 struct ll_statahead_info *sai = ll_sai_get(plli->lli_sai);
1034 struct ptlrpc_thread *thread = &sai->sai_thread;
1035 struct ptlrpc_thread *agl_thread = &sai->sai_agl_thread;
1040 struct ll_dir_chain chain;
1041 struct l_wait_info lwi = { 0 };
1043 thread->t_pid = current_pid();
1044 CDEBUG(D_READA, "statahead thread starting: sai %p, parent %pd\n",
1047 if (sbi->ll_flags & LL_SBI_AGL_ENABLED)
1048 ll_start_agl(parent, sai);
1050 atomic_inc(&sbi->ll_sa_total);
1051 spin_lock(&plli->lli_sa_lock);
1052 if (thread_is_init(thread))
1053 /* If someone else has changed the thread state
1054 * (e.g. already changed to SVC_STOPPING), we can't just
1055 * blindly overwrite that setting.
1057 thread_set_flags(thread, SVC_RUNNING);
1058 spin_unlock(&plli->lli_sa_lock);
1059 wake_up(&thread->t_ctl_waitq);
1061 ll_dir_chain_init(&chain);
1062 page = ll_get_dir_page(dir, pos, &chain);
1065 struct lu_dirpage *dp;
1066 struct lu_dirent *ent;
1070 CDEBUG(D_READA, "error reading dir "DFID" at %llu/%llu: [rc %d] [parent %u]\n",
1071 PFID(ll_inode2fid(dir)), pos, sai->sai_index,
1072 rc, plli->lli_opendir_pid);
1076 dp = page_address(page);
1077 for (ent = lu_dirent_start(dp); ent;
1078 ent = lu_dirent_next(ent)) {
1083 hash = le64_to_cpu(ent->lde_hash);
1084 if (unlikely(hash < pos))
1086 * Skip until we find target hash value.
1090 namelen = le16_to_cpu(ent->lde_namelen);
1091 if (unlikely(namelen == 0))
1093 * Skip dummy record.
1097 name = ent->lde_name;
1098 if (name[0] == '.') {
1104 } else if (name[1] == '.' && namelen == 2) {
1109 } else if (!sai->sai_ls_all) {
1111 * skip hidden files.
1113 sai->sai_skip_hidden++;
1119 * don't stat-ahead first entry.
1121 if (unlikely(++first == 1))
1125 l_wait_event(thread->t_ctl_waitq,
1126 !sa_sent_full(sai) ||
1127 !list_empty(&sai->sai_entries_received) ||
1128 !list_empty(&sai->sai_entries_agl) ||
1129 !thread_is_running(thread),
1133 while (!list_empty(&sai->sai_entries_received))
1134 ll_post_statahead(sai);
1136 if (unlikely(!thread_is_running(thread))) {
1137 ll_release_page(page, 0);
1142 /* If there is no window for metadata statahead, but there are
1143 * some AGL entries to be triggered, then try to help
1144 * process the AGL entries.
1146 if (sa_sent_full(sai)) {
1147 spin_lock(&plli->lli_agl_lock);
1148 while (!list_empty(&sai->sai_entries_agl)) {
1149 clli = list_entry(sai->sai_entries_agl.next,
1150 struct ll_inode_info, lli_agl_list);
1151 list_del_init(&clli->lli_agl_list);
1152 spin_unlock(&plli->lli_agl_lock);
1153 ll_agl_trigger(&clli->lli_vfs_inode,
1156 if (!list_empty(&sai->sai_entries_received))
1160 !thread_is_running(thread))) {
1161 ll_release_page(page, 0);
1166 if (!sa_sent_full(sai))
1169 spin_lock(&plli->lli_agl_lock);
1171 spin_unlock(&plli->lli_agl_lock);
1177 ll_statahead_one(parent, name, namelen);
1179 pos = le64_to_cpu(dp->ldp_hash_end);
1180 if (pos == MDS_DIR_END_OFF) {
1182 * End of directory reached.
1184 ll_release_page(page, 0);
1186 l_wait_event(thread->t_ctl_waitq,
1187 !list_empty(&sai->sai_entries_received) ||
1188 sai->sai_sent == sai->sai_replied ||
1189 !thread_is_running(thread),
1192 while (!list_empty(&sai->sai_entries_received))
1193 ll_post_statahead(sai);
1195 if (unlikely(!thread_is_running(thread))) {
1200 if (sai->sai_sent == sai->sai_replied &&
1201 list_empty(&sai->sai_entries_received))
1205 spin_lock(&plli->lli_agl_lock);
1206 while (!list_empty(&sai->sai_entries_agl) &&
1207 thread_is_running(thread)) {
1208 clli = list_entry(sai->sai_entries_agl.next,
1209 struct ll_inode_info, lli_agl_list);
1210 list_del_init(&clli->lli_agl_list);
1211 spin_unlock(&plli->lli_agl_lock);
1212 ll_agl_trigger(&clli->lli_vfs_inode, sai);
1213 spin_lock(&plli->lli_agl_lock);
1215 spin_unlock(&plli->lli_agl_lock);
1221 * chain is exhausted.
1222 * Normal case: continue to the next page.
1224 ll_release_page(page, le32_to_cpu(dp->ldp_flags) &
1226 page = ll_get_dir_page(dir, pos, &chain);
1228 LASSERT(le32_to_cpu(dp->ldp_flags) & LDF_COLLIDE);
1229 ll_release_page(page, 1);
1231 * go into overflow page.
1237 if (sai->sai_agl_valid) {
1238 spin_lock(&plli->lli_agl_lock);
1239 thread_set_flags(agl_thread, SVC_STOPPING);
1240 spin_unlock(&plli->lli_agl_lock);
1241 wake_up(&agl_thread->t_ctl_waitq);
1243 CDEBUG(D_READA, "stop agl thread: sai %p pid %u\n",
1244 sai, (unsigned int)agl_thread->t_pid);
1245 l_wait_event(agl_thread->t_ctl_waitq,
1246 thread_is_stopped(agl_thread),
1249 /* Set agl_thread flags anyway. */
1250 thread_set_flags(&sai->sai_agl_thread, SVC_STOPPED);
1252 ll_dir_chain_fini(&chain);
1253 spin_lock(&plli->lli_sa_lock);
1254 if (!list_empty(&sai->sai_entries_received)) {
1255 thread_set_flags(thread, SVC_STOPPING);
1256 spin_unlock(&plli->lli_sa_lock);
1258 /* To release the resources held by received entries. */
1259 while (!list_empty(&sai->sai_entries_received))
1260 ll_post_statahead(sai);
1262 spin_lock(&plli->lli_sa_lock);
1264 thread_set_flags(thread, SVC_STOPPED);
1265 spin_unlock(&plli->lli_sa_lock);
1266 wake_up(&sai->sai_waitq);
1267 wake_up(&thread->t_ctl_waitq);
1270 CDEBUG(D_READA, "statahead thread stopped: sai %p, parent %pd\n",
1276 * Called by ll_file_release().
1278 void ll_stop_statahead(struct inode *dir, void *key)
1280 struct ll_inode_info *lli = ll_i2info(dir);
1285 spin_lock(&lli->lli_sa_lock);
1286 if (lli->lli_opendir_key != key || lli->lli_opendir_pid == 0) {
1287 spin_unlock(&lli->lli_sa_lock);
1291 lli->lli_opendir_key = NULL;
1294 struct l_wait_info lwi = { 0 };
1295 struct ptlrpc_thread *thread = &lli->lli_sai->sai_thread;
1297 if (!thread_is_stopped(thread)) {
1298 thread_set_flags(thread, SVC_STOPPING);
1299 spin_unlock(&lli->lli_sa_lock);
1300 wake_up(&thread->t_ctl_waitq);
1302 CDEBUG(D_READA, "stop statahead thread: sai %p pid %u\n",
1303 lli->lli_sai, (unsigned int)thread->t_pid);
1304 l_wait_event(thread->t_ctl_waitq,
1305 thread_is_stopped(thread),
1308 spin_unlock(&lli->lli_sa_lock);
1312 * Put the ref which was taken on the first statahead_enter.
1313 * It may not be the last ref if some statahead requests are still in flight.
1316 ll_sai_put(lli->lli_sai);
1318 lli->lli_opendir_pid = 0;
1319 spin_unlock(&lli->lli_sa_lock);
1325 * not first dirent, or is "."
1327 LS_NONE_FIRST_DE = 0,
1329 * the first non-hidden dirent
1333 * the first hidden dirent, that is "."
1338 static int is_first_dirent(struct inode *dir, struct dentry *dentry)
1340 struct ll_dir_chain chain;
1341 struct qstr *target = &dentry->d_name;
1345 int rc = LS_NONE_FIRST_DE;
1347 ll_dir_chain_init(&chain);
1348 page = ll_get_dir_page(dir, pos, &chain);
1351 struct lu_dirpage *dp;
1352 struct lu_dirent *ent;
1355 struct ll_inode_info *lli = ll_i2info(dir);
1358 CERROR("error reading dir "DFID" at %llu: [rc %d] [parent %u]\n",
1359 PFID(ll_inode2fid(dir)), pos,
1360 rc, lli->lli_opendir_pid);
1364 dp = page_address(page);
1365 for (ent = lu_dirent_start(dp); ent;
1366 ent = lu_dirent_next(ent)) {
1371 hash = le64_to_cpu(ent->lde_hash);
1372 /* ll_get_dir_page() can return any page containing
1373 * the given hash, which may not be the start hash.
1375 if (unlikely(hash < pos))
1378 namelen = le16_to_cpu(ent->lde_namelen);
1379 if (unlikely(namelen == 0))
1381 * skip dummy record.
1385 name = ent->lde_name;
1386 if (name[0] == '.') {
1392 else if (name[1] == '.' && namelen == 2)
1403 if (dot_de && target->name[0] != '.') {
1404 CDEBUG(D_READA, "%.*s skip hidden file %.*s\n",
1405 target->len, target->name,
1410 if (target->len != namelen ||
1411 memcmp(target->name, name, namelen) != 0)
1412 rc = LS_NONE_FIRST_DE;
1416 rc = LS_FIRST_DOT_DE;
1418 ll_release_page(page, 0);
1421 pos = le64_to_cpu(dp->ldp_hash_end);
1422 if (pos == MDS_DIR_END_OFF) {
1424 * End of directory reached.
1426 ll_release_page(page, 0);
1430 * chain is exhausted
1431 * Normal case: continue to the next page.
1433 ll_release_page(page, le32_to_cpu(dp->ldp_flags) &
1435 page = ll_get_dir_page(dir, pos, &chain);
1438 * go into overflow page.
1440 LASSERT(le32_to_cpu(dp->ldp_flags) & LDF_COLLIDE);
1441 ll_release_page(page, 1);
1446 ll_dir_chain_fini(&chain);
1451 ll_sai_unplug(struct ll_statahead_info *sai, struct ll_sa_entry *entry)
1453 struct ptlrpc_thread *thread = &sai->sai_thread;
1454 struct ll_sb_info *sbi = ll_i2sbi(sai->sai_inode);
1457 if (entry && entry->se_stat == SA_ENTRY_SUCC)
1462 ll_sa_entry_fini(sai, entry);
1465 sai->sai_consecutive_miss = 0;
1466 sai->sai_max = min(2 * sai->sai_max, sbi->ll_sa_max);
1468 struct ll_inode_info *lli = ll_i2info(sai->sai_inode);
1471 sai->sai_consecutive_miss++;
1472 if (sa_low_hit(sai) && thread_is_running(thread)) {
1473 atomic_inc(&sbi->ll_sa_wrong);
1474 CDEBUG(D_READA, "Statahead for dir " DFID " hit ratio too low: hit/miss %llu/%llu, sent/replied %llu/%llu, stopping statahead thread\n",
1475 PFID(&lli->lli_fid), sai->sai_hit,
1476 sai->sai_miss, sai->sai_sent,
1478 spin_lock(&lli->lli_sa_lock);
1479 if (!thread_is_stopped(thread))
1480 thread_set_flags(thread, SVC_STOPPING);
1481 spin_unlock(&lli->lli_sa_lock);
1485 if (!thread_is_stopped(thread))
1486 wake_up(&thread->t_ctl_waitq);
1490 * Start the statahead thread if this is the first dir entry.
1491 * Otherwise, if a thread has already been started, wait until it is ahead of me.
1492 * \retval 1 -- found an entry with lock in the cache, the caller needs to do
1494 * \retval 0 -- found an entry in the cache, but without lock, the caller needs
1496 * \retval others -- the caller needs to process it as non-statahead.
1498 int do_statahead_enter(struct inode *dir, struct dentry **dentryp,
1501 struct ll_inode_info *lli = ll_i2info(dir);
1502 struct ll_statahead_info *sai = lli->lli_sai;
1503 struct dentry *parent;
1504 struct ll_sa_entry *entry;
1505 struct ptlrpc_thread *thread;
1506 struct l_wait_info lwi = { 0 };
1507 struct task_struct *task;
1509 struct ll_inode_info *plli;
1511 LASSERT(lli->lli_opendir_pid == current_pid());
1514 thread = &sai->sai_thread;
1515 if (unlikely(thread_is_stopped(thread) &&
1516 list_empty(&sai->sai_entries_stated))) {
1517 /* to release resource */
1518 ll_stop_statahead(dir, lli->lli_opendir_key);
1522 if ((*dentryp)->d_name.name[0] == '.') {
1523 if (sai->sai_ls_all ||
1524 sai->sai_miss_hidden >= sai->sai_skip_hidden) {
1526 * The hidden dentry is the first one, or the statahead
1527 * thread has not skipped this many hidden dentries
1528 * before "sai_ls_all" was enabled as below.
1531 if (!sai->sai_ls_all)
1533 * Maybe the hidden dentry is not
1534 * the first one and "sai_ls_all" was not
1535 * set, so "ls -al" missed it. Enable
1536 * "sai_ls_all" for such a case.
1538 sai->sai_ls_all = 1;
1541 * Such a "getattr" was skipped before
1542 * "sai_ls_all" was enabled as above.
1544 sai->sai_miss_hidden++;
1549 entry = ll_sa_entry_get_byname(sai, &(*dentryp)->d_name);
1550 if (!entry || only_unplug) {
1551 ll_sai_unplug(sai, entry);
1552 return entry ? 1 : -EAGAIN;
1555 if (!ll_sa_entry_stated(entry)) {
1556 sai->sai_index_wait = entry->se_index;
1557 lwi = LWI_TIMEOUT_INTR(cfs_time_seconds(30), NULL,
1558 LWI_ON_SIGNAL_NOOP, NULL);
1559 rc = l_wait_event(sai->sai_waitq,
1560 ll_sa_entry_stated(entry) ||
1561 thread_is_stopped(thread),
1564 ll_sai_unplug(sai, entry);
1569 if (entry->se_stat == SA_ENTRY_SUCC && entry->se_inode) {
1570 struct inode *inode = entry->se_inode;
1571 struct lookup_intent it = { .it_op = IT_GETATTR,
1576 rc = md_revalidate_lock(ll_i2mdexp(dir), &it,
1577 ll_inode2fid(inode), &bits);
1579 if (!d_inode(*dentryp)) {
1580 struct dentry *alias;
1582 alias = ll_splice_alias(inode,
1584 if (IS_ERR(alias)) {
1585 ll_sai_unplug(sai, entry);
1586 return PTR_ERR(alias);
1589 } else if (d_inode(*dentryp) != inode) {
1590 /* revalidate, but inode is recreated */
1591 CDEBUG(D_READA, "%s: stale dentry %pd inode "DFID", statahead inode "DFID"\n",
1592 ll_get_fsname(d_inode(*dentryp)->i_sb, NULL, 0),
1594 PFID(ll_inode2fid(d_inode(*dentryp))),
1595 PFID(ll_inode2fid(inode)));
1596 ll_sai_unplug(sai, entry);
1601 entry->se_inode = NULL;
1603 if ((bits & MDS_INODELOCK_LOOKUP) &&
1604 d_lustre_invalid(*dentryp))
1605 d_lustre_revalidate(*dentryp);
1606 ll_intent_release(&it);
1610 ll_sai_unplug(sai, entry);
1614 /* I am the "lli_opendir_pid" owner; only I can set "lli_sai". */
1615 rc = is_first_dirent(dir, *dentryp);
1616 if (rc == LS_NONE_FIRST_DE) {
1617 /* It is not an "ls -{a}l" operation, no need for statahead. */
1622 sai = ll_sai_alloc();
1628 sai->sai_ls_all = (rc == LS_FIRST_DOT_DE);
1629 sai->sai_inode = igrab(dir);
1630 if (unlikely(!sai->sai_inode)) {
1631 CWARN("Do not start stat ahead on dying inode "DFID"\n",
1632 PFID(&lli->lli_fid));
1637 /* get parent reference count here, and put it in ll_statahead_thread */
1638 parent = dget((*dentryp)->d_parent);
1639 if (unlikely(sai->sai_inode != d_inode(parent))) {
1640 struct ll_inode_info *nlli = ll_i2info(d_inode(parent));
1642 CWARN("Race condition, someone changed %pd just now: old parent "DFID", new parent "DFID"\n",
1644 PFID(&lli->lli_fid), PFID(&nlli->lli_fid));
1646 iput(sai->sai_inode);
1651 CDEBUG(D_READA, "start statahead thread: sai %p, parent %pd\n",
1654 /* The sai buffer already has one reference taken at allocation time,
1655 * but as soon as we expose the sai by attaching it to the lli that
1656 * default reference can be dropped by another thread calling
1657 * ll_stop_statahead. We need to take a local reference to protect
1658 * the sai buffer while we intend to access it.
1663 plli = ll_i2info(d_inode(parent));
1664 task = kthread_run(ll_statahead_thread, parent, "ll_sa_%u",
1665 plli->lli_opendir_pid);
1666 thread = &sai->sai_thread;
1669 CERROR("can't start ll_sa thread, rc: %d\n", rc);
1671 lli->lli_opendir_key = NULL;
1672 thread_set_flags(thread, SVC_STOPPED);
1673 thread_set_flags(&sai->sai_agl_thread, SVC_STOPPED);
1674 /* Drop both our own local reference and the default
1675 * reference from allocation time.
1679 LASSERT(!lli->lli_sai);
1683 l_wait_event(thread->t_ctl_waitq,
1684 thread_is_running(thread) || thread_is_stopped(thread),
1689 * We don't stat-ahead for the first dirent since we are already in lookup.
1696 spin_lock(&lli->lli_sa_lock);
1697 lli->lli_opendir_key = NULL;
1698 lli->lli_opendir_pid = 0;
1699 spin_unlock(&lli->lli_sa_lock);