Merge branch 'mymd/for-next' into mymd/for-linus
[cascardo/linux.git] / drivers / staging / lustre / lustre / llite / statahead.c
1 /*
2  * GPL HEADER START
3  *
4  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
5  *
6  * This program is free software; you can redistribute it and/or modify
7  * it under the terms of the GNU General Public License version 2 only,
8  * as published by the Free Software Foundation.
9  *
10  * This program is distributed in the hope that it will be useful, but
11  * WITHOUT ANY WARRANTY; without even the implied warranty of
12  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
13  * General Public License version 2 for more details (a copy is included
14  * in the LICENSE file that accompanied this code).
15  *
16  * You should have received a copy of the GNU General Public License
17  * version 2 along with this program; If not, see
18  * http://www.gnu.org/licenses/gpl-2.0.html
19  *
20  * GPL HEADER END
21  */
22 /*
23  * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
24  * Use is subject to license terms.
25  *
26  * Copyright (c) 2011, 2015, Intel Corporation.
27  */
28 /*
29  * This file is part of Lustre, http://www.lustre.org/
30  * Lustre is a trademark of Sun Microsystems, Inc.
31  */
32
33 #include <linux/fs.h>
34 #include <linux/sched.h>
35 #include <linux/mm.h>
36 #include <linux/highmem.h>
37 #include <linux/pagemap.h>
38
39 #define DEBUG_SUBSYSTEM S_LLITE
40
41 #include "../include/obd_support.h"
42 #include "../include/lustre_lite.h"
43 #include "../include/lustre_dlm.h"
44 #include "llite_internal.h"
45
46 #define SA_OMITTED_ENTRY_MAX 8ULL
47
/*
 * Lifecycle state of a statahead entry.
 * Negative values are reserved for error cases.
 */
enum se_stat {
        SA_ENTRY_INIT = 0,      /* initialized, async stat not finished yet */
        SA_ENTRY_SUCC = 1,      /* async stat succeeded */
        SA_ENTRY_INVA = 2,      /* entry is invalid (stat failed or stale) */
        SA_ENTRY_DEST = 3,      /* entry is being destroyed */
};
55
/*
 * One statahead entry: tracks a single directory name from allocation,
 * through the async getattr RPC, to its final stated/destroyed state.
 * The entry name is stored inline after the structure (see
 * ll_sa_entry_alloc()); se_qstr.name points into that trailing storage.
 */
struct ll_sa_entry {
        /* link into sai->sai_entries, ordered by se_index */
        struct list_head              se_link;
        /* link into sai->sai_entries_{received,stated} */
        struct list_head              se_list;
        /* link into sai hash table locally */
        struct list_head              se_hash;
        /* entry reference count (see the rules in ll_sa_entry_alloc()) */
        atomic_t            se_refcount;
        /* entry index in the sai */
        __u64              se_index;
        /* low layer ldlm lock handle */
        __u64              se_handle;
        /* entry status */
        enum se_stat       se_stat;
        /* total allocation size, including the inline name */
        int                  se_size;
        /* pointer to async getattr enqueue info */
        struct md_enqueue_info *se_minfo;
        /* pointer to the async getattr request */
        struct ptlrpc_request  *se_req;
        /* pointer to the target inode */
        struct inode       *se_inode;
        /* entry name */
        struct qstr          se_qstr;
};
82
83 static unsigned int sai_generation;
84 static DEFINE_SPINLOCK(sai_generation_lock);
85
86 /*
87  * The entry only can be released by the caller, it is necessary to hold lock.
88  */
89 static inline int ll_sa_entry_stated(struct ll_sa_entry *entry)
90 {
91         smp_rmb();
92         return (entry->se_stat != SA_ENTRY_INIT);
93 }
94
95 static inline int ll_sa_entry_hash(int val)
96 {
97         return val & LL_SA_CACHE_MASK;
98 }
99
100 /*
101  * Insert entry to hash SA table.
102  */
103 static inline void
104 ll_sa_entry_enhash(struct ll_statahead_info *sai, struct ll_sa_entry *entry)
105 {
106         int i = ll_sa_entry_hash(entry->se_qstr.hash);
107
108         spin_lock(&sai->sai_cache_lock[i]);
109         list_add_tail(&entry->se_hash, &sai->sai_cache[i]);
110         spin_unlock(&sai->sai_cache_lock[i]);
111 }
112
113 /*
114  * Remove entry from SA table.
115  */
116 static inline void
117 ll_sa_entry_unhash(struct ll_statahead_info *sai, struct ll_sa_entry *entry)
118 {
119         int i = ll_sa_entry_hash(entry->se_qstr.hash);
120
121         spin_lock(&sai->sai_cache_lock[i]);
122         list_del_init(&entry->se_hash);
123         spin_unlock(&sai->sai_cache_lock[i]);
124 }
125
126 static inline int agl_should_run(struct ll_statahead_info *sai,
127                                  struct inode *inode)
128 {
129         return (inode && S_ISREG(inode->i_mode) && sai->sai_agl_valid);
130 }
131
132 static inline int sa_sent_full(struct ll_statahead_info *sai)
133 {
134         return atomic_read(&sai->sai_cache_count) >= sai->sai_max;
135 }
136
137 static inline int sa_received_empty(struct ll_statahead_info *sai)
138 {
139         return list_empty(&sai->sai_entries_received);
140 }
141
142 static inline int agl_list_empty(struct ll_statahead_info *sai)
143 {
144         return list_empty(&sai->sai_entries_agl);
145 }
146
147 /**
148  * (1) hit ratio less than 80%
149  * or
150  * (2) consecutive miss more than 8
151  * then means low hit.
152  */
153 static inline int sa_low_hit(struct ll_statahead_info *sai)
154 {
155         return ((sai->sai_hit > 7 && sai->sai_hit < 4 * sai->sai_miss) ||
156                 (sai->sai_consecutive_miss > 8));
157 }
158
159 /*
160  * If the given index is behind of statahead window more than
161  * SA_OMITTED_ENTRY_MAX, then it is old.
162  */
163 static inline int is_omitted_entry(struct ll_statahead_info *sai, __u64 index)
164 {
165         return ((__u64)sai->sai_max + index + SA_OMITTED_ENTRY_MAX <
166                  sai->sai_index);
167 }
168
/*
 * Allocate a statahead entry for one directory name and link it into the
 * sai: appended to the tail of sai->sai_entries (so the list stays sorted
 * by se_index) and inserted into the name hash.
 *
 * \param sai	statahead info of the directory being scanned
 * \param index	statahead index assigned to this entry
 * \param name	entry name (need not be NUL-terminated)
 * \param len	length of \a name in bytes
 *
 * \retval new entry on success, ERR_PTR(-ENOMEM) on allocation failure
 */
static struct ll_sa_entry *
ll_sa_entry_alloc(struct ll_statahead_info *sai, __u64 index,
                  const char *name, int len)
{
        struct ll_inode_info *lli;
        struct ll_sa_entry   *entry;
        int                entry_size;
        char             *dname;

        /* the name is stored inline after the struct; (len & ~3) + 4 rounds
         * up so there is always room for the NUL terminator */
        entry_size = sizeof(struct ll_sa_entry) + (len & ~3) + 4;
        entry = kzalloc(entry_size, GFP_NOFS);
        if (unlikely(!entry))
                return ERR_PTR(-ENOMEM);

        CDEBUG(D_READA, "alloc sa entry %.*s(%p) index %llu\n",
               len, name, entry, index);

        entry->se_index = index;

        /*
         * Statahead entry reference rules:
         *
         * 1) When statahead entry is initialized, its reference is set as 2.
         *    One reference is used by the directory scanner. When the scanner
         *    searches the statahead cache for the given name, it can perform
         *    lockless hash lookup (only the scanner can remove entry from hash
         *    list), and once found, it needn't to call "atomic_inc()" for the
         *    entry reference. So the performance is improved. After using the
         *    statahead entry, the scanner will call "atomic_dec()" to drop the
         *    reference held when initialization. If it is the last reference,
         *    the statahead entry will be freed.
         *
         * 2) All other threads, including statahead thread and ptlrpcd thread,
         *    when they process the statahead entry, the reference for target
         *    should be held to guarantee the entry will not be released by the
         *    directory scanner. After processing the entry, these threads will
         *    drop the entry reference. If it is the last reference, the entry
         *    will be freed.
         *
         *    The second reference when initializes the statahead entry is used
         *    by the statahead thread, following the rule 2).
         */
        atomic_set(&entry->se_refcount, 2);
        entry->se_stat = SA_ENTRY_INIT;
        entry->se_size = entry_size;
        dname = (char *)entry + sizeof(struct ll_sa_entry);
        memcpy(dname, name, len);
        dname[len] = 0;
        entry->se_qstr.hash = full_name_hash(name, len);
        entry->se_qstr.len = len;
        entry->se_qstr.name = dname;

        /* publish the entry: list insertion under lli_sa_lock, hash
         * insertion under the per-bucket cache lock */
        lli = ll_i2info(sai->sai_inode);
        spin_lock(&lli->lli_sa_lock);
        list_add_tail(&entry->se_link, &sai->sai_entries);
        INIT_LIST_HEAD(&entry->se_list);
        ll_sa_entry_enhash(sai, entry);
        spin_unlock(&lli->lli_sa_lock);

        atomic_inc(&sai->sai_cache_count);

        return entry;
}
235
236 /*
237  * Used by the directory scanner to search entry with name.
238  *
239  * Only the caller can remove the entry from hash, so it is unnecessary to hold
240  * hash lock. It is caller's duty to release the init refcount on the entry, so
241  * it is also unnecessary to increase refcount on the entry.
242  */
243 static struct ll_sa_entry *
244 ll_sa_entry_get_byname(struct ll_statahead_info *sai, const struct qstr *qstr)
245 {
246         struct ll_sa_entry *entry;
247         int i = ll_sa_entry_hash(qstr->hash);
248
249         list_for_each_entry(entry, &sai->sai_cache[i], se_hash) {
250                 if (entry->se_qstr.hash == qstr->hash &&
251                     entry->se_qstr.len == qstr->len &&
252                     memcmp(entry->se_qstr.name, qstr->name, qstr->len) == 0)
253                         return entry;
254         }
255         return NULL;
256 }
257
258 /*
259  * Used by the async getattr request callback to find entry with index.
260  *
261  * Inside lli_sa_lock to prevent others to change the list during the search.
262  * It needs to increase entry refcount before returning to guarantee that the
263  * entry cannot be freed by others.
264  */
265 static struct ll_sa_entry *
266 ll_sa_entry_get_byindex(struct ll_statahead_info *sai, __u64 index)
267 {
268         struct ll_sa_entry *entry;
269
270         list_for_each_entry(entry, &sai->sai_entries, se_link) {
271                 if (entry->se_index == index) {
272                         LASSERT(atomic_read(&entry->se_refcount) > 0);
273                         atomic_inc(&entry->se_refcount);
274                         return entry;
275                 }
276                 if (entry->se_index > index)
277                         break;
278         }
279         return NULL;
280 }
281
282 static void ll_sa_entry_cleanup(struct ll_statahead_info *sai,
283                                 struct ll_sa_entry *entry)
284 {
285         struct md_enqueue_info *minfo = entry->se_minfo;
286         struct ptlrpc_request  *req   = entry->se_req;
287
288         if (minfo) {
289                 entry->se_minfo = NULL;
290                 ll_intent_release(&minfo->mi_it);
291                 iput(minfo->mi_dir);
292                 kfree(minfo);
293         }
294
295         if (req) {
296                 entry->se_req = NULL;
297                 ptlrpc_req_finished(req);
298         }
299 }
300
/*
 * Drop one reference on the entry; free it when the last reference goes.
 * By the time the refcount hits zero the entry must already be off all
 * three lists (do_sa_entry_fini() is the only place that unlinks it).
 */
static void ll_sa_entry_put(struct ll_statahead_info *sai,
                            struct ll_sa_entry *entry)
{
        if (atomic_dec_and_test(&entry->se_refcount)) {
                CDEBUG(D_READA, "free sa entry %.*s(%p) index %llu\n",
                       entry->se_qstr.len, entry->se_qstr.name, entry,
                       entry->se_index);

                LASSERT(list_empty(&entry->se_link));
                LASSERT(list_empty(&entry->se_list));
                LASSERT(list_empty(&entry->se_hash));

                /* release any pending RPC state and the child inode ref */
                ll_sa_entry_cleanup(sai, entry);
                iput(entry->se_inode);

                kfree(entry);
                atomic_dec(&sai->sai_cache_count);
        }
}
320
/*
 * Unlink the entry from the hash and from all sai lists, mark it as being
 * destroyed, and drop the init reference taken in ll_sa_entry_alloc().
 *
 * The hash removal happens outside lli_sa_lock (it takes the per-bucket
 * cache lock); the list removal and the SA_ENTRY_DEST transition are done
 * under lli_sa_lock so concurrent users see a consistent state.
 */
static inline void
do_sa_entry_fini(struct ll_statahead_info *sai, struct ll_sa_entry *entry)
{
        struct ll_inode_info *lli = ll_i2info(sai->sai_inode);

        LASSERT(!list_empty(&entry->se_hash));
        LASSERT(!list_empty(&entry->se_link));

        ll_sa_entry_unhash(sai, entry);

        spin_lock(&lli->lli_sa_lock);
        entry->se_stat = SA_ENTRY_DEST;
        list_del_init(&entry->se_link);
        if (likely(!list_empty(&entry->se_list)))
                list_del_init(&entry->se_list);
        spin_unlock(&lli->lli_sa_lock);

        /* drop the init reference; may free the entry */
        ll_sa_entry_put(sai, entry);
}
340
/*
 * Release \a entry (if non-NULL), then opportunistically drop entries that
 * have fallen behind the statahead window (see is_omitted_entry()).
 */
static void
ll_sa_entry_fini(struct ll_statahead_info *sai, struct ll_sa_entry *entry)
{
        struct ll_sa_entry *pos, *next;

        if (entry)
                do_sa_entry_fini(sai, entry);

        /* drop old entry, only 'scanner' process does this, no need to lock */
        list_for_each_entry_safe(pos, next, &sai->sai_entries, se_link) {
                if (!is_omitted_entry(sai, pos->se_index))
                        break;
                do_sa_entry_fini(sai, pos);
        }
}
359
/*
 * Move the entry onto sai_entries_stated, keeping that list sorted by
 * ascending se_index, and record its final state.
 *
 * Must be called with lli_sa_lock held.
 */
static void
do_sa_entry_to_stated(struct ll_statahead_info *sai,
                      struct ll_sa_entry *entry, enum se_stat stat)
{
        struct ll_sa_entry *se;
        struct list_head         *pos = &sai->sai_entries_stated;

        if (!list_empty(&entry->se_list))
                list_del_init(&entry->se_list);

        /* walk backwards to find the last entry with a smaller index and
         * insert right after it (or at the list head if none is smaller) */
        list_for_each_entry_reverse(se, &sai->sai_entries_stated, se_list) {
                if (se->se_index < entry->se_index) {
                        pos = &se->se_list;
                        break;
                }
        }

        list_add(&entry->se_list, pos);
        entry->se_stat = stat;
}
383
/*
 * Move entry to sai_entries_stated and sort with the index.
 *
 * The RPC state (minfo/req) is released first; if the entry was already
 * marked SA_ENTRY_DEST by a concurrent fini, it is not re-inserted.
 *
 * \retval 1    -- entry to be destroyed.
 * \retval 0    -- entry is inserted into stated list.
 */
static int
ll_sa_entry_to_stated(struct ll_statahead_info *sai,
                      struct ll_sa_entry *entry, enum se_stat stat)
{
        struct ll_inode_info *lli = ll_i2info(sai->sai_inode);
        int                ret = 1;

        ll_sa_entry_cleanup(sai, entry);

        spin_lock(&lli->lli_sa_lock);
        if (likely(entry->se_stat != SA_ENTRY_DEST)) {
                do_sa_entry_to_stated(sai, entry, stat);
                ret = 0;
        }
        spin_unlock(&lli->lli_sa_lock);

        return ret;
}
407
/*
 * Queue \a inode on sai_entries_agl for async glimpse, taking an inode
 * reference (dropped later by ll_agl_trigger()).
 *
 * lli_agl_index != 0 means the inode is already queued, so nothing is
 * done. The AGL thread is woken only when the list transitions from
 * empty to non-empty; otherwise it is already busy draining the list.
 */
static void ll_agl_add(struct ll_statahead_info *sai,
                       struct inode *inode, int index)
{
        struct ll_inode_info *child  = ll_i2info(inode);
        struct ll_inode_info *parent = ll_i2info(sai->sai_inode);
        int                added  = 0;

        spin_lock(&child->lli_agl_lock);
        if (child->lli_agl_index == 0) {
                child->lli_agl_index = index;
                spin_unlock(&child->lli_agl_lock);

                LASSERT(list_empty(&child->lli_agl_list));

                igrab(inode);
                spin_lock(&parent->lli_agl_lock);
                if (list_empty(&sai->sai_entries_agl))
                        added = 1;
                list_add_tail(&child->lli_agl_list, &sai->sai_entries_agl);
                spin_unlock(&parent->lli_agl_lock);
        } else {
                spin_unlock(&child->lli_agl_lock);
        }

        if (added > 0)
                wake_up(&sai->sai_agl_thread.t_ctl_waitq);
}
438
/*
 * Allocate and initialize a statahead info object with a single reference.
 *
 * Generation numbers come from a global counter and are never 0, so a
 * stale interpret callback can be detected by comparing generations
 * (see ll_statahead_interpret()).
 *
 * \retval new sai on success, NULL on allocation failure
 */
static struct ll_statahead_info *ll_sai_alloc(void)
{
        struct ll_statahead_info *sai;
        int                    i;

        sai = kzalloc(sizeof(*sai), GFP_NOFS);
        if (!sai)
                return NULL;

        atomic_set(&sai->sai_refcount, 1);

        spin_lock(&sai_generation_lock);
        sai->sai_generation = ++sai_generation;
        if (unlikely(sai_generation == 0))
                sai->sai_generation = ++sai_generation;
        spin_unlock(&sai_generation_lock);

        /* start with the minimum RPC window; it grows on cache hits */
        sai->sai_max = LL_SA_RPC_MIN;
        sai->sai_index = 1;
        init_waitqueue_head(&sai->sai_waitq);
        init_waitqueue_head(&sai->sai_thread.t_ctl_waitq);
        init_waitqueue_head(&sai->sai_agl_thread.t_ctl_waitq);

        INIT_LIST_HEAD(&sai->sai_entries);
        INIT_LIST_HEAD(&sai->sai_entries_received);
        INIT_LIST_HEAD(&sai->sai_entries_stated);
        INIT_LIST_HEAD(&sai->sai_entries_agl);

        for (i = 0; i < LL_SA_CACHE_SIZE; i++) {
                INIT_LIST_HEAD(&sai->sai_cache[i]);
                spin_lock_init(&sai->sai_cache_lock[i]);
        }
        atomic_set(&sai->sai_cache_count, 0);

        return sai;
}
475
476 static inline struct ll_statahead_info *
477 ll_sai_get(struct ll_statahead_info *sai)
478 {
479         atomic_inc(&sai->sai_refcount);
480         return sai;
481 }
482
/*
 * Drop a reference on \a sai; tear the whole statahead info down when the
 * last reference goes away.
 *
 * atomic_dec_and_lock() makes the final put hold lli_sa_lock while the
 * sai is detached from the inode, which closes the race with the
 * interpret callback taking a fresh reference concurrently (re-checked
 * under the lock below).
 */
static void ll_sai_put(struct ll_statahead_info *sai)
{
        struct inode     *inode = sai->sai_inode;
        struct ll_inode_info *lli   = ll_i2info(inode);

        if (atomic_dec_and_lock(&sai->sai_refcount, &lli->lli_sa_lock)) {
                struct ll_sa_entry *entry, *next;

                if (unlikely(atomic_read(&sai->sai_refcount) > 0)) {
                        /* It is race case, the interpret callback just hold
                         * a reference count
                         */
                        spin_unlock(&lli->lli_sa_lock);
                        return;
                }

                LASSERT(!lli->lli_opendir_key);
                LASSERT(thread_is_stopped(&sai->sai_thread));
                LASSERT(thread_is_stopped(&sai->sai_agl_thread));

                lli->lli_sai = NULL;
                lli->lli_opendir_pid = 0;
                spin_unlock(&lli->lli_sa_lock);

                if (sai->sai_sent > sai->sai_replied)
                        CDEBUG(D_READA, "statahead for dir "DFID
                              " does not finish: [sent:%llu] [replied:%llu]\n",
                              PFID(&lli->lli_fid),
                              sai->sai_sent, sai->sai_replied);

                /* release every entry still linked into sai_entries */
                list_for_each_entry_safe(entry, next, &sai->sai_entries,
                                         se_link)
                        do_sa_entry_fini(sai, entry);

                LASSERT(list_empty(&sai->sai_entries));
                LASSERT(list_empty(&sai->sai_entries_received));
                LASSERT(list_empty(&sai->sai_entries_stated));

                LASSERT(atomic_read(&sai->sai_cache_count) == 0);
                LASSERT(list_empty(&sai->sai_entries_agl));

                /* drop the directory inode reference held by the sai */
                iput(inode);
                kfree(sai);
        }
}
528
/*
 * Perform the async glimpse (AGL) for one queued inode, then drop the
 * inode reference taken when it was added to sai_entries_agl.
 *
 * Bails out without glimpsing when the entry has fallen behind the
 * statahead window, when someone else holds lli_glimpse_sem, or when a
 * glimpse was already triggered within the last second.
 */
static void ll_agl_trigger(struct inode *inode, struct ll_statahead_info *sai)
{
        struct ll_inode_info *lli   = ll_i2info(inode);
        __u64            index = lli->lli_agl_index;
        int                rc;

        LASSERT(list_empty(&lli->lli_agl_list));

        /* AGL maybe fall behind statahead with one entry */
        if (is_omitted_entry(sai, index + 1)) {
                lli->lli_agl_index = 0;
                iput(inode);
                return;
        }

        /* Someone is in glimpse (sync or async), do nothing. */
        rc = down_write_trylock(&lli->lli_glimpse_sem);
        if (rc == 0) {
                lli->lli_agl_index = 0;
                iput(inode);
                return;
        }

        /*
         * Someone triggered glimpse within 1 sec before.
         * 1) The former glimpse succeeded with glimpse lock granted by OST, and
         *    if the lock is still cached on client, AGL needs to do nothing. If
         *    it is cancelled by other client, AGL maybe cannot obtain new lock
         *    for no glimpse callback triggered by AGL.
         * 2) The former glimpse succeeded, but OST did not grant glimpse lock.
         *    Under such case, it is quite possible that the OST will not grant
         *    glimpse lock for AGL also.
         * 3) The former glimpse failed, compared with other two cases, it is
         *    relative rare. AGL can ignore such case, and it will not muchly
         *    affect the performance.
         */
        if (lli->lli_glimpse_time != 0 &&
            time_before(cfs_time_shift(-1), lli->lli_glimpse_time)) {
                up_write(&lli->lli_glimpse_sem);
                lli->lli_agl_index = 0;
                iput(inode);
                return;
        }

        CDEBUG(D_READA, "Handling (init) async glimpse: inode = "
               DFID", idx = %llu\n", PFID(&lli->lli_fid), index);

        cl_agl(inode);
        lli->lli_agl_index = 0;
        lli->lli_glimpse_time = cfs_time_current();
        up_write(&lli->lli_glimpse_sem);

        CDEBUG(D_READA, "Handled (init) async glimpse: inode= "
               DFID", idx = %llu, rc = %d\n",
               PFID(&lli->lli_fid), index, rc);

        iput(inode);
}
588
/*
 * Process one reply from sai_entries_received: revalidate the ldlm lock
 * obtained by the async getattr, instantiate or revalidate the child
 * inode, and move the entry to the stated list, waking the scanner if it
 * is waiting on exactly this index.
 *
 * Called without locks held; lli_sa_lock is taken only to dequeue.
 */
static void ll_post_statahead(struct ll_statahead_info *sai)
{
        struct inode       *dir   = sai->sai_inode;
        struct inode       *child;
        struct ll_inode_info   *lli   = ll_i2info(dir);
        struct ll_sa_entry     *entry;
        struct md_enqueue_info *minfo;
        struct lookup_intent   *it;
        struct ptlrpc_request  *req;
        struct mdt_body *body;
        int                  rc    = 0;

        spin_lock(&lli->lli_sa_lock);
        if (unlikely(list_empty(&sai->sai_entries_received))) {
                spin_unlock(&lli->lli_sa_lock);
                return;
        }
        /* dequeue the head entry; hold a reference while working on it */
        entry = list_entry(sai->sai_entries_received.next,
                           struct ll_sa_entry, se_list);
        atomic_inc(&entry->se_refcount);
        list_del_init(&entry->se_list);
        spin_unlock(&lli->lli_sa_lock);

        LASSERT(entry->se_handle != 0);

        minfo = entry->se_minfo;
        it = &minfo->mi_it;
        req = entry->se_req;
        body = req_capsule_server_get(&req->rq_pill, &RMF_MDT_BODY);
        if (!body) {
                rc = -EFAULT;
                goto out;
        }

        child = entry->se_inode;
        if (!child) {
                /*
                 * lookup.
                 */
                LASSERT(fid_is_zero(&minfo->mi_data.op_fid2));

                /* XXX: No fid in reply, this is probably cross-ref case.
                 * SA can't handle it yet.
                 */
                if (body->valid & OBD_MD_MDS) {
                        rc = -EAGAIN;
                        goto out;
                }
        } else {
                /*
                 * revalidate.
                 */
                /* unlinked and re-created with the same name */
                if (unlikely(!lu_fid_eq(&minfo->mi_data.op_fid2, &body->fid1))) {
                        entry->se_inode = NULL;
                        iput(child);
                        child = NULL;
                }
        }

        /* validate the lock handle saved by the interpret callback */
        it->it_lock_handle = entry->se_handle;
        rc = md_revalidate_lock(ll_i2mdexp(dir), it, ll_inode2fid(dir), NULL);
        if (rc != 1) {
                rc = -EAGAIN;
                goto out;
        }

        rc = ll_prep_inode(&child, req, dir->i_sb, it);
        if (rc)
                goto out;

        CDEBUG(D_DLMTRACE, "%s: setting l_data to inode "DFID"%p\n",
               ll_get_fsname(child->i_sb, NULL, 0),
               PFID(ll_inode2fid(child)), child);
        ll_set_lock_data(ll_i2sbi(dir)->ll_md_exp, child, it, NULL);

        entry->se_inode = child;

        if (agl_should_run(sai, child))
                ll_agl_add(sai, child, entry->se_index);

out:
        /* The "ll_sa_entry_to_stated()" will drop related ldlm ibits lock
         * reference count by calling "ll_intent_drop_lock()" in spite of the
         * above operations failed or not. Do not worry about calling
         * "ll_intent_drop_lock()" more than once.
         */
        rc = ll_sa_entry_to_stated(sai, entry,
                                   rc < 0 ? SA_ENTRY_INVA : SA_ENTRY_SUCC);
        if (rc == 0 && entry->se_index == sai->sai_index_wait)
                wake_up(&sai->sai_waitq);
        ll_sa_entry_put(sai, entry);
}
682
/*
 * Completion callback for the async getattr RPC issued by statahead.
 *
 * On success the ibits lock is dropped right away (only its handle is
 * kept in the entry) to avoid lock-ordering deadlocks, and the entry is
 * queued on sai_entries_received for the statahead thread to finish in
 * ll_post_statahead(). On error the entry goes straight to the stated
 * list as SA_ENTRY_INVA.
 *
 * \retval 0 on success, negative errno otherwise; on error the intent is
 * released and \a minfo freed here.
 */
static int ll_statahead_interpret(struct ptlrpc_request *req,
                                  struct md_enqueue_info *minfo, int rc)
{
        struct lookup_intent     *it  = &minfo->mi_it;
        struct inode         *dir = minfo->mi_dir;
        struct ll_inode_info     *lli = ll_i2info(dir);
        struct ll_statahead_info *sai = NULL;
        struct ll_sa_entry       *entry;
        __u64                     handle = 0;
        int                    wakeup;

        if (it_disposition(it, DISP_LOOKUP_NEG))
                rc = -ENOENT;

        if (rc == 0) {
                /* release ibits lock ASAP to avoid deadlock when statahead
                 * thread enqueues lock on parent in readdir and another
                 * process enqueues lock on child with parent lock held, eg.
                 * unlink.
                 */
                handle = it->it_lock_handle;
                ll_intent_drop_lock(it);
        }

        spin_lock(&lli->lli_sa_lock);
        /* stale entry */
        if (unlikely(!lli->lli_sai ||
                     lli->lli_sai->sai_generation != minfo->mi_generation)) {
                spin_unlock(&lli->lli_sa_lock);
                rc = -ESTALE;
                goto out;
        } else {
                sai = ll_sai_get(lli->lli_sai);
                if (unlikely(!thread_is_running(&sai->sai_thread))) {
                        sai->sai_replied++;
                        spin_unlock(&lli->lli_sa_lock);
                        rc = -EBADFD;
                        goto out;
                }

                entry = ll_sa_entry_get_byindex(sai, minfo->mi_cbdata);
                if (!entry) {
                        sai->sai_replied++;
                        spin_unlock(&lli->lli_sa_lock);
                        rc = -EIDRM;
                        goto out;
                }

                if (rc != 0) {
                        do_sa_entry_to_stated(sai, entry, SA_ENTRY_INVA);
                        wakeup = (entry->se_index == sai->sai_index_wait);
                } else {
                        entry->se_minfo = minfo;
                        entry->se_req = ptlrpc_request_addref(req);
                        /* Release the async ibits lock ASAP to avoid deadlock
                         * when statahead thread tries to enqueue lock on parent
                         * for readpage and other tries to enqueue lock on child
                         * with parent's lock held, for example: unlink.
                         */
                        entry->se_handle = handle;
                        /* wake only when the received list was empty, i.e.
                         * the statahead thread may be sleeping */
                        wakeup = list_empty(&sai->sai_entries_received);
                        list_add_tail(&entry->se_list,
                                      &sai->sai_entries_received);
                }
                sai->sai_replied++;
                spin_unlock(&lli->lli_sa_lock);

                /* drop the reference taken by ll_sa_entry_get_byindex() */
                ll_sa_entry_put(sai, entry);
                if (wakeup)
                        wake_up(&sai->sai_thread.t_ctl_waitq);
        }

out:
        if (rc != 0) {
                ll_intent_release(it);
                iput(dir);
                kfree(minfo);
        }
        if (sai)
                ll_sai_put(sai);
        return rc;
}
765
766 static void sa_args_fini(struct md_enqueue_info *minfo,
767                          struct ldlm_enqueue_info *einfo)
768 {
769         LASSERT(minfo && einfo);
770         iput(minfo->mi_dir);
771         kfree(minfo);
772         kfree(einfo);
773 }
774
775 /**
776  * prepare arguments for async stat RPC.
777  */
778 static int sa_args_init(struct inode *dir, struct inode *child,
779                         struct ll_sa_entry *entry, struct md_enqueue_info **pmi,
780                         struct ldlm_enqueue_info **pei)
781 {
782         struct qstr           *qstr = &entry->se_qstr;
783         struct ll_inode_info     *lli  = ll_i2info(dir);
784         struct md_enqueue_info   *minfo;
785         struct ldlm_enqueue_info *einfo;
786         struct md_op_data       *op_data;
787
788         einfo = kzalloc(sizeof(*einfo), GFP_NOFS);
789         if (!einfo)
790                 return -ENOMEM;
791
792         minfo = kzalloc(sizeof(*minfo), GFP_NOFS);
793         if (!minfo) {
794                 kfree(einfo);
795                 return -ENOMEM;
796         }
797
798         op_data = ll_prep_md_op_data(&minfo->mi_data, dir, child, qstr->name,
799                                      qstr->len, 0, LUSTRE_OPC_ANY, NULL);
800         if (IS_ERR(op_data)) {
801                 kfree(einfo);
802                 kfree(minfo);
803                 return PTR_ERR(op_data);
804         }
805
806         minfo->mi_it.it_op = IT_GETATTR;
807         minfo->mi_dir = igrab(dir);
808         minfo->mi_cb = ll_statahead_interpret;
809         minfo->mi_generation = lli->lli_sai->sai_generation;
810         minfo->mi_cbdata = entry->se_index;
811
812         einfo->ei_type   = LDLM_IBITS;
813         einfo->ei_mode   = it_to_lock_mode(&minfo->mi_it);
814         einfo->ei_cb_bl  = ll_md_blocking_ast;
815         einfo->ei_cb_cp  = ldlm_completion_ast;
816         einfo->ei_cb_gl  = NULL;
817         einfo->ei_cbdata = NULL;
818
819         *pmi = minfo;
820         *pei = einfo;
821
822         return 0;
823 }
824
825 static int do_sa_lookup(struct inode *dir, struct ll_sa_entry *entry)
826 {
827         struct md_enqueue_info   *minfo;
828         struct ldlm_enqueue_info *einfo;
829         int                    rc;
830
831         rc = sa_args_init(dir, NULL, entry, &minfo, &einfo);
832         if (rc)
833                 return rc;
834
835         rc = md_intent_getattr_async(ll_i2mdexp(dir), minfo, einfo);
836         if (rc < 0)
837                 sa_args_fini(minfo, einfo);
838
839         return rc;
840 }
841
842 /**
843  * similar to ll_revalidate_it().
844  * \retval      1 -- dentry valid
845  * \retval      0 -- will send stat-ahead request
846  * \retval others -- prepare stat-ahead request failed
847  */
848 static int do_sa_revalidate(struct inode *dir, struct ll_sa_entry *entry,
849                             struct dentry *dentry)
850 {
851         struct inode         *inode = d_inode(dentry);
852         struct lookup_intent      it = { .it_op = IT_GETATTR,
853                                          .it_lock_handle = 0 };
854         struct md_enqueue_info   *minfo;
855         struct ldlm_enqueue_info *einfo;
856         int rc;
857
858         if (unlikely(!inode))
859                 return 1;
860
861         if (d_mountpoint(dentry))
862                 return 1;
863
864         entry->se_inode = igrab(inode);
865         rc = md_revalidate_lock(ll_i2mdexp(dir), &it, ll_inode2fid(inode),
866                                 NULL);
867         if (rc == 1) {
868                 entry->se_handle = it.it_lock_handle;
869                 ll_intent_release(&it);
870                 return 1;
871         }
872
873         rc = sa_args_init(dir, inode, entry, &minfo, &einfo);
874         if (rc) {
875                 entry->se_inode = NULL;
876                 iput(inode);
877                 return rc;
878         }
879
880         rc = md_intent_getattr_async(ll_i2mdexp(dir), minfo, einfo);
881         if (rc < 0) {
882                 entry->se_inode = NULL;
883                 iput(inode);
884                 sa_args_fini(minfo, einfo);
885         }
886
887         return rc;
888 }
889
890 static void ll_statahead_one(struct dentry *parent, const char *entry_name,
891                              int entry_name_len)
892 {
893         struct inode         *dir    = d_inode(parent);
894         struct ll_inode_info     *lli    = ll_i2info(dir);
895         struct ll_statahead_info *sai    = lli->lli_sai;
896         struct dentry       *dentry = NULL;
897         struct ll_sa_entry       *entry;
898         int                    rc;
899         int                    rc1;
900
901         entry = ll_sa_entry_alloc(sai, sai->sai_index, entry_name,
902                                   entry_name_len);
903         if (IS_ERR(entry))
904                 return;
905
906         dentry = d_lookup(parent, &entry->se_qstr);
907         if (!dentry) {
908                 rc = do_sa_lookup(dir, entry);
909         } else {
910                 rc = do_sa_revalidate(dir, entry, dentry);
911                 if (rc == 1 && agl_should_run(sai, d_inode(dentry)))
912                         ll_agl_add(sai, d_inode(dentry), entry->se_index);
913
914                 dput(dentry);
915         }
916
917         if (rc) {
918                 rc1 = ll_sa_entry_to_stated(sai, entry,
919                                         rc < 0 ? SA_ENTRY_INVA : SA_ENTRY_SUCC);
920                 if (rc1 == 0 && entry->se_index == sai->sai_index_wait)
921                         wake_up(&sai->sai_waitq);
922         } else {
923                 sai->sai_sent++;
924         }
925
926         sai->sai_index++;
927         /* drop one refcount on entry by ll_sa_entry_alloc */
928         ll_sa_entry_put(sai, entry);
929 }
930
/*
 * AGL (async glimpse lock) worker thread: consumes inodes queued on
 * sai->sai_entries_agl and triggers a glimpse RPC for each, prefetching
 * file size/attributes ahead of the application's stat() calls.
 * Runs until SVC_STOPPING is set (by ll_statahead_thread on exit), then
 * drains the remaining queue, dropping the queued inode references.
 */
static int ll_agl_thread(void *arg)
{
	struct dentry       *parent = arg;
	struct inode         *dir    = d_inode(parent);
	struct ll_inode_info     *plli   = ll_i2info(dir);
	struct ll_inode_info     *clli;
	struct ll_sb_info       *sbi    = ll_i2sbi(dir);
	struct ll_statahead_info *sai    = ll_sai_get(plli->lli_sai);
	struct ptlrpc_thread     *thread = &sai->sai_agl_thread;
	struct l_wait_info      lwi    = { 0 };

	thread->t_pid = current_pid();
	CDEBUG(D_READA, "agl thread started: sai %p, parent %pd\n",
	       sai, parent);

	atomic_inc(&sbi->ll_agl_total);
	spin_lock(&plli->lli_agl_lock);
	sai->sai_agl_valid = 1;
	if (thread_is_init(thread))
		/* If someone else has changed the thread state
		 * (e.g. already changed to SVC_STOPPING), we can't just
		 * blindly overwrite that setting.
		 */
		thread_set_flags(thread, SVC_RUNNING);
	spin_unlock(&plli->lli_agl_lock);
	/* notify ll_start_agl() that we are up (it waits on t_ctl_waitq) */
	wake_up(&thread->t_ctl_waitq);

	while (1) {
		/* sleep until there is work on the AGL list or we are told
		 * to stop
		 */
		l_wait_event(thread->t_ctl_waitq,
			     !list_empty(&sai->sai_entries_agl) ||
			     !thread_is_running(thread),
			     &lwi);

		if (!thread_is_running(thread))
			break;

		spin_lock(&plli->lli_agl_lock);
		/* The statahead thread maybe help to process AGL entries,
		 * so check whether list empty again.
		 */
		if (!list_empty(&sai->sai_entries_agl)) {
			clli = list_entry(sai->sai_entries_agl.next,
					  struct ll_inode_info, lli_agl_list);
			list_del_init(&clli->lli_agl_list);
			/* drop the lock across the glimpse RPC; the entry is
			 * already unlinked so nobody else can touch it
			 */
			spin_unlock(&plli->lli_agl_lock);
			ll_agl_trigger(&clli->lli_vfs_inode, sai);
		} else {
			spin_unlock(&plli->lli_agl_lock);
		}
	}

	/* stopping: drain the queue, releasing the inode reference that
	 * ll_agl_add() took for each queued entry
	 */
	spin_lock(&plli->lli_agl_lock);
	sai->sai_agl_valid = 0;
	while (!list_empty(&sai->sai_entries_agl)) {
		clli = list_entry(sai->sai_entries_agl.next,
				  struct ll_inode_info, lli_agl_list);
		list_del_init(&clli->lli_agl_list);
		spin_unlock(&plli->lli_agl_lock);
		clli->lli_agl_index = 0;
		iput(&clli->lli_vfs_inode);
		spin_lock(&plli->lli_agl_lock);
	}
	thread_set_flags(thread, SVC_STOPPED);
	spin_unlock(&plli->lli_agl_lock);
	/* wake whoever is waiting in ll_statahead_thread for us to stop */
	wake_up(&thread->t_ctl_waitq);
	ll_sai_put(sai);
	CDEBUG(D_READA, "agl thread stopped: sai %p, parent %pd\n",
	       sai, parent);
	return 0;
}
1001
1002 static void ll_start_agl(struct dentry *parent, struct ll_statahead_info *sai)
1003 {
1004         struct ptlrpc_thread *thread = &sai->sai_agl_thread;
1005         struct l_wait_info    lwi    = { 0 };
1006         struct ll_inode_info  *plli;
1007         struct task_struct *task;
1008
1009         CDEBUG(D_READA, "start agl thread: sai %p, parent %pd\n",
1010                sai, parent);
1011
1012         plli = ll_i2info(d_inode(parent));
1013         task = kthread_run(ll_agl_thread, parent, "ll_agl_%u",
1014                            plli->lli_opendir_pid);
1015         if (IS_ERR(task)) {
1016                 CERROR("can't start ll_agl thread, rc: %ld\n", PTR_ERR(task));
1017                 thread_set_flags(thread, SVC_STOPPED);
1018                 return;
1019         }
1020
1021         l_wait_event(thread->t_ctl_waitq,
1022                      thread_is_running(thread) || thread_is_stopped(thread),
1023                      &lwi);
1024 }
1025
/*
 * Main statahead worker thread: walks the directory pages of 'parent',
 * issuing an async getattr for every entry after the first one, while
 * interleaving three duties -- interpreting completed replies
 * (sai_entries_received), helping the AGL thread when the statahead
 * window is full, and honoring stop requests.  On exit it stops the AGL
 * thread (if started), drains pending replies, and releases its
 * references on sai and parent.
 */
static int ll_statahead_thread(void *arg)
{
	struct dentry       *parent = arg;
	struct inode         *dir    = d_inode(parent);
	struct ll_inode_info     *plli   = ll_i2info(dir);
	struct ll_inode_info     *clli;
	struct ll_sb_info       *sbi    = ll_i2sbi(dir);
	struct ll_statahead_info *sai    = ll_sai_get(plli->lli_sai);
	struct ptlrpc_thread     *thread = &sai->sai_thread;
	struct ptlrpc_thread *agl_thread = &sai->sai_agl_thread;
	struct page           *page;
	__u64                pos    = 0;
	int                    first  = 0;
	int                    rc     = 0;
	struct ll_dir_chain       chain;
	struct l_wait_info      lwi    = { 0 };

	thread->t_pid = current_pid();
	CDEBUG(D_READA, "statahead thread starting: sai %p, parent %pd\n",
	       sai, parent);

	/* optionally start the companion async-glimpse-lock thread */
	if (sbi->ll_flags & LL_SBI_AGL_ENABLED)
		ll_start_agl(parent, sai);

	atomic_inc(&sbi->ll_sa_total);
	spin_lock(&plli->lli_sa_lock);
	if (thread_is_init(thread))
		/* If someone else has changed the thread state
		 * (e.g. already changed to SVC_STOPPING), we can't just
		 * blindly overwrite that setting.
		 */
		thread_set_flags(thread, SVC_RUNNING);
	spin_unlock(&plli->lli_sa_lock);
	/* unblock the starter waiting on t_ctl_waitq */
	wake_up(&thread->t_ctl_waitq);

	ll_dir_chain_init(&chain);
	page = ll_get_dir_page(dir, pos, &chain);

	while (1) {
		struct lu_dirpage *dp;
		struct lu_dirent  *ent;

		if (IS_ERR(page)) {
			rc = PTR_ERR(page);
			CDEBUG(D_READA, "error reading dir "DFID" at %llu/%llu: [rc %d] [parent %u]\n",
			       PFID(ll_inode2fid(dir)), pos, sai->sai_index,
			       rc, plli->lli_opendir_pid);
			goto out;
		}

		dp = page_address(page);
		for (ent = lu_dirent_start(dp); ent;
		     ent = lu_dirent_next(ent)) {
			__u64 hash;
			int namelen;
			char *name;

			hash = le64_to_cpu(ent->lde_hash);
			if (unlikely(hash < pos))
				/*
				 * Skip until we find target hash value.
				 */
				continue;

			namelen = le16_to_cpu(ent->lde_namelen);
			if (unlikely(namelen == 0))
				/*
				 * Skip dummy record.
				 */
				continue;

			name = ent->lde_name;
			if (name[0] == '.') {
				if (namelen == 1) {
					/*
					 * skip "."
					 */
					continue;
				} else if (name[1] == '.' && namelen == 2) {
					/*
					 * skip ".."
					 */
					continue;
				} else if (!sai->sai_ls_all) {
					/*
					 * skip hidden files.
					 */
					sai->sai_skip_hidden++;
					continue;
				}
			}

			/*
			 * don't stat-ahead first entry.
			 */
			if (unlikely(++first == 1))
				continue;

keep_it:
			/* wait for a free slot in the statahead window, a
			 * reply to interpret, AGL work, or a stop request
			 */
			l_wait_event(thread->t_ctl_waitq,
				     !sa_sent_full(sai) ||
				     !list_empty(&sai->sai_entries_received) ||
				     !list_empty(&sai->sai_entries_agl) ||
				     !thread_is_running(thread),
				     &lwi);

interpret_it:
			/* consume all completed getattr replies first */
			while (!list_empty(&sai->sai_entries_received))
				ll_post_statahead(sai);

			if (unlikely(!thread_is_running(thread))) {
				ll_release_page(page, 0);
				rc = 0;
				goto out;
			}

			/* If no window for metadata statahead, but there are
			 * some AGL entries to be triggered, then try to help
			 * to process the AGL entries.
			 */
			if (sa_sent_full(sai)) {
				spin_lock(&plli->lli_agl_lock);
				while (!list_empty(&sai->sai_entries_agl)) {
					clli = list_entry(sai->sai_entries_agl.next,
							  struct ll_inode_info, lli_agl_list);
					list_del_init(&clli->lli_agl_list);
					/* drop the lock across the RPC */
					spin_unlock(&plli->lli_agl_lock);
					ll_agl_trigger(&clli->lli_vfs_inode,
						       sai);

					/* replies take priority over AGL */
					if (!list_empty(&sai->sai_entries_received))
						goto interpret_it;

					if (unlikely(
						!thread_is_running(thread))) {
						ll_release_page(page, 0);
						rc = 0;
						goto out;
					}

					/* window opened: resume statahead */
					if (!sa_sent_full(sai))
						goto do_it;

					spin_lock(&plli->lli_agl_lock);
				}
				spin_unlock(&plli->lli_agl_lock);

				/* window still full: wait again */
				goto keep_it;
			}

do_it:
			ll_statahead_one(parent, name, namelen);
		}
		pos = le64_to_cpu(dp->ldp_hash_end);
		if (pos == MDS_DIR_END_OFF) {
			/*
			 * End of directory reached.
			 */
			ll_release_page(page, 0);
			/* drain until every sent request has been replied
			 * to and interpreted, or we are told to stop
			 */
			while (1) {
				l_wait_event(thread->t_ctl_waitq,
					     !list_empty(&sai->sai_entries_received) ||
					     sai->sai_sent == sai->sai_replied ||
					     !thread_is_running(thread),
					     &lwi);

				while (!list_empty(&sai->sai_entries_received))
					ll_post_statahead(sai);

				if (unlikely(!thread_is_running(thread))) {
					rc = 0;
					goto out;
				}

				if (sai->sai_sent == sai->sai_replied &&
				    list_empty(&sai->sai_entries_received))
					break;
			}

			/* finish off any remaining AGL work ourselves */
			spin_lock(&plli->lli_agl_lock);
			while (!list_empty(&sai->sai_entries_agl) &&
			       thread_is_running(thread)) {
				clli = list_entry(sai->sai_entries_agl.next,
						  struct ll_inode_info, lli_agl_list);
				list_del_init(&clli->lli_agl_list);
				spin_unlock(&plli->lli_agl_lock);
				ll_agl_trigger(&clli->lli_vfs_inode, sai);
				spin_lock(&plli->lli_agl_lock);
			}
			spin_unlock(&plli->lli_agl_lock);

			rc = 0;
			goto out;
		} else if (1) {
			/*
			 * chain is exhausted.
			 * Normal case: continue to the next page.
			 *
			 * NOTE(review): the "else if (1)" makes the final
			 * else branch below unreachable dead code, kept
			 * from an earlier hash-collision handling scheme.
			 */
			ll_release_page(page, le32_to_cpu(dp->ldp_flags) &
					      LDF_COLLIDE);
			page = ll_get_dir_page(dir, pos, &chain);
		} else {
			LASSERT(le32_to_cpu(dp->ldp_flags) & LDF_COLLIDE);
			ll_release_page(page, 1);
			/*
			 * go into overflow page.
			 */
		}
	}

out:
	if (sai->sai_agl_valid) {
		/* ask the AGL thread to stop and wait for it */
		spin_lock(&plli->lli_agl_lock);
		thread_set_flags(agl_thread, SVC_STOPPING);
		spin_unlock(&plli->lli_agl_lock);
		wake_up(&agl_thread->t_ctl_waitq);

		CDEBUG(D_READA, "stop agl thread: sai %p pid %u\n",
		       sai, (unsigned int)agl_thread->t_pid);
		l_wait_event(agl_thread->t_ctl_waitq,
			     thread_is_stopped(agl_thread),
			     &lwi);
	} else {
		/* Set agl_thread flags anyway. */
		thread_set_flags(&sai->sai_agl_thread, SVC_STOPPED);
	}
	ll_dir_chain_fini(&chain);
	spin_lock(&plli->lli_sa_lock);
	if (!list_empty(&sai->sai_entries_received)) {
		thread_set_flags(thread, SVC_STOPPING);
		spin_unlock(&plli->lli_sa_lock);

		/* To release the resources held by received entries. */
		while (!list_empty(&sai->sai_entries_received))
			ll_post_statahead(sai);

		spin_lock(&plli->lli_sa_lock);
	}
	thread_set_flags(thread, SVC_STOPPED);
	spin_unlock(&plli->lli_sa_lock);
	/* wake both stat() waiters and ll_stop_statahead() */
	wake_up(&sai->sai_waitq);
	wake_up(&thread->t_ctl_waitq);
	ll_sai_put(sai);
	/* drop the parent reference taken when the thread was spawned */
	dput(parent);
	CDEBUG(D_READA, "statahead thread stopped: sai %p, parent %pd\n",
	       sai, parent);
	return rc;
}
1274
/**
 * called in ll_file_release().
 *
 * Stops the statahead thread associated with 'dir' if 'key' matches the
 * opendir key that started it (i.e. the closing fd is the one statahead
 * was attached to); otherwise does nothing.  Waits synchronously for the
 * thread to reach the stopped state before dropping the sai reference.
 */
void ll_stop_statahead(struct inode *dir, void *key)
{
	struct ll_inode_info *lli = ll_i2info(dir);

	if (unlikely(!key))
		return;

	spin_lock(&lli->lli_sa_lock);
	/* not our statahead instance (different opener, or already gone) */
	if (lli->lli_opendir_key != key || lli->lli_opendir_pid == 0) {
		spin_unlock(&lli->lli_sa_lock);
		return;
	}

	lli->lli_opendir_key = NULL;

	if (lli->lli_sai) {
		struct l_wait_info lwi = { 0 };
		struct ptlrpc_thread *thread = &lli->lli_sai->sai_thread;

		if (!thread_is_stopped(thread)) {
			thread_set_flags(thread, SVC_STOPPING);
			/* must drop the lock before waiting: the thread
			 * takes lli_sa_lock on its exit path
			 */
			spin_unlock(&lli->lli_sa_lock);
			wake_up(&thread->t_ctl_waitq);

			CDEBUG(D_READA, "stop statahead thread: sai %p pid %u\n",
			       lli->lli_sai, (unsigned int)thread->t_pid);
			l_wait_event(thread->t_ctl_waitq,
				     thread_is_stopped(thread),
				     &lwi);
		} else {
			spin_unlock(&lli->lli_sa_lock);
		}

		/*
		 * Put the ref which was held when first statahead_enter.
		 * It maybe not the last ref for some statahead requests
		 * maybe inflight.
		 */
		ll_sai_put(lli->lli_sai);
	} else {
		/* statahead never started for this open */
		lli->lli_opendir_pid = 0;
		spin_unlock(&lli->lli_sa_lock);
	}
}
1322
/*
 * Classification returned by is_first_dirent(): whether the dentry being
 * looked up is the first (non-)hidden entry of its directory, which
 * decides if a statahead thread should be started for "ls"-like scans.
 */
enum {
	/**
	 * not first dirent, or is "."
	 */
	LS_NONE_FIRST_DE = 0,
	/**
	 * the first non-hidden dirent
	 */
	LS_FIRST_DE,
	/**
	 * the first hidden dirent, that is "."
	 */
	LS_FIRST_DOT_DE
};
1337
1338 static int is_first_dirent(struct inode *dir, struct dentry *dentry)
1339 {
1340         struct ll_dir_chain   chain;
1341         struct qstr       *target = &dentry->d_name;
1342         struct page       *page;
1343         __u64            pos    = 0;
1344         int                dot_de;
1345         int                rc     = LS_NONE_FIRST_DE;
1346
1347         ll_dir_chain_init(&chain);
1348         page = ll_get_dir_page(dir, pos, &chain);
1349
1350         while (1) {
1351                 struct lu_dirpage *dp;
1352                 struct lu_dirent  *ent;
1353
1354                 if (IS_ERR(page)) {
1355                         struct ll_inode_info *lli = ll_i2info(dir);
1356
1357                         rc = PTR_ERR(page);
1358                         CERROR("error reading dir "DFID" at %llu: [rc %d] [parent %u]\n",
1359                                PFID(ll_inode2fid(dir)), pos,
1360                                rc, lli->lli_opendir_pid);
1361                         break;
1362                 }
1363
1364                 dp = page_address(page);
1365                 for (ent = lu_dirent_start(dp); ent;
1366                      ent = lu_dirent_next(ent)) {
1367                         __u64 hash;
1368                         int namelen;
1369                         char *name;
1370
1371                         hash = le64_to_cpu(ent->lde_hash);
1372                         /* The ll_get_dir_page() can return any page containing
1373                          * the given hash which may be not the start hash.
1374                          */
1375                         if (unlikely(hash < pos))
1376                                 continue;
1377
1378                         namelen = le16_to_cpu(ent->lde_namelen);
1379                         if (unlikely(namelen == 0))
1380                                 /*
1381                                  * skip dummy record.
1382                                  */
1383                                 continue;
1384
1385                         name = ent->lde_name;
1386                         if (name[0] == '.') {
1387                                 if (namelen == 1)
1388                                         /*
1389                                          * skip "."
1390                                          */
1391                                         continue;
1392                                 else if (name[1] == '.' && namelen == 2)
1393                                         /*
1394                                          * skip ".."
1395                                          */
1396                                         continue;
1397                                 else
1398                                         dot_de = 1;
1399                         } else {
1400                                 dot_de = 0;
1401                         }
1402
1403                         if (dot_de && target->name[0] != '.') {
1404                                 CDEBUG(D_READA, "%.*s skip hidden file %.*s\n",
1405                                        target->len, target->name,
1406                                        namelen, name);
1407                                 continue;
1408                         }
1409
1410                         if (target->len != namelen ||
1411                             memcmp(target->name, name, namelen) != 0)
1412                                 rc = LS_NONE_FIRST_DE;
1413                         else if (!dot_de)
1414                                 rc = LS_FIRST_DE;
1415                         else
1416                                 rc = LS_FIRST_DOT_DE;
1417
1418                         ll_release_page(page, 0);
1419                         goto out;
1420                 }
1421                 pos = le64_to_cpu(dp->ldp_hash_end);
1422                 if (pos == MDS_DIR_END_OFF) {
1423                         /*
1424                          * End of directory reached.
1425                          */
1426                         ll_release_page(page, 0);
1427                         break;
1428                 } else if (1) {
1429                         /*
1430                          * chain is exhausted
1431                          * Normal case: continue to the next page.
1432                          */
1433                         ll_release_page(page, le32_to_cpu(dp->ldp_flags) &
1434                                               LDF_COLLIDE);
1435                         page = ll_get_dir_page(dir, pos, &chain);
1436                 } else {
1437                         /*
1438                          * go into overflow page.
1439                          */
1440                         LASSERT(le32_to_cpu(dp->ldp_flags) & LDF_COLLIDE);
1441                         ll_release_page(page, 1);
1442                 }
1443         }
1444
1445 out:
1446         ll_dir_chain_fini(&chain);
1447         return rc;
1448 }
1449
1450 static void
1451 ll_sai_unplug(struct ll_statahead_info *sai, struct ll_sa_entry *entry)
1452 {
1453         struct ptlrpc_thread *thread = &sai->sai_thread;
1454         struct ll_sb_info    *sbi    = ll_i2sbi(sai->sai_inode);
1455         int                hit;
1456
1457         if (entry && entry->se_stat == SA_ENTRY_SUCC)
1458                 hit = 1;
1459         else
1460                 hit = 0;
1461
1462         ll_sa_entry_fini(sai, entry);
1463         if (hit) {
1464                 sai->sai_hit++;
1465                 sai->sai_consecutive_miss = 0;
1466                 sai->sai_max = min(2 * sai->sai_max, sbi->ll_sa_max);
1467         } else {
1468                 struct ll_inode_info *lli = ll_i2info(sai->sai_inode);
1469
1470                 sai->sai_miss++;
1471                 sai->sai_consecutive_miss++;
1472                 if (sa_low_hit(sai) && thread_is_running(thread)) {
1473                         atomic_inc(&sbi->ll_sa_wrong);
1474                         CDEBUG(D_READA, "Statahead for dir " DFID " hit ratio too low: hit/miss %llu/%llu, sent/replied %llu/%llu, stopping statahead thread\n",
1475                                PFID(&lli->lli_fid), sai->sai_hit,
1476                                sai->sai_miss, sai->sai_sent,
1477                                sai->sai_replied);
1478                         spin_lock(&lli->lli_sa_lock);
1479                         if (!thread_is_stopped(thread))
1480                                 thread_set_flags(thread, SVC_STOPPING);
1481                         spin_unlock(&lli->lli_sa_lock);
1482                 }
1483         }
1484
1485         if (!thread_is_stopped(thread))
1486                 wake_up(&thread->t_ctl_waitq);
1487 }
1488
1489 /**
1490  * Start statahead thread if this is the first dir entry.
1491  * Otherwise if a thread is started already, wait it until it is ahead of me.
1492  * \retval 1       -- find entry with lock in cache, the caller needs to do
1493  *                  nothing.
1494  * \retval 0       -- find entry in cache, but without lock, the caller needs
1495  *                  refresh from MDS.
1496  * \retval others  -- the caller need to process as non-statahead.
1497  */
1498 int do_statahead_enter(struct inode *dir, struct dentry **dentryp,
1499                        int only_unplug)
1500 {
1501         struct ll_inode_info     *lli   = ll_i2info(dir);
1502         struct ll_statahead_info *sai   = lli->lli_sai;
1503         struct dentry       *parent;
1504         struct ll_sa_entry       *entry;
1505         struct ptlrpc_thread     *thread;
1506         struct l_wait_info      lwi   = { 0 };
1507         struct task_struct *task;
1508         int                    rc    = 0;
1509         struct ll_inode_info     *plli;
1510
1511         LASSERT(lli->lli_opendir_pid == current_pid());
1512
1513         if (sai) {
1514                 thread = &sai->sai_thread;
1515                 if (unlikely(thread_is_stopped(thread) &&
1516                              list_empty(&sai->sai_entries_stated))) {
1517                         /* to release resource */
1518                         ll_stop_statahead(dir, lli->lli_opendir_key);
1519                         return -EAGAIN;
1520                 }
1521
1522                 if ((*dentryp)->d_name.name[0] == '.') {
1523                         if (sai->sai_ls_all ||
1524                             sai->sai_miss_hidden >= sai->sai_skip_hidden) {
1525                                 /*
1526                                  * Hidden dentry is the first one, or statahead
1527                                  * thread does not skip so many hidden dentries
1528                                  * before "sai_ls_all" enabled as below.
1529                                  */
1530                         } else {
1531                                 if (!sai->sai_ls_all)
1532                                         /*
1533                                          * It maybe because hidden dentry is not
1534                                          * the first one, "sai_ls_all" was not
1535                                          * set, then "ls -al" missed. Enable
1536                                          * "sai_ls_all" for such case.
1537                                          */
1538                                         sai->sai_ls_all = 1;
1539
1540                                 /*
1541                                  * Such "getattr" has been skipped before
1542                                  * "sai_ls_all" enabled as above.
1543                                  */
1544                                 sai->sai_miss_hidden++;
1545                                 return -EAGAIN;
1546                         }
1547                 }
1548
1549                 entry = ll_sa_entry_get_byname(sai, &(*dentryp)->d_name);
1550                 if (!entry || only_unplug) {
1551                         ll_sai_unplug(sai, entry);
1552                         return entry ? 1 : -EAGAIN;
1553                 }
1554
1555                 if (!ll_sa_entry_stated(entry)) {
1556                         sai->sai_index_wait = entry->se_index;
1557                         lwi = LWI_TIMEOUT_INTR(cfs_time_seconds(30), NULL,
1558                                                LWI_ON_SIGNAL_NOOP, NULL);
1559                         rc = l_wait_event(sai->sai_waitq,
1560                                           ll_sa_entry_stated(entry) ||
1561                                           thread_is_stopped(thread),
1562                                           &lwi);
1563                         if (rc < 0) {
1564                                 ll_sai_unplug(sai, entry);
1565                                 return -EAGAIN;
1566                         }
1567                 }
1568
1569                 if (entry->se_stat == SA_ENTRY_SUCC && entry->se_inode) {
1570                         struct inode *inode = entry->se_inode;
1571                         struct lookup_intent it = { .it_op = IT_GETATTR,
1572                                                     .it_lock_handle =
1573                                                      entry->se_handle };
1574                         __u64 bits;
1575
1576                         rc = md_revalidate_lock(ll_i2mdexp(dir), &it,
1577                                                 ll_inode2fid(inode), &bits);
1578                         if (rc == 1) {
1579                                 if (!d_inode(*dentryp)) {
1580                                         struct dentry *alias;
1581
1582                                         alias = ll_splice_alias(inode,
1583                                                                 *dentryp);
1584                                         if (IS_ERR(alias)) {
1585                                                 ll_sai_unplug(sai, entry);
1586                                                 return PTR_ERR(alias);
1587                                         }
1588                                         *dentryp = alias;
1589                                 } else if (d_inode(*dentryp) != inode) {
1590                                         /* revalidate, but inode is recreated */
1591                                         CDEBUG(D_READA, "%s: stale dentry %pd inode "DFID", statahead inode "DFID"\n",
1592                                                ll_get_fsname(d_inode(*dentryp)->i_sb, NULL, 0),
1593                                                *dentryp,
1594                                                PFID(ll_inode2fid(d_inode(*dentryp))),
1595                                                PFID(ll_inode2fid(inode)));
1596                                         ll_sai_unplug(sai, entry);
1597                                         return -ESTALE;
1598                                 } else {
1599                                         iput(inode);
1600                                 }
1601                                 entry->se_inode = NULL;
1602
1603                                 if ((bits & MDS_INODELOCK_LOOKUP) &&
1604                                     d_lustre_invalid(*dentryp))
1605                                         d_lustre_revalidate(*dentryp);
1606                                 ll_intent_release(&it);
1607                         }
1608                 }
1609
1610                 ll_sai_unplug(sai, entry);
1611                 return rc;
1612         }
1613
1614         /* I am the "lli_opendir_pid" owner, only me can set "lli_sai". */
1615         rc = is_first_dirent(dir, *dentryp);
1616         if (rc == LS_NONE_FIRST_DE) {
1617                 /* It is not "ls -{a}l" operation, no need statahead for it. */
1618                 rc = -EAGAIN;
1619                 goto out;
1620         }
1621
1622         sai = ll_sai_alloc();
1623         if (!sai) {
1624                 rc = -ENOMEM;
1625                 goto out;
1626         }
1627
1628         sai->sai_ls_all = (rc == LS_FIRST_DOT_DE);
1629         sai->sai_inode = igrab(dir);
1630         if (unlikely(!sai->sai_inode)) {
1631                 CWARN("Do not start stat ahead on dying inode "DFID"\n",
1632                       PFID(&lli->lli_fid));
1633                 rc = -ESTALE;
1634                 goto out;
1635         }
1636
1637         /* get parent reference count here, and put it in ll_statahead_thread */
1638         parent = dget((*dentryp)->d_parent);
1639         if (unlikely(sai->sai_inode != d_inode(parent))) {
1640                 struct ll_inode_info *nlli = ll_i2info(d_inode(parent));
1641
1642                 CWARN("Race condition, someone changed %pd just now: old parent "DFID", new parent "DFID"\n",
1643                       *dentryp,
1644                       PFID(&lli->lli_fid), PFID(&nlli->lli_fid));
1645                 dput(parent);
1646                 iput(sai->sai_inode);
1647                 rc = -EAGAIN;
1648                 goto out;
1649         }
1650
1651         CDEBUG(D_READA, "start statahead thread: sai %p, parent %pd\n",
1652                sai, parent);
1653
1654         /* The sai buffer already has one reference taken at allocation time,
1655          * but as soon as we expose the sai by attaching it to the lli that
1656          * default reference can be dropped by another thread calling
1657          * ll_stop_statahead. We need to take a local reference to protect
1658          * the sai buffer while we intend to access it.
1659          */
1660         ll_sai_get(sai);
1661         lli->lli_sai = sai;
1662
1663         plli = ll_i2info(d_inode(parent));
1664         task = kthread_run(ll_statahead_thread, parent, "ll_sa_%u",
1665                            plli->lli_opendir_pid);
1666         thread = &sai->sai_thread;
1667         if (IS_ERR(task)) {
1668                 rc = PTR_ERR(task);
1669                 CERROR("can't start ll_sa thread, rc: %d\n", rc);
1670                 dput(parent);
1671                 lli->lli_opendir_key = NULL;
1672                 thread_set_flags(thread, SVC_STOPPED);
1673                 thread_set_flags(&sai->sai_agl_thread, SVC_STOPPED);
1674                 /* Drop both our own local reference and the default
1675                  * reference from allocation time.
1676                  */
1677                 ll_sai_put(sai);
1678                 ll_sai_put(sai);
1679                 LASSERT(!lli->lli_sai);
1680                 return -EAGAIN;
1681         }
1682
1683         l_wait_event(thread->t_ctl_waitq,
1684                      thread_is_running(thread) || thread_is_stopped(thread),
1685                      &lwi);
1686         ll_sai_put(sai);
1687
1688         /*
1689          * We don't stat-ahead for the first dirent since we are already in
1690          * lookup.
1691          */
1692         return -EAGAIN;
1693
1694 out:
1695         kfree(sai);
1696         spin_lock(&lli->lli_sa_lock);
1697         lli->lli_opendir_key = NULL;
1698         lli->lli_opendir_pid = 0;
1699         spin_unlock(&lli->lli_sa_lock);
1700         return rc;
1701 }