Merge branch 'work.const-qstr' of git://git.kernel.org/pub/scm/linux/kernel/git/viro/vfs
[cascardo/linux.git] / drivers / staging / lustre / lustre / llite / statahead.c
1 /*
2  * GPL HEADER START
3  *
4  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
5  *
6  * This program is free software; you can redistribute it and/or modify
7  * it under the terms of the GNU General Public License version 2 only,
8  * as published by the Free Software Foundation.
9  *
10  * This program is distributed in the hope that it will be useful, but
11  * WITHOUT ANY WARRANTY; without even the implied warranty of
12  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
13  * General Public License version 2 for more details (a copy is included
14  * in the LICENSE file that accompanied this code).
15  *
16  * You should have received a copy of the GNU General Public License
17  * version 2 along with this program; If not, see
18  * http://www.gnu.org/licenses/gpl-2.0.html
19  *
20  * GPL HEADER END
21  */
22 /*
23  * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
24  * Use is subject to license terms.
25  *
26  * Copyright (c) 2011, 2015, Intel Corporation.
27  */
28 /*
29  * This file is part of Lustre, http://www.lustre.org/
30  * Lustre is a trademark of Sun Microsystems, Inc.
31  */
32
33 #include <linux/fs.h>
34 #include <linux/sched.h>
35 #include <linux/mm.h>
36 #include <linux/highmem.h>
37 #include <linux/pagemap.h>
38
39 #define DEBUG_SUBSYSTEM S_LLITE
40
41 #include "../include/obd_support.h"
42 #include "../include/lustre_lite.h"
43 #include "../include/lustre_dlm.h"
44 #include "llite_internal.h"
45
46 #define SA_OMITTED_ENTRY_MAX 8ULL
47
/*
 * Lifecycle states of a statahead entry.  Negative values are reserved
 * for error cases.
 */
enum se_stat {
        SA_ENTRY_INIT = 0,      /* initial state, async stat not done yet */
        SA_ENTRY_SUCC = 1,      /* async stat succeeded */
        SA_ENTRY_INVA = 2,      /* entry is invalid (stat failed or stale) */
        SA_ENTRY_DEST = 3,      /* entry is being destroyed */
};
55
/*
 * One per-name statahead cache entry.  The entry and its name string are
 * allocated in a single chunk (see ll_sa_entry_alloc()); se_qstr.name
 * points into the trailing bytes of that chunk.
 */
struct ll_sa_entry {
        /* link into sai->sai_entries, kept in se_index order */
        struct list_head              se_link;
        /* link into sai->sai_entries_{received,stated} */
        struct list_head              se_list;
        /* link into sai hash table locally */
        struct list_head              se_hash;
        /* entry reference count; see rules in ll_sa_entry_alloc() */
        atomic_t            se_refcount;
        /* entry index in the sai */
        __u64              se_index;
        /* low layer ldlm lock handle */
        __u64              se_handle;
        /* entry status */
        enum se_stat       se_stat;
        /* total allocation size of this entry, name included */
        int                  se_size;
        /* pointer to async getattr enqueue info */
        struct md_enqueue_info *se_minfo;
        /* pointer to the async getattr request */
        struct ptlrpc_request  *se_req;
        /* pointer to the target inode */
        struct inode       *se_inode;
        /* entry name */
        struct qstr          se_qstr;
};
82
83 static unsigned int sai_generation;
84 static DEFINE_SPINLOCK(sai_generation_lock);
85
/*
 * Return non-zero once the entry has left SA_ENTRY_INIT, i.e. the async
 * stat has completed (successfully or not).
 *
 * The entry only can be released by the caller, it is necessary to hold lock.
 */
static inline int ll_sa_entry_stated(struct ll_sa_entry *entry)
{
        /* read barrier so the state read here is not reordered before the
         * writer's updates become visible
         */
        smp_rmb();
        return (entry->se_stat != SA_ENTRY_INIT);
}
94
/* Map a name hash value to a bucket index in sai->sai_cache[]. */
static inline int ll_sa_entry_hash(int val)
{
        return val & LL_SA_CACHE_MASK;
}
99
100 /*
101  * Insert entry to hash SA table.
102  */
103 static inline void
104 ll_sa_entry_enhash(struct ll_statahead_info *sai, struct ll_sa_entry *entry)
105 {
106         int i = ll_sa_entry_hash(entry->se_qstr.hash);
107
108         spin_lock(&sai->sai_cache_lock[i]);
109         list_add_tail(&entry->se_hash, &sai->sai_cache[i]);
110         spin_unlock(&sai->sai_cache_lock[i]);
111 }
112
113 /*
114  * Remove entry from SA table.
115  */
116 static inline void
117 ll_sa_entry_unhash(struct ll_statahead_info *sai, struct ll_sa_entry *entry)
118 {
119         int i = ll_sa_entry_hash(entry->se_qstr.hash);
120
121         spin_lock(&sai->sai_cache_lock[i]);
122         list_del_init(&entry->se_hash);
123         spin_unlock(&sai->sai_cache_lock[i]);
124 }
125
126 static inline int agl_should_run(struct ll_statahead_info *sai,
127                                  struct inode *inode)
128 {
129         return (inode && S_ISREG(inode->i_mode) && sai->sai_agl_valid);
130 }
131
/* True when the number of cached entries has reached the RPC window. */
static inline int sa_sent_full(struct ll_statahead_info *sai)
{
        return atomic_read(&sai->sai_cache_count) >= sai->sai_max;
}
136
/* True when no completed-but-unprocessed replies are queued. */
static inline int sa_received_empty(struct ll_statahead_info *sai)
{
        return list_empty(&sai->sai_entries_received);
}
141
/* True when no inodes are queued for asynchronous glimpse. */
static inline int agl_list_empty(struct ll_statahead_info *sai)
{
        return list_empty(&sai->sai_entries_agl);
}
146
147 /**
148  * (1) hit ratio less than 80%
149  * or
150  * (2) consecutive miss more than 8
151  * then means low hit.
152  */
153 static inline int sa_low_hit(struct ll_statahead_info *sai)
154 {
155         return ((sai->sai_hit > 7 && sai->sai_hit < 4 * sai->sai_miss) ||
156                 (sai->sai_consecutive_miss > 8));
157 }
158
/*
 * If the given index is behind of statahead window more than
 * SA_OMITTED_ENTRY_MAX, then it is old.
 */
static inline int is_omitted_entry(struct ll_statahead_info *sai, __u64 index)
{
        /* cast keeps the addition in 64 bits before comparing */
        return ((__u64)sai->sai_max + index + SA_OMITTED_ENTRY_MAX <
                 sai->sai_index);
}
168
/*
 * Allocate a statahead entry for @name, link it at the tail of
 * sai->sai_entries (so the list stays sorted by se_index), and insert it
 * into the sai hash cache.
 *
 * The entry and its name are allocated as one chunk; the name is copied
 * into the trailing bytes and se_qstr points at that copy, so the entry
 * does not depend on the caller's @name buffer.
 *
 * Returns the new entry, or ERR_PTR(-ENOMEM) on allocation failure.
 */
static struct ll_sa_entry *
ll_sa_entry_alloc(struct dentry *parent,
                  struct ll_statahead_info *sai, __u64 index,
                  const char *name, int len)
{
        struct ll_inode_info *lli;
        struct ll_sa_entry   *entry;
        int                entry_size;
        char             *dname;

        /* (len & ~3) + 4 rounds len+1 up so the copied name and its NUL
         * terminator fit after the struct
         */
        entry_size = sizeof(struct ll_sa_entry) + (len & ~3) + 4;
        entry = kzalloc(entry_size, GFP_NOFS);
        if (unlikely(!entry))
                return ERR_PTR(-ENOMEM);

        CDEBUG(D_READA, "alloc sa entry %.*s(%p) index %llu\n",
               len, name, entry, index);

        entry->se_index = index;

        /*
         * Statahead entry reference rules:
         *
         * 1) When statahead entry is initialized, its reference is set as 2.
         *    One reference is used by the directory scanner. When the scanner
         *    searches the statahead cache for the given name, it can perform
         *    lockless hash lookup (only the scanner can remove entry from hash
         *    list), and once found, it needn't to call "atomic_inc()" for the
         *    entry reference. So the performance is improved. After using the
         *    statahead entry, the scanner will call "atomic_dec()" to drop the
         *    reference held when initialization. If it is the last reference,
         *    the statahead entry will be freed.
         *
         * 2) All other threads, including statahead thread and ptlrpcd thread,
         *    when they process the statahead entry, the reference for target
         *    should be held to guarantee the entry will not be released by the
         *    directory scanner. After processing the entry, these threads will
         *    drop the entry reference. If it is the last reference, the entry
         *    will be freed.
         *
         *    The second reference when initializes the statahead entry is used
         *    by the statahead thread, following the rule 2).
         */
        atomic_set(&entry->se_refcount, 2);
        entry->se_stat = SA_ENTRY_INIT;
        entry->se_size = entry_size;
        dname = (char *)entry + sizeof(struct ll_sa_entry);
        memcpy(dname, name, len);
        dname[len] = 0;

        entry->se_qstr.hash = full_name_hash(parent, name, len);
        entry->se_qstr.len = len;
        entry->se_qstr.name = dname;

        /* publish under lli_sa_lock so readers of sai_entries never see a
         * half-initialized entry
         */
        lli = ll_i2info(sai->sai_inode);
        spin_lock(&lli->lli_sa_lock);
        list_add_tail(&entry->se_link, &sai->sai_entries);
        INIT_LIST_HEAD(&entry->se_list);
        ll_sa_entry_enhash(sai, entry);
        spin_unlock(&lli->lli_sa_lock);

        atomic_inc(&sai->sai_cache_count);

        return entry;
}
237
238 /*
239  * Used by the directory scanner to search entry with name.
240  *
241  * Only the caller can remove the entry from hash, so it is unnecessary to hold
242  * hash lock. It is caller's duty to release the init refcount on the entry, so
243  * it is also unnecessary to increase refcount on the entry.
244  */
245 static struct ll_sa_entry *
246 ll_sa_entry_get_byname(struct ll_statahead_info *sai, const struct qstr *qstr)
247 {
248         struct ll_sa_entry *entry;
249         int i = ll_sa_entry_hash(qstr->hash);
250
251         list_for_each_entry(entry, &sai->sai_cache[i], se_hash) {
252                 if (entry->se_qstr.hash == qstr->hash &&
253                     entry->se_qstr.len == qstr->len &&
254                     memcmp(entry->se_qstr.name, qstr->name, qstr->len) == 0)
255                         return entry;
256         }
257         return NULL;
258 }
259
/*
 * Used by the async getattr request callback to find entry with index.
 *
 * Inside lli_sa_lock to prevent others to change the list during the search.
 * It needs to increase entry refcount before returning to guarantee that the
 * entry cannot be freed by others.
 */
static struct ll_sa_entry *
ll_sa_entry_get_byindex(struct ll_statahead_info *sai, __u64 index)
{
        struct ll_sa_entry *entry;

        list_for_each_entry(entry, &sai->sai_entries, se_link) {
                if (entry->se_index == index) {
                        LASSERT(atomic_read(&entry->se_refcount) > 0);
                        atomic_inc(&entry->se_refcount);
                        return entry;
                }
                /* sai_entries is sorted by se_index ascending, so once we
                 * pass @index the entry cannot exist
                 */
                if (entry->se_index > index)
                        break;
        }
        return NULL;
}
283
284 static void ll_sa_entry_cleanup(struct ll_statahead_info *sai,
285                                 struct ll_sa_entry *entry)
286 {
287         struct md_enqueue_info *minfo = entry->se_minfo;
288         struct ptlrpc_request  *req   = entry->se_req;
289
290         if (minfo) {
291                 entry->se_minfo = NULL;
292                 ll_intent_release(&minfo->mi_it);
293                 iput(minfo->mi_dir);
294                 kfree(minfo);
295         }
296
297         if (req) {
298                 entry->se_req = NULL;
299                 ptlrpc_req_finished(req);
300         }
301 }
302
/*
 * Drop one reference on @entry; the last reference frees it.
 *
 * By the time the refcount hits zero the entry must already be off all
 * three lists (link/list/hash) — see do_sa_entry_fini().
 */
static void ll_sa_entry_put(struct ll_statahead_info *sai,
                            struct ll_sa_entry *entry)
{
        if (atomic_dec_and_test(&entry->se_refcount)) {
                CDEBUG(D_READA, "free sa entry %.*s(%p) index %llu\n",
                       entry->se_qstr.len, entry->se_qstr.name, entry,
                       entry->se_index);

                LASSERT(list_empty(&entry->se_link));
                LASSERT(list_empty(&entry->se_list));
                LASSERT(list_empty(&entry->se_hash));

                /* release any pending intent/request, then the inode ref */
                ll_sa_entry_cleanup(sai, entry);
                iput(entry->se_inode);

                kfree(entry);
                atomic_dec(&sai->sai_cache_count);
        }
}
322
/*
 * Tear down @entry: unhash it, mark it SA_ENTRY_DEST, unlink it from
 * sai_entries and any received/stated list, then drop one reference.
 *
 * The unhash is done outside lli_sa_lock (it takes the per-bucket cache
 * lock); the state change and list removal are done under lli_sa_lock.
 */
static inline void
do_sa_entry_fini(struct ll_statahead_info *sai, struct ll_sa_entry *entry)
{
        struct ll_inode_info *lli = ll_i2info(sai->sai_inode);

        LASSERT(!list_empty(&entry->se_hash));
        LASSERT(!list_empty(&entry->se_link));

        ll_sa_entry_unhash(sai, entry);

        spin_lock(&lli->lli_sa_lock);
        entry->se_stat = SA_ENTRY_DEST;
        list_del_init(&entry->se_link);
        if (likely(!list_empty(&entry->se_list)))
                list_del_init(&entry->se_list);
        spin_unlock(&lli->lli_sa_lock);

        ll_sa_entry_put(sai, entry);
}
342
/*
 * Finalize @entry (may be NULL) and opportunistically drop any entries
 * that have fallen behind the statahead window.
 *
 * Delete it from sai_entries_stated list when fini.
 */
static void
ll_sa_entry_fini(struct ll_statahead_info *sai, struct ll_sa_entry *entry)
{
        struct ll_sa_entry *pos, *next;

        if (entry)
                do_sa_entry_fini(sai, entry);

        /* drop old entry, only 'scanner' process does this, no need to lock */
        list_for_each_entry_safe(pos, next, &sai->sai_entries, se_link) {
                /* list is sorted by se_index, stop at the first live one */
                if (!is_omitted_entry(sai, pos->se_index))
                        break;
                do_sa_entry_fini(sai, pos);
        }
}
361
/*
 * Inside lli_sa_lock.
 *
 * Move @entry onto sai_entries_stated, keeping that list sorted by
 * se_index ascending, and record the new state.
 */
static void
do_sa_entry_to_stated(struct ll_statahead_info *sai,
                      struct ll_sa_entry *entry, enum se_stat stat)
{
        struct ll_sa_entry *se;
        struct list_head         *pos = &sai->sai_entries_stated;

        if (!list_empty(&entry->se_list))
                list_del_init(&entry->se_list);

        /* walk backwards to find the last entry with a smaller index and
         * insert right after it
         */
        list_for_each_entry_reverse(se, &sai->sai_entries_stated, se_list) {
                if (se->se_index < entry->se_index) {
                        pos = &se->se_list;
                        break;
                }
        }

        list_add(&entry->se_list, pos);
        entry->se_stat = stat;
}
385
386 /*
387  * Move entry to sai_entries_stated and sort with the index.
388  * \retval 1    -- entry to be destroyed.
389  * \retval 0    -- entry is inserted into stated list.
390  */
391 static int
392 ll_sa_entry_to_stated(struct ll_statahead_info *sai,
393                       struct ll_sa_entry *entry, enum se_stat stat)
394 {
395         struct ll_inode_info *lli = ll_i2info(sai->sai_inode);
396         int                ret = 1;
397
398         ll_sa_entry_cleanup(sai, entry);
399
400         spin_lock(&lli->lli_sa_lock);
401         if (likely(entry->se_stat != SA_ENTRY_DEST)) {
402                 do_sa_entry_to_stated(sai, entry, stat);
403                 ret = 0;
404         }
405         spin_unlock(&lli->lli_sa_lock);
406
407         return ret;
408 }
409
/*
 * Insert inode into the list of sai_entries_agl.
 *
 * Takes the child's lli_agl_lock first to claim the inode (lli_agl_index
 * becomes non-zero), then the parent's lli_agl_lock to queue it; an extra
 * inode reference is held while it sits on the AGL list.  The AGL thread
 * is only woken when the list transitions from empty to non-empty.
 */
static void ll_agl_add(struct ll_statahead_info *sai,
                       struct inode *inode, int index)
{
        struct ll_inode_info *child  = ll_i2info(inode);
        struct ll_inode_info *parent = ll_i2info(sai->sai_inode);
        int                added  = 0;

        spin_lock(&child->lli_agl_lock);
        if (child->lli_agl_index == 0) {
                child->lli_agl_index = index;
                spin_unlock(&child->lli_agl_lock);

                LASSERT(list_empty(&child->lli_agl_list));

                /* reference dropped by ll_agl_trigger() / teardown */
                igrab(inode);
                spin_lock(&parent->lli_agl_lock);
                if (list_empty(&sai->sai_entries_agl))
                        added = 1;
                list_add_tail(&child->lli_agl_list, &sai->sai_entries_agl);
                spin_unlock(&parent->lli_agl_lock);
        } else {
                /* already queued for AGL by someone else */
                spin_unlock(&child->lli_agl_lock);
        }

        if (added > 0)
                wake_up(&sai->sai_agl_thread.t_ctl_waitq);
}
440
441 static struct ll_statahead_info *ll_sai_alloc(void)
442 {
443         struct ll_statahead_info *sai;
444         int                    i;
445
446         sai = kzalloc(sizeof(*sai), GFP_NOFS);
447         if (!sai)
448                 return NULL;
449
450         atomic_set(&sai->sai_refcount, 1);
451
452         spin_lock(&sai_generation_lock);
453         sai->sai_generation = ++sai_generation;
454         if (unlikely(sai_generation == 0))
455                 sai->sai_generation = ++sai_generation;
456         spin_unlock(&sai_generation_lock);
457
458         sai->sai_max = LL_SA_RPC_MIN;
459         sai->sai_index = 1;
460         init_waitqueue_head(&sai->sai_waitq);
461         init_waitqueue_head(&sai->sai_thread.t_ctl_waitq);
462         init_waitqueue_head(&sai->sai_agl_thread.t_ctl_waitq);
463
464         INIT_LIST_HEAD(&sai->sai_entries);
465         INIT_LIST_HEAD(&sai->sai_entries_received);
466         INIT_LIST_HEAD(&sai->sai_entries_stated);
467         INIT_LIST_HEAD(&sai->sai_entries_agl);
468
469         for (i = 0; i < LL_SA_CACHE_SIZE; i++) {
470                 INIT_LIST_HEAD(&sai->sai_cache[i]);
471                 spin_lock_init(&sai->sai_cache_lock[i]);
472         }
473         atomic_set(&sai->sai_cache_count, 0);
474
475         return sai;
476 }
477
/* Take an additional reference on @sai and return it for convenience. */
static inline struct ll_statahead_info *
ll_sai_get(struct ll_statahead_info *sai)
{
        atomic_inc(&sai->sai_refcount);
        return sai;
}
484
/*
 * Drop a reference on @sai.  The last reference tears the statahead
 * context down: remaining entries are finalized, lli_sai is cleared, the
 * directory inode reference is dropped and the structure is freed.
 *
 * atomic_dec_and_lock() takes lli_sa_lock only when the count reaches
 * zero, so the teardown runs with the lock held.
 */
static void ll_sai_put(struct ll_statahead_info *sai)
{
        struct inode     *inode = sai->sai_inode;
        struct ll_inode_info *lli   = ll_i2info(inode);

        if (atomic_dec_and_lock(&sai->sai_refcount, &lli->lli_sa_lock)) {
                struct ll_sa_entry *entry, *next;

                if (unlikely(atomic_read(&sai->sai_refcount) > 0)) {
                        /* It is race case, the interpret callback just hold
                         * a reference count
                         */
                        spin_unlock(&lli->lli_sa_lock);
                        return;
                }

                LASSERT(!lli->lli_opendir_key);
                LASSERT(thread_is_stopped(&sai->sai_thread));
                LASSERT(thread_is_stopped(&sai->sai_agl_thread));

                lli->lli_sai = NULL;
                lli->lli_opendir_pid = 0;
                spin_unlock(&lli->lli_sa_lock);

                if (sai->sai_sent > sai->sai_replied)
                        CDEBUG(D_READA, "statahead for dir "DFID
                              " does not finish: [sent:%llu] [replied:%llu]\n",
                              PFID(&lli->lli_fid),
                              sai->sai_sent, sai->sai_replied);

                /* finalize whatever entries are left on the main list */
                list_for_each_entry_safe(entry, next, &sai->sai_entries,
                                         se_link)
                        do_sa_entry_fini(sai, entry);

                LASSERT(list_empty(&sai->sai_entries));
                LASSERT(list_empty(&sai->sai_entries_received));
                LASSERT(list_empty(&sai->sai_entries_stated));

                LASSERT(atomic_read(&sai->sai_cache_count) == 0);
                LASSERT(list_empty(&sai->sai_entries_agl));

                iput(inode);
                kfree(sai);
        }
}
530
/*
 * Run one asynchronous glimpse for @inode, which was queued by
 * ll_agl_add().  Resets lli_agl_index and drops the inode reference taken
 * when the inode was added to sai_entries_agl, on every exit path.
 *
 * Do NOT forget to drop inode refcount when into sai_entries_agl.
 */
static void ll_agl_trigger(struct inode *inode, struct ll_statahead_info *sai)
{
        struct ll_inode_info *lli   = ll_i2info(inode);
        __u64            index = lli->lli_agl_index;
        int                rc;

        LASSERT(list_empty(&lli->lli_agl_list));

        /* AGL maybe fall behind statahead with one entry */
        if (is_omitted_entry(sai, index + 1)) {
                lli->lli_agl_index = 0;
                iput(inode);
                return;
        }

        /* Someone is in glimpse (sync or async), do nothing. */
        rc = down_write_trylock(&lli->lli_glimpse_sem);
        if (rc == 0) {
                lli->lli_agl_index = 0;
                iput(inode);
                return;
        }

        /*
         * Someone triggered glimpse within 1 sec before.
         * 1) The former glimpse succeeded with glimpse lock granted by OST, and
         *    if the lock is still cached on client, AGL needs to do nothing. If
         *    it is cancelled by other client, AGL maybe cannot obtain new lock
         *    for no glimpse callback triggered by AGL.
         * 2) The former glimpse succeeded, but OST did not grant glimpse lock.
         *    Under such case, it is quite possible that the OST will not grant
         *    glimpse lock for AGL also.
         * 3) The former glimpse failed, compared with other two cases, it is
         *    relative rare. AGL can ignore such case, and it will not muchly
         *    affect the performance.
         */
        if (lli->lli_glimpse_time != 0 &&
            time_before(cfs_time_shift(-1), lli->lli_glimpse_time)) {
                up_write(&lli->lli_glimpse_sem);
                lli->lli_agl_index = 0;
                iput(inode);
                return;
        }

        CDEBUG(D_READA, "Handling (init) async glimpse: inode = "
               DFID", idx = %llu\n", PFID(&lli->lli_fid), index);

        cl_agl(inode);
        lli->lli_agl_index = 0;
        lli->lli_glimpse_time = cfs_time_current();
        up_write(&lli->lli_glimpse_sem);

        CDEBUG(D_READA, "Handled (init) async glimpse: inode= "
               DFID", idx = %llu, rc = %d\n",
               PFID(&lli->lli_fid), index, rc);

        iput(inode);
}
590
/*
 * Process one entry from sai_entries_received: validate the reply body,
 * revalidate/instantiate the child inode, then move the entry to the
 * stated list (SA_ENTRY_SUCC or SA_ENTRY_INVA) and wake a waiter if it
 * is the entry being waited for.
 */
static void ll_post_statahead(struct ll_statahead_info *sai)
{
        struct inode       *dir   = sai->sai_inode;
        struct inode       *child;
        struct ll_inode_info   *lli   = ll_i2info(dir);
        struct ll_sa_entry     *entry;
        struct md_enqueue_info *minfo;
        struct lookup_intent   *it;
        struct ptlrpc_request  *req;
        struct mdt_body *body;
        int                  rc    = 0;

        /* detach the head of the received list under lli_sa_lock, holding
         * an extra entry reference while we work on it
         */
        spin_lock(&lli->lli_sa_lock);
        if (unlikely(list_empty(&sai->sai_entries_received))) {
                spin_unlock(&lli->lli_sa_lock);
                return;
        }
        entry = list_entry(sai->sai_entries_received.next,
                           struct ll_sa_entry, se_list);
        atomic_inc(&entry->se_refcount);
        list_del_init(&entry->se_list);
        spin_unlock(&lli->lli_sa_lock);

        LASSERT(entry->se_handle != 0);

        minfo = entry->se_minfo;
        it = &minfo->mi_it;
        req = entry->se_req;
        body = req_capsule_server_get(&req->rq_pill, &RMF_MDT_BODY);
        if (!body) {
                rc = -EFAULT;
                goto out;
        }

        child = entry->se_inode;
        if (!child) {
                /*
                 * lookup.
                 */
                LASSERT(fid_is_zero(&minfo->mi_data.op_fid2));

                /* XXX: No fid in reply, this is probably cross-ref case.
                 * SA can't handle it yet.
                 */
                if (body->valid & OBD_MD_MDS) {
                        rc = -EAGAIN;
                        goto out;
                }
        } else {
                /*
                 * revalidate.
                 */
                /* unlinked and re-created with the same name */
                if (unlikely(!lu_fid_eq(&minfo->mi_data.op_fid2, &body->fid1))) {
                        entry->se_inode = NULL;
                        iput(child);
                        child = NULL;
                }
        }

        it->it_lock_handle = entry->se_handle;
        rc = md_revalidate_lock(ll_i2mdexp(dir), it, ll_inode2fid(dir), NULL);
        if (rc != 1) {
                rc = -EAGAIN;
                goto out;
        }

        rc = ll_prep_inode(&child, req, dir->i_sb, it);
        if (rc)
                goto out;

        CDEBUG(D_DLMTRACE, "%s: setting l_data to inode "DFID"%p\n",
               ll_get_fsname(child->i_sb, NULL, 0),
               PFID(ll_inode2fid(child)), child);
        ll_set_lock_data(ll_i2sbi(dir)->ll_md_exp, child, it, NULL);

        entry->se_inode = child;

        if (agl_should_run(sai, child))
                ll_agl_add(sai, child, entry->se_index);

out:
        /* The "ll_sa_entry_to_stated()" will drop related ldlm ibits lock
         * reference count by calling "ll_intent_drop_lock()" in spite of the
         * above operations failed or not. Do not worry about calling
         * "ll_intent_drop_lock()" more than once.
         */
        rc = ll_sa_entry_to_stated(sai, entry,
                                   rc < 0 ? SA_ENTRY_INVA : SA_ENTRY_SUCC);
        if (rc == 0 && entry->se_index == sai->sai_index_wait)
                wake_up(&sai->sai_waitq);
        ll_sa_entry_put(sai, entry);
}
684
/*
 * Completion callback for the async getattr RPC (runs in ptlrpcd).
 *
 * On success the entry is queued on sai_entries_received for the
 * statahead thread to process; on any failure the entry is moved straight
 * to the stated list as SA_ENTRY_INVA.  Stale callbacks (sai gone or
 * generation mismatch) release the intent and minfo themselves.
 */
static int ll_statahead_interpret(struct ptlrpc_request *req,
                                  struct md_enqueue_info *minfo, int rc)
{
        struct lookup_intent     *it  = &minfo->mi_it;
        struct inode         *dir = minfo->mi_dir;
        struct ll_inode_info     *lli = ll_i2info(dir);
        struct ll_statahead_info *sai = NULL;
        struct ll_sa_entry       *entry;
        __u64                     handle = 0;
        int                    wakeup;

        if (it_disposition(it, DISP_LOOKUP_NEG))
                rc = -ENOENT;

        if (rc == 0) {
                /* release ibits lock ASAP to avoid deadlock when statahead
                 * thread enqueues lock on parent in readdir and another
                 * process enqueues lock on child with parent lock held, eg.
                 * unlink.
                 */
                handle = it->it_lock_handle;
                ll_intent_drop_lock(it);
        }

        spin_lock(&lli->lli_sa_lock);
        /* stale entry */
        if (unlikely(!lli->lli_sai ||
                     lli->lli_sai->sai_generation != minfo->mi_generation)) {
                spin_unlock(&lli->lli_sa_lock);
                rc = -ESTALE;
                goto out;
        } else {
                sai = ll_sai_get(lli->lli_sai);
                if (unlikely(!thread_is_running(&sai->sai_thread))) {
                        sai->sai_replied++;
                        spin_unlock(&lli->lli_sa_lock);
                        rc = -EBADFD;
                        goto out;
                }

                /* mi_cbdata carries the entry's se_index */
                entry = ll_sa_entry_get_byindex(sai, minfo->mi_cbdata);
                if (!entry) {
                        sai->sai_replied++;
                        spin_unlock(&lli->lli_sa_lock);
                        rc = -EIDRM;
                        goto out;
                }

                if (rc != 0) {
                        do_sa_entry_to_stated(sai, entry, SA_ENTRY_INVA);
                        wakeup = (entry->se_index == sai->sai_index_wait);
                } else {
                        entry->se_minfo = minfo;
                        entry->se_req = ptlrpc_request_addref(req);
                        /* Release the async ibits lock ASAP to avoid deadlock
                         * when statahead thread tries to enqueue lock on parent
                         * for readpage and other tries to enqueue lock on child
                         * with parent's lock held, for example: unlink.
                         */
                        entry->se_handle = handle;
                        wakeup = list_empty(&sai->sai_entries_received);
                        list_add_tail(&entry->se_list,
                                      &sai->sai_entries_received);
                }
                sai->sai_replied++;
                spin_unlock(&lli->lli_sa_lock);

                /* drop the ref taken by ll_sa_entry_get_byindex() */
                ll_sa_entry_put(sai, entry);
                if (wakeup)
                        wake_up(&sai->sai_thread.t_ctl_waitq);
        }

out:
        if (rc != 0) {
                /* on any error path we own it/dir/minfo and must free them */
                ll_intent_release(it);
                iput(dir);
                kfree(minfo);
        }
        if (sai)
                ll_sai_put(sai);
        return rc;
}
767
/*
 * Release the argument pair built by sa_args_init(): drop the directory
 * inode reference held in minfo and free both structures.
 */
static void sa_args_fini(struct md_enqueue_info *minfo,
                         struct ldlm_enqueue_info *einfo)
{
        LASSERT(minfo && einfo);
        iput(minfo->mi_dir);
        kfree(minfo);
        kfree(einfo);
}
776
777 /**
778  * prepare arguments for async stat RPC.
779  */
780 static int sa_args_init(struct inode *dir, struct inode *child,
781                         struct ll_sa_entry *entry, struct md_enqueue_info **pmi,
782                         struct ldlm_enqueue_info **pei)
783 {
784         const struct qstr      *qstr = &entry->se_qstr;
785         struct ll_inode_info     *lli  = ll_i2info(dir);
786         struct md_enqueue_info   *minfo;
787         struct ldlm_enqueue_info *einfo;
788         struct md_op_data       *op_data;
789
790         einfo = kzalloc(sizeof(*einfo), GFP_NOFS);
791         if (!einfo)
792                 return -ENOMEM;
793
794         minfo = kzalloc(sizeof(*minfo), GFP_NOFS);
795         if (!minfo) {
796                 kfree(einfo);
797                 return -ENOMEM;
798         }
799
800         op_data = ll_prep_md_op_data(&minfo->mi_data, dir, child, qstr->name,
801                                      qstr->len, 0, LUSTRE_OPC_ANY, NULL);
802         if (IS_ERR(op_data)) {
803                 kfree(einfo);
804                 kfree(minfo);
805                 return PTR_ERR(op_data);
806         }
807
808         minfo->mi_it.it_op = IT_GETATTR;
809         minfo->mi_dir = igrab(dir);
810         minfo->mi_cb = ll_statahead_interpret;
811         minfo->mi_generation = lli->lli_sai->sai_generation;
812         minfo->mi_cbdata = entry->se_index;
813
814         einfo->ei_type   = LDLM_IBITS;
815         einfo->ei_mode   = it_to_lock_mode(&minfo->mi_it);
816         einfo->ei_cb_bl  = ll_md_blocking_ast;
817         einfo->ei_cb_cp  = ldlm_completion_ast;
818         einfo->ei_cb_gl  = NULL;
819         einfo->ei_cbdata = NULL;
820
821         *pmi = minfo;
822         *pei = einfo;
823
824         return 0;
825 }
826
827 static int do_sa_lookup(struct inode *dir, struct ll_sa_entry *entry)
828 {
829         struct md_enqueue_info   *minfo;
830         struct ldlm_enqueue_info *einfo;
831         int                    rc;
832
833         rc = sa_args_init(dir, NULL, entry, &minfo, &einfo);
834         if (rc)
835                 return rc;
836
837         rc = md_intent_getattr_async(ll_i2mdexp(dir), minfo, einfo);
838         if (rc < 0)
839                 sa_args_fini(minfo, einfo);
840
841         return rc;
842 }
843
844 /**
845  * similar to ll_revalidate_it().
846  * \retval      1 -- dentry valid
847  * \retval      0 -- will send stat-ahead request
848  * \retval others -- prepare stat-ahead request failed
849  */
850 static int do_sa_revalidate(struct inode *dir, struct ll_sa_entry *entry,
851                             struct dentry *dentry)
852 {
853         struct inode         *inode = d_inode(dentry);
854         struct lookup_intent      it = { .it_op = IT_GETATTR,
855                                          .it_lock_handle = 0 };
856         struct md_enqueue_info   *minfo;
857         struct ldlm_enqueue_info *einfo;
858         int rc;
859
860         if (unlikely(!inode))
861                 return 1;
862
863         if (d_mountpoint(dentry))
864                 return 1;
865
866         entry->se_inode = igrab(inode);
867         rc = md_revalidate_lock(ll_i2mdexp(dir), &it, ll_inode2fid(inode),
868                                 NULL);
869         if (rc == 1) {
870                 entry->se_handle = it.it_lock_handle;
871                 ll_intent_release(&it);
872                 return 1;
873         }
874
875         rc = sa_args_init(dir, inode, entry, &minfo, &einfo);
876         if (rc) {
877                 entry->se_inode = NULL;
878                 iput(inode);
879                 return rc;
880         }
881
882         rc = md_intent_getattr_async(ll_i2mdexp(dir), minfo, einfo);
883         if (rc < 0) {
884                 entry->se_inode = NULL;
885                 iput(inode);
886                 sa_args_fini(minfo, einfo);
887         }
888
889         return rc;
890 }
891
892 static void ll_statahead_one(struct dentry *parent, const char *entry_name,
893                              int entry_name_len)
894 {
895         struct inode         *dir    = d_inode(parent);
896         struct ll_inode_info     *lli    = ll_i2info(dir);
897         struct ll_statahead_info *sai    = lli->lli_sai;
898         struct dentry       *dentry = NULL;
899         struct ll_sa_entry       *entry;
900         int                    rc;
901         int                    rc1;
902
903         entry = ll_sa_entry_alloc(parent, sai, sai->sai_index, entry_name,
904                                   entry_name_len);
905         if (IS_ERR(entry))
906                 return;
907
908         dentry = d_lookup(parent, &entry->se_qstr);
909         if (!dentry) {
910                 rc = do_sa_lookup(dir, entry);
911         } else {
912                 rc = do_sa_revalidate(dir, entry, dentry);
913                 if (rc == 1 && agl_should_run(sai, d_inode(dentry)))
914                         ll_agl_add(sai, d_inode(dentry), entry->se_index);
915
916                 dput(dentry);
917         }
918
919         if (rc) {
920                 rc1 = ll_sa_entry_to_stated(sai, entry,
921                                         rc < 0 ? SA_ENTRY_INVA : SA_ENTRY_SUCC);
922                 if (rc1 == 0 && entry->se_index == sai->sai_index_wait)
923                         wake_up(&sai->sai_waitq);
924         } else {
925                 sai->sai_sent++;
926         }
927
928         sai->sai_index++;
929         /* drop one refcount on entry by ll_sa_entry_alloc */
930         ll_sa_entry_put(sai, entry);
931 }
932
/*
 * Main loop of the AGL (asynchronous glimpse lock) thread: repeatedly
 * pull inodes queued on sai->sai_entries_agl and trigger a glimpse for
 * each via ll_agl_trigger().  On stop, drain the queue and drop the
 * inode references the queued entries hold.
 *
 * \param arg	parent dentry of the directory being stat-ahead
 */
static int ll_agl_thread(void *arg)
{
	struct dentry       *parent = arg;
	struct inode         *dir    = d_inode(parent);
	struct ll_inode_info     *plli   = ll_i2info(dir);
	struct ll_inode_info     *clli;
	struct ll_sb_info       *sbi    = ll_i2sbi(dir);
	struct ll_statahead_info *sai    = ll_sai_get(plli->lli_sai);
	struct ptlrpc_thread     *thread = &sai->sai_agl_thread;
	struct l_wait_info      lwi    = { 0 };

	thread->t_pid = current_pid();
	CDEBUG(D_READA, "agl thread started: sai %p, parent %pd\n",
	       sai, parent);

	atomic_inc(&sbi->ll_agl_total);
	spin_lock(&plli->lli_agl_lock);
	sai->sai_agl_valid = 1;
	if (thread_is_init(thread))
		/* If someone else has changed the thread state
		 * (e.g. already changed to SVC_STOPPING), we can't just
		 * blindly overwrite that setting.
		 */
		thread_set_flags(thread, SVC_RUNNING);
	spin_unlock(&plli->lli_agl_lock);
	/* let ll_start_agl() know we are up (it waits for running/stopped) */
	wake_up(&thread->t_ctl_waitq);

	while (1) {
		/* sleep until there is AGL work or we are told to stop */
		l_wait_event(thread->t_ctl_waitq,
			     !list_empty(&sai->sai_entries_agl) ||
			     !thread_is_running(thread),
			     &lwi);

		if (!thread_is_running(thread))
			break;

		spin_lock(&plli->lli_agl_lock);
		/* The statahead thread maybe help to process AGL entries,
		 * so check whether list empty again.
		 */
		if (!list_empty(&sai->sai_entries_agl)) {
			clli = list_entry(sai->sai_entries_agl.next,
					  struct ll_inode_info, lli_agl_list);
			list_del_init(&clli->lli_agl_list);
			/* drop the lock across the trigger RPC */
			spin_unlock(&plli->lli_agl_lock);
			ll_agl_trigger(&clli->lli_vfs_inode, sai);
		} else {
			spin_unlock(&plli->lli_agl_lock);
		}
	}

	/* stopping: drain the queue, releasing each queued inode ref */
	spin_lock(&plli->lli_agl_lock);
	sai->sai_agl_valid = 0;
	while (!list_empty(&sai->sai_entries_agl)) {
		clli = list_entry(sai->sai_entries_agl.next,
				  struct ll_inode_info, lli_agl_list);
		list_del_init(&clli->lli_agl_list);
		spin_unlock(&plli->lli_agl_lock);
		clli->lli_agl_index = 0;
		iput(&clli->lli_vfs_inode);
		spin_lock(&plli->lli_agl_lock);
	}
	thread_set_flags(thread, SVC_STOPPED);
	spin_unlock(&plli->lli_agl_lock);
	/* wake whoever waits in ll_statahead_thread() for our stop */
	wake_up(&thread->t_ctl_waitq);
	ll_sai_put(sai);
	CDEBUG(D_READA, "agl thread stopped: sai %p, parent %pd\n",
	       sai, parent);
	return 0;
}
1003
1004 static void ll_start_agl(struct dentry *parent, struct ll_statahead_info *sai)
1005 {
1006         struct ptlrpc_thread *thread = &sai->sai_agl_thread;
1007         struct l_wait_info    lwi    = { 0 };
1008         struct ll_inode_info  *plli;
1009         struct task_struct *task;
1010
1011         CDEBUG(D_READA, "start agl thread: sai %p, parent %pd\n",
1012                sai, parent);
1013
1014         plli = ll_i2info(d_inode(parent));
1015         task = kthread_run(ll_agl_thread, parent, "ll_agl_%u",
1016                            plli->lli_opendir_pid);
1017         if (IS_ERR(task)) {
1018                 CERROR("can't start ll_agl thread, rc: %ld\n", PTR_ERR(task));
1019                 thread_set_flags(thread, SVC_STOPPED);
1020                 return;
1021         }
1022
1023         l_wait_event(thread->t_ctl_waitq,
1024                      thread_is_running(thread) || thread_is_stopped(thread),
1025                      &lwi);
1026 }
1027
/*
 * Main loop of the statahead thread: walk the directory's dirent pages
 * and issue an async getattr for every name ahead of the traversing
 * process, parking when the send window (sa_sent_full) is full.  While
 * parked it helps the AGL thread drain its queue.  On exit it stops the
 * AGL thread (if started), drains received replies, marks itself
 * stopped and drops its sai and parent-dentry references.
 *
 * \param arg	parent dentry of the directory (reference dropped here)
 * \retval 0 on normal stop, negative errno if reading the dir failed
 */
static int ll_statahead_thread(void *arg)
{
	struct dentry       *parent = arg;
	struct inode         *dir    = d_inode(parent);
	struct ll_inode_info     *plli   = ll_i2info(dir);
	struct ll_inode_info     *clli;
	struct ll_sb_info       *sbi    = ll_i2sbi(dir);
	struct ll_statahead_info *sai    = ll_sai_get(plli->lli_sai);
	struct ptlrpc_thread     *thread = &sai->sai_thread;
	struct ptlrpc_thread *agl_thread = &sai->sai_agl_thread;
	struct page           *page;
	__u64                pos    = 0;
	int                    first  = 0;
	int                    rc     = 0;
	struct ll_dir_chain       chain;
	struct l_wait_info      lwi    = { 0 };

	thread->t_pid = current_pid();
	CDEBUG(D_READA, "statahead thread starting: sai %p, parent %pd\n",
	       sai, parent);

	if (sbi->ll_flags & LL_SBI_AGL_ENABLED)
		ll_start_agl(parent, sai);

	atomic_inc(&sbi->ll_sa_total);
	spin_lock(&plli->lli_sa_lock);
	if (thread_is_init(thread))
		/* If someone else has changed the thread state
		 * (e.g. already changed to SVC_STOPPING), we can't just
		 * blindly overwrite that setting.
		 */
		thread_set_flags(thread, SVC_RUNNING);
	spin_unlock(&plli->lli_sa_lock);
	/* notify the starter that we are up */
	wake_up(&thread->t_ctl_waitq);

	ll_dir_chain_init(&chain);
	page = ll_get_dir_page(dir, pos, &chain);

	while (1) {
		struct lu_dirpage *dp;
		struct lu_dirent  *ent;

		if (IS_ERR(page)) {
			rc = PTR_ERR(page);
			CDEBUG(D_READA, "error reading dir "DFID" at %llu/%llu: [rc %d] [parent %u]\n",
			       PFID(ll_inode2fid(dir)), pos, sai->sai_index,
			       rc, plli->lli_opendir_pid);
			goto out;
		}

		dp = page_address(page);
		for (ent = lu_dirent_start(dp); ent;
		     ent = lu_dirent_next(ent)) {
			__u64 hash;
			int namelen;
			char *name;

			hash = le64_to_cpu(ent->lde_hash);
			if (unlikely(hash < pos))
				/*
				 * Skip until we find target hash value.
				 */
				continue;

			namelen = le16_to_cpu(ent->lde_namelen);
			if (unlikely(namelen == 0))
				/*
				 * Skip dummy record.
				 */
				continue;

			name = ent->lde_name;
			if (name[0] == '.') {
				if (namelen == 1) {
					/*
					 * skip "."
					 */
					continue;
				} else if (name[1] == '.' && namelen == 2) {
					/*
					 * skip ".."
					 */
					continue;
				} else if (!sai->sai_ls_all) {
					/*
					 * skip hidden files.
					 */
					sai->sai_skip_hidden++;
					continue;
				}
			}

			/*
			 * don't stat-ahead first entry.
			 */
			if (unlikely(++first == 1))
				continue;

keep_it:
			/* park until there is window to send, a reply to
			 * handle, AGL work to help with, or a stop request
			 */
			l_wait_event(thread->t_ctl_waitq,
				     !sa_sent_full(sai) ||
				     !list_empty(&sai->sai_entries_received) ||
				     !list_empty(&sai->sai_entries_agl) ||
				     !thread_is_running(thread),
				     &lwi);

interpret_it:
			/* consume any replies that have arrived */
			while (!list_empty(&sai->sai_entries_received))
				ll_post_statahead(sai);

			if (unlikely(!thread_is_running(thread))) {
				ll_release_page(page, 0);
				rc = 0;
				goto out;
			}

			/* If no window for metadata statahead, but there are
			 * some AGL entries to be triggered, then try to help
			 * to process the AGL entries.
			 */
			if (sa_sent_full(sai)) {
				spin_lock(&plli->lli_agl_lock);
				while (!list_empty(&sai->sai_entries_agl)) {
					clli = list_entry(sai->sai_entries_agl.next,
							  struct ll_inode_info, lli_agl_list);
					list_del_init(&clli->lli_agl_list);
					spin_unlock(&plli->lli_agl_lock);
					ll_agl_trigger(&clli->lli_vfs_inode,
						       sai);

					if (!list_empty(&sai->sai_entries_received))
						goto interpret_it;

					if (unlikely(
						!thread_is_running(thread))) {
						ll_release_page(page, 0);
						rc = 0;
						goto out;
					}

					if (!sa_sent_full(sai))
						goto do_it;

					spin_lock(&plli->lli_agl_lock);
				}
				spin_unlock(&plli->lli_agl_lock);

				goto keep_it;
			}

do_it:
			ll_statahead_one(parent, name, namelen);
		}
		pos = le64_to_cpu(dp->ldp_hash_end);
		if (pos == MDS_DIR_END_OFF) {
			/*
			 * End of directory reached.
			 */
			ll_release_page(page, 0);
			/* wait until every sent request has been replied */
			while (1) {
				l_wait_event(thread->t_ctl_waitq,
					     !list_empty(&sai->sai_entries_received) ||
					     sai->sai_sent == sai->sai_replied ||
					     !thread_is_running(thread),
					     &lwi);

				while (!list_empty(&sai->sai_entries_received))
					ll_post_statahead(sai);

				if (unlikely(!thread_is_running(thread))) {
					rc = 0;
					goto out;
				}

				if (sai->sai_sent == sai->sai_replied &&
				    list_empty(&sai->sai_entries_received))
					break;
			}

			/* drain remaining AGL work before exiting */
			spin_lock(&plli->lli_agl_lock);
			while (!list_empty(&sai->sai_entries_agl) &&
			       thread_is_running(thread)) {
				clli = list_entry(sai->sai_entries_agl.next,
						  struct ll_inode_info, lli_agl_list);
				list_del_init(&clli->lli_agl_list);
				spin_unlock(&plli->lli_agl_lock);
				ll_agl_trigger(&clli->lli_vfs_inode, sai);
				spin_lock(&plli->lli_agl_lock);
			}
			spin_unlock(&plli->lli_agl_lock);

			rc = 0;
			goto out;
		} else if (1) {
			/*
			 * chain is exhausted.
			 * Normal case: continue to the next page.
			 */
			ll_release_page(page, le32_to_cpu(dp->ldp_flags) &
					      LDF_COLLIDE);
			page = ll_get_dir_page(dir, pos, &chain);
		} else {
			/* NOTE(review): unreachable - the "else if (1)"
			 * above always wins; kept byte-for-byte as-is.
			 */
			LASSERT(le32_to_cpu(dp->ldp_flags) & LDF_COLLIDE);
			ll_release_page(page, 1);
			/*
			 * go into overflow page.
			 */
		}
	}

out:
	if (sai->sai_agl_valid) {
		/* ask the AGL thread to stop, then wait for it */
		spin_lock(&plli->lli_agl_lock);
		thread_set_flags(agl_thread, SVC_STOPPING);
		spin_unlock(&plli->lli_agl_lock);
		wake_up(&agl_thread->t_ctl_waitq);

		CDEBUG(D_READA, "stop agl thread: sai %p pid %u\n",
		       sai, (unsigned int)agl_thread->t_pid);
		l_wait_event(agl_thread->t_ctl_waitq,
			     thread_is_stopped(agl_thread),
			     &lwi);
	} else {
		/* Set agl_thread flags anyway. */
		thread_set_flags(&sai->sai_agl_thread, SVC_STOPPED);
	}
	ll_dir_chain_fini(&chain);
	spin_lock(&plli->lli_sa_lock);
	if (!list_empty(&sai->sai_entries_received)) {
		thread_set_flags(thread, SVC_STOPPING);
		spin_unlock(&plli->lli_sa_lock);

		/* To release the resources held by received entries. */
		while (!list_empty(&sai->sai_entries_received))
			ll_post_statahead(sai);

		spin_lock(&plli->lli_sa_lock);
	}
	thread_set_flags(thread, SVC_STOPPED);
	spin_unlock(&plli->lli_sa_lock);
	/* wake both stat waiters and anyone waiting for our stop */
	wake_up(&sai->sai_waitq);
	wake_up(&thread->t_ctl_waitq);
	ll_sai_put(sai);
	dput(parent);
	CDEBUG(D_READA, "statahead thread stopped: sai %p, parent %pd\n",
	       sai, parent);
	return rc;
}
1276
1277 /**
1278  * called in ll_file_release().
1279  */
1280 void ll_stop_statahead(struct inode *dir, void *key)
1281 {
1282         struct ll_inode_info *lli = ll_i2info(dir);
1283
1284         if (unlikely(!key))
1285                 return;
1286
1287         spin_lock(&lli->lli_sa_lock);
1288         if (lli->lli_opendir_key != key || lli->lli_opendir_pid == 0) {
1289                 spin_unlock(&lli->lli_sa_lock);
1290                 return;
1291         }
1292
1293         lli->lli_opendir_key = NULL;
1294
1295         if (lli->lli_sai) {
1296                 struct l_wait_info lwi = { 0 };
1297                 struct ptlrpc_thread *thread = &lli->lli_sai->sai_thread;
1298
1299                 if (!thread_is_stopped(thread)) {
1300                         thread_set_flags(thread, SVC_STOPPING);
1301                         spin_unlock(&lli->lli_sa_lock);
1302                         wake_up(&thread->t_ctl_waitq);
1303
1304                         CDEBUG(D_READA, "stop statahead thread: sai %p pid %u\n",
1305                                lli->lli_sai, (unsigned int)thread->t_pid);
1306                         l_wait_event(thread->t_ctl_waitq,
1307                                      thread_is_stopped(thread),
1308                                      &lwi);
1309                 } else {
1310                         spin_unlock(&lli->lli_sa_lock);
1311                 }
1312
1313                 /*
1314                  * Put the ref which was held when first statahead_enter.
1315                  * It maybe not the last ref for some statahead requests
1316                  * maybe inflight.
1317                  */
1318                 ll_sai_put(lli->lli_sai);
1319         } else {
1320                 lli->lli_opendir_pid = 0;
1321                 spin_unlock(&lli->lli_sa_lock);
1322         }
1323 }
1324
/* Classification of a dirent's position, returned by is_first_dirent(). */
enum {
	/**
	 * not first dirent, or is "."
	 */
	LS_NONE_FIRST_DE = 0,
	/**
	 * the first non-hidden dirent
	 */
	LS_FIRST_DE,
	/**
	 * the first hidden dirent, that is "."
	 */
	LS_FIRST_DOT_DE
};
1339
/*
 * Walk the directory's dirent pages to decide whether @dentry names the
 * first entry (skipping ".", ".." and dummy records).
 *
 * \retval LS_FIRST_DE	    first dirent and not hidden
 * \retval LS_FIRST_DOT_DE  first dirent and hidden (leading '.')
 * \retval LS_NONE_FIRST_DE otherwise
 * \retval negative errno   if reading the directory page failed
 */
static int is_first_dirent(struct inode *dir, struct dentry *dentry)
{
	struct ll_dir_chain   chain;
	const struct qstr  *target = &dentry->d_name;
	struct page       *page;
	__u64            pos    = 0;
	int                dot_de;
	int                rc     = LS_NONE_FIRST_DE;

	ll_dir_chain_init(&chain);
	page = ll_get_dir_page(dir, pos, &chain);

	while (1) {
		struct lu_dirpage *dp;
		struct lu_dirent  *ent;

		if (IS_ERR(page)) {
			struct ll_inode_info *lli = ll_i2info(dir);

			rc = PTR_ERR(page);
			CERROR("error reading dir "DFID" at %llu: [rc %d] [parent %u]\n",
			       PFID(ll_inode2fid(dir)), pos,
			       rc, lli->lli_opendir_pid);
			break;
		}

		dp = page_address(page);
		for (ent = lu_dirent_start(dp); ent;
		     ent = lu_dirent_next(ent)) {
			__u64 hash;
			int namelen;
			char *name;

			hash = le64_to_cpu(ent->lde_hash);
			/* The ll_get_dir_page() can return any page containing
			 * the given hash which may be not the start hash.
			 */
			if (unlikely(hash < pos))
				continue;

			namelen = le16_to_cpu(ent->lde_namelen);
			if (unlikely(namelen == 0))
				/*
				 * skip dummy record.
				 */
				continue;

			name = ent->lde_name;
			if (name[0] == '.') {
				if (namelen == 1)
					/*
					 * skip "."
					 */
					continue;
				else if (name[1] == '.' && namelen == 2)
					/*
					 * skip ".."
					 */
					continue;
				else
					dot_de = 1;
			} else {
				dot_de = 0;
			}

			/* hidden entries don't count unless the target
			 * itself is hidden
			 */
			if (dot_de && target->name[0] != '.') {
				CDEBUG(D_READA, "%.*s skip hidden file %.*s\n",
				       target->len, target->name,
				       namelen, name);
				continue;
			}

			/* first countable dirent reached: classify it */
			if (target->len != namelen ||
			    memcmp(target->name, name, namelen) != 0)
				rc = LS_NONE_FIRST_DE;
			else if (!dot_de)
				rc = LS_FIRST_DE;
			else
				rc = LS_FIRST_DOT_DE;

			ll_release_page(page, 0);
			goto out;
		}
		pos = le64_to_cpu(dp->ldp_hash_end);
		if (pos == MDS_DIR_END_OFF) {
			/*
			 * End of directory reached.
			 */
			ll_release_page(page, 0);
			break;
		} else if (1) {
			/*
			 * chain is exhausted
			 * Normal case: continue to the next page.
			 */
			ll_release_page(page, le32_to_cpu(dp->ldp_flags) &
					      LDF_COLLIDE);
			page = ll_get_dir_page(dir, pos, &chain);
		} else {
			/* NOTE(review): unreachable - the "else if (1)"
			 * above always wins; kept byte-for-byte as-is.
			 */
			/*
			 * go into overflow page.
			 */
			LASSERT(le32_to_cpu(dp->ldp_flags) & LDF_COLLIDE);
			ll_release_page(page, 1);
		}
	}

out:
	ll_dir_chain_fini(&chain);
	return rc;
}
1451
1452 static void
1453 ll_sai_unplug(struct ll_statahead_info *sai, struct ll_sa_entry *entry)
1454 {
1455         struct ptlrpc_thread *thread = &sai->sai_thread;
1456         struct ll_sb_info    *sbi    = ll_i2sbi(sai->sai_inode);
1457         int                hit;
1458
1459         if (entry && entry->se_stat == SA_ENTRY_SUCC)
1460                 hit = 1;
1461         else
1462                 hit = 0;
1463
1464         ll_sa_entry_fini(sai, entry);
1465         if (hit) {
1466                 sai->sai_hit++;
1467                 sai->sai_consecutive_miss = 0;
1468                 sai->sai_max = min(2 * sai->sai_max, sbi->ll_sa_max);
1469         } else {
1470                 struct ll_inode_info *lli = ll_i2info(sai->sai_inode);
1471
1472                 sai->sai_miss++;
1473                 sai->sai_consecutive_miss++;
1474                 if (sa_low_hit(sai) && thread_is_running(thread)) {
1475                         atomic_inc(&sbi->ll_sa_wrong);
1476                         CDEBUG(D_READA, "Statahead for dir " DFID " hit ratio too low: hit/miss %llu/%llu, sent/replied %llu/%llu, stopping statahead thread\n",
1477                                PFID(&lli->lli_fid), sai->sai_hit,
1478                                sai->sai_miss, sai->sai_sent,
1479                                sai->sai_replied);
1480                         spin_lock(&lli->lli_sa_lock);
1481                         if (!thread_is_stopped(thread))
1482                                 thread_set_flags(thread, SVC_STOPPING);
1483                         spin_unlock(&lli->lli_sa_lock);
1484                 }
1485         }
1486
1487         if (!thread_is_stopped(thread))
1488                 wake_up(&thread->t_ctl_waitq);
1489 }
1490
1491 /**
1492  * Start statahead thread if this is the first dir entry.
1493  * Otherwise if a thread is started already, wait it until it is ahead of me.
1494  * \retval 1       -- find entry with lock in cache, the caller needs to do
1495  *                  nothing.
1496  * \retval 0       -- find entry in cache, but without lock, the caller needs
1497  *                  refresh from MDS.
1498  * \retval others  -- the caller need to process as non-statahead.
1499  */
1500 int do_statahead_enter(struct inode *dir, struct dentry **dentryp,
1501                        int only_unplug)
1502 {
1503         struct ll_inode_info     *lli   = ll_i2info(dir);
1504         struct ll_statahead_info *sai   = lli->lli_sai;
1505         struct dentry       *parent;
1506         struct ll_sa_entry       *entry;
1507         struct ptlrpc_thread     *thread;
1508         struct l_wait_info      lwi   = { 0 };
1509         struct task_struct *task;
1510         int                    rc    = 0;
1511         struct ll_inode_info     *plli;
1512
1513         LASSERT(lli->lli_opendir_pid == current_pid());
1514
1515         if (sai) {
1516                 thread = &sai->sai_thread;
1517                 if (unlikely(thread_is_stopped(thread) &&
1518                              list_empty(&sai->sai_entries_stated))) {
1519                         /* to release resource */
1520                         ll_stop_statahead(dir, lli->lli_opendir_key);
1521                         return -EAGAIN;
1522                 }
1523
1524                 if ((*dentryp)->d_name.name[0] == '.') {
1525                         if (sai->sai_ls_all ||
1526                             sai->sai_miss_hidden >= sai->sai_skip_hidden) {
1527                                 /*
1528                                  * Hidden dentry is the first one, or statahead
1529                                  * thread does not skip so many hidden dentries
1530                                  * before "sai_ls_all" enabled as below.
1531                                  */
1532                         } else {
1533                                 if (!sai->sai_ls_all)
1534                                         /*
1535                                          * It maybe because hidden dentry is not
1536                                          * the first one, "sai_ls_all" was not
1537                                          * set, then "ls -al" missed. Enable
1538                                          * "sai_ls_all" for such case.
1539                                          */
1540                                         sai->sai_ls_all = 1;
1541
1542                                 /*
1543                                  * Such "getattr" has been skipped before
1544                                  * "sai_ls_all" enabled as above.
1545                                  */
1546                                 sai->sai_miss_hidden++;
1547                                 return -EAGAIN;
1548                         }
1549                 }
1550
1551                 entry = ll_sa_entry_get_byname(sai, &(*dentryp)->d_name);
1552                 if (!entry || only_unplug) {
1553                         ll_sai_unplug(sai, entry);
1554                         return entry ? 1 : -EAGAIN;
1555                 }
1556
1557                 if (!ll_sa_entry_stated(entry)) {
1558                         sai->sai_index_wait = entry->se_index;
1559                         lwi = LWI_TIMEOUT_INTR(cfs_time_seconds(30), NULL,
1560                                                LWI_ON_SIGNAL_NOOP, NULL);
1561                         rc = l_wait_event(sai->sai_waitq,
1562                                           ll_sa_entry_stated(entry) ||
1563                                           thread_is_stopped(thread),
1564                                           &lwi);
1565                         if (rc < 0) {
1566                                 ll_sai_unplug(sai, entry);
1567                                 return -EAGAIN;
1568                         }
1569                 }
1570
1571                 if (entry->se_stat == SA_ENTRY_SUCC && entry->se_inode) {
1572                         struct inode *inode = entry->se_inode;
1573                         struct lookup_intent it = { .it_op = IT_GETATTR,
1574                                                     .it_lock_handle =
1575                                                      entry->se_handle };
1576                         __u64 bits;
1577
1578                         rc = md_revalidate_lock(ll_i2mdexp(dir), &it,
1579                                                 ll_inode2fid(inode), &bits);
1580                         if (rc == 1) {
1581                                 if (!d_inode(*dentryp)) {
1582                                         struct dentry *alias;
1583
1584                                         alias = ll_splice_alias(inode,
1585                                                                 *dentryp);
1586                                         if (IS_ERR(alias)) {
1587                                                 ll_sai_unplug(sai, entry);
1588                                                 return PTR_ERR(alias);
1589                                         }
1590                                         *dentryp = alias;
1591                                 } else if (d_inode(*dentryp) != inode) {
1592                                         /* revalidate, but inode is recreated */
1593                                         CDEBUG(D_READA, "%s: stale dentry %pd inode "DFID", statahead inode "DFID"\n",
1594                                                ll_get_fsname(d_inode(*dentryp)->i_sb, NULL, 0),
1595                                                *dentryp,
1596                                                PFID(ll_inode2fid(d_inode(*dentryp))),
1597                                                PFID(ll_inode2fid(inode)));
1598                                         ll_sai_unplug(sai, entry);
1599                                         return -ESTALE;
1600                                 } else {
1601                                         iput(inode);
1602                                 }
1603                                 entry->se_inode = NULL;
1604
1605                                 if ((bits & MDS_INODELOCK_LOOKUP) &&
1606                                     d_lustre_invalid(*dentryp))
1607                                         d_lustre_revalidate(*dentryp);
1608                                 ll_intent_release(&it);
1609                         }
1610                 }
1611
1612                 ll_sai_unplug(sai, entry);
1613                 return rc;
1614         }
1615
1616         /* I am the "lli_opendir_pid" owner, only me can set "lli_sai". */
1617         rc = is_first_dirent(dir, *dentryp);
1618         if (rc == LS_NONE_FIRST_DE) {
1619                 /* It is not "ls -{a}l" operation, no need statahead for it. */
1620                 rc = -EAGAIN;
1621                 goto out;
1622         }
1623
1624         sai = ll_sai_alloc();
1625         if (!sai) {
1626                 rc = -ENOMEM;
1627                 goto out;
1628         }
1629
1630         sai->sai_ls_all = (rc == LS_FIRST_DOT_DE);
1631         sai->sai_inode = igrab(dir);
1632         if (unlikely(!sai->sai_inode)) {
1633                 CWARN("Do not start stat ahead on dying inode "DFID"\n",
1634                       PFID(&lli->lli_fid));
1635                 rc = -ESTALE;
1636                 goto out;
1637         }
1638
1639         /* get parent reference count here, and put it in ll_statahead_thread */
1640         parent = dget((*dentryp)->d_parent);
1641         if (unlikely(sai->sai_inode != d_inode(parent))) {
1642                 struct ll_inode_info *nlli = ll_i2info(d_inode(parent));
1643
1644                 CWARN("Race condition, someone changed %pd just now: old parent "DFID", new parent "DFID"\n",
1645                       *dentryp,
1646                       PFID(&lli->lli_fid), PFID(&nlli->lli_fid));
1647                 dput(parent);
1648                 iput(sai->sai_inode);
1649                 rc = -EAGAIN;
1650                 goto out;
1651         }
1652
1653         CDEBUG(D_READA, "start statahead thread: sai %p, parent %pd\n",
1654                sai, parent);
1655
1656         /* The sai buffer already has one reference taken at allocation time,
1657          * but as soon as we expose the sai by attaching it to the lli that
1658          * default reference can be dropped by another thread calling
1659          * ll_stop_statahead. We need to take a local reference to protect
1660          * the sai buffer while we intend to access it.
1661          */
1662         ll_sai_get(sai);
1663         lli->lli_sai = sai;
1664
1665         plli = ll_i2info(d_inode(parent));
1666         task = kthread_run(ll_statahead_thread, parent, "ll_sa_%u",
1667                            plli->lli_opendir_pid);
1668         thread = &sai->sai_thread;
1669         if (IS_ERR(task)) {
1670                 rc = PTR_ERR(task);
1671                 CERROR("can't start ll_sa thread, rc: %d\n", rc);
1672                 dput(parent);
1673                 lli->lli_opendir_key = NULL;
1674                 thread_set_flags(thread, SVC_STOPPED);
1675                 thread_set_flags(&sai->sai_agl_thread, SVC_STOPPED);
1676                 /* Drop both our own local reference and the default
1677                  * reference from allocation time.
1678                  */
1679                 ll_sai_put(sai);
1680                 ll_sai_put(sai);
1681                 LASSERT(!lli->lli_sai);
1682                 return -EAGAIN;
1683         }
1684
1685         l_wait_event(thread->t_ctl_waitq,
1686                      thread_is_running(thread) || thread_is_stopped(thread),
1687                      &lwi);
1688         ll_sai_put(sai);
1689
1690         /*
1691          * We don't stat-ahead for the first dirent since we are already in
1692          * lookup.
1693          */
1694         return -EAGAIN;
1695
1696 out:
1697         kfree(sai);
1698         spin_lock(&lli->lli_sa_lock);
1699         lli->lli_opendir_key = NULL;
1700         lli->lli_opendir_pid = 0;
1701         spin_unlock(&lli->lli_sa_lock);
1702         return rc;
1703 }