/*
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.gnu.org/licenses/gpl-2.0.html
 */
/*
 * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
 * Use is subject to license terms.
 *
 * Copyright (c) 2011, 2015, Intel Corporation.
 */
/*
 * This file is part of Lustre, http://www.lustre.org/
 * Lustre is a trademark of Sun Microsystems, Inc.
 */

#include <linux/sched.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>

#define DEBUG_SUBSYSTEM S_LLITE

#include "../include/obd_support.h"
#include "../include/lustre_lite.h"
#include "../include/lustre_dlm.h"
#include "llite_internal.h"

#define SA_OMITTED_ENTRY_MAX 8ULL

enum se_stat {
	/** negative values are for error cases */
	SA_ENTRY_INIT = 0,	/** init entry */
	SA_ENTRY_SUCC = 1,	/** stat succeed */
	SA_ENTRY_INVA = 2,	/** invalid entry */
	SA_ENTRY_DEST = 3,	/** entry to be destroyed */
};

struct ll_sa_entry {
	/* link into sai->sai_entries */
	struct list_head	se_link;
	/* link into sai->sai_entries_{received,stated} */
	struct list_head	se_list;
	/* link into sai hash table locally */
	struct list_head	se_hash;
	/* entry reference count */
	atomic_t		se_refcount;
	/* entry index in the sai */
	__u64			se_index;
	/* low layer ldlm lock handle */
	__u64			se_handle;
	/* entry status */
	enum se_stat		se_stat;
	/* entry size, contains name */
	int			se_size;
	/* pointer to async getattr enqueue info */
	struct md_enqueue_info	*se_minfo;
	/* pointer to the async getattr request */
	struct ptlrpc_request	*se_req;
	/* pointer to the target inode */
	struct inode		*se_inode;
	/* entry name */
	struct qstr		se_qstr;
};

static unsigned int sai_generation;
static DEFINE_SPINLOCK(sai_generation_lock);

/*
 * The entry can only be released by the caller; it is necessary to hold the
 * lock.
 */
static inline int ll_sa_entry_stated(struct ll_sa_entry *entry)
{
	return (entry->se_stat != SA_ENTRY_INIT);
}

static inline int ll_sa_entry_hash(int val)
{
	return val & LL_SA_CACHE_MASK;
}

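/*
 * Annotation (not part of the original file): this assumes LL_SA_CACHE_MASK
 * is LL_SA_CACHE_SIZE - 1 with a power-of-two number of buckets, so masking
 * the qstr hash simply selects one of the sai_cache[]/sai_cache_lock[]
 * buckets used by ll_sa_entry_enhash()/ll_sa_entry_unhash() below.
 */
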
/*
 * Insert entry into the SA hash table.
 */
static inline void
ll_sa_entry_enhash(struct ll_statahead_info *sai, struct ll_sa_entry *entry)
{
	int i = ll_sa_entry_hash(entry->se_qstr.hash);

	spin_lock(&sai->sai_cache_lock[i]);
	list_add_tail(&entry->se_hash, &sai->sai_cache[i]);
	spin_unlock(&sai->sai_cache_lock[i]);
}

/*
 * Remove entry from the SA hash table.
 */
static inline void
ll_sa_entry_unhash(struct ll_statahead_info *sai, struct ll_sa_entry *entry)
{
	int i = ll_sa_entry_hash(entry->se_qstr.hash);

	spin_lock(&sai->sai_cache_lock[i]);
	list_del_init(&entry->se_hash);
	spin_unlock(&sai->sai_cache_lock[i]);
}

static inline int agl_should_run(struct ll_statahead_info *sai,
				 struct inode *inode)
{
	return (inode && S_ISREG(inode->i_mode) && sai->sai_agl_valid);
}

static inline int sa_sent_full(struct ll_statahead_info *sai)
{
	return atomic_read(&sai->sai_cache_count) >= sai->sai_max;
}

static inline int sa_received_empty(struct ll_statahead_info *sai)
{
	return list_empty(&sai->sai_entries_received);
}

static inline int agl_list_empty(struct ll_statahead_info *sai)
{
	return list_empty(&sai->sai_entries_agl);
}

/**
 * If either
 * (1) the hit ratio is less than 80%, or
 * (2) there are more than 8 consecutive misses,
 * then the hit rate is considered low.
 */
static inline int sa_low_hit(struct ll_statahead_info *sai)
{
	return ((sai->sai_hit > 7 && sai->sai_hit < 4 * sai->sai_miss) ||
		(sai->sai_consecutive_miss > 8));
}

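/*
 * Annotation (not part of the original file): the first clause encodes the
 * "hit ratio below 80%" rule. Once there are more than 7 hits,
 * sai_hit < 4 * sai_miss is equivalent to hit / (hit + miss) < 4/5;
 * for example 30 hits and 10 misses (75%) count as a low hit rate,
 * while 40 hits and 10 misses (80%) do not.
 */
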
/*
 * If the given index falls behind the statahead window by more than
 * SA_OMITTED_ENTRY_MAX, then it is old.
 */
static inline int is_omitted_entry(struct ll_statahead_info *sai, __u64 index)
{
	return ((__u64)sai->sai_max + index + SA_OMITTED_ENTRY_MAX <
		sai->sai_index);
}

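/*
 * Annotation (not part of the original file): sai_index is the running index
 * the scanner will assign to the next entry and sai_max is the current window
 * size. With sai_max = 32 and sai_index = 100, for instance, any entry index
 * below 60 (since 60 + 32 + 8 == 100) is treated as omitted and may be
 * reclaimed by ll_sa_entry_fini().
 */
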
/*
 * Insert it into the sai_entries tail when initialized.
 */
static struct ll_sa_entry *
ll_sa_entry_alloc(struct dentry *parent,
		  struct ll_statahead_info *sai, __u64 index,
		  const char *name, int len)
{
	struct ll_inode_info *lli;
	struct ll_sa_entry *entry;
	int entry_size;
	char *dname;

	entry_size = sizeof(struct ll_sa_entry) + (len & ~3) + 4;
	entry = kzalloc(entry_size, GFP_NOFS);
	if (unlikely(!entry))
		return ERR_PTR(-ENOMEM);

	CDEBUG(D_READA, "alloc sa entry %.*s(%p) index %llu\n",
	       len, name, entry, index);

	entry->se_index = index;

	/*
	 * Statahead entry reference rules:
	 *
	 * 1) When a statahead entry is initialized, its reference is set to 2.
	 *    One reference is used by the directory scanner. When the scanner
	 *    searches the statahead cache for the given name, it can perform a
	 *    lockless hash lookup (only the scanner can remove the entry from
	 *    the hash list), and once found, it does not need to call
	 *    "atomic_inc()" for the entry reference, which improves
	 *    performance. After using the statahead entry, the scanner calls
	 *    "atomic_dec()" to drop the reference held since initialization.
	 *    If it is the last reference, the statahead entry is freed.
	 *
	 * 2) All other threads, including the statahead thread and ptlrpcd
	 *    threads, must hold a reference on the entry while they process
	 *    it, to guarantee that it is not released by the directory
	 *    scanner. After processing the entry, these threads drop the
	 *    entry reference. If it is the last reference, the entry is freed.
	 *
	 * The second reference taken at initialization is used by the
	 * statahead thread, following rule 2).
	 */
	atomic_set(&entry->se_refcount, 2);
	entry->se_stat = SA_ENTRY_INIT;
	entry->se_size = entry_size;
	dname = (char *)entry + sizeof(struct ll_sa_entry);
	memcpy(dname, name, len);

	entry->se_qstr.hash = full_name_hash(parent, name, len);
	entry->se_qstr.len = len;
	entry->se_qstr.name = dname;

	lli = ll_i2info(sai->sai_inode);
	spin_lock(&lli->lli_sa_lock);
	list_add_tail(&entry->se_link, &sai->sai_entries);
	INIT_LIST_HEAD(&entry->se_list);
	ll_sa_entry_enhash(sai, entry);
	spin_unlock(&lli->lli_sa_lock);

	atomic_inc(&sai->sai_cache_count);

	return entry;
}

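/*
 * Annotation (not part of the original file): a sketch of one typical entry
 * lifetime under the reference rules above, assuming the async getattr reply
 * arrives before the scanner asks for the name:
 *
 *   ll_sa_entry_alloc()              refcount = 2
 *   ll_statahead_one()               -1 once the async RPC has been issued
 *   ll_statahead_interpret()         +1/-1 while queueing the reply
 *   ll_post_statahead()              +1/-1 while moving the entry to "stated"
 *   scanner via ll_sai_unplug()      -1, last reference, the entry is freed
 */
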
/*
 * Used by the directory scanner to search for an entry by name.
 *
 * Only the caller can remove the entry from the hash, so it is unnecessary to
 * hold the hash lock. It is the caller's duty to release the init refcount on
 * the entry, so it is also unnecessary to increase the refcount on the entry.
 */
static struct ll_sa_entry *
ll_sa_entry_get_byname(struct ll_statahead_info *sai, const struct qstr *qstr)
{
	struct ll_sa_entry *entry;
	int i = ll_sa_entry_hash(qstr->hash);

	list_for_each_entry(entry, &sai->sai_cache[i], se_hash) {
		if (entry->se_qstr.hash == qstr->hash &&
		    entry->se_qstr.len == qstr->len &&
		    memcmp(entry->se_qstr.name, qstr->name, qstr->len) == 0)
			return entry;
	}
	return NULL;
}

/*
 * Used by the async getattr request callback to find an entry by index.
 *
 * Called inside lli_sa_lock to prevent others from changing the list during
 * the search. It needs to increase the entry refcount before returning to
 * guarantee that the entry cannot be freed by others.
 */
static struct ll_sa_entry *
ll_sa_entry_get_byindex(struct ll_statahead_info *sai, __u64 index)
{
	struct ll_sa_entry *entry;

	list_for_each_entry(entry, &sai->sai_entries, se_link) {
		if (entry->se_index == index) {
			LASSERT(atomic_read(&entry->se_refcount) > 0);
			atomic_inc(&entry->se_refcount);
			return entry;
		}
		if (entry->se_index > index)
			break;
	}
	return NULL;
}

static void ll_sa_entry_cleanup(struct ll_statahead_info *sai,
				struct ll_sa_entry *entry)
{
	struct md_enqueue_info *minfo = entry->se_minfo;
	struct ptlrpc_request *req = entry->se_req;

	if (minfo) {
		entry->se_minfo = NULL;
		ll_intent_release(&minfo->mi_it);
		iput(minfo->mi_dir);
		kfree(minfo);
	}

	if (req) {
		entry->se_req = NULL;
		ptlrpc_req_finished(req);
	}
}

static void ll_sa_entry_put(struct ll_statahead_info *sai,
			    struct ll_sa_entry *entry)
{
	if (atomic_dec_and_test(&entry->se_refcount)) {
		CDEBUG(D_READA, "free sa entry %.*s(%p) index %llu\n",
		       entry->se_qstr.len, entry->se_qstr.name, entry,
		       entry->se_index);

		LASSERT(list_empty(&entry->se_link));
		LASSERT(list_empty(&entry->se_list));
		LASSERT(list_empty(&entry->se_hash));

		ll_sa_entry_cleanup(sai, entry);
		iput(entry->se_inode);

		kfree(entry);
		atomic_dec(&sai->sai_cache_count);
	}
}

static inline void
do_sa_entry_fini(struct ll_statahead_info *sai, struct ll_sa_entry *entry)
{
	struct ll_inode_info *lli = ll_i2info(sai->sai_inode);

	LASSERT(!list_empty(&entry->se_hash));
	LASSERT(!list_empty(&entry->se_link));

	ll_sa_entry_unhash(sai, entry);

	spin_lock(&lli->lli_sa_lock);
	entry->se_stat = SA_ENTRY_DEST;
	list_del_init(&entry->se_link);
	if (likely(!list_empty(&entry->se_list)))
		list_del_init(&entry->se_list);
	spin_unlock(&lli->lli_sa_lock);

	ll_sa_entry_put(sai, entry);
}

/*
 * Delete it from the sai_entries_stated list when finished.
 */
static void
ll_sa_entry_fini(struct ll_statahead_info *sai, struct ll_sa_entry *entry)
{
	struct ll_sa_entry *pos, *next;

	if (entry)
		do_sa_entry_fini(sai, entry);

	/* drop old entries; only the 'scanner' process does this, no need to lock */
	list_for_each_entry_safe(pos, next, &sai->sai_entries, se_link) {
		if (!is_omitted_entry(sai, pos->se_index))
			break;
		do_sa_entry_fini(sai, pos);
	}
}

/*
 * Called inside lli_sa_lock.
 */
static void
do_sa_entry_to_stated(struct ll_statahead_info *sai,
		      struct ll_sa_entry *entry, enum se_stat stat)
{
	struct ll_sa_entry *se;
	struct list_head *pos = &sai->sai_entries_stated;

	if (!list_empty(&entry->se_list))
		list_del_init(&entry->se_list);

	list_for_each_entry_reverse(se, &sai->sai_entries_stated, se_list) {
		if (se->se_index < entry->se_index) {
			pos = &se->se_list;
			break;
		}
	}

	list_add(&entry->se_list, pos);
	entry->se_stat = stat;
}

/*
 * Move the entry to sai_entries_stated, keeping the list sorted by index.
 * \retval 1 -- entry to be destroyed.
 * \retval 0 -- entry is inserted into the stated list.
 */
static int
ll_sa_entry_to_stated(struct ll_statahead_info *sai,
		      struct ll_sa_entry *entry, enum se_stat stat)
{
	struct ll_inode_info *lli = ll_i2info(sai->sai_inode);
	int ret = 1;

	ll_sa_entry_cleanup(sai, entry);

	spin_lock(&lli->lli_sa_lock);
	if (likely(entry->se_stat != SA_ENTRY_DEST)) {
		do_sa_entry_to_stated(sai, entry, stat);
		ret = 0;
	}
	spin_unlock(&lli->lli_sa_lock);

	return ret;
}

/*
 * Insert the inode into the sai_entries_agl list.
 */
static void ll_agl_add(struct ll_statahead_info *sai,
		       struct inode *inode, int index)
{
	struct ll_inode_info *child  = ll_i2info(inode);
	struct ll_inode_info *parent = ll_i2info(sai->sai_inode);
	int added = 0;

	spin_lock(&child->lli_agl_lock);
	if (child->lli_agl_index == 0) {
		child->lli_agl_index = index;
		spin_unlock(&child->lli_agl_lock);

		LASSERT(list_empty(&child->lli_agl_list));

		igrab(inode);
		spin_lock(&parent->lli_agl_lock);
		if (list_empty(&sai->sai_entries_agl))
			added = 1;
		list_add_tail(&child->lli_agl_list, &sai->sai_entries_agl);
		spin_unlock(&parent->lli_agl_lock);
	} else {
		spin_unlock(&child->lli_agl_lock);
	}

	if (added > 0)
		wake_up(&sai->sai_agl_thread.t_ctl_waitq);
}

static struct ll_statahead_info *ll_sai_alloc(void)
{
	struct ll_statahead_info *sai;
	int i;

	sai = kzalloc(sizeof(*sai), GFP_NOFS);
	if (!sai)
		return NULL;

	atomic_set(&sai->sai_refcount, 1);

	spin_lock(&sai_generation_lock);
	sai->sai_generation = ++sai_generation;
	if (unlikely(sai_generation == 0))
		sai->sai_generation = ++sai_generation;
	spin_unlock(&sai_generation_lock);

	sai->sai_max = LL_SA_RPC_MIN;

	init_waitqueue_head(&sai->sai_waitq);
	init_waitqueue_head(&sai->sai_thread.t_ctl_waitq);
	init_waitqueue_head(&sai->sai_agl_thread.t_ctl_waitq);

	INIT_LIST_HEAD(&sai->sai_entries);
	INIT_LIST_HEAD(&sai->sai_entries_received);
	INIT_LIST_HEAD(&sai->sai_entries_stated);
	INIT_LIST_HEAD(&sai->sai_entries_agl);

	for (i = 0; i < LL_SA_CACHE_SIZE; i++) {
		INIT_LIST_HEAD(&sai->sai_cache[i]);
		spin_lock_init(&sai->sai_cache_lock[i]);
	}
	atomic_set(&sai->sai_cache_count, 0);

	return sai;
}

static inline struct ll_statahead_info *
ll_sai_get(struct ll_statahead_info *sai)
{
	atomic_inc(&sai->sai_refcount);
	return sai;
}

static void ll_sai_put(struct ll_statahead_info *sai)
{
	struct inode *inode = sai->sai_inode;
	struct ll_inode_info *lli = ll_i2info(inode);

	if (atomic_dec_and_lock(&sai->sai_refcount, &lli->lli_sa_lock)) {
		struct ll_sa_entry *entry, *next;

		if (unlikely(atomic_read(&sai->sai_refcount) > 0)) {
			/* It is a race case; the interpret callback just
			 * holds a reference count.
			 */
			spin_unlock(&lli->lli_sa_lock);
			return;
		}

		LASSERT(!lli->lli_opendir_key);
		LASSERT(thread_is_stopped(&sai->sai_thread));
		LASSERT(thread_is_stopped(&sai->sai_agl_thread));

		lli->lli_sai = NULL;
		lli->lli_opendir_pid = 0;
		spin_unlock(&lli->lli_sa_lock);

		if (sai->sai_sent > sai->sai_replied)
			CDEBUG(D_READA, "statahead for dir "DFID
			       " does not finish: [sent:%llu] [replied:%llu]\n",
			       PFID(&lli->lli_fid),
			       sai->sai_sent, sai->sai_replied);

		list_for_each_entry_safe(entry, next, &sai->sai_entries,
					 se_link)
			do_sa_entry_fini(sai, entry);

		LASSERT(list_empty(&sai->sai_entries));
		LASSERT(list_empty(&sai->sai_entries_received));
		LASSERT(list_empty(&sai->sai_entries_stated));

		LASSERT(atomic_read(&sai->sai_cache_count) == 0);
		LASSERT(list_empty(&sai->sai_entries_agl));

		iput(inode);
		kfree(sai);
	}
}

/* Do NOT forget to drop the inode refcount when it goes into sai_entries_agl. */
static void ll_agl_trigger(struct inode *inode, struct ll_statahead_info *sai)
{
	struct ll_inode_info *lli = ll_i2info(inode);
	__u64 index = lli->lli_agl_index;
	int rc;

	LASSERT(list_empty(&lli->lli_agl_list));

	/* AGL may fall behind statahead by one entry */
	if (is_omitted_entry(sai, index + 1)) {
		lli->lli_agl_index = 0;
		iput(inode);
		return;
	}

	/* Someone is in glimpse (sync or async), do nothing. */
	rc = down_write_trylock(&lli->lli_glimpse_sem);
	if (rc == 0) {
		lli->lli_agl_index = 0;
		iput(inode);
		return;
	}

	/*
	 * Someone triggered a glimpse within the last second.
	 * 1) The former glimpse succeeded with a glimpse lock granted by the
	 *    OST; if the lock is still cached on the client, AGL needs to do
	 *    nothing. If it was cancelled by another client, AGL may be unable
	 *    to obtain a new lock because no glimpse callback is triggered by
	 *    AGL.
	 * 2) The former glimpse succeeded, but the OST did not grant a glimpse
	 *    lock. In that case it is quite possible that the OST will not
	 *    grant a glimpse lock for AGL either.
	 * 3) The former glimpse failed; compared with the other two cases this
	 *    is relatively rare. AGL can ignore such a case, and it will not
	 *    much affect performance.
	 */
	if (lli->lli_glimpse_time != 0 &&
	    time_before(cfs_time_shift(-1), lli->lli_glimpse_time)) {
		up_write(&lli->lli_glimpse_sem);
		lli->lli_agl_index = 0;
		iput(inode);
		return;
	}

	CDEBUG(D_READA, "Handling (init) async glimpse: inode = "
	       DFID", idx = %llu\n", PFID(&lli->lli_fid), index);

	cl_agl(inode);
	lli->lli_agl_index = 0;
	lli->lli_glimpse_time = cfs_time_current();
	up_write(&lli->lli_glimpse_sem);

	CDEBUG(D_READA, "Handled (init) async glimpse: inode= "
	       DFID", idx = %llu, rc = %d\n",
	       PFID(&lli->lli_fid), index, rc);

	iput(inode);
}

static void ll_post_statahead(struct ll_statahead_info *sai)
{
	struct inode *dir = sai->sai_inode;
	struct inode *child;
	struct ll_inode_info *lli = ll_i2info(dir);
	struct ll_sa_entry *entry;
	struct md_enqueue_info *minfo;
	struct lookup_intent *it;
	struct ptlrpc_request *req;
	struct mdt_body *body;
	int rc = 0;

	spin_lock(&lli->lli_sa_lock);
	if (unlikely(list_empty(&sai->sai_entries_received))) {
		spin_unlock(&lli->lli_sa_lock);
		return;
	}
	entry = list_entry(sai->sai_entries_received.next,
			   struct ll_sa_entry, se_list);
	atomic_inc(&entry->se_refcount);
	list_del_init(&entry->se_list);
	spin_unlock(&lli->lli_sa_lock);

	LASSERT(entry->se_handle != 0);

	minfo = entry->se_minfo;
	it = &minfo->mi_it;
	req = entry->se_req;
	body = req_capsule_server_get(&req->rq_pill, &RMF_MDT_BODY);
	if (!body) {
		rc = -EFAULT;
		goto out;
	}

	child = entry->se_inode;
	if (!child) {
		/*
		 * lookup.
		 */
		LASSERT(fid_is_zero(&minfo->mi_data.op_fid2));

		/* XXX: No fid in reply, this is probably a cross-ref case.
		 * SA can't handle it yet.
		 */
		if (body->valid & OBD_MD_MDS) {
			rc = -EAGAIN;
			goto out;
		}
	} else {
		/*
		 * revalidate.
		 */
		/* unlinked and re-created with the same name */
		if (unlikely(!lu_fid_eq(&minfo->mi_data.op_fid2, &body->fid1))) {
			entry->se_inode = NULL;
			iput(child);
			child = NULL;
		}
	}

	it->it_lock_handle = entry->se_handle;
	rc = md_revalidate_lock(ll_i2mdexp(dir), it, ll_inode2fid(dir), NULL);
	if (rc != 1) {
		rc = -EAGAIN;
		goto out;
	}

	rc = ll_prep_inode(&child, req, dir->i_sb, it);
	if (rc)
		goto out;

	CDEBUG(D_DLMTRACE, "%s: setting l_data to inode "DFID"%p\n",
	       ll_get_fsname(child->i_sb, NULL, 0),
	       PFID(ll_inode2fid(child)), child);
	ll_set_lock_data(ll_i2sbi(dir)->ll_md_exp, child, it, NULL);

	entry->se_inode = child;

	if (agl_should_run(sai, child))
		ll_agl_add(sai, child, entry->se_index);

out:
	/* ll_sa_entry_to_stated() drops the related ldlm ibits lock reference
	 * by calling ll_intent_drop_lock(), whether or not the operations
	 * above failed. Do not worry about calling ll_intent_drop_lock() more
	 * than once.
	 */
	rc = ll_sa_entry_to_stated(sai, entry,
				   rc < 0 ? SA_ENTRY_INVA : SA_ENTRY_SUCC);
	if (rc == 0 && entry->se_index == sai->sai_index_wait)
		wake_up(&sai->sai_waitq);
	ll_sa_entry_put(sai, entry);
}

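/*
 * Annotation (not part of the original file): ll_statahead_interpret() below
 * runs in ptlrpcd context and only queues the reply on sai_entries_received
 * before waking the statahead thread; that thread then calls
 * ll_post_statahead() to instantiate the inode from the reply and move the
 * entry to sai_entries_stated, where do_statahead_enter() can consume it.
 */
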
static int ll_statahead_interpret(struct ptlrpc_request *req,
				  struct md_enqueue_info *minfo, int rc)
{
	struct lookup_intent *it = &minfo->mi_it;
	struct inode *dir = minfo->mi_dir;
	struct ll_inode_info *lli = ll_i2info(dir);
	struct ll_statahead_info *sai = NULL;
	struct ll_sa_entry *entry;
	__u64 handle = 0;
	int wakeup;

	if (it_disposition(it, DISP_LOOKUP_NEG))
		rc = -ENOENT;

	if (rc == 0) {
		/* release ibits lock ASAP to avoid deadlock when statahead
		 * thread enqueues lock on parent in readdir and another
		 * process enqueues lock on child with parent lock held, eg.
		 * unlink.
		 */
		handle = it->it_lock_handle;
		ll_intent_drop_lock(it);
	}

	spin_lock(&lli->lli_sa_lock);
	/* stale statahead context */
	if (unlikely(!lli->lli_sai ||
		     lli->lli_sai->sai_generation != minfo->mi_generation)) {
		spin_unlock(&lli->lli_sa_lock);
		rc = -EIDRM;
		goto out;
	}

	sai = ll_sai_get(lli->lli_sai);
	if (unlikely(!thread_is_running(&sai->sai_thread))) {
		sai->sai_replied++;
		spin_unlock(&lli->lli_sa_lock);
		rc = -EBADFD;
		goto out;
	}

	entry = ll_sa_entry_get_byindex(sai, minfo->mi_cbdata);
	if (!entry) {
		sai->sai_replied++;
		spin_unlock(&lli->lli_sa_lock);
		rc = -EIDRM;
		goto out;
	}

	if (rc != 0) {
		do_sa_entry_to_stated(sai, entry, SA_ENTRY_INVA);
		wakeup = (entry->se_index == sai->sai_index_wait);
	} else {
		entry->se_minfo = minfo;
		entry->se_req = ptlrpc_request_addref(req);
		/* Release the async ibits lock ASAP to avoid deadlock
		 * when the statahead thread tries to enqueue a lock on the
		 * parent for readpage and another thread tries to enqueue a
		 * lock on the child with the parent's lock held, for example:
		 * unlink.
		 */
		entry->se_handle = handle;
		wakeup = list_empty(&sai->sai_entries_received);
		list_add_tail(&entry->se_list,
			      &sai->sai_entries_received);
	}
	sai->sai_replied++;
	spin_unlock(&lli->lli_sa_lock);

	ll_sa_entry_put(sai, entry);
	if (wakeup)
		wake_up(&sai->sai_thread.t_ctl_waitq);

out:
	if (rc != 0) {
		ll_intent_release(it);
		iput(dir);
		kfree(minfo);
	}
	if (sai)
		ll_sai_put(sai);
	return rc;
}

static void sa_args_fini(struct md_enqueue_info *minfo,
			 struct ldlm_enqueue_info *einfo)
{
	LASSERT(minfo && einfo);
	iput(minfo->mi_dir);
	kfree(minfo);
	kfree(einfo);
}

/**
 * prepare arguments for the async stat RPC.
 */
static int sa_args_init(struct inode *dir, struct inode *child,
			struct ll_sa_entry *entry, struct md_enqueue_info **pmi,
			struct ldlm_enqueue_info **pei)
{
	const struct qstr *qstr = &entry->se_qstr;
	struct ll_inode_info *lli = ll_i2info(dir);
	struct md_enqueue_info *minfo;
	struct ldlm_enqueue_info *einfo;
	struct md_op_data *op_data;

	einfo = kzalloc(sizeof(*einfo), GFP_NOFS);
	if (!einfo)
		return -ENOMEM;

	minfo = kzalloc(sizeof(*minfo), GFP_NOFS);
	if (!minfo) {
		kfree(einfo);
		return -ENOMEM;
	}

	op_data = ll_prep_md_op_data(&minfo->mi_data, dir, child, qstr->name,
				     qstr->len, 0, LUSTRE_OPC_ANY, NULL);
	if (IS_ERR(op_data)) {
		kfree(einfo);
		kfree(minfo);
		return PTR_ERR(op_data);
	}

	minfo->mi_it.it_op = IT_GETATTR;
	minfo->mi_dir = igrab(dir);
	minfo->mi_cb = ll_statahead_interpret;
	minfo->mi_generation = lli->lli_sai->sai_generation;
	minfo->mi_cbdata = entry->se_index;

	einfo->ei_type   = LDLM_IBITS;
	einfo->ei_mode   = it_to_lock_mode(&minfo->mi_it);
	einfo->ei_cb_bl  = ll_md_blocking_ast;
	einfo->ei_cb_cp  = ldlm_completion_ast;
	einfo->ei_cb_gl  = NULL;
	einfo->ei_cbdata = NULL;

	*pmi = minfo;
	*pei = einfo;

	return 0;
}

static int do_sa_lookup(struct inode *dir, struct ll_sa_entry *entry)
{
	struct md_enqueue_info *minfo;
	struct ldlm_enqueue_info *einfo;
	int rc;

	rc = sa_args_init(dir, NULL, entry, &minfo, &einfo);
	if (rc)
		return rc;

	rc = md_intent_getattr_async(ll_i2mdexp(dir), minfo, einfo);
	if (rc < 0)
		sa_args_fini(minfo, einfo);

	return rc;
}

/**
 * similar to ll_revalidate_it().
 * \retval 1      -- dentry valid
 * \retval 0      -- will send stat-ahead request
 * \retval others -- prepare stat-ahead request failed
 */
static int do_sa_revalidate(struct inode *dir, struct ll_sa_entry *entry,
			    struct dentry *dentry)
{
	struct inode *inode = d_inode(dentry);
	struct lookup_intent it = { .it_op = IT_GETATTR,
				    .it_lock_handle = 0 };
	struct md_enqueue_info *minfo;
	struct ldlm_enqueue_info *einfo;
	int rc;

	if (unlikely(!inode))
		return 1;

	if (d_mountpoint(dentry))
		return 1;

	entry->se_inode = igrab(inode);
	rc = md_revalidate_lock(ll_i2mdexp(dir), &it, ll_inode2fid(inode),
				NULL);
	if (rc == 1) {
		entry->se_handle = it.it_lock_handle;
		ll_intent_release(&it);
		return 1;
	}

	rc = sa_args_init(dir, inode, entry, &minfo, &einfo);
	if (rc) {
		entry->se_inode = NULL;
		iput(inode);
		return rc;
	}

	rc = md_intent_getattr_async(ll_i2mdexp(dir), minfo, einfo);
	if (rc < 0) {
		entry->se_inode = NULL;
		iput(inode);
		sa_args_fini(minfo, einfo);
	}

	return rc;
}

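/*
 * Annotation (not part of the original file): ll_statahead_one() below picks
 * between the two helpers above. A name with no cached dentry goes through
 * do_sa_lookup() (an async getattr-by-name intent), while a name that already
 * has a dentry goes through do_sa_revalidate(), which first tries to reuse a
 * cached ibits lock and only falls back to an async getattr when that fails.
 */
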
static void ll_statahead_one(struct dentry *parent, const char *entry_name,
			     int entry_name_len)
{
	struct inode *dir = d_inode(parent);
	struct ll_inode_info *lli = ll_i2info(dir);
	struct ll_statahead_info *sai = lli->lli_sai;
	struct dentry *dentry = NULL;
	struct ll_sa_entry *entry;
	int rc;
	int rc1;

	entry = ll_sa_entry_alloc(parent, sai, sai->sai_index, entry_name,
				  entry_name_len);
	if (IS_ERR(entry))
		return;

	dentry = d_lookup(parent, &entry->se_qstr);
	if (!dentry) {
		rc = do_sa_lookup(dir, entry);
	} else {
		rc = do_sa_revalidate(dir, entry, dentry);
		if (rc == 1 && agl_should_run(sai, d_inode(dentry)))
			ll_agl_add(sai, d_inode(dentry), entry->se_index);
	}

	if (dentry)
		dput(dentry);

	if (rc) {
		rc1 = ll_sa_entry_to_stated(sai, entry,
					    rc < 0 ? SA_ENTRY_INVA : SA_ENTRY_SUCC);
		if (rc1 == 0 && entry->se_index == sai->sai_index_wait)
			wake_up(&sai->sai_waitq);
	} else {
		sai->sai_sent++;
	}

	sai->sai_index++;
	/* drop one refcount on the entry taken by ll_sa_entry_alloc */
	ll_sa_entry_put(sai, entry);
}

static int ll_agl_thread(void *arg)
{
	struct dentry *parent = arg;
	struct inode *dir = d_inode(parent);
	struct ll_inode_info *plli = ll_i2info(dir);
	struct ll_inode_info *clli;
	struct ll_sb_info *sbi = ll_i2sbi(dir);
	struct ll_statahead_info *sai = ll_sai_get(plli->lli_sai);
	struct ptlrpc_thread *thread = &sai->sai_agl_thread;
	struct l_wait_info lwi = { 0 };

	thread->t_pid = current_pid();
	CDEBUG(D_READA, "agl thread started: sai %p, parent %pd\n",
	       sai, parent);

	atomic_inc(&sbi->ll_agl_total);
	spin_lock(&plli->lli_agl_lock);
	sai->sai_agl_valid = 1;
	if (thread_is_init(thread))
		/* If someone else has changed the thread state
		 * (e.g. already changed to SVC_STOPPING), we can't just
		 * blindly overwrite that setting.
		 */
		thread_set_flags(thread, SVC_RUNNING);
	spin_unlock(&plli->lli_agl_lock);
	wake_up(&thread->t_ctl_waitq);

	while (1) {
		l_wait_event(thread->t_ctl_waitq,
			     !list_empty(&sai->sai_entries_agl) ||
			     !thread_is_running(thread),
			     &lwi);

		if (!thread_is_running(thread))
			break;

		spin_lock(&plli->lli_agl_lock);
		/* The statahead thread may help to process AGL entries,
		 * so check whether the list is empty again.
		 */
		if (!list_empty(&sai->sai_entries_agl)) {
			clli = list_entry(sai->sai_entries_agl.next,
					  struct ll_inode_info, lli_agl_list);
			list_del_init(&clli->lli_agl_list);
			spin_unlock(&plli->lli_agl_lock);
			ll_agl_trigger(&clli->lli_vfs_inode, sai);
		} else {
			spin_unlock(&plli->lli_agl_lock);
		}
	}

	spin_lock(&plli->lli_agl_lock);
	sai->sai_agl_valid = 0;
	while (!list_empty(&sai->sai_entries_agl)) {
		clli = list_entry(sai->sai_entries_agl.next,
				  struct ll_inode_info, lli_agl_list);
		list_del_init(&clli->lli_agl_list);
		spin_unlock(&plli->lli_agl_lock);
		clli->lli_agl_index = 0;
		iput(&clli->lli_vfs_inode);
		spin_lock(&plli->lli_agl_lock);
	}
	thread_set_flags(thread, SVC_STOPPED);
	spin_unlock(&plli->lli_agl_lock);
	wake_up(&thread->t_ctl_waitq);
	ll_sai_put(sai);
	CDEBUG(D_READA, "agl thread stopped: sai %p, parent %pd\n",
	       sai, parent);
	return 0;
}

static void ll_start_agl(struct dentry *parent, struct ll_statahead_info *sai)
{
	struct ptlrpc_thread *thread = &sai->sai_agl_thread;
	struct l_wait_info lwi = { 0 };
	struct ll_inode_info *plli;
	struct task_struct *task;

	CDEBUG(D_READA, "start agl thread: sai %p, parent %pd\n",
	       sai, parent);

	plli = ll_i2info(d_inode(parent));
	task = kthread_run(ll_agl_thread, parent, "ll_agl_%u",
			   plli->lli_opendir_pid);
	if (IS_ERR(task)) {
		CERROR("can't start ll_agl thread, rc: %ld\n", PTR_ERR(task));
		thread_set_flags(thread, SVC_STOPPED);
		return;
	}

	l_wait_event(thread->t_ctl_waitq,
		     thread_is_running(thread) || thread_is_stopped(thread),
		     &lwi);
}

static int ll_statahead_thread(void *arg)
{
	struct dentry *parent = arg;
	struct inode *dir = d_inode(parent);
	struct ll_inode_info *plli = ll_i2info(dir);
	struct ll_inode_info *clli;
	struct ll_sb_info *sbi = ll_i2sbi(dir);
	struct ll_statahead_info *sai = ll_sai_get(plli->lli_sai);
	struct ptlrpc_thread *thread = &sai->sai_thread;
	struct ptlrpc_thread *agl_thread = &sai->sai_agl_thread;
	struct page *page;
	__u64 pos = 0;
	int first = 0;
	int rc = 0;
	struct ll_dir_chain chain;
	struct l_wait_info lwi = { 0 };

	thread->t_pid = current_pid();
	CDEBUG(D_READA, "statahead thread starting: sai %p, parent %pd\n",
	       sai, parent);

	if (sbi->ll_flags & LL_SBI_AGL_ENABLED)
		ll_start_agl(parent, sai);

	atomic_inc(&sbi->ll_sa_total);
	spin_lock(&plli->lli_sa_lock);
	if (thread_is_init(thread))
		/* If someone else has changed the thread state
		 * (e.g. already changed to SVC_STOPPING), we can't just
		 * blindly overwrite that setting.
		 */
		thread_set_flags(thread, SVC_RUNNING);
	spin_unlock(&plli->lli_sa_lock);
	wake_up(&thread->t_ctl_waitq);

	ll_dir_chain_init(&chain);
	page = ll_get_dir_page(dir, pos, &chain);

	while (1) {
		struct lu_dirpage *dp;
		struct lu_dirent  *ent;

		if (IS_ERR(page)) {
			rc = PTR_ERR(page);
			CDEBUG(D_READA, "error reading dir "DFID" at %llu/%llu: [rc %d] [parent %u]\n",
			       PFID(ll_inode2fid(dir)), pos, sai->sai_index,
			       rc, plli->lli_opendir_pid);
			goto out;
		}

		dp = page_address(page);
		for (ent = lu_dirent_start(dp); ent;
		     ent = lu_dirent_next(ent)) {
			__u64 hash;
			int namelen;
			char *name;

			hash = le64_to_cpu(ent->lde_hash);
			if (unlikely(hash < pos))
				/*
				 * Skip until we find target hash value.
				 */
				continue;

			namelen = le16_to_cpu(ent->lde_namelen);
			if (unlikely(namelen == 0))
				/*
				 * Skip dummy record.
				 */
				continue;

			name = ent->lde_name;
			if (name[0] == '.') {
				if (namelen == 1) {
					/*
					 * skip "."
					 */
					continue;
				} else if (name[1] == '.' && namelen == 2) {
					/*
					 * skip ".."
					 */
					continue;
				} else if (!sai->sai_ls_all) {
					/*
					 * skip hidden files.
					 */
					sai->sai_skip_hidden++;
					continue;
				}
			}

			/*
			 * don't stat-ahead the first entry.
			 */
			if (unlikely(++first == 1))
				continue;

keep_it:
			l_wait_event(thread->t_ctl_waitq,
				     !sa_sent_full(sai) ||
				     !list_empty(&sai->sai_entries_received) ||
				     !list_empty(&sai->sai_entries_agl) ||
				     !thread_is_running(thread),
				     &lwi);

interpret_it:
			while (!list_empty(&sai->sai_entries_received))
				ll_post_statahead(sai);

			if (unlikely(!thread_is_running(thread))) {
				ll_release_page(page, 0);
				rc = 0;
				goto out;
			}

			/* If there is no window for metadata statahead, but
			 * there are some AGL entries to be triggered, then try
			 * to help to process the AGL entries.
			 */
			if (sa_sent_full(sai)) {
				spin_lock(&plli->lli_agl_lock);
				while (!list_empty(&sai->sai_entries_agl)) {
					clli = list_entry(sai->sai_entries_agl.next,
							  struct ll_inode_info,
							  lli_agl_list);
					list_del_init(&clli->lli_agl_list);
					spin_unlock(&plli->lli_agl_lock);
					ll_agl_trigger(&clli->lli_vfs_inode,
						       sai);

					if (!list_empty(&sai->sai_entries_received))
						goto interpret_it;

					if (unlikely(
						!thread_is_running(thread))) {
						ll_release_page(page, 0);
						rc = 0;
						goto out;
					}

					if (!sa_sent_full(sai))
						goto do_it;

					spin_lock(&plli->lli_agl_lock);
				}
				spin_unlock(&plli->lli_agl_lock);

				goto keep_it;
			}

do_it:
			ll_statahead_one(parent, name, namelen);
		}
		pos = le64_to_cpu(dp->ldp_hash_end);
		if (pos == MDS_DIR_END_OFF) {
			/*
			 * End of directory reached.
			 */
			ll_release_page(page, 0);
			while (1) {
				l_wait_event(thread->t_ctl_waitq,
					     !list_empty(&sai->sai_entries_received) ||
					     sai->sai_sent == sai->sai_replied ||
					     !thread_is_running(thread),
					     &lwi);

				while (!list_empty(&sai->sai_entries_received))
					ll_post_statahead(sai);

				if (unlikely(!thread_is_running(thread))) {
					rc = 0;
					goto out;
				}

				if (sai->sai_sent == sai->sai_replied &&
				    list_empty(&sai->sai_entries_received))
					break;
			}

			spin_lock(&plli->lli_agl_lock);
			while (!list_empty(&sai->sai_entries_agl) &&
			       thread_is_running(thread)) {
				clli = list_entry(sai->sai_entries_agl.next,
						  struct ll_inode_info,
						  lli_agl_list);
				list_del_init(&clli->lli_agl_list);
				spin_unlock(&plli->lli_agl_lock);
				ll_agl_trigger(&clli->lli_vfs_inode, sai);
				spin_lock(&plli->lli_agl_lock);
			}
			spin_unlock(&plli->lli_agl_lock);

			rc = 0;
			goto out;
		} else if (1) {
			/*
			 * chain is exhausted.
			 * Normal case: continue to the next page.
			 */
			ll_release_page(page, le32_to_cpu(dp->ldp_flags) &
					      LDF_COLLIDE);
			page = ll_get_dir_page(dir, pos, &chain);
		} else {
			LASSERT(le32_to_cpu(dp->ldp_flags) & LDF_COLLIDE);
			ll_release_page(page, 1);
			/*
			 * go into overflow page.
			 */
		}
	}

out:
	if (sai->sai_agl_valid) {
		spin_lock(&plli->lli_agl_lock);
		thread_set_flags(agl_thread, SVC_STOPPING);
		spin_unlock(&plli->lli_agl_lock);
		wake_up(&agl_thread->t_ctl_waitq);

		CDEBUG(D_READA, "stop agl thread: sai %p pid %u\n",
		       sai, (unsigned int)agl_thread->t_pid);
		l_wait_event(agl_thread->t_ctl_waitq,
			     thread_is_stopped(agl_thread),
			     &lwi);
	} else {
		/* Set agl_thread flags anyway. */
		thread_set_flags(&sai->sai_agl_thread, SVC_STOPPED);
	}
	ll_dir_chain_fini(&chain);
	spin_lock(&plli->lli_sa_lock);
	if (!list_empty(&sai->sai_entries_received)) {
		thread_set_flags(thread, SVC_STOPPING);
		spin_unlock(&plli->lli_sa_lock);

		/* To release the resources held by received entries. */
		while (!list_empty(&sai->sai_entries_received))
			ll_post_statahead(sai);

		spin_lock(&plli->lli_sa_lock);
	}
	thread_set_flags(thread, SVC_STOPPED);
	spin_unlock(&plli->lli_sa_lock);
	wake_up(&sai->sai_waitq);
	wake_up(&thread->t_ctl_waitq);
	ll_sai_put(sai);
	dput(parent);
	CDEBUG(D_READA, "statahead thread stopped: sai %p, parent %pd\n",
	       sai, parent);
	return rc;
}

/**
 * called in ll_file_release().
 */
void ll_stop_statahead(struct inode *dir, void *key)
{
	struct ll_inode_info *lli = ll_i2info(dir);

	if (unlikely(!key))
		return;

	spin_lock(&lli->lli_sa_lock);
	if (lli->lli_opendir_key != key || lli->lli_opendir_pid == 0) {
		spin_unlock(&lli->lli_sa_lock);
		return;
	}

	lli->lli_opendir_key = NULL;

	if (lli->lli_sai) {
		struct l_wait_info lwi = { 0 };
		struct ptlrpc_thread *thread = &lli->lli_sai->sai_thread;

		if (!thread_is_stopped(thread)) {
			thread_set_flags(thread, SVC_STOPPING);
			spin_unlock(&lli->lli_sa_lock);
			wake_up(&thread->t_ctl_waitq);

			CDEBUG(D_READA, "stop statahead thread: sai %p pid %u\n",
			       lli->lli_sai, (unsigned int)thread->t_pid);
			l_wait_event(thread->t_ctl_waitq,
				     thread_is_stopped(thread),
				     &lwi);
		} else {
			spin_unlock(&lli->lli_sa_lock);
		}

		/*
		 * Put the reference which was held when first entering
		 * statahead. It may not be the last reference since some
		 * statahead requests may still be in flight.
		 */
		ll_sai_put(lli->lli_sai);
	} else {
		lli->lli_opendir_pid = 0;
		spin_unlock(&lli->lli_sa_lock);
	}
}

enum {
	/**
	 * not first dirent, or is "."
	 */
	LS_NONE_FIRST_DE = 0,
	/**
	 * the first non-hidden dirent
	 */
	LS_FIRST_DE,
	/**
	 * the first hidden dirent, that is "."
	 */
	LS_FIRST_DOT_DE
};

static int is_first_dirent(struct inode *dir, struct dentry *dentry)
{
	struct ll_dir_chain chain;
	const struct qstr *target = &dentry->d_name;
	struct page *page;
	__u64 pos = 0;
	int dot_de;
	int rc = LS_NONE_FIRST_DE;

	ll_dir_chain_init(&chain);
	page = ll_get_dir_page(dir, pos, &chain);

	while (1) {
		struct lu_dirpage *dp;
		struct lu_dirent  *ent;

		if (IS_ERR(page)) {
			struct ll_inode_info *lli = ll_i2info(dir);

			rc = PTR_ERR(page);
			CERROR("error reading dir "DFID" at %llu: [rc %d] [parent %u]\n",
			       PFID(ll_inode2fid(dir)), pos,
			       rc, lli->lli_opendir_pid);
			break;
		}

		dp = page_address(page);
		for (ent = lu_dirent_start(dp); ent;
		     ent = lu_dirent_next(ent)) {
			__u64 hash;
			int namelen;
			char *name;

			hash = le64_to_cpu(ent->lde_hash);
			/* The ll_get_dir_page() can return any page containing
			 * the given hash which may be not the start hash.
			 */
			if (unlikely(hash < pos))
				continue;

			namelen = le16_to_cpu(ent->lde_namelen);
			if (unlikely(namelen == 0))
				/*
				 * skip dummy record.
				 */
				continue;

			name = ent->lde_name;
			if (name[0] == '.') {
				if (namelen == 1)
					/*
					 * skip "."
					 */
					continue;
				else if (name[1] == '.' && namelen == 2)
					/*
					 * skip ".."
					 */
					continue;
				else
					dot_de = 1;
			} else {
				dot_de = 0;
			}

			if (dot_de && target->name[0] != '.') {
				CDEBUG(D_READA, "%.*s skip hidden file %.*s\n",
				       target->len, target->name,
				       namelen, name);
				continue;
			}

			if (target->len != namelen ||
			    memcmp(target->name, name, namelen) != 0)
				rc = LS_NONE_FIRST_DE;
			else if (!dot_de)
				rc = LS_FIRST_DE;
			else
				rc = LS_FIRST_DOT_DE;

			ll_release_page(page, 0);
			goto out;
		}
		pos = le64_to_cpu(dp->ldp_hash_end);
		if (pos == MDS_DIR_END_OFF) {
			/*
			 * End of directory reached.
			 */
			ll_release_page(page, 0);
			break;
		} else if (1) {
			/*
			 * chain is exhausted
			 * Normal case: continue to the next page.
			 */
			ll_release_page(page, le32_to_cpu(dp->ldp_flags) &
					      LDF_COLLIDE);
			page = ll_get_dir_page(dir, pos, &chain);
		} else {
			/*
			 * go into overflow page.
			 */
			LASSERT(le32_to_cpu(dp->ldp_flags) & LDF_COLLIDE);
			ll_release_page(page, 1);
		}
	}

out:
	ll_dir_chain_fini(&chain);
	return rc;
}

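/*
 * Annotation (not part of the original file): is_first_dirent() walks the
 * directory pages to classify the dentry being looked up. LS_FIRST_DE means
 * it is the first visible entry (an "ls"-style scan), LS_FIRST_DOT_DE means
 * it is the first hidden entry (an "ls -a"-style scan), and LS_NONE_FIRST_DE
 * means statahead is not worth starting for this lookup.
 */
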
static void
ll_sai_unplug(struct ll_statahead_info *sai, struct ll_sa_entry *entry)
{
	struct ptlrpc_thread *thread = &sai->sai_thread;
	struct ll_sb_info *sbi = ll_i2sbi(sai->sai_inode);
	int hit;

	if (entry && entry->se_stat == SA_ENTRY_SUCC)
		hit = 1;
	else
		hit = 0;

	ll_sa_entry_fini(sai, entry);
	if (hit) {
		sai->sai_hit++;
		sai->sai_consecutive_miss = 0;
		sai->sai_max = min(2 * sai->sai_max, sbi->ll_sa_max);
	} else {
		struct ll_inode_info *lli = ll_i2info(sai->sai_inode);

		sai->sai_miss++;
		sai->sai_consecutive_miss++;
		if (sa_low_hit(sai) && thread_is_running(thread)) {
			atomic_inc(&sbi->ll_sa_wrong);
			CDEBUG(D_READA, "Statahead for dir " DFID " hit ratio too low: hit/miss %llu/%llu, sent/replied %llu/%llu, stopping statahead thread\n",
			       PFID(&lli->lli_fid), sai->sai_hit,
			       sai->sai_miss, sai->sai_sent,
			       sai->sai_replied);
			spin_lock(&lli->lli_sa_lock);
			if (!thread_is_stopped(thread))
				thread_set_flags(thread, SVC_STOPPING);
			spin_unlock(&lli->lli_sa_lock);
		}
	}

	if (!thread_is_stopped(thread))
		wake_up(&thread->t_ctl_waitq);
}

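/*
 * Annotation (not part of the original file): ll_sai_unplug() is where the
 * statahead window adapts. Every hit doubles sai_max up to the per-mount
 * ll_sa_max limit, every miss bumps the miss counters, and once sa_low_hit()
 * reports a poor ratio the statahead thread is asked to stop, so a workload
 * that is not doing an "ls"-like scan stops paying for the readahead RPCs.
 */
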
/**
 * Start statahead thread if this is the first dir entry.
 * Otherwise if a thread is started already, wait until it is ahead of me.
 * \retval 1       -- find entry with lock in cache, the caller needs to do
 *		      revalidate.
 * \retval 0       -- find entry in cache, but without lock, the caller needs
 *		      refresh from MDS.
 * \retval others  -- the caller need to process as non-statahead.
 */
int do_statahead_enter(struct inode *dir, struct dentry **dentryp,
		       int only_unplug)
{
	struct ll_inode_info *lli = ll_i2info(dir);
	struct ll_statahead_info *sai = lli->lli_sai;
	struct dentry *parent;
	struct ll_sa_entry *entry;
	struct ptlrpc_thread *thread;
	struct l_wait_info lwi = { 0 };
	struct task_struct *task;
	int rc = 0;
	struct ll_inode_info *plli;

	LASSERT(lli->lli_opendir_pid == current_pid());

	if (sai) {
		thread = &sai->sai_thread;
		if (unlikely(thread_is_stopped(thread) &&
			     list_empty(&sai->sai_entries_stated))) {
			/* to release resource */
			ll_stop_statahead(dir, lli->lli_opendir_key);
			return -EAGAIN;
		}

		if ((*dentryp)->d_name.name[0] == '.') {
			if (sai->sai_ls_all ||
			    sai->sai_miss_hidden >= sai->sai_skip_hidden) {
				/*
				 * Hidden dentry is the first one, or statahead
				 * thread does not skip so many hidden dentries
				 * before "sai_ls_all" enabled as below.
				 */
			} else {
				if (!sai->sai_ls_all)
					/*
					 * It may be because the hidden dentry
					 * is not the first one, "sai_ls_all"
					 * was not set, so "ls -al" missed.
					 * Enable "sai_ls_all" for such a case.
					 */
					sai->sai_ls_all = 1;

				/*
				 * Such "getattr" has been skipped before
				 * "sai_ls_all" enabled as above.
				 */
				sai->sai_miss_hidden++;
				return -EAGAIN;
			}
		}

		entry = ll_sa_entry_get_byname(sai, &(*dentryp)->d_name);
		if (!entry || only_unplug) {
			ll_sai_unplug(sai, entry);
			return entry ? 1 : -EAGAIN;
		}

		if (!ll_sa_entry_stated(entry)) {
			sai->sai_index_wait = entry->se_index;
			lwi = LWI_TIMEOUT_INTR(cfs_time_seconds(30), NULL,
					       LWI_ON_SIGNAL_NOOP, NULL);
			rc = l_wait_event(sai->sai_waitq,
					  ll_sa_entry_stated(entry) ||
					  thread_is_stopped(thread),
					  &lwi);
			if (rc < 0) {
				ll_sai_unplug(sai, entry);
				return -EAGAIN;
			}
		}

		if (entry->se_stat == SA_ENTRY_SUCC && entry->se_inode) {
			struct inode *inode = entry->se_inode;
			struct lookup_intent it = { .it_op = IT_GETATTR,
						    .it_lock_handle =
						     entry->se_handle };
			__u64 bits;

			rc = md_revalidate_lock(ll_i2mdexp(dir), &it,
						ll_inode2fid(inode), &bits);
			if (rc == 1) {
				if (!d_inode(*dentryp)) {
					struct dentry *alias;

					alias = ll_splice_alias(inode,
								*dentryp);
					if (IS_ERR(alias)) {
						ll_sai_unplug(sai, entry);
						return PTR_ERR(alias);
					}
					*dentryp = alias;
				} else if (d_inode(*dentryp) != inode) {
					/* revalidate, but inode is recreated */
					CDEBUG(D_READA, "%s: stale dentry %pd inode "DFID", statahead inode "DFID"\n",
					       ll_get_fsname(d_inode(*dentryp)->i_sb, NULL, 0),
					       *dentryp,
					       PFID(ll_inode2fid(d_inode(*dentryp))),
					       PFID(ll_inode2fid(inode)));
					ll_sai_unplug(sai, entry);
					return -ESTALE;
				} else {
					iput(inode);
				}
				entry->se_inode = NULL;

				if ((bits & MDS_INODELOCK_LOOKUP) &&
				    d_lustre_invalid(*dentryp))
					d_lustre_revalidate(*dentryp);
				ll_intent_release(&it);
			}
		}

		ll_sai_unplug(sai, entry);
		return rc;
	}

	/* I am the "lli_opendir_pid" owner, only me can set "lli_sai". */
	rc = is_first_dirent(dir, *dentryp);
	if (rc == LS_NONE_FIRST_DE) {
		/* It is not an "ls -{a}l" operation, no need for statahead. */
		rc = -EAGAIN;
		goto out;
	}

	sai = ll_sai_alloc();
	if (!sai) {
		rc = -ENOMEM;
		goto out;
	}

	sai->sai_ls_all = (rc == LS_FIRST_DOT_DE);
	sai->sai_inode = igrab(dir);
	if (unlikely(!sai->sai_inode)) {
		CWARN("Do not start stat ahead on dying inode "DFID"\n",
		      PFID(&lli->lli_fid));
		rc = -ESTALE;
		goto out;
	}

	/* get parent reference count here, and put it in ll_statahead_thread */
	parent = dget((*dentryp)->d_parent);
	if (unlikely(sai->sai_inode != d_inode(parent))) {
		struct ll_inode_info *nlli = ll_i2info(d_inode(parent));

		CWARN("Race condition, someone changed %pd just now: old parent "DFID", new parent "DFID"\n",
		      *dentryp,
		      PFID(&lli->lli_fid), PFID(&nlli->lli_fid));
		dput(parent);
		iput(sai->sai_inode);
		rc = -EAGAIN;
		goto out;
	}

	CDEBUG(D_READA, "start statahead thread: sai %p, parent %pd\n",
	       sai, parent);

	/* The sai buffer already has one reference taken at allocation time,
	 * but as soon as we expose the sai by attaching it to the lli that
	 * default reference can be dropped by another thread calling
	 * ll_stop_statahead. We need to take a local reference to protect
	 * the sai buffer while we intend to access it.
	 */
	ll_sai_get(sai);
	lli->lli_sai = sai;

	plli = ll_i2info(d_inode(parent));
	task = kthread_run(ll_statahead_thread, parent, "ll_sa_%u",
			   plli->lli_opendir_pid);
	thread = &sai->sai_thread;
	if (IS_ERR(task)) {
		rc = PTR_ERR(task);
		CERROR("can't start ll_sa thread, rc: %d\n", rc);
		dput(parent);
		lli->lli_opendir_key = NULL;
		thread_set_flags(thread, SVC_STOPPED);
		thread_set_flags(&sai->sai_agl_thread, SVC_STOPPED);
		/* Drop both our own local reference and the default
		 * reference from allocation time.
		 */
		ll_sai_put(sai);
		ll_sai_put(sai);
		LASSERT(!lli->lli_sai);
		return -EAGAIN;
	}

	l_wait_event(thread->t_ctl_waitq,
		     thread_is_running(thread) || thread_is_stopped(thread),
		     &lwi);
	ll_sai_put(sai);

	/*
	 * We don't stat-ahead for the first dirent since we are already in
	 * lookup.
	 */
	return -EAGAIN;

out:
	kfree(sai);
	spin_lock(&lli->lli_sa_lock);
	lli->lli_opendir_key = NULL;
	lli->lli_opendir_pid = 0;
	spin_unlock(&lli->lli_sa_lock);
	return rc;
}