/*
 * GPL HEADER START
 *
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 * GPL HEADER END
 */
/*
 * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
 * Use is subject to license terms.
 *
 * Copyright (c) 2011, 2012, Intel Corporation.
 */
/*
 * This file is part of Lustre, http://www.lustre.org/
 * Lustre is a trademark of Sun Microsystems, Inc.
 */
#include <linux/fs.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>

#define DEBUG_SUBSYSTEM S_LLITE

#include "../include/obd_support.h"
#include "../include/lustre_lite.h"
#include "../include/lustre_dlm.h"
#include "llite_internal.h"

#define SA_OMITTED_ENTRY_MAX 8ULL
typedef enum {
	/** negative values are for error cases */
	SA_ENTRY_INIT = 0,      /** init entry */
	SA_ENTRY_SUCC = 1,      /** stat succeed */
	SA_ENTRY_INVA = 2,      /** invalid entry */
	SA_ENTRY_DEST = 3,      /** entry to be destroyed */
} se_stat_t;
struct ll_sa_entry {
	/* link into sai->sai_entries */
	struct list_head	se_link;
	/* link into sai->sai_entries_{received,stated} */
	struct list_head	se_list;
	/* link into sai hash table locally */
	struct list_head	se_hash;
	/* entry reference count */
	atomic_t		se_refcount;
	/* entry index in the sai */
	__u64			se_index;
	/* low layer ldlm lock handle */
	__u64			se_handle;
	/* entry status */
	se_stat_t		se_stat;
	/* entry size, contains name */
	int			se_size;
	/* pointer to async getattr enqueue info */
	struct md_enqueue_info *se_minfo;
	/* pointer to the async getattr request */
	struct ptlrpc_request  *se_req;
	/* pointer to the target inode */
	struct inode	       *se_inode;
	/* entry name */
	struct qstr		se_qstr;
};
static unsigned int sai_generation;
static DEFINE_SPINLOCK(sai_generation_lock);
static inline int ll_sa_entry_unhashed(struct ll_sa_entry *entry)
{
	return list_empty(&entry->se_hash);
}
/*
 * Only the caller can release the entry, so it is necessary to hold the lock.
 */
static inline int ll_sa_entry_stated(struct ll_sa_entry *entry)
{
	smp_rmb();
	return (entry->se_stat != SA_ENTRY_INIT);
}
static inline int ll_sa_entry_hash(int val)
{
	return val & LL_SA_CACHE_MASK;
}
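/*
 * Illustrative example: LL_SA_CACHE_MASK keeps only the low bits of the
 * name hash, so a hash value of 0x2f with a (hypothetical) mask of 0x1f
 * selects bucket 0x0f. Each bucket is protected by its own
 * sai_cache_lock[i], so lookups of names that land in different buckets
 * do not contend on a single lock.
 */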
/*
 * Insert entry into the hash SA table.
 */
static inline void
ll_sa_entry_enhash(struct ll_statahead_info *sai, struct ll_sa_entry *entry)
{
	int i = ll_sa_entry_hash(entry->se_qstr.hash);

	spin_lock(&sai->sai_cache_lock[i]);
	list_add_tail(&entry->se_hash, &sai->sai_cache[i]);
	spin_unlock(&sai->sai_cache_lock[i]);
}
/*
 * Remove entry from the SA table.
 */
static inline void
ll_sa_entry_unhash(struct ll_statahead_info *sai, struct ll_sa_entry *entry)
{
	int i = ll_sa_entry_hash(entry->se_qstr.hash);

	spin_lock(&sai->sai_cache_lock[i]);
	list_del_init(&entry->se_hash);
	spin_unlock(&sai->sai_cache_lock[i]);
}
static inline int agl_should_run(struct ll_statahead_info *sai,
				 struct inode *inode)
{
	return (inode != NULL && S_ISREG(inode->i_mode) && sai->sai_agl_valid);
}
static inline struct ll_sa_entry *
sa_first_received_entry(struct ll_statahead_info *sai)
{
	return list_entry(sai->sai_entries_received.next,
			  struct ll_sa_entry, se_list);
}
static inline struct ll_inode_info *
agl_first_entry(struct ll_statahead_info *sai)
{
	return list_entry(sai->sai_entries_agl.next,
			  struct ll_inode_info, lli_agl_list);
}
static inline int sa_sent_full(struct ll_statahead_info *sai)
{
	return atomic_read(&sai->sai_cache_count) >= sai->sai_max;
}
static inline int sa_received_empty(struct ll_statahead_info *sai)
{
	return list_empty(&sai->sai_entries_received);
}
static inline int agl_list_empty(struct ll_statahead_info *sai)
{
	return list_empty(&sai->sai_entries_agl);
}
/*
 * If (1) the hit ratio is less than 80%, or
 *    (2) there are more than 8 consecutive misses,
 * then statahead is considered a low hit.
 */
static inline int sa_low_hit(struct ll_statahead_info *sai)
{
	return ((sai->sai_hit > 7 && sai->sai_hit < 4 * sai->sai_miss) ||
		(sai->sai_consecutive_miss > 8));
}
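/*
 * Example: with sai_hit == 10 and sai_miss == 3, "sai_hit < 4 * sai_miss"
 * holds (10 < 12), i.e. the hit ratio 10/13 is below 80%, so statahead is
 * treated as mis-predicting and the caller may stop the thread.
 */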
/*
 * If the given index falls behind the statahead window by more than
 * SA_OMITTED_ENTRY_MAX, then it is old.
 */
static inline int is_omitted_entry(struct ll_statahead_info *sai, __u64 index)
{
	return ((__u64)sai->sai_max + index + SA_OMITTED_ENTRY_MAX <
		sai->sai_index);
}
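/*
 * Example: with sai_max == 32 and sai_index == 100, an entry with
 * index < 60 satisfies 32 + index + 8 < 100 and is treated as omitted:
 * the scanner has already moved too far past it for the cached result
 * to still be useful.
 */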
/*
 * Insert it into sai_entries tail when init.
 */
static struct ll_sa_entry *
ll_sa_entry_alloc(struct ll_statahead_info *sai, __u64 index,
		  const char *name, int len)
{
	struct ll_inode_info *lli;
	struct ll_sa_entry *entry;
	int entry_size;
	char *dname;

	entry_size = sizeof(struct ll_sa_entry) + (len & ~3) + 4;
	entry = kzalloc(entry_size, GFP_NOFS);
	if (unlikely(!entry))
		return ERR_PTR(-ENOMEM);

	CDEBUG(D_READA, "alloc sa entry %.*s(%p) index %llu\n",
	       len, name, entry, index);

	entry->se_index = index;

	/*
	 * Statahead entry reference rules:
	 *
	 * 1) When statahead entry is initialized, its reference is set as 2.
	 *    One reference is used by the directory scanner. When the scanner
	 *    searches the statahead cache for the given name, it can perform a
	 *    lockless hash lookup (only the scanner can remove the entry from
	 *    the hash list), and once found, it needn't call "atomic_inc()"
	 *    for the entry reference. So the performance is improved. After
	 *    using the statahead entry, the scanner will call "atomic_dec()"
	 *    to drop the reference held since initialization. If it is the
	 *    last reference, the statahead entry will be freed.
	 *
	 * 2) All other threads, including statahead thread and ptlrpcd thread,
	 *    when they process the statahead entry, the reference for the
	 *    target should be held to guarantee the entry will not be released
	 *    by the directory scanner. After processing the entry, these
	 *    threads will drop the entry reference. If it is the last
	 *    reference, the entry will be freed.
	 *
	 * The second reference taken when initializing the statahead entry is
	 * used by the statahead thread, following rule 2).
	 */
	atomic_set(&entry->se_refcount, 2);
	entry->se_stat = SA_ENTRY_INIT;
	entry->se_size = entry_size;
	dname = (char *)entry + sizeof(struct ll_sa_entry);
	memcpy(dname, name, len);

	entry->se_qstr.hash = full_name_hash(name, len);
	entry->se_qstr.len = len;
	entry->se_qstr.name = dname;

	lli = ll_i2info(sai->sai_inode);
	spin_lock(&lli->lli_sa_lock);
	list_add_tail(&entry->se_link, &sai->sai_entries);
	INIT_LIST_HEAD(&entry->se_list);
	ll_sa_entry_enhash(sai, entry);
	spin_unlock(&lli->lli_sa_lock);

	atomic_inc(&sai->sai_cache_count);

	return entry;
}
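/*
 * Lifecycle sketch of the two initial references: the statahead/ptlrpcd
 * side drops its reference in ll_sa_entry_put() once the async getattr
 * reply has been processed, and the directory scanner drops the other
 * one after consuming the entry; whichever of the two runs last frees
 * the entry.
 */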
/*
 * Used by the directory scanner to search entry with name.
 *
 * Only the caller can remove the entry from hash, so it is unnecessary to hold
 * the hash lock. It is the caller's duty to release the init refcount on the
 * entry, so it is also unnecessary to increase the refcount on the entry.
 */
static struct ll_sa_entry *
ll_sa_entry_get_byname(struct ll_statahead_info *sai, const struct qstr *qstr)
{
	struct ll_sa_entry *entry;
	int i = ll_sa_entry_hash(qstr->hash);

	list_for_each_entry(entry, &sai->sai_cache[i], se_hash) {
		if (entry->se_qstr.hash == qstr->hash &&
		    entry->se_qstr.len == qstr->len &&
		    memcmp(entry->se_qstr.name, qstr->name, qstr->len) == 0)
			return entry;
	}
	return NULL;
}
/*
 * Used by the async getattr request callback to find entry with index.
 *
 * Inside lli_sa_lock to prevent others from changing the list during the
 * search. It needs to increase the entry refcount before returning to
 * guarantee that the entry cannot be freed by others.
 */
static struct ll_sa_entry *
ll_sa_entry_get_byindex(struct ll_statahead_info *sai, __u64 index)
{
	struct ll_sa_entry *entry;

	list_for_each_entry(entry, &sai->sai_entries, se_link) {
		if (entry->se_index == index) {
			LASSERT(atomic_read(&entry->se_refcount) > 0);
			atomic_inc(&entry->se_refcount);
			return entry;
		}
		if (entry->se_index > index)
			break;
	}
	return NULL;
}
static void ll_sa_entry_cleanup(struct ll_statahead_info *sai,
				struct ll_sa_entry *entry)
{
	struct md_enqueue_info *minfo = entry->se_minfo;
	struct ptlrpc_request *req = entry->se_req;

	if (minfo) {
		entry->se_minfo = NULL;
		ll_intent_release(&minfo->mi_it);
		iput(minfo->mi_dir);
		kfree(minfo);
	}

	if (req) {
		entry->se_req = NULL;
		ptlrpc_req_finished(req);
	}
}
static void ll_sa_entry_put(struct ll_statahead_info *sai,
			    struct ll_sa_entry *entry)
{
	if (atomic_dec_and_test(&entry->se_refcount)) {
		CDEBUG(D_READA, "free sa entry %.*s(%p) index %llu\n",
		       entry->se_qstr.len, entry->se_qstr.name, entry,
		       entry->se_index);

		LASSERT(list_empty(&entry->se_link));
		LASSERT(list_empty(&entry->se_list));
		LASSERT(ll_sa_entry_unhashed(entry));

		ll_sa_entry_cleanup(sai, entry);
		iput(entry->se_inode);

		OBD_FREE(entry, entry->se_size);
		atomic_dec(&sai->sai_cache_count);
	}
}
static inline void
do_sa_entry_fini(struct ll_statahead_info *sai, struct ll_sa_entry *entry)
{
	struct ll_inode_info *lli = ll_i2info(sai->sai_inode);

	LASSERT(!ll_sa_entry_unhashed(entry));
	LASSERT(!list_empty(&entry->se_link));

	ll_sa_entry_unhash(sai, entry);

	spin_lock(&lli->lli_sa_lock);
	entry->se_stat = SA_ENTRY_DEST;
	list_del_init(&entry->se_link);
	if (likely(!list_empty(&entry->se_list)))
		list_del_init(&entry->se_list);
	spin_unlock(&lli->lli_sa_lock);

	ll_sa_entry_put(sai, entry);
}
/*
 * Delete it from sai_entries_stated list when fini.
 */
static void
ll_sa_entry_fini(struct ll_statahead_info *sai, struct ll_sa_entry *entry)
{
	struct ll_sa_entry *pos, *next;

	if (entry)
		do_sa_entry_fini(sai, entry);

	/* drop old entry, only 'scanner' process does this, no need to lock */
	list_for_each_entry_safe(pos, next, &sai->sai_entries, se_link) {
		if (!is_omitted_entry(sai, pos->se_index))
			break;
		do_sa_entry_fini(sai, pos);
	}
}
/*
 * Inside lli_sa_lock.
 */
static void
do_sa_entry_to_stated(struct ll_statahead_info *sai,
		      struct ll_sa_entry *entry, se_stat_t stat)
{
	struct ll_sa_entry *se;
	struct list_head *pos = &sai->sai_entries_stated;

	if (!list_empty(&entry->se_list))
		list_del_init(&entry->se_list);

	list_for_each_entry_reverse(se, &sai->sai_entries_stated, se_list) {
		if (se->se_index < entry->se_index) {
			pos = &se->se_list;
			break;
		}
	}

	list_add(&entry->se_list, pos);
	entry->se_stat = stat;
}
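/*
 * Example: if sai_entries_stated currently holds indexes {3, 5, 9} and
 * an entry with index 7 becomes stated, the reverse walk stops at 5 and
 * the entry is linked after it, keeping the list sorted: {3, 5, 7, 9}.
 */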
/*
 * Move entry to sai_entries_stated and sort by index.
 * \retval 1    -- entry to be destroyed.
 * \retval 0    -- entry is inserted into stated list.
 */
static int
ll_sa_entry_to_stated(struct ll_statahead_info *sai,
		      struct ll_sa_entry *entry, se_stat_t stat)
{
	struct ll_inode_info *lli = ll_i2info(sai->sai_inode);
	int ret = 1;

	ll_sa_entry_cleanup(sai, entry);

	spin_lock(&lli->lli_sa_lock);
	if (likely(entry->se_stat != SA_ENTRY_DEST)) {
		do_sa_entry_to_stated(sai, entry, stat);
		ret = 0;
	}
	spin_unlock(&lli->lli_sa_lock);

	return ret;
}
/*
 * Insert inode into the list of sai_entries_agl.
 */
static void ll_agl_add(struct ll_statahead_info *sai,
		       struct inode *inode, int index)
{
	struct ll_inode_info *child  = ll_i2info(inode);
	struct ll_inode_info *parent = ll_i2info(sai->sai_inode);
	int added = 0;

	spin_lock(&child->lli_agl_lock);
	if (child->lli_agl_index == 0) {
		child->lli_agl_index = index;
		spin_unlock(&child->lli_agl_lock);

		LASSERT(list_empty(&child->lli_agl_list));

		igrab(inode);
		spin_lock(&parent->lli_agl_lock);
		if (agl_list_empty(sai))
			added = 1;
		list_add_tail(&child->lli_agl_list, &sai->sai_entries_agl);
		spin_unlock(&parent->lli_agl_lock);
	} else {
		spin_unlock(&child->lli_agl_lock);
	}

	if (added > 0)
		wake_up(&sai->sai_agl_thread.t_ctl_waitq);
}
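/*
 * The AGL list is a simple producer/consumer pipe: the statahead side
 * queues regular-file inodes here (taking an inode reference), and
 * ll_agl_thread() drains the list, issuing one asynchronous glimpse per
 * inode to prefetch file sizes from the OSTs.
 */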
static struct ll_statahead_info *ll_sai_alloc(void)
{
	struct ll_statahead_info *sai;
	int i;

	sai = kzalloc(sizeof(*sai), GFP_NOFS);
	if (!sai)
		return NULL;

	atomic_set(&sai->sai_refcount, 1);

	spin_lock(&sai_generation_lock);
	sai->sai_generation = ++sai_generation;
	if (unlikely(sai_generation == 0))
		sai->sai_generation = ++sai_generation;
	spin_unlock(&sai_generation_lock);

	sai->sai_max = LL_SA_RPC_MIN;
	sai->sai_index = 1;
	init_waitqueue_head(&sai->sai_waitq);
	init_waitqueue_head(&sai->sai_thread.t_ctl_waitq);
	init_waitqueue_head(&sai->sai_agl_thread.t_ctl_waitq);

	INIT_LIST_HEAD(&sai->sai_entries);
	INIT_LIST_HEAD(&sai->sai_entries_received);
	INIT_LIST_HEAD(&sai->sai_entries_stated);
	INIT_LIST_HEAD(&sai->sai_entries_agl);

	for (i = 0; i < LL_SA_CACHE_SIZE; i++) {
		INIT_LIST_HEAD(&sai->sai_cache[i]);
		spin_lock_init(&sai->sai_cache_lock[i]);
	}
	atomic_set(&sai->sai_cache_count, 0);

	return sai;
}
static inline struct ll_statahead_info *
ll_sai_get(struct ll_statahead_info *sai)
{
	atomic_inc(&sai->sai_refcount);
	return sai;
}
static void ll_sai_put(struct ll_statahead_info *sai)
{
	struct inode *inode = sai->sai_inode;
	struct ll_inode_info *lli = ll_i2info(inode);

	if (atomic_dec_and_lock(&sai->sai_refcount, &lli->lli_sa_lock)) {
		struct ll_sa_entry *entry, *next;

		if (unlikely(atomic_read(&sai->sai_refcount) > 0)) {
			/* It is a race case; the interpret callback just
			 * holds a reference count */
			spin_unlock(&lli->lli_sa_lock);
			return;
		}

		LASSERT(lli->lli_opendir_key == NULL);
		LASSERT(thread_is_stopped(&sai->sai_thread));
		LASSERT(thread_is_stopped(&sai->sai_agl_thread));

		lli->lli_sai = NULL;
		lli->lli_opendir_pid = 0;
		spin_unlock(&lli->lli_sa_lock);

		if (sai->sai_sent > sai->sai_replied)
			CDEBUG(D_READA, "statahead for dir "DFID
			       " does not finish: [sent:%llu] [replied:%llu]\n",
			       PFID(&lli->lli_fid),
			       sai->sai_sent, sai->sai_replied);

		list_for_each_entry_safe(entry, next,
					 &sai->sai_entries, se_link)
			do_sa_entry_fini(sai, entry);

		LASSERT(list_empty(&sai->sai_entries));
		LASSERT(sa_received_empty(sai));
		LASSERT(list_empty(&sai->sai_entries_stated));

		LASSERT(atomic_read(&sai->sai_cache_count) == 0);
		LASSERT(agl_list_empty(sai));

		iput(inode);
		OBD_FREE_PTR(sai);
	}
}
/* Do NOT forget to drop inode refcount when into sai_entries_agl. */
static void ll_agl_trigger(struct inode *inode, struct ll_statahead_info *sai)
{
	struct ll_inode_info *lli = ll_i2info(inode);
	__u64 index = lli->lli_agl_index;
	int rc;

	LASSERT(list_empty(&lli->lli_agl_list));

	/* AGL may fall behind statahead by one entry */
	if (is_omitted_entry(sai, index + 1)) {
		lli->lli_agl_index = 0;
		iput(inode);
		return;
	}

	/* Someone is in glimpse (sync or async), do nothing. */
	rc = down_write_trylock(&lli->lli_glimpse_sem);
	if (rc == 0) {
		lli->lli_agl_index = 0;
		iput(inode);
		return;
	}

	/*
	 * Someone triggered glimpse within 1 sec before.
	 * 1) The former glimpse succeeded with glimpse lock granted by OST, and
	 *    if the lock is still cached on client, AGL needs to do nothing. If
	 *    it is cancelled by another client, AGL may be unable to obtain a
	 *    new lock since no glimpse callback is triggered by AGL.
	 * 2) The former glimpse succeeded, but OST did not grant glimpse lock.
	 *    In such a case, it is quite possible that the OST will not grant
	 *    a glimpse lock for AGL either.
	 * 3) The former glimpse failed; compared with the other two cases, it
	 *    is relatively rare. AGL can ignore such a case, and it will not
	 *    noticeably affect performance.
	 */
	if (lli->lli_glimpse_time != 0 &&
	    time_before(cfs_time_shift(-1), lli->lli_glimpse_time)) {
		up_write(&lli->lli_glimpse_sem);
		lli->lli_agl_index = 0;
		iput(inode);
		return;
	}

	CDEBUG(D_READA, "Handling (init) async glimpse: inode = "
	       DFID", idx = %llu\n", PFID(&lli->lli_fid), index);

	cl_agl(inode);
	lli->lli_agl_index = 0;
	lli->lli_glimpse_time = cfs_time_current();
	up_write(&lli->lli_glimpse_sem);

	CDEBUG(D_READA, "Handled (init) async glimpse: inode= "
	       DFID", idx = %llu, rc = %d\n",
	       PFID(&lli->lli_fid), index, rc);

	iput(inode);
}
static void ll_post_statahead(struct ll_statahead_info *sai)
{
	struct inode *dir = sai->sai_inode;
	struct inode *child;
	struct ll_inode_info *lli = ll_i2info(dir);
	struct ll_sa_entry *entry;
	struct md_enqueue_info *minfo;
	struct lookup_intent *it;
	struct ptlrpc_request *req;
	struct mdt_body *body;
	int rc = 0;

	spin_lock(&lli->lli_sa_lock);
	if (unlikely(sa_received_empty(sai))) {
		spin_unlock(&lli->lli_sa_lock);
		return;
	}
	entry = sa_first_received_entry(sai);
	atomic_inc(&entry->se_refcount);
	list_del_init(&entry->se_list);
	spin_unlock(&lli->lli_sa_lock);

	LASSERT(entry->se_handle != 0);

	minfo = entry->se_minfo;
	it = &minfo->mi_it;
	req = entry->se_req;
	body = req_capsule_server_get(&req->rq_pill, &RMF_MDT_BODY);
	if (body == NULL) {
		rc = -EFAULT;
		goto out;
	}

	child = entry->se_inode;
	if (child == NULL) {
		/*
		 * lookup.
		 */
		LASSERT(fid_is_zero(&minfo->mi_data.op_fid2));

		/* XXX: No fid in reply, this is probably cross-ref case.
		 * SA can't handle it yet. */
		if (body->valid & OBD_MD_MDS) {
			rc = -EAGAIN;
			goto out;
		}
	} else {
		/*
		 * revalidate.
		 */
		/* unlinked and re-created with the same name */
		if (unlikely(!lu_fid_eq(&minfo->mi_data.op_fid2, &body->fid1))) {
			entry->se_inode = NULL;
			iput(child);
			child = NULL;
		}
	}

	it->d.lustre.it_lock_handle = entry->se_handle;
	rc = md_revalidate_lock(ll_i2mdexp(dir), it, ll_inode2fid(dir), NULL);
	if (rc != 1) {
		rc = -EAGAIN;
		goto out;
	}

	rc = ll_prep_inode(&child, req, dir->i_sb, it);
	if (rc)
		goto out;

	CDEBUG(D_DLMTRACE, "setting l_data to inode %p (%lu/%u)\n",
	       child, child->i_ino, child->i_generation);
	ll_set_lock_data(ll_i2sbi(dir)->ll_md_exp, child, it, NULL);

	entry->se_inode = child;

	if (agl_should_run(sai, child))
		ll_agl_add(sai, child, entry->se_index);

out:
	/* The "ll_sa_entry_to_stated()" will drop related ldlm ibits lock
	 * reference count by calling "ll_intent_drop_lock()" whether the
	 * above operations failed or not. Do not worry about calling
	 * "ll_intent_drop_lock()" more than once. */
	rc = ll_sa_entry_to_stated(sai, entry,
				   rc < 0 ? SA_ENTRY_INVA : SA_ENTRY_SUCC);
	if (rc == 0 && entry->se_index == sai->sai_index_wait)
		wake_up(&sai->sai_waitq);
	ll_sa_entry_put(sai, entry);
}
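/*
 * Flow sketch: ll_statahead_interpret() parks completed replies on
 * sai_entries_received; ll_post_statahead() dequeues one, turns the
 * reply into an inode while revalidating the ibits lock, then moves the
 * entry to sai_entries_stated where a waiting do_statahead_enter()
 * caller can consume it.
 */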
static int ll_statahead_interpret(struct ptlrpc_request *req,
				  struct md_enqueue_info *minfo, int rc)
{
	struct lookup_intent *it = &minfo->mi_it;
	struct inode *dir = minfo->mi_dir;
	struct ll_inode_info *lli = ll_i2info(dir);
	struct ll_statahead_info *sai = NULL;
	struct ll_sa_entry *entry;
	__u64 handle = 0;
	int wakeup;

	if (it_disposition(it, DISP_LOOKUP_NEG))
		rc = -ENOENT;

	if (rc == 0) {
		/* release ibits lock ASAP to avoid deadlock when statahead
		 * thread enqueues lock on parent in readdir and another
		 * process enqueues lock on child with parent lock held, e.g.
		 * unlink. */
		handle = it->d.lustre.it_lock_handle;
		ll_intent_drop_lock(it);
	}

	spin_lock(&lli->lli_sa_lock);
	/* stale entry */
	if (unlikely(lli->lli_sai == NULL ||
		     lli->lli_sai->sai_generation != minfo->mi_generation)) {
		spin_unlock(&lli->lli_sa_lock);
		rc = -ESTALE;
		goto out;
	} else {
		sai = ll_sai_get(lli->lli_sai);
		if (unlikely(!thread_is_running(&sai->sai_thread))) {
			sai->sai_replied++;
			spin_unlock(&lli->lli_sa_lock);
			rc = -EBADFD;
			goto out;
		}

		entry = ll_sa_entry_get_byindex(sai, minfo->mi_cbdata);
		if (entry == NULL) {
			sai->sai_replied++;
			spin_unlock(&lli->lli_sa_lock);
			rc = -EIDRM;
			goto out;
		}

		if (rc != 0) {
			do_sa_entry_to_stated(sai, entry, SA_ENTRY_INVA);
			wakeup = (entry->se_index == sai->sai_index_wait);
		} else {
			entry->se_minfo = minfo;
			entry->se_req = ptlrpc_request_addref(req);
			/* Release the async ibits lock ASAP to avoid deadlock
			 * when statahead thread tries to enqueue lock on parent
			 * for readpage and another tries to enqueue lock on
			 * child with the parent's lock held, for example:
			 * unlink. */
			entry->se_handle = handle;
			wakeup = sa_received_empty(sai);
			list_add_tail(&entry->se_list,
				      &sai->sai_entries_received);
		}
		sai->sai_replied++;
		spin_unlock(&lli->lli_sa_lock);

		ll_sa_entry_put(sai, entry);
		if (wakeup)
			wake_up(&sai->sai_thread.t_ctl_waitq);
	}

out:
	if (rc != 0) {
		ll_intent_release(it);
		iput(dir);
		kfree(minfo);
	}
	if (sai != NULL)
		ll_sai_put(sai);
	return rc;
}
static void sa_args_fini(struct md_enqueue_info *minfo,
			 struct ldlm_enqueue_info *einfo)
{
	LASSERT(minfo && einfo);
	iput(minfo->mi_dir);
	capa_put(minfo->mi_data.op_capa1);
	capa_put(minfo->mi_data.op_capa2);
	kfree(minfo);
	kfree(einfo);
}
/*
 * There is a race condition between "capa_put" and "ll_statahead_interpret"
 * when accessing "op_data.op_capa[1,2]":
 * "capa_put" releases the reference on "op_data.op_capa[1,2]" after calling
 * "md_intent_getattr_async". But "ll_statahead_interpret" may run first and
 * fill "op_data.op_capa[1,2]" with POISON, causing "capa_put" to access an
 * invalid "ocapa". So reserve "op_data.op_capa[1,2]" in "pcapa" before
 * calling "md_intent_getattr_async".
 */
static int sa_args_init(struct inode *dir, struct inode *child,
			struct ll_sa_entry *entry, struct md_enqueue_info **pmi,
			struct ldlm_enqueue_info **pei,
			struct obd_capa **pcapa)
{
	struct qstr *qstr = &entry->se_qstr;
	struct ll_inode_info *lli = ll_i2info(dir);
	struct md_enqueue_info *minfo;
	struct ldlm_enqueue_info *einfo;
	struct md_op_data *op_data;

	einfo = kzalloc(sizeof(*einfo), GFP_NOFS);
	if (!einfo)
		return -ENOMEM;

	minfo = kzalloc(sizeof(*minfo), GFP_NOFS);
	if (!minfo) {
		kfree(einfo);
		return -ENOMEM;
	}

	op_data = ll_prep_md_op_data(&minfo->mi_data, dir, child, qstr->name,
				     qstr->len, 0, LUSTRE_OPC_ANY, NULL);
	if (IS_ERR(op_data)) {
		kfree(einfo);
		kfree(minfo);
		return PTR_ERR(op_data);
	}

	minfo->mi_it.it_op = IT_GETATTR;
	minfo->mi_dir = igrab(dir);
	minfo->mi_cb = ll_statahead_interpret;
	minfo->mi_generation = lli->lli_sai->sai_generation;
	minfo->mi_cbdata = entry->se_index;

	einfo->ei_type = LDLM_IBITS;
	einfo->ei_mode = it_to_lock_mode(&minfo->mi_it);
	einfo->ei_cb_bl = ll_md_blocking_ast;
	einfo->ei_cb_cp = ldlm_completion_ast;
	einfo->ei_cb_gl = NULL;
	einfo->ei_cbdata = NULL;

	*pmi = minfo;
	*pei = einfo;
	pcapa[0] = op_data->op_capa1;
	pcapa[1] = op_data->op_capa2;

	return 0;
}
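/*
 * Race timeline (illustrative): without the pcapa copy, the caller would
 * issue md_intent_getattr_async() and then do capa_put() on
 * op_data->op_capa[1,2]; if ll_statahead_interpret() ran in between and
 * freed the op_data, capa_put() would touch poisoned memory. Saving the
 * capas in pcapa[] before the async call means op_data is never touched
 * afterwards.
 */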
static int do_sa_lookup(struct inode *dir, struct ll_sa_entry *entry)
{
	struct md_enqueue_info *minfo;
	struct ldlm_enqueue_info *einfo;
	struct obd_capa *capas[2];
	int rc;

	rc = sa_args_init(dir, NULL, entry, &minfo, &einfo, capas);
	if (rc)
		return rc;

	rc = md_intent_getattr_async(ll_i2mdexp(dir), minfo, einfo);
	if (!rc) {
		capa_put(capas[0]);
		capa_put(capas[1]);
	} else {
		sa_args_fini(minfo, einfo);
	}

	return rc;
}
/*
 * similar to ll_revalidate_it().
 * \retval      1 -- dentry valid
 * \retval      0 -- will send stat-ahead request
 * \retval others -- prepare stat-ahead request failed
 */
static int do_sa_revalidate(struct inode *dir, struct ll_sa_entry *entry,
			    struct dentry *dentry)
{
	struct inode *inode = d_inode(dentry);
	struct lookup_intent it = { .it_op = IT_GETATTR,
				    .d.lustre.it_lock_handle = 0 };
	struct md_enqueue_info *minfo;
	struct ldlm_enqueue_info *einfo;
	struct obd_capa *capas[2];
	int rc;

	if (unlikely(inode == NULL))
		return 1;

	if (d_mountpoint(dentry))
		return 1;

	entry->se_inode = igrab(inode);
	rc = md_revalidate_lock(ll_i2mdexp(dir), &it, ll_inode2fid(inode),
				NULL);
	if (rc == 1) {
		entry->se_handle = it.d.lustre.it_lock_handle;
		ll_intent_release(&it);
		return 1;
	}

	rc = sa_args_init(dir, inode, entry, &minfo, &einfo, capas);
	if (rc) {
		entry->se_inode = NULL;
		iput(inode);
		return rc;
	}

	rc = md_intent_getattr_async(ll_i2mdexp(dir), minfo, einfo);
	if (!rc) {
		capa_put(capas[0]);
		capa_put(capas[1]);
	} else {
		entry->se_inode = NULL;
		iput(inode);
		sa_args_fini(minfo, einfo);
	}

	return rc;
}
static void ll_statahead_one(struct dentry *parent, const char *entry_name,
			     int entry_name_len)
{
	struct inode *dir = d_inode(parent);
	struct ll_inode_info *lli = ll_i2info(dir);
	struct ll_statahead_info *sai = lli->lli_sai;
	struct dentry *dentry = NULL;
	struct ll_sa_entry *entry;
	int rc;
	int rc1;

	entry = ll_sa_entry_alloc(sai, sai->sai_index, entry_name,
				  entry_name_len);
	if (IS_ERR(entry))
		return;

	dentry = d_lookup(parent, &entry->se_qstr);
	if (!dentry) {
		rc = do_sa_lookup(dir, entry);
	} else {
		rc = do_sa_revalidate(dir, entry, dentry);
		if (rc == 1 && agl_should_run(sai, d_inode(dentry)))
			ll_agl_add(sai, d_inode(dentry), entry->se_index);
	}

	if (dentry != NULL)
		dput(dentry);

	if (rc) {
		rc1 = ll_sa_entry_to_stated(sai, entry,
					rc < 0 ? SA_ENTRY_INVA : SA_ENTRY_SUCC);
		if (rc1 == 0 && entry->se_index == sai->sai_index_wait)
			wake_up(&sai->sai_waitq);
	} else {
		sai->sai_sent++;
	}

	sai->sai_index++;
	/* drop one refcount on entry taken by ll_sa_entry_alloc */
	ll_sa_entry_put(sai, entry);
}
static int ll_agl_thread(void *arg)
{
	struct dentry *parent = (struct dentry *)arg;
	struct inode *dir = d_inode(parent);
	struct ll_inode_info *plli = ll_i2info(dir);
	struct ll_inode_info *clli;
	struct ll_sb_info *sbi = ll_i2sbi(dir);
	struct ll_statahead_info *sai = ll_sai_get(plli->lli_sai);
	struct ptlrpc_thread *thread = &sai->sai_agl_thread;
	struct l_wait_info lwi = { 0 };

	thread->t_pid = current_pid();
	CDEBUG(D_READA, "agl thread started: sai %p, parent %pd\n",
	       sai, parent);

	atomic_inc(&sbi->ll_agl_total);
	spin_lock(&plli->lli_agl_lock);
	sai->sai_agl_valid = 1;
	if (thread_is_init(thread))
		/* If someone else has changed the thread state
		 * (e.g. already changed to SVC_STOPPING), we can't just
		 * blindly overwrite that setting. */
		thread_set_flags(thread, SVC_RUNNING);
	spin_unlock(&plli->lli_agl_lock);
	wake_up(&thread->t_ctl_waitq);

	while (1) {
		l_wait_event(thread->t_ctl_waitq,
			     !agl_list_empty(sai) ||
			     !thread_is_running(thread),
			     &lwi);

		if (!thread_is_running(thread))
			break;

		spin_lock(&plli->lli_agl_lock);
		/* The statahead thread may help to process AGL entries,
		 * so check whether the list is empty again. */
		if (!agl_list_empty(sai)) {
			clli = agl_first_entry(sai);
			list_del_init(&clli->lli_agl_list);
			spin_unlock(&plli->lli_agl_lock);
			ll_agl_trigger(&clli->lli_vfs_inode, sai);
		} else {
			spin_unlock(&plli->lli_agl_lock);
		}
	}

	spin_lock(&plli->lli_agl_lock);
	sai->sai_agl_valid = 0;
	while (!agl_list_empty(sai)) {
		clli = agl_first_entry(sai);
		list_del_init(&clli->lli_agl_list);
		spin_unlock(&plli->lli_agl_lock);
		clli->lli_agl_index = 0;
		iput(&clli->lli_vfs_inode);
		spin_lock(&plli->lli_agl_lock);
	}
	thread_set_flags(thread, SVC_STOPPED);
	spin_unlock(&plli->lli_agl_lock);
	wake_up(&thread->t_ctl_waitq);

	CDEBUG(D_READA, "agl thread stopped: sai %p, parent %pd\n",
	       sai, parent);

	ll_sai_put(sai);
	return 0;
}
static void ll_start_agl(struct dentry *parent, struct ll_statahead_info *sai)
{
	struct ptlrpc_thread *thread = &sai->sai_agl_thread;
	struct l_wait_info lwi = { 0 };
	struct ll_inode_info *plli;
	struct task_struct *task;

	CDEBUG(D_READA, "start agl thread: sai %p, parent %pd\n",
	       sai, parent);

	plli = ll_i2info(d_inode(parent));
	task = kthread_run(ll_agl_thread, parent,
			   "ll_agl_%u", plli->lli_opendir_pid);
	if (IS_ERR(task)) {
		CERROR("can't start ll_agl thread, rc: %ld\n", PTR_ERR(task));
		thread_set_flags(thread, SVC_STOPPED);
		return;
	}

	l_wait_event(thread->t_ctl_waitq,
		     thread_is_running(thread) || thread_is_stopped(thread),
		     &lwi);
}
static int ll_statahead_thread(void *arg)
{
	struct dentry *parent = (struct dentry *)arg;
	struct inode *dir = d_inode(parent);
	struct ll_inode_info *plli = ll_i2info(dir);
	struct ll_inode_info *clli;
	struct ll_sb_info *sbi = ll_i2sbi(dir);
	struct ll_statahead_info *sai = ll_sai_get(plli->lli_sai);
	struct ptlrpc_thread *thread = &sai->sai_thread;
	struct ptlrpc_thread *agl_thread = &sai->sai_agl_thread;
	struct page *page;
	__u64 pos = 0;
	int first = 0;
	int rc = 0;
	struct ll_dir_chain chain;
	struct l_wait_info lwi = { 0 };

	thread->t_pid = current_pid();
	CDEBUG(D_READA, "statahead thread starting: sai %p, parent %pd\n",
	       sai, parent);

	if (sbi->ll_flags & LL_SBI_AGL_ENABLED)
		ll_start_agl(parent, sai);

	atomic_inc(&sbi->ll_sa_total);
	spin_lock(&plli->lli_sa_lock);
	if (thread_is_init(thread))
		/* If someone else has changed the thread state
		 * (e.g. already changed to SVC_STOPPING), we can't just
		 * blindly overwrite that setting. */
		thread_set_flags(thread, SVC_RUNNING);
	spin_unlock(&plli->lli_sa_lock);
	wake_up(&thread->t_ctl_waitq);

	ll_dir_chain_init(&chain);
	page = ll_get_dir_page(dir, pos, &chain);

	while (1) {
		struct lu_dirpage *dp;
		struct lu_dirent  *ent;

		if (IS_ERR(page)) {
			rc = PTR_ERR(page);
			CDEBUG(D_READA, "error reading dir "DFID" at %llu/%llu: [rc %d] [parent %u]\n",
			       PFID(ll_inode2fid(dir)), pos, sai->sai_index,
			       rc, plli->lli_opendir_pid);
			goto out;
		}

		dp = page_address(page);
		for (ent = lu_dirent_start(dp); ent != NULL;
		     ent = lu_dirent_next(ent)) {
			__u64 hash;
			int namelen;
			char *name;

			hash = le64_to_cpu(ent->lde_hash);
			if (unlikely(hash < pos))
				/*
				 * Skip until we find target hash value.
				 */
				continue;

			namelen = le16_to_cpu(ent->lde_namelen);
			if (unlikely(namelen == 0))
				/*
				 * Skip dummy record.
				 */
				continue;

			name = ent->lde_name;
			if (name[0] == '.') {
				if (namelen == 1) {
					/*
					 * skip "."
					 */
					continue;
				} else if (name[1] == '.' && namelen == 2) {
					/*
					 * skip ".."
					 */
					continue;
				} else if (!sai->sai_ls_all) {
					/*
					 * skip hidden files.
					 */
					sai->sai_skip_hidden++;
					continue;
				}
			}

			/*
			 * don't stat-ahead first entry.
			 */
			if (unlikely(++first == 1))
				continue;

keep_it:
			l_wait_event(thread->t_ctl_waitq,
				     !sa_sent_full(sai) ||
				     !sa_received_empty(sai) ||
				     !agl_list_empty(sai) ||
				     !thread_is_running(thread),
				     &lwi);

interpret_it:
			while (!sa_received_empty(sai))
				ll_post_statahead(sai);

			if (unlikely(!thread_is_running(thread))) {
				ll_release_page(page, 0);
				rc = 0;
				goto out;
			}

			/* If there is no window for metadata statahead, but
			 * there are some AGL entries to be triggered, then try
			 * to help to process the AGL entries. */
			if (sa_sent_full(sai)) {
				spin_lock(&plli->lli_agl_lock);
				while (!agl_list_empty(sai)) {
					clli = agl_first_entry(sai);
					list_del_init(&clli->lli_agl_list);
					spin_unlock(&plli->lli_agl_lock);
					ll_agl_trigger(&clli->lli_vfs_inode,
						       sai);

					if (!sa_received_empty(sai))
						goto interpret_it;

					if (unlikely(
						!thread_is_running(thread))) {
						ll_release_page(page, 0);
						rc = 0;
						goto out;
					}

					if (!sa_sent_full(sai))
						goto do_it;

					spin_lock(&plli->lli_agl_lock);
				}
				spin_unlock(&plli->lli_agl_lock);

				goto keep_it;
			}

do_it:
			ll_statahead_one(parent, name, namelen);
		}
		pos = le64_to_cpu(dp->ldp_hash_end);
		if (pos == MDS_DIR_END_OFF) {
			/*
			 * End of directory reached.
			 */
			ll_release_page(page, 0);
			while (1) {
				l_wait_event(thread->t_ctl_waitq,
					     !sa_received_empty(sai) ||
					     sai->sai_sent == sai->sai_replied ||
					     !thread_is_running(thread),
					     &lwi);

				while (!sa_received_empty(sai))
					ll_post_statahead(sai);

				if (unlikely(!thread_is_running(thread))) {
					rc = 0;
					goto out;
				}

				if (sai->sai_sent == sai->sai_replied &&
				    sa_received_empty(sai))
					break;
			}

			spin_lock(&plli->lli_agl_lock);
			while (!agl_list_empty(sai) &&
			       thread_is_running(thread)) {
				clli = agl_first_entry(sai);
				list_del_init(&clli->lli_agl_list);
				spin_unlock(&plli->lli_agl_lock);
				ll_agl_trigger(&clli->lli_vfs_inode, sai);
				spin_lock(&plli->lli_agl_lock);
			}
			spin_unlock(&plli->lli_agl_lock);

			goto out;
		} else if (1) {
			/*
			 * chain is exhausted.
			 * Normal case: continue to the next page.
			 */
			ll_release_page(page, le32_to_cpu(dp->ldp_flags) &
					      LDF_COLLIDE);
			page = ll_get_dir_page(dir, pos, &chain);
		} else {
			LASSERT(le32_to_cpu(dp->ldp_flags) & LDF_COLLIDE);
			ll_release_page(page, 1);
			/*
			 * go into overflow page.
			 */
		}
	}

out:
	if (sai->sai_agl_valid) {
		spin_lock(&plli->lli_agl_lock);
		thread_set_flags(agl_thread, SVC_STOPPING);
		spin_unlock(&plli->lli_agl_lock);
		wake_up(&agl_thread->t_ctl_waitq);

		CDEBUG(D_READA, "stop agl thread: sai %p pid %u\n",
		       sai, (unsigned int)agl_thread->t_pid);
		l_wait_event(agl_thread->t_ctl_waitq,
			     thread_is_stopped(agl_thread),
			     &lwi);
	} else {
		/* Set agl_thread flags anyway. */
		thread_set_flags(&sai->sai_agl_thread, SVC_STOPPED);
	}
	ll_dir_chain_fini(&chain);
	spin_lock(&plli->lli_sa_lock);
	if (!sa_received_empty(sai)) {
		thread_set_flags(thread, SVC_STOPPING);
		spin_unlock(&plli->lli_sa_lock);

		/* To release the resources held by received entries. */
		while (!sa_received_empty(sai))
			ll_post_statahead(sai);

		spin_lock(&plli->lli_sa_lock);
	}
	thread_set_flags(thread, SVC_STOPPED);
	spin_unlock(&plli->lli_sa_lock);
	wake_up(&sai->sai_waitq);
	wake_up(&thread->t_ctl_waitq);
	ll_sai_put(sai);
	dput(parent);
	CDEBUG(D_READA, "statahead thread stopped: sai %p, parent %pd\n",
	       sai, parent);
	return rc;
}
/**
 * called in ll_file_release().
 */
void ll_stop_statahead(struct inode *dir, void *key)
{
	struct ll_inode_info *lli = ll_i2info(dir);

	if (unlikely(key == NULL))
		return;

	spin_lock(&lli->lli_sa_lock);
	if (lli->lli_opendir_key != key || lli->lli_opendir_pid == 0) {
		spin_unlock(&lli->lli_sa_lock);
		return;
	}

	lli->lli_opendir_key = NULL;

	if (lli->lli_sai) {
		struct l_wait_info lwi = { 0 };
		struct ptlrpc_thread *thread = &lli->lli_sai->sai_thread;

		if (!thread_is_stopped(thread)) {
			thread_set_flags(thread, SVC_STOPPING);
			spin_unlock(&lli->lli_sa_lock);
			wake_up(&thread->t_ctl_waitq);

			CDEBUG(D_READA, "stop statahead thread: sai %p pid %u\n",
			       lli->lli_sai, (unsigned int)thread->t_pid);
			l_wait_event(thread->t_ctl_waitq,
				     thread_is_stopped(thread),
				     &lwi);
		} else {
			spin_unlock(&lli->lli_sa_lock);
		}

		/*
		 * Put the ref which was held when first statahead_enter.
		 * It may not be the last ref; some statahead requests may
		 * still be in flight.
		 */
		ll_sai_put(lli->lli_sai);
	} else {
		lli->lli_opendir_pid = 0;
		spin_unlock(&lli->lli_sa_lock);
	}
}
enum {
	/**
	 * not first dirent, or is "."
	 */
	LS_NONE_FIRST_DE = 0,
	/**
	 * the first non-hidden dirent
	 */
	LS_FIRST_DE,
	/**
	 * the first hidden dirent, that is "."
	 */
	LS_FIRST_DOT_DE
};
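/*
 * Example: a plain "ls" makes the first returned dirent a non-hidden
 * name (LS_FIRST_DE), while "ls -a" returns "." first (LS_FIRST_DOT_DE);
 * the latter tells statahead to prefetch hidden entries as well.
 */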
static int is_first_dirent(struct inode *dir, struct dentry *dentry)
{
	struct ll_dir_chain chain;
	struct qstr *target = &dentry->d_name;
	struct page *page;
	__u64 pos = 0;
	int dot_de;
	int rc = LS_NONE_FIRST_DE;

	ll_dir_chain_init(&chain);
	page = ll_get_dir_page(dir, pos, &chain);

	while (1) {
		struct lu_dirpage *dp;
		struct lu_dirent  *ent;

		if (IS_ERR(page)) {
			struct ll_inode_info *lli = ll_i2info(dir);

			rc = PTR_ERR(page);
			CERROR("error reading dir "DFID" at %llu: [rc %d] [parent %u]\n",
			       PFID(ll_inode2fid(dir)), pos,
			       rc, lli->lli_opendir_pid);
			break;
		}

		dp = page_address(page);
		for (ent = lu_dirent_start(dp); ent != NULL;
		     ent = lu_dirent_next(ent)) {
			__u64 hash;
			int namelen;
			char *name;

			hash = le64_to_cpu(ent->lde_hash);
			/* The ll_get_dir_page() can return any page containing
			 * the given hash which may not be the start hash. */
			if (unlikely(hash < pos))
				continue;

			namelen = le16_to_cpu(ent->lde_namelen);
			if (unlikely(namelen == 0))
				/*
				 * skip dummy record.
				 */
				continue;

			name = ent->lde_name;
			if (name[0] == '.') {
				if (namelen == 1)
					/*
					 * skip "."
					 */
					continue;
				else if (name[1] == '.' && namelen == 2)
					/*
					 * skip ".."
					 */
					continue;
				else
					dot_de = 1;
			} else {
				dot_de = 0;
			}

			if (dot_de && target->name[0] != '.') {
				CDEBUG(D_READA, "%.*s skip hidden file %.*s\n",
				       target->len, target->name,
				       namelen, name);
				continue;
			}

			if (target->len != namelen ||
			    memcmp(target->name, name, namelen) != 0)
				rc = LS_NONE_FIRST_DE;
			else if (!dot_de)
				rc = LS_FIRST_DE;
			else
				rc = LS_FIRST_DOT_DE;

			ll_release_page(page, 0);
			goto out;
		}
		pos = le64_to_cpu(dp->ldp_hash_end);
		if (pos == MDS_DIR_END_OFF) {
			/*
			 * End of directory reached.
			 */
			ll_release_page(page, 0);
			break;
		} else if (1) {
			/*
			 * chain is exhausted.
			 * Normal case: continue to the next page.
			 */
			ll_release_page(page, le32_to_cpu(dp->ldp_flags) &
					      LDF_COLLIDE);
			page = ll_get_dir_page(dir, pos, &chain);
		} else {
			/*
			 * go into overflow page.
			 */
			LASSERT(le32_to_cpu(dp->ldp_flags) & LDF_COLLIDE);
			ll_release_page(page, 1);
		}
	}

out:
	ll_dir_chain_fini(&chain);
	return rc;
}
static void
ll_sai_unplug(struct ll_statahead_info *sai, struct ll_sa_entry *entry)
{
	struct ptlrpc_thread *thread = &sai->sai_thread;
	struct ll_sb_info *sbi = ll_i2sbi(sai->sai_inode);
	int hit;

	if (entry != NULL && entry->se_stat == SA_ENTRY_SUCC)
		hit = 1;
	else
		hit = 0;

	ll_sa_entry_fini(sai, entry);
	if (hit) {
		sai->sai_hit++;
		sai->sai_consecutive_miss = 0;
		sai->sai_max = min(2 * sai->sai_max, sbi->ll_sa_max);
	} else {
		struct ll_inode_info *lli = ll_i2info(sai->sai_inode);

		sai->sai_miss++;
		sai->sai_consecutive_miss++;
		if (sa_low_hit(sai) && thread_is_running(thread)) {
			atomic_inc(&sbi->ll_sa_wrong);
			CDEBUG(D_READA, "Statahead for dir " DFID " hit ratio too low: hit/miss %llu/%llu, sent/replied %llu/%llu, stopping statahead thread\n",
			       PFID(&lli->lli_fid), sai->sai_hit,
			       sai->sai_miss, sai->sai_sent,
			       sai->sai_replied);
			spin_lock(&lli->lli_sa_lock);
			if (!thread_is_stopped(thread))
				thread_set_flags(thread, SVC_STOPPING);
			spin_unlock(&lli->lli_sa_lock);
		}
	}

	if (!thread_is_stopped(thread))
		wake_up(&thread->t_ctl_waitq);
}
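/*
 * Example of the adaptive window: sai_max starts at LL_SA_RPC_MIN and is
 * doubled on every hit (capped at sbi->ll_sa_max), so a scanner that
 * keeps hitting quickly grows the number of in-flight async getattr
 * RPCs, while misses freeze the window and may stop the thread via
 * sa_low_hit().
 */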
/**
 * Start statahead thread if this is the first dir entry.
 * Otherwise if a thread is started already, wait until it is ahead of me.
 * \retval 1       -- find entry with lock in cache, the caller needs to do
 *		      lookup.
 * \retval 0       -- find entry in cache, but without lock, the caller needs
 *		      refresh from MDS.
 * \retval others  -- the caller need to process as non-statahead.
 */
int do_statahead_enter(struct inode *dir, struct dentry **dentryp,
		       int only_unplug)
{
	struct ll_inode_info *lli = ll_i2info(dir);
	struct ll_statahead_info *sai = lli->lli_sai;
	struct dentry *parent;
	struct ll_sa_entry *entry;
	struct ptlrpc_thread *thread;
	struct l_wait_info lwi = { 0 };
	int rc = 0;
	struct ll_inode_info *plli;

	LASSERT(lli->lli_opendir_pid == current_pid());

	if (sai) {
		thread = &sai->sai_thread;
		if (unlikely(thread_is_stopped(thread) &&
			     list_empty(&sai->sai_entries_stated))) {
			/* to release resource */
			ll_stop_statahead(dir, lli->lli_opendir_key);
			return -EAGAIN;
		}

		if ((*dentryp)->d_name.name[0] == '.') {
			if (sai->sai_ls_all ||
			    sai->sai_miss_hidden >= sai->sai_skip_hidden) {
				/*
				 * Hidden dentry is the first one, or statahead
				 * thread does not skip so many hidden dentries
				 * before "sai_ls_all" enabled as below.
				 */
			} else {
				if (!sai->sai_ls_all)
					/*
					 * It may be because the hidden dentry
					 * is not the first one, "sai_ls_all"
					 * was not set, so "ls -al" missed.
					 * Enable "sai_ls_all" for such a case.
					 */
					sai->sai_ls_all = 1;

				/*
				 * Such "getattr" has been skipped before
				 * "sai_ls_all" enabled as above.
				 */
				sai->sai_miss_hidden++;
				return -EAGAIN;
			}
		}

		entry = ll_sa_entry_get_byname(sai, &(*dentryp)->d_name);
		if (entry == NULL || only_unplug) {
			ll_sai_unplug(sai, entry);
			return entry ? 1 : -EAGAIN;
		}

		if (!ll_sa_entry_stated(entry)) {
			sai->sai_index_wait = entry->se_index;
			lwi = LWI_TIMEOUT_INTR(cfs_time_seconds(30), NULL,
					       LWI_ON_SIGNAL_NOOP, NULL);
			rc = l_wait_event(sai->sai_waitq,
					  ll_sa_entry_stated(entry) ||
					  thread_is_stopped(thread),
					  &lwi);
			if (rc < 0) {
				ll_sai_unplug(sai, entry);
				return -EAGAIN;
			}
		}

		if (entry->se_stat == SA_ENTRY_SUCC &&
		    entry->se_inode != NULL) {
			struct inode *inode = entry->se_inode;
			struct lookup_intent it = { .it_op = IT_GETATTR,
						    .d.lustre.it_lock_handle =
						     entry->se_handle };
			__u64 bits;

			rc = md_revalidate_lock(ll_i2mdexp(dir), &it,
						ll_inode2fid(inode), &bits);
			if (rc == 1) {
				if (d_inode(*dentryp) == NULL) {
					struct dentry *alias;

					alias = ll_splice_alias(inode,
								*dentryp);
					if (IS_ERR(alias)) {
						ll_sai_unplug(sai, entry);
						return PTR_ERR(alias);
					}
					*dentryp = alias;
				} else if (d_inode(*dentryp) != inode) {
					/* revalidate, but inode is recreated */
					CDEBUG(D_READA,
					       "stale dentry %pd inode %lu/%u, statahead inode %lu/%u\n",
					       *dentryp,
					       d_inode(*dentryp)->i_ino,
					       d_inode(*dentryp)->i_generation,
					       inode->i_ino,
					       inode->i_generation);
					ll_sai_unplug(sai, entry);
					return -ESTALE;
				} else {
					iput(inode);
				}
				entry->se_inode = NULL;

				if ((bits & MDS_INODELOCK_LOOKUP) &&
				    d_lustre_invalid(*dentryp))
					d_lustre_revalidate(*dentryp);
				ll_intent_release(&it);
			}
		}

		ll_sai_unplug(sai, entry);
		return rc;
	}

	/* I am the "lli_opendir_pid" owner, only me can set "lli_sai". */
	rc = is_first_dirent(dir, *dentryp);
	if (rc == LS_NONE_FIRST_DE) {
		/* It is not "ls -{a}l" operation, no need statahead for it. */
		rc = -EAGAIN;
		goto out;
	}

	sai = ll_sai_alloc();
	if (sai == NULL) {
		rc = -ENOMEM;
		goto out;
	}

	sai->sai_ls_all = (rc == LS_FIRST_DOT_DE);
	sai->sai_inode = igrab(dir);
	if (unlikely(sai->sai_inode == NULL)) {
		CWARN("Do not start stat ahead on dying inode "DFID"\n",
		      PFID(&lli->lli_fid));
		rc = -ESTALE;
		goto out;
	}

	/* get parent reference count here, and put it in ll_statahead_thread */
	parent = dget((*dentryp)->d_parent);
	if (unlikely(sai->sai_inode != d_inode(parent))) {
		struct ll_inode_info *nlli = ll_i2info(d_inode(parent));

		CWARN("Race condition, someone changed %pd just now: old parent "DFID", new parent "DFID"\n",
		      *dentryp,
		      PFID(&lli->lli_fid), PFID(&nlli->lli_fid));
		dput(parent);
		iput(sai->sai_inode);
		rc = -EAGAIN;
		goto out;
	}

	CDEBUG(D_READA, "start statahead thread: sai %p, parent %pd\n",
	       sai, parent);

	/* The sai buffer already has one reference taken at allocation time,
	 * but as soon as we expose the sai by attaching it to the lli that
	 * default reference can be dropped by another thread calling
	 * ll_stop_statahead. We need to take a local reference to protect
	 * the sai buffer while we intend to access it. */
	ll_sai_get(sai);
	lli->lli_sai = sai;

	plli = ll_i2info(d_inode(parent));
	rc = PTR_ERR(kthread_run(ll_statahead_thread, parent,
				 "ll_sa_%u", plli->lli_opendir_pid));
	thread = &sai->sai_thread;
	if (IS_ERR_VALUE(rc)) {
		CERROR("can't start ll_sa thread, rc: %d\n", rc);
		dput(parent);
		lli->lli_opendir_key = NULL;
		thread_set_flags(thread, SVC_STOPPED);
		thread_set_flags(&sai->sai_agl_thread, SVC_STOPPED);
		/* Drop both our own local reference and the default
		 * reference from allocation time. */
		ll_sai_put(sai);
		ll_sai_put(sai);
		LASSERT(lli->lli_sai == NULL);
		return -EAGAIN;
	}

	l_wait_event(thread->t_ctl_waitq,
		     thread_is_running(thread) || thread_is_stopped(thread),
		     &lwi);
	ll_sai_put(sai);

	/*
	 * We don't stat-ahead for the first dirent since we are already in
	 * lookup.
	 */
	return -EAGAIN;

out:
	if (sai != NULL)
		OBD_FREE_PTR(sai);
	spin_lock(&lli->lli_sa_lock);
	lli->lli_opendir_key = NULL;
	lli->lli_opendir_pid = 0;
	spin_unlock(&lli->lli_sa_lock);
	return rc;
}
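/*
 * Usage sketch (assuming the usual llite call path): a process doing
 * "ls -l" opens the directory, then each per-name lookup or revalidate
 * ends up in do_statahead_enter(). The first call spawns the
 * "ll_sa_<pid>" thread; later calls consume entries from the statahead
 * cache, returning 1 (entry with lock), 0 (entry, refresh from MDS) or
 * a negative value (fall back to normal lookup).
 */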