/*
 *  pNFS functions to call and manage layout drivers.
 *
 *  Copyright (c) 2002 [year of first publication]
 *  The Regents of the University of Michigan
 *  All Rights Reserved
 *
 *  Dean Hildebrand <dhildebz@umich.edu>
 *
 *  Permission is granted to use, copy, create derivative works, and
 *  redistribute this software and such derivative works for any purpose,
 *  so long as the name of the University of Michigan is not used in
 *  any advertising or publicity pertaining to the use or distribution
 *  of this software without specific, written prior authorization. If
 *  the above copyright notice or any other identification of the
 *  University of Michigan is included in any copy of any portion of
 *  this software, then the disclaimer below must also be included.
 *
 *  This software is provided as is, without representation or warranty
 *  of any kind either express or implied, including without limitation
 *  the implied warranties of merchantability, fitness for a particular
 *  purpose, or noninfringement.  The Regents of the University of
 *  Michigan shall not be liable for any damages, including special,
 *  indirect, incidental, or consequential damages, with respect to any
 *  claim arising out of or in connection with the use of the software,
 *  even if it has been or is hereafter advised of the possibility of
 *  such damages.
 */
#include <linux/nfs_fs.h>
#include "internal.h"
#include "pnfs.h"

#define NFSDBG_FACILITY		NFSDBG_PNFS
/* Locking:
 *
 * pnfs_spinlock:
 *      protects pnfs_modules_tbl.
 */
static DEFINE_SPINLOCK(pnfs_spinlock);

/*
 * pnfs_modules_tbl holds all pnfs modules
 */
static LIST_HEAD(pnfs_modules_tbl);
/* Return the registered pnfs layout driver module matching given id */
static struct pnfs_layoutdriver_type *
find_pnfs_driver_locked(u32 id)
{
	struct pnfs_layoutdriver_type *local;

	list_for_each_entry(local, &pnfs_modules_tbl, pnfs_tblid)
		if (local->id == id)
			goto out;
	local = NULL;
out:
	dprintk("%s: Searching for id %u, found %p\n", __func__, id, local);
	return local;
}
static struct pnfs_layoutdriver_type *
find_pnfs_driver(u32 id)
{
	struct pnfs_layoutdriver_type *local;

	spin_lock(&pnfs_spinlock);
	local = find_pnfs_driver_locked(id);
	spin_unlock(&pnfs_spinlock);
	return local;
}
void
unset_pnfs_layoutdriver(struct nfs_server *nfss)
{
	if (nfss->pnfs_curr_ld) {
		nfss->pnfs_curr_ld->clear_layoutdriver(nfss);
		module_put(nfss->pnfs_curr_ld->owner);
	}
	nfss->pnfs_curr_ld = NULL;
}
/*
 * Try to set the server's pnfs module to the pnfs layout type specified by id.
 * Currently only one pNFS layout driver per filesystem is supported.
 *
 * @id layout type. Zero (illegal layout type) indicates pNFS not in use.
 */
void
set_pnfs_layoutdriver(struct nfs_server *server, u32 id)
{
	struct pnfs_layoutdriver_type *ld_type = NULL;

	if (id == 0)
		goto out_no_driver;
	if (!(server->nfs_client->cl_exchange_flags &
	    (EXCHGID4_FLAG_USE_NON_PNFS | EXCHGID4_FLAG_USE_PNFS_MDS))) {
		printk(KERN_ERR "%s: id %u cl_exchange_flags 0x%x\n", __func__,
		       id, server->nfs_client->cl_exchange_flags);
		goto out_no_driver;
	}
	ld_type = find_pnfs_driver(id);
	if (!ld_type) {
		request_module("%s-%u", LAYOUT_NFSV4_1_MODULE_PREFIX, id);
		ld_type = find_pnfs_driver(id);
		if (!ld_type) {
			dprintk("%s: No pNFS module found for %u.\n",
				__func__, id);
			goto out_no_driver;
		}
	}
	if (!try_module_get(ld_type->owner)) {
		dprintk("%s: Could not grab reference on module\n", __func__);
		goto out_no_driver;
	}
	server->pnfs_curr_ld = ld_type;
	if (ld_type->set_layoutdriver(server)) {
		printk(KERN_ERR
		       "%s: Error initializing mount point for layout driver %u.\n",
		       __func__, id);
		module_put(ld_type->owner);
		goto out_no_driver;
	}
	dprintk("%s: pNFS module for %u set\n", __func__, id);
	return;

out_no_driver:
	dprintk("%s: Using NFSv4 I/O\n", __func__);
	server->pnfs_curr_ld = NULL;
}
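/*
 * Note (added for illustration): with a module prefix such as
 * "nfs-layouttype4" for LAYOUT_NFSV4_1_MODULE_PREFIX, the request_module()
 * call above asks for an alias like "nfs-layouttype4-1" for the files
 * layout (id 1), which a layout driver module is expected to declare with
 * MODULE_ALIAS().
 */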
int
pnfs_register_layoutdriver(struct pnfs_layoutdriver_type *ld_type)
{
	int status = -EINVAL;
	struct pnfs_layoutdriver_type *tmp;

	if (ld_type->id == 0) {
		printk(KERN_ERR "%s id 0 is reserved\n", __func__);
		return status;
	}
	if (!ld_type->alloc_lseg || !ld_type->free_lseg) {
		printk(KERN_ERR "%s Layout driver must provide "
		       "alloc_lseg and free_lseg.\n", __func__);
		return status;
	}

	spin_lock(&pnfs_spinlock);
	tmp = find_pnfs_driver_locked(ld_type->id);
	if (!tmp) {
		list_add(&ld_type->pnfs_tblid, &pnfs_modules_tbl);
		status = 0;
		dprintk("%s Registering id:%u name:%s\n", __func__, ld_type->id,
			ld_type->name);
	} else {
		printk(KERN_ERR "%s Module with id %d already loaded!\n",
		       __func__, ld_type->id);
	}
	spin_unlock(&pnfs_spinlock);

	return status;
}
EXPORT_SYMBOL_GPL(pnfs_register_layoutdriver);
void
pnfs_unregister_layoutdriver(struct pnfs_layoutdriver_type *ld_type)
{
	dprintk("%s Deregistering id:%u\n", __func__, ld_type->id);
	spin_lock(&pnfs_spinlock);
	list_del(&ld_type->pnfs_tblid);
	spin_unlock(&pnfs_spinlock);
}
EXPORT_SYMBOL_GPL(pnfs_unregister_layoutdriver);
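/*
 * Example (sketch, not part of the original file): a layout driver module
 * would typically register itself at module load time.  The example_*
 * structure contents and callbacks below are hypothetical; only the
 * pnfs_register_layoutdriver()/pnfs_unregister_layoutdriver() calls are
 * the real entry points defined above.
 */
#if 0
static struct pnfs_layoutdriver_type example_layoutdriver = {
	.id			= LAYOUT_NFSV4_1_FILES,
	.name			= "example-layoutdriver",
	.owner			= THIS_MODULE,
	.set_layoutdriver	= example_set_layoutdriver,	/* hypothetical */
	.clear_layoutdriver	= example_clear_layoutdriver,	/* hypothetical */
	.alloc_lseg		= example_alloc_lseg,		/* hypothetical */
	.free_lseg		= example_free_lseg,		/* hypothetical */
};

static int __init example_layoutdriver_init(void)
{
	return pnfs_register_layoutdriver(&example_layoutdriver);
}

static void __exit example_layoutdriver_exit(void)
{
	pnfs_unregister_layoutdriver(&example_layoutdriver);
}
#endif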
/*
 * pNFS client layout cache
 */

static void
get_layout_hdr_locked(struct pnfs_layout_hdr *lo)
{
	assert_spin_locked(&lo->plh_inode->i_lock);
	lo->plh_refcount++;
}
static void
put_layout_hdr_locked(struct pnfs_layout_hdr *lo)
{
	assert_spin_locked(&lo->plh_inode->i_lock);
	BUG_ON(lo->plh_refcount == 0);

	lo->plh_refcount--;
	if (!lo->plh_refcount) {
		dprintk("%s: freeing layout cache %p\n", __func__, lo);
		BUG_ON(!list_empty(&lo->plh_layouts));
		NFS_I(lo->plh_inode)->layout = NULL;
		kfree(lo);
	}
}
void
put_layout_hdr(struct inode *inode)
{
	spin_lock(&inode->i_lock);
	put_layout_hdr_locked(NFS_I(inode)->layout);
	spin_unlock(&inode->i_lock);
}
static void
init_lseg(struct pnfs_layout_hdr *lo, struct pnfs_layout_segment *lseg)
{
	INIT_LIST_HEAD(&lseg->pls_list);
	atomic_set(&lseg->pls_refcount, 1);
	set_bit(NFS_LSEG_VALID, &lseg->pls_flags);
	lseg->pls_layout = lo;
}
static void free_lseg(struct pnfs_layout_segment *lseg)
{
	struct inode *ino = lseg->pls_layout->plh_inode;

	NFS_SERVER(ino)->pnfs_curr_ld->free_lseg(lseg);
	/* Matched by get_layout_hdr in pnfs_insert_layout */
	put_layout_hdr(ino);
}
/* The use of tmp_list is necessary because pnfs_curr_ld->free_lseg
 * could sleep, so must be called outside of the lock.
 * Returns 1 if object was removed, otherwise return 0.
 */
static int
put_lseg_locked(struct pnfs_layout_segment *lseg,
		struct list_head *tmp_list)
{
	dprintk("%s: lseg %p ref %d valid %d\n", __func__, lseg,
		atomic_read(&lseg->pls_refcount),
		test_bit(NFS_LSEG_VALID, &lseg->pls_flags));
	if (atomic_dec_and_test(&lseg->pls_refcount)) {
		struct inode *ino = lseg->pls_layout->plh_inode;

		BUG_ON(test_bit(NFS_LSEG_VALID, &lseg->pls_flags));
		list_del(&lseg->pls_list);
		if (list_empty(&lseg->pls_layout->plh_segs)) {
			struct nfs_client *clp;

			clp = NFS_SERVER(ino)->nfs_client;
			spin_lock(&clp->cl_lock);
			/* List does not take a reference, so no need for put here */
			list_del_init(&lseg->pls_layout->plh_layouts);
			spin_unlock(&clp->cl_lock);
		}
		list_add(&lseg->pls_list, tmp_list);
		return 1;
	}
	return 0;
}
static bool
should_free_lseg(u32 lseg_iomode, u32 recall_iomode)
{
	return (recall_iomode == IOMODE_ANY ||
		lseg_iomode == recall_iomode);
}
/* Returns 1 if lseg is removed from list, 0 otherwise */
static int mark_lseg_invalid(struct pnfs_layout_segment *lseg,
			     struct list_head *tmp_list)
{
	int rv = 0;

	if (test_and_clear_bit(NFS_LSEG_VALID, &lseg->pls_flags)) {
		/* Remove the reference keeping the lseg in the
		 * list.  It will now be removed when all
		 * outstanding io is finished.
		 */
		rv = put_lseg_locked(lseg, tmp_list);
	}
	return rv;
}
/* Returns count of number of matching invalid lsegs remaining in list
 * after call.
 */
static int
mark_matching_lsegs_invalid(struct pnfs_layout_hdr *lo,
			    struct list_head *tmp_list,
			    u32 iomode)
{
	struct pnfs_layout_segment *lseg, *next;
	int invalid = 0, removed = 0;

	dprintk("%s:Begin lo %p\n", __func__, lo);

	list_for_each_entry_safe(lseg, next, &lo->plh_segs, pls_list)
		if (should_free_lseg(lseg->pls_range.iomode, iomode)) {
			dprintk("%s: freeing lseg %p iomode %d "
				"offset %llu length %llu\n", __func__,
				lseg, lseg->pls_range.iomode, lseg->pls_range.offset,
				lseg->pls_range.length);
			invalid++;
			removed += mark_lseg_invalid(lseg, tmp_list);
		}
	dprintk("%s:Return %i\n", __func__, invalid - removed);
	return invalid - removed;
}
static void
pnfs_free_lseg_list(struct list_head *free_me)
{
	struct pnfs_layout_segment *lseg, *tmp;

	list_for_each_entry_safe(lseg, tmp, free_me, pls_list) {
		list_del(&lseg->pls_list);
		free_lseg(lseg);
	}
}
void
pnfs_destroy_layout(struct nfs_inode *nfsi)
{
	struct pnfs_layout_hdr *lo;
	LIST_HEAD(tmp_list);

	spin_lock(&nfsi->vfs_inode.i_lock);
	lo = nfsi->layout;
	if (lo) {
		set_bit(NFS_LAYOUT_DESTROYED, &nfsi->layout->plh_flags);
		mark_matching_lsegs_invalid(lo, &tmp_list, IOMODE_ANY);
		/* Matched by refcount set to 1 in alloc_init_layout_hdr */
		put_layout_hdr_locked(lo);
	}
	spin_unlock(&nfsi->vfs_inode.i_lock);
	pnfs_free_lseg_list(&tmp_list);
}
/*
 * Called by the state manager to remove all layouts established under an
 * expired lease.
 */
void
pnfs_destroy_all_layouts(struct nfs_client *clp)
{
	struct pnfs_layout_hdr *lo;
	LIST_HEAD(tmp_list);

	spin_lock(&clp->cl_lock);
	list_splice_init(&clp->cl_layouts, &tmp_list);
	spin_unlock(&clp->cl_lock);

	while (!list_empty(&tmp_list)) {
		lo = list_entry(tmp_list.next, struct pnfs_layout_hdr,
				plh_layouts);
		dprintk("%s freeing layout for inode %lu\n", __func__,
			lo->plh_inode->i_ino);
		pnfs_destroy_layout(NFS_I(lo->plh_inode));
	}
}
/* update lo->plh_stateid with new if it is more recent */
static void
pnfs_set_layout_stateid(struct pnfs_layout_hdr *lo,
			const nfs4_stateid *new)
{
	u32 oldseq, newseq;

	oldseq = be32_to_cpu(lo->plh_stateid.stateid.seqid);
	newseq = be32_to_cpu(new->stateid.seqid);
	if ((int)(newseq - oldseq) > 0)
		memcpy(&lo->plh_stateid, &new->stateid, sizeof(new->stateid));
}
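/*
 * Illustration (added comment): the signed difference above handles seqid
 * wraparound.  E.g. oldseq = 0xffffffff and newseq = 1 gives
 * (int)(1 - 0xffffffff) == 2 > 0, so the new stateid is still treated as
 * more recent even though the sequence number wrapped.
 */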
void
pnfs_choose_layoutget_stateid(nfs4_stateid *dst, struct pnfs_layout_hdr *lo,
			      struct nfs4_state *open_state)
{
	int seq;

	dprintk("--> %s\n", __func__);
	spin_lock(&lo->plh_inode->i_lock);
	if (list_empty(&lo->plh_segs)) {
		/* No layout segments held yet: use the open stateid */
		do {
			seq = read_seqbegin(&open_state->seqlock);
			memcpy(dst->data, open_state->stateid.data,
			       sizeof(open_state->stateid.data));
		} while (read_seqretry(&open_state->seqlock, seq));
	} else
		memcpy(dst->data, lo->plh_stateid.data, sizeof(lo->plh_stateid.data));
	spin_unlock(&lo->plh_inode->i_lock);
	dprintk("<-- %s\n", __func__);
}
/*
 * Get layout from server.
 *    for now, assume that whole file layouts are requested.
 *    arg->offset: 0
 *    arg->length: all ones
 */
static struct pnfs_layout_segment *
send_layoutget(struct pnfs_layout_hdr *lo,
	   struct nfs_open_context *ctx,
	   u32 iomode)
{
	struct inode *ino = lo->plh_inode;
	struct nfs_server *server = NFS_SERVER(ino);
	struct nfs4_layoutget *lgp;
	struct pnfs_layout_segment *lseg = NULL;

	dprintk("--> %s\n", __func__);

	lgp = kzalloc(sizeof(*lgp), GFP_KERNEL);
	if (lgp == NULL) {
		put_layout_hdr(lo->plh_inode);
		return NULL;
	}
	lgp->args.minlength = NFS4_MAX_UINT64;
	lgp->args.maxcount = PNFS_LAYOUT_MAXSIZE;
	lgp->args.range.iomode = iomode;
	lgp->args.range.offset = 0;
	lgp->args.range.length = NFS4_MAX_UINT64;
	lgp->args.type = server->pnfs_curr_ld->id;
	lgp->args.inode = ino;
	lgp->args.ctx = get_nfs_open_context(ctx);
	lgp->lsegpp = &lseg;

	/* Synchronously retrieve layout information from server and
	 * store in lseg.
	 */
	nfs4_proc_layoutget(lgp);
	if (!lseg) {
		/* remember that LAYOUTGET failed and suspend trying */
		set_bit(lo_fail_bit(iomode), &lo->plh_flags);
	}
	return lseg;
}
/*
 * Compare two layout segments for sorting into layout cache.
 * We want to preferentially return RW over RO layouts, so ensure those
 * are seen first.
 */
static int
cmp_layout(u32 iomode1, u32 iomode2)
{
	/* read > read/write */
	return (int)(iomode2 == IOMODE_READ) - (int)(iomode1 == IOMODE_READ);
}
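/*
 * Illustration (added comment): cmp_layout(IOMODE_RW, IOMODE_READ)
 * evaluates to 1 - 0 = 1, so in pnfs_insert_layout() below a new READ
 * segment skips past an existing RW segment and RW segments stay at the
 * front of plh_segs.
 */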
static void
pnfs_insert_layout(struct pnfs_layout_hdr *lo,
		   struct pnfs_layout_segment *lseg)
{
	struct pnfs_layout_segment *lp;
	int found = 0;

	dprintk("%s:Begin\n", __func__);

	assert_spin_locked(&lo->plh_inode->i_lock);
	if (list_empty(&lo->plh_segs)) {
		struct nfs_client *clp = NFS_SERVER(lo->plh_inode)->nfs_client;

		spin_lock(&clp->cl_lock);
		BUG_ON(!list_empty(&lo->plh_layouts));
		list_add_tail(&lo->plh_layouts, &clp->cl_layouts);
		spin_unlock(&clp->cl_lock);
	}
	list_for_each_entry(lp, &lo->plh_segs, pls_list) {
		if (cmp_layout(lp->pls_range.iomode, lseg->pls_range.iomode) > 0)
			continue;
		list_add_tail(&lseg->pls_list, &lp->pls_list);
		dprintk("%s: inserted lseg %p "
			"iomode %d offset %llu length %llu before "
			"lp %p iomode %d offset %llu length %llu\n",
			__func__, lseg, lseg->pls_range.iomode,
			lseg->pls_range.offset, lseg->pls_range.length,
			lp, lp->pls_range.iomode, lp->pls_range.offset,
			lp->pls_range.length);
		found = 1;
		break;
	}
	if (!found) {
		list_add_tail(&lseg->pls_list, &lo->plh_segs);
		dprintk("%s: inserted lseg %p "
			"iomode %d offset %llu length %llu at tail\n",
			__func__, lseg, lseg->pls_range.iomode,
			lseg->pls_range.offset, lseg->pls_range.length);
	}
	get_layout_hdr_locked(lo);

	dprintk("%s:Return\n", __func__);
}
static struct pnfs_layout_hdr *
alloc_init_layout_hdr(struct inode *ino)
{
	struct pnfs_layout_hdr *lo;

	lo = kzalloc(sizeof(struct pnfs_layout_hdr), GFP_KERNEL);
	if (!lo)
		return NULL;
	lo->plh_refcount = 1;
	INIT_LIST_HEAD(&lo->plh_layouts);
	INIT_LIST_HEAD(&lo->plh_segs);
	lo->plh_inode = ino;
	return lo;
}
static struct pnfs_layout_hdr *
pnfs_find_alloc_layout(struct inode *ino)
{
	struct nfs_inode *nfsi = NFS_I(ino);
	struct pnfs_layout_hdr *new = NULL;

	dprintk("%s Begin ino=%p layout=%p\n", __func__, ino, nfsi->layout);

	assert_spin_locked(&ino->i_lock);
	if (nfsi->layout) {
		if (test_bit(NFS_LAYOUT_DESTROYED, &nfsi->layout->plh_flags))
			return NULL;
		else
			return nfsi->layout;
	}
	spin_unlock(&ino->i_lock);
	new = alloc_init_layout_hdr(ino);
	spin_lock(&ino->i_lock);

	if (likely(nfsi->layout == NULL))	/* Won the race? */
		nfsi->layout = new;
	else
		kfree(new);
	return nfsi->layout;
}
/*
 * iomode matching rules:
 * a READ request is satisfied by either a READ or an RW layout segment;
 * an RW request is satisfied only by an RW layout segment.
 */
static int
is_matching_lseg(struct pnfs_layout_segment *lseg, u32 iomode)
{
	return (iomode != IOMODE_RW || lseg->pls_range.iomode == IOMODE_RW);
}
/*
 * lookup range in layout
 */
static struct pnfs_layout_segment *
pnfs_has_layout(struct pnfs_layout_hdr *lo, u32 iomode)
{
	struct pnfs_layout_segment *lseg, *ret = NULL;

	dprintk("%s:Begin\n", __func__);

	assert_spin_locked(&lo->plh_inode->i_lock);
	list_for_each_entry(lseg, &lo->plh_segs, pls_list) {
		if (test_bit(NFS_LSEG_VALID, &lseg->pls_flags) &&
		    is_matching_lseg(lseg, iomode)) {
			ret = lseg;
			break;
		}
		/* List is sorted RW first, so stop once a match is impossible */
		if (cmp_layout(iomode, lseg->pls_range.iomode) > 0)
			break;
	}

	dprintk("%s:Return lseg %p ref %d\n",
		__func__, ret, ret ? atomic_read(&ret->pls_refcount) : 0);
	return ret;
}
/*
 * Layout segment is retrieved from the server if not cached.
 * The appropriate layout segment is referenced and returned to the caller.
 */
struct pnfs_layout_segment *
pnfs_update_layout(struct inode *ino,
		   struct nfs_open_context *ctx,
		   enum pnfs_iomode iomode)
{
	struct nfs_inode *nfsi = NFS_I(ino);
	struct pnfs_layout_hdr *lo;
	struct pnfs_layout_segment *lseg = NULL;

	if (!pnfs_enabled_sb(NFS_SERVER(ino)))
		return NULL;
	spin_lock(&ino->i_lock);
	lo = pnfs_find_alloc_layout(ino);
	if (lo == NULL) {
		dprintk("%s ERROR: can't get pnfs_layout_hdr\n", __func__);
		goto out_unlock;
	}

	/* Check to see if the layout for the given range already exists */
	lseg = pnfs_has_layout(lo, iomode);
	if (lseg) {
		dprintk("%s: Using cached lseg %p for iomode %d)\n",
			__func__, lseg, iomode);
		goto out_unlock;
	}

	/* if LAYOUTGET already failed once we don't try again */
	if (test_bit(lo_fail_bit(iomode), &nfsi->layout->plh_flags))
		goto out_unlock;

	get_layout_hdr_locked(lo); /* Matched in nfs4_layoutget_release */
	spin_unlock(&ino->i_lock);

	lseg = send_layoutget(lo, ctx, iomode);
out:
	dprintk("%s end, state 0x%lx lseg %p\n", __func__,
		nfsi->layout->plh_flags, lseg);
	return lseg;
out_unlock:
	spin_unlock(&ino->i_lock);
	goto out;
}
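/*
 * Example (sketch, not part of the original file): a read or write path
 * would typically look up a layout segment like this before deciding
 * between pNFS I/O to the data servers and regular I/O through the MDS.
 * The inode and ctx variables are assumed to come from the caller's
 * in-flight request.
 */
#if 0
	struct pnfs_layout_segment *lseg;

	lseg = pnfs_update_layout(inode, ctx, IOMODE_READ);
	if (lseg == NULL) {
		/* No layout available: fall back to ordinary NFSv4 I/O */
	}
#endif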
int
pnfs_layout_process(struct nfs4_layoutget *lgp)
{
	struct pnfs_layout_hdr *lo = NFS_I(lgp->args.inode)->layout;
	struct nfs4_layoutget_res *res = &lgp->res;
	struct pnfs_layout_segment *lseg;
	struct inode *ino = lo->plh_inode;
	int status = 0;

	/* Inject layout blob into I/O device driver */
	lseg = NFS_SERVER(ino)->pnfs_curr_ld->alloc_lseg(lo, res);
	if (!lseg || IS_ERR(lseg)) {
		if (!lseg)
			status = -ENOMEM;
		else
			status = PTR_ERR(lseg);
		dprintk("%s: Could not allocate layout: error %d\n",
			__func__, status);
		goto out;
	}

	spin_lock(&ino->i_lock);
	init_lseg(lo, lseg);
	lseg->pls_range = res->range;
	*lgp->lsegpp = lseg;
	pnfs_insert_layout(lo, lseg);

	/* Done processing layoutget. Set the layout stateid */
	pnfs_set_layout_stateid(lo, &res->stateid);
	spin_unlock(&ino->i_lock);
out:
	return status;
}
/*
 * Device ID cache. Currently supports one layout type per struct nfs_client.
 * Add layout type to the lookup key to expand to support multiple types.
 */
int
pnfs_alloc_init_deviceid_cache(struct nfs_client *clp,
			 void (*free_callback)(struct pnfs_deviceid_node *))
{
	struct pnfs_deviceid_cache *c;

	c = kzalloc(sizeof(struct pnfs_deviceid_cache), GFP_KERNEL);
	if (!c)
		return -ENOMEM;
	spin_lock(&clp->cl_lock);
	if (clp->cl_devid_cache != NULL) {
		atomic_inc(&clp->cl_devid_cache->dc_ref);
		dprintk("%s [kref [%d]]\n", __func__,
			atomic_read(&clp->cl_devid_cache->dc_ref));
		kfree(c);
	} else {
		/* kzalloc initializes hlists */
		spin_lock_init(&c->dc_lock);
		atomic_set(&c->dc_ref, 1);
		c->dc_free_callback = free_callback;
		clp->cl_devid_cache = c;
		dprintk("%s [new]\n", __func__);
	}
	spin_unlock(&clp->cl_lock);
	return 0;
}
EXPORT_SYMBOL_GPL(pnfs_alloc_init_deviceid_cache);
/*
 * Called from pnfs_layoutdriver_type->free_lseg
 * last layout segment reference frees deviceid
 */
void
pnfs_put_deviceid(struct pnfs_deviceid_cache *c,
		  struct pnfs_deviceid_node *devid)
{
	struct nfs4_deviceid *id = &devid->de_id;
	struct pnfs_deviceid_node *d;
	struct hlist_node *n;
	long h = nfs4_deviceid_hash(id);

	dprintk("%s [%d]\n", __func__, atomic_read(&devid->de_ref));
	if (!atomic_dec_and_lock(&devid->de_ref, &c->dc_lock))
		return;

	hlist_for_each_entry_rcu(d, n, &c->dc_deviceids[h], de_node)
		if (!memcmp(&d->de_id, id, sizeof(*id))) {
			hlist_del_rcu(&d->de_node);
			spin_unlock(&c->dc_lock);
			synchronize_rcu();
			c->dc_free_callback(devid);
			return;
		}
	spin_unlock(&c->dc_lock);
	/* Why wasn't it found in the list? */
	BUG();
}
EXPORT_SYMBOL_GPL(pnfs_put_deviceid);
/* Find and reference a deviceid */
struct pnfs_deviceid_node *
pnfs_find_get_deviceid(struct pnfs_deviceid_cache *c, struct nfs4_deviceid *id)
{
	struct pnfs_deviceid_node *d;
	struct hlist_node *n;
	long hash = nfs4_deviceid_hash(id);

	dprintk("--> %s hash %ld\n", __func__, hash);
	rcu_read_lock();
	hlist_for_each_entry_rcu(d, n, &c->dc_deviceids[hash], de_node) {
		if (!memcmp(&d->de_id, id, sizeof(*id))) {
			if (!atomic_inc_not_zero(&d->de_ref)) {
				/* Entry is being freed; treat as a miss */
				break;
			}
			rcu_read_unlock();
			return d;
		}
	}
	rcu_read_unlock();
	return NULL;
}
EXPORT_SYMBOL_GPL(pnfs_find_get_deviceid);
/*
 * Add a deviceid to the cache.
 * GETDEVICEINFOs for same deviceid can race. If deviceid is found, discard new
 */
struct pnfs_deviceid_node *
pnfs_add_deviceid(struct pnfs_deviceid_cache *c, struct pnfs_deviceid_node *new)
{
	struct pnfs_deviceid_node *d;
	long hash = nfs4_deviceid_hash(&new->de_id);

	dprintk("--> %s hash %ld\n", __func__, hash);
	spin_lock(&c->dc_lock);
	d = pnfs_find_get_deviceid(c, &new->de_id);
	if (d) {
		spin_unlock(&c->dc_lock);
		dprintk("%s [discard]\n", __func__);
		c->dc_free_callback(new);
		return d;
	}
	INIT_HLIST_NODE(&new->de_node);
	atomic_set(&new->de_ref, 1);
	hlist_add_head_rcu(&new->de_node, &c->dc_deviceids[hash]);
	spin_unlock(&c->dc_lock);
	dprintk("%s [new]\n", __func__);
	return new;
}
EXPORT_SYMBOL_GPL(pnfs_add_deviceid);
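/*
 * Example (sketch, not part of the original file): a layout driver decoding
 * a device ID from a layout would typically consult the cache first and only
 * issue GETDEVICEINFO on a miss.  example_getdeviceinfo() is hypothetical;
 * only the pnfs_*_deviceid() helpers above are the real API.
 */
#if 0
	struct pnfs_deviceid_node *node;

	node = pnfs_find_get_deviceid(clp->cl_devid_cache, &dev_id);
	if (node == NULL) {
		node = example_getdeviceinfo(server, &dev_id); /* hypothetical */
		if (node)
			node = pnfs_add_deviceid(clp->cl_devid_cache, node);
	}
	/* ... later, when the last lseg using it goes away ... */
	pnfs_put_deviceid(clp->cl_devid_cache, node);
#endif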
void
pnfs_put_deviceid_cache(struct nfs_client *clp)
{
	struct pnfs_deviceid_cache *local = clp->cl_devid_cache;

	dprintk("--> %s cl_devid_cache %p\n", __func__, clp->cl_devid_cache);
	if (atomic_dec_and_lock(&local->dc_ref, &clp->cl_lock)) {
		int i;
		/* Verify cache is empty */
		for (i = 0; i < NFS4_DEVICE_ID_HASH_SIZE; i++)
			BUG_ON(!hlist_empty(&local->dc_deviceids[i]));
		clp->cl_devid_cache = NULL;
		spin_unlock(&clp->cl_lock);
		kfree(local);
	}
}
EXPORT_SYMBOL_GPL(pnfs_put_deviceid_cache);
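/*
 * Note (added comment): each successful pnfs_alloc_init_deviceid_cache()
 * call takes a reference on the per-client cache and must be balanced by a
 * pnfs_put_deviceid_cache() call; the cache itself is freed only when the
 * last reference is dropped and all device IDs have been released.
 */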