/*
 * Copyright (c) 2014 Christoph Hellwig.
 */
#include <linux/blkdev.h>
#include <linux/kmod.h>
#include <linux/file.h>
#include <linux/jhash.h>
#include <linux/sched.h>
#include <linux/sunrpc/addr.h>

#include "pnfs.h"
#include "netns.h"
#include "trace.h"

#define NFSDDBG_FACILITY		NFSDDBG_PNFS

struct nfs4_layout {
	struct list_head		lo_perstate;
	struct nfs4_layout_stateid	*lo_state;
	struct nfsd4_layout_seg		lo_seg;
};

static struct kmem_cache *nfs4_layout_cache;
static struct kmem_cache *nfs4_layout_stateid_cache;

static const struct nfsd4_callback_ops nfsd4_cb_layout_ops;
static const struct lock_manager_operations nfsd4_layouts_lm_ops;

const struct nfsd4_layout_ops *nfsd4_layout_ops[LAYOUT_TYPE_MAX] = {
#ifdef CONFIG_NFSD_BLOCKLAYOUT
	[LAYOUT_BLOCK_VOLUME]	= &bl_layout_ops,
#endif
#ifdef CONFIG_NFSD_SCSILAYOUT
	[LAYOUT_SCSI]		= &scsi_layout_ops,
#endif
};
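
/*
 * The ops table is indexed by the layout type advertised to clients;
 * entries not set up by the config options above stay NULL, marking
 * layout types this server build does not support.
 */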

/* pNFS device ID to export fsid mapping */
#define DEVID_HASH_BITS	8
#define DEVID_HASH_SIZE	(1 << DEVID_HASH_BITS)
#define DEVID_HASH_MASK	(DEVID_HASH_SIZE - 1)
static u64 nfsd_devid_seq = 1;
static struct list_head nfsd_devid_hash[DEVID_HASH_SIZE];
static DEFINE_SPINLOCK(nfsd_devid_lock);

static inline u32 devid_hashfn(u64 idx)
{
	return jhash_2words(idx, idx >> 32, 0) & DEVID_HASH_MASK;
}
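
/*
 * Device IDs are dense 64-bit sequence numbers (nfsd_devid_seq), hashed
 * into one of DEVID_HASH_SIZE (256) buckets by mixing the low and high
 * 32 bits with jhash_2words() and masking off DEVID_HASH_BITS.
 */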

static void
nfsd4_alloc_devid_map(const struct svc_fh *fhp)
{
	const struct knfsd_fh *fh = &fhp->fh_handle;
	size_t fsid_len = key_len(fh->fh_fsid_type);
	struct nfsd4_deviceid_map *map, *old;
	int i;

	map = kzalloc(sizeof(*map) + fsid_len, GFP_KERNEL);
	if (!map)
		return;

	map->fsid_type = fh->fh_fsid_type;
	memcpy(&map->fsid, fh->fh_fsid, fsid_len);

	spin_lock(&nfsd_devid_lock);
	if (fhp->fh_export->ex_devid_map)
		goto out_unlock;

	for (i = 0; i < DEVID_HASH_SIZE; i++) {
		list_for_each_entry(old, &nfsd_devid_hash[i], hash) {
			if (old->fsid_type != fh->fh_fsid_type)
				continue;
			if (memcmp(old->fsid, fh->fh_fsid,
					key_len(old->fsid_type)))
				continue;

			fhp->fh_export->ex_devid_map = old;
			goto out_unlock;
		}
	}

	map->idx = nfsd_devid_seq++;
	list_add_tail_rcu(&map->hash, &nfsd_devid_hash[devid_hashfn(map->idx)]);
	fhp->fh_export->ex_devid_map = map;
	map = NULL;

out_unlock:
	spin_unlock(&nfsd_devid_lock);
	kfree(map);
}

struct nfsd4_deviceid_map *
nfsd4_find_devid_map(int idx)
{
	struct nfsd4_deviceid_map *map, *ret = NULL;

	rcu_read_lock();
	list_for_each_entry_rcu(map, &nfsd_devid_hash[devid_hashfn(idx)], hash)
		if (map->idx == idx)
			ret = map;
	rcu_read_unlock();

	return ret;
}

int
nfsd4_set_deviceid(struct nfsd4_deviceid *id, const struct svc_fh *fhp,
		u32 device_generation)
{
	if (!fhp->fh_export->ex_devid_map) {
		nfsd4_alloc_devid_map(fhp);
		if (!fhp->fh_export->ex_devid_map)
			return -EOPNOTSUPP;
	}

	id->fsid_idx = fhp->fh_export->ex_devid_map->idx;
	id->generation = device_generation;
	id->pad = 0;
	return 0;
}
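
/*
 * The device ID handed to the client thus encodes the export's fsid
 * index plus a generation number; nfsd4_find_devid_map() reverses the
 * mapping when the client later asks for device information.
 */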

void nfsd4_setup_layout_type(struct svc_export *exp)
{
	struct super_block *sb = exp->ex_path.mnt->mnt_sb;

	if (!(exp->ex_flags & NFSEXP_PNFS))
		return;

	/*
	 * Check if the file system supports exporting a block-like layout.
	 * If the block device supports reservations prefer the SCSI layout,
	 * otherwise advertise the block layout.
	 */
#ifdef CONFIG_NFSD_BLOCKLAYOUT
	if (sb->s_export_op->get_uuid &&
	    sb->s_export_op->map_blocks &&
	    sb->s_export_op->commit_blocks)
		exp->ex_layout_type = LAYOUT_BLOCK_VOLUME;
#endif
#ifdef CONFIG_NFSD_SCSILAYOUT
	/* overwrite block layout selection if needed */
	if (sb->s_export_op->map_blocks &&
	    sb->s_export_op->commit_blocks &&
	    sb->s_bdev && sb->s_bdev->bd_disk->fops->pr_ops)
		exp->ex_layout_type = LAYOUT_SCSI;
#endif
}
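
/*
 * SCSI layouts are preferred because persistent reservations (pr_ops)
 * let the server itself fence an unresponsive client from the device;
 * without them, fencing falls back to the usermode helper invoked from
 * nfsd4_cb_layout_fail() below.
 */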

static void
nfsd4_free_layout_stateid(struct nfs4_stid *stid)
{
	struct nfs4_layout_stateid *ls = layoutstateid(stid);
	struct nfs4_client *clp = ls->ls_stid.sc_client;
	struct nfs4_file *fp = ls->ls_stid.sc_file;

	trace_layoutstate_free(&ls->ls_stid.sc_stateid);

	spin_lock(&clp->cl_lock);
	list_del_init(&ls->ls_perclnt);
	spin_unlock(&clp->cl_lock);

	spin_lock(&fp->fi_lock);
	list_del_init(&ls->ls_perfile);
	spin_unlock(&fp->fi_lock);

	vfs_setlease(ls->ls_file, F_UNLCK, NULL, (void **)&ls);
	fput(ls->ls_file);

	if (ls->ls_recalled)
		atomic_dec(&ls->ls_stid.sc_file->fi_lo_recalls);

	kmem_cache_free(nfs4_layout_stateid_cache, ls);
}

static int
nfsd4_layout_setlease(struct nfs4_layout_stateid *ls)
{
	struct file_lock *fl;
	int status;

	fl = locks_alloc_lock();
	if (!fl)
		return -ENOMEM;
	locks_init_lock(fl);
	fl->fl_lmops = &nfsd4_layouts_lm_ops;
	fl->fl_flags = FL_LAYOUT;
	fl->fl_type = F_RDLCK;
	fl->fl_end = OFFSET_MAX;
	fl->fl_owner = ls;
	fl->fl_pid = current->tgid;
	fl->fl_file = ls->ls_file;

	status = vfs_setlease(fl->fl_file, fl->fl_type, &fl, NULL);
	if (status) {
		locks_free_lock(fl);
		return status;
	}
	BUG_ON(fl != NULL);
	return 0;
}

static struct nfs4_layout_stateid *
nfsd4_alloc_layout_stateid(struct nfsd4_compound_state *cstate,
		struct nfs4_stid *parent, u32 layout_type)
{
	struct nfs4_client *clp = cstate->clp;
	struct nfs4_file *fp = parent->sc_file;
	struct nfs4_layout_stateid *ls;
	struct nfs4_stid *stp;

	stp = nfs4_alloc_stid(cstate->clp, nfs4_layout_stateid_cache);
	if (!stp)
		return NULL;
	stp->sc_free = nfsd4_free_layout_stateid;
	get_nfs4_file(fp);
	stp->sc_file = fp;

	ls = layoutstateid(stp);
	INIT_LIST_HEAD(&ls->ls_perclnt);
	INIT_LIST_HEAD(&ls->ls_perfile);
	spin_lock_init(&ls->ls_lock);
	INIT_LIST_HEAD(&ls->ls_layouts);
	mutex_init(&ls->ls_mutex);
	ls->ls_layout_type = layout_type;
	nfsd4_init_cb(&ls->ls_recall, clp, &nfsd4_cb_layout_ops,
			NFSPROC4_CLNT_CB_LAYOUT);

	if (parent->sc_type == NFS4_DELEG_STID)
		ls->ls_file = get_file(fp->fi_deleg_file);
	else
		ls->ls_file = find_any_file(fp);
	BUG_ON(!ls->ls_file);

	if (nfsd4_layout_setlease(ls)) {
		fput(ls->ls_file);
		put_nfs4_file(fp);
		kmem_cache_free(nfs4_layout_stateid_cache, ls);
		return NULL;
	}

	spin_lock(&clp->cl_lock);
	stp->sc_type = NFS4_LAYOUT_STID;
	list_add(&ls->ls_perclnt, &clp->cl_lo_states);
	spin_unlock(&clp->cl_lock);

	spin_lock(&fp->fi_lock);
	list_add(&ls->ls_perfile, &fp->fi_lo_states);
	spin_unlock(&fp->fi_lock);

	trace_layoutstate_alloc(&ls->ls_stid.sc_stateid);
	return ls;
}

__be32
nfsd4_preprocess_layout_stateid(struct svc_rqst *rqstp,
		struct nfsd4_compound_state *cstate, stateid_t *stateid,
		bool create, u32 layout_type, struct nfs4_layout_stateid **lsp)
{
	struct nfs4_layout_stateid *ls;
	struct nfs4_stid *stid;
	unsigned char typemask = NFS4_LAYOUT_STID;
	__be32 status;

	if (create)
		typemask |= (NFS4_OPEN_STID | NFS4_LOCK_STID | NFS4_DELEG_STID);

	status = nfsd4_lookup_stateid(cstate, stateid, typemask, &stid,
			net_generic(SVC_NET(rqstp), nfsd_net_id));
	if (status)
		goto out;

	if (!fh_match(&cstate->current_fh.fh_handle,
		      &stid->sc_file->fi_fhandle)) {
		status = nfserr_bad_stateid;
		goto out_put_stid;
	}

	if (stid->sc_type != NFS4_LAYOUT_STID) {
		ls = nfsd4_alloc_layout_stateid(cstate, stid, layout_type);
		nfs4_put_stid(stid);

		status = nfserr_jukebox;
		if (!ls)
			goto out;
		mutex_lock(&ls->ls_mutex);
	} else {
		ls = container_of(stid, struct nfs4_layout_stateid, ls_stid);

		status = nfserr_bad_stateid;
		mutex_lock(&ls->ls_mutex);
		if (stateid->si_generation > stid->sc_stateid.si_generation)
			goto out_unlock_stid;
		if (layout_type != ls->ls_layout_type)
			goto out_unlock_stid;
	}

	*lsp = ls;
	return 0;

out_unlock_stid:
	mutex_unlock(&ls->ls_mutex);
out_put_stid:
	nfs4_put_stid(stid);
out:
	return status;
}
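
/*
 * On the first layout operation the client presents an open, lock, or
 * delegation stateid (allowed via the widened typemask when "create" is
 * set), and a fresh layout stateid is spawned from it.  On subsequent
 * calls the layout stateid itself is presented, and only its generation
 * and layout type are validated.
 */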

static void
nfsd4_recall_file_layout(struct nfs4_layout_stateid *ls)
{
	spin_lock(&ls->ls_lock);
	if (ls->ls_recalled)
		goto out_unlock;

	ls->ls_recalled = true;
	atomic_inc(&ls->ls_stid.sc_file->fi_lo_recalls);
	if (list_empty(&ls->ls_layouts))
		goto out_unlock;

	trace_layout_recall(&ls->ls_stid.sc_stateid);

	atomic_inc(&ls->ls_stid.sc_count);
	nfsd4_run_cb(&ls->ls_recall);

out_unlock:
	spin_unlock(&ls->ls_lock);
}
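
/*
 * A recall is issued at most once per layout stateid (ls_recalled), and
 * the extra sc_count reference taken above keeps the stateid alive until
 * the callback's release method drops it.
 */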

static u64
layout_end(struct nfsd4_layout_seg *seg)
{
	u64 end = seg->offset + seg->length;
	return end >= seg->offset ? end : NFS4_MAX_UINT64;
}
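
/*
 * The comparison above catches unsigned overflow: a segment whose length
 * is NFS4_MAX_UINT64 ("rest of the file") wraps around when added to a
 * non-zero offset, so any wrapped sum is clamped back to NFS4_MAX_UINT64.
 */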

static void
layout_update_len(struct nfsd4_layout_seg *lo, u64 end)
{
	if (end == NFS4_MAX_UINT64)
		lo->length = NFS4_MAX_UINT64;
	else
		lo->length = end - lo->offset;
}

static bool
layouts_overlapping(struct nfs4_layout *lo, struct nfsd4_layout_seg *s)
{
	if (s->iomode != IOMODE_ANY && s->iomode != lo->lo_seg.iomode)
		return false;
	if (layout_end(&lo->lo_seg) <= s->offset)
		return false;
	if (layout_end(s) <= lo->lo_seg.offset)
		return false;
	return true;
}

static bool
layouts_try_merge(struct nfsd4_layout_seg *lo, struct nfsd4_layout_seg *new)
{
	if (lo->iomode != new->iomode)
		return false;
	if (layout_end(new) < lo->offset)
		return false;
	if (layout_end(lo) < new->offset)
		return false;

	lo->offset = min(lo->offset, new->offset);
	layout_update_len(lo, max(layout_end(lo), layout_end(new)));
	return true;
}
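
/*
 * Example: with matching iomodes, merging an existing segment [0, 100)
 * with a new one [100, 200) succeeds, because the '<' checks above allow
 * abutting ranges; the result is offset min(0, 100) = 0 and end
 * max(100, 200) = 200, i.e. the single segment [0, 200).
 */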

static __be32
nfsd4_recall_conflict(struct nfs4_layout_stateid *ls)
{
	struct nfs4_file *fp = ls->ls_stid.sc_file;
	struct nfs4_layout_stateid *l, *n;
	__be32 nfserr = nfs_ok;

	assert_spin_locked(&fp->fi_lock);

	list_for_each_entry_safe(l, n, &fp->fi_lo_states, ls_perfile) {
		if (l != ls) {
			nfsd4_recall_file_layout(l);
			nfserr = nfserr_recallconflict;
		}
	}

	return nfserr;
}

__be32
nfsd4_insert_layout(struct nfsd4_layoutget *lgp, struct nfs4_layout_stateid *ls)
{
	struct nfsd4_layout_seg *seg = &lgp->lg_seg;
	struct nfs4_file *fp = ls->ls_stid.sc_file;
	struct nfs4_layout *lp, *new = NULL;
	__be32 nfserr;

	spin_lock(&fp->fi_lock);
	nfserr = nfsd4_recall_conflict(ls);
	if (nfserr)
		goto out;
	spin_lock(&ls->ls_lock);
	list_for_each_entry(lp, &ls->ls_layouts, lo_perstate) {
		if (layouts_try_merge(&lp->lo_seg, seg))
			goto done;
	}
	spin_unlock(&ls->ls_lock);
	spin_unlock(&fp->fi_lock);

	new = kmem_cache_alloc(nfs4_layout_cache, GFP_KERNEL);
	if (!new)
		return nfserr_jukebox;
	memcpy(&new->lo_seg, seg, sizeof(lp->lo_seg));
	new->lo_state = ls;

	spin_lock(&fp->fi_lock);
	nfserr = nfsd4_recall_conflict(ls);
	if (nfserr)
		goto out;
	spin_lock(&ls->ls_lock);
	list_for_each_entry(lp, &ls->ls_layouts, lo_perstate) {
		if (layouts_try_merge(&lp->lo_seg, seg))
			goto done;
	}

	atomic_inc(&ls->ls_stid.sc_count);
	list_add_tail(&new->lo_perstate, &ls->ls_layouts);
	new = NULL;
done:
	nfs4_inc_and_copy_stateid(&lgp->lg_sid, &ls->ls_stid);
	spin_unlock(&ls->ls_lock);
out:
	spin_unlock(&fp->fi_lock);
	if (new)
		kmem_cache_free(nfs4_layout_cache, new);
	return nfserr;
}
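
/*
 * The conflict check and merge scan run twice because the allocation in
 * the middle cannot happen under fi_lock/ls_lock: once the locks are
 * dropped, another thread may have merged a segment or recalled the
 * layout, so the state must be revalidated after reacquiring them.
 */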

static void
nfsd4_free_layouts(struct list_head *reaplist)
{
	while (!list_empty(reaplist)) {
		struct nfs4_layout *lp = list_first_entry(reaplist,
				struct nfs4_layout, lo_perstate);

		list_del(&lp->lo_perstate);
		nfs4_put_stid(&lp->lo_state->ls_stid);
		kmem_cache_free(nfs4_layout_cache, lp);
	}
}
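
/*
 * Each nfs4_layout on a reaplist still holds the sc_count reference taken
 * in nfsd4_insert_layout(); it is dropped here, outside any spinlock,
 * which is why callers splice layouts onto a private list first.
 */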

static void
nfsd4_return_file_layout(struct nfs4_layout *lp, struct nfsd4_layout_seg *seg,
		struct list_head *reaplist)
{
	struct nfsd4_layout_seg *lo = &lp->lo_seg;
	u64 end = layout_end(lo);

	if (seg->offset <= lo->offset) {
		if (layout_end(seg) >= end) {
			list_move_tail(&lp->lo_perstate, reaplist);
			return;
		}
		lo->offset = layout_end(seg);
	} else {
		/* retain the whole layout segment on a split. */
		if (layout_end(seg) < end) {
			dprintk("%s: split not supported\n", __func__);
			return;
		}
		end = seg->offset;
	}

	layout_update_len(lo, end);
}

__be32
nfsd4_return_file_layouts(struct svc_rqst *rqstp,
		struct nfsd4_compound_state *cstate,
		struct nfsd4_layoutreturn *lrp)
{
	struct nfs4_layout_stateid *ls;
	struct nfs4_layout *lp, *n;
	LIST_HEAD(reaplist);
	__be32 nfserr;
	int found = 0;

	nfserr = nfsd4_preprocess_layout_stateid(rqstp, cstate, &lrp->lr_sid,
						false, lrp->lr_layout_type,
						&ls);
	if (nfserr) {
		trace_layout_return_lookup_fail(&lrp->lr_sid);
		return nfserr;
	}

	spin_lock(&ls->ls_lock);
	list_for_each_entry_safe(lp, n, &ls->ls_layouts, lo_perstate) {
		if (layouts_overlapping(lp, &lrp->lr_seg)) {
			nfsd4_return_file_layout(lp, &lrp->lr_seg, &reaplist);
			found++;
		}
	}
	if (!list_empty(&ls->ls_layouts)) {
		if (found)
			nfs4_inc_and_copy_stateid(&lrp->lr_sid, &ls->ls_stid);
		lrp->lrs_present = 1;
	} else {
		trace_layoutstate_unhash(&ls->ls_stid.sc_stateid);
		nfs4_unhash_stid(&ls->ls_stid);
		lrp->lrs_present = 0;
	}
	spin_unlock(&ls->ls_lock);

	mutex_unlock(&ls->ls_mutex);
	nfs4_put_stid(&ls->ls_stid);
	nfsd4_free_layouts(&reaplist);
	return nfs_ok;
}

__be32
nfsd4_return_client_layouts(struct svc_rqst *rqstp,
		struct nfsd4_compound_state *cstate,
		struct nfsd4_layoutreturn *lrp)
{
	struct nfs4_layout_stateid *ls, *n;
	struct nfs4_client *clp = cstate->clp;
	struct nfs4_layout *lp, *t;
	LIST_HEAD(reaplist);

	lrp->lrs_present = 0;

	spin_lock(&clp->cl_lock);
	list_for_each_entry_safe(ls, n, &clp->cl_lo_states, ls_perclnt) {
		if (ls->ls_layout_type != lrp->lr_layout_type)
			continue;

		if (lrp->lr_return_type == RETURN_FSID &&
		    !fh_fsid_match(&ls->ls_stid.sc_file->fi_fhandle,
				   &cstate->current_fh.fh_handle))
			continue;

		spin_lock(&ls->ls_lock);
		list_for_each_entry_safe(lp, t, &ls->ls_layouts, lo_perstate) {
			if (lrp->lr_seg.iomode == IOMODE_ANY ||
			    lrp->lr_seg.iomode == lp->lo_seg.iomode)
				list_move_tail(&lp->lo_perstate, &reaplist);
		}
		spin_unlock(&ls->ls_lock);
	}
	spin_unlock(&clp->cl_lock);

	nfsd4_free_layouts(&reaplist);
	return 0;
}

static void
nfsd4_return_all_layouts(struct nfs4_layout_stateid *ls,
		struct list_head *reaplist)
{
	spin_lock(&ls->ls_lock);
	list_splice_init(&ls->ls_layouts, reaplist);
	spin_unlock(&ls->ls_lock);
}

void
nfsd4_return_all_client_layouts(struct nfs4_client *clp)
{
	struct nfs4_layout_stateid *ls, *n;
	LIST_HEAD(reaplist);

	spin_lock(&clp->cl_lock);
	list_for_each_entry_safe(ls, n, &clp->cl_lo_states, ls_perclnt)
		nfsd4_return_all_layouts(ls, &reaplist);
	spin_unlock(&clp->cl_lock);

	nfsd4_free_layouts(&reaplist);
}

void
nfsd4_return_all_file_layouts(struct nfs4_client *clp, struct nfs4_file *fp)
{
	struct nfs4_layout_stateid *ls, *n;
	LIST_HEAD(reaplist);

	spin_lock(&fp->fi_lock);
	list_for_each_entry_safe(ls, n, &fp->fi_lo_states, ls_perfile) {
		if (ls->ls_stid.sc_client == clp)
			nfsd4_return_all_layouts(ls, &reaplist);
	}
	spin_unlock(&fp->fi_lock);

	nfsd4_free_layouts(&reaplist);
}

static void
nfsd4_cb_layout_fail(struct nfs4_layout_stateid *ls)
{
	struct nfs4_client *clp = ls->ls_stid.sc_client;
	char addr_str[INET6_ADDRSTRLEN];
	static char *envp[] = {
		"HOME=/",
		"TERM=linux",
		"PATH=/sbin:/usr/sbin:/bin:/usr/bin",
		NULL
	};
	char *argv[8];
	int error;

	rpc_ntop((struct sockaddr *)&clp->cl_addr, addr_str, sizeof(addr_str));

	printk(KERN_WARNING
		"nfsd: client %s failed to respond to layout recall. "
		"  Fencing..\n", addr_str);

	argv[0] = "/sbin/nfsd-recall-failed";
	argv[1] = addr_str;
	argv[2] = ls->ls_file->f_path.mnt->mnt_sb->s_id;
	argv[3] = NULL;

	error = call_usermodehelper(argv[0], argv, envp, UMH_WAIT_PROC);
	if (error) {
		printk(KERN_ERR "nfsd: fence failed for client %s: %d!\n",
			addr_str, error);
	}
}

static void
nfsd4_cb_layout_prepare(struct nfsd4_callback *cb)
{
	struct nfs4_layout_stateid *ls =
		container_of(cb, struct nfs4_layout_stateid, ls_recall);

	mutex_lock(&ls->ls_mutex);
	nfs4_inc_and_copy_stateid(&ls->ls_recall_sid, &ls->ls_stid);
	mutex_unlock(&ls->ls_mutex);
}

static int
nfsd4_cb_layout_done(struct nfsd4_callback *cb, struct rpc_task *task)
{
	struct nfs4_layout_stateid *ls =
		container_of(cb, struct nfs4_layout_stateid, ls_recall);
	struct nfsd_net *nn;
	ktime_t now, cutoff;
	const struct nfsd4_layout_ops *ops;
	LIST_HEAD(reaplist);

	switch (task->tk_status) {
	case 0:
	case -NFS4ERR_DELAY:
		/*
		 * Anything left? If not, then call it done. Note that we don't
		 * take the spinlock since this is an optimization and nothing
		 * should get added until the cb counter goes to zero.
		 */
		if (list_empty(&ls->ls_layouts))
			return 1;

		/* Poll the client until it's done with the layout */
		now = ktime_get();
		nn = net_generic(ls->ls_stid.sc_client->net, nfsd_net_id);

		/* Client gets 2 lease periods to return it */
		cutoff = ktime_add_ns(task->tk_start,
				nn->nfsd4_lease * NSEC_PER_SEC * 2);

		if (ktime_before(now, cutoff)) {
			rpc_delay(task, HZ/100); /* 10 milliseconds */
			return 0;
		}
		/* Fallthrough */
	case -NFS4ERR_NOMATCHING_LAYOUT:
		trace_layout_recall_done(&ls->ls_stid.sc_stateid);
		task->tk_status = 0;
		return 1;
	default:
		/*
		 * Unknown error or non-responding client, we'll need to fence.
		 */
		trace_layout_recall_fail(&ls->ls_stid.sc_stateid);

		ops = nfsd4_layout_ops[ls->ls_layout_type];
		if (ops->fence_client)
			ops->fence_client(ls);
		else
			nfsd4_cb_layout_fail(ls);
		return -1;
	}
}
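
/*
 * Recall completion thus has three outcomes: the client returned or
 * disclaimed the layout (done), it is still using it and gets polled
 * every 10ms for up to two lease periods, or it never answers sanely
 * and must be fenced from the storage device.
 */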

static void
nfsd4_cb_layout_release(struct nfsd4_callback *cb)
{
	struct nfs4_layout_stateid *ls =
		container_of(cb, struct nfs4_layout_stateid, ls_recall);
	LIST_HEAD(reaplist);

	trace_layout_recall_release(&ls->ls_stid.sc_stateid);

	nfsd4_return_all_layouts(ls, &reaplist);
	nfsd4_free_layouts(&reaplist);
	nfs4_put_stid(&ls->ls_stid);
}

static const struct nfsd4_callback_ops nfsd4_cb_layout_ops = {
	.prepare	= nfsd4_cb_layout_prepare,
	.done		= nfsd4_cb_layout_done,
	.release	= nfsd4_cb_layout_release,
};

static bool
nfsd4_layout_lm_break(struct file_lock *fl)
{
	/*
	 * We don't want the locks code to timeout the lease for us;
	 * we'll remove it ourself if a layout isn't returned
	 * in time:
	 */
	fl->fl_break_time = 0;
	nfsd4_recall_file_layout(fl->fl_owner);
	return false;
}
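
/*
 * fl_owner was set to the layout stateid in nfsd4_layout_setlease(), so a
 * lease break on the file maps straight back to a layout recall.
 * Returning false keeps the lease in place; nfsd tears it down itself
 * once the layout is returned or the recall times out.
 */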

static int
nfsd4_layout_lm_change(struct file_lock *onlist, int arg,
		struct list_head *dispose)
{
	BUG_ON(!(arg & F_UNLCK));
	return lease_modify(onlist, arg, dispose);
}

static const struct lock_manager_operations nfsd4_layouts_lm_ops = {
	.lm_break	= nfsd4_layout_lm_break,
	.lm_change	= nfsd4_layout_lm_change,
};

int
nfsd4_init_pnfs(void)
{
	int i;

	for (i = 0; i < DEVID_HASH_SIZE; i++)
		INIT_LIST_HEAD(&nfsd_devid_hash[i]);

	nfs4_layout_cache = kmem_cache_create("nfs4_layout",
			sizeof(struct nfs4_layout), 0, 0, NULL);
	if (!nfs4_layout_cache)
		return -ENOMEM;

	nfs4_layout_stateid_cache = kmem_cache_create("nfs4_layout_stateid",
			sizeof(struct nfs4_layout_stateid), 0, 0, NULL);
	if (!nfs4_layout_stateid_cache) {
		kmem_cache_destroy(nfs4_layout_cache);
		return -ENOMEM;
	}
	return 0;
}

void
nfsd4_exit_pnfs(void)
{
	int i;

	kmem_cache_destroy(nfs4_layout_cache);
	kmem_cache_destroy(nfs4_layout_stateid_cache);

	for (i = 0; i < DEVID_HASH_SIZE; i++) {
		struct nfsd4_deviceid_map *map, *n;

		list_for_each_entry_safe(map, n, &nfsd_devid_hash[i], hash)
			kfree(map);
	}
}