]>
Commit | Line | Data |
---|---|---|
1 | /* | |
2 | * linux/fs/nfs/callback_proc.c | |
3 | * | |
4 | * Copyright (C) 2004 Trond Myklebust | |
5 | * | |
6 | * NFSv4 callback procedures | |
7 | */ | |
8 | #include <linux/nfs4.h> | |
9 | #include <linux/nfs_fs.h> | |
10 | #include <linux/slab.h> | |
11 | #include <linux/rcupdate.h> | |
12 | #include "nfs4_fs.h" | |
13 | #include "callback.h" | |
14 | #include "delegation.h" | |
15 | #include "internal.h" | |
16 | #include "pnfs.h" | |
17 | #include "nfs4session.h" | |
18 | #include "nfs4trace.h" | |
19 | ||
20 | #define NFSDBG_FACILITY NFSDBG_CALLBACK | |
21 | ||
/*
 * nfs4_callback_getattr - respond to a CB_GETATTR callback from the server
 * @args: filehandle and attribute bitmap requested by the server
 * @res:  attribute results, filled in on success; res->status carries the
 *        NFS4 status in network byte order
 * @cps:  per-compound processing state; cps->clp identifies the client
 *
 * The server uses CB_GETATTR to query cached attributes for a file we hold
 * a write delegation on.  Only the change attribute, size, ctime and mtime
 * are reported, and only those bits the server actually asked for.
 */
__be32 nfs4_callback_getattr(struct cb_getattrargs *args,
			     struct cb_getattrres *res,
			     struct cb_process_state *cps)
{
	struct nfs_delegation *delegation;
	struct nfs_inode *nfsi;
	struct inode *inode;

	res->status = htonl(NFS4ERR_OP_NOT_IN_SESSION);
	if (!cps->clp) /* Always set for v4.0. Set in cb_sequence for v4.1 */
		goto out;

	res->bitmap[0] = res->bitmap[1] = 0;
	res->status = htonl(NFS4ERR_BADHANDLE);

	dprintk_rcu("NFS: GETATTR callback request from %s\n",
		rpc_peeraddr2str(cps->clp->cl_rpcclient, RPC_DISPLAY_ADDR));

	/* Takes a reference on the inode; dropped via iput() below. */
	inode = nfs_delegation_find_inode(cps->clp, &args->fh);
	if (inode == NULL) {
		trace_nfs4_cb_getattr(cps->clp, &args->fh, NULL,
				-ntohl(res->status));
		goto out;
	}
	nfsi = NFS_I(inode);
	rcu_read_lock();
	delegation = rcu_dereference(nfsi->delegation);
	/* Only answer if we still hold a write delegation on this file. */
	if (delegation == NULL || (delegation->type & FMODE_WRITE) == 0)
		goto out_iput;
	res->size = i_size_read(inode);
	res->change_attr = delegation->change_attr;
	/* Pending page requests mean the file will change again; advance
	 * the reported change attribute by one. */
	if (nfsi->nrequests != 0)
		res->change_attr++;
	res->ctime = inode->i_ctime;
	res->mtime = inode->i_mtime;
	/* Report only the attributes the server requested. */
	res->bitmap[0] = (FATTR4_WORD0_CHANGE|FATTR4_WORD0_SIZE) &
		args->bitmap[0];
	res->bitmap[1] = (FATTR4_WORD1_TIME_METADATA|FATTR4_WORD1_TIME_MODIFY) &
		args->bitmap[1];
	res->status = 0;
out_iput:
	rcu_read_unlock();
	trace_nfs4_cb_getattr(cps->clp, &args->fh, inode, -ntohl(res->status));
	iput(inode);
out:
	dprintk("%s: exit with status = %d\n", __func__, ntohl(res->status));
	return res->status;
}
70 | ||
/*
 * nfs4_callback_recall - respond to a CB_RECALL callback from the server
 * @args:  filehandle and stateid of the delegation being recalled
 * @dummy: unused; CB_RECALL returns only a status
 * @cps:   per-compound processing state; cps->clp identifies the client
 *
 * Locate the delegated inode and schedule an asynchronous return of the
 * delegation.  Returns an NFS4 status in network byte order.
 */
__be32 nfs4_callback_recall(struct cb_recallargs *args, void *dummy,
			    struct cb_process_state *cps)
{
	struct inode *inode;
	__be32 res;

	res = htonl(NFS4ERR_OP_NOT_IN_SESSION);
	if (!cps->clp) /* Always set for v4.0. Set in cb_sequence for v4.1 */
		goto out;

	dprintk_rcu("NFS: RECALL callback request from %s\n",
		rpc_peeraddr2str(cps->clp->cl_rpcclient, RPC_DISPLAY_ADDR));

	res = htonl(NFS4ERR_BADHANDLE);
	/* Takes a reference on the inode; dropped via iput() below. */
	inode = nfs_delegation_find_inode(cps->clp, &args->fh);
	if (inode == NULL)
		goto out;
	/* Set up a helper thread to actually return the delegation */
	switch (nfs_async_inode_return_delegation(inode, &args->stateid)) {
	case 0:
		res = 0;
		break;
	case -ENOENT:
		/* No matching delegation for that stateid. */
		res = htonl(NFS4ERR_BAD_STATEID);
		break;
	default:
		res = htonl(NFS4ERR_RESOURCE);
	}
	trace_nfs4_recall_delegation(inode, -ntohl(res));
	iput(inode);
out:
	dprintk("%s: exit with status = %d\n", __func__, ntohl(res));
	return res;
}
105 | ||
106 | #if defined(CONFIG_NFS_V4_1) | |
107 | ||
/*
 * Lookup a layout by filehandle.
 *
 * Note: gets a refcount on the layout hdr and on its respective inode.
 * Caller must put the layout hdr and the inode.
 *
 * Caller must hold clp->cl_lock and the RCU read lock (the superblock
 * list is walked with list_for_each_entry_rcu).
 *
 * TODO: keep track of all layouts (and delegations) in a hash table
 * hashed by filehandle.
 */
static struct pnfs_layout_hdr * get_layout_by_fh_locked(struct nfs_client *clp,
		struct nfs_fh *fh, nfs4_stateid *stateid)
{
	struct nfs_server *server;
	struct inode *ino;
	struct pnfs_layout_hdr *lo;

	list_for_each_entry_rcu(server, &clp->cl_superblocks, client_link) {
		list_for_each_entry(lo, &server->layouts, plh_layouts) {
			/* Both the stateid "other" field and the filehandle
			 * must match before we accept this layout. */
			if (!nfs4_stateid_match_other(&lo->plh_stateid, stateid))
				continue;
			if (nfs_compare_fh(fh, &NFS_I(lo->plh_inode)->fh))
				continue;
			/* igrab() fails if the inode is being freed. */
			ino = igrab(lo->plh_inode);
			if (!ino)
				break;
			spin_lock(&ino->i_lock);
			/* Is this layout in the process of being freed? */
			if (NFS_I(ino)->layout != lo) {
				spin_unlock(&ino->i_lock);
				iput(ino);
				break;
			}
			/* Success: return with refs on both lo and ino. */
			pnfs_get_layout_hdr(lo);
			spin_unlock(&ino->i_lock);
			return lo;
		}
	}

	return NULL;
}
148 | ||
149 | static struct pnfs_layout_hdr * get_layout_by_fh(struct nfs_client *clp, | |
150 | struct nfs_fh *fh, nfs4_stateid *stateid) | |
151 | { | |
152 | struct pnfs_layout_hdr *lo; | |
153 | ||
154 | spin_lock(&clp->cl_lock); | |
155 | rcu_read_lock(); | |
156 | lo = get_layout_by_fh_locked(clp, fh, stateid); | |
157 | rcu_read_unlock(); | |
158 | spin_unlock(&clp->cl_lock); | |
159 | ||
160 | return lo; | |
161 | } | |
162 | ||
/*
 * initiate_file_draining - process a CB_LAYOUTRECALL for a single file
 * @clp:  client the recall was addressed to
 * @args: recall arguments (filehandle, stateid, byte range)
 *
 * Returns NFS4ERR_NOMATCHING_LAYOUT if no layout (or no matching segments)
 * remain, or NFS4ERR_DELAY if segments are still in use and the server
 * should retry.  Return value is a host-order NFS4 status code.
 */
static u32 initiate_file_draining(struct nfs_client *clp,
				  struct cb_layoutrecallargs *args)
{
	struct inode *ino;
	struct pnfs_layout_hdr *lo;
	u32 rv = NFS4ERR_NOMATCHING_LAYOUT;
	LIST_HEAD(free_me_list);

	/* Takes refs on the layout hdr and its inode; both put below. */
	lo = get_layout_by_fh(clp, &args->cbl_fh, &args->cbl_stateid);
	if (!lo)
		goto out;

	ino = lo->plh_inode;

	/* Record the recalled stateid before committing outstanding data. */
	spin_lock(&ino->i_lock);
	pnfs_set_layout_stateid(lo, &args->cbl_stateid, true);
	spin_unlock(&ino->i_lock);

	pnfs_layoutcommit_inode(ino, false);

	spin_lock(&ino->i_lock);
	/* Segments still in use (or a bulk recall in progress) mean we
	 * cannot satisfy the recall yet; ask the server to retry. */
	if (test_bit(NFS_LAYOUT_BULK_RECALL, &lo->plh_flags) ||
	    pnfs_mark_matching_lsegs_invalid(lo, &free_me_list,
					&args->cbl_range)) {
		rv = NFS4ERR_DELAY;
		goto unlock;
	}

	/* Give the layout driver a chance to clean up the recalled range. */
	if (NFS_SERVER(ino)->pnfs_curr_ld->return_range) {
		NFS_SERVER(ino)->pnfs_curr_ld->return_range(lo,
			&args->cbl_range);
	}
	pnfs_mark_layout_returned_if_empty(lo);
unlock:
	spin_unlock(&ino->i_lock);
	pnfs_free_lseg_list(&free_me_list);
	pnfs_put_layout_hdr(lo);
	trace_nfs4_cb_layoutrecall_inode(clp, &args->cbl_fh, ino, -rv);
	iput(ino);
out:
	return rv;
}
205 | ||
206 | static u32 initiate_bulk_draining(struct nfs_client *clp, | |
207 | struct cb_layoutrecallargs *args) | |
208 | { | |
209 | int stat; | |
210 | ||
211 | if (args->cbl_recall_type == RETURN_FSID) | |
212 | stat = pnfs_destroy_layouts_byfsid(clp, &args->cbl_fsid, true); | |
213 | else | |
214 | stat = pnfs_destroy_layouts_byclid(clp, true); | |
215 | if (stat != 0) | |
216 | return NFS4ERR_DELAY; | |
217 | return NFS4ERR_NOMATCHING_LAYOUT; | |
218 | } | |
219 | ||
220 | static u32 do_callback_layoutrecall(struct nfs_client *clp, | |
221 | struct cb_layoutrecallargs *args) | |
222 | { | |
223 | u32 res; | |
224 | ||
225 | dprintk("%s enter, type=%i\n", __func__, args->cbl_recall_type); | |
226 | if (args->cbl_recall_type == RETURN_FILE) | |
227 | res = initiate_file_draining(clp, args); | |
228 | else | |
229 | res = initiate_bulk_draining(clp, args); | |
230 | dprintk("%s returning %i\n", __func__, res); | |
231 | return res; | |
232 | ||
233 | } | |
234 | ||
235 | __be32 nfs4_callback_layoutrecall(struct cb_layoutrecallargs *args, | |
236 | void *dummy, struct cb_process_state *cps) | |
237 | { | |
238 | u32 res; | |
239 | ||
240 | dprintk("%s: -->\n", __func__); | |
241 | ||
242 | if (cps->clp) | |
243 | res = do_callback_layoutrecall(cps->clp, args); | |
244 | else | |
245 | res = NFS4ERR_OP_NOT_IN_SESSION; | |
246 | ||
247 | dprintk("%s: exit with status = %d\n", __func__, res); | |
248 | return cpu_to_be32(res); | |
249 | } | |
250 | ||
251 | static void pnfs_recall_all_layouts(struct nfs_client *clp) | |
252 | { | |
253 | struct cb_layoutrecallargs args; | |
254 | ||
255 | /* Pretend we got a CB_LAYOUTRECALL(ALL) */ | |
256 | memset(&args, 0, sizeof(args)); | |
257 | args.cbl_recall_type = RETURN_ALL; | |
258 | /* FIXME we ignore errors, what should we do? */ | |
259 | do_callback_layoutrecall(clp, &args); | |
260 | } | |
261 | ||
/*
 * nfs4_callback_devicenotify - handle a CB_NOTIFY_DEVICEID callback
 * @args:  array of device notifications (args->devs, args->ndevs entries)
 * @dummy: unused; the op returns only a status
 * @cps:   per-compound state; cps->clp must have been set by CB_SEQUENCE
 *
 * For each notified device, find a server using the matching layout type
 * and delete the cached device id.  Always frees args->devs (presumably
 * allocated by the XDR decoder — TODO confirm) before returning.
 */
__be32 nfs4_callback_devicenotify(struct cb_devicenotifyargs *args,
				  void *dummy, struct cb_process_state *cps)
{
	int i;
	__be32 res = 0;
	struct nfs_client *clp = cps->clp;
	struct nfs_server *server = NULL;

	dprintk("%s: -->\n", __func__);

	if (!clp) {
		res = cpu_to_be32(NFS4ERR_OP_NOT_IN_SESSION);
		goto out;
	}

	for (i = 0; i < args->ndevs; i++) {
		struct cb_devicenotifyitem *dev = &args->devs[i];

		/* Reuse the server found for the previous entry when its
		 * layout type still matches; otherwise search the client's
		 * superblock list under RCU for a matching layout driver. */
		if (!server ||
		    server->pnfs_curr_ld->id != dev->cbd_layout_type) {
			rcu_read_lock();
			list_for_each_entry_rcu(server, &clp->cl_superblocks, client_link)
				if (server->pnfs_curr_ld &&
				    server->pnfs_curr_ld->id == dev->cbd_layout_type) {
					rcu_read_unlock();
					goto found;
				}
			rcu_read_unlock();
			/* No server uses this layout type; skip the entry. */
			dprintk("%s: layout type %u not found\n",
				__func__, dev->cbd_layout_type);
			continue;
		}

	found:
		nfs4_delete_deviceid(server->pnfs_curr_ld, clp, &dev->cbd_dev_id);
	}

out:
	kfree(args->devs);
	dprintk("%s: exit with status = %u\n",
		__func__, be32_to_cpu(res));
	return res;
}
305 | ||
/*
 * Validate the sequenceID sent by the server.
 * Return success if the sequenceID is one more than what we last saw on
 * this slot, accounting for wraparound. Increments the slot's sequence.
 *
 * We don't yet implement a duplicate request cache, instead we set the
 * back channel ca_maxresponsesize_cached to zero. This is OK for now
 * since we only currently implement idempotent callbacks anyway.
 *
 * We have a single slot backchannel at this time, so we don't bother
 * checking the used_slots bit array on the table. The lower layer guarantees
 * a single outstanding callback request at a time.
 *
 * Returns an NFS4 status in network byte order; NFS4_OK means the caller
 * may proceed with the compound.
 */
static __be32
validate_seqid(struct nfs4_slot_table *tbl, struct cb_sequenceargs * args)
{
	struct nfs4_slot *slot;

	dprintk("%s enter. slotid %u seqid %u\n",
		__func__, args->csa_slotid, args->csa_sequenceid);

	/* Reject slot ids beyond the backchannel's fixed slot count. */
	if (args->csa_slotid >= NFS41_BC_MAX_CALLBACKS)
		return htonl(NFS4ERR_BADSLOT);

	slot = tbl->slots + args->csa_slotid;
	dprintk("%s slot table seqid: %u\n", __func__, slot->seq_nr);

	/* Normal */
	if (likely(args->csa_sequenceid == slot->seq_nr + 1))
		goto out_ok;

	/* Replay */
	if (args->csa_sequenceid == slot->seq_nr) {
		dprintk("%s seqid %u is a replay\n",
			__func__, args->csa_sequenceid);
		/* Signal process_op to set this error on next op */
		if (args->csa_cachethis == 0)
			return htonl(NFS4ERR_RETRY_UNCACHED_REP);

		/* The ca_maxresponsesize_cached is 0 with no DRC */
		else if (args->csa_cachethis == 1)
			return htonl(NFS4ERR_REP_TOO_BIG_TO_CACHE);
	}

	/* Wraparound */
	if (args->csa_sequenceid == 1 && (slot->seq_nr + 1) == 0) {
		slot->seq_nr = 1;
		goto out_ok;
	}

	/* Misordered request */
	return htonl(NFS4ERR_SEQ_MISORDERED);
out_ok:
	tbl->highest_used_slotid = args->csa_slotid;
	return htonl(NFS4_OK);
}
362 | ||
363 | /* | |
364 | * For each referring call triple, check the session's slot table for | |
365 | * a match. If the slot is in use and the sequence numbers match, the | |
366 | * client is still waiting for a response to the original request. | |
367 | */ | |
368 | static bool referring_call_exists(struct nfs_client *clp, | |
369 | uint32_t nrclists, | |
370 | struct referring_call_list *rclists) | |
371 | { | |
372 | bool status = 0; | |
373 | int i, j; | |
374 | struct nfs4_session *session; | |
375 | struct nfs4_slot_table *tbl; | |
376 | struct referring_call_list *rclist; | |
377 | struct referring_call *ref; | |
378 | ||
379 | /* | |
380 | * XXX When client trunking is implemented, this becomes | |
381 | * a session lookup from within the loop | |
382 | */ | |
383 | session = clp->cl_session; | |
384 | tbl = &session->fc_slot_table; | |
385 | ||
386 | for (i = 0; i < nrclists; i++) { | |
387 | rclist = &rclists[i]; | |
388 | if (memcmp(session->sess_id.data, | |
389 | rclist->rcl_sessionid.data, | |
390 | NFS4_MAX_SESSIONID_LEN) != 0) | |
391 | continue; | |
392 | ||
393 | for (j = 0; j < rclist->rcl_nrefcalls; j++) { | |
394 | ref = &rclist->rcl_refcalls[j]; | |
395 | ||
396 | dprintk("%s: sessionid %x:%x:%x:%x sequenceid %u " | |
397 | "slotid %u\n", __func__, | |
398 | ((u32 *)&rclist->rcl_sessionid.data)[0], | |
399 | ((u32 *)&rclist->rcl_sessionid.data)[1], | |
400 | ((u32 *)&rclist->rcl_sessionid.data)[2], | |
401 | ((u32 *)&rclist->rcl_sessionid.data)[3], | |
402 | ref->rc_sequenceid, ref->rc_slotid); | |
403 | ||
404 | spin_lock(&tbl->slot_tbl_lock); | |
405 | status = (test_bit(ref->rc_slotid, tbl->used_slots) && | |
406 | tbl->slots[ref->rc_slotid].seq_nr == | |
407 | ref->rc_sequenceid); | |
408 | spin_unlock(&tbl->slot_tbl_lock); | |
409 | if (status) | |
410 | goto out; | |
411 | } | |
412 | } | |
413 | ||
414 | out: | |
415 | return status; | |
416 | } | |
417 | ||
/*
 * nfs4_callback_sequence - handle the CB_SEQUENCE operation
 * @args: decoded CB_SEQUENCE arguments (sessionid, slot, seqid, rclists)
 * @res:  CB_SEQUENCE results, filled in on success
 * @cps:  per-compound state; on return cps->clp holds the matched client
 *        (reference put later in nfs4_callback_compound)
 *
 * Validates the session and backchannel slot, checks for referring calls
 * still in flight, and on success bumps the slot sequence number.
 * Returns the NFS4 status in network byte order; res->csr_status carries
 * the status to encode in the reply.
 */
__be32 nfs4_callback_sequence(struct cb_sequenceargs *args,
			      struct cb_sequenceres *res,
			      struct cb_process_state *cps)
{
	struct nfs4_slot_table *tbl;
	struct nfs4_slot *slot;
	struct nfs_client *clp;
	int i;
	__be32 status = htonl(NFS4ERR_BADSESSION);

	/* Look up the client by session id (takes a reference on clp;
	 * it is released in nfs4_callback_compound via cps->clp). */
	clp = nfs4_find_client_sessionid(cps->net, args->csa_addr,
					 &args->csa_sessionid, cps->minorversion);
	if (clp == NULL)
		goto out;

	if (!(clp->cl_session->flags & SESSION4_BACK_CHAN))
		goto out;

	tbl = &clp->cl_session->bc_slot_table;
	slot = tbl->slots + args->csa_slotid;

	spin_lock(&tbl->slot_tbl_lock);
	/* state manager is resetting the session */
	if (test_bit(NFS4_SLOT_TBL_DRAINING, &tbl->slot_tbl_state)) {
		status = htonl(NFS4ERR_DELAY);
		/* Return NFS4ERR_BADSESSION if we're draining the session
		 * in order to reset it.
		 */
		if (test_bit(NFS4CLNT_SESSION_RESET, &clp->cl_state)) {
			status = htonl(NFS4ERR_BADSESSION);
		}
		goto out_unlock;
	}

	/* Echo session/slot info back to the server in the reply. */
	memcpy(&res->csr_sessionid, &args->csa_sessionid,
	       sizeof(res->csr_sessionid));
	res->csr_sequenceid = args->csa_sequenceid;
	res->csr_slotid = args->csa_slotid;
	res->csr_highestslotid = NFS41_BC_MAX_CALLBACKS - 1;
	res->csr_target_highestslotid = NFS41_BC_MAX_CALLBACKS - 1;

	status = validate_seqid(tbl, args);
	if (status)
		goto out_unlock;

	cps->slotid = args->csa_slotid;

	/*
	 * Check for pending referring calls.  If a match is found, a
	 * related callback was received before the response to the original
	 * call.
	 */
	if (referring_call_exists(clp, args->csa_nrclists, args->csa_rclists)) {
		status = htonl(NFS4ERR_DELAY);
		goto out_unlock;
	}

	/*
	 * RFC5661 20.9.3
	 * If CB_SEQUENCE returns an error, then the state of the slot
	 * (sequence ID, cached reply) MUST NOT change.
	 */
	slot->seq_nr++;
out_unlock:
	spin_unlock(&tbl->slot_tbl_lock);

out:
	cps->clp = clp; /* put in nfs4_callback_compound */
	/* The referring-call lists were allocated during XDR decode and
	 * are no longer needed. */
	for (i = 0; i < args->csa_nrclists; i++)
		kfree(args->csa_rclists[i].rcl_refcalls);
	kfree(args->csa_rclists);

	/* An uncached replay is reported through drc_status on the next
	 * op rather than failing CB_SEQUENCE itself. */
	if (status == htonl(NFS4ERR_RETRY_UNCACHED_REP)) {
		cps->drc_status = status;
		status = 0;
	} else
		res->csr_status = status;

	trace_nfs4_cb_sequence(args, res, status);
	dprintk("%s: exit with status = %d res->csr_status %d\n", __func__,
		ntohl(status), ntohl(res->csr_status));
	return status;
}
500 | ||
501 | static bool | |
502 | validate_bitmap_values(unsigned long mask) | |
503 | { | |
504 | return (mask & ~RCA4_TYPE_MASK_ALL) == 0; | |
505 | } | |
506 | ||
/*
 * nfs4_callback_recallany - handle the CB_RECALL_ANY operation
 * @args:  type mask selecting which recallable objects to shed
 * @dummy: unused; the op returns only a status
 * @cps:   per-compound state; cps->clp must have been set by CB_SEQUENCE
 *
 * Expires unused read and/or write delegations and/or recalls all file
 * layouts, as selected by args->craa_type_mask.  Returns the NFS4 status
 * in network byte order.
 */
__be32 nfs4_callback_recallany(struct cb_recallanyargs *args, void *dummy,
			       struct cb_process_state *cps)
{
	__be32 status;
	fmode_t flags = 0;

	status = cpu_to_be32(NFS4ERR_OP_NOT_IN_SESSION);
	if (!cps->clp) /* set in cb_sequence */
		goto out;

	dprintk_rcu("NFS: RECALL_ANY callback request from %s\n",
		rpc_peeraddr2str(cps->clp->cl_rpcclient, RPC_DISPLAY_ADDR));

	/* Reject masks with bits outside the set defined for this op. */
	status = cpu_to_be32(NFS4ERR_INVAL);
	if (!validate_bitmap_values(args->craa_type_mask))
		goto out;

	status = cpu_to_be32(NFS4_OK);
	/* Accumulate the delegation modes to expire from the mask bits. */
	if (test_bit(RCA4_TYPE_MASK_RDATA_DLG, (const unsigned long *)
		     &args->craa_type_mask))
		flags = FMODE_READ;
	if (test_bit(RCA4_TYPE_MASK_WDATA_DLG, (const unsigned long *)
		     &args->craa_type_mask))
		flags |= FMODE_WRITE;
	if (test_bit(RCA4_TYPE_MASK_FILE_LAYOUT, (const unsigned long *)
		     &args->craa_type_mask))
		pnfs_recall_all_layouts(cps->clp);
	if (flags)
		nfs_expire_unused_delegation_types(cps->clp, flags);
out:
	dprintk("%s: exit with status = %d\n", __func__, ntohl(status));
	return status;
}
540 | ||
541 | /* Reduce the fore channel's max_slots to the target value */ | |
542 | __be32 nfs4_callback_recallslot(struct cb_recallslotargs *args, void *dummy, | |
543 | struct cb_process_state *cps) | |
544 | { | |
545 | struct nfs4_slot_table *fc_tbl; | |
546 | __be32 status; | |
547 | ||
548 | status = htonl(NFS4ERR_OP_NOT_IN_SESSION); | |
549 | if (!cps->clp) /* set in cb_sequence */ | |
550 | goto out; | |
551 | ||
552 | dprintk_rcu("NFS: CB_RECALL_SLOT request from %s target highest slotid %u\n", | |
553 | rpc_peeraddr2str(cps->clp->cl_rpcclient, RPC_DISPLAY_ADDR), | |
554 | args->crsa_target_highest_slotid); | |
555 | ||
556 | fc_tbl = &cps->clp->cl_session->fc_slot_table; | |
557 | ||
558 | status = htonl(NFS4_OK); | |
559 | ||
560 | nfs41_set_target_slotid(fc_tbl, args->crsa_target_highest_slotid); | |
561 | nfs41_notify_server(cps->clp); | |
562 | out: | |
563 | dprintk("%s: exit with status = %d\n", __func__, ntohl(status)); | |
564 | return status; | |
565 | } | |
566 | #endif /* CONFIG_NFS_V4_1 */ |