/*
 * Module for the pnfs nfs4 file layout driver.
 * Defines all I/O and Policy interface operations, plus code
 * to register itself with the pNFS client.
 *
 * Copyright (c) 2002
 * The Regents of the University of Michigan
 * All Rights Reserved
 *
 * Dean Hildebrand <dhildebz@umich.edu>
 *
 * Permission is granted to use, copy, create derivative works, and
 * redistribute this software and such derivative works for any purpose,
 * so long as the name of the University of Michigan is not used in
 * any advertising or publicity pertaining to the use or distribution
 * of this software without specific, written prior authorization. If
 * the above copyright notice or any other identification of the
 * University of Michigan is included in any copy of any portion of
 * this software, then the disclaimer below must also be included.
 *
 * This software is provided as is, without representation or warranty
 * of any kind either express or implied, including without limitation
 * the implied warranties of merchantability, fitness for a particular
 * purpose, or noninfringement. The Regents of the University of
 * Michigan shall not be liable for any damages, including special,
 * indirect, incidental, or consequential damages, with respect to any
 * claim arising out of or in connection with the use of the software,
 * even if it has been or is hereafter advised of the possibility of
 * such damages.
 */

#include <linux/nfs_fs.h>
#include <linux/nfs_page.h>
#include <linux/module.h>

#include <linux/sunrpc/metrics.h>

#include "internal.h"
#include "delegation.h"
#include "nfs4filelayout.h"

#define NFSDBG_FACILITY NFSDBG_PNFS_LD

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Dean Hildebrand <dhildebz@umich.edu>");
MODULE_DESCRIPTION("The NFSv4 file layout driver");
#define FILELAYOUT_POLL_RETRY_MAX (15*HZ)

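/*
 * On a dense layout each data server stores only its own stripe units,
 * packed back to back.  A file offset therefore maps to one stripe unit
 * per full stripe row already consumed, plus the remainder within the
 * current stripe unit.
 */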
static loff_t
filelayout_get_dense_offset(struct nfs4_filelayout_segment *flseg,
                loff_t offset)
{
        u32 stripe_width = flseg->stripe_unit * flseg->dsaddr->stripe_count;
        u64 stripe_no;
        u32 rem;

        offset -= flseg->pattern_offset;
        stripe_no = div_u64(offset, stripe_width);
        div_u64_rem(offset, flseg->stripe_unit, &rem);

        return stripe_no * flseg->stripe_unit + rem;
}

/* This function is used by the layout driver to calculate the
 * offset of the file on the dserver based on whether the
 * layout type is STRIPE_DENSE or STRIPE_SPARSE
 */
static loff_t
filelayout_get_dserver_offset(struct pnfs_layout_segment *lseg, loff_t offset)
{
        struct nfs4_filelayout_segment *flseg = FILELAYOUT_LSEG(lseg);

        switch (flseg->stripe_type) {
        case STRIPE_SPARSE:
                return offset;

        case STRIPE_DENSE:
                return filelayout_get_dense_offset(flseg, offset);
        }

        BUG();
}

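/*
 * Redirect the I/O described by @data back through the MDS: mark the
 * header for redo, hand its pages to the MDS resend path, and drop the
 * DS client reference taken in filelayout_write_pagelist().
 */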
static void filelayout_reset_write(struct nfs_write_data *data)
{
        struct nfs_pgio_header *hdr = data->header;
        struct inode *inode = hdr->inode;
        struct rpc_task *task = &data->task;

        if (!test_and_set_bit(NFS_IOHDR_REDO, &hdr->flags)) {
                dprintk("%s Reset task %5u for i/o through MDS "
                        "(req %s/%lld, %u bytes @ offset %llu)\n", __func__,
                        data->task.tk_pid,
                        inode->i_sb->s_id,
                        (long long)NFS_FILEID(inode),
                        data->args.count,
                        (unsigned long long)data->args.offset);

                task->tk_status = pnfs_write_done_resend_to_mds(hdr->inode,
                                                        &hdr->pages,
                                                        hdr->completion_ops);
        }
        /* balance nfs_get_client in filelayout_write_pagelist */
        nfs_put_client(data->ds_clp);
        data->ds_clp = NULL;
}

static void filelayout_reset_read(struct nfs_read_data *data)
{
        struct nfs_pgio_header *hdr = data->header;
        struct inode *inode = hdr->inode;
        struct rpc_task *task = &data->task;

        if (!test_and_set_bit(NFS_IOHDR_REDO, &hdr->flags)) {
                dprintk("%s Reset task %5u for i/o through MDS "
                        "(req %s/%lld, %u bytes @ offset %llu)\n", __func__,
                        data->task.tk_pid,
                        inode->i_sb->s_id,
                        (long long)NFS_FILEID(inode),
                        data->args.count,
                        (unsigned long long)data->args.offset);

                task->tk_status = pnfs_read_done_resend_to_mds(hdr->inode,
                                                        &hdr->pages,
                                                        hdr->completion_ops);
        }
        /* balance nfs_get_client in filelayout_read_pagelist */
        nfs_put_client(data->ds_clp);
        data->ds_clp = NULL;
}

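/*
 * Handle an error returned by a DS operation.  Returns 0 if the RPC
 * actually succeeded, -NFS4ERR_RESET_TO_MDS if the caller should redrive
 * the I/O through the MDS, or -EAGAIN (with task->tk_status cleared) if
 * the RPC should be retried, possibly after waiting for state recovery.
 */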
static int filelayout_async_handle_error(struct rpc_task *task,
                struct nfs4_state *state,
                struct nfs_client *clp,
                struct pnfs_layout_segment *lseg)
{
        struct inode *inode = lseg->pls_layout->plh_inode;
        struct nfs_server *mds_server = NFS_SERVER(inode);
        struct nfs4_deviceid_node *devid = FILELAYOUT_DEVID_NODE(lseg);
        struct nfs_client *mds_client = mds_server->nfs_client;
        struct nfs4_slot_table *tbl = &clp->cl_session->fc_slot_table;

        if (task->tk_status >= 0)
                return 0;

        switch (task->tk_status) {
        /* MDS state errors */
        case -NFS4ERR_DELEG_REVOKED:
        case -NFS4ERR_ADMIN_REVOKED:
        case -NFS4ERR_BAD_STATEID:
                if (state == NULL)
                        break;
                nfs_remove_bad_delegation(state->inode);
        case -NFS4ERR_OPENMODE:
                if (state == NULL)
                        break;
                nfs4_schedule_stateid_recovery(mds_server, state);
                goto wait_on_recovery;
        case -NFS4ERR_EXPIRED:
                if (state != NULL)
                        nfs4_schedule_stateid_recovery(mds_server, state);
                nfs4_schedule_lease_recovery(mds_client);
                goto wait_on_recovery;
        /* DS session errors */
        case -NFS4ERR_BADSESSION:
        case -NFS4ERR_BADSLOT:
        case -NFS4ERR_BAD_HIGH_SLOT:
        case -NFS4ERR_DEADSESSION:
        case -NFS4ERR_CONN_NOT_BOUND_TO_SESSION:
        case -NFS4ERR_SEQ_FALSE_RETRY:
        case -NFS4ERR_SEQ_MISORDERED:
                dprintk("%s ERROR %d, Reset session. Exchangeid "
                        "flags 0x%x\n", __func__, task->tk_status,
                        clp->cl_exchange_flags);
                nfs4_schedule_session_recovery(clp->cl_session);
                break;
        case -NFS4ERR_DELAY:
        case -NFS4ERR_GRACE:
        case -EKEYEXPIRED:
                rpc_delay(task, FILELAYOUT_POLL_RETRY_MAX);
                break;
        case -NFS4ERR_RETRY_UNCACHED_REP:
                break;
        /* RPC connection errors */
        case -ECONNREFUSED:
        case -EHOSTDOWN:
        case -EHOSTUNREACH:
        case -ENETUNREACH:
        case -EIO:
        case -ETIMEDOUT:
        case -EPIPE:
                dprintk("%s DS connection error %d\n", __func__,
                        task->tk_status);
                if (!filelayout_test_devid_invalid(devid))
                        _pnfs_return_layout(state->inode);
                filelayout_mark_devid_invalid(devid);
                rpc_wake_up(&tbl->slot_tbl_waitq);
                nfs4_ds_disconnect(clp);
                /* fall through */
        default:
                dprintk("%s Retry through MDS. Error %d\n", __func__,
                        task->tk_status);
                return -NFS4ERR_RESET_TO_MDS;
        }
out:
        task->tk_status = 0;
        return -EAGAIN;
wait_on_recovery:
        rpc_sleep_on(&mds_client->cl_rpcwaitq, task, NULL);
        if (test_bit(NFS4CLNT_MANAGER_RUNNING, &mds_client->cl_state) == 0)
                rpc_wake_up_queued_task(&mds_client->cl_rpcwaitq, task);
        goto out;
}

/* NFS_PROTO call done callback routines */

static int filelayout_read_done_cb(struct rpc_task *task,
                struct nfs_read_data *data)
{
        struct nfs_pgio_header *hdr = data->header;
        int err;

        err = filelayout_async_handle_error(task, data->args.context->state,
                        data->ds_clp, hdr->lseg);

        switch (err) {
        case -NFS4ERR_RESET_TO_MDS:
                filelayout_reset_read(data);
                return task->tk_status;
        case -EAGAIN:
                rpc_restart_call_prepare(task);
                return -EAGAIN;
        }

        return 0;
}

/*
 * We reference the rpc_cred of the first WRITE that triggers the need for
 * a LAYOUTCOMMIT, and use it to send the layoutcommit compound.
 * rfc5661 is not clear about which credential should be used.
 */
static void
filelayout_set_layoutcommit(struct nfs_write_data *wdata)
{
        struct nfs_pgio_header *hdr = wdata->header;

        if (FILELAYOUT_LSEG(hdr->lseg)->commit_through_mds ||
            wdata->res.verf->committed == NFS_FILE_SYNC)
                return;

        pnfs_set_layoutcommit(wdata);
        dprintk("%s inode %lu pls_end_pos %lu\n", __func__, hdr->inode->i_ino,
                (unsigned long) NFS_I(hdr->inode)->layout->plh_lwb);
}

/*
 * Call ops for the async read/write cases
 * In the case of dense layouts, the offset needs to be reset to its
 * original value.
 */
static void filelayout_read_prepare(struct rpc_task *task, void *data)
{
        struct nfs_read_data *rdata = data;
        struct pnfs_layout_segment *lseg = rdata->header->lseg;

        if (filelayout_test_devid_invalid(FILELAYOUT_DEVID_NODE(lseg))) {
                dprintk("%s task %u reset io to MDS\n", __func__, task->tk_pid);
                filelayout_reset_read(rdata);
                rpc_exit(task, 0);
                return;
        }
        rdata->read_done_cb = filelayout_read_done_cb;

        if (nfs41_setup_sequence(rdata->ds_clp->cl_session,
                        &rdata->args.seq_args, &rdata->res.seq_res,
                        task))
                return;

        rpc_call_start(task);
}

static void filelayout_read_call_done(struct rpc_task *task, void *data)
{
        struct nfs_read_data *rdata = data;

        dprintk("--> %s task->tk_status %d\n", __func__, task->tk_status);

        if (test_bit(NFS_IOHDR_REDO, &rdata->header->flags))
                return;

        /* Note this may cause RPC to be resent */
        rdata->header->mds_ops->rpc_call_done(task, data);
}

static void filelayout_read_count_stats(struct rpc_task *task, void *data)
{
        struct nfs_read_data *rdata = data;

        rpc_count_iostats(task, NFS_SERVER(rdata->header->inode)->client->cl_metrics);
}

static void filelayout_read_release(void *data)
{
        struct nfs_read_data *rdata = data;

        if (!test_bit(NFS_IOHDR_REDO, &rdata->header->flags))
                nfs_put_client(rdata->ds_clp);
        rdata->header->mds_ops->rpc_release(data);
}

static int filelayout_write_done_cb(struct rpc_task *task,
                struct nfs_write_data *data)
{
        struct nfs_pgio_header *hdr = data->header;
        int err;

        err = filelayout_async_handle_error(task, data->args.context->state,
                        data->ds_clp, hdr->lseg);

        switch (err) {
        case -NFS4ERR_RESET_TO_MDS:
                filelayout_reset_write(data);
                return task->tk_status;
        case -EAGAIN:
                rpc_restart_call_prepare(task);
                return -EAGAIN;
        }

        filelayout_set_layoutcommit(data);
        return 0;
}

/* Fake up some data that will cause nfs_commit_release to retry the writes. */
static void prepare_to_resend_writes(struct nfs_commit_data *data)
{
        struct nfs_page *first = nfs_list_entry(data->pages.next);

        data->task.tk_status = 0;
        memcpy(data->verf.verifier, first->wb_verf.verifier,
               sizeof(first->wb_verf.verifier));
        data->verf.verifier[0]++; /* ensure verifier mismatch */
}

static int filelayout_commit_done_cb(struct rpc_task *task,
                struct nfs_commit_data *data)
{
        int err;

        err = filelayout_async_handle_error(task, NULL, data->ds_clp,
                        data->lseg);

        switch (err) {
        case -NFS4ERR_RESET_TO_MDS:
                prepare_to_resend_writes(data);
                return -EAGAIN;
        case -EAGAIN:
                rpc_restart_call_prepare(task);
                return -EAGAIN;
        }

        return 0;
}

static void filelayout_write_prepare(struct rpc_task *task, void *data)
{
        struct nfs_write_data *wdata = data;
        struct pnfs_layout_segment *lseg = wdata->header->lseg;

        if (filelayout_test_devid_invalid(FILELAYOUT_DEVID_NODE(lseg))) {
                dprintk("%s task %u reset io to MDS\n", __func__, task->tk_pid);
                filelayout_reset_write(wdata);
                rpc_exit(task, 0);
                return;
        }
        if (nfs41_setup_sequence(wdata->ds_clp->cl_session,
                        &wdata->args.seq_args, &wdata->res.seq_res,
                        task))
                return;

        rpc_call_start(task);
}

static void filelayout_write_call_done(struct rpc_task *task, void *data)
{
        struct nfs_write_data *wdata = data;

        if (test_bit(NFS_IOHDR_REDO, &wdata->header->flags))
                return;

        /* Note this may cause RPC to be resent */
        wdata->header->mds_ops->rpc_call_done(task, data);
}

static void filelayout_write_count_stats(struct rpc_task *task, void *data)
{
        struct nfs_write_data *wdata = data;

        rpc_count_iostats(task, NFS_SERVER(wdata->header->inode)->client->cl_metrics);
}

static void filelayout_write_release(void *data)
{
        struct nfs_write_data *wdata = data;

        if (!test_bit(NFS_IOHDR_REDO, &wdata->header->flags))
                nfs_put_client(wdata->ds_clp);
        wdata->header->mds_ops->rpc_release(data);
}

static void filelayout_commit_prepare(struct rpc_task *task, void *data)
{
        struct nfs_commit_data *wdata = data;

        if (nfs41_setup_sequence(wdata->ds_clp->cl_session,
                        &wdata->args.seq_args, &wdata->res.seq_res,
                        task))
                return;

        rpc_call_start(task);
}

static void filelayout_write_commit_done(struct rpc_task *task, void *data)
{
        struct nfs_commit_data *wdata = data;

        /* Note this may cause RPC to be resent */
        wdata->mds_ops->rpc_call_done(task, data);
}

static void filelayout_commit_count_stats(struct rpc_task *task, void *data)
{
        struct nfs_commit_data *cdata = data;

        rpc_count_iostats(task, NFS_SERVER(cdata->inode)->client->cl_metrics);
}

static void filelayout_commit_release(void *calldata)
{
        struct nfs_commit_data *data = calldata;

        data->completion_ops->completion(data);
        put_lseg(data->lseg);
        nfs_put_client(data->ds_clp);
        nfs_commitdata_release(data);
}

static const struct rpc_call_ops filelayout_read_call_ops = {
        .rpc_call_prepare = filelayout_read_prepare,
        .rpc_call_done = filelayout_read_call_done,
        .rpc_count_stats = filelayout_read_count_stats,
        .rpc_release = filelayout_read_release,
};

static const struct rpc_call_ops filelayout_write_call_ops = {
        .rpc_call_prepare = filelayout_write_prepare,
        .rpc_call_done = filelayout_write_call_done,
        .rpc_count_stats = filelayout_write_count_stats,
        .rpc_release = filelayout_write_release,
};

static const struct rpc_call_ops filelayout_commit_call_ops = {
        .rpc_call_prepare = filelayout_commit_prepare,
        .rpc_call_done = filelayout_write_commit_done,
        .rpc_count_stats = filelayout_commit_count_stats,
        .rpc_release = filelayout_commit_release,
};

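/*
 * Map the READ described by @data onto the data server responsible for
 * its byte range and issue it asynchronously.  Returns PNFS_NOT_ATTEMPTED
 * (so the caller falls back to the MDS) if no DS connection is available.
 */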
static enum pnfs_try_status
filelayout_read_pagelist(struct nfs_read_data *data)
{
        struct nfs_pgio_header *hdr = data->header;
        struct pnfs_layout_segment *lseg = hdr->lseg;
        struct nfs4_pnfs_ds *ds;
        loff_t offset = data->args.offset;
        u32 j, idx;
        struct nfs_fh *fh;
        int status;

        dprintk("--> %s ino %lu pgbase %u req %Zu@%llu\n",
                __func__, hdr->inode->i_ino,
                data->args.pgbase, (size_t)data->args.count, offset);

        /* Retrieve the correct rpc_client for the byte range */
        j = nfs4_fl_calc_j_index(lseg, offset);
        idx = nfs4_fl_calc_ds_index(lseg, j);
        ds = nfs4_fl_prepare_ds(lseg, idx);
        if (!ds)
                return PNFS_NOT_ATTEMPTED;
        dprintk("%s USE DS: %s cl_count %d\n", __func__,
                ds->ds_remotestr, atomic_read(&ds->ds_clp->cl_count));

        /* No multipath support. Use first DS */
        atomic_inc(&ds->ds_clp->cl_count);
        data->ds_clp = ds->ds_clp;
        fh = nfs4_fl_select_ds_fh(lseg, j);
        if (fh)
                data->args.fh = fh;

        data->args.offset = filelayout_get_dserver_offset(lseg, offset);
        data->mds_offset = offset;

        /* Perform an asynchronous read to ds */
        status = nfs_initiate_read(ds->ds_clp->cl_rpcclient, data,
                        &filelayout_read_call_ops, RPC_TASK_SOFTCONN);
        BUG_ON(status != 0);
        return PNFS_ATTEMPTED;
}

/* Perform async writes. */
static enum pnfs_try_status
filelayout_write_pagelist(struct nfs_write_data *data, int sync)
{
        struct nfs_pgio_header *hdr = data->header;
        struct pnfs_layout_segment *lseg = hdr->lseg;
        struct nfs4_pnfs_ds *ds;
        loff_t offset = data->args.offset;
        u32 j, idx;
        struct nfs_fh *fh;
        int status;

        /* Retrieve the correct rpc_client for the byte range */
        j = nfs4_fl_calc_j_index(lseg, offset);
        idx = nfs4_fl_calc_ds_index(lseg, j);
        ds = nfs4_fl_prepare_ds(lseg, idx);
        if (!ds)
                return PNFS_NOT_ATTEMPTED;
        dprintk("%s ino %lu sync %d req %Zu@%llu DS: %s cl_count %d\n",
                __func__, hdr->inode->i_ino, sync, (size_t) data->args.count,
                offset, ds->ds_remotestr, atomic_read(&ds->ds_clp->cl_count));

        data->write_done_cb = filelayout_write_done_cb;
        atomic_inc(&ds->ds_clp->cl_count);
        data->ds_clp = ds->ds_clp;
        fh = nfs4_fl_select_ds_fh(lseg, j);
        if (fh)
                data->args.fh = fh;
        /*
         * Get the file offset on the dserver. Set the write offset to
         * this offset and save the original offset.
         */
        data->args.offset = filelayout_get_dserver_offset(lseg, offset);

        /* Perform an asynchronous write */
        status = nfs_initiate_write(ds->ds_clp->cl_rpcclient, data,
                        &filelayout_write_call_ops, sync,
                        RPC_TASK_SOFTCONN);
        BUG_ON(status != 0);
        return PNFS_ATTEMPTED;
}

/*
 * filelayout_check_layout()
 *
 * Make sure layout segment parameters are sane WRT the device.
 * At this point no generic layer initialization of the lseg has occurred,
 * and nothing has been added to the layout_hdr cache.
 *
 */
static int
filelayout_check_layout(struct pnfs_layout_hdr *lo,
                struct nfs4_filelayout_segment *fl,
                struct nfs4_layoutget_res *lgr,
                struct nfs4_deviceid *id,
                gfp_t gfp_flags)
{
        struct nfs4_deviceid_node *d;
        struct nfs4_file_layout_dsaddr *dsaddr;
        int status = -EINVAL;
        struct nfs_server *nfss = NFS_SERVER(lo->plh_inode);

        dprintk("--> %s\n", __func__);

        /* FIXME: remove this check when layout segment support is added */
        if (lgr->range.offset != 0 ||
            lgr->range.length != NFS4_MAX_UINT64) {
                dprintk("%s Only whole file layouts supported. Use MDS i/o\n",
                        __func__);
                goto out;
        }

        if (fl->pattern_offset > lgr->range.offset) {
                dprintk("%s pattern_offset %lld too large\n",
                        __func__, fl->pattern_offset);
                goto out;
        }

        if (!fl->stripe_unit || fl->stripe_unit % PAGE_SIZE) {
                dprintk("%s Invalid stripe unit (%u)\n",
                        __func__, fl->stripe_unit);
                goto out;
        }

        /* find and reference the deviceid */
        d = nfs4_find_get_deviceid(NFS_SERVER(lo->plh_inode)->pnfs_curr_ld,
                        NFS_SERVER(lo->plh_inode)->nfs_client, id);
        if (d == NULL) {
                dsaddr = get_device_info(lo->plh_inode, id, gfp_flags);
                if (dsaddr == NULL)
                        goto out;
        } else
                dsaddr = container_of(d, struct nfs4_file_layout_dsaddr, id_node);
        /* Found deviceid is being reaped */
        if (test_bit(NFS_DEVICEID_INVALID, &dsaddr->id_node.flags))
                goto out_put;

        fl->dsaddr = dsaddr;

        if (fl->first_stripe_index >= dsaddr->stripe_count) {
                dprintk("%s Bad first_stripe_index %u\n",
                        __func__, fl->first_stripe_index);
                goto out_put;
        }

        if ((fl->stripe_type == STRIPE_SPARSE &&
             fl->num_fh > 1 && fl->num_fh != dsaddr->ds_num) ||
            (fl->stripe_type == STRIPE_DENSE &&
             fl->num_fh != dsaddr->stripe_count)) {
                dprintk("%s num_fh %u not valid for given packing\n",
                        __func__, fl->num_fh);
                goto out_put;
        }

        if (fl->stripe_unit % nfss->rsize || fl->stripe_unit % nfss->wsize) {
                dprintk("%s Stripe unit (%u) not aligned with rsize %u "
                        "wsize %u\n", __func__, fl->stripe_unit, nfss->rsize,
                        nfss->wsize);
        }

        status = 0;
out:
        dprintk("--> %s returns %d\n", __func__, status);
        return status;
out_put:
        nfs4_fl_put_deviceid(dsaddr);
        goto out;
}

static void filelayout_free_fh_array(struct nfs4_filelayout_segment *fl)
{
        int i;

        for (i = 0; i < fl->num_fh; i++) {
                if (!fl->fh_array[i])
                        break;
                kfree(fl->fh_array[i]);
        }
        kfree(fl->fh_array);
        fl->fh_array = NULL;
}

static void
_filelayout_free_lseg(struct nfs4_filelayout_segment *fl)
{
        filelayout_free_fh_array(fl);
        kfree(fl);
}

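/*
 * Decode the opaque files-layout body returned by LAYOUTGET into @fl:
 * the deviceid, the striping parameters and the per-stripe filehandle
 * array.
 */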
static int
filelayout_decode_layout(struct pnfs_layout_hdr *flo,
                struct nfs4_filelayout_segment *fl,
                struct nfs4_layoutget_res *lgr,
                struct nfs4_deviceid *id,
                gfp_t gfp_flags)
{
        struct xdr_stream stream;
        struct xdr_buf buf;
        struct page *scratch;
        __be32 *p;
        uint32_t nfl_util;
        int i;

        dprintk("%s: set_layout_map Begin\n", __func__);

        scratch = alloc_page(gfp_flags);
        if (!scratch)
                return -ENOMEM;

        xdr_init_decode_pages(&stream, &buf, lgr->layoutp->pages, lgr->layoutp->len);
        xdr_set_scratch_buffer(&stream, page_address(scratch), PAGE_SIZE);

        /* 20 = ufl_util (4), first_stripe_index (4), pattern_offset (8),
         * num_fh (4) */
        p = xdr_inline_decode(&stream, NFS4_DEVICEID4_SIZE + 20);
        if (unlikely(!p))
                goto out_err;

        memcpy(id, p, sizeof(*id));
        p += XDR_QUADLEN(NFS4_DEVICEID4_SIZE);
        nfs4_print_deviceid(id);

        nfl_util = be32_to_cpup(p++);
        if (nfl_util & NFL4_UFLG_COMMIT_THRU_MDS)
                fl->commit_through_mds = 1;
        if (nfl_util & NFL4_UFLG_DENSE)
                fl->stripe_type = STRIPE_DENSE;
        else
                fl->stripe_type = STRIPE_SPARSE;
        fl->stripe_unit = nfl_util & ~NFL4_UFLG_MASK;

        fl->first_stripe_index = be32_to_cpup(p++);
        p = xdr_decode_hyper(p, &fl->pattern_offset);
        fl->num_fh = be32_to_cpup(p++);

        dprintk("%s: nfl_util 0x%X num_fh %u fsi %u po %llu\n",
                __func__, nfl_util, fl->num_fh, fl->first_stripe_index,
                fl->pattern_offset);

        /* Note that a zero value for num_fh is legal for STRIPE_SPARSE.
         * Further checking is done in filelayout_check_layout */
        if (fl->num_fh >
            max(NFS4_PNFS_MAX_STRIPE_CNT, NFS4_PNFS_MAX_MULTI_CNT))
                goto out_err;

        if (fl->num_fh > 0) {
                fl->fh_array = kzalloc(fl->num_fh * sizeof(struct nfs_fh *),
                                gfp_flags);
                if (!fl->fh_array)
                        goto out_err;
        }

        for (i = 0; i < fl->num_fh; i++) {
                /* Do we want to use a mempool here? */
                fl->fh_array[i] = kmalloc(sizeof(struct nfs_fh), gfp_flags);
                if (!fl->fh_array[i])
                        goto out_err_free;

                p = xdr_inline_decode(&stream, 4);
                if (unlikely(!p))
                        goto out_err_free;
                fl->fh_array[i]->size = be32_to_cpup(p++);
                if (sizeof(struct nfs_fh) < fl->fh_array[i]->size) {
                        printk(KERN_ERR "NFS: Too big fh %d received %d\n",
                               i, fl->fh_array[i]->size);
                        goto out_err_free;
                }

                p = xdr_inline_decode(&stream, fl->fh_array[i]->size);
                if (unlikely(!p))
                        goto out_err_free;
                memcpy(fl->fh_array[i]->data, p, fl->fh_array[i]->size);
                dprintk("DEBUG: %s: fh len %d\n", __func__,
                        fl->fh_array[i]->size);
        }

        __free_page(scratch);
        return 0;

out_err_free:
        filelayout_free_fh_array(fl);
out_err:
        __free_page(scratch);
        return -EIO;
}

static void
filelayout_free_lseg(struct pnfs_layout_segment *lseg)
{
        struct nfs4_filelayout_segment *fl = FILELAYOUT_LSEG(lseg);

        dprintk("--> %s\n", __func__);
        nfs4_fl_put_deviceid(fl->dsaddr);
        /* This assumes a single RW lseg */
        if (lseg->pls_range.iomode == IOMODE_RW) {
                struct nfs4_filelayout *flo;

                flo = FILELAYOUT_FROM_HDR(lseg->pls_layout);
                flo->commit_info.nbuckets = 0;
                kfree(flo->commit_info.buckets);
                flo->commit_info.buckets = NULL;
        }
        _filelayout_free_lseg(fl);
}

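/*
 * Lazily allocate the array of DS commit buckets (one per data server
 * for sparse layouts, one per stripe for dense) the first time an
 * IOMODE_RW lseg is used for writing.  Nothing to do when commits go
 * through the MDS.
 */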
static int
filelayout_alloc_commit_info(struct pnfs_layout_segment *lseg,
                struct nfs_commit_info *cinfo,
                gfp_t gfp_flags)
{
        struct nfs4_filelayout_segment *fl = FILELAYOUT_LSEG(lseg);
        struct pnfs_commit_bucket *buckets;
        int size;

        if (fl->commit_through_mds)
                return 0;
        if (cinfo->ds->nbuckets != 0) {
                /* This assumes there is only one IOMODE_RW lseg. What
                 * we really want to do is have a layout_hdr level
                 * dictionary of <multipath_list4, fh> keys, each
                 * associated with a struct list_head, populated by calls
                 * to filelayout_write_pagelist().
                 * */
                return 0;
        }

        size = (fl->stripe_type == STRIPE_SPARSE) ?
                fl->dsaddr->ds_num : fl->dsaddr->stripe_count;

        buckets = kcalloc(size, sizeof(struct pnfs_commit_bucket),
                        gfp_flags);
        if (!buckets)
                return -ENOMEM;
        else {
                int i;

                spin_lock(cinfo->lock);
                if (cinfo->ds->nbuckets != 0)
                        kfree(buckets);
                else {
                        cinfo->ds->buckets = buckets;
                        cinfo->ds->nbuckets = size;
                        for (i = 0; i < size; i++) {
                                INIT_LIST_HEAD(&buckets[i].written);
                                INIT_LIST_HEAD(&buckets[i].committing);
                        }
                }
                spin_unlock(cinfo->lock);
                return 0;
        }
}

static struct pnfs_layout_segment *
filelayout_alloc_lseg(struct pnfs_layout_hdr *layoutid,
                struct nfs4_layoutget_res *lgr,
                gfp_t gfp_flags)
{
        struct nfs4_filelayout_segment *fl;
        int rc;
        struct nfs4_deviceid id;

        dprintk("--> %s\n", __func__);
        fl = kzalloc(sizeof(*fl), gfp_flags);
        if (!fl)
                return NULL;

        rc = filelayout_decode_layout(layoutid, fl, lgr, &id, gfp_flags);
        if (rc != 0 || filelayout_check_layout(layoutid, fl, lgr, &id, gfp_flags)) {
                _filelayout_free_lseg(fl);
                return NULL;
        }
        return &fl->generic_hdr;
}

/*
 * filelayout_pg_test(). Called by nfs_can_coalesce_requests()
 *
 * return true  : coalesce page
 * return false : don't coalesce page
 */
static bool
filelayout_pg_test(struct nfs_pageio_descriptor *pgio, struct nfs_page *prev,
                struct nfs_page *req)
{
        u64 p_stripe, r_stripe;
        u32 stripe_unit;

        if (!pnfs_generic_pg_test(pgio, prev, req) ||
            !nfs_generic_pg_test(pgio, prev, req))
                return false;

        p_stripe = (u64)req_offset(prev);
        r_stripe = (u64)req_offset(req);
        stripe_unit = FILELAYOUT_LSEG(pgio->pg_lseg)->stripe_unit;

        do_div(p_stripe, stripe_unit);
        do_div(r_stripe, stripe_unit);

        return (p_stripe == r_stripe);
}

static void
filelayout_pg_init_read(struct nfs_pageio_descriptor *pgio,
                struct nfs_page *req)
{
        BUG_ON(pgio->pg_lseg != NULL);

        if (req->wb_offset != req->wb_pgbase) {
                /*
                 * Handling unaligned pages is difficult, because we would
                 * have to somehow split a req in two in certain cases in
                 * the pg.test code. Avoid this by just not using pnfs
                 * in this case.
                 */
                nfs_pageio_reset_read_mds(pgio);
                return;
        }
        pgio->pg_lseg = pnfs_update_layout(pgio->pg_inode,
                        req->wb_context,
                        0,
                        NFS4_MAX_UINT64,
                        IOMODE_READ,
                        GFP_KERNEL);
        /* If no lseg, fall back to read through mds */
        if (pgio->pg_lseg == NULL)
                nfs_pageio_reset_read_mds(pgio);
}

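/*
 * Look up (or obtain) an IOMODE_RW layout segment for this request and
 * set up the DS commit buckets; fall back to writing through the MDS for
 * unaligned pages or on any failure.
 */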
static void
filelayout_pg_init_write(struct nfs_pageio_descriptor *pgio,
                struct nfs_page *req)
{
        struct nfs_commit_info cinfo;
        int status;

        BUG_ON(pgio->pg_lseg != NULL);

        if (req->wb_offset != req->wb_pgbase)
                goto out_mds;
        pgio->pg_lseg = pnfs_update_layout(pgio->pg_inode,
                        req->wb_context,
                        0,
                        NFS4_MAX_UINT64,
                        IOMODE_RW,
                        GFP_NOFS);
        /* If no lseg, fall back to write through mds */
        if (pgio->pg_lseg == NULL)
                goto out_mds;
        nfs_init_cinfo(&cinfo, pgio->pg_inode, pgio->pg_dreq);
        status = filelayout_alloc_commit_info(pgio->pg_lseg, &cinfo, GFP_NOFS);
        if (status < 0) {
                put_lseg(pgio->pg_lseg);
                pgio->pg_lseg = NULL;
                goto out_mds;
        }
        return;
out_mds:
        nfs_pageio_reset_write_mds(pgio);
}

static const struct nfs_pageio_ops filelayout_pg_read_ops = {
        .pg_init = filelayout_pg_init_read,
        .pg_test = filelayout_pg_test,
        .pg_doio = pnfs_generic_pg_readpages,
};

static const struct nfs_pageio_ops filelayout_pg_write_ops = {
        .pg_init = filelayout_pg_init_write,
        .pg_test = filelayout_pg_test,
        .pg_doio = pnfs_generic_pg_writepages,
};

static u32 select_bucket_index(struct nfs4_filelayout_segment *fl, u32 j)
{
        if (fl->stripe_type == STRIPE_SPARSE)
                return nfs4_fl_calc_ds_index(&fl->generic_hdr, j);
        else
                return j;
}

/* The generic layer is about to remove the req from the commit list.
 * If this will make the bucket empty, it will need to put the lseg reference.
 */
static void
filelayout_clear_request_commit(struct nfs_page *req,
                struct nfs_commit_info *cinfo)
{
        struct pnfs_layout_segment *freeme = NULL;

        spin_lock(cinfo->lock);
        if (!test_and_clear_bit(PG_COMMIT_TO_DS, &req->wb_flags))
                goto out;
        cinfo->ds->nwritten--;
        if (list_is_singular(&req->wb_list)) {
                struct pnfs_commit_bucket *bucket;

                bucket = list_first_entry(&req->wb_list,
                                struct pnfs_commit_bucket,
                                written);
                freeme = bucket->wlseg;
                bucket->wlseg = NULL;
        }
out:
        nfs_request_remove_commit_list(req, cinfo);
        spin_unlock(cinfo->lock);
        put_lseg(freeme);
}

static struct list_head *
filelayout_choose_commit_list(struct nfs_page *req,
                struct pnfs_layout_segment *lseg,
                struct nfs_commit_info *cinfo)
{
        struct nfs4_filelayout_segment *fl = FILELAYOUT_LSEG(lseg);
        u32 i, j;
        struct list_head *list;
        struct pnfs_commit_bucket *buckets;

        if (fl->commit_through_mds)
                return &cinfo->mds->list;

        /* Note that we are calling nfs4_fl_calc_j_index on each page
         * that ends up being committed to a data server. An attractive
         * alternative is to add a field to nfs_write_data and nfs_page
         * to store the value calculated in filelayout_write_pagelist
         * and just use that here.
         */
        j = nfs4_fl_calc_j_index(lseg, req_offset(req));
        i = select_bucket_index(fl, j);
        buckets = cinfo->ds->buckets;
        list = &buckets[i].written;
        if (list_empty(list)) {
                /* Non-empty buckets hold a reference on the lseg. That ref
                 * is normally transferred to the COMMIT call and released
                 * there. It could also be released if the last req is pulled
                 * off due to a rewrite, in which case it will be done in
                 * filelayout_clear_request_commit
                 */
                buckets[i].wlseg = get_lseg(lseg);
        }
        set_bit(PG_COMMIT_TO_DS, &req->wb_flags);
        cinfo->ds->nwritten++;
        return list;
}

static void
filelayout_mark_request_commit(struct nfs_page *req,
                struct pnfs_layout_segment *lseg,
                struct nfs_commit_info *cinfo)
{
        struct list_head *list;

        list = filelayout_choose_commit_list(req, lseg, cinfo);
        nfs_request_add_commit_list(req, list, cinfo);
}

static u32 calc_ds_index_from_commit(struct pnfs_layout_segment *lseg, u32 i)
{
        struct nfs4_filelayout_segment *flseg = FILELAYOUT_LSEG(lseg);

        if (flseg->stripe_type == STRIPE_SPARSE)
                return i;
        else
                return nfs4_fl_calc_ds_index(lseg, i);
}

static struct nfs_fh *
select_ds_fh_from_commit(struct pnfs_layout_segment *lseg, u32 i)
{
        struct nfs4_filelayout_segment *flseg = FILELAYOUT_LSEG(lseg);

        if (flseg->stripe_type == STRIPE_SPARSE) {
                if (flseg->num_fh == 1)
                        i = 0;
                else if (flseg->num_fh == 0)
                        /* Use the MDS OPEN fh set in nfs_read_rpcsetup */
                        return NULL;
        }
        return flseg->fh_array[i];
}

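/*
 * Send a COMMIT to the data server that holds this bucket's writes.
 * If no DS connection can be set up, the writes are marked for resend
 * through the MDS and the commit data is released.
 */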
static int filelayout_initiate_commit(struct nfs_commit_data *data, int how)
{
        struct pnfs_layout_segment *lseg = data->lseg;
        struct nfs4_pnfs_ds *ds;
        u32 idx;
        struct nfs_fh *fh;

        idx = calc_ds_index_from_commit(lseg, data->ds_commit_index);
        ds = nfs4_fl_prepare_ds(lseg, idx);
        if (!ds) {
                prepare_to_resend_writes(data);
                filelayout_commit_release(data);
                return -EAGAIN;
        }
        dprintk("%s ino %lu, how %d cl_count %d\n", __func__,
                data->inode->i_ino, how, atomic_read(&ds->ds_clp->cl_count));
        data->commit_done_cb = filelayout_commit_done_cb;
        atomic_inc(&ds->ds_clp->cl_count);
        data->ds_clp = ds->ds_clp;
        fh = select_ds_fh_from_commit(lseg, data->ds_commit_index);
        if (fh)
                data->args.fh = fh;
        return nfs_initiate_commit(ds->ds_clp->cl_rpcclient, data,
                        &filelayout_commit_call_ops, how,
                        RPC_TASK_SOFTCONN);
}

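/*
 * Move requests from @src to @dst until @max have been transferred (the
 * limit is ignored for O_DIRECT, i.e. when cinfo->dreq is set), skipping
 * any request that cannot be locked and dropping each moved request from
 * the commit accounting.
 */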
static int
transfer_commit_list(struct list_head *src, struct list_head *dst,
                struct nfs_commit_info *cinfo, int max)
{
        struct nfs_page *req, *tmp;
        int ret = 0;

        list_for_each_entry_safe(req, tmp, src, wb_list) {
                if (!nfs_lock_request(req))
                        continue;
                if (cond_resched_lock(cinfo->lock))
                        list_safe_reset_next(req, tmp, wb_list);
                nfs_request_remove_commit_list(req, cinfo);
                clear_bit(PG_COMMIT_TO_DS, &req->wb_flags);
                nfs_list_add_request(req, dst);
                ret++;
                if ((ret == max) && !cinfo->dreq)
                        break;
        }
        return ret;
}

static int
filelayout_scan_ds_commit_list(struct pnfs_commit_bucket *bucket,
                struct nfs_commit_info *cinfo,
                int max)
{
        struct list_head *src = &bucket->written;
        struct list_head *dst = &bucket->committing;
        int ret;

        ret = transfer_commit_list(src, dst, cinfo, max);
        if (ret) {
                cinfo->ds->nwritten -= ret;
                cinfo->ds->ncommitting += ret;
                bucket->clseg = bucket->wlseg;
                if (list_empty(src))
                        bucket->wlseg = NULL;
                else
                        get_lseg(bucket->clseg);
        }
        return ret;
}

/* Move reqs from written to committing lists, returning count of number moved.
 * Note called with cinfo->lock held.
 */
static int filelayout_scan_commit_lists(struct nfs_commit_info *cinfo,
                int max)
{
        int i, rv = 0, cnt;

        for (i = 0; i < cinfo->ds->nbuckets && max != 0; i++) {
                cnt = filelayout_scan_ds_commit_list(&cinfo->ds->buckets[i],
                                cinfo, max);
                max -= cnt;
                rv += cnt;
        }
        return rv;
}

/* Pull everything off the buckets' written lists and dump into @dst */
static void filelayout_recover_commit_reqs(struct list_head *dst,
                struct nfs_commit_info *cinfo)
{
        struct pnfs_commit_bucket *b;
        int i;

        /* NOTE cinfo->lock is NOT held, relying on fact that this is
         * only called on single thread per dreq.
         * Can't take the lock because need to do put_lseg
         */
        for (i = 0, b = cinfo->ds->buckets; i < cinfo->ds->nbuckets; i++, b++) {
                if (transfer_commit_list(&b->written, dst, cinfo, 0)) {
                        BUG_ON(!list_empty(&b->written));
                        put_lseg(b->wlseg);
                        b->wlseg = NULL;
                }
        }
        cinfo->ds->nwritten = 0;
}

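/*
 * Allocate one nfs_commit_data per bucket that has requests on its
 * committing list and add it to @list.  Returns the number of commit
 * calls prepared; buckets that could not get a commit structure are
 * requeued via nfs_retry_commit().
 */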
static unsigned int
alloc_ds_commits(struct nfs_commit_info *cinfo, struct list_head *list)
{
        struct pnfs_ds_commit_info *fl_cinfo;
        struct pnfs_commit_bucket *bucket;
        struct nfs_commit_data *data;
        int i, j;
        unsigned int nreq = 0;

        fl_cinfo = cinfo->ds;
        bucket = fl_cinfo->buckets;
        for (i = 0; i < fl_cinfo->nbuckets; i++, bucket++) {
                if (list_empty(&bucket->committing))
                        continue;
                data = nfs_commitdata_alloc();
                if (!data)
                        break;
                data->ds_commit_index = i;
                data->lseg = bucket->clseg;
                bucket->clseg = NULL;
                list_add(&data->pages, list);
                nreq++;
        }

        /* Clean up on error */
        for (j = i; j < fl_cinfo->nbuckets; j++, bucket++) {
                if (list_empty(&bucket->committing))
                        continue;
                nfs_retry_commit(&bucket->committing, bucket->clseg, cinfo);
                put_lseg(bucket->clseg);
                bucket->clseg = NULL;
        }
        /* Caller will clean up entries put on list */
        return nreq;
}

/* This follows nfs_commit_list pretty closely */
static int
filelayout_commit_pagelist(struct inode *inode, struct list_head *mds_pages,
                int how, struct nfs_commit_info *cinfo)
{
        struct nfs_commit_data *data, *tmp;
        LIST_HEAD(list);
        unsigned int nreq = 0;

        if (!list_empty(mds_pages)) {
                data = nfs_commitdata_alloc();
                if (data != NULL) {
                        data->lseg = NULL;
                        list_add(&data->pages, &list);
                        nreq++;
                } else
                        nfs_retry_commit(mds_pages, NULL, cinfo);
        }

        nreq += alloc_ds_commits(cinfo, &list);

        if (nreq == 0) {
                cinfo->completion_ops->error_cleanup(NFS_I(inode));
                goto out;
        }

        atomic_add(nreq, &cinfo->mds->rpcs_out);

        list_for_each_entry_safe(data, tmp, &list, pages) {
                list_del_init(&data->pages);
                if (!data->lseg) {
                        nfs_init_commit(data, mds_pages, NULL, cinfo);
                        nfs_initiate_commit(NFS_CLIENT(inode), data,
                                        data->mds_ops, how, 0);
                } else {
                        struct pnfs_commit_bucket *buckets;

                        buckets = cinfo->ds->buckets;
                        nfs_init_commit(data, &buckets[data->ds_commit_index].committing, data->lseg, cinfo);
                        filelayout_initiate_commit(data, how);
                }
        }
out:
        cinfo->ds->ncommitting = 0;
        return PNFS_ATTEMPTED;
}

static void
filelayout_free_deveiceid_node(struct nfs4_deviceid_node *d)
{
        nfs4_fl_free_deviceid(container_of(d, struct nfs4_file_layout_dsaddr, id_node));
}

static struct pnfs_layout_hdr *
filelayout_alloc_layout_hdr(struct inode *inode, gfp_t gfp_flags)
{
        struct nfs4_filelayout *flo;

        flo = kzalloc(sizeof(*flo), gfp_flags);
        return &flo->generic_hdr;
}

static void
filelayout_free_layout_hdr(struct pnfs_layout_hdr *lo)
{
        kfree(FILELAYOUT_FROM_HDR(lo));
}

static struct pnfs_ds_commit_info *
filelayout_get_ds_info(struct inode *inode)
{
        struct pnfs_layout_hdr *layout = NFS_I(inode)->layout;

        if (layout == NULL)
                return NULL;
        else
                return &FILELAYOUT_FROM_HDR(layout)->commit_info;
}

static struct pnfs_layoutdriver_type filelayout_type = {
        .id = LAYOUT_NFSV4_1_FILES,
        .name = "LAYOUT_NFSV4_1_FILES",
        .owner = THIS_MODULE,
        .alloc_layout_hdr = filelayout_alloc_layout_hdr,
        .free_layout_hdr = filelayout_free_layout_hdr,
        .alloc_lseg = filelayout_alloc_lseg,
        .free_lseg = filelayout_free_lseg,
        .pg_read_ops = &filelayout_pg_read_ops,
        .pg_write_ops = &filelayout_pg_write_ops,
        .get_ds_info = &filelayout_get_ds_info,
        .mark_request_commit = filelayout_mark_request_commit,
        .clear_request_commit = filelayout_clear_request_commit,
        .scan_commit_lists = filelayout_scan_commit_lists,
        .recover_commit_reqs = filelayout_recover_commit_reqs,
        .commit_pagelist = filelayout_commit_pagelist,
        .read_pagelist = filelayout_read_pagelist,
        .write_pagelist = filelayout_write_pagelist,
        .free_deviceid_node = filelayout_free_deveiceid_node,
};

static int __init nfs4filelayout_init(void)
{
        printk(KERN_INFO "%s: NFSv4 File Layout Driver Registering...\n",
               __func__);
        return pnfs_register_layoutdriver(&filelayout_type);
}

static void __exit nfs4filelayout_exit(void)
{
        printk(KERN_INFO "%s: NFSv4 File Layout Driver Unregistering...\n",
               __func__);
        pnfs_unregister_layoutdriver(&filelayout_type);
}

MODULE_ALIAS("nfs-layouttype4-1");

module_init(nfs4filelayout_init);
module_exit(nfs4filelayout_exit);