/*
 * Module for the pnfs nfs4 file layout driver.
 * Defines all I/O and Policy interface operations, plus code
 * to register itself with the pNFS client.
 *
 * Copyright (c) 2002
 * The Regents of the University of Michigan
 * All Rights Reserved
 *
 * Dean Hildebrand <dhildebz@umich.edu>
 *
 * Permission is granted to use, copy, create derivative works, and
 * redistribute this software and such derivative works for any purpose,
 * so long as the name of the University of Michigan is not used in
 * any advertising or publicity pertaining to the use or distribution
 * of this software without specific, written prior authorization. If
 * the above copyright notice or any other identification of the
 * University of Michigan is included in any copy of any portion of
 * this software, then the disclaimer below must also be included.
 *
 * This software is provided as is, without representation or warranty
 * of any kind either express or implied, including without limitation
 * the implied warranties of merchantability, fitness for a particular
 * purpose, or noninfringement. The Regents of the University of
 * Michigan shall not be liable for any damages, including special,
 * indirect, incidental, or consequential damages, with respect to any
 * claim arising out of or in connection with the use of the software,
 * even if it has been or is hereafter advised of the possibility of
 * such damages.
 */

#include <linux/nfs_fs.h>
#include <linux/nfs_page.h>
#include <linux/module.h>

#include <linux/sunrpc/metrics.h>

#include "nfs4session.h"
#include "internal.h"
#include "delegation.h"
#include "nfs4filelayout.h"
#include "nfs4trace.h"

#define NFSDBG_FACILITY NFSDBG_PNFS_LD

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Dean Hildebrand <dhildebz@umich.edu>");
MODULE_DESCRIPTION("The NFSv4 file layout driver");

#define FILELAYOUT_POLL_RETRY_MAX (15*HZ)
51
52 static loff_t
53 filelayout_get_dense_offset(struct nfs4_filelayout_segment *flseg,
54 loff_t offset)
55 {
56 u32 stripe_width = flseg->stripe_unit * flseg->dsaddr->stripe_count;
57 u64 stripe_no;
58 u32 rem;
59
60 offset -= flseg->pattern_offset;
61 stripe_no = div_u64(offset, stripe_width);
62 div_u64_rem(offset, flseg->stripe_unit, &rem);
63
64 return stripe_no * flseg->stripe_unit + rem;
65 }
66
67 /* This function is used by the layout driver to calculate the
68 * offset of the file on the dserver based on whether the
69 * layout type is STRIPE_DENSE or STRIPE_SPARSE
70 */
71 static loff_t
72 filelayout_get_dserver_offset(struct pnfs_layout_segment *lseg, loff_t offset)
73 {
74 struct nfs4_filelayout_segment *flseg = FILELAYOUT_LSEG(lseg);
75
76 switch (flseg->stripe_type) {
77 case STRIPE_SPARSE:
78 return offset;
79
80 case STRIPE_DENSE:
81 return filelayout_get_dense_offset(flseg, offset);
82 }
83
84 BUG();
85 }
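
/*
 * Worked example of the dense mapping above (illustrative numbers only,
 * not taken from any particular layout): with stripe_unit = 4096,
 * stripe_count = 4 and pattern_offset = 0, stripe_width is 16384.  A file
 * offset of 20480 lies in full stripe number 20480 / 16384 = 1, at
 * 20480 % 4096 = 0 bytes into its stripe unit, so on a dense data-server
 * file the data lives at 1 * 4096 + 0 = 4096, because each DS stores only
 * its own stripe units back to back.  A sparse DS file keeps the original
 * file offsets, which is why STRIPE_SPARSE returns the offset unchanged.
 */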

static void filelayout_reset_write(struct nfs_write_data *data)
{
        struct nfs_pgio_header *hdr = data->header;
        struct rpc_task *task = &data->task;

        if (!test_and_set_bit(NFS_IOHDR_REDO, &hdr->flags)) {
                dprintk("%s Reset task %5u for i/o through MDS "
                        "(req %s/%llu, %u bytes @ offset %llu)\n", __func__,
                        data->task.tk_pid,
                        hdr->inode->i_sb->s_id,
                        (unsigned long long)NFS_FILEID(hdr->inode),
                        data->args.count,
                        (unsigned long long)data->args.offset);

                task->tk_status = pnfs_write_done_resend_to_mds(hdr->inode,
                                                        &hdr->pages,
                                                        hdr->completion_ops,
                                                        hdr->dreq);
        }
}

static void filelayout_reset_read(struct nfs_read_data *data)
{
        struct nfs_pgio_header *hdr = data->header;
        struct rpc_task *task = &data->task;

        if (!test_and_set_bit(NFS_IOHDR_REDO, &hdr->flags)) {
                dprintk("%s Reset task %5u for i/o through MDS "
                        "(req %s/%llu, %u bytes @ offset %llu)\n", __func__,
                        data->task.tk_pid,
                        hdr->inode->i_sb->s_id,
                        (unsigned long long)NFS_FILEID(hdr->inode),
                        data->args.count,
                        (unsigned long long)data->args.offset);

                task->tk_status = pnfs_read_done_resend_to_mds(hdr->inode,
                                                        &hdr->pages,
                                                        hdr->completion_ops,
                                                        hdr->dreq);
        }
}

static void filelayout_fenceme(struct inode *inode, struct pnfs_layout_hdr *lo)
{
        if (!test_and_clear_bit(NFS_LAYOUT_RETURN, &lo->plh_flags))
                return;
        pnfs_return_layout(inode);
}

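/*
 * Sort out an error from a DS-side RPC.  The return-value contract, as
 * consumed by the read/write/commit done callbacks below, is:
 *   0                     - done; leave task->tk_status as the final result
 *                           (success, or -EIO for an unrecoverable stateid)
 *   -EAGAIN               - transient; restart the RPC against the same DS
 *   -NFS4ERR_RESET_TO_MDS - give up on the DS and resend through the MDS
 */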
static int filelayout_async_handle_error(struct rpc_task *task,
                                         struct nfs4_state *state,
                                         struct nfs_client *clp,
                                         struct pnfs_layout_segment *lseg)
{
        struct pnfs_layout_hdr *lo = lseg->pls_layout;
        struct inode *inode = lo->plh_inode;
        struct nfs_server *mds_server = NFS_SERVER(inode);
        struct nfs4_deviceid_node *devid = FILELAYOUT_DEVID_NODE(lseg);
        struct nfs_client *mds_client = mds_server->nfs_client;
        struct nfs4_slot_table *tbl = &clp->cl_session->fc_slot_table;

        if (task->tk_status >= 0)
                return 0;

        switch (task->tk_status) {
        /* MDS state errors */
        case -NFS4ERR_DELEG_REVOKED:
        case -NFS4ERR_ADMIN_REVOKED:
        case -NFS4ERR_BAD_STATEID:
                if (state == NULL)
                        break;
                nfs_remove_bad_delegation(state->inode);
        case -NFS4ERR_OPENMODE:
                if (state == NULL)
                        break;
                if (nfs4_schedule_stateid_recovery(mds_server, state) < 0)
                        goto out_bad_stateid;
                goto wait_on_recovery;
        case -NFS4ERR_EXPIRED:
                if (state != NULL) {
                        if (nfs4_schedule_stateid_recovery(mds_server, state) < 0)
                                goto out_bad_stateid;
                }
                nfs4_schedule_lease_recovery(mds_client);
                goto wait_on_recovery;
        /* DS session errors */
        case -NFS4ERR_BADSESSION:
        case -NFS4ERR_BADSLOT:
        case -NFS4ERR_BAD_HIGH_SLOT:
        case -NFS4ERR_DEADSESSION:
        case -NFS4ERR_CONN_NOT_BOUND_TO_SESSION:
        case -NFS4ERR_SEQ_FALSE_RETRY:
        case -NFS4ERR_SEQ_MISORDERED:
                dprintk("%s ERROR %d, Reset session. Exchangeid "
                        "flags 0x%x\n", __func__, task->tk_status,
                        clp->cl_exchange_flags);
                nfs4_schedule_session_recovery(clp->cl_session, task->tk_status);
                break;
        case -NFS4ERR_DELAY:
        case -NFS4ERR_GRACE:
                rpc_delay(task, FILELAYOUT_POLL_RETRY_MAX);
                break;
        case -NFS4ERR_RETRY_UNCACHED_REP:
                break;
        /* Invalidate Layout errors */
        case -NFS4ERR_PNFS_NO_LAYOUT:
        case -ESTALE:           /* mapped NFS4ERR_STALE */
        case -EBADHANDLE:       /* mapped NFS4ERR_BADHANDLE */
        case -EISDIR:           /* mapped NFS4ERR_ISDIR */
        case -NFS4ERR_FHEXPIRED:
        case -NFS4ERR_WRONG_TYPE:
                dprintk("%s Invalid layout error %d\n", __func__,
                        task->tk_status);
                /*
                 * Destroy layout so new i/o will get a new layout.
                 * Layout will not be destroyed until all current lseg
                 * references are put. Mark layout as invalid to resend failed
                 * i/o and all i/o waiting on the slot table to the MDS until
                 * layout is destroyed and a new valid layout is obtained.
                 */
                pnfs_destroy_layout(NFS_I(inode));
                rpc_wake_up(&tbl->slot_tbl_waitq);
                goto reset;
        /* RPC connection errors */
        case -ECONNREFUSED:
        case -EHOSTDOWN:
        case -EHOSTUNREACH:
        case -ENETUNREACH:
        case -EIO:
        case -ETIMEDOUT:
        case -EPIPE:
                dprintk("%s DS connection error %d\n", __func__,
                        task->tk_status);
                nfs4_mark_deviceid_unavailable(devid);
                set_bit(NFS_LAYOUT_RETURN, &lo->plh_flags);
                rpc_wake_up(&tbl->slot_tbl_waitq);
                /* fall through */
        default:
reset:
                dprintk("%s Retry through MDS. Error %d\n", __func__,
                        task->tk_status);
                return -NFS4ERR_RESET_TO_MDS;
        }
out:
        task->tk_status = 0;
        return -EAGAIN;
out_bad_stateid:
        task->tk_status = -EIO;
        return 0;
wait_on_recovery:
        rpc_sleep_on(&mds_client->cl_rpcwaitq, task, NULL);
        if (test_bit(NFS4CLNT_MANAGER_RUNNING, &mds_client->cl_state) == 0)
                rpc_wake_up_queued_task(&mds_client->cl_rpcwaitq, task);
        goto out;
}

/* NFS_PROTO call done callback routines */

static int filelayout_read_done_cb(struct rpc_task *task,
                                   struct nfs_read_data *data)
{
        struct nfs_pgio_header *hdr = data->header;
        int err;

        trace_nfs4_pnfs_read(data, task->tk_status);
        err = filelayout_async_handle_error(task, data->args.context->state,
                                            data->ds_clp, hdr->lseg);

        switch (err) {
        case -NFS4ERR_RESET_TO_MDS:
                filelayout_reset_read(data);
                return task->tk_status;
        case -EAGAIN:
                rpc_restart_call_prepare(task);
                return -EAGAIN;
        }

        return 0;
}

/*
 * We reference the rpc_cred of the first WRITE that triggers the need for
 * a LAYOUTCOMMIT, and use it to send the layoutcommit compound.
 * rfc5661 is not clear about which credential should be used.
 */
static void
filelayout_set_layoutcommit(struct nfs_write_data *wdata)
{
        struct nfs_pgio_header *hdr = wdata->header;

        if (FILELAYOUT_LSEG(hdr->lseg)->commit_through_mds ||
            wdata->res.verf->committed == NFS_FILE_SYNC)
                return;

        pnfs_set_layoutcommit(wdata);
        dprintk("%s inode %lu pls_end_pos %lu\n", __func__, hdr->inode->i_ino,
                (unsigned long) NFS_I(hdr->inode)->layout->plh_lwb);
}

bool
filelayout_test_devid_unavailable(struct nfs4_deviceid_node *node)
{
        return filelayout_test_devid_invalid(node) ||
                nfs4_test_deviceid_unavailable(node);
}

static bool
filelayout_reset_to_mds(struct pnfs_layout_segment *lseg)
{
        struct nfs4_deviceid_node *node = FILELAYOUT_DEVID_NODE(lseg);

        return filelayout_test_devid_unavailable(node);
}

/*
 * Call ops for the async read/write cases
 * In the case of dense layouts, the offset needs to be reset to its
 * original value.
 */
static void filelayout_read_prepare(struct rpc_task *task, void *data)
{
        struct nfs_read_data *rdata = data;

        if (unlikely(test_bit(NFS_CONTEXT_BAD, &rdata->args.context->flags))) {
                rpc_exit(task, -EIO);
                return;
        }
        if (filelayout_reset_to_mds(rdata->header->lseg)) {
                dprintk("%s task %u reset io to MDS\n", __func__, task->tk_pid);
                filelayout_reset_read(rdata);
                rpc_exit(task, 0);
                return;
        }
        rdata->read_done_cb = filelayout_read_done_cb;

        if (nfs41_setup_sequence(rdata->ds_clp->cl_session,
                                 &rdata->args.seq_args,
                                 &rdata->res.seq_res,
                                 task))
                return;
        nfs4_set_rw_stateid(&rdata->args.stateid, rdata->args.context,
                            rdata->args.lock_context, FMODE_READ);
}

static void filelayout_read_call_done(struct rpc_task *task, void *data)
{
        struct nfs_read_data *rdata = data;

        dprintk("--> %s task->tk_status %d\n", __func__, task->tk_status);

        if (test_bit(NFS_IOHDR_REDO, &rdata->header->flags) &&
            task->tk_status == 0) {
                nfs41_sequence_done(task, &rdata->res.seq_res);
                return;
        }

        /* Note this may cause RPC to be resent */
        rdata->header->mds_ops->rpc_call_done(task, data);
}

static void filelayout_read_count_stats(struct rpc_task *task, void *data)
{
        struct nfs_read_data *rdata = data;

        rpc_count_iostats(task, NFS_SERVER(rdata->header->inode)->client->cl_metrics);
}

static void filelayout_read_release(void *data)
{
        struct nfs_read_data *rdata = data;
        struct pnfs_layout_hdr *lo = rdata->header->lseg->pls_layout;

        filelayout_fenceme(lo->plh_inode, lo);
        nfs_put_client(rdata->ds_clp);
        rdata->header->mds_ops->rpc_release(data);
}

static int filelayout_write_done_cb(struct rpc_task *task,
                                    struct nfs_write_data *data)
{
        struct nfs_pgio_header *hdr = data->header;
        int err;

        trace_nfs4_pnfs_write(data, task->tk_status);
        err = filelayout_async_handle_error(task, data->args.context->state,
                                            data->ds_clp, hdr->lseg);

        switch (err) {
        case -NFS4ERR_RESET_TO_MDS:
                filelayout_reset_write(data);
                return task->tk_status;
        case -EAGAIN:
                rpc_restart_call_prepare(task);
                return -EAGAIN;
        }

        filelayout_set_layoutcommit(data);
        return 0;
}

/* Fake up some data that will cause nfs_commit_release to retry the writes. */
static void prepare_to_resend_writes(struct nfs_commit_data *data)
{
        struct nfs_page *first = nfs_list_entry(data->pages.next);

        data->task.tk_status = 0;
        memcpy(&data->verf.verifier, &first->wb_verf,
               sizeof(data->verf.verifier));
        data->verf.verifier.data[0]++; /* ensure verifier mismatch */
}

static int filelayout_commit_done_cb(struct rpc_task *task,
                                     struct nfs_commit_data *data)
{
        int err;

        trace_nfs4_pnfs_commit_ds(data, task->tk_status);
        err = filelayout_async_handle_error(task, NULL, data->ds_clp,
                                            data->lseg);

        switch (err) {
        case -NFS4ERR_RESET_TO_MDS:
                prepare_to_resend_writes(data);
                return -EAGAIN;
        case -EAGAIN:
                rpc_restart_call_prepare(task);
                return -EAGAIN;
        }

        return 0;
}

static void filelayout_write_prepare(struct rpc_task *task, void *data)
{
        struct nfs_write_data *wdata = data;

        if (unlikely(test_bit(NFS_CONTEXT_BAD, &wdata->args.context->flags))) {
                rpc_exit(task, -EIO);
                return;
        }
        if (filelayout_reset_to_mds(wdata->header->lseg)) {
                dprintk("%s task %u reset io to MDS\n", __func__, task->tk_pid);
                filelayout_reset_write(wdata);
                rpc_exit(task, 0);
                return;
        }
        if (nfs41_setup_sequence(wdata->ds_clp->cl_session,
                                 &wdata->args.seq_args,
                                 &wdata->res.seq_res,
                                 task))
                return;
        nfs4_set_rw_stateid(&wdata->args.stateid, wdata->args.context,
                            wdata->args.lock_context, FMODE_WRITE);
}

static void filelayout_write_call_done(struct rpc_task *task, void *data)
{
        struct nfs_write_data *wdata = data;

        if (test_bit(NFS_IOHDR_REDO, &wdata->header->flags) &&
            task->tk_status == 0) {
                nfs41_sequence_done(task, &wdata->res.seq_res);
                return;
        }

        /* Note this may cause RPC to be resent */
        wdata->header->mds_ops->rpc_call_done(task, data);
}

static void filelayout_write_count_stats(struct rpc_task *task, void *data)
{
        struct nfs_write_data *wdata = data;

        rpc_count_iostats(task, NFS_SERVER(wdata->header->inode)->client->cl_metrics);
}

static void filelayout_write_release(void *data)
{
        struct nfs_write_data *wdata = data;
        struct pnfs_layout_hdr *lo = wdata->header->lseg->pls_layout;

        filelayout_fenceme(lo->plh_inode, lo);
        nfs_put_client(wdata->ds_clp);
        wdata->header->mds_ops->rpc_release(data);
}

static void filelayout_commit_prepare(struct rpc_task *task, void *data)
{
        struct nfs_commit_data *wdata = data;

        nfs41_setup_sequence(wdata->ds_clp->cl_session,
                             &wdata->args.seq_args,
                             &wdata->res.seq_res,
                             task);
}

static void filelayout_write_commit_done(struct rpc_task *task, void *data)
{
        struct nfs_commit_data *wdata = data;

        /* Note this may cause RPC to be resent */
        wdata->mds_ops->rpc_call_done(task, data);
}

static void filelayout_commit_count_stats(struct rpc_task *task, void *data)
{
        struct nfs_commit_data *cdata = data;

        rpc_count_iostats(task, NFS_SERVER(cdata->inode)->client->cl_metrics);
}

static void filelayout_commit_release(void *calldata)
{
        struct nfs_commit_data *data = calldata;

        data->completion_ops->completion(data);
        pnfs_put_lseg(data->lseg);
        nfs_put_client(data->ds_clp);
        nfs_commitdata_release(data);
}

static const struct rpc_call_ops filelayout_read_call_ops = {
        .rpc_call_prepare = filelayout_read_prepare,
        .rpc_call_done = filelayout_read_call_done,
        .rpc_count_stats = filelayout_read_count_stats,
        .rpc_release = filelayout_read_release,
};

static const struct rpc_call_ops filelayout_write_call_ops = {
        .rpc_call_prepare = filelayout_write_prepare,
        .rpc_call_done = filelayout_write_call_done,
        .rpc_count_stats = filelayout_write_count_stats,
        .rpc_release = filelayout_write_release,
};

static const struct rpc_call_ops filelayout_commit_call_ops = {
        .rpc_call_prepare = filelayout_commit_prepare,
        .rpc_call_done = filelayout_write_commit_done,
        .rpc_count_stats = filelayout_commit_count_stats,
        .rpc_release = filelayout_commit_release,
};

static enum pnfs_try_status
filelayout_read_pagelist(struct nfs_read_data *data)
{
        struct nfs_pgio_header *hdr = data->header;
        struct pnfs_layout_segment *lseg = hdr->lseg;
        struct nfs4_pnfs_ds *ds;
        struct rpc_clnt *ds_clnt;
        loff_t offset = data->args.offset;
        u32 j, idx;
        struct nfs_fh *fh;

        dprintk("--> %s ino %lu pgbase %u req %Zu@%llu\n",
                __func__, hdr->inode->i_ino,
                data->args.pgbase, (size_t)data->args.count, offset);

        /* Retrieve the correct rpc_client for the byte range */
        j = nfs4_fl_calc_j_index(lseg, offset);
        idx = nfs4_fl_calc_ds_index(lseg, j);
        ds = nfs4_fl_prepare_ds(lseg, idx);
        if (!ds)
                return PNFS_NOT_ATTEMPTED;

        ds_clnt = nfs4_find_or_create_ds_client(ds->ds_clp, hdr->inode);
        if (IS_ERR(ds_clnt))
                return PNFS_NOT_ATTEMPTED;

        dprintk("%s USE DS: %s cl_count %d\n", __func__,
                ds->ds_remotestr, atomic_read(&ds->ds_clp->cl_count));

        /* No multipath support. Use first DS */
        atomic_inc(&ds->ds_clp->cl_count);
        data->ds_clp = ds->ds_clp;
        fh = nfs4_fl_select_ds_fh(lseg, j);
        if (fh)
                data->args.fh = fh;

        data->args.offset = filelayout_get_dserver_offset(lseg, offset);
        data->mds_offset = offset;

        /* Perform an asynchronous read to ds */
        nfs_initiate_read(ds_clnt, data,
                          &filelayout_read_call_ops, RPC_TASK_SOFTCONN);
        return PNFS_ATTEMPTED;
}

/* Perform async writes. */
static enum pnfs_try_status
filelayout_write_pagelist(struct nfs_write_data *data, int sync)
{
        struct nfs_pgio_header *hdr = data->header;
        struct pnfs_layout_segment *lseg = hdr->lseg;
        struct nfs4_pnfs_ds *ds;
        struct rpc_clnt *ds_clnt;
        loff_t offset = data->args.offset;
        u32 j, idx;
        struct nfs_fh *fh;

        /* Retrieve the correct rpc_client for the byte range */
        j = nfs4_fl_calc_j_index(lseg, offset);
        idx = nfs4_fl_calc_ds_index(lseg, j);
        ds = nfs4_fl_prepare_ds(lseg, idx);
        if (!ds)
                return PNFS_NOT_ATTEMPTED;

        ds_clnt = nfs4_find_or_create_ds_client(ds->ds_clp, hdr->inode);
        if (IS_ERR(ds_clnt))
                return PNFS_NOT_ATTEMPTED;

        dprintk("%s ino %lu sync %d req %Zu@%llu DS: %s cl_count %d\n",
                __func__, hdr->inode->i_ino, sync, (size_t) data->args.count,
                offset, ds->ds_remotestr, atomic_read(&ds->ds_clp->cl_count));

        data->write_done_cb = filelayout_write_done_cb;
        atomic_inc(&ds->ds_clp->cl_count);
        data->ds_clp = ds->ds_clp;
        fh = nfs4_fl_select_ds_fh(lseg, j);
        if (fh)
                data->args.fh = fh;
        /*
         * Get the file offset on the dserver. Set the write offset to
         * this offset and save the original offset.
         */
        data->args.offset = filelayout_get_dserver_offset(lseg, offset);

        /* Perform an asynchronous write */
        nfs_initiate_write(ds_clnt, data,
                           &filelayout_write_call_ops, sync,
                           RPC_TASK_SOFTCONN);
        return PNFS_ATTEMPTED;
}

/*
 * filelayout_check_layout()
 *
 * Make sure layout segment parameters are sane WRT the device.
 * At this point no generic layer initialization of the lseg has occurred,
 * and nothing has been added to the layout_hdr cache.
 *
 */
static int
filelayout_check_layout(struct pnfs_layout_hdr *lo,
                        struct nfs4_filelayout_segment *fl,
                        struct nfs4_layoutget_res *lgr,
                        struct nfs4_deviceid *id,
                        gfp_t gfp_flags)
{
        struct nfs4_deviceid_node *d;
        struct nfs4_file_layout_dsaddr *dsaddr;
        int status = -EINVAL;
        struct nfs_server *nfss = NFS_SERVER(lo->plh_inode);

        dprintk("--> %s\n", __func__);

        /* FIXME: remove this check when layout segment support is added */
        if (lgr->range.offset != 0 ||
            lgr->range.length != NFS4_MAX_UINT64) {
                dprintk("%s Only whole file layouts supported. Use MDS i/o\n",
                        __func__);
                goto out;
        }

        if (fl->pattern_offset > lgr->range.offset) {
                dprintk("%s pattern_offset %lld too large\n",
                        __func__, fl->pattern_offset);
                goto out;
        }

        if (!fl->stripe_unit || fl->stripe_unit % PAGE_SIZE) {
                dprintk("%s Invalid stripe unit (%u)\n",
                        __func__, fl->stripe_unit);
                goto out;
        }

        /* find and reference the deviceid */
        d = nfs4_find_get_deviceid(NFS_SERVER(lo->plh_inode)->pnfs_curr_ld,
                                   NFS_SERVER(lo->plh_inode)->nfs_client, id);
        if (d == NULL) {
                dsaddr = filelayout_get_device_info(lo->plh_inode, id,
                                lo->plh_lc_cred, gfp_flags);
                if (dsaddr == NULL)
                        goto out;
        } else
                dsaddr = container_of(d, struct nfs4_file_layout_dsaddr, id_node);
        /* The deviceid was found, but it is marked unavailable */
        if (filelayout_test_devid_unavailable(&dsaddr->id_node))
                goto out_put;

        fl->dsaddr = dsaddr;

        if (fl->first_stripe_index >= dsaddr->stripe_count) {
                dprintk("%s Bad first_stripe_index %u\n",
                        __func__, fl->first_stripe_index);
                goto out_put;
        }

        if ((fl->stripe_type == STRIPE_SPARSE &&
             fl->num_fh > 1 && fl->num_fh != dsaddr->ds_num) ||
            (fl->stripe_type == STRIPE_DENSE &&
             fl->num_fh != dsaddr->stripe_count)) {
                dprintk("%s num_fh %u not valid for given packing\n",
                        __func__, fl->num_fh);
                goto out_put;
        }

        if (fl->stripe_unit % nfss->rsize || fl->stripe_unit % nfss->wsize) {
                dprintk("%s Stripe unit (%u) not aligned with rsize %u "
                        "wsize %u\n", __func__, fl->stripe_unit, nfss->rsize,
                        nfss->wsize);
        }

        status = 0;
out:
        dprintk("--> %s returns %d\n", __func__, status);
        return status;
out_put:
        nfs4_fl_put_deviceid(dsaddr);
        goto out;
}

static void filelayout_free_fh_array(struct nfs4_filelayout_segment *fl)
{
        int i;

        for (i = 0; i < fl->num_fh; i++) {
                if (!fl->fh_array[i])
                        break;
                kfree(fl->fh_array[i]);
        }
        kfree(fl->fh_array);
        fl->fh_array = NULL;
}

static void
_filelayout_free_lseg(struct nfs4_filelayout_segment *fl)
{
        filelayout_free_fh_array(fl);
        kfree(fl);
}

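/*
 * The layout body decoded below follows the nfsv4_1_file_layout4 XDR
 * structure from RFC 5661, section 13.3 (field widths per that spec,
 * not restated anywhere in this file):
 *
 *      nfl_deviceid            - 16 bytes (NFS4_DEVICEID4_SIZE)
 *      nfl_util                - 4 bytes (flags plus stripe unit)
 *      nfl_first_stripe_index  - 4 bytes
 *      nfl_pattern_offset      - 8 bytes
 *      nfl_fh_list             - 4-byte count, then that many
 *                                variable-length filehandles
 *
 * which is why the first xdr_inline_decode() call asks for
 * NFS4_DEVICEID4_SIZE + 20 bytes before touching the fh list.
 */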
static int
filelayout_decode_layout(struct pnfs_layout_hdr *flo,
                         struct nfs4_filelayout_segment *fl,
                         struct nfs4_layoutget_res *lgr,
                         struct nfs4_deviceid *id,
                         gfp_t gfp_flags)
{
        struct xdr_stream stream;
        struct xdr_buf buf;
        struct page *scratch;
        __be32 *p;
        uint32_t nfl_util;
        int i;

        dprintk("%s: set_layout_map Begin\n", __func__);

        scratch = alloc_page(gfp_flags);
        if (!scratch)
                return -ENOMEM;

        xdr_init_decode_pages(&stream, &buf, lgr->layoutp->pages, lgr->layoutp->len);
        xdr_set_scratch_buffer(&stream, page_address(scratch), PAGE_SIZE);

        /* 20 = nfl_util (4), first_stripe_index (4), pattern_offset (8),
         * num_fh (4) */
        p = xdr_inline_decode(&stream, NFS4_DEVICEID4_SIZE + 20);
        if (unlikely(!p))
                goto out_err;

        memcpy(id, p, sizeof(*id));
        p += XDR_QUADLEN(NFS4_DEVICEID4_SIZE);
        nfs4_print_deviceid(id);

        nfl_util = be32_to_cpup(p++);
        if (nfl_util & NFL4_UFLG_COMMIT_THRU_MDS)
                fl->commit_through_mds = 1;
        if (nfl_util & NFL4_UFLG_DENSE)
                fl->stripe_type = STRIPE_DENSE;
        else
                fl->stripe_type = STRIPE_SPARSE;
        fl->stripe_unit = nfl_util & ~NFL4_UFLG_MASK;

        fl->first_stripe_index = be32_to_cpup(p++);
        p = xdr_decode_hyper(p, &fl->pattern_offset);
        fl->num_fh = be32_to_cpup(p++);

        dprintk("%s: nfl_util 0x%X num_fh %u fsi %u po %llu\n",
                __func__, nfl_util, fl->num_fh, fl->first_stripe_index,
                fl->pattern_offset);

        /* Note that a zero value for num_fh is legal for STRIPE_SPARSE.
         * Further checking is done in filelayout_check_layout */
        if (fl->num_fh >
            max(NFS4_PNFS_MAX_STRIPE_CNT, NFS4_PNFS_MAX_MULTI_CNT))
                goto out_err;

        if (fl->num_fh > 0) {
                fl->fh_array = kcalloc(fl->num_fh, sizeof(fl->fh_array[0]),
                                       gfp_flags);
                if (!fl->fh_array)
                        goto out_err;
        }

        for (i = 0; i < fl->num_fh; i++) {
                /* Do we want to use a mempool here? */
                fl->fh_array[i] = kmalloc(sizeof(struct nfs_fh), gfp_flags);
                if (!fl->fh_array[i])
                        goto out_err_free;

                p = xdr_inline_decode(&stream, 4);
                if (unlikely(!p))
                        goto out_err_free;
                fl->fh_array[i]->size = be32_to_cpup(p++);
                if (sizeof(struct nfs_fh) < fl->fh_array[i]->size) {
                        printk(KERN_ERR "NFS: Too big fh %d received %d\n",
                               i, fl->fh_array[i]->size);
                        goto out_err_free;
                }

                p = xdr_inline_decode(&stream, fl->fh_array[i]->size);
                if (unlikely(!p))
                        goto out_err_free;
                memcpy(fl->fh_array[i]->data, p, fl->fh_array[i]->size);
                dprintk("DEBUG: %s: fh len %d\n", __func__,
                        fl->fh_array[i]->size);
        }

        __free_page(scratch);
        return 0;

out_err_free:
        filelayout_free_fh_array(fl);
out_err:
        __free_page(scratch);
        return -EIO;
}

static void
filelayout_free_lseg(struct pnfs_layout_segment *lseg)
{
        struct nfs4_filelayout_segment *fl = FILELAYOUT_LSEG(lseg);

        dprintk("--> %s\n", __func__);
        nfs4_fl_put_deviceid(fl->dsaddr);
        /* This assumes a single RW lseg */
        if (lseg->pls_range.iomode == IOMODE_RW) {
                struct nfs4_filelayout *flo;

                flo = FILELAYOUT_FROM_HDR(lseg->pls_layout);
                flo->commit_info.nbuckets = 0;
                kfree(flo->commit_info.buckets);
                flo->commit_info.buckets = NULL;
        }
        _filelayout_free_lseg(fl);
}

static int
filelayout_alloc_commit_info(struct pnfs_layout_segment *lseg,
                             struct nfs_commit_info *cinfo,
                             gfp_t gfp_flags)
{
        struct nfs4_filelayout_segment *fl = FILELAYOUT_LSEG(lseg);
        struct pnfs_commit_bucket *buckets;
        int size;

        if (fl->commit_through_mds)
                return 0;
        if (cinfo->ds->nbuckets != 0) {
                /* This assumes there is only one IOMODE_RW lseg. What
                 * we really want to do is have a layout_hdr level
                 * dictionary of <multipath_list4, fh> keys, each
                 * associated with a struct list_head, populated by calls
                 * to filelayout_write_pagelist().
                 */
                return 0;
        }

        size = (fl->stripe_type == STRIPE_SPARSE) ?
                fl->dsaddr->ds_num : fl->dsaddr->stripe_count;

        buckets = kcalloc(size, sizeof(struct pnfs_commit_bucket),
                          gfp_flags);
        if (!buckets)
                return -ENOMEM;
        else {
                int i;

                spin_lock(cinfo->lock);
                if (cinfo->ds->nbuckets != 0)
                        kfree(buckets);
                else {
                        cinfo->ds->buckets = buckets;
                        cinfo->ds->nbuckets = size;
                        for (i = 0; i < size; i++) {
                                INIT_LIST_HEAD(&buckets[i].written);
                                INIT_LIST_HEAD(&buckets[i].committing);
                        }
                }
                spin_unlock(cinfo->lock);
                return 0;
        }
}

static struct pnfs_layout_segment *
filelayout_alloc_lseg(struct pnfs_layout_hdr *layoutid,
                      struct nfs4_layoutget_res *lgr,
                      gfp_t gfp_flags)
{
        struct nfs4_filelayout_segment *fl;
        int rc;
        struct nfs4_deviceid id;

        dprintk("--> %s\n", __func__);
        fl = kzalloc(sizeof(*fl), gfp_flags);
        if (!fl)
                return NULL;

        rc = filelayout_decode_layout(layoutid, fl, lgr, &id, gfp_flags);
        if (rc != 0 || filelayout_check_layout(layoutid, fl, lgr, &id, gfp_flags)) {
                _filelayout_free_lseg(fl);
                return NULL;
        }
        return &fl->generic_hdr;
}

/*
 * filelayout_pg_test(). Called by nfs_can_coalesce_requests()
 *
 * return true  : coalesce page
 * return false : don't coalesce page
 */
static bool
filelayout_pg_test(struct nfs_pageio_descriptor *pgio, struct nfs_page *prev,
                   struct nfs_page *req)
{
        u64 p_stripe, r_stripe;
        u32 stripe_unit;

        if (!pnfs_generic_pg_test(pgio, prev, req) ||
            !nfs_generic_pg_test(pgio, prev, req))
                return false;

        p_stripe = (u64)req_offset(prev);
        r_stripe = (u64)req_offset(req);
        stripe_unit = FILELAYOUT_LSEG(pgio->pg_lseg)->stripe_unit;

        do_div(p_stripe, stripe_unit);
        do_div(r_stripe, stripe_unit);

        return (p_stripe == r_stripe);
}
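
/*
 * Illustration of the test above (hypothetical numbers): with
 * stripe_unit = 65536, a request at offset 61440 lands in stripe unit 0
 * while one at 65536 lands in stripe unit 1, so they are not coalesced;
 * requests at 65536 and 69632 both land in stripe unit 1 and may be
 * coalesced into a single RPC to that data server.
 */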

static void
filelayout_pg_init_read(struct nfs_pageio_descriptor *pgio,
                        struct nfs_page *req)
{
        WARN_ON_ONCE(pgio->pg_lseg != NULL);

        if (req->wb_offset != req->wb_pgbase) {
                /*
                 * Handling unaligned pages is difficult, because we'd
                 * have to somehow split a req in two in certain cases
                 * in the pg.test code. Avoid this by just not using
                 * pnfs in this case.
                 */
                nfs_pageio_reset_read_mds(pgio);
                return;
        }
        pgio->pg_lseg = pnfs_update_layout(pgio->pg_inode,
                                           req->wb_context,
                                           0,
                                           NFS4_MAX_UINT64,
                                           IOMODE_READ,
                                           GFP_KERNEL);
        /* If no lseg, fall back to read through mds */
        if (pgio->pg_lseg == NULL)
                nfs_pageio_reset_read_mds(pgio);
}

static void
filelayout_pg_init_write(struct nfs_pageio_descriptor *pgio,
                         struct nfs_page *req)
{
        struct nfs_commit_info cinfo;
        int status;

        WARN_ON_ONCE(pgio->pg_lseg != NULL);

        if (req->wb_offset != req->wb_pgbase)
                goto out_mds;
        pgio->pg_lseg = pnfs_update_layout(pgio->pg_inode,
                                           req->wb_context,
                                           0,
                                           NFS4_MAX_UINT64,
                                           IOMODE_RW,
                                           GFP_NOFS);
        /* If no lseg, fall back to write through mds */
        if (pgio->pg_lseg == NULL)
                goto out_mds;
        nfs_init_cinfo(&cinfo, pgio->pg_inode, pgio->pg_dreq);
        status = filelayout_alloc_commit_info(pgio->pg_lseg, &cinfo, GFP_NOFS);
        if (status < 0) {
                pnfs_put_lseg(pgio->pg_lseg);
                pgio->pg_lseg = NULL;
                goto out_mds;
        }
        return;
out_mds:
        nfs_pageio_reset_write_mds(pgio);
}

static const struct nfs_pageio_ops filelayout_pg_read_ops = {
        .pg_init = filelayout_pg_init_read,
        .pg_test = filelayout_pg_test,
        .pg_doio = pnfs_generic_pg_readpages,
};

static const struct nfs_pageio_ops filelayout_pg_write_ops = {
        .pg_init = filelayout_pg_init_write,
        .pg_test = filelayout_pg_test,
        .pg_doio = pnfs_generic_pg_writepages,
};

static u32 select_bucket_index(struct nfs4_filelayout_segment *fl, u32 j)
{
        if (fl->stripe_type == STRIPE_SPARSE)
                return nfs4_fl_calc_ds_index(&fl->generic_hdr, j);
        else
                return j;
}
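
/*
 * In other words: for sparse layouts the commit buckets are indexed by
 * data-server index (several stripe indices may share one DS and hence
 * one bucket), while for dense layouts there is one bucket per stripe
 * index. calc_ds_index_from_commit() below inverts this mapping when the
 * COMMIT is actually sent.
 */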

/* The generic layer is about to remove the req from the commit list.
 * If this will make the bucket empty, it will need to put the lseg reference.
 */
static void
filelayout_clear_request_commit(struct nfs_page *req,
                                struct nfs_commit_info *cinfo)
{
        struct pnfs_layout_segment *freeme = NULL;

        spin_lock(cinfo->lock);
        if (!test_and_clear_bit(PG_COMMIT_TO_DS, &req->wb_flags))
                goto out;
        cinfo->ds->nwritten--;
        if (list_is_singular(&req->wb_list)) {
                struct pnfs_commit_bucket *bucket;

                bucket = list_first_entry(&req->wb_list,
                                          struct pnfs_commit_bucket,
                                          written);
                freeme = bucket->wlseg;
                bucket->wlseg = NULL;
        }
out:
        nfs_request_remove_commit_list(req, cinfo);
        spin_unlock(cinfo->lock);
        pnfs_put_lseg(freeme);
}

static struct list_head *
filelayout_choose_commit_list(struct nfs_page *req,
                              struct pnfs_layout_segment *lseg,
                              struct nfs_commit_info *cinfo)
{
        struct nfs4_filelayout_segment *fl = FILELAYOUT_LSEG(lseg);
        u32 i, j;
        struct list_head *list;
        struct pnfs_commit_bucket *buckets;

        if (fl->commit_through_mds)
                return &cinfo->mds->list;

        /* Note that we are calling nfs4_fl_calc_j_index on each page
         * that ends up being committed to a data server. An attractive
         * alternative is to add a field to nfs_write_data and nfs_page
         * to store the value calculated in filelayout_write_pagelist
         * and just use that here.
         */
        j = nfs4_fl_calc_j_index(lseg, req_offset(req));
        i = select_bucket_index(fl, j);
        buckets = cinfo->ds->buckets;
        list = &buckets[i].written;
        if (list_empty(list)) {
                /* Non-empty buckets hold a reference on the lseg. That ref
                 * is normally transferred to the COMMIT call and released
                 * there. It could also be released if the last req is pulled
                 * off due to a rewrite, in which case it will be done in
                 * filelayout_clear_request_commit
                 */
                buckets[i].wlseg = pnfs_get_lseg(lseg);
        }
        set_bit(PG_COMMIT_TO_DS, &req->wb_flags);
        cinfo->ds->nwritten++;
        return list;
}

static void
filelayout_mark_request_commit(struct nfs_page *req,
                               struct pnfs_layout_segment *lseg,
                               struct nfs_commit_info *cinfo)
{
        struct list_head *list;

        list = filelayout_choose_commit_list(req, lseg, cinfo);
        nfs_request_add_commit_list(req, list, cinfo);
}

static u32 calc_ds_index_from_commit(struct pnfs_layout_segment *lseg, u32 i)
{
        struct nfs4_filelayout_segment *flseg = FILELAYOUT_LSEG(lseg);

        if (flseg->stripe_type == STRIPE_SPARSE)
                return i;
        else
                return nfs4_fl_calc_ds_index(lseg, i);
}

static struct nfs_fh *
select_ds_fh_from_commit(struct pnfs_layout_segment *lseg, u32 i)
{
        struct nfs4_filelayout_segment *flseg = FILELAYOUT_LSEG(lseg);

        if (flseg->stripe_type == STRIPE_SPARSE) {
                if (flseg->num_fh == 1)
                        i = 0;
                else if (flseg->num_fh == 0)
                        /* Use the MDS OPEN fh set in nfs_read_rpcsetup */
                        return NULL;
        }
        return flseg->fh_array[i];
}

static int filelayout_initiate_commit(struct nfs_commit_data *data, int how)
{
        struct pnfs_layout_segment *lseg = data->lseg;
        struct nfs4_pnfs_ds *ds;
        struct rpc_clnt *ds_clnt;
        u32 idx;
        struct nfs_fh *fh;

        idx = calc_ds_index_from_commit(lseg, data->ds_commit_index);
        ds = nfs4_fl_prepare_ds(lseg, idx);
        if (!ds)
                goto out_err;

        ds_clnt = nfs4_find_or_create_ds_client(ds->ds_clp, data->inode);
        if (IS_ERR(ds_clnt))
                goto out_err;

        dprintk("%s ino %lu, how %d cl_count %d\n", __func__,
                data->inode->i_ino, how, atomic_read(&ds->ds_clp->cl_count));
        data->commit_done_cb = filelayout_commit_done_cb;
        atomic_inc(&ds->ds_clp->cl_count);
        data->ds_clp = ds->ds_clp;
        fh = select_ds_fh_from_commit(lseg, data->ds_commit_index);
        if (fh)
                data->args.fh = fh;
        return nfs_initiate_commit(ds_clnt, data,
                                   &filelayout_commit_call_ops, how,
                                   RPC_TASK_SOFTCONN);
out_err:
        prepare_to_resend_writes(data);
        filelayout_commit_release(data);
        return -EAGAIN;
}

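/*
 * Move up to @max locked requests from @src to @dst, returning the number
 * moved. Note that @max == 0 means "move everything": ret is only compared
 * against @max after being incremented, so it can never equal zero there.
 * The cap is also ignored for O_DIRECT (cinfo->dreq) commits.
 */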
static int
transfer_commit_list(struct list_head *src, struct list_head *dst,
                     struct nfs_commit_info *cinfo, int max)
{
        struct nfs_page *req, *tmp;
        int ret = 0;

        list_for_each_entry_safe(req, tmp, src, wb_list) {
                if (!nfs_lock_request(req))
                        continue;
                kref_get(&req->wb_kref);
                if (cond_resched_lock(cinfo->lock))
                        list_safe_reset_next(req, tmp, wb_list);
                nfs_request_remove_commit_list(req, cinfo);
                clear_bit(PG_COMMIT_TO_DS, &req->wb_flags);
                nfs_list_add_request(req, dst);
                ret++;
                if ((ret == max) && !cinfo->dreq)
                        break;
        }
        return ret;
}

static int
filelayout_scan_ds_commit_list(struct pnfs_commit_bucket *bucket,
                               struct nfs_commit_info *cinfo,
                               int max)
{
        struct list_head *src = &bucket->written;
        struct list_head *dst = &bucket->committing;
        int ret;

        ret = transfer_commit_list(src, dst, cinfo, max);
        if (ret) {
                cinfo->ds->nwritten -= ret;
                cinfo->ds->ncommitting += ret;
                bucket->clseg = bucket->wlseg;
                if (list_empty(src))
                        bucket->wlseg = NULL;
                else
                        pnfs_get_lseg(bucket->clseg);
        }
        return ret;
}

/* Move reqs from the written lists to the committing lists, returning the
 * number moved. Note: called with cinfo->lock held.
 */
static int filelayout_scan_commit_lists(struct nfs_commit_info *cinfo,
                                        int max)
{
        int i, rv = 0, cnt;

        for (i = 0; i < cinfo->ds->nbuckets && max != 0; i++) {
                cnt = filelayout_scan_ds_commit_list(&cinfo->ds->buckets[i],
                                                     cinfo, max);
                max -= cnt;
                rv += cnt;
        }
        return rv;
}

/* Pull everything off the committing lists and dump into @dst */
static void filelayout_recover_commit_reqs(struct list_head *dst,
                                           struct nfs_commit_info *cinfo)
{
        struct pnfs_commit_bucket *b;
        int i;

        spin_lock(cinfo->lock);
        for (i = 0, b = cinfo->ds->buckets; i < cinfo->ds->nbuckets; i++, b++) {
                if (transfer_commit_list(&b->written, dst, cinfo, 0)) {
                        spin_unlock(cinfo->lock);
                        pnfs_put_lseg(b->wlseg);
                        b->wlseg = NULL;
                        spin_lock(cinfo->lock);
                }
        }
        cinfo->ds->nwritten = 0;
        spin_unlock(cinfo->lock);
}

static unsigned int
alloc_ds_commits(struct nfs_commit_info *cinfo, struct list_head *list)
{
        struct pnfs_ds_commit_info *fl_cinfo;
        struct pnfs_commit_bucket *bucket;
        struct nfs_commit_data *data;
        int i, j;
        unsigned int nreq = 0;

        fl_cinfo = cinfo->ds;
        bucket = fl_cinfo->buckets;
        for (i = 0; i < fl_cinfo->nbuckets; i++, bucket++) {
                if (list_empty(&bucket->committing))
                        continue;
                data = nfs_commitdata_alloc();
                if (!data)
                        break;
                data->ds_commit_index = i;
                data->lseg = bucket->clseg;
                bucket->clseg = NULL;
                list_add(&data->pages, list);
                nreq++;
        }

        /* Clean up on error */
        for (j = i; j < fl_cinfo->nbuckets; j++, bucket++) {
                if (list_empty(&bucket->committing))
                        continue;
                nfs_retry_commit(&bucket->committing, bucket->clseg, cinfo);
                pnfs_put_lseg(bucket->clseg);
                bucket->clseg = NULL;
        }
        /* Caller will clean up entries put on list */
        return nreq;
}

/* This follows nfs_commit_list pretty closely */
static int
filelayout_commit_pagelist(struct inode *inode, struct list_head *mds_pages,
                           int how, struct nfs_commit_info *cinfo)
{
        struct nfs_commit_data *data, *tmp;
        LIST_HEAD(list);
        unsigned int nreq = 0;

        if (!list_empty(mds_pages)) {
                data = nfs_commitdata_alloc();
                if (data != NULL) {
                        data->lseg = NULL;
                        list_add(&data->pages, &list);
                        nreq++;
                } else
                        nfs_retry_commit(mds_pages, NULL, cinfo);
        }

        nreq += alloc_ds_commits(cinfo, &list);

        if (nreq == 0) {
                cinfo->completion_ops->error_cleanup(NFS_I(inode));
                goto out;
        }

        atomic_add(nreq, &cinfo->mds->rpcs_out);

        list_for_each_entry_safe(data, tmp, &list, pages) {
                list_del_init(&data->pages);
                if (!data->lseg) {
                        nfs_init_commit(data, mds_pages, NULL, cinfo);
                        nfs_initiate_commit(NFS_CLIENT(inode), data,
                                            data->mds_ops, how, 0);
                } else {
                        struct pnfs_commit_bucket *buckets;

                        buckets = cinfo->ds->buckets;
                        nfs_init_commit(data, &buckets[data->ds_commit_index].committing, data->lseg, cinfo);
                        filelayout_initiate_commit(data, how);
                }
        }
out:
        cinfo->ds->ncommitting = 0;
        return PNFS_ATTEMPTED;
}

static void
filelayout_free_deviceid_node(struct nfs4_deviceid_node *d)
{
        nfs4_fl_free_deviceid(container_of(d, struct nfs4_file_layout_dsaddr, id_node));
}

static struct pnfs_layout_hdr *
filelayout_alloc_layout_hdr(struct inode *inode, gfp_t gfp_flags)
{
        struct nfs4_filelayout *flo;

        flo = kzalloc(sizeof(*flo), gfp_flags);
        return flo != NULL ? &flo->generic_hdr : NULL;
}

static void
filelayout_free_layout_hdr(struct pnfs_layout_hdr *lo)
{
        kfree(FILELAYOUT_FROM_HDR(lo));
}

static struct pnfs_ds_commit_info *
filelayout_get_ds_info(struct inode *inode)
{
        struct pnfs_layout_hdr *layout = NFS_I(inode)->layout;

        if (layout == NULL)
                return NULL;
        else
                return &FILELAYOUT_FROM_HDR(layout)->commit_info;
}

static struct pnfs_layoutdriver_type filelayout_type = {
        .id = LAYOUT_NFSV4_1_FILES,
        .name = "LAYOUT_NFSV4_1_FILES",
        .owner = THIS_MODULE,
        .alloc_layout_hdr = filelayout_alloc_layout_hdr,
        .free_layout_hdr = filelayout_free_layout_hdr,
        .alloc_lseg = filelayout_alloc_lseg,
        .free_lseg = filelayout_free_lseg,
        .pg_read_ops = &filelayout_pg_read_ops,
        .pg_write_ops = &filelayout_pg_write_ops,
        .get_ds_info = &filelayout_get_ds_info,
        .mark_request_commit = filelayout_mark_request_commit,
        .clear_request_commit = filelayout_clear_request_commit,
        .scan_commit_lists = filelayout_scan_commit_lists,
        .recover_commit_reqs = filelayout_recover_commit_reqs,
        .commit_pagelist = filelayout_commit_pagelist,
        .read_pagelist = filelayout_read_pagelist,
        .write_pagelist = filelayout_write_pagelist,
        .free_deviceid_node = filelayout_free_deviceid_node,
};

static int __init nfs4filelayout_init(void)
{
        printk(KERN_INFO "%s: NFSv4 File Layout Driver Registering...\n",
               __func__);
        return pnfs_register_layoutdriver(&filelayout_type);
}

static void __exit nfs4filelayout_exit(void)
{
        printk(KERN_INFO "%s: NFSv4 File Layout Driver Unregistering...\n",
               __func__);
        pnfs_unregister_layoutdriver(&filelayout_type);
}

MODULE_ALIAS("nfs-layouttype4-1");

module_init(nfs4filelayout_init);
module_exit(nfs4filelayout_exit);