Commit | Line | Data |
---|---|---|
85e174ba RL |
1 | /* |
2 | * pNFS functions to call and manage layout drivers. | |
3 | * | |
4 | * Copyright (c) 2002 [year of first publication] | |
5 | * The Regents of the University of Michigan | |
6 | * All Rights Reserved | |
7 | * | |
8 | * Dean Hildebrand <dhildebz@umich.edu> | |
9 | * | |
10 | * Permission is granted to use, copy, create derivative works, and | |
11 | * redistribute this software and such derivative works for any purpose, | |
12 | * so long as the name of the University of Michigan is not used in | |
13 | * any advertising or publicity pertaining to the use or distribution | |
14 | * of this software without specific, written prior authorization. If | |
15 | * the above copyright notice or any other identification of the | |
16 | * University of Michigan is included in any copy of any portion of | |
17 | * this software, then the disclaimer below must also be included. | |
18 | * | |
19 | * This software is provided as is, without representation or warranty | |
20 | * of any kind either express or implied, including without limitation | |
21 | * the implied warranties of merchantability, fitness for a particular | |
22 | * purpose, or noninfringement. The Regents of the University of | |
23 | * Michigan shall not be liable for any damages, including special, | |
24 | * indirect, incidental, or consequential damages, with respect to any | |
25 | * claim arising out of or in connection with the use of the software, | |
26 | * even if it has been or is hereafter advised of the possibility of | |
27 | * such damages. | |
28 | */ | |
29 | ||
30 | #include <linux/nfs_fs.h> | |
974cec8c | 31 | #include "internal.h" |
85e174ba | 32 | #include "pnfs.h" |
64419a9b | 33 | #include "iostat.h" |
85e174ba RL |
34 | |
35 | #define NFSDBG_FACILITY NFSDBG_PNFS | |
36 | ||
02c35fca FI |
37 | /* Locking: |
38 | * | |
39 | * pnfs_spinlock: | |
40 | * protects pnfs_modules_tbl. | |
41 | */ | |
42 | static DEFINE_SPINLOCK(pnfs_spinlock); | |
43 | ||
44 | /* | |
45 | * pnfs_modules_tbl holds all pnfs modules | |
46 | */ | |
47 | static LIST_HEAD(pnfs_modules_tbl); | |
48 | ||
49 | /* Return the registered pnfs layout driver module matching given id */ | |
50 | static struct pnfs_layoutdriver_type * | |
51 | find_pnfs_driver_locked(u32 id) | |
52 | { | |
53 | struct pnfs_layoutdriver_type *local; | |
54 | ||
55 | list_for_each_entry(local, &pnfs_modules_tbl, pnfs_tblid) | |
56 | if (local->id == id) | |
57 | goto out; | |
58 | local = NULL; | |
59 | out: | |
60 | dprintk("%s: Searching for id %u, found %p\n", __func__, id, local); | |
61 | return local; | |
62 | } | |
63 | ||
85e174ba RL |
64 | static struct pnfs_layoutdriver_type * |
65 | find_pnfs_driver(u32 id) | |
66 | { | |
02c35fca FI |
67 | struct pnfs_layoutdriver_type *local; |
68 | ||
69 | spin_lock(&pnfs_spinlock); | |
70 | local = find_pnfs_driver_locked(id); | |
71 | spin_unlock(&pnfs_spinlock); | |
72 | return local; | |
85e174ba RL |
73 | } |
74 | ||
75 | void | |
76 | unset_pnfs_layoutdriver(struct nfs_server *nfss) | |
77 | { | |
ea8eecdd | 78 | if (nfss->pnfs_curr_ld) |
02c35fca | 79 | module_put(nfss->pnfs_curr_ld->owner); |
85e174ba RL |
80 | nfss->pnfs_curr_ld = NULL; |
81 | } | |
82 | ||
/*
 * Try to set the server's pnfs module to the pnfs layout type specified by id.
 * Currently only one pNFS layout driver per filesystem is supported.
 *
 * @id layout type. Zero (illegal layout type) indicates pNFS not in use.
 */
void
set_pnfs_layoutdriver(struct nfs_server *server, u32 id)
{
	struct pnfs_layoutdriver_type *ld_type = NULL;

	if (id == 0)
		goto out_no_driver;
	/* Server must have advertised pNFS support at EXCHANGE_ID time. */
	if (!(server->nfs_client->cl_exchange_flags &
	    (EXCHGID4_FLAG_USE_NON_PNFS | EXCHGID4_FLAG_USE_PNFS_MDS))) {
		printk(KERN_ERR "%s: id %u cl_exchange_flags 0x%x\n", __func__,
		       id, server->nfs_client->cl_exchange_flags);
		goto out_no_driver;
	}
	ld_type = find_pnfs_driver(id);
	if (!ld_type) {
		/* Driver not registered yet: try loading its module,
		 * then look it up again. */
		request_module("%s-%u", LAYOUT_NFSV4_1_MODULE_PREFIX, id);
		ld_type = find_pnfs_driver(id);
		if (!ld_type) {
			dprintk("%s: No pNFS module found for %u.\n",
				__func__, id);
			goto out_no_driver;
		}
	}
	/* Pin the driver module for as long as this server points at it;
	 * the reference is dropped in unset_pnfs_layoutdriver. */
	if (!try_module_get(ld_type->owner)) {
		dprintk("%s: Could not grab reference on module\n", __func__);
		goto out_no_driver;
	}
	server->pnfs_curr_ld = ld_type;

	dprintk("%s: pNFS module for %u set\n", __func__, id);
	return;

out_no_driver:
	/* Any failure above means plain (non-pNFS) NFSv4 I/O. */
	dprintk("%s: Using NFSv4 I/O\n", __func__);
	server->pnfs_curr_ld = NULL;
}
02c35fca FI |
125 | |
126 | int | |
127 | pnfs_register_layoutdriver(struct pnfs_layoutdriver_type *ld_type) | |
128 | { | |
129 | int status = -EINVAL; | |
130 | struct pnfs_layoutdriver_type *tmp; | |
131 | ||
132 | if (ld_type->id == 0) { | |
133 | printk(KERN_ERR "%s id 0 is reserved\n", __func__); | |
134 | return status; | |
135 | } | |
b1f69b75 AA |
136 | if (!ld_type->alloc_lseg || !ld_type->free_lseg) { |
137 | printk(KERN_ERR "%s Layout driver must provide " | |
138 | "alloc_lseg and free_lseg.\n", __func__); | |
139 | return status; | |
140 | } | |
02c35fca FI |
141 | |
142 | spin_lock(&pnfs_spinlock); | |
143 | tmp = find_pnfs_driver_locked(ld_type->id); | |
144 | if (!tmp) { | |
145 | list_add(&ld_type->pnfs_tblid, &pnfs_modules_tbl); | |
146 | status = 0; | |
147 | dprintk("%s Registering id:%u name:%s\n", __func__, ld_type->id, | |
148 | ld_type->name); | |
149 | } else { | |
150 | printk(KERN_ERR "%s Module with id %d already loaded!\n", | |
151 | __func__, ld_type->id); | |
152 | } | |
153 | spin_unlock(&pnfs_spinlock); | |
154 | ||
155 | return status; | |
156 | } | |
157 | EXPORT_SYMBOL_GPL(pnfs_register_layoutdriver); | |
158 | ||
/* Remove a layout driver from the global module table (inverse of
 * pnfs_register_layoutdriver). */
void
pnfs_unregister_layoutdriver(struct pnfs_layoutdriver_type *ld_type)
{
	dprintk("%s Deregistering id:%u\n", __func__, ld_type->id);
	spin_lock(&pnfs_spinlock);
	list_del(&ld_type->pnfs_tblid);
	spin_unlock(&pnfs_spinlock);
}
EXPORT_SYMBOL_GPL(pnfs_unregister_layoutdriver);
e5e94017 | 168 | |
b1f69b75 AA |
169 | /* |
170 | * pNFS client layout cache | |
171 | */ | |
172 | ||
/* Need to hold i_lock if caller does not already hold reference */
void
get_layout_hdr(struct pnfs_layout_hdr *lo)
{
	/* Take an additional reference on the layout header;
	 * released via put_layout_hdr / put_layout_hdr_locked. */
	atomic_inc(&lo->plh_refcount);
}
179 | ||
636fb9c8 BH |
180 | static struct pnfs_layout_hdr * |
181 | pnfs_alloc_layout_hdr(struct inode *ino, gfp_t gfp_flags) | |
182 | { | |
183 | struct pnfs_layoutdriver_type *ld = NFS_SERVER(ino)->pnfs_curr_ld; | |
184 | return ld->alloc_layout_hdr ? ld->alloc_layout_hdr(ino, gfp_flags) : | |
185 | kzalloc(sizeof(struct pnfs_layout_hdr), gfp_flags); | |
186 | } | |
187 | ||
/*
 * Free a layout header.  The allocator determines the deallocator: if the
 * driver supplied alloc_layout_hdr (see pnfs_alloc_layout_hdr), the header
 * is returned through the driver's free_layout_hdr, otherwise it was
 * kzalloc'd and is plain kfree'd.
 * NOTE(review): this assumes drivers define alloc_layout_hdr and
 * free_layout_hdr as a pair -- a driver with alloc but no free would
 * dereference NULL here; confirm against the layout drivers.
 */
static void
pnfs_free_layout_hdr(struct pnfs_layout_hdr *lo)
{
	struct pnfs_layoutdriver_type *ld = NFS_SERVER(lo->plh_inode)->pnfs_curr_ld;
	return ld->alloc_layout_hdr ? ld->free_layout_hdr(lo) : kfree(lo);
}
194 | ||
/* Tear down a layout header whose refcount has reached zero: detach it
 * from the inode and hand it back to the allocator.  Callers hold the
 * inode's i_lock (see put_layout_hdr / put_layout_hdr_locked). */
static void
destroy_layout_hdr(struct pnfs_layout_hdr *lo)
{
	dprintk("%s: freeing layout cache %p\n", __func__, lo);
	/* Must already be off the per-client plh_layouts list. */
	BUG_ON(!list_empty(&lo->plh_layouts));
	NFS_I(lo->plh_inode)->layout = NULL;
	pnfs_free_layout_hdr(lo);
}
e5e94017 | 203 | |
cc6e5340 FI |
/* Drop a layout-header reference with the inode's i_lock already held;
 * see put_layout_hdr for the variant that takes the lock itself. */
static void
put_layout_hdr_locked(struct pnfs_layout_hdr *lo)
{
	if (atomic_dec_and_test(&lo->plh_refcount))
		destroy_layout_hdr(lo);
}
210 | ||
/* Drop a layout-header reference, destroying the header on the final put.
 * The i_lock is only taken (by atomic_dec_and_lock) when the count would
 * reach zero. */
void
put_layout_hdr(struct pnfs_layout_hdr *lo)
{
	struct inode *inode = lo->plh_inode;

	if (atomic_dec_and_lock(&lo->plh_refcount, &inode->i_lock)) {
		destroy_layout_hdr(lo);
		spin_unlock(&inode->i_lock);
	}
}
221 | ||
/* Initialize a freshly allocated layout segment and attach it to @lo.
 * The initial reference is dropped via put_lseg. */
static void
init_lseg(struct pnfs_layout_hdr *lo, struct pnfs_layout_segment *lseg)
{
	INIT_LIST_HEAD(&lseg->pls_list);
	atomic_set(&lseg->pls_refcount, 1);
	/* Make the initialized fields visible before the VALID bit is. */
	smp_mb();
	set_bit(NFS_LSEG_VALID, &lseg->pls_flags);
	lseg->pls_layout = lo;
}
231 | ||
/* Return an lseg to the layout driver and drop the header reference the
 * segment held.  Called without the i_lock (put_layout_hdr may take it). */
static void free_lseg(struct pnfs_layout_segment *lseg)
{
	struct inode *ino = lseg->pls_layout->plh_inode;

	NFS_SERVER(ino)->pnfs_curr_ld->free_lseg(lseg);
	/* Matched by get_layout_hdr in pnfs_insert_layout */
	put_layout_hdr(NFS_I(ino)->layout);
}
240 | ||
d684d2ae FI |
/* Bookkeeping for the final put of an lseg: unlink it and, if it was the
 * last segment, mark the header destroyed and drop its initial reference.
 * Caller holds the inode's i_lock and frees the segment afterwards. */
static void
put_lseg_common(struct pnfs_layout_segment *lseg)
{
	struct inode *inode = lseg->pls_layout->plh_inode;

	/* The VALID bit must have been cleared before the last reference
	 * went away (see mark_lseg_invalid). */
	WARN_ON(test_bit(NFS_LSEG_VALID, &lseg->pls_flags));
	list_del_init(&lseg->pls_list);
	if (list_empty(&lseg->pls_layout->plh_segs)) {
		set_bit(NFS_LAYOUT_DESTROYED, &lseg->pls_layout->plh_flags);
		/* Matched by initial refcount set in alloc_init_layout_hdr */
		put_layout_hdr_locked(lseg->pls_layout);
	}
	/* Wake anyone waiting in the return-on-close path. */
	rpc_wake_up(&NFS_SERVER(inode)->roc_rpcwaitq);
}
255 | ||
/* Release a reference on a layout segment (NULL is a no-op).  On the last
 * put, the segment is unlinked under the i_lock and freed outside it via
 * pnfs_free_lseg_list. */
void
put_lseg(struct pnfs_layout_segment *lseg)
{
	struct inode *inode;

	if (!lseg)
		return;

	dprintk("%s: lseg %p ref %d valid %d\n", __func__, lseg,
		atomic_read(&lseg->pls_refcount),
		test_bit(NFS_LSEG_VALID, &lseg->pls_flags));
	inode = lseg->pls_layout->plh_inode;
	if (atomic_dec_and_lock(&lseg->pls_refcount, &inode->i_lock)) {
		LIST_HEAD(free_me);

		put_lseg_common(lseg);
		list_add(&lseg->pls_list, &free_me);
		spin_unlock(&inode->i_lock);
		/* Actual freeing happens without the i_lock held. */
		pnfs_free_lseg_list(&free_me);
	}
}
EXPORT_SYMBOL_GPL(put_lseg);
974cec8c | 278 | |
fb3296eb BH |
279 | static inline u64 |
280 | end_offset(u64 start, u64 len) | |
281 | { | |
282 | u64 end; | |
283 | ||
284 | end = start + len; | |
285 | return end >= start ? end : NFS4_MAX_UINT64; | |
286 | } | |
287 | ||
288 | /* last octet in a range */ | |
289 | static inline u64 | |
290 | last_byte_offset(u64 start, u64 len) | |
291 | { | |
292 | u64 end; | |
293 | ||
294 | BUG_ON(!len); | |
295 | end = start + len; | |
296 | return end > start ? end - 1 : NFS4_MAX_UINT64; | |
297 | } | |
298 | ||
299 | /* | |
300 | * is l2 fully contained in l1? | |
301 | * start1 end1 | |
302 | * [----------------------------------) | |
303 | * start2 end2 | |
304 | * [----------------) | |
305 | */ | |
306 | static inline int | |
307 | lo_seg_contained(struct pnfs_layout_range *l1, | |
308 | struct pnfs_layout_range *l2) | |
309 | { | |
310 | u64 start1 = l1->offset; | |
311 | u64 end1 = end_offset(start1, l1->length); | |
312 | u64 start2 = l2->offset; | |
313 | u64 end2 = end_offset(start2, l2->length); | |
314 | ||
315 | return (start1 <= start2) && (end1 >= end2); | |
316 | } | |
317 | ||
318 | /* | |
319 | * is l1 and l2 intersecting? | |
320 | * start1 end1 | |
321 | * [----------------------------------) | |
322 | * start2 end2 | |
323 | * [----------------) | |
324 | */ | |
325 | static inline int | |
326 | lo_seg_intersecting(struct pnfs_layout_range *l1, | |
327 | struct pnfs_layout_range *l2) | |
328 | { | |
329 | u64 start1 = l1->offset; | |
330 | u64 end1 = end_offset(start1, l1->length); | |
331 | u64 start2 = l2->offset; | |
332 | u64 end2 = end_offset(start2, l2->length); | |
333 | ||
334 | return (end1 == NFS4_MAX_UINT64 || end1 > start2) && | |
335 | (end2 == NFS4_MAX_UINT64 || end2 > start1); | |
336 | } | |
337 | ||
4541d16c | 338 | static bool |
778b5502 BH |
339 | should_free_lseg(struct pnfs_layout_range *lseg_range, |
340 | struct pnfs_layout_range *recall_range) | |
4541d16c | 341 | { |
778b5502 BH |
342 | return (recall_range->iomode == IOMODE_ANY || |
343 | lseg_range->iomode == recall_range->iomode) && | |
344 | lo_seg_intersecting(lseg_range, recall_range); | |
974cec8c AA |
345 | } |
346 | ||
4541d16c FI |
/* Returns 1 if lseg is removed from list, 0 otherwise */
static int mark_lseg_invalid(struct pnfs_layout_segment *lseg,
			     struct list_head *tmp_list)
{
	int rv = 0;

	if (test_and_clear_bit(NFS_LSEG_VALID, &lseg->pls_flags)) {
		/* Remove the reference keeping the lseg in the
		 * list. It will now be removed when all
		 * outstanding io is finished.
		 */
		dprintk("%s: lseg %p ref %d\n", __func__, lseg,
			atomic_read(&lseg->pls_refcount));
		/* If ours was the last reference, move the lseg onto
		 * tmp_list for the caller to free outside the i_lock. */
		if (atomic_dec_and_test(&lseg->pls_refcount)) {
			put_lseg_common(lseg);
			list_add(&lseg->pls_list, tmp_list);
			rv = 1;
		}
	}
	return rv;
}
368 | ||
/* Returns count of number of matching invalid lsegs remaining in list
 * after call.
 *
 * Invalidates every lseg in @lo matching @recall_range (NULL matches all),
 * collecting freeable segments on @tmp_list for the caller to release with
 * pnfs_free_lseg_list.  Caller holds the inode's i_lock.
 */
int
mark_matching_lsegs_invalid(struct pnfs_layout_hdr *lo,
			    struct list_head *tmp_list,
			    struct pnfs_layout_range *recall_range)
{
	struct pnfs_layout_segment *lseg, *next;
	int invalid = 0, removed = 0;

	dprintk("%s:Begin lo %p\n", __func__, lo);

	/* No segments: mark the header destroyed and drop its initial
	 * reference (only once -- guarded by test_and_set_bit). */
	if (list_empty(&lo->plh_segs)) {
		if (!test_and_set_bit(NFS_LAYOUT_DESTROYED, &lo->plh_flags))
			put_layout_hdr_locked(lo);
		return 0;
	}
	list_for_each_entry_safe(lseg, next, &lo->plh_segs, pls_list)
		if (!recall_range ||
		    should_free_lseg(&lseg->pls_range, recall_range)) {
			dprintk("%s: freeing lseg %p iomode %d "
				"offset %llu length %llu\n", __func__,
				lseg, lseg->pls_range.iomode, lseg->pls_range.offset,
				lseg->pls_range.length);
			invalid++;
			removed += mark_lseg_invalid(lseg, tmp_list);
		}
	/* Segments still pinned by outstanding I/O remain counted. */
	dprintk("%s:Return %i\n", __func__, invalid - removed);
	return invalid - removed;
}
400 | ||
/* note free_me must contain lsegs from a single layout_hdr */
void
pnfs_free_lseg_list(struct list_head *free_me)
{
	struct pnfs_layout_segment *lseg, *tmp;
	struct pnfs_layout_hdr *lo;

	if (list_empty(free_me))
		return;

	/* All entries share one header (see comment above), so the first
	 * entry's pls_layout identifies it. */
	lo = list_first_entry(free_me, struct pnfs_layout_segment,
			      pls_list)->pls_layout;

	/* If the header has been marked destroyed, take it off the
	 * per-client layouts list under cl_lock. */
	if (test_bit(NFS_LAYOUT_DESTROYED, &lo->plh_flags)) {
		struct nfs_client *clp;

		clp = NFS_SERVER(lo->plh_inode)->nfs_client;
		spin_lock(&clp->cl_lock);
		list_del_init(&lo->plh_layouts);
		spin_unlock(&clp->cl_lock);
	}
	list_for_each_entry_safe(lseg, tmp, free_me, pls_list) {
		list_del(&lseg->pls_list);
		free_lseg(lseg);
	}
}
427 | ||
e5e94017 BH |
/* Drop the entire layout cache for one inode: block further LAYOUTGETs,
 * invalidate every segment under the i_lock, then free them outside it. */
void
pnfs_destroy_layout(struct nfs_inode *nfsi)
{
	struct pnfs_layout_hdr *lo;
	LIST_HEAD(tmp_list);

	spin_lock(&nfsi->vfs_inode.i_lock);
	lo = nfsi->layout;
	if (lo) {
		lo->plh_block_lgets++; /* permanently block new LAYOUTGETs */
		mark_matching_lsegs_invalid(lo, &tmp_list, NULL);
	}
	spin_unlock(&nfsi->vfs_inode.i_lock);
	pnfs_free_lseg_list(&tmp_list);
}
443 | ||
/*
 * Called by the state manger to remove all layouts established under an
 * expired lease.
 */
void
pnfs_destroy_all_layouts(struct nfs_client *clp)
{
	struct nfs_server *server;
	struct pnfs_layout_hdr *lo;
	LIST_HEAD(tmp_list);

	/* Splice every server's layout list onto tmp_list under cl_lock,
	 * then destroy them one by one without the lock held. */
	spin_lock(&clp->cl_lock);
	rcu_read_lock();
	list_for_each_entry_rcu(server, &clp->cl_superblocks, client_link) {
		if (!list_empty(&server->layouts))
			list_splice_init(&server->layouts, &tmp_list);
	}
	rcu_read_unlock();
	spin_unlock(&clp->cl_lock);

	while (!list_empty(&tmp_list)) {
		lo = list_entry(tmp_list.next, struct pnfs_layout_hdr,
				plh_layouts);
		dprintk("%s freeing layout for inode %lu\n", __func__,
			lo->plh_inode->i_ino);
		list_del_init(&lo->plh_layouts);
		pnfs_destroy_layout(NFS_I(lo->plh_inode));
	}
}
473 | ||
/* update lo->plh_stateid with new if is more recent
 *
 * Sequence ids are compared with wraparound-safe signed arithmetic on the
 * unsigned difference, so "newer" means within 2**31 ahead.
 */
void
pnfs_set_layout_stateid(struct pnfs_layout_hdr *lo, const nfs4_stateid *new,
			bool update_barrier)
{
	u32 oldseq, newseq;

	oldseq = be32_to_cpu(lo->plh_stateid.stateid.seqid);
	newseq = be32_to_cpu(new->stateid.seqid);
	if ((int)(newseq - oldseq) > 0) {
		memcpy(&lo->plh_stateid, &new->stateid, sizeof(new->stateid));
		if (update_barrier) {
			u32 new_barrier = be32_to_cpu(new->stateid.seqid);

			if ((int)(new_barrier - lo->plh_barrier))
				lo->plh_barrier = new_barrier;
		} else {
			/* Because of wraparound, we want to keep the barrier
			 * "close" to the current seqids. It needs to be
			 * within 2**31 to count as "behind", so if it
			 * gets too near that limit, give us a litle leeway
			 * and bring it to within 2**30.
			 * NOTE - and yes, this is all unsigned arithmetic.
			 */
			if (unlikely((newseq - lo->plh_barrier) > (3 << 29)))
				lo->plh_barrier = newseq - (1 << 30);
		}
	}
}
503 | ||
/* lget is set to 1 if called from inside send_layoutget call chain */
static bool
pnfs_layoutgets_blocked(struct pnfs_layout_hdr *lo, nfs4_stateid *stateid,
			int lget)
{
	/* A stateid at or behind the barrier must not be used. */
	if ((stateid) &&
	    (int)(lo->plh_barrier - be32_to_cpu(stateid->stateid.seqid)) >= 0)
		return true;
	/* Blocked while lgets are administratively blocked, the header is
	 * being destroyed, a bulk recall is in progress, or (with no cached
	 * segments) other LAYOUTGETs are already outstanding. */
	return lo->plh_block_lgets ||
		test_bit(NFS_LAYOUT_DESTROYED, &lo->plh_flags) ||
		test_bit(NFS_LAYOUT_BULK_RECALL, &lo->plh_flags) ||
		(list_empty(&lo->plh_segs) &&
		 (atomic_read(&lo->plh_outstanding) > lget));
}
518 | ||
fd6002e9 FI |
/* Pick the stateid to send with a LAYOUTGET: the open stateid when no
 * layout is cached yet, the layout stateid otherwise.  Returns 0 with
 * *dst filled in, or -EAGAIN if layoutgets are currently blocked. */
int
pnfs_choose_layoutget_stateid(nfs4_stateid *dst, struct pnfs_layout_hdr *lo,
			      struct nfs4_state *open_state)
{
	int status = 0;

	dprintk("--> %s\n", __func__);
	spin_lock(&lo->plh_inode->i_lock);
	if (pnfs_layoutgets_blocked(lo, NULL, 1)) {
		status = -EAGAIN;
	} else if (list_empty(&lo->plh_segs)) {
		int seq;

		/* seqlock retry loop gives a consistent open stateid copy. */
		do {
			seq = read_seqbegin(&open_state->seqlock);
			memcpy(dst->data, open_state->stateid.data,
			       sizeof(open_state->stateid.data));
		} while (read_seqretry(&open_state->seqlock, seq));
	} else
		memcpy(dst->data, lo->plh_stateid.data, sizeof(lo->plh_stateid.data));
	spin_unlock(&lo->plh_inode->i_lock);
	dprintk("<-- %s\n", __func__);
	return status;
}
543 | ||
544 | /* | |
545 | * Get layout from server. | |
546 | * for now, assume that whole file layouts are requested. | |
547 | * arg->offset: 0 | |
548 | * arg->length: all ones | |
549 | */ | |
e5e94017 BH |
550 | static struct pnfs_layout_segment * |
551 | send_layoutget(struct pnfs_layout_hdr *lo, | |
552 | struct nfs_open_context *ctx, | |
fb3296eb | 553 | struct pnfs_layout_range *range, |
a75b9df9 | 554 | gfp_t gfp_flags) |
e5e94017 | 555 | { |
b7edfaa1 | 556 | struct inode *ino = lo->plh_inode; |
b1f69b75 AA |
557 | struct nfs_server *server = NFS_SERVER(ino); |
558 | struct nfs4_layoutget *lgp; | |
559 | struct pnfs_layout_segment *lseg = NULL; | |
35124a09 WAA |
560 | struct page **pages = NULL; |
561 | int i; | |
562 | u32 max_resp_sz, max_pages; | |
b1f69b75 AA |
563 | |
564 | dprintk("--> %s\n", __func__); | |
e5e94017 | 565 | |
b1f69b75 | 566 | BUG_ON(ctx == NULL); |
a75b9df9 | 567 | lgp = kzalloc(sizeof(*lgp), gfp_flags); |
cf7d63f1 | 568 | if (lgp == NULL) |
b1f69b75 | 569 | return NULL; |
35124a09 WAA |
570 | |
571 | /* allocate pages for xdr post processing */ | |
572 | max_resp_sz = server->nfs_client->cl_session->fc_attrs.max_resp_sz; | |
573 | max_pages = max_resp_sz >> PAGE_SHIFT; | |
574 | ||
a75b9df9 | 575 | pages = kzalloc(max_pages * sizeof(struct page *), gfp_flags); |
35124a09 WAA |
576 | if (!pages) |
577 | goto out_err_free; | |
578 | ||
579 | for (i = 0; i < max_pages; i++) { | |
a75b9df9 | 580 | pages[i] = alloc_page(gfp_flags); |
35124a09 WAA |
581 | if (!pages[i]) |
582 | goto out_err_free; | |
583 | } | |
584 | ||
fb3296eb BH |
585 | lgp->args.minlength = PAGE_CACHE_SIZE; |
586 | if (lgp->args.minlength > range->length) | |
587 | lgp->args.minlength = range->length; | |
b1f69b75 | 588 | lgp->args.maxcount = PNFS_LAYOUT_MAXSIZE; |
fb3296eb | 589 | lgp->args.range = *range; |
b1f69b75 AA |
590 | lgp->args.type = server->pnfs_curr_ld->id; |
591 | lgp->args.inode = ino; | |
592 | lgp->args.ctx = get_nfs_open_context(ctx); | |
35124a09 WAA |
593 | lgp->args.layout.pages = pages; |
594 | lgp->args.layout.pglen = max_pages * PAGE_SIZE; | |
b1f69b75 | 595 | lgp->lsegpp = &lseg; |
a75b9df9 | 596 | lgp->gfp_flags = gfp_flags; |
b1f69b75 AA |
597 | |
598 | /* Synchronously retrieve layout information from server and | |
599 | * store in lseg. | |
600 | */ | |
601 | nfs4_proc_layoutget(lgp); | |
974cec8c | 602 | if (!lseg) { |
b1f69b75 | 603 | /* remember that LAYOUTGET failed and suspend trying */ |
fb3296eb | 604 | set_bit(lo_fail_bit(range->iomode), &lo->plh_flags); |
974cec8c | 605 | } |
35124a09 WAA |
606 | |
607 | /* free xdr pages */ | |
608 | for (i = 0; i < max_pages; i++) | |
609 | __free_page(pages[i]); | |
610 | kfree(pages); | |
611 | ||
974cec8c | 612 | return lseg; |
35124a09 WAA |
613 | |
614 | out_err_free: | |
615 | /* free any allocated xdr pages, lgp as it's not used */ | |
616 | if (pages) { | |
617 | for (i = 0; i < max_pages; i++) { | |
618 | if (!pages[i]) | |
619 | break; | |
620 | __free_page(pages[i]); | |
621 | } | |
622 | kfree(pages); | |
623 | } | |
624 | kfree(lgp); | |
625 | return NULL; | |
974cec8c AA |
626 | } |
627 | ||
/* Initiates a LAYOUTRETURN(FILE) */
int
_pnfs_return_layout(struct inode *ino)
{
	struct pnfs_layout_hdr *lo = NULL;
	struct nfs_inode *nfsi = NFS_I(ino);
	LIST_HEAD(tmp_list);
	struct nfs4_layoutreturn *lrp;
	nfs4_stateid stateid;
	int status = 0;

	dprintk("--> %s\n", __func__);

	spin_lock(&ino->i_lock);
	lo = nfsi->layout;
	if (!lo) {
		spin_unlock(&ino->i_lock);
		dprintk("%s: no layout to return\n", __func__);
		return status;
	}
	/* Snapshot the stateid to send before invalidating segments. */
	stateid = nfsi->layout->plh_stateid;
	/* Reference matched in nfs4_layoutreturn_release */
	get_layout_hdr(lo);
	mark_matching_lsegs_invalid(lo, &tmp_list, NULL);
	/* Block new LAYOUTGETs while the return is in flight. */
	lo->plh_block_lgets++;
	spin_unlock(&ino->i_lock);
	pnfs_free_lseg_list(&tmp_list);

	WARN_ON(test_bit(NFS_INO_LAYOUTCOMMIT, &nfsi->flags));

	lrp = kzalloc(sizeof(*lrp), GFP_KERNEL);
	if (unlikely(lrp == NULL)) {
		status = -ENOMEM;
		/* Could not return the layout: suspend both iomodes and
		 * drop the reference taken above. */
		set_bit(NFS_LAYOUT_RW_FAILED, &lo->plh_flags);
		set_bit(NFS_LAYOUT_RO_FAILED, &lo->plh_flags);
		put_layout_hdr(lo);
		goto out;
	}

	lrp->args.stateid = stateid;
	lrp->args.layout_type = NFS_SERVER(ino)->pnfs_curr_ld->id;
	lrp->args.inode = ino;
	lrp->clp = NFS_SERVER(ino)->nfs_client;

	status = nfs4_proc_layoutreturn(lrp);
out:
	dprintk("<-- %s status: %d\n", __func__, status);
	return status;
}
677 | ||
/* Return-on-close: if the layout is marked ROC (and not under bulk
 * recall), invalidate all ROC segments and block new layoutgets.
 * Returns true if a return-on-close was started; the matching reference
 * is dropped in pnfs_roc_release. */
bool pnfs_roc(struct inode *ino)
{
	struct pnfs_layout_hdr *lo;
	struct pnfs_layout_segment *lseg, *tmp;
	LIST_HEAD(tmp_list);
	bool found = false;

	spin_lock(&ino->i_lock);
	lo = NFS_I(ino)->layout;
	if (!lo || !test_and_clear_bit(NFS_LAYOUT_ROC, &lo->plh_flags) ||
	    test_bit(NFS_LAYOUT_BULK_RECALL, &lo->plh_flags))
		goto out_nolayout;
	list_for_each_entry_safe(lseg, tmp, &lo->plh_segs, pls_list)
		if (test_bit(NFS_LSEG_ROC, &lseg->pls_flags)) {
			mark_lseg_invalid(lseg, &tmp_list);
			found = true;
		}
	if (!found)
		goto out_nolayout;
	lo->plh_block_lgets++;
	get_layout_hdr(lo); /* matched in pnfs_roc_release */
	spin_unlock(&ino->i_lock);
	pnfs_free_lseg_list(&tmp_list);
	return true;

out_nolayout:
	spin_unlock(&ino->i_lock);
	return false;
}
707 | ||
/* Finish a return-on-close started by pnfs_roc: unblock layoutgets and
 * drop the reference taken there. */
void pnfs_roc_release(struct inode *ino)
{
	struct pnfs_layout_hdr *lo;

	spin_lock(&ino->i_lock);
	lo = NFS_I(ino)->layout;
	lo->plh_block_lgets--;
	put_layout_hdr_locked(lo);
	spin_unlock(&ino->i_lock);
}
718 | ||
/* Advance the layout stateid barrier after a return-on-close; only moves
 * forward (wraparound-safe signed comparison). */
void pnfs_roc_set_barrier(struct inode *ino, u32 barrier)
{
	struct pnfs_layout_hdr *lo;

	spin_lock(&ino->i_lock);
	lo = NFS_I(ino)->layout;
	if ((int)(barrier - lo->plh_barrier) > 0)
		lo->plh_barrier = barrier;
	spin_unlock(&ino->i_lock);
}
729 | ||
/* Check whether any ROC segments are still outstanding.  Returns true if
 * so (caller keeps waiting); otherwise fills *barrier with a conservative
 * seqid barrier for the close. */
bool pnfs_roc_drain(struct inode *ino, u32 *barrier)
{
	struct nfs_inode *nfsi = NFS_I(ino);
	struct pnfs_layout_segment *lseg;
	bool found = false;

	spin_lock(&ino->i_lock);
	list_for_each_entry(lseg, &nfsi->layout->plh_segs, pls_list)
		if (test_bit(NFS_LSEG_ROC, &lseg->pls_flags)) {
			found = true;
			break;
		}
	if (!found) {
		struct pnfs_layout_hdr *lo = nfsi->layout;
		u32 current_seqid = be32_to_cpu(lo->plh_stateid.stateid.seqid);

		/* Since close does not return a layout stateid for use as
		 * a barrier, we choose the worst-case barrier.
		 */
		*barrier = current_seqid + atomic_read(&lo->plh_outstanding);
	}
	spin_unlock(&ino->i_lock);
	return found;
}
754 | ||
b1f69b75 AA |
755 | /* |
756 | * Compare two layout segments for sorting into layout cache. | |
757 | * We want to preferentially return RW over RO layouts, so ensure those | |
758 | * are seen first. | |
759 | */ | |
760 | static s64 | |
fb3296eb BH |
761 | cmp_layout(struct pnfs_layout_range *l1, |
762 | struct pnfs_layout_range *l2) | |
b1f69b75 | 763 | { |
fb3296eb BH |
764 | s64 d; |
765 | ||
766 | /* high offset > low offset */ | |
767 | d = l1->offset - l2->offset; | |
768 | if (d) | |
769 | return d; | |
770 | ||
771 | /* short length > long length */ | |
772 | d = l2->length - l1->length; | |
773 | if (d) | |
774 | return d; | |
775 | ||
b1f69b75 | 776 | /* read > read/write */ |
fb3296eb | 777 | return (int)(l1->iomode == IOMODE_READ) - (int)(l2->iomode == IOMODE_READ); |
b1f69b75 AA |
778 | } |
779 | ||
/* Insert @lseg into @lo's segment list in cmp_layout order, taking a
 * header reference for it (dropped in free_lseg).  Caller holds the
 * inode's i_lock. */
static void
pnfs_insert_layout(struct pnfs_layout_hdr *lo,
		   struct pnfs_layout_segment *lseg)
{
	struct pnfs_layout_segment *lp;

	dprintk("%s:Begin\n", __func__);

	assert_spin_locked(&lo->plh_inode->i_lock);
	/* Walk until the first entry that should sort after lseg. */
	list_for_each_entry(lp, &lo->plh_segs, pls_list) {
		if (cmp_layout(&lseg->pls_range, &lp->pls_range) > 0)
			continue;
		list_add_tail(&lseg->pls_list, &lp->pls_list);
		dprintk("%s: inserted lseg %p "
			"iomode %d offset %llu length %llu before "
			"lp %p iomode %d offset %llu length %llu\n",
			__func__, lseg, lseg->pls_range.iomode,
			lseg->pls_range.offset, lseg->pls_range.length,
			lp, lp->pls_range.iomode, lp->pls_range.offset,
			lp->pls_range.length);
		goto out;
	}
	list_add_tail(&lseg->pls_list, &lo->plh_segs);
	dprintk("%s: inserted lseg %p "
		"iomode %d offset %llu length %llu at tail\n",
		__func__, lseg, lseg->pls_range.iomode,
		lseg->pls_range.offset, lseg->pls_range.length);
out:
	get_layout_hdr(lo);

	dprintk("%s:Return\n", __func__);
}
812 | ||
813 | static struct pnfs_layout_hdr * | |
a75b9df9 | 814 | alloc_init_layout_hdr(struct inode *ino, gfp_t gfp_flags) |
e5e94017 BH |
815 | { |
816 | struct pnfs_layout_hdr *lo; | |
817 | ||
636fb9c8 | 818 | lo = pnfs_alloc_layout_hdr(ino, gfp_flags); |
e5e94017 BH |
819 | if (!lo) |
820 | return NULL; | |
cc6e5340 | 821 | atomic_set(&lo->plh_refcount, 1); |
b7edfaa1 FI |
822 | INIT_LIST_HEAD(&lo->plh_layouts); |
823 | INIT_LIST_HEAD(&lo->plh_segs); | |
43f1b3da | 824 | INIT_LIST_HEAD(&lo->plh_bulk_recall); |
b7edfaa1 | 825 | lo->plh_inode = ino; |
e5e94017 BH |
826 | return lo; |
827 | } | |
828 | ||
/* Return the inode's layout header, allocating one if needed.  Caller
 * holds the i_lock; it is dropped and re-taken around the allocation, so
 * the function handles losing the allocation race.  Returns NULL only
 * when an existing header is already marked destroyed. */
static struct pnfs_layout_hdr *
pnfs_find_alloc_layout(struct inode *ino, gfp_t gfp_flags)
{
	struct nfs_inode *nfsi = NFS_I(ino);
	struct pnfs_layout_hdr *new = NULL;

	dprintk("%s Begin ino=%p layout=%p\n", __func__, ino, nfsi->layout);

	assert_spin_locked(&ino->i_lock);
	if (nfsi->layout) {
		if (test_bit(NFS_LAYOUT_DESTROYED, &nfsi->layout->plh_flags))
			return NULL;
		else
			return nfsi->layout;
	}
	/* Drop the lock to allocate (may sleep), then re-check. */
	spin_unlock(&ino->i_lock);
	new = alloc_init_layout_hdr(ino, gfp_flags);
	spin_lock(&ino->i_lock);

	if (likely(nfsi->layout == NULL))	/* Won the race? */
		nfsi->layout = new;
	else
		pnfs_free_layout_hdr(new);
	return nfsi->layout;
}
854 | ||
b1f69b75 AA |
855 | /* |
856 | * iomode matching rules: | |
857 | * iomode lseg match | |
858 | * ----- ----- ----- | |
859 | * ANY READ true | |
860 | * ANY RW true | |
861 | * RW READ false | |
862 | * RW RW true | |
863 | * READ READ true | |
864 | * READ RW true | |
865 | */ | |
866 | static int | |
fb3296eb BH |
867 | is_matching_lseg(struct pnfs_layout_range *ls_range, |
868 | struct pnfs_layout_range *range) | |
b1f69b75 | 869 | { |
fb3296eb BH |
870 | struct pnfs_layout_range range1; |
871 | ||
872 | if ((range->iomode == IOMODE_RW && | |
873 | ls_range->iomode != IOMODE_RW) || | |
874 | !lo_seg_intersecting(ls_range, range)) | |
875 | return 0; | |
876 | ||
877 | /* range1 covers only the first byte in the range */ | |
878 | range1 = *range; | |
879 | range1.length = 1; | |
880 | return lo_seg_contained(ls_range, &range1); | |
b1f69b75 AA |
881 | } |
882 | ||
883 | /* | |
884 | * lookup range in layout | |
885 | */ | |
e5e94017 | 886 | static struct pnfs_layout_segment * |
fb3296eb BH |
887 | pnfs_find_lseg(struct pnfs_layout_hdr *lo, |
888 | struct pnfs_layout_range *range) | |
e5e94017 | 889 | { |
b1f69b75 AA |
890 | struct pnfs_layout_segment *lseg, *ret = NULL; |
891 | ||
892 | dprintk("%s:Begin\n", __func__); | |
893 | ||
b7edfaa1 FI |
894 | assert_spin_locked(&lo->plh_inode->i_lock); |
895 | list_for_each_entry(lseg, &lo->plh_segs, pls_list) { | |
4541d16c | 896 | if (test_bit(NFS_LSEG_VALID, &lseg->pls_flags) && |
fb3296eb | 897 | is_matching_lseg(&lseg->pls_range, range)) { |
d684d2ae | 898 | ret = get_lseg(lseg); |
b1f69b75 AA |
899 | break; |
900 | } | |
d771e3a4 | 901 | if (lseg->pls_range.offset > range->offset) |
b1f69b75 AA |
902 | break; |
903 | } | |
904 | ||
905 | dprintk("%s:Return lseg %p ref %d\n", | |
4541d16c | 906 | __func__, ret, ret ? atomic_read(&ret->pls_refcount) : 0); |
b1f69b75 | 907 | return ret; |
e5e94017 BH |
908 | } |
909 | ||
/*
 * Layout segment is retrieved from the server if not cached.
 * The appropriate layout segment is referenced and returned to the caller.
 *
 * Returns NULL when pNFS is disabled for the superblock, when a recall is
 * in progress, when a previous LAYOUTGET for this iomode failed, or when
 * the LAYOUTGET itself fails -- callers then fall back to the MDS path.
 */
struct pnfs_layout_segment *
pnfs_update_layout(struct inode *ino,
		   struct nfs_open_context *ctx,
		   loff_t pos,
		   u64 count,
		   enum pnfs_iomode iomode,
		   gfp_t gfp_flags)
{
	struct pnfs_layout_range arg = {
		.iomode = iomode,
		.offset = pos,
		.length = count,
	};
	unsigned pg_offset;
	struct nfs_inode *nfsi = NFS_I(ino);
	struct nfs_server *server = NFS_SERVER(ino);
	struct nfs_client *clp = server->nfs_client;
	struct pnfs_layout_hdr *lo;
	struct pnfs_layout_segment *lseg = NULL;
	bool first = false;	/* true if we add the first segment list entry */

	if (!pnfs_enabled_sb(NFS_SERVER(ino)))
		return NULL;
	spin_lock(&ino->i_lock);
	/* May drop and retake i_lock internally to allocate the header. */
	lo = pnfs_find_alloc_layout(ino, gfp_flags);
	if (lo == NULL) {
		dprintk("%s ERROR: can't get pnfs_layout_hdr\n", __func__);
		goto out_unlock;
	}

	/* Do we even need to bother with this? */
	if (test_bit(NFS4CLNT_LAYOUTRECALL, &clp->cl_state) ||
	    test_bit(NFS_LAYOUT_BULK_RECALL, &lo->plh_flags)) {
		dprintk("%s matches recall, use MDS\n", __func__);
		goto out_unlock;
	}

	/* if LAYOUTGET already failed once we don't try again */
	if (test_bit(lo_fail_bit(iomode), &nfsi->layout->plh_flags))
		goto out_unlock;

	/* Check to see if the layout for the given range already exists */
	lseg = pnfs_find_lseg(lo, &arg);
	if (lseg)
		goto out_unlock;

	if (pnfs_layoutgets_blocked(lo, NULL, 0))
		goto out_unlock;
	/* Count this in-flight LAYOUTGET; paired with the atomic_dec below. */
	atomic_inc(&lo->plh_outstanding);

	/* Pin the header across the unlocked LAYOUTGET; dropped at the end. */
	get_layout_hdr(lo);
	if (list_empty(&lo->plh_segs))
		first = true;
	spin_unlock(&ino->i_lock);
	if (first) {
		/* The lo must be on the clp list if there is any
		 * chance of a CB_LAYOUTRECALL(FILE) coming in.
		 */
		spin_lock(&clp->cl_lock);
		BUG_ON(!list_empty(&lo->plh_layouts));
		list_add_tail(&lo->plh_layouts, &server->layouts);
		spin_unlock(&clp->cl_lock);
	}

	/* Expand the requested range to page-cache-aligned boundaries. */
	pg_offset = arg.offset & ~PAGE_CACHE_MASK;
	if (pg_offset) {
		arg.offset -= pg_offset;
		arg.length += pg_offset;
	}
	arg.length = PAGE_CACHE_ALIGN(arg.length);

	lseg = send_layoutget(lo, ctx, &arg, gfp_flags);
	if (!lseg && first) {
		/* LAYOUTGET failed and we added the list entry above:
		 * undo it so the header is not visible to bulk recalls.
		 */
		spin_lock(&clp->cl_lock);
		list_del_init(&lo->plh_layouts);
		spin_unlock(&clp->cl_lock);
	}
	atomic_dec(&lo->plh_outstanding);
	put_layout_hdr(lo);
out:
	dprintk("%s end, state 0x%lx lseg %p\n", __func__,
		nfsi->layout ? nfsi->layout->plh_flags : -1, lseg);
	return lseg;
out_unlock:
	spin_unlock(&ino->i_lock);
	goto out;
}
b1f69b75 AA |
1001 | |
/*
 * Process a LAYOUTGET reply: hand the layout blob to the layout driver,
 * insert the resulting segment into the inode's layout header, and record
 * the new layout stateid.
 *
 * If a layout recall raced in (client-wide or bulk on this header), or the
 * reply's stateid no longer matches the expected sequence, the reply is
 * "forgotten": the freshly allocated segment is freed and 0 is returned so
 * the caller does not use it.  Returns -ENOMEM / driver error on failure
 * to allocate the segment.
 */
int
pnfs_layout_process(struct nfs4_layoutget *lgp)
{
	struct pnfs_layout_hdr *lo = NFS_I(lgp->args.inode)->layout;
	struct nfs4_layoutget_res *res = &lgp->res;
	struct pnfs_layout_segment *lseg;
	struct inode *ino = lo->plh_inode;
	struct nfs_client *clp = NFS_SERVER(ino)->nfs_client;
	int status = 0;

	/* Inject layout blob into I/O device driver */
	lseg = NFS_SERVER(ino)->pnfs_curr_ld->alloc_lseg(lo, res, lgp->gfp_flags);
	if (!lseg || IS_ERR(lseg)) {
		if (!lseg)
			status = -ENOMEM;
		else
			status = PTR_ERR(lseg);
		dprintk("%s: Could not allocate layout: error %d\n",
			__func__, status);
		goto out;
	}

	spin_lock(&ino->i_lock);
	/* Recall in progress: discard the reply rather than install it. */
	if (test_bit(NFS4CLNT_LAYOUTRECALL, &clp->cl_state) ||
	    test_bit(NFS_LAYOUT_BULK_RECALL, &lo->plh_flags)) {
		dprintk("%s forget reply due to recall\n", __func__);
		goto out_forget_reply;
	}

	if (pnfs_layoutgets_blocked(lo, &res->stateid, 1)) {
		dprintk("%s forget reply due to state\n", __func__);
		goto out_forget_reply;
	}
	init_lseg(lo, lseg);
	lseg->pls_range = res->range;
	/* Hand a referenced segment back through the caller's slot. */
	*lgp->lsegpp = get_lseg(lseg);
	pnfs_insert_layout(lo, lseg);

	if (res->return_on_close) {
		/* Server asked for return-on-close; flag segment and header. */
		set_bit(NFS_LSEG_ROC, &lseg->pls_flags);
		set_bit(NFS_LAYOUT_ROC, &lo->plh_flags);
	}

	/* Done processing layoutget. Set the layout stateid */
	pnfs_set_layout_stateid(lo, &res->stateid, false);
	spin_unlock(&ino->i_lock);
out:
	return status;

out_forget_reply:
	spin_unlock(&ino->i_lock);
	/* free_lseg needs pls_layout set even though we never inserted. */
	lseg->pls_layout = lo;
	NFS_SERVER(ino)->pnfs_curr_ld->free_lseg(lseg);
	goto out;
}
1057 | ||
1751c363 TM |
1058 | bool |
1059 | pnfs_pageio_init_read(struct nfs_pageio_descriptor *pgio, struct inode *inode) | |
1060 | { | |
1061 | struct nfs_server *server = NFS_SERVER(inode); | |
1062 | struct pnfs_layoutdriver_type *ld = server->pnfs_curr_ld; | |
1063 | ||
1064 | if (ld == NULL) | |
1065 | return false; | |
1066 | nfs_pageio_init(pgio, inode, ld->pg_read_ops, server->rsize, 0); | |
1067 | return true; | |
1068 | } | |
1069 | ||
1070 | bool | |
1071 | pnfs_pageio_init_write(struct nfs_pageio_descriptor *pgio, struct inode *inode, int ioflags) | |
1072 | { | |
1073 | struct nfs_server *server = NFS_SERVER(inode); | |
1074 | struct pnfs_layoutdriver_type *ld = server->pnfs_curr_ld; | |
1075 | ||
1076 | if (ld == NULL) | |
1077 | return false; | |
1078 | nfs_pageio_init(pgio, inode, ld->pg_write_ops, server->wsize, ioflags); | |
1079 | return true; | |
1080 | } | |
1081 | ||
18ad0a9f | 1082 | bool |
dfed206b BH |
1083 | pnfs_generic_pg_test(struct nfs_pageio_descriptor *pgio, struct nfs_page *prev, |
1084 | struct nfs_page *req) | |
94ad1c80 | 1085 | { |
dfed206b BH |
1086 | enum pnfs_iomode access_type; |
1087 | gfp_t gfp_flags; | |
bae724ef | 1088 | |
dfed206b BH |
1089 | /* We assume that pg_ioflags == 0 iff we're reading a page */ |
1090 | if (pgio->pg_ioflags == 0) { | |
1091 | access_type = IOMODE_READ; | |
1092 | gfp_flags = GFP_KERNEL; | |
1093 | } else { | |
1094 | access_type = IOMODE_RW; | |
1095 | gfp_flags = GFP_NOFS; | |
1096 | } | |
94ad1c80 | 1097 | |
8f7d5efb TM |
1098 | if (pgio->pg_lseg == NULL) { |
1099 | if (pgio->pg_count != prev->wb_bytes) | |
1100 | return true; | |
bae724ef FI |
1101 | /* This is first coelesce call for a series of nfs_pages */ |
1102 | pgio->pg_lseg = pnfs_update_layout(pgio->pg_inode, | |
1103 | prev->wb_context, | |
8f7d5efb | 1104 | req_offset(prev), |
fb3296eb | 1105 | pgio->pg_count, |
dfed206b BH |
1106 | access_type, |
1107 | gfp_flags); | |
8f7d5efb TM |
1108 | if (pgio->pg_lseg == NULL) |
1109 | return true; | |
bae724ef | 1110 | } |
94ad1c80 | 1111 | |
19982ba8 TM |
1112 | /* |
1113 | * Test if a nfs_page is fully contained in the pnfs_layout_range. | |
1114 | * Note that this test makes several assumptions: | |
1115 | * - that the previous nfs_page in the struct nfs_pageio_descriptor | |
1116 | * is known to lie within the range. | |
1117 | * - that the nfs_page being tested is known to be contiguous with the | |
1118 | * previous nfs_page. | |
1119 | * - Layout ranges are page aligned, so we only have to test the | |
1120 | * start offset of the request. | |
1121 | * | |
1122 | * Please also note that 'end_offset' is actually the offset of the | |
1123 | * first byte that lies outside the pnfs_layout_range. FIXME? | |
1124 | * | |
1125 | */ | |
1126 | return req_offset(req) < end_offset(pgio->pg_lseg->pls_range.offset, | |
1127 | pgio->pg_lseg->pls_range.length); | |
94ad1c80 | 1128 | } |
89a58e32 | 1129 | EXPORT_SYMBOL_GPL(pnfs_generic_pg_test); |
94ad1c80 | 1130 | |
d20581aa BH |
1131 | /* |
1132 | * Called by non rpc-based layout drivers | |
1133 | */ | |
1134 | int | |
1135 | pnfs_ld_write_done(struct nfs_write_data *data) | |
44b83799 | 1136 | { |
d20581aa | 1137 | int status; |
44b83799 | 1138 | |
d20581aa BH |
1139 | if (!data->pnfs_error) { |
1140 | pnfs_set_layoutcommit(data); | |
1141 | data->mds_ops->rpc_call_done(&data->task, data); | |
1142 | data->mds_ops->rpc_release(data); | |
1143 | return 0; | |
1144 | } | |
44b83799 | 1145 | |
d20581aa BH |
1146 | dprintk("%s: pnfs_error=%d, retry via MDS\n", __func__, |
1147 | data->pnfs_error); | |
1148 | status = nfs_initiate_write(data, NFS_CLIENT(data->inode), | |
1149 | data->mds_ops, NFS_FILE_SYNC); | |
1150 | return status ? : -EAGAIN; | |
44b83799 | 1151 | } |
d20581aa | 1152 | EXPORT_SYMBOL_GPL(pnfs_ld_write_done); |
44b83799 | 1153 | |
0382b744 AA |
1154 | enum pnfs_try_status |
1155 | pnfs_try_to_write_data(struct nfs_write_data *wdata, | |
1156 | const struct rpc_call_ops *call_ops, int how) | |
1157 | { | |
1158 | struct inode *inode = wdata->inode; | |
1159 | enum pnfs_try_status trypnfs; | |
1160 | struct nfs_server *nfss = NFS_SERVER(inode); | |
1161 | ||
1162 | wdata->mds_ops = call_ops; | |
1163 | ||
1164 | dprintk("%s: Writing ino:%lu %u@%llu (how %d)\n", __func__, | |
1165 | inode->i_ino, wdata->args.count, wdata->args.offset, how); | |
1166 | ||
1167 | trypnfs = nfss->pnfs_curr_ld->write_pagelist(wdata, how); | |
1168 | if (trypnfs == PNFS_NOT_ATTEMPTED) { | |
1169 | put_lseg(wdata->lseg); | |
1170 | wdata->lseg = NULL; | |
1171 | } else | |
1172 | nfs_inc_stats(inode, NFSIOS_PNFS_WRITE); | |
1173 | ||
1174 | dprintk("%s End (trypnfs:%d)\n", __func__, trypnfs); | |
1175 | return trypnfs; | |
1176 | } | |
1177 | ||
d20581aa BH |
1178 | /* |
1179 | * Called by non rpc-based layout drivers | |
1180 | */ | |
1181 | int | |
1182 | pnfs_ld_read_done(struct nfs_read_data *data) | |
1183 | { | |
1184 | int status; | |
1185 | ||
1186 | if (!data->pnfs_error) { | |
1187 | __nfs4_read_done_cb(data); | |
1188 | data->mds_ops->rpc_call_done(&data->task, data); | |
1189 | data->mds_ops->rpc_release(data); | |
1190 | return 0; | |
1191 | } | |
1192 | ||
1193 | dprintk("%s: pnfs_error=%d, retry via MDS\n", __func__, | |
1194 | data->pnfs_error); | |
1195 | status = nfs_initiate_read(data, NFS_CLIENT(data->inode), | |
1196 | data->mds_ops); | |
1197 | return status ? : -EAGAIN; | |
1198 | } | |
1199 | EXPORT_SYMBOL_GPL(pnfs_ld_read_done); | |
1200 | ||
64419a9b AA |
1201 | /* |
1202 | * Call the appropriate parallel I/O subsystem read function. | |
1203 | */ | |
1204 | enum pnfs_try_status | |
1205 | pnfs_try_to_read_data(struct nfs_read_data *rdata, | |
1206 | const struct rpc_call_ops *call_ops) | |
1207 | { | |
1208 | struct inode *inode = rdata->inode; | |
1209 | struct nfs_server *nfss = NFS_SERVER(inode); | |
1210 | enum pnfs_try_status trypnfs; | |
1211 | ||
1212 | rdata->mds_ops = call_ops; | |
1213 | ||
1214 | dprintk("%s: Reading ino:%lu %u@%llu\n", | |
1215 | __func__, inode->i_ino, rdata->args.count, rdata->args.offset); | |
1216 | ||
1217 | trypnfs = nfss->pnfs_curr_ld->read_pagelist(rdata); | |
1218 | if (trypnfs == PNFS_NOT_ATTEMPTED) { | |
1219 | put_lseg(rdata->lseg); | |
1220 | rdata->lseg = NULL; | |
1221 | } else { | |
1222 | nfs_inc_stats(inode, NFSIOS_PNFS_READ); | |
1223 | } | |
1224 | dprintk("%s End (trypnfs:%d)\n", __func__, trypnfs); | |
1225 | return trypnfs; | |
1226 | } | |
863a3c6c AA |
1227 | |
1228 | /* | |
1229 | * Currently there is only one (whole file) write lseg. | |
1230 | */ | |
1231 | static struct pnfs_layout_segment *pnfs_list_write_lseg(struct inode *inode) | |
1232 | { | |
1233 | struct pnfs_layout_segment *lseg, *rv = NULL; | |
1234 | ||
1235 | list_for_each_entry(lseg, &NFS_I(inode)->layout->plh_segs, pls_list) | |
1236 | if (lseg->pls_range.iomode == IOMODE_RW) | |
1237 | rv = lseg; | |
1238 | return rv; | |
1239 | } | |
1240 | ||
/*
 * Mark the inode as needing a LAYOUTCOMMIT after a successful pNFS write.
 *
 * The first caller to set NFS_INO_LAYOUTCOMMIT takes a segment reference
 * and stashes the write credential on the lseg (both released later in
 * nfs4_layoutcommit_release) and marks the inode dirty.  Every caller
 * extends the segment's recorded end position if this write went further.
 *
 * NOTE(review): wdata->lseg is dereferenced unconditionally below, so the
 * caller is assumed to guarantee a non-NULL lseg -- confirm at call sites.
 */
void
pnfs_set_layoutcommit(struct nfs_write_data *wdata)
{
	struct nfs_inode *nfsi = NFS_I(wdata->inode);
	/* End of the byte range this write covered on the data server. */
	loff_t end_pos = wdata->mds_offset + wdata->res.count;
	bool mark_as_dirty = false;

	spin_lock(&nfsi->vfs_inode.i_lock);
	if (!test_and_set_bit(NFS_INO_LAYOUTCOMMIT, &nfsi->flags)) {
		/* references matched in nfs4_layoutcommit_release */
		get_lseg(wdata->lseg);
		wdata->lseg->pls_lc_cred =
			get_rpccred(wdata->args.context->state->owner->so_cred);
		mark_as_dirty = true;
		dprintk("%s: Set layoutcommit for inode %lu ",
			__func__, wdata->inode->i_ino);
	}
	/* Track the furthest byte written so LAYOUTCOMMIT covers it. */
	if (end_pos > wdata->lseg->pls_end_pos)
		wdata->lseg->pls_end_pos = end_pos;
	spin_unlock(&nfsi->vfs_inode.i_lock);

	/* if pnfs_layoutcommit_inode() runs between inode locks, the next one
	 * will be a noop because NFS_INO_LAYOUTCOMMIT will not be set */
	if (mark_as_dirty)
		mark_inode_dirty_sync(wdata->inode);
}
EXPORT_SYMBOL_GPL(pnfs_set_layoutcommit);
1268 | ||
de4b15c7 AA |
/*
 * For the LAYOUT4_NFSV4_1_FILES layout type, NFS_DATA_SYNC WRITEs and
 * NFS_UNSTABLE WRITEs with a COMMIT to data servers must store enough
 * data to disk to allow the server to recover the data if it crashes.
 * LAYOUTCOMMIT is only needed when the NFL4_UFLG_COMMIT_THRU_MDS flag
 * is off, and a COMMIT is sent to a data server, or
 * if WRITEs to a data server return NFS_DATA_SYNC.
 *
 * Sends a LAYOUTCOMMIT for @inode if NFS_INO_LAYOUTCOMMIT is set,
 * consuming the segment reference and credential stashed by
 * pnfs_set_layoutcommit().  Returns 0 if nothing needed doing, -ENOMEM
 * on allocation failure (inode re-marked dirty so it is retried), or
 * the status of nfs4_proc_layoutcommit().
 */
int
pnfs_layoutcommit_inode(struct inode *inode, bool sync)
{
	struct nfs4_layoutcommit_data *data;
	struct nfs_inode *nfsi = NFS_I(inode);
	struct pnfs_layout_segment *lseg;
	struct rpc_cred *cred;
	loff_t end_pos;
	int status = 0;

	dprintk("--> %s inode %lu\n", __func__, inode->i_ino);

	/* Cheap unlocked check first; re-tested under the lock below. */
	if (!test_bit(NFS_INO_LAYOUTCOMMIT, &nfsi->flags))
		return 0;

	/* Note kzalloc ensures data->res.seq_res.sr_slot == NULL */
	data = kzalloc(sizeof(*data), GFP_NOFS);
	if (!data) {
		/* Re-dirty so the layoutcommit is retried later. */
		mark_inode_dirty_sync(inode);
		status = -ENOMEM;
		goto out;
	}

	spin_lock(&inode->i_lock);
	/* Someone else may have committed since the unlocked check. */
	if (!test_and_clear_bit(NFS_INO_LAYOUTCOMMIT, &nfsi->flags)) {
		spin_unlock(&inode->i_lock);
		kfree(data);
		goto out;
	}
	/*
	 * Currently only one (whole file) write lseg which is referenced
	 * in pnfs_set_layoutcommit and will be found.
	 * NOTE(review): lseg is dereferenced without a NULL check -- relies
	 * on the pairing with pnfs_set_layoutcommit; confirm the invariant.
	 */
	lseg = pnfs_list_write_lseg(inode);

	/* Take over the stashed end position and credential. */
	end_pos = lseg->pls_end_pos;
	cred = lseg->pls_lc_cred;
	lseg->pls_end_pos = 0;
	lseg->pls_lc_cred = NULL;

	memcpy(&data->args.stateid.data, nfsi->layout->plh_stateid.data,
		sizeof(nfsi->layout->plh_stateid.data));
	spin_unlock(&inode->i_lock);

	data->args.inode = inode;
	data->lseg = lseg;
	data->cred = cred;
	nfs_fattr_init(&data->fattr);
	data->args.bitmask = NFS_SERVER(inode)->cache_consistency_bitmask;
	data->res.fattr = &data->fattr;
	/* The protocol wants the last byte written, not the byte past it. */
	data->args.lastbytewritten = end_pos - 1;
	data->res.server = NFS_SERVER(inode);

	status = nfs4_proc_layoutcommit(data, sync);
out:
	dprintk("<-- %s status %d\n", __func__, status);
	return status;
}