staging: lustre: remove RETURN macro
drivers/staging/lustre/lustre/lov/lov_pack.c

/*
 * GPL HEADER START
 *
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 * GPL HEADER END
 */
/*
 * Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved.
 * Use is subject to license terms.
 *
 * Copyright (c) 2011, 2012, Intel Corporation.
 */
/*
 * This file is part of Lustre, http://www.lustre.org/
 * Lustre is a trademark of Sun Microsystems, Inc.
 *
 * lustre/lov/lov_pack.c
 *
 * (Un)packing of OST/MDS requests
 *
 * Author: Andreas Dilger <adilger@clusterfs.com>
 */

#define DEBUG_SUBSYSTEM S_LOV

#include <lustre_net.h>
#include <obd.h>
#include <obd_lov.h>
#include <obd_class.h>
#include <obd_support.h>
#include <lustre/lustre_user.h>

#include "lov_internal.h"

void lov_dump_lmm_common(int level, void *lmmp)
{
        struct lov_mds_md *lmm = lmmp;
        struct ost_id oi;

        lmm_oi_le_to_cpu(&oi, &lmm->lmm_oi);
        CDEBUG(level, "objid "DOSTID", magic 0x%08x, pattern %#x\n",
               POSTID(&oi), le32_to_cpu(lmm->lmm_magic),
               le32_to_cpu(lmm->lmm_pattern));
        CDEBUG(level, "stripe_size %u, stripe_count %u, layout_gen %u\n",
               le32_to_cpu(lmm->lmm_stripe_size),
               le16_to_cpu(lmm->lmm_stripe_count),
               le16_to_cpu(lmm->lmm_layout_gen));
}

static void lov_dump_lmm_objects(int level, struct lov_ost_data *lod,
                                 int stripe_count)
{
        int i;

        if (stripe_count > LOV_V1_INSANE_STRIPE_COUNT) {
                CDEBUG(level, "bad stripe_count %u > max_stripe_count %u\n",
                       stripe_count, LOV_V1_INSANE_STRIPE_COUNT);
                return;
        }

        for (i = 0; i < stripe_count; ++i, ++lod) {
                struct ost_id oi;

                ostid_le_to_cpu(&lod->l_ost_oi, &oi);
                CDEBUG(level, "stripe %u idx %u subobj "DOSTID"\n", i,
                       le32_to_cpu(lod->l_ost_idx), POSTID(&oi));
        }
}

void lov_dump_lmm_v1(int level, struct lov_mds_md_v1 *lmm)
{
        lov_dump_lmm_common(level, lmm);
        lov_dump_lmm_objects(level, lmm->lmm_objects,
                             le16_to_cpu(lmm->lmm_stripe_count));
}

void lov_dump_lmm_v3(int level, struct lov_mds_md_v3 *lmm)
{
        lov_dump_lmm_common(level, lmm);
        CDEBUG(level, "pool_name "LOV_POOLNAMEF"\n", lmm->lmm_pool_name);
        lov_dump_lmm_objects(level, lmm->lmm_objects,
                             le16_to_cpu(lmm->lmm_stripe_count));
}

void lov_dump_lmm(int level, void *lmm)
{
        int magic;

        magic = ((struct lov_mds_md_v1 *)(lmm))->lmm_magic;
        switch (magic) {
        case LOV_MAGIC_V1:
                return lov_dump_lmm_v1(level, (struct lov_mds_md_v1 *)(lmm));
        case LOV_MAGIC_V3:
                return lov_dump_lmm_v3(level, (struct lov_mds_md_v3 *)(lmm));
        default:
                CERROR("Cannot recognize lmm_magic %x", magic);
        }
        return;
}

#define LMM_ASSERT(test)                                        \
do {                                                            \
        if (!(test)) lov_dump_lmm(D_ERROR, lmm);                \
        LASSERT(test); /* so we know what assertion failed */   \
} while (0)

/* Pack LOV object metadata for disk storage. It is packed in LE byte
 * order and is opaque to the networking layer.
 *
 * XXX In the future, this will be enhanced to get the EA size from the
 * underlying OSC device(s) to get their EA sizes so we can stack
 * LOVs properly. For now lov_mds_md_size() just assumes one obd_id
 * per stripe.
 */
int lov_packmd(struct obd_export *exp, struct lov_mds_md **lmmp,
               struct lov_stripe_md *lsm)
{
        struct obd_device *obd = class_exp2obd(exp);
        struct lov_obd *lov = &obd->u.lov;
        struct lov_mds_md_v1 *lmmv1;
        struct lov_mds_md_v3 *lmmv3;
        __u16 stripe_count;
        struct lov_ost_data_v1 *lmm_objects;
        int lmm_size, lmm_magic;
        int i;
        int cplen = 0;

        if (lsm) {
                lmm_magic = lsm->lsm_magic;
        } else {
                if (lmmp && *lmmp)
                        lmm_magic = le32_to_cpu((*lmmp)->lmm_magic);
                else
                        /* lsm == NULL and lmmp == NULL */
                        lmm_magic = LOV_MAGIC;
        }

        if ((lmm_magic != LOV_MAGIC_V1) &&
            (lmm_magic != LOV_MAGIC_V3)) {
                CERROR("bad mem LOV MAGIC: 0x%08X != 0x%08X nor 0x%08X\n",
                       lmm_magic, LOV_MAGIC_V1, LOV_MAGIC_V3);
                return -EINVAL;
        }

        if (lsm) {
                /* If we are just sizing the EA, limit the stripe count
                 * to the actual number of OSTs in this filesystem. */
                if (!lmmp) {
                        stripe_count = lov_get_stripecnt(lov, lmm_magic,
                                                         lsm->lsm_stripe_count);
                        lsm->lsm_stripe_count = stripe_count;
                } else if (!lsm_is_released(lsm)) {
                        stripe_count = lsm->lsm_stripe_count;
                } else {
                        stripe_count = 0;
                }
        } else {
                /* No need to allocate more than maximum supported stripes.
                 * Anyway, this is pretty inaccurate since ld_tgt_count now
                 * represents max index and we should rely on the actual number
                 * of OSTs instead */
                stripe_count = lov_mds_md_stripecnt(lov->lov_ocd.ocd_max_easize,
                                                    lmm_magic);
                if (stripe_count > lov->desc.ld_tgt_count)
                        stripe_count = lov->desc.ld_tgt_count;
        }

        /* XXX LOV STACKING call into osc for sizes */
        lmm_size = lov_mds_md_size(stripe_count, lmm_magic);

        if (!lmmp)
                return lmm_size;

        if (*lmmp && !lsm) {
                stripe_count = le16_to_cpu((*lmmp)->lmm_stripe_count);
                lmm_size = lov_mds_md_size(stripe_count, lmm_magic);
                OBD_FREE_LARGE(*lmmp, lmm_size);
                *lmmp = NULL;
                return 0;
        }

        if (!*lmmp) {
                OBD_ALLOC_LARGE(*lmmp, lmm_size);
                if (!*lmmp)
                        return -ENOMEM;
        }

        CDEBUG(D_INFO, "lov_packmd: LOV_MAGIC 0x%08X, lmm_size = %d\n",
               lmm_magic, lmm_size);

        lmmv1 = *lmmp;
        lmmv3 = (struct lov_mds_md_v3 *)*lmmp;
        if (lmm_magic == LOV_MAGIC_V3)
                lmmv3->lmm_magic = cpu_to_le32(LOV_MAGIC_V3);
        else
                lmmv1->lmm_magic = cpu_to_le32(LOV_MAGIC_V1);

        if (!lsm)
                return lmm_size;

        /* lmmv1 and lmmv3 point to the same struct and have the
         * same first fields
         */
        lmm_oi_cpu_to_le(&lmmv1->lmm_oi, &lsm->lsm_oi);
        lmmv1->lmm_stripe_size = cpu_to_le32(lsm->lsm_stripe_size);
        lmmv1->lmm_stripe_count = cpu_to_le16(stripe_count);
        lmmv1->lmm_pattern = cpu_to_le32(lsm->lsm_pattern);
        lmmv1->lmm_layout_gen = cpu_to_le16(lsm->lsm_layout_gen);
        if (lsm->lsm_magic == LOV_MAGIC_V3) {
                cplen = strlcpy(lmmv3->lmm_pool_name, lsm->lsm_pool_name,
                                sizeof(lmmv3->lmm_pool_name));
                if (cplen >= sizeof(lmmv3->lmm_pool_name))
                        return -E2BIG;
                lmm_objects = lmmv3->lmm_objects;
        } else {
                lmm_objects = lmmv1->lmm_objects;
        }

        for (i = 0; i < stripe_count; i++) {
                struct lov_oinfo *loi = lsm->lsm_oinfo[i];
                /* XXX LOV STACKING call down to osc_packmd() to do packing */
                LASSERTF(ostid_id(&loi->loi_oi) != 0, "lmm_oi "DOSTID
                         " stripe %u/%u idx %u\n", POSTID(&lmmv1->lmm_oi),
                         i, stripe_count, loi->loi_ost_idx);
                ostid_cpu_to_le(&loi->loi_oi, &lmm_objects[i].l_ost_oi);
                lmm_objects[i].l_ost_gen = cpu_to_le32(loi->loi_ost_gen);
                lmm_objects[i].l_ost_idx = cpu_to_le32(loi->loi_ost_idx);
        }

        return lmm_size;
}
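
/*
 * Illustrative sketch (not part of the original file): one way a caller
 * could drive lov_packmd() through its three modes -- size query, pack,
 * and free.  The helper name is an assumption made for this example.
 */
#if 0 /* example only, not compiled */
static int lov_packmd_example(struct obd_export *exp,
                              struct lov_stripe_md *lsm)
{
        struct lov_mds_md *lmm = NULL;
        int size;

        /* lmmp == NULL: only report the EA size needed for this lsm */
        size = lov_packmd(exp, NULL, lsm);
        if (size < 0)
                return size;

        /* *lmmp == NULL: lov_packmd() allocates the buffer and packs lsm */
        size = lov_packmd(exp, &lmm, lsm);
        if (size < 0)
                return size;

        /* ... "lmm" now holds "size" bytes of little-endian metadata ... */

        /* *lmmp != NULL and lsm == NULL: free the packed buffer again */
        return lov_packmd(exp, &lmm, NULL);
}
#endif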

/* Find the max stripecount we should use */
__u16 lov_get_stripecnt(struct lov_obd *lov, __u32 magic, __u16 stripe_count)
{
        __u32 max_stripes = LOV_MAX_STRIPE_COUNT_OLD;

        if (!stripe_count)
                stripe_count = lov->desc.ld_default_stripe_count;
        if (stripe_count > lov->desc.ld_active_tgt_count)
                stripe_count = lov->desc.ld_active_tgt_count;
        if (!stripe_count)
                stripe_count = 1;

        /* stripe count is based on whether ldiskfs can handle
         * larger EA sizes */
        if (lov->lov_ocd.ocd_connect_flags & OBD_CONNECT_MAX_EASIZE &&
            lov->lov_ocd.ocd_max_easize)
                max_stripes = lov_mds_md_stripecnt(lov->lov_ocd.ocd_max_easize,
                                                   magic);

        if (stripe_count > max_stripes)
                stripe_count = max_stripes;

        return stripe_count;
}

static int lov_verify_lmm(void *lmm, int lmm_bytes, __u16 *stripe_count)
{
        int rc;

        if (lsm_op_find(le32_to_cpu(*(__u32 *)lmm)) == NULL) {
                char *buffer;
                int sz;

                CERROR("bad disk LOV MAGIC: 0x%08X; dumping LMM (size=%d):\n",
                       le32_to_cpu(*(__u32 *)lmm), lmm_bytes);
                sz = lmm_bytes * 2 + 1;
                OBD_ALLOC_LARGE(buffer, sz);
                if (buffer != NULL) {
                        int i;

                        for (i = 0; i < lmm_bytes; i++)
                                sprintf(buffer + 2 * i, "%.2X",
                                        ((char *)lmm)[i]);
                        buffer[sz - 1] = '\0';
                        CERROR("%s\n", buffer);
                        OBD_FREE_LARGE(buffer, sz);
                }
                return -EINVAL;
        }
        rc = lsm_op_find(le32_to_cpu(*(__u32 *)lmm))->lsm_lmm_verify(lmm,
                                                lmm_bytes, stripe_count);
        return rc;
}

int lov_alloc_memmd(struct lov_stripe_md **lsmp, __u16 stripe_count,
                    int pattern, int magic)
{
        int i, lsm_size;

        CDEBUG(D_INFO, "alloc lsm, stripe_count %d\n", stripe_count);

        *lsmp = lsm_alloc_plain(stripe_count, &lsm_size);
        if (!*lsmp) {
                CERROR("can't allocate lsmp stripe_count %d\n", stripe_count);
                return -ENOMEM;
        }

        atomic_set(&(*lsmp)->lsm_refc, 1);
        spin_lock_init(&(*lsmp)->lsm_lock);
        (*lsmp)->lsm_magic = magic;
        (*lsmp)->lsm_stripe_count = stripe_count;
        (*lsmp)->lsm_maxbytes = LUSTRE_STRIPE_MAXBYTES * stripe_count;
        (*lsmp)->lsm_pattern = pattern;
        (*lsmp)->lsm_pool_name[0] = '\0';
        (*lsmp)->lsm_layout_gen = 0;
        if (stripe_count > 0)
                (*lsmp)->lsm_oinfo[0]->loi_ost_idx = ~0;

        for (i = 0; i < stripe_count; i++)
                loi_init((*lsmp)->lsm_oinfo[i]);

        return lsm_size;
}

int lov_free_memmd(struct lov_stripe_md **lsmp)
{
        struct lov_stripe_md *lsm = *lsmp;
        int refc;

        *lsmp = NULL;
        LASSERT(atomic_read(&lsm->lsm_refc) > 0);
        if ((refc = atomic_dec_return(&lsm->lsm_refc)) == 0) {
                LASSERT(lsm_op_find(lsm->lsm_magic) != NULL);
                lsm_op_find(lsm->lsm_magic)->lsm_free(lsm);
        }
        return refc;
}

/* Unpack LOV object metadata from disk storage. It is packed in LE byte
 * order and is opaque to the networking layer.
 */
int lov_unpackmd(struct obd_export *exp, struct lov_stripe_md **lsmp,
                 struct lov_mds_md *lmm, int lmm_bytes)
{
        struct obd_device *obd = class_exp2obd(exp);
        struct lov_obd *lov = &obd->u.lov;
        int rc = 0, lsm_size;
        __u16 stripe_count;
        __u32 magic;
        __u32 pattern;

        /* If passed an MDS struct use values from there, otherwise defaults */
        if (lmm) {
                rc = lov_verify_lmm(lmm, lmm_bytes, &stripe_count);
                if (rc)
                        return rc;
                magic = le32_to_cpu(lmm->lmm_magic);
        } else {
                magic = LOV_MAGIC;
                stripe_count = lov_get_stripecnt(lov, magic, 0);
        }

        /* If we aren't passed an lsmp struct, we just want the size */
        if (!lsmp) {
                /* XXX LOV STACKING call into osc for sizes */
                LBUG();
                return lov_stripe_md_size(stripe_count);
        }
        /* If we are passed an allocated struct but nothing to unpack, free */
        if (*lsmp && !lmm) {
                lov_free_memmd(lsmp);
                return 0;
        }

        /* lmm may still be NULL here (alloc-only path below), so do not
         * dereference it unconditionally; fall back to the default pattern */
        pattern = lmm ? le32_to_cpu(lmm->lmm_pattern) : LOV_PATTERN_RAID0;
        lsm_size = lov_alloc_memmd(lsmp, stripe_count, pattern, magic);
        if (lsm_size < 0)
                return lsm_size;

        /* If we are passed a pointer but nothing to unpack, we only alloc */
        if (!lmm)
                return lsm_size;

        LASSERT(lsm_op_find(magic) != NULL);
        rc = lsm_op_find(magic)->lsm_unpackmd(lov, *lsmp, lmm);
        if (rc) {
                lov_free_memmd(lsmp);
                return rc;
        }

        return lsm_size;
}
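
/*
 * Illustrative sketch (not part of the original file): unpacking an
 * on-disk EA into an in-core lov_stripe_md and releasing it again.
 * The helper name is an assumption made for this example.
 */
#if 0 /* example only, not compiled */
static int lov_unpackmd_example(struct obd_export *exp,
                                struct lov_mds_md *lmm, int lmm_bytes)
{
        struct lov_stripe_md *lsm = NULL;
        int rc;

        /* verifies the little-endian EA and builds *lsmp from it */
        rc = lov_unpackmd(exp, &lsm, lmm, lmm_bytes);
        if (rc < 0)
                return rc;

        /* ... use lsm->lsm_stripe_count, lsm->lsm_oinfo[], ... */

        lov_free_memmd(&lsm);   /* drops the reference taken at allocation */
        return 0;
}
#endif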

static int __lov_setstripe(struct obd_export *exp, int max_lmm_size,
                           struct lov_stripe_md **lsmp,
                           struct lov_user_md *lump)
{
        struct obd_device *obd = class_exp2obd(exp);
        struct lov_obd *lov = &obd->u.lov;
        char buffer[sizeof(struct lov_user_md_v3)];
        struct lov_user_md_v3 *lumv3 = (struct lov_user_md_v3 *)&buffer[0];
        struct lov_user_md_v1 *lumv1 = (struct lov_user_md_v1 *)&buffer[0];
        int lmm_magic;
        __u16 stripe_count;
        int rc;
        int cplen = 0;

        rc = lov_lum_swab_if_needed(lumv3, &lmm_magic, lump);
        if (rc)
                return rc;

        /* in the rest of the tests, as *lumv1 and lumv3 have the same
         * fields, we use lumv1 to avoid code duplication */

        if (lumv1->lmm_pattern == 0) {
                lumv1->lmm_pattern = lov->desc.ld_pattern ?
                        lov->desc.ld_pattern : LOV_PATTERN_RAID0;
        }

        if (lov_pattern(lumv1->lmm_pattern) != LOV_PATTERN_RAID0) {
                CDEBUG(D_IOCTL, "bad userland stripe pattern: %#x\n",
                       lumv1->lmm_pattern);
                return -EINVAL;
        }

        /* 64kB is the largest common page size we see (ia64), and matches the
         * check in lfs */
        if (lumv1->lmm_stripe_size & (LOV_MIN_STRIPE_SIZE - 1)) {
                CDEBUG(D_IOCTL, "stripe size %u not multiple of %u, fixing\n",
                       lumv1->lmm_stripe_size, LOV_MIN_STRIPE_SIZE);
                lumv1->lmm_stripe_size = LOV_MIN_STRIPE_SIZE;
        }

        if ((lumv1->lmm_stripe_offset >= lov->desc.ld_tgt_count) &&
            (lumv1->lmm_stripe_offset !=
             (typeof(lumv1->lmm_stripe_offset))(-1))) {
                CDEBUG(D_IOCTL, "stripe offset %u > number of OSTs %u\n",
                       lumv1->lmm_stripe_offset, lov->desc.ld_tgt_count);
                return -EINVAL;
        }
        stripe_count = lov_get_stripecnt(lov, lmm_magic,
                                         lumv1->lmm_stripe_count);

        if (max_lmm_size) {
                int max_stripes = (max_lmm_size -
                                   lov_mds_md_size(0, lmm_magic)) /
                                   sizeof(struct lov_ost_data_v1);

                if (unlikely(max_stripes < stripe_count)) {
                        CDEBUG(D_IOCTL, "stripe count reset from %d to %d\n",
                               stripe_count, max_stripes);
                        stripe_count = max_stripes;
                }
        }

        if (lmm_magic == LOV_USER_MAGIC_V3) {
                struct pool_desc *pool;

                /* In the function below, .hs_keycmp resolves to
                 * pool_hashkey_keycmp() */
                /* coverity[overrun-buffer-val] */
                pool = lov_find_pool(lov, lumv3->lmm_pool_name);
                if (pool != NULL) {
                        if (lumv3->lmm_stripe_offset !=
                            (typeof(lumv3->lmm_stripe_offset))(-1)) {
                                rc = lov_check_index_in_pool(
                                        lumv3->lmm_stripe_offset, pool);
                                if (rc < 0) {
                                        lov_pool_putref(pool);
                                        return -EINVAL;
                                }
                        }

                        if (stripe_count > pool_tgt_count(pool))
                                stripe_count = pool_tgt_count(pool);

                        lov_pool_putref(pool);
                }
        }

        if (lumv1->lmm_pattern & LOV_PATTERN_F_RELEASED)
                stripe_count = 0;

        rc = lov_alloc_memmd(lsmp, stripe_count, lumv1->lmm_pattern, lmm_magic);

        if (rc >= 0) {
                /* 0 on success; keep -E2BIG if the pool name does not fit */
                rc = 0;
                (*lsmp)->lsm_oinfo[0]->loi_ost_idx = lumv1->lmm_stripe_offset;
                (*lsmp)->lsm_stripe_size = lumv1->lmm_stripe_size;
                if (lmm_magic == LOV_USER_MAGIC_V3) {
                        cplen = strlcpy((*lsmp)->lsm_pool_name,
                                        lumv3->lmm_pool_name,
                                        sizeof((*lsmp)->lsm_pool_name));
                        if (cplen >= sizeof((*lsmp)->lsm_pool_name))
                                rc = -E2BIG;
                }
        }

        return rc;
}

/* Configure object striping information on a new file.
 *
 * @lmmu is a pointer to a user struct with one or more of the fields set to
 * indicate the application preference: lmm_stripe_count, lmm_stripe_size,
 * lmm_stripe_offset, and lmm_stripe_pattern. lmm_magic must be LOV_MAGIC.
 * @lsmp is a pointer to an in-core stripe MD that needs to be filled in.
 */
int lov_setstripe(struct obd_export *exp, int max_lmm_size,
                  struct lov_stripe_md **lsmp, struct lov_user_md *lump)
{
        int rc;
        mm_segment_t seg;

        seg = get_fs();
        set_fs(KERNEL_DS);

        rc = __lov_setstripe(exp, max_lmm_size, lsmp, lump);
        set_fs(seg);
        return rc;
}
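
/*
 * Illustrative sketch (not part of the original file): filling in the
 * user-visible striping request that lov_setstripe() consumes.  The helper
 * name and the particular values are assumptions made for this example.
 */
#if 0 /* example only, not compiled */
static int lov_setstripe_example(struct obd_export *exp,
                                 struct lov_stripe_md **lsmp)
{
        struct lov_user_md_v1 lum = {
                .lmm_magic         = LOV_USER_MAGIC,
                .lmm_pattern       = 0,            /* 0 = default (RAID0) */
                .lmm_stripe_size   = 1024 * 1024,  /* multiple of LOV_MIN_STRIPE_SIZE */
                .lmm_stripe_count  = 2,            /* clamped by lov_get_stripecnt() */
                .lmm_stripe_offset = (typeof(lum.lmm_stripe_offset))(-1), /* any OST */
        };

        /* max_lmm_size == 0 means no extra cap on the packed EA size */
        return lov_setstripe(exp, 0, lsmp, (struct lov_user_md *)&lum);
}
#endif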

int lov_setea(struct obd_export *exp, struct lov_stripe_md **lsmp,
              struct lov_user_md *lump)
{
        int i;
        int rc;
        struct obd_export *oexp;
        struct lov_obd *lov = &exp->exp_obd->u.lov;
        obd_id last_id = 0;
        struct lov_user_ost_data_v1 *lmm_objects;

        if (lump->lmm_magic == LOV_USER_MAGIC_V3)
                lmm_objects = ((struct lov_user_md_v3 *)lump)->lmm_objects;
        else
                lmm_objects = lump->lmm_objects;

        for (i = 0; i < lump->lmm_stripe_count; i++) {
                __u32 len = sizeof(last_id);

                oexp = lov->lov_tgts[lmm_objects[i].l_ost_idx]->ltd_exp;
                rc = obd_get_info(NULL, oexp, sizeof(KEY_LAST_ID), KEY_LAST_ID,
                                  &len, &last_id, NULL);
                if (rc)
                        return rc;
                if (ostid_id(&lmm_objects[i].l_ost_oi) > last_id) {
                        CERROR("Setting EA for object > than last id on "
                               "ost idx %d "DOSTID" > "LPD64"\n",
                               lmm_objects[i].l_ost_idx,
                               POSTID(&lmm_objects[i].l_ost_oi), last_id);
                        return -EINVAL;
                }
        }

        rc = lov_setstripe(exp, 0, lsmp, lump);
        if (rc)
                return rc;

        for (i = 0; i < lump->lmm_stripe_count; i++) {
                (*lsmp)->lsm_oinfo[i]->loi_ost_idx =
                        lmm_objects[i].l_ost_idx;
                (*lsmp)->lsm_oinfo[i]->loi_oi = lmm_objects[i].l_ost_oi;
        }
        return 0;
}

/* Retrieve object striping information.
 *
 * @lump is a pointer to an in-core struct with lmm_ost_count indicating
 * the maximum number of OST indices which will fit in the user buffer.
 * lmm_magic must be LOV_USER_MAGIC.
 */
int lov_getstripe(struct obd_export *exp, struct lov_stripe_md *lsm,
                  struct lov_user_md *lump)
{
        /*
         * XXX huge struct allocated on stack.
         */
        /* we use lov_user_md_v3 because it is larger than lov_user_md_v1 */
        struct lov_user_md_v3 lum;
        struct lov_mds_md *lmmk = NULL;
        int rc, lmm_size;
        int lum_size;
        mm_segment_t seg;

        if (!lsm)
                return -ENODATA;

        /*
         * "Switch to kernel segment" to allow copying from kernel space by
         * copy_{to,from}_user().
         */
        seg = get_fs();
        set_fs(KERNEL_DS);

        /* we only need the header part from user space to get lmm_magic and
         * lmm_stripe_count, (the header part is common to v1 and v3) */
        lum_size = sizeof(struct lov_user_md_v1);
        if (copy_from_user(&lum, lump, lum_size))
                GOTO(out_set, rc = -EFAULT);
        else if ((lum.lmm_magic != LOV_USER_MAGIC) &&
                 (lum.lmm_magic != LOV_USER_MAGIC_V3))
                GOTO(out_set, rc = -EINVAL);

        if (lum.lmm_stripe_count &&
            (lum.lmm_stripe_count < lsm->lsm_stripe_count)) {
                /* Return right size of stripe to user */
                lum.lmm_stripe_count = lsm->lsm_stripe_count;
                rc = copy_to_user(lump, &lum, lum_size);
                GOTO(out_set, rc = -EOVERFLOW);
        }
        rc = lov_packmd(exp, &lmmk, lsm);
        if (rc < 0)
                GOTO(out_set, rc);
        lmm_size = rc;
        rc = 0;

        /* FIXME: Bug 1185 - copy fields properly when structs change */
        /* struct lov_user_md_v3 and struct lov_mds_md_v3 must be the same */
        CLASSERT(sizeof(lum) == sizeof(struct lov_mds_md_v3));
        CLASSERT(sizeof(lum.lmm_objects[0]) == sizeof(lmmk->lmm_objects[0]));

        if ((cpu_to_le32(LOV_MAGIC) != LOV_MAGIC) &&
            ((lmmk->lmm_magic == cpu_to_le32(LOV_MAGIC_V1)) ||
             (lmmk->lmm_magic == cpu_to_le32(LOV_MAGIC_V3)))) {
                lustre_swab_lov_mds_md(lmmk);
                lustre_swab_lov_user_md_objects(
                                (struct lov_user_ost_data *)lmmk->lmm_objects,
                                lmmk->lmm_stripe_count);
        }
        if (lum.lmm_magic == LOV_USER_MAGIC) {
                /* User request for v1, we need skip lmm_pool_name */
                if (lmmk->lmm_magic == LOV_MAGIC_V3) {
                        memmove((char *)(&lmmk->lmm_stripe_count) +
                                sizeof(lmmk->lmm_stripe_count),
                                ((struct lov_mds_md_v3 *)lmmk)->lmm_objects,
                                lmmk->lmm_stripe_count *
                                sizeof(struct lov_ost_data_v1));
                        lmm_size -= LOV_MAXPOOLNAME;
                }
        } else {
                /* if v3 we just have to update the lum_size */
                lum_size = sizeof(struct lov_user_md_v3);
        }

        /* User wasn't expecting this many OST entries */
        if (lum.lmm_stripe_count == 0)
                lmm_size = lum_size;
        else if (lum.lmm_stripe_count < lmmk->lmm_stripe_count)
                GOTO(out_set, rc = -EOVERFLOW);
        /*
         * Have a difference between lov_mds_md & lov_user_md.
         * So we have to re-order the data before copy to user.
         */
        lum.lmm_stripe_count = lmmk->lmm_stripe_count;
        lum.lmm_layout_gen = lmmk->lmm_layout_gen;
        ((struct lov_user_md *)lmmk)->lmm_layout_gen = lum.lmm_layout_gen;
        ((struct lov_user_md *)lmmk)->lmm_stripe_count = lum.lmm_stripe_count;
        if (copy_to_user(lump, lmmk, lmm_size))
                rc = -EFAULT;

        obd_free_diskmd(exp, &lmmk);
out_set:
        set_fs(seg);
        return rc;
}
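
/*
 * Illustrative sketch (not part of the original file): reading back the
 * striping of an lsm through the lov_user_md interface.  The helper name
 * is an assumption; real callers sit in the getstripe ioctl path and pass
 * a user-space buffer.
 */
#if 0 /* example only, not compiled */
static int lov_getstripe_example(struct obd_export *exp,
                                 struct lov_stripe_md *lsm)
{
        struct lov_user_md_v3 lum = {
                .lmm_magic        = LOV_USER_MAGIC,
                .lmm_stripe_count = 0, /* 0 = header only, no OST entries */
        };
        int rc;

        /* lov_getstripe() switches to KERNEL_DS, so a kernel buffer is ok */
        rc = lov_getstripe(exp, lsm, (struct lov_user_md *)&lum);
        if (rc)
                return rc;

        CDEBUG(D_INFO, "stripe_count %u stripe_size %u\n",
               lum.lmm_stripe_count, lum.lmm_stripe_size);
        return 0;
}
#endif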