/*
 * GPL HEADER START
 *
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 * GPL HEADER END
 */
/*
 * Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved.
 * Use is subject to license terms.
 *
 * Copyright (c) 2011, 2015, Intel Corporation.
 */
/*
 * This file is part of Lustre, http://www.lustre.org/
 * Lustre is a trademark of Sun Microsystems, Inc.
 *
 * lustre/llite/dir.c
 *
 * Directory code for lustre client.
 */

#include <linux/fs.h>
#include <linux/pagemap.h>
#include <linux/mm.h>
#include <linux/uaccess.h>
#include <linux/buffer_head.h>   /* for wait_on_buffer */
#include <linux/pagevec.h>
#include <linux/prefetch.h>

#define DEBUG_SUBSYSTEM S_LLITE

#include "../include/obd_support.h"
#include "../include/obd_class.h"
#include "../include/lustre_lib.h"
#include "../include/lustre/lustre_idl.h"
#include "../include/lustre_lite.h"
#include "../include/lustre_dlm.h"
#include "../include/lustre_fid.h"
#include "../include/lustre_kernelcomm.h"
#include "llite_internal.h"

/*
 * (new) readdir implementation overview.
 *
 * The original Lustre readdir implementation cached an exact copy of the raw
 * directory pages on the client. These pages were indexed in the client page
 * cache by the logical offset in the directory file. This design, while very
 * simple and intuitive, had some inherent problems:
 *
 * . it implies that the byte offset of a directory entry serves as a
 * telldir(3)/seekdir(3) cookie, but that offset is not stable: in
 * ext3/htree directory entries may move due to splits, and, more
 * importantly,
 *
 * . it is incompatible with the design of split directories for cmd3,
 * which assumes that names are distributed across nodes based on their
 * hash, so readdir should be done in hash order.
 *
 * The new readdir implementation reads the directory in hash order, and uses
 * the hash of a file name as a telldir/seekdir cookie. This leads to a number
 * of complications:
 *
 * . the hash is not unique, so it cannot be used to index cached directory
 * pages on the client (note that it takes a whole pageful of hash-collided
 * entries to cause two pages to have identical hashes);
 *
 * . the hash is not unique, so it cannot, strictly speaking, be used as an
 * entry cookie. ext3/htree has the same problem and the Lustre
 * implementation mimics its solution: seekdir(hash) positions the directory
 * at the first entry with the given hash.
 *
 * Client side.
 *
 * 0. caching
 *
 * The client caches directory pages using the hash of the first entry as an
 * index. As noted above, the hash is not unique, so this solution doesn't
 * work as is: special processing is needed for "page hash chains" (i.e.,
 * sequences of pages filled with entries all having the same hash value).
 *
 * First, such chains have to be detected. To this end, the server returns to
 * the client the hash of the first entry on the page next to the one
 * returned. When the client detects that this hash is the same as the hash of
 * the first entry on the returned page, a page hash collision has to be
 * handled. Pages in the hash chain, except the first one, are termed
 * "overflow pages".
 *
 * The solution to the index uniqueness problem is to not cache overflow
 * pages. Instead, when a page hash collision is detected, all overflow pages
 * from the emerging chain are immediately requested from the server and
 * placed in a special data structure (struct ll_dir_chain). This data
 * structure is used by ll_readdir() to process entries from overflow pages.
 * When a readdir invocation finishes, the overflow pages are discarded. If
 * the page hash collision chain wasn't completely processed, the next call to
 * readdir will again detect the page hash collision, again read the overflow
 * pages in, process the next portion of entries and again discard the pages.
 * This is not as wasteful as it looks, because, given a reasonable hash, page
 * hash collisions are extremely rare.
 *
 * 1. directory positioning
 *
 * When seekdir(hash) is called, original
 *
 *
 *
 *
 *
 *
 *
 *
 * Server.
 *
 * identification of and access to overflow pages
 *
 * page format
 *
 * A page in an MDS_READPAGE RPC is packed in units of LU_PAGE_SIZE, and each
 * such unit carries a header, lu_dirpage, which describes its start/end hash
 * and whether the page is empty (contains no dir entry) or its hash collides
 * with the next page. After the client receives the reply, several such units
 * are integrated into one dir page of PAGE_SIZE (if PAGE_SIZE is greater than
 * LU_PAGE_SIZE), and the lu_dirpage for this integrated page is adjusted. See
 * lmv_adjust_dirpages().
 *
 */
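
/*
 * Editor's illustration (not part of the original implementation): the
 * hash-ordered scheme above amounts to treating each directory page as a
 * hash range [start, end], with the end hash of one page serving as the
 * resume cookie for the next. A minimal sketch, with hypothetical names,
 * of how a telldir cookie selects a page:
 */
#if 0
struct toy_dir_page {
        __u64 start;    /* hash of the first entry on the page */
        __u64 end;      /* hash of the first entry on the next page */
};

/* Return the index of the page whose hash range covers @cookie. */
static int toy_page_for_cookie(const struct toy_dir_page *pages, int npages,
                               __u64 cookie)
{
        int i;

        for (i = 0; i < npages; i++)
                if (pages[i].start <= cookie && cookie <= pages[i].end)
                        return i;
        return -1;      /* no cached page covers the cookie */
}
#endif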

/* returns the page unlocked, but with a reference */
static int ll_dir_filler(void *_hash, struct page *page0)
{
        struct inode *inode = page0->mapping->host;
        int hash64 = ll_i2sbi(inode)->ll_flags & LL_SBI_64BIT_HASH;
        struct obd_export *exp = ll_i2sbi(inode)->ll_md_exp;
        struct ptlrpc_request *request;
        struct mdt_body *body;
        struct md_op_data *op_data;
        __u64 hash = *((__u64 *)_hash);
        struct page **page_pool;
        struct page *page;
        struct lu_dirpage *dp;
        int max_pages = ll_i2sbi(inode)->ll_md_brw_size >> PAGE_SHIFT;
        int nrdpgs = 0; /* number of pages actually read */
        int npages;
        int i;
        int rc;

        CDEBUG(D_VFSTRACE, "VFS Op:inode="DFID"(%p) hash %llu\n",
               PFID(ll_inode2fid(inode)), inode, hash);

        LASSERT(max_pages > 0 && max_pages <= MD_MAX_BRW_PAGES);

        op_data = ll_prep_md_op_data(NULL, inode, NULL, NULL, 0, 0,
                                     LUSTRE_OPC_ANY, NULL);
        if (IS_ERR(op_data))
                return PTR_ERR(op_data);

        page_pool = kcalloc(max_pages, sizeof(page), GFP_NOFS);
        if (page_pool) {
                page_pool[0] = page0;
        } else {
                page_pool = &page0;
                max_pages = 1;
        }
        for (npages = 1; npages < max_pages; npages++) {
                page = page_cache_alloc_cold(inode->i_mapping);
                if (!page)
                        break;
                page_pool[npages] = page;
        }

        op_data->op_npages = npages;
        op_data->op_offset = hash;
        rc = md_readpage(exp, op_data, page_pool, &request);
        ll_finish_md_op_data(op_data);
        if (rc < 0) {
                /* page0 is special: it was added to the page cache earlier */
                delete_from_page_cache(page0);
        } else if (rc == 0) {
                body = req_capsule_server_get(&request->rq_pill, &RMF_MDT_BODY);
                /* Checked by mdc_readpage() */
                if (body->valid & OBD_MD_FLSIZE)
                        i_size_write(inode, body->size);

                nrdpgs = (request->rq_bulk->bd_nob_transferred + PAGE_SIZE - 1)
                         >> PAGE_SHIFT;
                SetPageUptodate(page0);
        }
        unlock_page(page0);
        ptlrpc_req_finished(request);

        CDEBUG(D_VFSTRACE, "read %d/%d pages\n", nrdpgs, npages);

        for (i = 1; i < npages; i++) {
                unsigned long offset;
                int ret;

                page = page_pool[i];

                if (rc < 0 || i >= nrdpgs) {
                        put_page(page);
                        continue;
                }

                SetPageUptodate(page);

                dp = kmap(page);
                hash = le64_to_cpu(dp->ldp_hash_start);
                kunmap(page);

                offset = hash_x_index(hash, hash64);

                prefetchw(&page->flags);
                ret = add_to_page_cache_lru(page, inode->i_mapping, offset,
                                            GFP_NOFS);
                if (ret == 0) {
                        unlock_page(page);
                } else {
                        CDEBUG(D_VFSTRACE, "page %lu add to page cache failed: %d\n",
                               offset, ret);
                }
                put_page(page);
        }

        if (page_pool != &page0)
                kfree(page_pool);
        return rc;
}
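
/*
 * Editor's note: ll_release_page() below drops the kmap taken by
 * ll_get_dir_page()/ll_dir_page_locate() and the page reference; when
 * @remove is set it also truncates the page out of the page cache so a
 * fresh copy is fetched from the server on the next access (used for
 * collided or otherwise unreusable pages).
 */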
void ll_release_page(struct page *page, int remove)
{
        kunmap(page);
        if (remove) {
                lock_page(page);
                if (likely(page->mapping))
                        truncate_complete_page(page->mapping, page);
                unlock_page(page);
        }
        put_page(page);
}

/*
 * Find, kmap and return the page that contains the given hash.
 */
static struct page *ll_dir_page_locate(struct inode *dir, __u64 *hash,
                                       __u64 *start, __u64 *end)
{
        int hash64 = ll_i2sbi(dir)->ll_flags & LL_SBI_64BIT_HASH;
        struct address_space *mapping = dir->i_mapping;
        /*
         * The complement of the hash is used as an index so that
         * radix_tree_gang_lookup() can be used to find a page with a
         * starting hash _smaller_ than the one we are looking for.
         */
        unsigned long offset = hash_x_index(*hash, hash64);
        struct page *page;
        int found;

        spin_lock_irq(&mapping->tree_lock);
        found = radix_tree_gang_lookup(&mapping->page_tree,
                                       (void **)&page, offset, 1);
        if (found > 0 && !radix_tree_exceptional_entry(page)) {
                struct lu_dirpage *dp;

                get_page(page);
                spin_unlock_irq(&mapping->tree_lock);
                /*
                 * In contrast to find_lock_page() we are sure that the
                 * directory page cannot be truncated (while the DLM lock is
                 * held) and, hence, can avoid a restart.
                 *
                 * In fact, the page cannot be locked here at all, because
                 * ll_dir_filler() does synchronous I/O.
                 */
                wait_on_page_locked(page);
                if (PageUptodate(page)) {
                        dp = kmap(page);
                        if (BITS_PER_LONG == 32 && hash64) {
                                *start = le64_to_cpu(dp->ldp_hash_start) >> 32;
                                *end = le64_to_cpu(dp->ldp_hash_end) >> 32;
                                *hash = *hash >> 32;
                        } else {
                                *start = le64_to_cpu(dp->ldp_hash_start);
                                *end = le64_to_cpu(dp->ldp_hash_end);
                        }
                        LASSERTF(*start <= *hash, "start = %#llx,end = %#llx,hash = %#llx\n",
                                 *start, *end, *hash);
                        CDEBUG(D_VFSTRACE, "page %lu [%llu %llu], hash %llu\n",
                               offset, *start, *end, *hash);
                        if (*hash > *end) {
                                ll_release_page(page, 0);
                                page = NULL;
                        } else if (*end != *start && *hash == *end) {
                                /*
                                 * upon hash collision, remove this page,
                                 * otherwise put the page reference, and
                                 * ll_get_dir_page() will issue an RPC to
                                 * fetch the page we want.
                                 */
                                ll_release_page(page,
                                                le32_to_cpu(dp->ldp_flags) & LDF_COLLIDE);
                                page = NULL;
                        }
                } else {
                        put_page(page);
                        page = ERR_PTR(-EIO);
                }

        } else {
                spin_unlock_irq(&mapping->tree_lock);
                page = NULL;
        }
        return page;
}
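
/*
 * Editor's illustration: why indexing by the complement works for the
 * lookup above. radix_tree_gang_lookup() returns the entry with the
 * smallest index >= the requested offset; with index = ~start_hash the
 * smallest such index belongs to the page with the *largest* start hash
 * <= the hash being sought. A toy model with the tree replaced by a
 * sorted array (hash_x_index() itself may apply further adjustments):
 */
#if 0
/* @indices holds ~start_hash values in ascending order. */
static int toy_gang_lookup(const unsigned long *indices, int n,
                           unsigned long hash)
{
        unsigned long offset = ~hash;
        int i;

        for (i = 0; i < n; i++)
                if (indices[i] >= offset)
                        return i;       /* page with largest start <= hash */
        return -1;
}
#endif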

struct page *ll_get_dir_page(struct inode *dir, __u64 hash,
                             struct ll_dir_chain *chain)
{
        ldlm_policy_data_t policy = {.l_inodebits = {MDS_INODELOCK_UPDATE} };
        struct address_space *mapping = dir->i_mapping;
        struct lustre_handle lockh;
        struct lu_dirpage *dp;
        struct page *page;
        enum ldlm_mode mode;
        int rc;
        __u64 start = 0;
        __u64 end = 0;
        __u64 lhash = hash;
        struct ll_inode_info *lli = ll_i2info(dir);
        int hash64 = ll_i2sbi(dir)->ll_flags & LL_SBI_64BIT_HASH;

        mode = LCK_PR;
        rc = md_lock_match(ll_i2sbi(dir)->ll_md_exp, LDLM_FL_BLOCK_GRANTED,
                           ll_inode2fid(dir), LDLM_IBITS, &policy, mode, &lockh);
        if (!rc) {
                struct ldlm_enqueue_info einfo = {
                        .ei_type = LDLM_IBITS,
                        .ei_mode = mode,
                        .ei_cb_bl = ll_md_blocking_ast,
                        .ei_cb_cp = ldlm_completion_ast,
                };
                struct lookup_intent it = { .it_op = IT_READDIR };
                struct ptlrpc_request *request;
                struct md_op_data *op_data;

                op_data = ll_prep_md_op_data(NULL, dir, dir, NULL, 0, 0,
                                             LUSTRE_OPC_ANY, NULL);
                if (IS_ERR(op_data))
                        return (void *)op_data;

                rc = md_enqueue(ll_i2sbi(dir)->ll_md_exp, &einfo, &it,
                                op_data, &lockh, NULL, 0, NULL, 0);

                ll_finish_md_op_data(op_data);

                request = (struct ptlrpc_request *)it.d.lustre.it_data;
                if (request)
                        ptlrpc_req_finished(request);
                if (rc < 0) {
                        CERROR("lock enqueue: " DFID " at %llu: rc %d\n",
                               PFID(ll_inode2fid(dir)), hash, rc);
                        return ERR_PTR(rc);
                }

                CDEBUG(D_INODE, "setting lr_lvb_inode to inode "DFID"(%p)\n",
                       PFID(ll_inode2fid(dir)), dir);
                md_set_lock_data(ll_i2sbi(dir)->ll_md_exp,
                                 &it.d.lustre.it_lock_handle, dir, NULL);
        } else {
                /* for a cross-ref object, l_ast_data of the lock may not be
                 * set, so we set it here
                 */
                md_set_lock_data(ll_i2sbi(dir)->ll_md_exp, &lockh.cookie,
                                 dir, NULL);
        }
        ldlm_lock_dump_handle(D_OTHER, &lockh);

        mutex_lock(&lli->lli_readdir_mutex);
        page = ll_dir_page_locate(dir, &lhash, &start, &end);
        if (IS_ERR(page)) {
                CERROR("dir page locate: "DFID" at %llu: rc %ld\n",
                       PFID(ll_inode2fid(dir)), lhash, PTR_ERR(page));
                goto out_unlock;
        } else if (page) {
                /*
                 * XXX nikita: not entirely correct handling of a corner case:
                 * suppose the hash chain of entries with hash value HASH
                 * crosses the border between pages P0 and P1. First both P0
                 * and P1 are cached, and seekdir() is called for some entry
                 * from the P0 part of the chain. Later P0 goes out of the
                 * cache. telldir(HASH) happens and finds P1, as it starts
                 * with a matching hash value. The remaining entries from the
                 * P0 part of the chain are skipped. (Is that really a bug?)
                 *
                 * Possible solutions: 0. don't cache P1 in such a case,
                 * handle it as an "overflow" page. 1. invalidate all pages
                 * at once. 2. use HASH|1 as an index for P1.
                 */
                goto hash_collision;
        }

        page = read_cache_page(mapping, hash_x_index(hash, hash64),
                               ll_dir_filler, &lhash);
        if (IS_ERR(page)) {
                CERROR("read cache page: "DFID" at %llu: rc %ld\n",
                       PFID(ll_inode2fid(dir)), hash, PTR_ERR(page));
                goto out_unlock;
        }

        wait_on_page_locked(page);
        (void)kmap(page);
        if (!PageUptodate(page)) {
                CERROR("page not updated: "DFID" at %llu: rc %d\n",
                       PFID(ll_inode2fid(dir)), hash, -5);
                goto fail;
        }
        if (!PageChecked(page))
                /* XXX: check page format later */
                SetPageChecked(page);
        if (PageError(page)) {
                CERROR("page error: "DFID" at %llu: rc %d\n",
                       PFID(ll_inode2fid(dir)), hash, -5);
                goto fail;
        }
hash_collision:
        dp = page_address(page);
        if (BITS_PER_LONG == 32 && hash64) {
                start = le64_to_cpu(dp->ldp_hash_start) >> 32;
                end = le64_to_cpu(dp->ldp_hash_end) >> 32;
                lhash = hash >> 32;
        } else {
                start = le64_to_cpu(dp->ldp_hash_start);
                end = le64_to_cpu(dp->ldp_hash_end);
                lhash = hash;
        }
        if (end == start) {
                LASSERT(start == lhash);
                CWARN("Page-wide hash collision: %llu\n", end);
                if (BITS_PER_LONG == 32 && hash64)
                        CWARN("Real page-wide hash collision at [%llu %llu] with hash %llu\n",
                              le64_to_cpu(dp->ldp_hash_start),
                              le64_to_cpu(dp->ldp_hash_end), hash);
                /*
                 * Fetch the whole overflow chain...
                 *
                 * XXX not yet.
                 */
                goto fail;
        }
out_unlock:
        mutex_unlock(&lli->lli_readdir_mutex);
        ldlm_lock_decref(&lockh, mode);
        return page;

fail:
        ll_release_page(page, 1);
        page = ERR_PTR(-EIO);
        goto out_unlock;
}

/**
 * Return the file type for the given lu_dirent entry: the mode bits
 * carried in the entry's luda_type record are converted to the
 * corresponding DT_* type via IFTODT() for the VFS.
 */
static __u16 ll_dirent_type_get(struct lu_dirent *ent)
{
        __u16 type = 0;
        struct luda_type *lt;
        int len = 0;

        if (le32_to_cpu(ent->lde_attrs) & LUDA_TYPE) {
                const unsigned int align = sizeof(struct luda_type) - 1;

                len = le16_to_cpu(ent->lde_namelen);
                len = (len + align) & ~align;
                lt = (void *)ent->lde_name + len;
                type = IFTODT(le16_to_cpu(lt->lt_type));
        }
        return type;
}
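
/*
 * Editor's illustration of the rounding used above: with
 * align = sizeof(struct luda_type) - 1, (len + align) & ~align rounds
 * the name length up to the next multiple of sizeof(struct luda_type),
 * which is where the luda_type record sits after the name.
 */
#if 0
static unsigned int toy_round_up(unsigned int len, unsigned int size)
{
        unsigned int align = size - 1;  /* size must be a power of two */

        /* e.g. len = 5, size = 4: (5 + 3) & ~3 = 8 */
        return (len + align) & ~align;
}
#endif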

int ll_dir_read(struct inode *inode, struct dir_context *ctx)
{
        struct ll_inode_info *info = ll_i2info(inode);
        struct ll_sb_info *sbi = ll_i2sbi(inode);
        __u64 pos = ctx->pos;
        int api32 = ll_need_32bit_api(sbi);
        int hash64 = sbi->ll_flags & LL_SBI_64BIT_HASH;
        struct page *page;
        struct ll_dir_chain chain;
        int done = 0;
        int rc = 0;

        ll_dir_chain_init(&chain);

        page = ll_get_dir_page(inode, pos, &chain);

        while (rc == 0 && !done) {
                struct lu_dirpage *dp;
                struct lu_dirent *ent;

                if (!IS_ERR(page)) {
                        /*
                         * If the page is empty (end of directory is reached),
                         * use this value.
                         */
                        __u64 hash = MDS_DIR_END_OFF;
                        __u64 next;

                        dp = page_address(page);
                        for (ent = lu_dirent_start(dp); ent && !done;
                             ent = lu_dirent_next(ent)) {
                                __u16 type;
                                int namelen;
                                struct lu_fid fid;
                                __u64 lhash;
                                __u64 ino;

                                /*
                                 * XXX: implement correct swabbing here.
                                 */

                                hash = le64_to_cpu(ent->lde_hash);
                                if (hash < pos)
                                        /*
                                         * Skip until we find the target hash
                                         * value.
                                         */
                                        continue;

                                namelen = le16_to_cpu(ent->lde_namelen);
                                if (namelen == 0)
                                        /*
                                         * Skip dummy record.
                                         */
                                        continue;

                                if (api32 && hash64)
                                        lhash = hash >> 32;
                                else
                                        lhash = hash;
                                fid_le_to_cpu(&fid, &ent->lde_fid);
                                ino = cl_fid_build_ino(&fid, api32);
                                type = ll_dirent_type_get(ent);
                                ctx->pos = lhash;
                                /* For 'll_nfs_get_name_filldir()', it will try
                                 * to access the 'ent' through its 'lde_name',
                                 * so the parameter 'name' for 'ctx->actor()'
                                 * must be part of the 'ent'.
                                 */
                                done = !dir_emit(ctx, ent->lde_name,
                                                 namelen, ino, type);
                        }
                        next = le64_to_cpu(dp->ldp_hash_end);
                        if (!done) {
                                pos = next;
                                if (pos == MDS_DIR_END_OFF) {
                                        /*
                                         * End of directory reached.
                                         */
                                        done = 1;
                                        ll_release_page(page, 0);
                                } else if (1 /* chain is exhausted */) {
                                        /*
                                         * Normal case: continue to the next
                                         * page.
                                         */
                                        ll_release_page(page,
                                                        le32_to_cpu(dp->ldp_flags) &
                                                        LDF_COLLIDE);
                                        next = pos;
                                        page = ll_get_dir_page(inode, pos,
                                                               &chain);
                                } else {
                                        /*
                                         * go into overflow page.
                                         */
                                        LASSERT(le32_to_cpu(dp->ldp_flags) &
                                                LDF_COLLIDE);
                                        ll_release_page(page, 1);
                                }
                        } else {
                                pos = hash;
                                ll_release_page(page, 0);
                        }
                } else {
                        rc = PTR_ERR(page);
                        CERROR("error reading dir "DFID" at %lu: rc %d\n",
                               PFID(&info->lli_fid), (unsigned long)pos, rc);
                }
        }

        ctx->pos = pos;
        ll_dir_chain_fini(&chain);
        return rc;
}

static int ll_readdir(struct file *filp, struct dir_context *ctx)
{
        struct inode *inode = file_inode(filp);
        struct ll_file_data *lfd = LUSTRE_FPRIVATE(filp);
        struct ll_sb_info *sbi = ll_i2sbi(inode);
        __u64 pos = lfd ? lfd->lfd_pos : 0;
        int hash64 = sbi->ll_flags & LL_SBI_64BIT_HASH;
        int api32 = ll_need_32bit_api(sbi);
        int rc;

        CDEBUG(D_VFSTRACE, "VFS Op:inode="DFID"(%p) pos %lu/%llu 32bit_api %d\n",
               PFID(ll_inode2fid(inode)), inode, (unsigned long)pos,
               i_size_read(inode), api32);

        if (pos == MDS_DIR_END_OFF) {
                /*
                 * end-of-file.
                 */
                rc = 0;
                goto out;
        }

        ctx->pos = pos;
        rc = ll_dir_read(inode, ctx);
        if (lfd)
                lfd->lfd_pos = ctx->pos;
        if (ctx->pos == MDS_DIR_END_OFF) {
                if (api32)
                        ctx->pos = LL_DIR_END_OFF_32BIT;
                else
                        ctx->pos = LL_DIR_END_OFF;
        } else {
                if (api32 && hash64)
                        ctx->pos >>= 32;
        }
        filp->f_version = inode->i_version;

out:
        if (!rc)
                ll_stats_ops_tally(sbi, LPROC_LL_READDIR, 1);

        return rc;
}

static int ll_send_mgc_param(struct obd_export *mgc, char *string)
{
        struct mgs_send_param *msp;
        int rc = 0;

        msp = kzalloc(sizeof(*msp), GFP_NOFS);
        if (!msp)
                return -ENOMEM;

        strlcpy(msp->mgs_param, string, sizeof(msp->mgs_param));
        rc = obd_set_info_async(NULL, mgc, sizeof(KEY_SET_INFO), KEY_SET_INFO,
                                sizeof(struct mgs_send_param), msp, NULL);
        if (rc)
                CERROR("Failed to set parameter: %d\n", rc);
        kfree(msp);

        return rc;
}

static int ll_dir_setdirstripe(struct inode *dir, struct lmv_user_md *lump,
                               char *filename)
{
        struct ptlrpc_request *request = NULL;
        struct md_op_data *op_data;
        struct ll_sb_info *sbi = ll_i2sbi(dir);
        int mode;
        int err;

        mode = (~current_umask() & 0755) | S_IFDIR;
        op_data = ll_prep_md_op_data(NULL, dir, NULL, filename,
                                     strlen(filename), mode, LUSTRE_OPC_MKDIR,
                                     lump);
        if (IS_ERR(op_data)) {
                err = PTR_ERR(op_data);
                goto err_exit;
        }

        op_data->op_cli_flags |= CLI_SET_MEA;
        err = md_create(sbi->ll_md_exp, op_data, lump, sizeof(*lump), mode,
                        from_kuid(&init_user_ns, current_fsuid()),
                        from_kgid(&init_user_ns, current_fsgid()),
                        cfs_curproc_cap_pack(), 0, &request);
        ll_finish_md_op_data(op_data);
        if (err)
                goto err_exit;
err_exit:
        ptlrpc_req_finished(request);
        return err;
}

int ll_dir_setstripe(struct inode *inode, struct lov_user_md *lump,
                     int set_default)
{
        struct ll_sb_info *sbi = ll_i2sbi(inode);
        struct md_op_data *op_data;
        struct ptlrpc_request *req = NULL;
        int rc = 0;
        struct lustre_sb_info *lsi = s2lsi(inode->i_sb);
        struct obd_device *mgc = lsi->lsi_mgc;
        int lum_size;

        if (lump) {
                /*
                 * This is coming from userspace, so it should be in
                 * local endian. But the MDS would like it in little
                 * endian, so we swab it before we send it.
                 */
                switch (lump->lmm_magic) {
                case LOV_USER_MAGIC_V1: {
                        if (lump->lmm_magic != cpu_to_le32(LOV_USER_MAGIC_V1))
                                lustre_swab_lov_user_md_v1(lump);
                        lum_size = sizeof(struct lov_user_md_v1);
                        break;
                }
                case LOV_USER_MAGIC_V3: {
                        if (lump->lmm_magic != cpu_to_le32(LOV_USER_MAGIC_V3))
                                lustre_swab_lov_user_md_v3(
                                        (struct lov_user_md_v3 *)lump);
                        lum_size = sizeof(struct lov_user_md_v3);
                        break;
                }
                default: {
                        CDEBUG(D_IOCTL, "bad userland LOV MAGIC: %#08x != %#08x nor %#08x\n",
                               lump->lmm_magic, LOV_USER_MAGIC_V1,
                               LOV_USER_MAGIC_V3);
                        return -EINVAL;
                }
                }
        } else {
                lum_size = sizeof(struct lov_user_md_v1);
        }

        op_data = ll_prep_md_op_data(NULL, inode, NULL, NULL, 0, 0,
                                     LUSTRE_OPC_ANY, NULL);
        if (IS_ERR(op_data))
                return PTR_ERR(op_data);

        if (lump && lump->lmm_magic == cpu_to_le32(LMV_USER_MAGIC))
                op_data->op_cli_flags |= CLI_SET_MEA;

        /* swabbing is done in lov_setstripe() on the server side */
        rc = md_setattr(sbi->ll_md_exp, op_data, lump, lum_size,
                        NULL, 0, &req, NULL);
        ll_finish_md_op_data(op_data);
        ptlrpc_req_finished(req);
        if (rc) {
                if (rc != -EPERM && rc != -EACCES)
                        CERROR("mdc_setattr fails: rc = %d\n", rc);
        }

        /* In the following we use the fact that LOV_USER_MAGIC_V1 and
         * LOV_USER_MAGIC_V3 have the same initial fields so we do not
         * need to make the distinction between the 2 versions
         */
        if (set_default && mgc->u.cli.cl_mgc_mgsexp) {
                char *param = NULL;
                char *buf;

                param = kzalloc(MGS_PARAM_MAXLEN, GFP_NOFS);
                if (!param)
                        return -ENOMEM;

                buf = param;
                /* Get fsname and assume devname to be -MDT0000. */
                ll_get_fsname(inode->i_sb, buf, MTI_NAME_MAXLEN);
                strcat(buf, "-MDT0000.lov");
                buf += strlen(buf);

                /* Set root stripesize */
                sprintf(buf, ".stripesize=%u",
                        lump ? le32_to_cpu(lump->lmm_stripe_size) : 0);
                rc = ll_send_mgc_param(mgc->u.cli.cl_mgc_mgsexp, param);
                if (rc)
                        goto end;

                /* Set root stripecount */
                sprintf(buf, ".stripecount=%hd",
                        lump ? le16_to_cpu(lump->lmm_stripe_count) : 0);
                rc = ll_send_mgc_param(mgc->u.cli.cl_mgc_mgsexp, param);
                if (rc)
                        goto end;

                /* Set root stripeoffset */
                sprintf(buf, ".stripeoffset=%hd",
                        lump ? le16_to_cpu(lump->lmm_stripe_offset) :
                        (typeof(lump->lmm_stripe_offset))(-1));
                rc = ll_send_mgc_param(mgc->u.cli.cl_mgc_mgsexp, param);

end:
                kfree(param);
        }
        return rc;
}
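
/*
 * Editor's note: assuming a filesystem named "lustre" and the example
 * values shown, the parameter strings composed above take the form
 *
 *     lustre-MDT0000.lov.stripesize=1048576
 *     lustre-MDT0000.lov.stripecount=1
 *     lustre-MDT0000.lov.stripeoffset=-1
 *
 * and are sent to the MGS one at a time through ll_send_mgc_param().
 */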

int ll_dir_getstripe(struct inode *inode, struct lov_mds_md **lmmp,
                     int *lmm_size, struct ptlrpc_request **request)
{
        struct ll_sb_info *sbi = ll_i2sbi(inode);
        struct mdt_body *body;
        struct lov_mds_md *lmm = NULL;
        struct ptlrpc_request *req = NULL;
        int rc, lmmsize;
        struct md_op_data *op_data;

        rc = ll_get_default_mdsize(sbi, &lmmsize);
        if (rc)
                return rc;

        op_data = ll_prep_md_op_data(NULL, inode, NULL, NULL,
                                     0, lmmsize, LUSTRE_OPC_ANY,
                                     NULL);
        if (IS_ERR(op_data))
                return PTR_ERR(op_data);

        op_data->op_valid = OBD_MD_FLEASIZE | OBD_MD_FLDIREA;
        rc = md_getattr(sbi->ll_md_exp, op_data, &req);
        ll_finish_md_op_data(op_data);
        if (rc < 0) {
                CDEBUG(D_INFO, "md_getattr failed on inode "DFID": rc %d\n",
                       PFID(ll_inode2fid(inode)), rc);
                goto out;
        }

        body = req_capsule_server_get(&req->rq_pill, &RMF_MDT_BODY);

        lmmsize = body->eadatasize;

        if (!(body->valid & (OBD_MD_FLEASIZE | OBD_MD_FLDIREA)) ||
            lmmsize == 0) {
                rc = -ENODATA;
                goto out;
        }

        lmm = req_capsule_server_sized_get(&req->rq_pill,
                                           &RMF_MDT_MD, lmmsize);

        /*
         * This is coming from the MDS, so is probably in
         * little endian. We convert it to host endian before
         * passing it to userspace.
         */
        /* We don't swab objects for directories */
        switch (le32_to_cpu(lmm->lmm_magic)) {
        case LOV_MAGIC_V1:
                if (cpu_to_le32(LOV_MAGIC) != LOV_MAGIC)
                        lustre_swab_lov_user_md_v1((struct lov_user_md_v1 *)lmm);
                break;
        case LOV_MAGIC_V3:
                if (cpu_to_le32(LOV_MAGIC) != LOV_MAGIC)
                        lustre_swab_lov_user_md_v3((struct lov_user_md_v3 *)lmm);
                break;
        default:
                CERROR("unknown magic: %lX\n", (unsigned long)lmm->lmm_magic);
                rc = -EPROTO;
        }
out:
        *lmmp = lmm;
        *lmm_size = lmmsize;
        *request = req;
        return rc;
}

/*
 * Get the MDT index for the inode.
 */
int ll_get_mdt_idx(struct inode *inode)
{
        struct ll_sb_info *sbi = ll_i2sbi(inode);
        struct md_op_data *op_data;
        int rc, mdtidx;

        op_data = ll_prep_md_op_data(NULL, inode, NULL, NULL, 0,
                                     0, LUSTRE_OPC_ANY, NULL);
        if (IS_ERR(op_data))
                return PTR_ERR(op_data);

        op_data->op_flags |= MF_GET_MDT_IDX;
        rc = md_getattr(sbi->ll_md_exp, op_data, NULL);
        mdtidx = op_data->op_mds;
        ll_finish_md_op_data(op_data);
        if (rc < 0) {
                CDEBUG(D_INFO, "md_getattr_name: %d\n", rc);
                return rc;
        }
        return mdtidx;
}

/**
 * Generic handler to do any pre-copy work.
 *
 * It sends an initial hsm_progress (with extent length == 0) to the
 * coordinator as a first notification that the real work has started.
 *
 * Moreover, for an ARCHIVE request, it will sample the file data version and
 * store it in \a copy.
 *
 * \return 0 on success.
 */
static int ll_ioc_copy_start(struct super_block *sb, struct hsm_copy *copy)
{
        struct ll_sb_info *sbi = ll_s2sbi(sb);
        struct hsm_progress_kernel hpk;
        int rc;

        /* Forge a hsm_progress based on data from copy. */
        hpk.hpk_fid = copy->hc_hai.hai_fid;
        hpk.hpk_cookie = copy->hc_hai.hai_cookie;
        hpk.hpk_extent.offset = copy->hc_hai.hai_extent.offset;
        hpk.hpk_extent.length = 0;
        hpk.hpk_flags = 0;
        hpk.hpk_errval = 0;
        hpk.hpk_data_version = 0;

        /* For an archive request, we need to read the current file version. */
        if (copy->hc_hai.hai_action == HSMA_ARCHIVE) {
                struct inode *inode;
                __u64 data_version = 0;

                /* Get the inode for this fid */
                inode = search_inode_for_lustre(sb, &copy->hc_hai.hai_fid);
                if (IS_ERR(inode)) {
                        hpk.hpk_flags |= HP_FLAG_RETRY;
                        /* hpk_errval must be >= 0 */
                        hpk.hpk_errval = -PTR_ERR(inode);
                        rc = PTR_ERR(inode);
                        goto progress;
                }

                /* Read the current file data version */
                rc = ll_data_version(inode, &data_version, LL_DV_RD_FLUSH);
                iput(inode);
                if (rc != 0) {
                        CDEBUG(D_HSM, "Could not read file data version of "
                               DFID" (rc = %d). Archive request (%#llx) could not be done.\n",
                               PFID(&copy->hc_hai.hai_fid), rc,
                               copy->hc_hai.hai_cookie);
                        hpk.hpk_flags |= HP_FLAG_RETRY;
                        /* hpk_errval must be >= 0 */
                        hpk.hpk_errval = -rc;
                        goto progress;
                }

                /* Store in the hsm_copy for later copytool use.
                 * Always modified even if no lsm.
                 */
                copy->hc_data_version = data_version;
        }

progress:
        /* On error, the request should be considered as completed */
        if (hpk.hpk_errval > 0)
                hpk.hpk_flags |= HP_FLAG_COMPLETED;
        rc = obd_iocontrol(LL_IOC_HSM_PROGRESS, sbi->ll_md_exp, sizeof(hpk),
                           &hpk, NULL);

        return rc;
}

/**
 * Generic handler to do any post-copy work.
 *
 * It will send the last hsm_progress update to the coordinator to inform it
 * that the copy is finished and whether it was successful or not.
 *
 * Moreover,
 * - for an ARCHIVE request, it will sample the file data version and compare
 *   it with the version saved in ll_ioc_copy_start(). If they do not match,
 *   the copy will be considered as failed.
 * - for a RESTORE request, it will sample the file data version and send it
 *   to the coordinator, which is useful if the file was imported as
 *   'released'.
 *
 * \return 0 on success.
 */
static int ll_ioc_copy_end(struct super_block *sb, struct hsm_copy *copy)
{
        struct ll_sb_info *sbi = ll_s2sbi(sb);
        struct hsm_progress_kernel hpk;
        int rc;

        /* If you modify the logic here, also check llapi_hsm_copy_end(). */
        /* Take care: copy->hc_hai.hai_action, len, gid and data are not
         * initialized if copy_end was called with copy == NULL.
         */

        /* Forge a hsm_progress based on data from copy. */
        hpk.hpk_fid = copy->hc_hai.hai_fid;
        hpk.hpk_cookie = copy->hc_hai.hai_cookie;
        hpk.hpk_extent = copy->hc_hai.hai_extent;
        hpk.hpk_flags = copy->hc_flags | HP_FLAG_COMPLETED;
        hpk.hpk_errval = copy->hc_errval;
        hpk.hpk_data_version = 0;

        /* For an archive request, we need to check that the file data was
         * not changed.
         *
         * For a restore request, we need to send the file data version;
         * this is useful when the file was created using hsm_import.
         */
        if (((copy->hc_hai.hai_action == HSMA_ARCHIVE) ||
             (copy->hc_hai.hai_action == HSMA_RESTORE)) &&
            (copy->hc_errval == 0)) {
                struct inode *inode;
                __u64 data_version = 0;

                /* Get the lsm for this fid */
                inode = search_inode_for_lustre(sb, &copy->hc_hai.hai_fid);
                if (IS_ERR(inode)) {
                        hpk.hpk_flags |= HP_FLAG_RETRY;
                        /* hpk_errval must be >= 0 */
                        hpk.hpk_errval = -PTR_ERR(inode);
                        rc = PTR_ERR(inode);
                        goto progress;
                }

                rc = ll_data_version(inode, &data_version, LL_DV_RD_FLUSH);
                iput(inode);
                if (rc) {
                        CDEBUG(D_HSM, "Could not read file data version. Request could not be confirmed.\n");
                        if (hpk.hpk_errval == 0)
                                hpk.hpk_errval = -rc;
                        goto progress;
                }

                /* Store in the hsm_copy for later copytool use.
                 * Always modified even if no lsm.
                 */
                hpk.hpk_data_version = data_version;

                /* The file could have been stripped during archiving, so we
                 * need to check anyway.
                 */
                if ((copy->hc_hai.hai_action == HSMA_ARCHIVE) &&
                    (copy->hc_data_version != data_version)) {
                        CDEBUG(D_HSM, "File data version mismatch. File content was changed during archiving. "
                               DFID", start:%#llx current:%#llx\n",
                               PFID(&copy->hc_hai.hai_fid),
                               copy->hc_data_version, data_version);
                        /* The file was changed, send an error to the cdt. Do
                         * not ask for a retry because if a file is modified
                         * frequently, the cdt will loop on retried archive
                         * requests. The policy engine will ask for a new
                         * archive later, when the file has not been modified
                         * for some tunable time.
                         */
                        /* we do not notify the caller */
                        hpk.hpk_flags &= ~HP_FLAG_RETRY;
                        /* hpk_errval must be >= 0 */
                        hpk.hpk_errval = EBUSY;
                }
        }

progress:
        rc = obd_iocontrol(LL_IOC_HSM_PROGRESS, sbi->ll_md_exp, sizeof(hpk),
                           &hpk, NULL);

        return rc;
}

static int copy_and_ioctl(int cmd, struct obd_export *exp,
                          const void __user *data, size_t size)
{
        void *copy;
        int rc;

        copy = kzalloc(size, GFP_NOFS);
        if (!copy)
                return -ENOMEM;

        if (copy_from_user(copy, data, size)) {
                rc = -EFAULT;
                goto out;
        }

        rc = obd_iocontrol(cmd, exp, size, copy, NULL);
out:
        kfree(copy);

        return rc;
}

static int quotactl_ioctl(struct ll_sb_info *sbi, struct if_quotactl *qctl)
{
        int cmd = qctl->qc_cmd;
        int type = qctl->qc_type;
        int id = qctl->qc_id;
        int valid = qctl->qc_valid;
        int rc = 0;

        switch (cmd) {
        case LUSTRE_Q_INVALIDATE:
        case LUSTRE_Q_FINVALIDATE:
        case Q_QUOTAON:
        case Q_QUOTAOFF:
        case Q_SETQUOTA:
        case Q_SETINFO:
                if (!capable(CFS_CAP_SYS_ADMIN) ||
                    sbi->ll_flags & LL_SBI_RMT_CLIENT)
                        return -EPERM;
                break;
        case Q_GETQUOTA:
                if (((type == USRQUOTA &&
                      !uid_eq(current_euid(), make_kuid(&init_user_ns, id))) ||
                     (type == GRPQUOTA &&
                      !in_egroup_p(make_kgid(&init_user_ns, id)))) &&
                    (!capable(CFS_CAP_SYS_ADMIN) ||
                     sbi->ll_flags & LL_SBI_RMT_CLIENT))
                        return -EPERM;
                break;
        case Q_GETINFO:
                break;
        default:
                CERROR("unsupported quotactl op: %#x\n", cmd);
                return -ENOTTY;
        }

        if (valid != QC_GENERAL) {
                if (sbi->ll_flags & LL_SBI_RMT_CLIENT)
                        return -EOPNOTSUPP;

                if (cmd == Q_GETINFO)
                        qctl->qc_cmd = Q_GETOINFO;
                else if (cmd == Q_GETQUOTA)
                        qctl->qc_cmd = Q_GETOQUOTA;
                else
                        return -EINVAL;

                switch (valid) {
                case QC_MDTIDX:
                        rc = obd_iocontrol(OBD_IOC_QUOTACTL, sbi->ll_md_exp,
                                           sizeof(*qctl), qctl, NULL);
                        break;
                case QC_OSTIDX:
                        rc = obd_iocontrol(OBD_IOC_QUOTACTL, sbi->ll_dt_exp,
                                           sizeof(*qctl), qctl, NULL);
                        break;
                case QC_UUID:
                        rc = obd_iocontrol(OBD_IOC_QUOTACTL, sbi->ll_md_exp,
                                           sizeof(*qctl), qctl, NULL);
                        if (rc == -EAGAIN)
                                rc = obd_iocontrol(OBD_IOC_QUOTACTL,
                                                   sbi->ll_dt_exp,
                                                   sizeof(*qctl), qctl, NULL);
                        break;
                default:
                        rc = -EINVAL;
                        break;
                }

                if (rc)
                        return rc;

                qctl->qc_cmd = cmd;
        } else {
                struct obd_quotactl *oqctl;

                oqctl = kzalloc(sizeof(*oqctl), GFP_NOFS);
                if (!oqctl)
                        return -ENOMEM;

                QCTL_COPY(oqctl, qctl);
                rc = obd_quotactl(sbi->ll_md_exp, oqctl);
                if (rc) {
                        if (rc != -EALREADY && cmd == Q_QUOTAON) {
                                oqctl->qc_cmd = Q_QUOTAOFF;
                                obd_quotactl(sbi->ll_md_exp, oqctl);
                        }
                        kfree(oqctl);
                        return rc;
                }
                /* If QIF_SPACE is not set, client should collect the
                 * space usage from OSSs by itself
                 */
                if (cmd == Q_GETQUOTA &&
                    !(oqctl->qc_dqblk.dqb_valid & QIF_SPACE) &&
                    !oqctl->qc_dqblk.dqb_curspace) {
                        struct obd_quotactl *oqctl_tmp;

                        oqctl_tmp = kzalloc(sizeof(*oqctl_tmp), GFP_NOFS);
                        if (!oqctl_tmp) {
                                rc = -ENOMEM;
                                goto out;
                        }

                        oqctl_tmp->qc_cmd = Q_GETOQUOTA;
                        oqctl_tmp->qc_id = oqctl->qc_id;
                        oqctl_tmp->qc_type = oqctl->qc_type;

                        /* collect space usage from OSTs */
                        oqctl_tmp->qc_dqblk.dqb_curspace = 0;
                        rc = obd_quotactl(sbi->ll_dt_exp, oqctl_tmp);
                        if (!rc || rc == -EREMOTEIO) {
                                oqctl->qc_dqblk.dqb_curspace =
                                        oqctl_tmp->qc_dqblk.dqb_curspace;
                                oqctl->qc_dqblk.dqb_valid |= QIF_SPACE;
                        }

                        /* collect space & inode usage from MDTs */
                        oqctl_tmp->qc_dqblk.dqb_curspace = 0;
                        oqctl_tmp->qc_dqblk.dqb_curinodes = 0;
                        rc = obd_quotactl(sbi->ll_md_exp, oqctl_tmp);
                        if (!rc || rc == -EREMOTEIO) {
                                oqctl->qc_dqblk.dqb_curspace +=
                                        oqctl_tmp->qc_dqblk.dqb_curspace;
                                oqctl->qc_dqblk.dqb_curinodes =
                                        oqctl_tmp->qc_dqblk.dqb_curinodes;
                                oqctl->qc_dqblk.dqb_valid |= QIF_INODES;
                        } else {
                                oqctl->qc_dqblk.dqb_valid &= ~QIF_SPACE;
                        }

                        kfree(oqctl_tmp);
                }
out:
                QCTL_COPY(qctl, oqctl);
                kfree(oqctl);
        }

        return rc;
}

/* This function tries to get a single name component,
 * to send to the server. No actual path traversal is involved,
 * so we limit to NAME_MAX.
 */
static char *ll_getname(const char __user *filename)
{
        int ret = 0, len;
        char *tmp;

        tmp = kzalloc(NAME_MAX + 1, GFP_KERNEL);
        if (!tmp)
                return ERR_PTR(-ENOMEM);

        len = strncpy_from_user(tmp, filename, NAME_MAX + 1);
        if (len < 0)
                ret = len;
        else if (len == 0)
                ret = -ENOENT;
        else if (len > NAME_MAX && tmp[NAME_MAX] != 0)
                ret = -ENAMETOOLONG;

        if (ret) {
                kfree(tmp);
                tmp = ERR_PTR(ret);
        }
        return tmp;
}

#define ll_putname(filename) kfree(filename)
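
/*
 * Editor's note: ll_getname() passes NAME_MAX + 1 as the limit, so
 * strncpy_from_user() returns at most NAME_MAX + 1. A name of exactly
 * NAME_MAX bytes still fits (tmp[NAME_MAX] holds its NUL terminator in
 * the zeroed buffer), while a longer name fills all NAME_MAX + 1 bytes,
 * leaves tmp[NAME_MAX] non-zero, and is rejected with -ENAMETOOLONG.
 */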

static long ll_dir_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
        struct inode *inode = file_inode(file);
        struct ll_sb_info *sbi = ll_i2sbi(inode);
        struct obd_ioctl_data *data;
        int rc = 0;

        CDEBUG(D_VFSTRACE, "VFS Op:inode="DFID"(%p), cmd=%#x\n",
               PFID(ll_inode2fid(inode)), inode, cmd);

        /* asm-ppc{,64} declares TCGETS, et. al. as type 't' not 'T' */
        if (_IOC_TYPE(cmd) == 'T' || _IOC_TYPE(cmd) == 't') /* tty ioctls */
                return -ENOTTY;

        ll_stats_ops_tally(ll_i2sbi(inode), LPROC_LL_IOCTL, 1);
        switch (cmd) {
        case FSFILT_IOC_GETFLAGS:
        case FSFILT_IOC_SETFLAGS:
                return ll_iocontrol(inode, file, cmd, arg);
        case FSFILT_IOC_GETVERSION_OLD:
        case FSFILT_IOC_GETVERSION:
                return put_user(inode->i_generation, (int __user *)arg);
        /* We need to special case any other ioctls we want to handle,
         * to send them to the MDS/OST as appropriate and to properly
         * network encode the arg field.
        case FSFILT_IOC_SETVERSION_OLD:
        case FSFILT_IOC_SETVERSION:
        */
        case LL_IOC_GET_MDTIDX: {
                int mdtidx;

                mdtidx = ll_get_mdt_idx(inode);
                if (mdtidx < 0)
                        return mdtidx;

                if (put_user((int)mdtidx, (int __user *)arg))
                        return -EFAULT;

                return 0;
        }
        case IOC_MDC_LOOKUP: {
                struct ptlrpc_request *request = NULL;
                int namelen, len = 0;
                char *buf = NULL;
                char *filename;
                struct md_op_data *op_data;

                rc = obd_ioctl_getdata(&buf, &len, (void __user *)arg);
                if (rc)
                        return rc;
                data = (void *)buf;

                filename = data->ioc_inlbuf1;
                namelen = strlen(filename);

                if (namelen < 1) {
                        CDEBUG(D_INFO, "IOC_MDC_LOOKUP missing filename\n");
                        rc = -EINVAL;
                        goto out_free;
                }

                op_data = ll_prep_md_op_data(NULL, inode, NULL, filename, namelen,
                                             0, LUSTRE_OPC_ANY, NULL);
                if (IS_ERR(op_data)) {
                        rc = PTR_ERR(op_data);
                        goto out_free;
                }

                op_data->op_valid = OBD_MD_FLID;
                rc = md_getattr_name(sbi->ll_md_exp, op_data, &request);
                ll_finish_md_op_data(op_data);
                if (rc < 0) {
                        CDEBUG(D_INFO, "md_getattr_name: %d\n", rc);
                        goto out_free;
                }
                ptlrpc_req_finished(request);
out_free:
                obd_ioctl_freedata(buf, len);
                return rc;
        }
        case LL_IOC_LMV_SETSTRIPE: {
                struct lmv_user_md *lum;
                char *buf = NULL;
                char *filename;
                int namelen = 0;
                int lumlen = 0;
                int len;
                int rc;

                rc = obd_ioctl_getdata(&buf, &len, (void __user *)arg);
                if (rc)
                        return rc;

                data = (void *)buf;
                if (!data->ioc_inlbuf1 || !data->ioc_inlbuf2 ||
                    data->ioc_inllen1 == 0 || data->ioc_inllen2 == 0) {
                        rc = -EINVAL;
                        goto lmv_out_free;
                }

                filename = data->ioc_inlbuf1;
                namelen = data->ioc_inllen1;

                if (namelen < 1) {
                        CDEBUG(D_INFO, "IOC_MDC_LOOKUP missing filename\n");
                        rc = -EINVAL;
                        goto lmv_out_free;
                }
                lum = (struct lmv_user_md *)data->ioc_inlbuf2;
                lumlen = data->ioc_inllen2;

                if (lum->lum_magic != LMV_USER_MAGIC ||
                    lumlen != sizeof(*lum)) {
                        CERROR("%s: wrong lum magic %x or size %d: rc = %d\n",
                               filename, lum->lum_magic, lumlen, -EINVAL);
                        rc = -EINVAL;
                        goto lmv_out_free;
                }

                /*
                 * ll_dir_setdirstripe() will be used to set the dir stripe:
                 * mdc_create--->mdt_reint_create (with dirstripe)
                 */
                rc = ll_dir_setdirstripe(inode, lum, filename);
lmv_out_free:
                obd_ioctl_freedata(buf, len);
                return rc;
        }
        case LL_IOC_LOV_SETSTRIPE: {
                struct lov_user_md_v3 lumv3;
                struct lov_user_md_v1 *lumv1 = (struct lov_user_md_v1 *)&lumv3;
                struct lov_user_md_v1 __user *lumv1p = (void __user *)arg;
                struct lov_user_md_v3 __user *lumv3p = (void __user *)arg;

                int set_default = 0;

                LASSERT(sizeof(lumv3) == sizeof(*lumv3p));
                LASSERT(sizeof(lumv3.lmm_objects[0]) ==
                        sizeof(lumv3p->lmm_objects[0]));
                /* first try with v1 which is smaller than v3 */
                if (copy_from_user(lumv1, lumv1p, sizeof(*lumv1)))
                        return -EFAULT;

                if (lumv1->lmm_magic == LOV_USER_MAGIC_V3) {
                        if (copy_from_user(&lumv3, lumv3p, sizeof(lumv3)))
                                return -EFAULT;
                }

                if (is_root_inode(inode))
                        set_default = 1;

                /* in both the v1 and v3 cases lumv1 points to the data */
                rc = ll_dir_setstripe(inode, lumv1, set_default);

                return rc;
        }
        case LL_IOC_LMV_GETSTRIPE: {
                struct lmv_user_md __user *lump = (void __user *)arg;
                struct lmv_user_md lum;
                struct lmv_user_md *tmp;
                int lum_size;
                int rc = 0;
                int mdtindex;

                if (copy_from_user(&lum, lump, sizeof(struct lmv_user_md)))
                        return -EFAULT;

                if (lum.lum_magic != LMV_MAGIC_V1)
                        return -EINVAL;

                lum_size = lmv_user_md_size(1, LMV_MAGIC_V1);
                tmp = kzalloc(lum_size, GFP_NOFS);
                if (!tmp) {
                        rc = -ENOMEM;
                        goto free_lmv;
                }

                *tmp = lum;
                tmp->lum_type = LMV_STRIPE_TYPE;
                tmp->lum_stripe_count = 1;
                mdtindex = ll_get_mdt_idx(inode);
                if (mdtindex < 0) {
                        rc = mdtindex;
                        goto free_lmv;
                }

                tmp->lum_stripe_offset = mdtindex;
                tmp->lum_objects[0].lum_mds = mdtindex;
                memcpy(&tmp->lum_objects[0].lum_fid, ll_inode2fid(inode),
                       sizeof(struct lu_fid));
                if (copy_to_user((void __user *)arg, tmp, lum_size)) {
                        rc = -EFAULT;
                        goto free_lmv;
                }
free_lmv:
                kfree(tmp);
                return rc;
        }
        case LL_IOC_LOV_SWAP_LAYOUTS:
                return -EPERM;
        case LL_IOC_OBD_STATFS:
                return ll_obd_statfs(inode, (void __user *)arg);
        case LL_IOC_LOV_GETSTRIPE:
        case LL_IOC_MDC_GETINFO:
        case IOC_MDC_GETFILEINFO:
        case IOC_MDC_GETFILESTRIPE: {
                struct ptlrpc_request *request = NULL;
                struct lov_user_md __user *lump;
                struct lov_mds_md *lmm = NULL;
                struct mdt_body *body;
                char *filename = NULL;
                int lmmsize;

                if (cmd == IOC_MDC_GETFILEINFO ||
                    cmd == IOC_MDC_GETFILESTRIPE) {
                        filename = ll_getname((const char __user *)arg);
                        if (IS_ERR(filename))
                                return PTR_ERR(filename);

                        rc = ll_lov_getstripe_ea_info(inode, filename, &lmm,
                                                      &lmmsize, &request);
                } else {
                        rc = ll_dir_getstripe(inode, &lmm, &lmmsize, &request);
                }

                if (request) {
                        body = req_capsule_server_get(&request->rq_pill,
                                                      &RMF_MDT_BODY);
                        LASSERT(body);
                } else {
                        goto out_req;
                }

                if (rc < 0) {
                        if (rc == -ENODATA && (cmd == IOC_MDC_GETFILEINFO ||
                                               cmd == LL_IOC_MDC_GETINFO)) {
                                rc = 0;
                                goto skip_lmm;
                        } else {
                                goto out_req;
                        }
                }

                if (cmd == IOC_MDC_GETFILESTRIPE ||
                    cmd == LL_IOC_LOV_GETSTRIPE) {
                        lump = (struct lov_user_md __user *)arg;
                } else {
                        struct lov_user_mds_data __user *lmdp;

                        lmdp = (struct lov_user_mds_data __user *)arg;
                        lump = &lmdp->lmd_lmm;
                }
                if (copy_to_user(lump, lmm, lmmsize)) {
                        if (copy_to_user(lump, lmm, sizeof(*lump))) {
                                rc = -EFAULT;
                                goto out_req;
                        }
                        rc = -EOVERFLOW;
                }
skip_lmm:
                if (cmd == IOC_MDC_GETFILEINFO || cmd == LL_IOC_MDC_GETINFO) {
                        struct lov_user_mds_data __user *lmdp;
                        lstat_t st = { 0 };

                        st.st_dev = inode->i_sb->s_dev;
                        st.st_mode = body->mode;
                        st.st_nlink = body->nlink;
                        st.st_uid = body->uid;
                        st.st_gid = body->gid;
                        st.st_rdev = body->rdev;
                        st.st_size = body->size;
                        st.st_blksize = PAGE_SIZE;
                        st.st_blocks = body->blocks;
                        st.st_atime = body->atime;
                        st.st_mtime = body->mtime;
                        st.st_ctime = body->ctime;
                        st.st_ino = inode->i_ino;

                        lmdp = (struct lov_user_mds_data __user *)arg;
                        if (copy_to_user(&lmdp->lmd_st, &st, sizeof(st))) {
                                rc = -EFAULT;
                                goto out_req;
                        }
                }

out_req:
                ptlrpc_req_finished(request);
                if (filename)
                        ll_putname(filename);
                return rc;
        }
        case IOC_LOV_GETINFO: {
                struct lov_user_mds_data __user *lumd;
                struct lov_stripe_md *lsm;
                struct lov_user_md __user *lum;
                struct lov_mds_md *lmm;
                int lmmsize;
                lstat_t st;

                lumd = (struct lov_user_mds_data __user *)arg;
                lum = &lumd->lmd_lmm;

                rc = ll_get_max_mdsize(sbi, &lmmsize);
                if (rc)
                        return rc;

                lmm = libcfs_kvzalloc(lmmsize, GFP_NOFS);
                if (!lmm)
                        return -ENOMEM;
                if (copy_from_user(lmm, lum, lmmsize)) {
                        rc = -EFAULT;
                        goto free_lmm;
                }

                switch (lmm->lmm_magic) {
                case LOV_USER_MAGIC_V1:
                        if (cpu_to_le32(LOV_USER_MAGIC_V1) == LOV_USER_MAGIC_V1)
                                break;
                        /* swab objects first so the stripe count will be sane */
                        lustre_swab_lov_user_md_objects(
                                ((struct lov_user_md_v1 *)lmm)->lmm_objects,
                                ((struct lov_user_md_v1 *)lmm)->lmm_stripe_count);
                        lustre_swab_lov_user_md_v1((struct lov_user_md_v1 *)lmm);
                        break;
                case LOV_USER_MAGIC_V3:
                        if (cpu_to_le32(LOV_USER_MAGIC_V3) == LOV_USER_MAGIC_V3)
                                break;
                        /* swab objects first so the stripe count will be sane */
                        lustre_swab_lov_user_md_objects(
                                ((struct lov_user_md_v3 *)lmm)->lmm_objects,
                                ((struct lov_user_md_v3 *)lmm)->lmm_stripe_count);
                        lustre_swab_lov_user_md_v3((struct lov_user_md_v3 *)lmm);
                        break;
                default:
                        rc = -EINVAL;
                        goto free_lmm;
                }

                rc = obd_unpackmd(sbi->ll_dt_exp, &lsm, lmm, lmmsize);
                if (rc < 0) {
                        rc = -ENOMEM;
                        goto free_lmm;
                }

                /* Perform glimpse_size operation. */
                memset(&st, 0, sizeof(st));

                rc = ll_glimpse_ioctl(sbi, lsm, &st);
                if (rc)
                        goto free_lsm;

                if (copy_to_user(&lumd->lmd_st, &st, sizeof(st))) {
                        rc = -EFAULT;
                        goto free_lsm;
                }

free_lsm:
                obd_free_memmd(sbi->ll_dt_exp, &lsm);
free_lmm:
                kvfree(lmm);
                return rc;
        }
        case OBD_IOC_LLOG_CATINFO: {
                return -EOPNOTSUPP;
        }
        case OBD_IOC_QUOTACHECK: {
                struct obd_quotactl *oqctl;
                int error = 0;

                if (!capable(CFS_CAP_SYS_ADMIN) ||
                    sbi->ll_flags & LL_SBI_RMT_CLIENT)
                        return -EPERM;

                oqctl = kzalloc(sizeof(*oqctl), GFP_NOFS);
                if (!oqctl)
                        return -ENOMEM;
                oqctl->qc_type = arg;
                rc = obd_quotacheck(sbi->ll_md_exp, oqctl);
                if (rc < 0) {
                        CDEBUG(D_INFO, "md_quotacheck failed: rc %d\n", rc);
                        error = rc;
                }

                rc = obd_quotacheck(sbi->ll_dt_exp, oqctl);
                if (rc < 0)
                        CDEBUG(D_INFO, "obd_quotacheck failed: rc %d\n", rc);

                kfree(oqctl);
                return error ?: rc;
        }
        case OBD_IOC_POLL_QUOTACHECK: {
                struct if_quotacheck *check;

                if (!capable(CFS_CAP_SYS_ADMIN) ||
                    sbi->ll_flags & LL_SBI_RMT_CLIENT)
                        return -EPERM;

                check = kzalloc(sizeof(*check), GFP_NOFS);
                if (!check)
                        return -ENOMEM;

                rc = obd_iocontrol(cmd, sbi->ll_md_exp, 0, (void *)check,
                                   NULL);
                if (rc) {
                        CDEBUG(D_QUOTA, "mdc ioctl %d failed: %d\n", cmd, rc);
                        if (copy_to_user((void __user *)arg, check,
                                         sizeof(*check)))
                                CDEBUG(D_QUOTA, "copy_to_user failed\n");
                        goto out_poll;
                }

                rc = obd_iocontrol(cmd, sbi->ll_dt_exp, 0, (void *)check,
                                   NULL);
                if (rc) {
                        CDEBUG(D_QUOTA, "osc ioctl %d failed: %d\n", cmd, rc);
                        if (copy_to_user((void __user *)arg, check,
                                         sizeof(*check)))
                                CDEBUG(D_QUOTA, "copy_to_user failed\n");
                        goto out_poll;
                }
out_poll:
                kfree(check);
                return rc;
        }
        case LL_IOC_QUOTACTL: {
                struct if_quotactl *qctl;

                qctl = kzalloc(sizeof(*qctl), GFP_NOFS);
                if (!qctl)
                        return -ENOMEM;

                if (copy_from_user(qctl, (void __user *)arg, sizeof(*qctl))) {
                        rc = -EFAULT;
                        goto out_quotactl;
                }

                rc = quotactl_ioctl(sbi, qctl);

                if (rc == 0 && copy_to_user((void __user *)arg, qctl,
                                            sizeof(*qctl)))
                        rc = -EFAULT;

out_quotactl:
                kfree(qctl);
                return rc;
        }
        case OBD_IOC_GETDTNAME:
        case OBD_IOC_GETMDNAME:
                return ll_get_obd_name(inode, cmd, arg);
        case LL_IOC_FLUSHCTX:
                return ll_flush_ctx(inode);
#ifdef CONFIG_FS_POSIX_ACL
        case LL_IOC_RMTACL: {
                if (sbi->ll_flags & LL_SBI_RMT_CLIENT && is_root_inode(inode)) {
                        struct ll_file_data *fd = LUSTRE_FPRIVATE(file);

                        rc = rct_add(&sbi->ll_rct, current_pid(), arg);
                        if (!rc)
                                fd->fd_flags |= LL_FILE_RMTACL;
                        return rc;
                } else {
                        return 0;
                }
        }
#endif
        case LL_IOC_GETOBDCOUNT: {
                int count, vallen;
                struct obd_export *exp;

                if (copy_from_user(&count, (int __user *)arg, sizeof(int)))
                        return -EFAULT;

                /* get ost count when count is zero, get mdt count otherwise */
                exp = count ? sbi->ll_md_exp : sbi->ll_dt_exp;
                vallen = sizeof(count);
                rc = obd_get_info(NULL, exp, sizeof(KEY_TGT_COUNT),
                                  KEY_TGT_COUNT, &vallen, &count, NULL);
                if (rc) {
                        CERROR("get target count failed: %d\n", rc);
                        return rc;
                }

                if (copy_to_user((int __user *)arg, &count, sizeof(int)))
                        return -EFAULT;

                return 0;
        }
        case LL_IOC_PATH2FID:
                if (copy_to_user((void __user *)arg, ll_inode2fid(inode),
                                 sizeof(struct lu_fid)))
                        return -EFAULT;
                return 0;
        case LL_IOC_GET_CONNECT_FLAGS: {
                return obd_iocontrol(cmd, sbi->ll_md_exp, 0, NULL,
                                     (void __user *)arg);
        }
        case OBD_IOC_CHANGELOG_SEND:
        case OBD_IOC_CHANGELOG_CLEAR:
                if (!capable(CFS_CAP_SYS_ADMIN))
                        return -EPERM;

                rc = copy_and_ioctl(cmd, sbi->ll_md_exp, (void __user *)arg,
                                    sizeof(struct ioc_changelog));
                return rc;
        case OBD_IOC_FID2PATH:
                return ll_fid2path(inode, (void __user *)arg);
        case LL_IOC_HSM_REQUEST: {
                struct hsm_user_request *hur;
                ssize_t totalsize;

                hur = memdup_user((void __user *)arg, sizeof(*hur));
                if (IS_ERR(hur))
                        return PTR_ERR(hur);

                /* Compute the whole struct size */
                totalsize = hur_len(hur);
                kfree(hur);
                if (totalsize < 0)
                        return -E2BIG;

                /* Final size will be more than double totalsize */
                if (totalsize >= MDS_MAXREQSIZE / 3)
                        return -E2BIG;

                hur = libcfs_kvzalloc(totalsize, GFP_NOFS);
                if (!hur)
                        return -ENOMEM;

                /* Copy the whole struct */
                if (copy_from_user(hur, (void __user *)arg, totalsize)) {
                        kvfree(hur);
                        return -EFAULT;
                }

                if (hur->hur_request.hr_action == HUA_RELEASE) {
                        const struct lu_fid *fid;
                        struct inode *f;
                        int i;

                        for (i = 0; i < hur->hur_request.hr_itemcount; i++) {
                                fid = &hur->hur_user_item[i].hui_fid;
                                f = search_inode_for_lustre(inode->i_sb, fid);
                                if (IS_ERR(f)) {
                                        rc = PTR_ERR(f);
                                        break;
                                }

                                rc = ll_hsm_release(f);
                                iput(f);
                                if (rc != 0)
                                        break;
                        }
                } else {
                        rc = obd_iocontrol(cmd, ll_i2mdexp(inode), totalsize,
                                           hur, NULL);
                }

                kvfree(hur);

                return rc;
        }
        case LL_IOC_HSM_PROGRESS: {
                struct hsm_progress_kernel hpk;
                struct hsm_progress hp;

                if (copy_from_user(&hp, (void __user *)arg, sizeof(hp)))
                        return -EFAULT;

                hpk.hpk_fid = hp.hp_fid;
                hpk.hpk_cookie = hp.hp_cookie;
                hpk.hpk_extent = hp.hp_extent;
                hpk.hpk_flags = hp.hp_flags;
                hpk.hpk_errval = hp.hp_errval;
                hpk.hpk_data_version = 0;

                /* File may not exist in Lustre; all progress
                 * reported to Lustre root
                 */
                rc = obd_iocontrol(cmd, sbi->ll_md_exp, sizeof(hpk), &hpk,
                                   NULL);
                return rc;
        }
        case LL_IOC_HSM_CT_START:
                if (!capable(CFS_CAP_SYS_ADMIN))
                        return -EPERM;

                rc = copy_and_ioctl(cmd, sbi->ll_md_exp, (void __user *)arg,
                                    sizeof(struct lustre_kernelcomm));
                return rc;

        case LL_IOC_HSM_COPY_START: {
                struct hsm_copy *copy;
                int rc;

                copy = memdup_user((char __user *)arg, sizeof(*copy));
                if (IS_ERR(copy))
                        return PTR_ERR(copy);

                rc = ll_ioc_copy_start(inode->i_sb, copy);
                if (copy_to_user((char __user *)arg, copy, sizeof(*copy)))
                        rc = -EFAULT;

                kfree(copy);
                return rc;
        }
        case LL_IOC_HSM_COPY_END: {
                struct hsm_copy *copy;
                int rc;

                copy = memdup_user((char __user *)arg, sizeof(*copy));
                if (IS_ERR(copy))
                        return PTR_ERR(copy);

                rc = ll_ioc_copy_end(inode->i_sb, copy);
                if (copy_to_user((char __user *)arg, copy, sizeof(*copy)))
                        rc = -EFAULT;

                kfree(copy);
                return rc;
        }
        default:
                return obd_iocontrol(cmd, sbi->ll_dt_exp, 0, NULL,
                                     (void __user *)arg);
        }
}

static loff_t ll_dir_seek(struct file *file, loff_t offset, int origin)
{
        struct inode *inode = file->f_mapping->host;
        struct ll_file_data *fd = LUSTRE_FPRIVATE(file);
        struct ll_sb_info *sbi = ll_i2sbi(inode);
        int api32 = ll_need_32bit_api(sbi);
        loff_t ret = -EINVAL;

        switch (origin) {
        case SEEK_SET:
                break;
        case SEEK_CUR:
                offset += file->f_pos;
                break;
        case SEEK_END:
                if (offset > 0)
                        goto out;
                if (api32)
                        offset += LL_DIR_END_OFF_32BIT;
                else
                        offset += LL_DIR_END_OFF;
                break;
        default:
                goto out;
        }

        if (offset >= 0 &&
            ((api32 && offset <= LL_DIR_END_OFF_32BIT) ||
             (!api32 && offset <= LL_DIR_END_OFF))) {
                if (offset != file->f_pos) {
                        if ((api32 && offset == LL_DIR_END_OFF_32BIT) ||
                            (!api32 && offset == LL_DIR_END_OFF))
                                fd->lfd_pos = MDS_DIR_END_OFF;
                        else if (api32 && sbi->ll_flags & LL_SBI_64BIT_HASH)
                                fd->lfd_pos = offset << 32;
                        else
                                fd->lfd_pos = offset;
                        file->f_pos = offset;
                        file->f_version = 0;
                }
                ret = offset;
        }

out:
        return ret;
}

static int ll_dir_open(struct inode *inode, struct file *file)
{
        return ll_file_open(inode, file);
}

static int ll_dir_release(struct inode *inode, struct file *file)
{
        return ll_file_release(inode, file);
}

const struct file_operations ll_dir_operations = {
        .llseek         = ll_dir_seek,
        .open           = ll_dir_open,
        .release        = ll_dir_release,
        .read           = generic_read_dir,
        .iterate_shared = ll_readdir,
        .unlocked_ioctl = ll_dir_ioctl,
        .fsync          = ll_fsync,
};