staging: lustre: Coalesce string fragments
[deliverable/linux.git] / drivers / staging / lustre / lustre / llite / llite_lib.c
1 /*
2 * GPL HEADER START
3 *
4 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 only,
8 * as published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope that it will be useful, but
11 * WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 * General Public License version 2 for more details (a copy is included
14 * in the LICENSE file that accompanied this code).
15 *
16 * You should have received a copy of the GNU General Public License
17 * version 2 along with this program; If not, see
18 * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
19 *
20 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
21 * CA 95054 USA or visit www.sun.com if you need additional information or
22 * have any questions.
23 *
24 * GPL HEADER END
25 */
26 /*
27 * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
28 * Use is subject to license terms.
29 *
30 * Copyright (c) 2011, 2012, Intel Corporation.
31 */
32 /*
33 * This file is part of Lustre, http://www.lustre.org/
34 * Lustre is a trademark of Sun Microsystems, Inc.
35 *
36 * lustre/llite/llite_lib.c
37 *
38 * Lustre Light Super operations
39 */
40
41 #define DEBUG_SUBSYSTEM S_LLITE
42
43 #include <linux/module.h>
44 #include <linux/statfs.h>
45 #include <linux/types.h>
46 #include <linux/mm.h>
47
48 #include "../include/lustre_lite.h"
49 #include "../include/lustre_ha.h"
50 #include "../include/lustre_dlm.h"
51 #include "../include/lprocfs_status.h"
52 #include "../include/lustre_disk.h"
53 #include "../include/lustre_param.h"
54 #include "../include/lustre_log.h"
55 #include "../include/cl_object.h"
56 #include "../include/obd_cksum.h"
57 #include "llite_internal.h"
58
59 struct kmem_cache *ll_file_data_slab;
60 struct proc_dir_entry *proc_lustre_fs_root;
61
62 static LIST_HEAD(ll_super_blocks);
63 static DEFINE_SPINLOCK(ll_sb_lock);
64
65 #ifndef log2
66 #define log2(n) ffz(~(n))
67 #endif
68
69 static struct ll_sb_info *ll_init_sbi(void)
70 {
71 struct ll_sb_info *sbi = NULL;
72 unsigned long pages;
73 unsigned long lru_page_max;
74 struct sysinfo si;
75 class_uuid_t uuid;
76 int i;
77
78 sbi = kzalloc(sizeof(*sbi), GFP_NOFS);
79 if (!sbi)
80 return NULL;
81
82 spin_lock_init(&sbi->ll_lock);
83 mutex_init(&sbi->ll_lco.lco_lock);
84 spin_lock_init(&sbi->ll_pp_extent_lock);
85 spin_lock_init(&sbi->ll_process_lock);
86 sbi->ll_rw_stats_on = 0;
87
88 si_meminfo(&si);
89 pages = si.totalram - si.totalhigh;
90 if (pages >> (20 - PAGE_CACHE_SHIFT) < 512) {
91 lru_page_max = pages / 2;
92 } else {
93 lru_page_max = (pages / 4) * 3;
94 }
95
96 /* initialize lru data */
97 atomic_set(&sbi->ll_cache.ccc_users, 0);
98 sbi->ll_cache.ccc_lru_max = lru_page_max;
99 atomic_set(&sbi->ll_cache.ccc_lru_left, lru_page_max);
100 spin_lock_init(&sbi->ll_cache.ccc_lru_lock);
101 INIT_LIST_HEAD(&sbi->ll_cache.ccc_lru);
102
103 sbi->ll_ra_info.ra_max_pages_per_file = min(pages / 32,
104 SBI_DEFAULT_READAHEAD_MAX);
105 sbi->ll_ra_info.ra_max_pages = sbi->ll_ra_info.ra_max_pages_per_file;
106 sbi->ll_ra_info.ra_max_read_ahead_whole_pages =
107 SBI_DEFAULT_READAHEAD_WHOLE_MAX;
108 INIT_LIST_HEAD(&sbi->ll_conn_chain);
109 INIT_LIST_HEAD(&sbi->ll_orphan_dentry_list);
110
111 ll_generate_random_uuid(uuid);
112 class_uuid_unparse(uuid, &sbi->ll_sb_uuid);
113 CDEBUG(D_CONFIG, "generated uuid: %s\n", sbi->ll_sb_uuid.uuid);
114
115 spin_lock(&ll_sb_lock);
116 list_add_tail(&sbi->ll_list, &ll_super_blocks);
117 spin_unlock(&ll_sb_lock);
118
119 sbi->ll_flags |= LL_SBI_VERBOSE;
120 sbi->ll_flags |= LL_SBI_CHECKSUM;
121
122 sbi->ll_flags |= LL_SBI_LRU_RESIZE;
123
124 for (i = 0; i <= LL_PROCESS_HIST_MAX; i++) {
125 spin_lock_init(&sbi->ll_rw_extents_info.pp_extents[i].
126 pp_r_hist.oh_lock);
127 spin_lock_init(&sbi->ll_rw_extents_info.pp_extents[i].
128 pp_w_hist.oh_lock);
129 }
130
131 /* metadata statahead is enabled by default */
132 sbi->ll_sa_max = LL_SA_RPC_DEF;
133 atomic_set(&sbi->ll_sa_total, 0);
134 atomic_set(&sbi->ll_sa_wrong, 0);
135 atomic_set(&sbi->ll_agl_total, 0);
136 sbi->ll_flags |= LL_SBI_AGL_ENABLED;
137
138 return sbi;
139 }
140
141 static void ll_free_sbi(struct super_block *sb)
142 {
143 struct ll_sb_info *sbi = ll_s2sbi(sb);
144
145 if (sbi != NULL) {
146 spin_lock(&ll_sb_lock);
147 list_del(&sbi->ll_list);
148 spin_unlock(&ll_sb_lock);
149 OBD_FREE(sbi, sizeof(*sbi));
150 }
151 }
152
153 static int client_common_fill_super(struct super_block *sb, char *md, char *dt,
154 struct vfsmount *mnt)
155 {
156 struct inode *root = NULL;
157 struct ll_sb_info *sbi = ll_s2sbi(sb);
158 struct obd_device *obd;
159 struct obd_capa *oc = NULL;
160 struct obd_statfs *osfs = NULL;
161 struct ptlrpc_request *request = NULL;
162 struct obd_connect_data *data = NULL;
163 struct obd_uuid *uuid;
164 struct md_op_data *op_data;
165 struct lustre_md lmd;
166 u64 valid;
167 int size, err, checksum;
168
169 obd = class_name2obd(md);
170 if (!obd) {
171 CERROR("MD %s: not setup or attached\n", md);
172 return -EINVAL;
173 }
174
175 data = kzalloc(sizeof(*data), GFP_NOFS);
176 if (!data)
177 return -ENOMEM;
178
179 osfs = kzalloc(sizeof(*osfs), GFP_NOFS);
180 if (!osfs) {
181 OBD_FREE_PTR(data);
182 return -ENOMEM;
183 }
184
185 if (proc_lustre_fs_root) {
186 err = lprocfs_register_mountpoint(proc_lustre_fs_root, sb,
187 dt, md);
188 if (err < 0)
189 CERROR("could not register mount in /proc/fs/lustre\n");
190 }
191
192 /* indicate the features supported by this client */
193 data->ocd_connect_flags = OBD_CONNECT_IBITS | OBD_CONNECT_NODEVOH |
194 OBD_CONNECT_ATTRFID |
195 OBD_CONNECT_VERSION | OBD_CONNECT_BRW_SIZE |
196 OBD_CONNECT_MDS_CAPA | OBD_CONNECT_OSS_CAPA |
197 OBD_CONNECT_CANCELSET | OBD_CONNECT_FID |
198 OBD_CONNECT_AT | OBD_CONNECT_LOV_V3 |
199 OBD_CONNECT_RMT_CLIENT | OBD_CONNECT_VBR |
200 OBD_CONNECT_FULL20 | OBD_CONNECT_64BITHASH|
201 OBD_CONNECT_EINPROGRESS |
202 OBD_CONNECT_JOBSTATS | OBD_CONNECT_LVB_TYPE |
203 OBD_CONNECT_LAYOUTLOCK |
204 OBD_CONNECT_PINGLESS |
205 OBD_CONNECT_MAX_EASIZE |
206 OBD_CONNECT_FLOCK_DEAD |
207 OBD_CONNECT_DISP_STRIPE;
208
209 if (sbi->ll_flags & LL_SBI_SOM_PREVIEW)
210 data->ocd_connect_flags |= OBD_CONNECT_SOM;
211
212 if (sbi->ll_flags & LL_SBI_LRU_RESIZE)
213 data->ocd_connect_flags |= OBD_CONNECT_LRU_RESIZE;
214 #ifdef CONFIG_FS_POSIX_ACL
215 data->ocd_connect_flags |= OBD_CONNECT_ACL | OBD_CONNECT_UMASK;
216 #endif
217
218 if (OBD_FAIL_CHECK(OBD_FAIL_MDC_LIGHTWEIGHT))
219 /* flag mdc connection as lightweight, only used for test
220 * purpose, use with care */
221 data->ocd_connect_flags |= OBD_CONNECT_LIGHTWEIGHT;
222
223 data->ocd_ibits_known = MDS_INODELOCK_FULL;
224 data->ocd_version = LUSTRE_VERSION_CODE;
225
226 if (sb->s_flags & MS_RDONLY)
227 data->ocd_connect_flags |= OBD_CONNECT_RDONLY;
228 if (sbi->ll_flags & LL_SBI_USER_XATTR)
229 data->ocd_connect_flags |= OBD_CONNECT_XATTR;
230
231 #ifdef HAVE_MS_FLOCK_LOCK
232 /* force vfs to use lustre handler for flock() calls - bug 10743 */
233 sb->s_flags |= MS_FLOCK_LOCK;
234 #endif
235 #ifdef MS_HAS_NEW_AOPS
236 sb->s_flags |= MS_HAS_NEW_AOPS;
237 #endif
238
239 if (sbi->ll_flags & LL_SBI_FLOCK)
240 sbi->ll_fop = &ll_file_operations_flock;
241 else if (sbi->ll_flags & LL_SBI_LOCALFLOCK)
242 sbi->ll_fop = &ll_file_operations;
243 else
244 sbi->ll_fop = &ll_file_operations_noflock;
245
246 /* real client */
247 data->ocd_connect_flags |= OBD_CONNECT_REAL;
248 if (sbi->ll_flags & LL_SBI_RMT_CLIENT)
249 data->ocd_connect_flags |= OBD_CONNECT_RMT_CLIENT_FORCE;
250
251 data->ocd_brw_size = MD_MAX_BRW_SIZE;
252
253 err = obd_connect(NULL, &sbi->ll_md_exp, obd, &sbi->ll_sb_uuid,
254 data, NULL);
255 if (err == -EBUSY) {
256 LCONSOLE_ERROR_MSG(0x14f, "An MDT (md %s) is performing recovery, of which this client is not a part. Please wait for recovery to complete, abort, or time out.\n",
257 md);
258 goto out;
259 } else if (err) {
260 CERROR("cannot connect to %s: rc = %d\n", md, err);
261 goto out;
262 }
263
264 sbi->ll_md_exp->exp_connect_data = *data;
265
266 err = obd_fid_init(sbi->ll_md_exp->exp_obd, sbi->ll_md_exp,
267 LUSTRE_SEQ_METADATA);
268 if (err) {
269 CERROR("%s: Can't init metadata layer FID infrastructure, rc = %d\n",
270 sbi->ll_md_exp->exp_obd->obd_name, err);
271 goto out_md;
272 }
273
274 /* For mount, we only need fs info from MDT0, and also in DNE, it
275 * can make sure the client can be mounted as long as MDT0 is
276 * available */
277 err = obd_statfs(NULL, sbi->ll_md_exp, osfs,
278 cfs_time_shift_64(-OBD_STATFS_CACHE_SECONDS),
279 OBD_STATFS_FOR_MDT0);
280 if (err)
281 goto out_md_fid;
282
283 /* This needs to be after statfs to ensure connect has finished.
284 * Note that "data" does NOT contain the valid connect reply.
285 * If connecting to a 1.8 server there will be no LMV device, so
286 * we can access the MDC export directly and exp_connect_flags will
287 * be non-zero, but if accessing an upgraded 2.1 server it will
288 * have the correct flags filled in.
289 * XXX: fill in the LMV exp_connect_flags from MDC(s). */
290 valid = exp_connect_flags(sbi->ll_md_exp) & CLIENT_CONNECT_MDT_REQD;
291 if (exp_connect_flags(sbi->ll_md_exp) != 0 &&
292 valid != CLIENT_CONNECT_MDT_REQD) {
293 char *buf;
294
295 buf = kzalloc(PAGE_CACHE_SIZE, GFP_KERNEL);
296 obd_connect_flags2str(buf, PAGE_CACHE_SIZE,
297 valid ^ CLIENT_CONNECT_MDT_REQD, ",");
298 LCONSOLE_ERROR_MSG(0x170, "Server %s does not support feature(s) needed for correct operation of this client (%s). Please upgrade server or downgrade client.\n",
299 sbi->ll_md_exp->exp_obd->obd_name, buf);
300 OBD_FREE(buf, PAGE_CACHE_SIZE);
301 err = -EPROTO;
302 goto out_md_fid;
303 }
304
305 size = sizeof(*data);
306 err = obd_get_info(NULL, sbi->ll_md_exp, sizeof(KEY_CONN_DATA),
307 KEY_CONN_DATA, &size, data, NULL);
308 if (err) {
309 CERROR("%s: Get connect data failed: rc = %d\n",
310 sbi->ll_md_exp->exp_obd->obd_name, err);
311 goto out_md_fid;
312 }
313
314 LASSERT(osfs->os_bsize);
315 sb->s_blocksize = osfs->os_bsize;
316 sb->s_blocksize_bits = log2(osfs->os_bsize);
317 sb->s_magic = LL_SUPER_MAGIC;
318 sb->s_maxbytes = MAX_LFS_FILESIZE;
319 sbi->ll_namelen = osfs->os_namelen;
320 sbi->ll_max_rw_chunk = LL_DEFAULT_MAX_RW_CHUNK;
321
322 if ((sbi->ll_flags & LL_SBI_USER_XATTR) &&
323 !(data->ocd_connect_flags & OBD_CONNECT_XATTR)) {
324 LCONSOLE_INFO("Disabling user_xattr feature because it is not supported on the server\n");
325 sbi->ll_flags &= ~LL_SBI_USER_XATTR;
326 }
327
328 if (data->ocd_connect_flags & OBD_CONNECT_ACL) {
329 #ifdef MS_POSIXACL
330 sb->s_flags |= MS_POSIXACL;
331 #endif
332 sbi->ll_flags |= LL_SBI_ACL;
333 } else {
334 LCONSOLE_INFO("client wants to enable acl, but mdt not!\n");
335 #ifdef MS_POSIXACL
336 sb->s_flags &= ~MS_POSIXACL;
337 #endif
338 sbi->ll_flags &= ~LL_SBI_ACL;
339 }
340
341 if (data->ocd_connect_flags & OBD_CONNECT_RMT_CLIENT) {
342 if (!(sbi->ll_flags & LL_SBI_RMT_CLIENT)) {
343 sbi->ll_flags |= LL_SBI_RMT_CLIENT;
344 LCONSOLE_INFO("client is set as remote by default.\n");
345 }
346 } else {
347 if (sbi->ll_flags & LL_SBI_RMT_CLIENT) {
348 sbi->ll_flags &= ~LL_SBI_RMT_CLIENT;
349 LCONSOLE_INFO("client claims to be remote, but server rejected, forced to be local.\n");
350 }
351 }
352
353 if (data->ocd_connect_flags & OBD_CONNECT_MDS_CAPA) {
354 LCONSOLE_INFO("client enabled MDS capability!\n");
355 sbi->ll_flags |= LL_SBI_MDS_CAPA;
356 }
357
358 if (data->ocd_connect_flags & OBD_CONNECT_OSS_CAPA) {
359 LCONSOLE_INFO("client enabled OSS capability!\n");
360 sbi->ll_flags |= LL_SBI_OSS_CAPA;
361 }
362
363 if (data->ocd_connect_flags & OBD_CONNECT_64BITHASH)
364 sbi->ll_flags |= LL_SBI_64BIT_HASH;
365
366 if (data->ocd_connect_flags & OBD_CONNECT_BRW_SIZE)
367 sbi->ll_md_brw_size = data->ocd_brw_size;
368 else
369 sbi->ll_md_brw_size = PAGE_CACHE_SIZE;
370
371 if (data->ocd_connect_flags & OBD_CONNECT_LAYOUTLOCK) {
372 LCONSOLE_INFO("Layout lock feature supported.\n");
373 sbi->ll_flags |= LL_SBI_LAYOUT_LOCK;
374 }
375
376 if (data->ocd_ibits_known & MDS_INODELOCK_XATTR) {
377 if (!(data->ocd_connect_flags & OBD_CONNECT_MAX_EASIZE)) {
378 LCONSOLE_INFO(
379 "%s: disabling xattr cache due to unknown maximum xattr size.\n",
380 dt);
381 } else {
382 sbi->ll_flags |= LL_SBI_XATTR_CACHE;
383 sbi->ll_xattr_cache_enabled = 1;
384 }
385 }
386
387 obd = class_name2obd(dt);
388 if (!obd) {
389 CERROR("DT %s: not setup or attached\n", dt);
390 err = -ENODEV;
391 goto out_md_fid;
392 }
393
394 data->ocd_connect_flags = OBD_CONNECT_GRANT | OBD_CONNECT_VERSION |
395 OBD_CONNECT_REQPORTAL | OBD_CONNECT_BRW_SIZE |
396 OBD_CONNECT_CANCELSET | OBD_CONNECT_FID |
397 OBD_CONNECT_SRVLOCK | OBD_CONNECT_TRUNCLOCK|
398 OBD_CONNECT_AT | OBD_CONNECT_RMT_CLIENT |
399 OBD_CONNECT_OSS_CAPA | OBD_CONNECT_VBR|
400 OBD_CONNECT_FULL20 | OBD_CONNECT_64BITHASH |
401 OBD_CONNECT_MAXBYTES |
402 OBD_CONNECT_EINPROGRESS |
403 OBD_CONNECT_JOBSTATS | OBD_CONNECT_LVB_TYPE |
404 OBD_CONNECT_LAYOUTLOCK | OBD_CONNECT_PINGLESS;
405
406 if (sbi->ll_flags & LL_SBI_SOM_PREVIEW)
407 data->ocd_connect_flags |= OBD_CONNECT_SOM;
408
409 if (!OBD_FAIL_CHECK(OBD_FAIL_OSC_CONNECT_CKSUM)) {
410 /* OBD_CONNECT_CKSUM should always be set, even if checksums are
411 * disabled by default, because it can still be enabled on the
412 * fly via /proc. As a consequence, we still need to come to an
413 * agreement on the supported algorithms at connect time */
414 data->ocd_connect_flags |= OBD_CONNECT_CKSUM;
415
416 if (OBD_FAIL_CHECK(OBD_FAIL_OSC_CKSUM_ADLER_ONLY))
417 data->ocd_cksum_types = OBD_CKSUM_ADLER;
418 else
419 data->ocd_cksum_types = cksum_types_supported_client();
420 }
421
422 data->ocd_connect_flags |= OBD_CONNECT_LRU_RESIZE;
423 if (sbi->ll_flags & LL_SBI_RMT_CLIENT)
424 data->ocd_connect_flags |= OBD_CONNECT_RMT_CLIENT_FORCE;
425
426 CDEBUG(D_RPCTRACE, "ocd_connect_flags: %#llx ocd_version: %d ocd_grant: %d\n",
427 data->ocd_connect_flags,
428 data->ocd_version, data->ocd_grant);
429
430 obd->obd_upcall.onu_owner = &sbi->ll_lco;
431 obd->obd_upcall.onu_upcall = cl_ocd_update;
432
433 data->ocd_brw_size = DT_MAX_BRW_SIZE;
434
435 err = obd_connect(NULL, &sbi->ll_dt_exp, obd, &sbi->ll_sb_uuid, data,
436 NULL);
437 if (err == -EBUSY) {
438 LCONSOLE_ERROR_MSG(0x150, "An OST (dt %s) is performing recovery, of which this client is not a part. Please wait for recovery to complete, abort, or time out.\n",
439 dt);
440 goto out_md;
441 } else if (err) {
442 CERROR("%s: Cannot connect to %s: rc = %d\n",
443 sbi->ll_dt_exp->exp_obd->obd_name, dt, err);
444 goto out_md;
445 }
446
447 sbi->ll_dt_exp->exp_connect_data = *data;
448
449 err = obd_fid_init(sbi->ll_dt_exp->exp_obd, sbi->ll_dt_exp,
450 LUSTRE_SEQ_METADATA);
451 if (err) {
452 CERROR("%s: Can't init data layer FID infrastructure, rc = %d\n",
453 sbi->ll_dt_exp->exp_obd->obd_name, err);
454 goto out_dt;
455 }
456
457 mutex_lock(&sbi->ll_lco.lco_lock);
458 sbi->ll_lco.lco_flags = data->ocd_connect_flags;
459 sbi->ll_lco.lco_md_exp = sbi->ll_md_exp;
460 sbi->ll_lco.lco_dt_exp = sbi->ll_dt_exp;
461 mutex_unlock(&sbi->ll_lco.lco_lock);
462
463 fid_zero(&sbi->ll_root_fid);
464 err = md_getstatus(sbi->ll_md_exp, &sbi->ll_root_fid, &oc);
465 if (err) {
466 CERROR("cannot mds_connect: rc = %d\n", err);
467 goto out_lock_cn_cb;
468 }
469 if (!fid_is_sane(&sbi->ll_root_fid)) {
470 CERROR("%s: Invalid root fid "DFID" during mount\n",
471 sbi->ll_md_exp->exp_obd->obd_name,
472 PFID(&sbi->ll_root_fid));
473 err = -EINVAL;
474 goto out_lock_cn_cb;
475 }
476 CDEBUG(D_SUPER, "rootfid "DFID"\n", PFID(&sbi->ll_root_fid));
477
478 sb->s_op = &lustre_super_operations;
479 #if THREAD_SIZE >= 8192 /*b=17630*/
480 sb->s_export_op = &lustre_export_operations;
481 #endif
482
483 /* make root inode
484 * XXX: move this to after cbd setup? */
485 valid = OBD_MD_FLGETATTR | OBD_MD_FLBLOCKS | OBD_MD_FLMDSCAPA;
486 if (sbi->ll_flags & LL_SBI_RMT_CLIENT)
487 valid |= OBD_MD_FLRMTPERM;
488 else if (sbi->ll_flags & LL_SBI_ACL)
489 valid |= OBD_MD_FLACL;
490
491 op_data = kzalloc(sizeof(*op_data), GFP_NOFS);
492 if (!op_data) {
493 err = -ENOMEM;
494 goto out_lock_cn_cb;
495 }
496
497 op_data->op_fid1 = sbi->ll_root_fid;
498 op_data->op_mode = 0;
499 op_data->op_capa1 = oc;
500 op_data->op_valid = valid;
501
502 err = md_getattr(sbi->ll_md_exp, op_data, &request);
503 if (oc)
504 capa_put(oc);
505 OBD_FREE_PTR(op_data);
506 if (err) {
507 CERROR("%s: md_getattr failed for root: rc = %d\n",
508 sbi->ll_md_exp->exp_obd->obd_name, err);
509 goto out_lock_cn_cb;
510 }
511
512 err = md_get_lustre_md(sbi->ll_md_exp, request, sbi->ll_dt_exp,
513 sbi->ll_md_exp, &lmd);
514 if (err) {
515 CERROR("failed to understand root inode md: rc = %d\n", err);
516 ptlrpc_req_finished(request);
517 goto out_lock_cn_cb;
518 }
519
520 LASSERT(fid_is_sane(&sbi->ll_root_fid));
521 root = ll_iget(sb, cl_fid_build_ino(&sbi->ll_root_fid,
522 sbi->ll_flags & LL_SBI_32BIT_API),
523 &lmd);
524 md_free_lustre_md(sbi->ll_md_exp, &lmd);
525 ptlrpc_req_finished(request);
526
527 if (root == NULL || IS_ERR(root)) {
528 if (lmd.lsm)
529 obd_free_memmd(sbi->ll_dt_exp, &lmd.lsm);
530 #ifdef CONFIG_FS_POSIX_ACL
531 if (lmd.posix_acl) {
532 posix_acl_release(lmd.posix_acl);
533 lmd.posix_acl = NULL;
534 }
535 #endif
536 err = IS_ERR(root) ? PTR_ERR(root) : -EBADF;
537 root = NULL;
538 CERROR("lustre_lite: bad iget4 for root\n");
539 goto out_root;
540 }
541
542 err = ll_close_thread_start(&sbi->ll_lcq);
543 if (err) {
544 CERROR("cannot start close thread: rc %d\n", err);
545 goto out_root;
546 }
547
548 #ifdef CONFIG_FS_POSIX_ACL
549 if (sbi->ll_flags & LL_SBI_RMT_CLIENT) {
550 rct_init(&sbi->ll_rct);
551 et_init(&sbi->ll_et);
552 }
553 #endif
554
555 checksum = sbi->ll_flags & LL_SBI_CHECKSUM;
556 err = obd_set_info_async(NULL, sbi->ll_dt_exp, sizeof(KEY_CHECKSUM),
557 KEY_CHECKSUM, sizeof(checksum), &checksum,
558 NULL);
559 cl_sb_init(sb);
560
561 err = obd_set_info_async(NULL, sbi->ll_dt_exp, sizeof(KEY_CACHE_SET),
562 KEY_CACHE_SET, sizeof(sbi->ll_cache),
563 &sbi->ll_cache, NULL);
564
565 sb->s_root = d_make_root(root);
566 if (sb->s_root == NULL) {
567 CERROR("%s: can't make root dentry\n",
568 ll_get_fsname(sb, NULL, 0));
569 err = -ENOMEM;
570 goto out_lock_cn_cb;
571 }
572
573 sbi->ll_sdev_orig = sb->s_dev;
574
575 /* We set sb->s_dev equal on all lustre clients in order to support
576 * NFS export clustering. NFSD requires that the FSID be the same
577 * on all clients. */
578 /* s_dev is also used in lt_compare() to compare two fs, but that is
579 * only a node-local comparison. */
580 uuid = obd_get_uuid(sbi->ll_md_exp);
581 if (uuid != NULL) {
582 sb->s_dev = get_uuid2int(uuid->uuid, strlen(uuid->uuid));
583 get_uuid2fsid(uuid->uuid, strlen(uuid->uuid), &sbi->ll_fsid);
584 }
585
586 if (data != NULL)
587 OBD_FREE_PTR(data);
588 if (osfs != NULL)
589 OBD_FREE_PTR(osfs);
590
591 return err;
592 out_root:
593 iput(root);
594 out_lock_cn_cb:
595 obd_fid_fini(sbi->ll_dt_exp->exp_obd);
596 out_dt:
597 obd_disconnect(sbi->ll_dt_exp);
598 sbi->ll_dt_exp = NULL;
599 /* Make sure all OScs are gone, since cl_cache is accessing sbi. */
600 obd_zombie_barrier();
601 out_md_fid:
602 obd_fid_fini(sbi->ll_md_exp->exp_obd);
603 out_md:
604 obd_disconnect(sbi->ll_md_exp);
605 sbi->ll_md_exp = NULL;
606 out:
607 if (data != NULL)
608 OBD_FREE_PTR(data);
609 if (osfs != NULL)
610 OBD_FREE_PTR(osfs);
611 lprocfs_unregister_mountpoint(sbi);
612 return err;
613 }
614
615 int ll_get_max_mdsize(struct ll_sb_info *sbi, int *lmmsize)
616 {
617 int size, rc;
618
619 *lmmsize = obd_size_diskmd(sbi->ll_dt_exp, NULL);
620 size = sizeof(int);
621 rc = obd_get_info(NULL, sbi->ll_md_exp, sizeof(KEY_MAX_EASIZE),
622 KEY_MAX_EASIZE, &size, lmmsize, NULL);
623 if (rc)
624 CERROR("Get max mdsize error rc %d \n", rc);
625
626 return rc;
627 }
628
629 int ll_get_default_mdsize(struct ll_sb_info *sbi, int *lmmsize)
630 {
631 int size, rc;
632
633 size = sizeof(int);
634 rc = obd_get_info(NULL, sbi->ll_md_exp, sizeof(KEY_DEFAULT_EASIZE),
635 KEY_DEFAULT_EASIZE, &size, lmmsize, NULL);
636 if (rc)
637 CERROR("Get default mdsize error rc %d\n", rc);
638
639 return rc;
640 }
641
642 int ll_get_max_cookiesize(struct ll_sb_info *sbi, int *lmmsize)
643 {
644 int size, rc;
645
646 size = sizeof(int);
647 rc = obd_get_info(NULL, sbi->ll_md_exp, sizeof(KEY_MAX_COOKIESIZE),
648 KEY_MAX_COOKIESIZE, &size, lmmsize, NULL);
649 if (rc)
650 CERROR("Get max cookiesize error rc %d\n", rc);
651
652 return rc;
653 }
654
655 int ll_get_default_cookiesize(struct ll_sb_info *sbi, int *lmmsize)
656 {
657 int size, rc;
658
659 size = sizeof(int);
660 rc = obd_get_info(NULL, sbi->ll_md_exp, sizeof(KEY_DEFAULT_COOKIESIZE),
661 KEY_DEFAULT_COOKIESIZE, &size, lmmsize, NULL);
662 if (rc)
663 CERROR("Get default cookiesize error rc %d\n", rc);
664
665 return rc;
666 }
667
668 static void ll_dump_inode(struct inode *inode)
669 {
670 struct ll_d_hlist_node *tmp;
671 int dentry_count = 0;
672
673 LASSERT(inode != NULL);
674
675 ll_d_hlist_for_each(tmp, &inode->i_dentry)
676 dentry_count++;
677
678 CERROR("inode %p dump: dev=%s ino=%lu mode=%o count=%u, %d dentries\n",
679 inode, ll_i2mdexp(inode)->exp_obd->obd_name, inode->i_ino,
680 inode->i_mode, atomic_read(&inode->i_count), dentry_count);
681 }
682
683 void lustre_dump_dentry(struct dentry *dentry, int recur)
684 {
685 struct list_head *tmp;
686 int subdirs = 0;
687
688 LASSERT(dentry != NULL);
689
690 list_for_each(tmp, &dentry->d_subdirs)
691 subdirs++;
692
693 CERROR("dentry %p dump: name=%.*s parent=%.*s (%p), inode=%p, count=%u, flags=0x%x, fsdata=%p, %d subdirs\n",
694 dentry,
695 dentry->d_name.len, dentry->d_name.name,
696 dentry->d_parent->d_name.len, dentry->d_parent->d_name.name,
697 dentry->d_parent, dentry->d_inode, d_count(dentry),
698 dentry->d_flags, dentry->d_fsdata, subdirs);
699 if (dentry->d_inode != NULL)
700 ll_dump_inode(dentry->d_inode);
701
702 if (recur == 0)
703 return;
704
705 list_for_each(tmp, &dentry->d_subdirs) {
706 struct dentry *d = list_entry(tmp, struct dentry, d_u.d_child);
707
708 lustre_dump_dentry(d, recur - 1);
709 }
710 }
711
/*
 * Undo client_common_fill_super(): stop the close thread, tear down
 * the cl_object layer, then disconnect the data (OST) and metadata
 * (MDT) exports in that order.  The ordering matters — the DT export
 * and all OSCs must be fully gone (obd_zombie_barrier) before the
 * sbi-backed cl_cache can be considered unreferenced; see LU-2543.
 */
static void client_common_put_super(struct super_block *sb)
{
	struct ll_sb_info *sbi = ll_s2sbi(sb);

#ifdef CONFIG_FS_POSIX_ACL
	/* tear down remote-client permission tables set up at mount */
	if (sbi->ll_flags & LL_SBI_RMT_CLIENT) {
		et_fini(&sbi->ll_et);
		rct_fini(&sbi->ll_rct);
	}
#endif

	ll_close_thread_shutdown(sbi->ll_lcq);

	cl_sb_fini(sb);

	list_del(&sbi->ll_conn_chain);

	/* data (OST) side first */
	obd_fid_fini(sbi->ll_dt_exp->exp_obd);
	obd_disconnect(sbi->ll_dt_exp);
	sbi->ll_dt_exp = NULL;
	/* wait till all OSCs are gone, since cl_cache is accessing sbi.
	 * see LU-2543. */
	obd_zombie_barrier();

	lprocfs_unregister_mountpoint(sbi);

	/* metadata (MDT) side last */
	obd_fid_fini(sbi->ll_md_exp->exp_obd);
	obd_disconnect(sbi->ll_md_exp);
	sbi->ll_md_exp = NULL;
}
742
743 void ll_kill_super(struct super_block *sb)
744 {
745 struct ll_sb_info *sbi;
746
747 /* not init sb ?*/
748 if (!(sb->s_flags & MS_ACTIVE))
749 return;
750
751 sbi = ll_s2sbi(sb);
752 /* we need to restore s_dev from changed for clustered NFS before
753 * put_super because new kernels have cached s_dev and change sb->s_dev
754 * in put_super not affected real removing devices */
755 if (sbi) {
756 sb->s_dev = sbi->ll_sdev_orig;
757 sbi->ll_umounting = 1;
758 }
759 }
760
/* Return \a fl when \a data begins with the option string \a opt
 * (prefix match), otherwise 0. */
static inline int ll_set_opt(const char *opt, char *data, int fl)
{
	return strncmp(opt, data, strlen(opt)) == 0 ? fl : 0;
}
768
769 /* non-client-specific mount options are parsed in lmd_parse */
770 static int ll_options(char *options, int *flags)
771 {
772 int tmp;
773 char *s1 = options, *s2;
774
775 if (!options)
776 return 0;
777
778 CDEBUG(D_CONFIG, "Parsing opts %s\n", options);
779
780 while (*s1) {
781 CDEBUG(D_SUPER, "next opt=%s\n", s1);
782 tmp = ll_set_opt("nolock", s1, LL_SBI_NOLCK);
783 if (tmp) {
784 *flags |= tmp;
785 goto next;
786 }
787 tmp = ll_set_opt("flock", s1, LL_SBI_FLOCK);
788 if (tmp) {
789 *flags |= tmp;
790 goto next;
791 }
792 tmp = ll_set_opt("localflock", s1, LL_SBI_LOCALFLOCK);
793 if (tmp) {
794 *flags |= tmp;
795 goto next;
796 }
797 tmp = ll_set_opt("noflock", s1, LL_SBI_FLOCK|LL_SBI_LOCALFLOCK);
798 if (tmp) {
799 *flags &= ~tmp;
800 goto next;
801 }
802 tmp = ll_set_opt("user_xattr", s1, LL_SBI_USER_XATTR);
803 if (tmp) {
804 *flags |= tmp;
805 goto next;
806 }
807 tmp = ll_set_opt("nouser_xattr", s1, LL_SBI_USER_XATTR);
808 if (tmp) {
809 *flags &= ~tmp;
810 goto next;
811 }
812 tmp = ll_set_opt("remote_client", s1, LL_SBI_RMT_CLIENT);
813 if (tmp) {
814 *flags |= tmp;
815 goto next;
816 }
817 tmp = ll_set_opt("user_fid2path", s1, LL_SBI_USER_FID2PATH);
818 if (tmp) {
819 *flags |= tmp;
820 goto next;
821 }
822 tmp = ll_set_opt("nouser_fid2path", s1, LL_SBI_USER_FID2PATH);
823 if (tmp) {
824 *flags &= ~tmp;
825 goto next;
826 }
827
828 tmp = ll_set_opt("checksum", s1, LL_SBI_CHECKSUM);
829 if (tmp) {
830 *flags |= tmp;
831 goto next;
832 }
833 tmp = ll_set_opt("nochecksum", s1, LL_SBI_CHECKSUM);
834 if (tmp) {
835 *flags &= ~tmp;
836 goto next;
837 }
838 tmp = ll_set_opt("lruresize", s1, LL_SBI_LRU_RESIZE);
839 if (tmp) {
840 *flags |= tmp;
841 goto next;
842 }
843 tmp = ll_set_opt("nolruresize", s1, LL_SBI_LRU_RESIZE);
844 if (tmp) {
845 *flags &= ~tmp;
846 goto next;
847 }
848 tmp = ll_set_opt("lazystatfs", s1, LL_SBI_LAZYSTATFS);
849 if (tmp) {
850 *flags |= tmp;
851 goto next;
852 }
853 tmp = ll_set_opt("nolazystatfs", s1, LL_SBI_LAZYSTATFS);
854 if (tmp) {
855 *flags &= ~tmp;
856 goto next;
857 }
858 tmp = ll_set_opt("som_preview", s1, LL_SBI_SOM_PREVIEW);
859 if (tmp) {
860 *flags |= tmp;
861 goto next;
862 }
863 tmp = ll_set_opt("32bitapi", s1, LL_SBI_32BIT_API);
864 if (tmp) {
865 *flags |= tmp;
866 goto next;
867 }
868 tmp = ll_set_opt("verbose", s1, LL_SBI_VERBOSE);
869 if (tmp) {
870 *flags |= tmp;
871 goto next;
872 }
873 tmp = ll_set_opt("noverbose", s1, LL_SBI_VERBOSE);
874 if (tmp) {
875 *flags &= ~tmp;
876 goto next;
877 }
878 LCONSOLE_ERROR_MSG(0x152, "Unknown option '%s', won't mount.\n",
879 s1);
880 return -EINVAL;
881
882 next:
883 /* Find next opt */
884 s2 = strchr(s1, ',');
885 if (s2 == NULL)
886 break;
887 s1 = s2 + 1;
888 }
889 return 0;
890 }
891
892 void ll_lli_init(struct ll_inode_info *lli)
893 {
894 lli->lli_inode_magic = LLI_INODE_MAGIC;
895 lli->lli_flags = 0;
896 lli->lli_ioepoch = 0;
897 lli->lli_maxbytes = MAX_LFS_FILESIZE;
898 spin_lock_init(&lli->lli_lock);
899 lli->lli_posix_acl = NULL;
900 lli->lli_remote_perms = NULL;
901 mutex_init(&lli->lli_rmtperm_mutex);
902 /* Do not set lli_fid, it has been initialized already. */
903 fid_zero(&lli->lli_pfid);
904 INIT_LIST_HEAD(&lli->lli_close_list);
905 INIT_LIST_HEAD(&lli->lli_oss_capas);
906 atomic_set(&lli->lli_open_count, 0);
907 lli->lli_mds_capa = NULL;
908 lli->lli_rmtperm_time = 0;
909 lli->lli_pending_och = NULL;
910 lli->lli_mds_read_och = NULL;
911 lli->lli_mds_write_och = NULL;
912 lli->lli_mds_exec_och = NULL;
913 lli->lli_open_fd_read_count = 0;
914 lli->lli_open_fd_write_count = 0;
915 lli->lli_open_fd_exec_count = 0;
916 mutex_init(&lli->lli_och_mutex);
917 spin_lock_init(&lli->lli_agl_lock);
918 lli->lli_has_smd = false;
919 spin_lock_init(&lli->lli_layout_lock);
920 ll_layout_version_set(lli, LL_LAYOUT_GEN_NONE);
921 lli->lli_clob = NULL;
922
923 init_rwsem(&lli->lli_xattrs_list_rwsem);
924 mutex_init(&lli->lli_xattrs_enq_lock);
925
926 LASSERT(lli->lli_vfs_inode.i_mode != 0);
927 if (S_ISDIR(lli->lli_vfs_inode.i_mode)) {
928 mutex_init(&lli->lli_readdir_mutex);
929 lli->lli_opendir_key = NULL;
930 lli->lli_sai = NULL;
931 spin_lock_init(&lli->lli_sa_lock);
932 lli->lli_opendir_pid = 0;
933 } else {
934 mutex_init(&lli->lli_size_mutex);
935 lli->lli_symlink_name = NULL;
936 init_rwsem(&lli->lli_trunc_sem);
937 mutex_init(&lli->lli_write_mutex);
938 init_rwsem(&lli->lli_glimpse_sem);
939 lli->lli_glimpse_time = 0;
940 INIT_LIST_HEAD(&lli->lli_agl_list);
941 lli->lli_agl_index = 0;
942 lli->lli_async_rc = 0;
943 }
944 mutex_init(&lli->lli_layout_mutex);
945 }
946
947 static inline int ll_bdi_register(struct backing_dev_info *bdi)
948 {
949 static atomic_t ll_bdi_num = ATOMIC_INIT(0);
950
951 bdi->name = "lustre";
952 return bdi_register(bdi, NULL, "lustre-%d",
953 atomic_inc_return(&ll_bdi_num));
954 }
955
956 int ll_fill_super(struct super_block *sb, struct vfsmount *mnt)
957 {
958 struct lustre_profile *lprof = NULL;
959 struct lustre_sb_info *lsi = s2lsi(sb);
960 struct ll_sb_info *sbi;
961 char *dt = NULL, *md = NULL;
962 char *profilenm = get_profile_name(sb);
963 struct config_llog_instance *cfg;
964 /* %p for void* in printf needs 16+2 characters: 0xffffffffffffffff */
965 const int instlen = sizeof(cfg->cfg_instance) * 2 + 2;
966 int err;
967
968 CDEBUG(D_VFSTRACE, "VFS Op: sb %p\n", sb);
969
970 cfg = kzalloc(sizeof(*cfg), GFP_NOFS);
971 if (!cfg)
972 return -ENOMEM;
973
974 try_module_get(THIS_MODULE);
975
976 /* client additional sb info */
977 lsi->lsi_llsbi = sbi = ll_init_sbi();
978 if (!sbi) {
979 module_put(THIS_MODULE);
980 OBD_FREE_PTR(cfg);
981 return -ENOMEM;
982 }
983
984 err = ll_options(lsi->lsi_lmd->lmd_opts, &sbi->ll_flags);
985 if (err)
986 goto out_free;
987
988 err = bdi_init(&lsi->lsi_bdi);
989 if (err)
990 goto out_free;
991 lsi->lsi_flags |= LSI_BDI_INITIALIZED;
992 lsi->lsi_bdi.capabilities = BDI_CAP_MAP_COPY;
993 err = ll_bdi_register(&lsi->lsi_bdi);
994 if (err)
995 goto out_free;
996
997 sb->s_bdi = &lsi->lsi_bdi;
998 /* kernel >= 2.6.38 store dentry operations in sb->s_d_op. */
999 sb->s_d_op = &ll_d_ops;
1000
1001 /* Generate a string unique to this super, in case some joker tries
1002 to mount the same fs at two mount points.
1003 Use the address of the super itself.*/
1004 cfg->cfg_instance = sb;
1005 cfg->cfg_uuid = lsi->lsi_llsbi->ll_sb_uuid;
1006 cfg->cfg_callback = class_config_llog_handler;
1007 /* set up client obds */
1008 err = lustre_process_log(sb, profilenm, cfg);
1009 if (err < 0) {
1010 CERROR("Unable to process log: %d\n", err);
1011 goto out_free;
1012 }
1013
1014 /* Profile set with LCFG_MOUNTOPT so we can find our mdc and osc obds */
1015 lprof = class_get_profile(profilenm);
1016 if (lprof == NULL) {
1017 LCONSOLE_ERROR_MSG(0x156, "The client profile '%s' could not be read from the MGS. Does that filesystem exist?\n",
1018 profilenm);
1019 err = -EINVAL;
1020 goto out_free;
1021 }
1022 CDEBUG(D_CONFIG, "Found profile %s: mdc=%s osc=%s\n", profilenm,
1023 lprof->lp_md, lprof->lp_dt);
1024
1025 dt = kzalloc(strlen(lprof->lp_dt) + instlen + 2, GFP_NOFS);
1026 if (!dt) {
1027 err = -ENOMEM;
1028 goto out_free;
1029 }
1030 sprintf(dt, "%s-%p", lprof->lp_dt, cfg->cfg_instance);
1031
1032 md = kzalloc(strlen(lprof->lp_md) + instlen + 2, GFP_NOFS);
1033 if (!md) {
1034 err = -ENOMEM;
1035 goto out_free;
1036 }
1037 sprintf(md, "%s-%p", lprof->lp_md, cfg->cfg_instance);
1038
1039 /* connections, registrations, sb setup */
1040 err = client_common_fill_super(sb, md, dt, mnt);
1041
1042 out_free:
1043 if (md)
1044 OBD_FREE(md, strlen(lprof->lp_md) + instlen + 2);
1045 if (dt)
1046 OBD_FREE(dt, strlen(lprof->lp_dt) + instlen + 2);
1047 if (err)
1048 ll_put_super(sb);
1049 else if (sbi->ll_flags & LL_SBI_VERBOSE)
1050 LCONSOLE_WARN("Mounted %s\n", profilenm);
1051
1052 OBD_FREE_PTR(cfg);
1053 return err;
1054 } /* ll_fill_super */
1055
/**
 * VFS put_super: tear down a lustre client mount.
 *
 * Ends the profile and params config logs, propagates obd_force so the
 * lov_disconnect path cleans up OSCs, runs common teardown when the
 * mount got far enough (ll_lcq set), manually cleans up every obd in
 * this super's uuid group, drops the profile, the BDI and the sbi.
 */
void ll_put_super(struct super_block *sb)
{
	struct config_llog_instance cfg, params_cfg;
	struct obd_device *obd;
	struct lustre_sb_info *lsi = s2lsi(sb);
	struct ll_sb_info *sbi = ll_s2sbi(sb);
	char *profilenm = get_profile_name(sb);
	int next, force = 1;

	CDEBUG(D_VFSTRACE, "VFS Op: sb %p - %s\n", sb, profilenm);

	ll_print_capa_stat(sbi);

	/* stop following the profile and params config logs */
	cfg.cfg_instance = sb;
	lustre_end_log(sb, profilenm, &cfg);

	params_cfg.cfg_instance = sb;
	lustre_end_log(sb, PARAMS_FILENAME, &params_cfg);

	/* inherit force from the MD obd if it exists */
	if (sbi->ll_md_exp) {
		obd = class_exp2obd(sbi->ll_md_exp);
		if (obd)
			force = obd->obd_force;
	}

	/* We need to set force before the lov_disconnect in
	   lustre_common_put_super, since l_d cleans up osc's as well. */
	if (force) {
		next = 0;
		while ((obd = class_devices_in_group(&sbi->ll_sb_uuid,
						     &next)) != NULL) {
			obd->obd_force = force;
		}
	}

	if (sbi->ll_lcq) {
		/* Only if client_common_fill_super succeeded */
		client_common_put_super(sb);
	}

	/* manually clean up any remaining obds in this super's group */
	next = 0;
	while ((obd = class_devices_in_group(&sbi->ll_sb_uuid, &next)))
		class_manual_cleanup(obd);

	if (sbi->ll_flags & LL_SBI_VERBOSE)
		LCONSOLE_WARN("Unmounted %s\n", profilenm ? profilenm : "");

	if (profilenm)
		class_del_profile(profilenm);

	if (lsi->lsi_flags & LSI_BDI_INITIALIZED) {
		bdi_destroy(&lsi->lsi_bdi);
		lsi->lsi_flags &= ~LSI_BDI_INITIALIZED;
	}

	ll_free_sbi(sb);
	lsi->lsi_llsbi = NULL;

	lustre_common_put_super(sb);

	module_put(THIS_MODULE);
} /* client_put_super */
1118
/* Return the VFS inode cached in @lock's resource LVB, holding a fresh
 * igrab() reference, or NULL when the resource carries no inode or the
 * cached pointer no longer looks like a live llite inode.  The caller
 * owns the returned reference. */
struct inode *ll_inode_from_resource_lock(struct ldlm_lock *lock)
{
	struct inode *inode = NULL;

	/* NOTE: we depend on atomic igrab() -bzzz */
	lock_res_and_lock(lock);
	if (lock->l_resource->lr_lvb_inode) {
		struct ll_inode_info *lli;

		lli = ll_i2info(lock->l_resource->lr_lvb_inode);
		if (lli->lli_inode_magic == LLI_INODE_MAGIC) {
			inode = igrab(lock->l_resource->lr_lvb_inode);
		} else {
			/* Magic mismatch: the cached pointer is stale.
			 * Log it (quietly when the inode is being freed)
			 * and return NULL instead. */
			inode = lock->l_resource->lr_lvb_inode;
			LDLM_DEBUG_LIMIT(inode->i_state & I_FREEING ?  D_INFO :
					 D_WARNING, lock, "lr_lvb_inode %p is bogus: magic %08x",
					 lock->l_resource->lr_lvb_inode,
					 lli->lli_inode_magic);
			inode = NULL;
		}
	}
	unlock_res_and_lock(lock);
	return inode;
}
1143
/* Release all llite-private state attached to @inode as it leaves the
 * inode cache: close any cached MDS open handles, free the symlink
 * buffer and xattr cache, drop ACLs/remote perms and capas, and
 * finalize the cl_object layer. */
void ll_clear_inode(struct inode *inode)
{
	struct ll_inode_info *lli = ll_i2info(inode);
	struct ll_sb_info *sbi = ll_i2sbi(inode);

	CDEBUG(D_VFSTRACE, "VFS Op:inode=%lu/%u(%p)\n", inode->i_ino,
	       inode->i_generation, inode);

	if (S_ISDIR(inode->i_mode)) {
		/* these should have been cleared in ll_file_release */
		LASSERT(lli->lli_opendir_key == NULL);
		LASSERT(lli->lli_sai == NULL);
		LASSERT(lli->lli_opendir_pid == 0);
	}

	spin_lock(&lli->lli_lock);
	ll_i2info(inode)->lli_flags &= ~LLIF_MDS_SIZE_LOCK;
	spin_unlock(&lli->lli_lock);
	md_null_inode(sbi->ll_md_exp, ll_inode2fid(inode));

	/* No open file descriptors may still reference this inode. */
	LASSERT(!lli->lli_open_fd_write_count);
	LASSERT(!lli->lli_open_fd_read_count);
	LASSERT(!lli->lli_open_fd_exec_count);

	/* Close any MDS open handles still cached on the inode. */
	if (lli->lli_mds_write_och)
		ll_md_real_close(inode, FMODE_WRITE);
	if (lli->lli_mds_exec_och)
		ll_md_real_close(inode, FMODE_EXEC);
	if (lli->lli_mds_read_och)
		ll_md_real_close(inode, FMODE_READ);

	if (S_ISLNK(inode->i_mode) && lli->lli_symlink_name) {
		OBD_FREE(lli->lli_symlink_name,
			 strlen(lli->lli_symlink_name) + 1);
		lli->lli_symlink_name = NULL;
	}

	ll_xattr_cache_destroy(inode);

	if (sbi->ll_flags & LL_SBI_RMT_CLIENT) {
		/* Remote clients carry a remote-permission hash, never a
		 * local POSIX ACL. */
		LASSERT(lli->lli_posix_acl == NULL);
		if (lli->lli_remote_perms) {
			free_rmtperm_hash(lli->lli_remote_perms);
			lli->lli_remote_perms = NULL;
		}
	}
#ifdef CONFIG_FS_POSIX_ACL
	else if (lli->lli_posix_acl) {
		/* Ours must be the last reference to the ACL. */
		LASSERT(atomic_read(&lli->lli_posix_acl->a_refcount) == 1);
		LASSERT(lli->lli_remote_perms == NULL);
		posix_acl_release(lli->lli_posix_acl);
		lli->lli_posix_acl = NULL;
	}
#endif
	lli->lli_inode_magic = LLI_INODE_DEAD;

	ll_clear_inode_capas(inode);
	if (!S_ISDIR(inode->i_mode))
		LASSERT(list_empty(&lli->lli_agl_list));

	/*
	 * XXX This has to be done before lsm is freed below, because
	 * cl_object still uses inode lsm.
	 */
	cl_inode_fini(inode);
	lli->lli_has_smd = false;
}
1211
/* Send a setattr RPC to the MDS for @dentry's inode and apply the
 * returned metadata locally.  On success the IO epoch data (handle and
 * ioepoch) from the reply is copied back into @op_data for the caller,
 * and @mod receives the open data used later by md_done_writing().
 * Returns 0 or a negative errno. */
static int ll_md_setattr(struct dentry *dentry, struct md_op_data *op_data,
			 struct md_open_data **mod)
{
	struct lustre_md md;
	struct inode *inode = dentry->d_inode;
	struct ll_sb_info *sbi = ll_i2sbi(inode);
	struct ptlrpc_request *request = NULL;
	int rc, ia_valid;

	op_data = ll_prep_md_op_data(op_data, inode, NULL, NULL, 0, 0,
				     LUSTRE_OPC_ANY, NULL);
	if (IS_ERR(op_data))
		return PTR_ERR(op_data);

	rc = md_setattr(sbi->ll_md_exp, op_data, NULL, 0, NULL, 0,
			&request, mod);
	if (rc) {
		ptlrpc_req_finished(request);
		if (rc == -ENOENT) {
			/* The object no longer exists on the MDS. */
			clear_nlink(inode);
			/* Unlinked special device node? Or just a race?
			 * Pretend we done everything. */
			if (!S_ISREG(inode->i_mode) &&
			    !S_ISDIR(inode->i_mode)) {
				/* Apply locally, temporarily masking the
				 * *TIME_SET flags for simple_setattr(). */
				ia_valid = op_data->op_attr.ia_valid;
				op_data->op_attr.ia_valid &= ~TIMES_SET_FLAGS;
				rc = simple_setattr(dentry, &op_data->op_attr);
				op_data->op_attr.ia_valid = ia_valid;
			}
		} else if (rc != -EPERM && rc != -EACCES && rc != -ETXTBSY) {
			CERROR("md_setattr fails: rc = %d\n", rc);
		}
		return rc;
	}

	/* Unpack the reply metadata so it can be applied to the inode. */
	rc = md_get_lustre_md(sbi->ll_md_exp, request, sbi->ll_dt_exp,
			      sbi->ll_md_exp, &md);
	if (rc) {
		ptlrpc_req_finished(request);
		return rc;
	}

	ia_valid = op_data->op_attr.ia_valid;
	/* inode size will be in ll_setattr_ost, can't do it now since dirty
	 * cache is not cleared yet. */
	op_data->op_attr.ia_valid &= ~(TIMES_SET_FLAGS | ATTR_SIZE);
	rc = simple_setattr(dentry, &op_data->op_attr);
	op_data->op_attr.ia_valid = ia_valid;

	/* Extract epoch data if obtained. */
	op_data->op_handle = md.body->handle;
	op_data->op_ioepoch = md.body->ioepoch;

	ll_update_inode(inode, &md);
	ptlrpc_req_finished(request);

	return rc;
}
1270
1271 /* Close IO epoch and send Size-on-MDS attribute update. */
1272 static int ll_setattr_done_writing(struct inode *inode,
1273 struct md_op_data *op_data,
1274 struct md_open_data *mod)
1275 {
1276 struct ll_inode_info *lli = ll_i2info(inode);
1277 int rc = 0;
1278
1279 LASSERT(op_data != NULL);
1280 if (!S_ISREG(inode->i_mode))
1281 return 0;
1282
1283 CDEBUG(D_INODE, "Epoch %llu closed on "DFID" for truncate\n",
1284 op_data->op_ioepoch, PFID(&lli->lli_fid));
1285
1286 op_data->op_flags = MF_EPOCH_CLOSE;
1287 ll_done_writing_attr(inode, op_data);
1288 ll_pack_inode2opdata(inode, op_data, NULL);
1289
1290 rc = md_done_writing(ll_i2sbi(inode)->ll_md_exp, op_data, mod);
1291 if (rc == -EAGAIN) {
1292 /* MDS has instructed us to obtain Size-on-MDS attribute
1293 * from OSTs and send setattr to back to MDS. */
1294 rc = ll_som_update(inode, op_data);
1295 } else if (rc) {
1296 CERROR("inode %lu mdc truncate failed: rc = %d\n",
1297 inode->i_ino, rc);
1298 }
1299 return rc;
1300 }
1301
1302 static int ll_setattr_ost(struct inode *inode, struct iattr *attr)
1303 {
1304 struct obd_capa *capa;
1305 int rc;
1306
1307 if (attr->ia_valid & ATTR_SIZE)
1308 capa = ll_osscapa_get(inode, CAPA_OPC_OSS_TRUNC);
1309 else
1310 capa = ll_mdscapa_get(inode);
1311
1312 rc = cl_setattr_ost(inode, attr, capa);
1313
1314 if (attr->ia_valid & ATTR_SIZE)
1315 ll_truncate_free_capa(capa);
1316 else
1317 capa_put(capa);
1318
1319 return rc;
1320 }
1321
1322
1323 /* If this inode has objects allocated to it (lsm != NULL), then the OST
1324 * object(s) determine the file size and mtime. Otherwise, the MDS will
1325 * keep these values until such a time that objects are allocated for it.
1326 * We do the MDS operations first, as it is checking permissions for us.
1327 * We don't to the MDS RPC if there is nothing that we want to store there,
1328 * otherwise there is no harm in updating mtime/atime on the MDS if we are
1329 * going to do an RPC anyways.
1330 *
1331 * If we are doing a truncate, we will send the mtime and ctime updates
1332 * to the OST with the punch RPC, otherwise we do an explicit setattr RPC.
1333 * I don't believe it is possible to get e.g. ATTR_MTIME_SET and ATTR_SIZE
1334 * at the same time.
1335 *
1336 * In case of HSMimport, we only set attr on MDS.
1337 */
1338 int ll_setattr_raw(struct dentry *dentry, struct iattr *attr, bool hsm_import)
1339 {
1340 struct inode *inode = dentry->d_inode;
1341 struct ll_inode_info *lli = ll_i2info(inode);
1342 struct md_op_data *op_data = NULL;
1343 struct md_open_data *mod = NULL;
1344 bool file_is_released = false;
1345 int rc = 0, rc1 = 0;
1346
1347 CDEBUG(D_VFSTRACE,
1348 "%s: setattr inode %p/fid:"DFID
1349 " from %llu to %llu, valid %x, hsm_import %d\n",
1350 ll_get_fsname(inode->i_sb, NULL, 0), inode,
1351 PFID(&lli->lli_fid), i_size_read(inode), attr->ia_size,
1352 attr->ia_valid, hsm_import);
1353
1354 if (attr->ia_valid & ATTR_SIZE) {
1355 /* Check new size against VFS/VM file size limit and rlimit */
1356 rc = inode_newsize_ok(inode, attr->ia_size);
1357 if (rc)
1358 return rc;
1359
1360 /* The maximum Lustre file size is variable, based on the
1361 * OST maximum object size and number of stripes. This
1362 * needs another check in addition to the VFS check above. */
1363 if (attr->ia_size > ll_file_maxbytes(inode)) {
1364 CDEBUG(D_INODE, "file "DFID" too large %llu > %llu\n",
1365 PFID(&lli->lli_fid), attr->ia_size,
1366 ll_file_maxbytes(inode));
1367 return -EFBIG;
1368 }
1369
1370 attr->ia_valid |= ATTR_MTIME | ATTR_CTIME;
1371 }
1372
1373 /* POSIX: check before ATTR_*TIME_SET set (from inode_change_ok) */
1374 if (attr->ia_valid & TIMES_SET_FLAGS) {
1375 if ((!uid_eq(current_fsuid(), inode->i_uid)) &&
1376 !capable(CFS_CAP_FOWNER))
1377 return -EPERM;
1378 }
1379
1380 /* We mark all of the fields "set" so MDS/OST does not re-set them */
1381 if (attr->ia_valid & ATTR_CTIME) {
1382 attr->ia_ctime = CURRENT_TIME;
1383 attr->ia_valid |= ATTR_CTIME_SET;
1384 }
1385 if (!(attr->ia_valid & ATTR_ATIME_SET) &&
1386 (attr->ia_valid & ATTR_ATIME)) {
1387 attr->ia_atime = CURRENT_TIME;
1388 attr->ia_valid |= ATTR_ATIME_SET;
1389 }
1390 if (!(attr->ia_valid & ATTR_MTIME_SET) &&
1391 (attr->ia_valid & ATTR_MTIME)) {
1392 attr->ia_mtime = CURRENT_TIME;
1393 attr->ia_valid |= ATTR_MTIME_SET;
1394 }
1395
1396 if (attr->ia_valid & (ATTR_MTIME | ATTR_CTIME))
1397 CDEBUG(D_INODE, "setting mtime %lu, ctime %lu, now = %lu\n",
1398 LTIME_S(attr->ia_mtime), LTIME_S(attr->ia_ctime),
1399 get_seconds());
1400
1401 /* If we are changing file size, file content is modified, flag it. */
1402 if (attr->ia_valid & ATTR_SIZE) {
1403 attr->ia_valid |= MDS_OPEN_OWNEROVERRIDE;
1404 spin_lock(&lli->lli_lock);
1405 lli->lli_flags |= LLIF_DATA_MODIFIED;
1406 spin_unlock(&lli->lli_lock);
1407 }
1408
1409 /* We always do an MDS RPC, even if we're only changing the size;
1410 * only the MDS knows whether truncate() should fail with -ETXTBUSY */
1411
1412 op_data = kzalloc(sizeof(*op_data), GFP_NOFS);
1413 if (!op_data)
1414 return -ENOMEM;
1415
1416 if (!S_ISDIR(inode->i_mode)) {
1417 if (attr->ia_valid & ATTR_SIZE)
1418 inode_dio_write_done(inode);
1419 mutex_unlock(&inode->i_mutex);
1420 }
1421
1422 memcpy(&op_data->op_attr, attr, sizeof(*attr));
1423
1424 /* Open epoch for truncate. */
1425 if (exp_connect_som(ll_i2mdexp(inode)) &&
1426 (attr->ia_valid & (ATTR_SIZE | ATTR_MTIME | ATTR_MTIME_SET)))
1427 op_data->op_flags = MF_EPOCH_OPEN;
1428
1429 /* truncate on a released file must failed with -ENODATA,
1430 * so size must not be set on MDS for released file
1431 * but other attributes must be set
1432 */
1433 if (S_ISREG(inode->i_mode)) {
1434 struct lov_stripe_md *lsm;
1435 __u32 gen;
1436
1437 ll_layout_refresh(inode, &gen);
1438 lsm = ccc_inode_lsm_get(inode);
1439 if (lsm && lsm->lsm_pattern & LOV_PATTERN_F_RELEASED)
1440 file_is_released = true;
1441 ccc_inode_lsm_put(inode, lsm);
1442 }
1443
1444 /* if not in HSM import mode, clear size attr for released file
1445 * we clear the attribute send to MDT in op_data, not the original
1446 * received from caller in attr which is used later to
1447 * decide return code */
1448 if (file_is_released && (attr->ia_valid & ATTR_SIZE) && !hsm_import)
1449 op_data->op_attr.ia_valid &= ~ATTR_SIZE;
1450
1451 rc = ll_md_setattr(dentry, op_data, &mod);
1452 if (rc)
1453 goto out;
1454
1455 /* truncate failed (only when non HSM import), others succeed */
1456 if (file_is_released) {
1457 if ((attr->ia_valid & ATTR_SIZE) && !hsm_import)
1458 rc = -ENODATA;
1459 else
1460 rc = 0;
1461 goto out;
1462 }
1463
1464 /* RPC to MDT is sent, cancel data modification flag */
1465 if (rc == 0 && (op_data->op_bias & MDS_DATA_MODIFIED)) {
1466 spin_lock(&lli->lli_lock);
1467 lli->lli_flags &= ~LLIF_DATA_MODIFIED;
1468 spin_unlock(&lli->lli_lock);
1469 }
1470
1471 ll_ioepoch_open(lli, op_data->op_ioepoch);
1472 if (!S_ISREG(inode->i_mode)) {
1473 rc = 0;
1474 goto out;
1475 }
1476
1477 if (attr->ia_valid & (ATTR_SIZE |
1478 ATTR_ATIME | ATTR_ATIME_SET |
1479 ATTR_MTIME | ATTR_MTIME_SET))
1480 /* For truncate and utimes sending attributes to OSTs, setting
1481 * mtime/atime to the past will be performed under PW [0:EOF]
1482 * extent lock (new_size:EOF for truncate). It may seem
1483 * excessive to send mtime/atime updates to OSTs when not
1484 * setting times to past, but it is necessary due to possible
1485 * time de-synchronization between MDT inode and OST objects */
1486 if (attr->ia_valid & ATTR_SIZE)
1487 down_write(&lli->lli_trunc_sem);
1488 rc = ll_setattr_ost(inode, attr);
1489 if (attr->ia_valid & ATTR_SIZE)
1490 up_write(&lli->lli_trunc_sem);
1491 out:
1492 if (op_data) {
1493 if (op_data->op_ioepoch) {
1494 rc1 = ll_setattr_done_writing(inode, op_data, mod);
1495 if (!rc)
1496 rc = rc1;
1497 }
1498 ll_finish_md_op_data(op_data);
1499 }
1500 if (!S_ISDIR(inode->i_mode)) {
1501 mutex_lock(&inode->i_mutex);
1502 if ((attr->ia_valid & ATTR_SIZE) && !hsm_import)
1503 inode_dio_wait(inode);
1504 }
1505
1506 ll_stats_ops_tally(ll_i2sbi(inode), (attr->ia_valid & ATTR_SIZE) ?
1507 LPROC_LL_TRUNC : LPROC_LL_SETATTR, 1);
1508
1509 return rc;
1510 }
1511
1512 int ll_setattr(struct dentry *de, struct iattr *attr)
1513 {
1514 int mode = de->d_inode->i_mode;
1515
1516 if ((attr->ia_valid & (ATTR_CTIME|ATTR_SIZE|ATTR_MODE)) ==
1517 (ATTR_CTIME|ATTR_SIZE|ATTR_MODE))
1518 attr->ia_valid |= MDS_OPEN_OWNEROVERRIDE;
1519
1520 if (((attr->ia_valid & (ATTR_MODE|ATTR_FORCE|ATTR_SIZE)) ==
1521 (ATTR_SIZE|ATTR_MODE)) &&
1522 (((mode & S_ISUID) && !(attr->ia_mode & S_ISUID)) ||
1523 (((mode & (S_ISGID|S_IXGRP)) == (S_ISGID|S_IXGRP)) &&
1524 !(attr->ia_mode & S_ISGID))))
1525 attr->ia_valid |= ATTR_FORCE;
1526
1527 if ((attr->ia_valid & ATTR_MODE) &&
1528 (mode & S_ISUID) &&
1529 !(attr->ia_mode & S_ISUID) &&
1530 !(attr->ia_valid & ATTR_KILL_SUID))
1531 attr->ia_valid |= ATTR_KILL_SUID;
1532
1533 if ((attr->ia_valid & ATTR_MODE) &&
1534 ((mode & (S_ISGID|S_IXGRP)) == (S_ISGID|S_IXGRP)) &&
1535 !(attr->ia_mode & S_ISGID) &&
1536 !(attr->ia_valid & ATTR_KILL_SGID))
1537 attr->ia_valid |= ATTR_KILL_SGID;
1538
1539 return ll_setattr_raw(de, attr, false);
1540 }
1541
1542 int ll_statfs_internal(struct super_block *sb, struct obd_statfs *osfs,
1543 __u64 max_age, __u32 flags)
1544 {
1545 struct ll_sb_info *sbi = ll_s2sbi(sb);
1546 struct obd_statfs obd_osfs;
1547 int rc;
1548
1549 rc = obd_statfs(NULL, sbi->ll_md_exp, osfs, max_age, flags);
1550 if (rc) {
1551 CERROR("md_statfs fails: rc = %d\n", rc);
1552 return rc;
1553 }
1554
1555 osfs->os_type = sb->s_magic;
1556
1557 CDEBUG(D_SUPER, "MDC blocks %llu/%llu objects %llu/%llu\n",
1558 osfs->os_bavail, osfs->os_blocks, osfs->os_ffree,
1559 osfs->os_files);
1560
1561 if (sbi->ll_flags & LL_SBI_LAZYSTATFS)
1562 flags |= OBD_STATFS_NODELAY;
1563
1564 rc = obd_statfs_rqset(sbi->ll_dt_exp, &obd_osfs, max_age, flags);
1565 if (rc) {
1566 CERROR("obd_statfs fails: rc = %d\n", rc);
1567 return rc;
1568 }
1569
1570 CDEBUG(D_SUPER, "OSC blocks %llu/%llu objects %llu/%llu\n",
1571 obd_osfs.os_bavail, obd_osfs.os_blocks, obd_osfs.os_ffree,
1572 obd_osfs.os_files);
1573
1574 osfs->os_bsize = obd_osfs.os_bsize;
1575 osfs->os_blocks = obd_osfs.os_blocks;
1576 osfs->os_bfree = obd_osfs.os_bfree;
1577 osfs->os_bavail = obd_osfs.os_bavail;
1578
1579 /* If we don't have as many objects free on the OST as inodes
1580 * on the MDS, we reduce the total number of inodes to
1581 * compensate, so that the "inodes in use" number is correct.
1582 */
1583 if (obd_osfs.os_ffree < osfs->os_ffree) {
1584 osfs->os_files = (osfs->os_files - osfs->os_ffree) +
1585 obd_osfs.os_ffree;
1586 osfs->os_ffree = obd_osfs.os_ffree;
1587 }
1588
1589 return rc;
1590 }
1591 int ll_statfs(struct dentry *de, struct kstatfs *sfs)
1592 {
1593 struct super_block *sb = de->d_sb;
1594 struct obd_statfs osfs;
1595 int rc;
1596
1597 CDEBUG(D_VFSTRACE, "VFS Op: at %llu jiffies\n", get_jiffies_64());
1598 ll_stats_ops_tally(ll_s2sbi(sb), LPROC_LL_STAFS, 1);
1599
1600 /* Some amount of caching on the client is allowed */
1601 rc = ll_statfs_internal(sb, &osfs,
1602 cfs_time_shift_64(-OBD_STATFS_CACHE_SECONDS),
1603 0);
1604 if (rc)
1605 return rc;
1606
1607 statfs_unpack(sfs, &osfs);
1608
1609 /* We need to downshift for all 32-bit kernels, because we can't
1610 * tell if the kernel is being called via sys_statfs64() or not.
1611 * Stop before overflowing f_bsize - in which case it is better
1612 * to just risk EOVERFLOW if caller is using old sys_statfs(). */
1613 if (sizeof(long) < 8) {
1614 while (osfs.os_blocks > ~0UL && sfs->f_bsize < 0x40000000) {
1615 sfs->f_bsize <<= 1;
1616
1617 osfs.os_blocks >>= 1;
1618 osfs.os_bfree >>= 1;
1619 osfs.os_bavail >>= 1;
1620 }
1621 }
1622
1623 sfs->f_blocks = osfs.os_blocks;
1624 sfs->f_bfree = osfs.os_bfree;
1625 sfs->f_bavail = osfs.os_bavail;
1626 sfs->f_fsid = ll_s2sbi(sb)->ll_fsid;
1627 return 0;
1628 }
1629
1630 void ll_inode_size_lock(struct inode *inode)
1631 {
1632 struct ll_inode_info *lli;
1633
1634 LASSERT(!S_ISDIR(inode->i_mode));
1635
1636 lli = ll_i2info(inode);
1637 mutex_lock(&lli->lli_size_mutex);
1638 }
1639
1640 void ll_inode_size_unlock(struct inode *inode)
1641 {
1642 struct ll_inode_info *lli;
1643
1644 lli = ll_i2info(inode);
1645 mutex_unlock(&lli->lli_size_mutex);
1646 }
1647
/* Refresh the VFS inode and llite-private state from the unpacked MDS
 * reply in @md.  Each field is updated only when the matching OBD_MD_*
 * bit is set in body->valid, so a partial reply touches only what the
 * server actually sent. */
void ll_update_inode(struct inode *inode, struct lustre_md *md)
{
	struct ll_inode_info *lli = ll_i2info(inode);
	struct mdt_body *body = md->body;
	struct lov_stripe_md *lsm = md->lsm;
	struct ll_sb_info *sbi = ll_i2sbi(inode);

	/* A striping descriptor must be present iff the reply carries
	 * EA size data. */
	LASSERT ((lsm != NULL) == ((body->valid & OBD_MD_FLEASIZE) != 0));
	if (lsm != NULL) {
		/* First layout seen for this inode: set up the cl_object
		 * (unless layout locking manages this instead). */
		if (!lli->lli_has_smd &&
		    !(sbi->ll_flags & LL_SBI_LAYOUT_LOCK))
			cl_file_inode_init(inode, md);

		lli->lli_maxbytes = lsm->lsm_maxbytes;
		if (lli->lli_maxbytes > MAX_LFS_FILESIZE)
			lli->lli_maxbytes = MAX_LFS_FILESIZE;
	}

	if (sbi->ll_flags & LL_SBI_RMT_CLIENT) {
		if (body->valid & OBD_MD_FLRMTPERM)
			ll_update_remote_perm(inode, md->remote_perm);
	}
#ifdef CONFIG_FS_POSIX_ACL
	else if (body->valid & OBD_MD_FLACL) {
		/* Swap in the ACL from the reply under lli_lock. */
		spin_lock(&lli->lli_lock);
		if (lli->lli_posix_acl)
			posix_acl_release(lli->lli_posix_acl);
		lli->lli_posix_acl = md->posix_acl;
		spin_unlock(&lli->lli_lock);
	}
#endif
	inode->i_ino = cl_fid_build_ino(&body->fid1,
					sbi->ll_flags & LL_SBI_32BIT_API);
	inode->i_generation = cl_fid_build_gen(&body->fid1);

	/* Inode timestamps only ever move forward here; the lvb copies
	 * always track the server values. */
	if (body->valid & OBD_MD_FLATIME) {
		if (body->atime > LTIME_S(inode->i_atime))
			LTIME_S(inode->i_atime) = body->atime;
		lli->lli_lvb.lvb_atime = body->atime;
	}
	if (body->valid & OBD_MD_FLMTIME) {
		if (body->mtime > LTIME_S(inode->i_mtime)) {
			CDEBUG(D_INODE, "setting ino %lu mtime from %lu to %llu\n",
			       inode->i_ino, LTIME_S(inode->i_mtime),
			       body->mtime);
			LTIME_S(inode->i_mtime) = body->mtime;
		}
		lli->lli_lvb.lvb_mtime = body->mtime;
	}
	if (body->valid & OBD_MD_FLCTIME) {
		if (body->ctime > LTIME_S(inode->i_ctime))
			LTIME_S(inode->i_ctime) = body->ctime;
		lli->lli_lvb.lvb_ctime = body->ctime;
	}
	/* FLMODE replaces only the permission bits, FLTYPE only the
	 * file-type bits of i_mode. */
	if (body->valid & OBD_MD_FLMODE)
		inode->i_mode = (inode->i_mode & S_IFMT)|(body->mode & ~S_IFMT);
	if (body->valid & OBD_MD_FLTYPE)
		inode->i_mode = (inode->i_mode & ~S_IFMT)|(body->mode & S_IFMT);
	LASSERT(inode->i_mode != 0);
	if (S_ISREG(inode->i_mode))
		inode->i_blkbits = min(PTLRPC_MAX_BRW_BITS + 1,
				       LL_MAX_BLKSIZE_BITS);
	else
		inode->i_blkbits = inode->i_sb->s_blocksize_bits;
	if (body->valid & OBD_MD_FLUID)
		inode->i_uid = make_kuid(&init_user_ns, body->uid);
	if (body->valid & OBD_MD_FLGID)
		inode->i_gid = make_kgid(&init_user_ns, body->gid);
	if (body->valid & OBD_MD_FLFLAGS)
		inode->i_flags = ll_ext_to_inode_flags(body->flags);
	if (body->valid & OBD_MD_FLNLINK)
		set_nlink(inode, body->nlink);
	if (body->valid & OBD_MD_FLRDEV)
		inode->i_rdev = old_decode_dev(body->rdev);

	if (body->valid & OBD_MD_FLID) {
		/* FID shouldn't be changed! */
		if (fid_is_sane(&lli->lli_fid)) {
			LASSERTF(lu_fid_eq(&lli->lli_fid, &body->fid1),
				 "Trying to change FID "DFID
				 " to the "DFID", inode %lu/%u(%p)\n",
				 PFID(&lli->lli_fid), PFID(&body->fid1),
				 inode->i_ino, inode->i_generation, inode);
		} else
			lli->lli_fid = body->fid1;
	}

	LASSERT(fid_seq(&lli->lli_fid) != 0);

	if (body->valid & OBD_MD_FLSIZE) {
		if (exp_connect_som(ll_i2mdexp(inode)) &&
		    S_ISREG(inode->i_mode)) {
			struct lustre_handle lockh;
			ldlm_mode_t mode;

			/* As it is possible a blocking ast has been processed
			 * by this time, we need to check there is an UPDATE
			 * lock on the client and set LLIF_MDS_SIZE_LOCK holding
			 * it. */
			mode = ll_take_md_lock(inode, MDS_INODELOCK_UPDATE,
					       &lockh, LDLM_FL_CBPENDING,
					       LCK_CR | LCK_CW |
					       LCK_PR | LCK_PW);
			if (mode) {
				if (lli->lli_flags & (LLIF_DONE_WRITING |
						      LLIF_EPOCH_PENDING |
						      LLIF_SOM_DIRTY)) {
					CERROR("ino %lu flags %u still has size authority! do not trust the size got from MDS\n",
					       inode->i_ino, lli->lli_flags);
				} else {
					/* Use old size assignment to avoid
					 * deadlock bz14138 & bz14326 */
					i_size_write(inode, body->size);
					spin_lock(&lli->lli_lock);
					lli->lli_flags |= LLIF_MDS_SIZE_LOCK;
					spin_unlock(&lli->lli_lock);
				}
				ldlm_lock_decref(&lockh, mode);
			}
		} else {
			/* Use old size assignment to avoid
			 * deadlock bz14138 & bz14326 */
			i_size_write(inode, body->size);

			CDEBUG(D_VFSTRACE, "inode=%lu, updating i_size %llu\n",
			       inode->i_ino, (unsigned long long)body->size);
		}

		if (body->valid & OBD_MD_FLBLOCKS)
			inode->i_blocks = body->blocks;
	}

	if (body->valid & OBD_MD_FLMDSCAPA) {
		LASSERT(md->mds_capa);
		ll_add_capa(inode, md->mds_capa);
	}
	if (body->valid & OBD_MD_FLOSSCAPA) {
		LASSERT(md->oss_capa);
		ll_add_capa(inode, md->oss_capa);
	}

	if (body->valid & OBD_MD_TSTATE) {
		/* Record that an HSM restore is in progress for the file. */
		if (body->t_state & MS_RESTORE)
			lli->lli_flags |= LLIF_FILE_RESTORING;
	}
}
1794
/* Initialize a freshly allocated inode from the MDS metadata passed as
 * @opaque: zero the timestamps, apply the metadata via ll_update_inode()
 * and wire up the inode/file/address-space operations matching the
 * inode's file type. */
void ll_read_inode2(struct inode *inode, void *opaque)
{
	struct lustre_md *md = opaque;
	struct ll_inode_info *lli = ll_i2info(inode);

	CDEBUG(D_VFSTRACE, "VFS Op:inode="DFID"(%p)\n",
	       PFID(&lli->lli_fid), inode);

	LASSERT(!lli->lli_has_smd);

	/* Core attributes from the MDS first.  This is a new inode, and
	 * the VFS doesn't zero times in the core inode so we have to do
	 * it ourselves.  They will be overwritten by either MDS or OST
	 * attributes - we just need to make sure they aren't newer. */
	LTIME_S(inode->i_mtime) = 0;
	LTIME_S(inode->i_atime) = 0;
	LTIME_S(inode->i_ctime) = 0;
	inode->i_rdev = 0;
	ll_update_inode(inode, md);

	/* OIDEBUG(inode); */

	/* initializing backing dev info. */
	inode->i_mapping->backing_dev_info = &s2lsi(inode->i_sb)->lsi_bdi;


	if (S_ISREG(inode->i_mode)) {
		struct ll_sb_info *sbi = ll_i2sbi(inode);

		inode->i_op = &ll_file_inode_operations;
		inode->i_fop = sbi->ll_fop;
		inode->i_mapping->a_ops = (struct address_space_operations *)&ll_aops;
	} else if (S_ISDIR(inode->i_mode)) {
		inode->i_op = &ll_dir_inode_operations;
		inode->i_fop = &ll_dir_operations;
	} else if (S_ISLNK(inode->i_mode)) {
		inode->i_op = &ll_fast_symlink_inode_operations;
	} else {
		/* Device nodes, FIFOs and sockets. */
		inode->i_op = &ll_special_inode_operations;

		init_special_inode(inode, inode->i_mode,
				   inode->i_rdev);
	}
}
1839
/* Final teardown of an inode leaving the cache: flush/discard all
 * cached pages, then clear llite-private state via ll_clear_inode()
 * and hand the inode back to the VFS with clear_inode(). */
void ll_delete_inode(struct inode *inode)
{
	struct cl_inode_info *lli = cl_i2info(inode);

	if (S_ISREG(inode->i_mode) && lli->lli_clob != NULL)
		/* discard all dirty pages before truncating them, required by
		 * osc_extent implementation at LU-1030. */
		cl_sync_file_range(inode, 0, OBD_OBJECT_EOF,
				   CL_FSYNC_DISCARD, 1);

	truncate_inode_pages_final(&inode->i_data);

	/* Workaround for LU-118 */
	if (inode->i_data.nrpages) {
		/* The empty lock/unlock of the mapping's tree_lock acts
		 * as a barrier before re-checking nrpages — see LU-118. */
		spin_lock_irq(&inode->i_data.tree_lock);
		spin_unlock_irq(&inode->i_data.tree_lock);
		LASSERTF(inode->i_data.nrpages == 0,
			 "inode=%lu/%u(%p) nrpages=%lu, see http://jira.whamcloud.com/browse/LU-118\n",
			 inode->i_ino, inode->i_generation, inode,
			 inode->i_data.nrpages);
	}
	/* Workaround end */

	ll_clear_inode(inode);
	clear_inode(inode);
}
1866
/* Handle the FSFILT_IOC_GETFLAGS/SETFLAGS ioctls on a Lustre inode.
 *
 * GETFLAGS fetches the flags from the MDS and copies them to user
 * space.  SETFLAGS pushes the new flags to the MDS and then, when the
 * file has OST objects, mirrors them to the OSTs as well.
 *
 * Returns 0 (or the put_user() result) on success, a negative errno on
 * failure, and -ENOSYS for unknown commands. */
int ll_iocontrol(struct inode *inode, struct file *file,
		 unsigned int cmd, unsigned long arg)
{
	struct ll_sb_info *sbi = ll_i2sbi(inode);
	struct ptlrpc_request *req = NULL;
	int rc, flags = 0;

	switch (cmd) {
	case FSFILT_IOC_GETFLAGS: {
		struct mdt_body *body;
		struct md_op_data *op_data;

		op_data = ll_prep_md_op_data(NULL, inode, NULL, NULL,
					     0, 0, LUSTRE_OPC_ANY,
					     NULL);
		if (IS_ERR(op_data))
			return PTR_ERR(op_data);

		/* Only the flags are wanted from the getattr reply. */
		op_data->op_valid = OBD_MD_FLFLAGS;
		rc = md_getattr(sbi->ll_md_exp, op_data, &req);
		ll_finish_md_op_data(op_data);
		if (rc) {
			CERROR("failure %d inode %lu\n", rc, inode->i_ino);
			return -abs(rc);
		}

		body = req_capsule_server_get(&req->rq_pill, &RMF_MDT_BODY);

		flags = body->flags;

		ptlrpc_req_finished(req);

		return put_user(flags, (int *)arg);
	}
	case FSFILT_IOC_SETFLAGS: {
		struct lov_stripe_md *lsm;
		struct obd_info oinfo = { { { 0 } } };
		struct md_op_data *op_data;

		if (get_user(flags, (int *)arg))
			return -EFAULT;

		/* First update the flags on the MDT inode. */
		op_data = ll_prep_md_op_data(NULL, inode, NULL, NULL, 0, 0,
					     LUSTRE_OPC_ANY, NULL);
		if (IS_ERR(op_data))
			return PTR_ERR(op_data);

		((struct ll_iattr *)&op_data->op_attr)->ia_attr_flags = flags;
		op_data->op_attr.ia_valid |= ATTR_ATTR_FLAG;
		rc = md_setattr(sbi->ll_md_exp, op_data,
				NULL, 0, NULL, 0, &req, NULL);
		ll_finish_md_op_data(op_data);
		ptlrpc_req_finished(req);
		if (rc)
			return rc;

		inode->i_flags = ll_ext_to_inode_flags(flags);

		/* Done unless the file has OST objects to mirror to. */
		lsm = ccc_inode_lsm_get(inode);
		if (!lsm_has_objects(lsm)) {
			ccc_inode_lsm_put(inode, lsm);
			return 0;
		}

		/* Mirror the new flags to the OST objects. */
		OBDO_ALLOC(oinfo.oi_oa);
		if (!oinfo.oi_oa) {
			ccc_inode_lsm_put(inode, lsm);
			return -ENOMEM;
		}
		oinfo.oi_md = lsm;
		oinfo.oi_oa->o_oi = lsm->lsm_oi;
		oinfo.oi_oa->o_flags = flags;
		oinfo.oi_oa->o_valid = OBD_MD_FLID | OBD_MD_FLFLAGS |
				       OBD_MD_FLGROUP;
		oinfo.oi_capa = ll_mdscapa_get(inode);
		obdo_set_parent_fid(oinfo.oi_oa, &ll_i2info(inode)->lli_fid);
		rc = obd_setattr_rqset(sbi->ll_dt_exp, &oinfo, NULL);
		capa_put(oinfo.oi_capa);
		OBDO_FREE(oinfo.oi_oa);
		ccc_inode_lsm_put(inode, lsm);

		if (rc && rc != -EPERM && rc != -EACCES)
			CERROR("osc_setattr_async fails: rc = %d\n", rc);

		return rc;
	}
	default:
		return -ENOSYS;
	}

	return 0;
}
1959
1960 int ll_flush_ctx(struct inode *inode)
1961 {
1962 struct ll_sb_info *sbi = ll_i2sbi(inode);
1963
1964 CDEBUG(D_SEC, "flush context for user %d\n",
1965 from_kuid(&init_user_ns, current_uid()));
1966
1967 obd_set_info_async(NULL, sbi->ll_md_exp,
1968 sizeof(KEY_FLUSH_CTX), KEY_FLUSH_CTX,
1969 0, NULL, NULL);
1970 obd_set_info_async(NULL, sbi->ll_dt_exp,
1971 sizeof(KEY_FLUSH_CTX), KEY_FLUSH_CTX,
1972 0, NULL, NULL);
1973 return 0;
1974 }
1975
/* umount -f client means force down, don't save state */
void ll_umount_begin(struct super_block *sb)
{
	struct ll_sb_info *sbi = ll_s2sbi(sb);
	struct obd_device *obd;
	struct obd_ioctl_data *ioc_data;

	CDEBUG(D_VFSTRACE, "VFS Op: superblock %p count %d active %d\n", sb,
	       sb->s_count, atomic_read(&sb->s_active));

	/* Mark both the MDC and the LOV devices as forced down. */
	obd = class_exp2obd(sbi->ll_md_exp);
	if (obd == NULL) {
		CERROR("Invalid MDC connection handle %#llx\n",
		       sbi->ll_md_exp->exp_handle.h_cookie);
		return;
	}
	obd->obd_force = 1;

	obd = class_exp2obd(sbi->ll_dt_exp);
	if (obd == NULL) {
		CERROR("Invalid LOV connection handle %#llx\n",
		       sbi->ll_dt_exp->exp_handle.h_cookie);
		return;
	}
	obd->obd_force = 1;

	/* Deactivate the OSCs under both exports.  An allocation failure
	 * here is silently tolerated — this is best-effort on a forced
	 * umount. */
	ioc_data = kzalloc(sizeof(*ioc_data), GFP_NOFS);
	if (ioc_data) {
		obd_iocontrol(IOC_OSC_SET_ACTIVE, sbi->ll_md_exp,
			      sizeof(*ioc_data), ioc_data, NULL);

		obd_iocontrol(IOC_OSC_SET_ACTIVE, sbi->ll_dt_exp,
			      sizeof(*ioc_data), ioc_data, NULL);

		OBD_FREE_PTR(ioc_data);
	}

	/* Really, we'd like to wait until there are no requests outstanding,
	 * and then continue.  For now, we just invalidate the requests,
	 * schedule() and sleep one second if needed, and hope.
	 */
	schedule();
}
2019
2020 int ll_remount_fs(struct super_block *sb, int *flags, char *data)
2021 {
2022 struct ll_sb_info *sbi = ll_s2sbi(sb);
2023 char *profilenm = get_profile_name(sb);
2024 int err;
2025 __u32 read_only;
2026
2027 if ((*flags & MS_RDONLY) != (sb->s_flags & MS_RDONLY)) {
2028 read_only = *flags & MS_RDONLY;
2029 err = obd_set_info_async(NULL, sbi->ll_md_exp,
2030 sizeof(KEY_READ_ONLY),
2031 KEY_READ_ONLY, sizeof(read_only),
2032 &read_only, NULL);
2033 if (err) {
2034 LCONSOLE_WARN("Failed to remount %s %s (%d)\n",
2035 profilenm, read_only ?
2036 "read-only" : "read-write", err);
2037 return err;
2038 }
2039
2040 if (read_only)
2041 sb->s_flags |= MS_RDONLY;
2042 else
2043 sb->s_flags &= ~MS_RDONLY;
2044
2045 if (sbi->ll_flags & LL_SBI_VERBOSE)
2046 LCONSOLE_WARN("Remounted %s %s\n", profilenm,
2047 read_only ? "read-only" : "read-write");
2048 }
2049 return 0;
2050 }
2051
/* Build or refresh a VFS inode from an MDS reply.
 *
 * If *@inode is non-NULL it is updated in place; otherwise a new inode
 * is created (requires @sb) from the fid in the reply body.  When the
 * intent @it carries a granted layout lock, the returned layout is also
 * applied to the cl_object.  Returns 0 or a negative errno; resources
 * unpacked from the reply (lsm, md) are released before returning. */
int ll_prep_inode(struct inode **inode, struct ptlrpc_request *req,
		  struct super_block *sb, struct lookup_intent *it)
{
	struct ll_sb_info *sbi = NULL;
	struct lustre_md md;
	int rc;

	LASSERT(*inode || sb);
	sbi = sb ? ll_s2sbi(sb) : ll_i2sbi(*inode);
	rc = md_get_lustre_md(sbi->ll_md_exp, req, sbi->ll_dt_exp,
			      sbi->ll_md_exp, &md);
	if (rc)
		return rc;

	if (*inode) {
		ll_update_inode(*inode, &md);
	} else {
		LASSERT(sb != NULL);

		/*
		 * At this point server returns to client's same fid as client
		 * generated for creating. So using ->fid1 is okay here.
		 */
		LASSERT(fid_is_sane(&md.body->fid1));

		*inode = ll_iget(sb, cl_fid_build_ino(&md.body->fid1,
					     sbi->ll_flags & LL_SBI_32BIT_API),
				 &md);
		if (*inode == NULL || IS_ERR(*inode)) {
#ifdef CONFIG_FS_POSIX_ACL
			/* Release the ACL unpacked from the reply — it
			 * will never be attached to an inode. */
			if (md.posix_acl) {
				posix_acl_release(md.posix_acl);
				md.posix_acl = NULL;
			}
#endif
			rc = IS_ERR(*inode) ? PTR_ERR(*inode) : -ENOMEM;
			*inode = NULL;
			CERROR("new_inode -fatal: rc %d\n", rc);
			goto out;
		}
	}

	/* Handling piggyback layout lock.
	 * Layout lock can be piggybacked by getattr and open request.
	 * The lsm can be applied to inode only if it comes with a layout lock
	 * otherwise correct layout may be overwritten, for example:
	 * 1. proc1: mdt returns a lsm but not granting layout
	 * 2. layout was changed by another client
	 * 3. proc2: refresh layout and layout lock granted
	 * 4. proc1: to apply a stale layout */
	if (it != NULL && it->d.lustre.it_lock_mode != 0) {
		struct lustre_handle lockh;
		struct ldlm_lock *lock;

		lockh.cookie = it->d.lustre.it_lock_handle;
		lock = ldlm_handle2lock(&lockh);
		LASSERT(lock != NULL);
		if (ldlm_has_layout(lock)) {
			struct cl_object_conf conf;

			memset(&conf, 0, sizeof(conf));
			conf.coc_opc = OBJECT_CONF_SET;
			conf.coc_inode = *inode;
			conf.coc_lock = lock;
			conf.u.coc_md = &md;
			(void)ll_layout_conf(*inode, &conf);
		}
		LDLM_LOCK_PUT(lock);
	}

out:
	if (md.lsm != NULL)
		obd_free_memmd(sbi->ll_dt_exp, &md.lsm);
	md_free_lustre_md(sbi->ll_md_exp, &md);
	return rc;
}
2128
2129 int ll_obd_statfs(struct inode *inode, void *arg)
2130 {
2131 struct ll_sb_info *sbi = NULL;
2132 struct obd_export *exp;
2133 char *buf = NULL;
2134 struct obd_ioctl_data *data = NULL;
2135 __u32 type;
2136 __u32 flags;
2137 int len = 0, rc;
2138
2139 if (!inode) {
2140 rc = -EINVAL;
2141 goto out_statfs;
2142 }
2143
2144 sbi = ll_i2sbi(inode);
2145 if (!sbi) {
2146 rc = -EINVAL;
2147 goto out_statfs;
2148 }
2149
2150 rc = obd_ioctl_getdata(&buf, &len, arg);
2151 if (rc)
2152 goto out_statfs;
2153
2154 data = (void *)buf;
2155 if (!data->ioc_inlbuf1 || !data->ioc_inlbuf2 ||
2156 !data->ioc_pbuf1 || !data->ioc_pbuf2) {
2157 rc = -EINVAL;
2158 goto out_statfs;
2159 }
2160
2161 if (data->ioc_inllen1 != sizeof(__u32) ||
2162 data->ioc_inllen2 != sizeof(__u32) ||
2163 data->ioc_plen1 != sizeof(struct obd_statfs) ||
2164 data->ioc_plen2 != sizeof(struct obd_uuid)) {
2165 rc = -EINVAL;
2166 goto out_statfs;
2167 }
2168
2169 memcpy(&type, data->ioc_inlbuf1, sizeof(__u32));
2170 if (type & LL_STATFS_LMV)
2171 exp = sbi->ll_md_exp;
2172 else if (type & LL_STATFS_LOV)
2173 exp = sbi->ll_dt_exp;
2174 else {
2175 rc = -ENODEV;
2176 goto out_statfs;
2177 }
2178
2179 flags = (type & LL_STATFS_NODELAY) ? OBD_STATFS_NODELAY : 0;
2180 rc = obd_iocontrol(IOC_OBD_STATFS, exp, len, buf, &flags);
2181 if (rc)
2182 goto out_statfs;
2183 out_statfs:
2184 if (buf)
2185 obd_ioctl_freedata(buf, len);
2186 return rc;
2187 }
2188
/*
 * Apply a PARAM_LLITE configuration record from the config log.
 *
 * The super_block pointer is recovered by parsing the hex address that
 * the mount code embedded in the obd instance name (e.g.
 * "lustre-client-aacfe000"), then the parameter is dispatched to the
 * matching llite /proc variable handler.
 *
 * Returns 0 on success (including "parameter consumed") or a negative
 * errno for a malformed instance name or handler failure.
 */
int ll_process_config(struct lustre_cfg *lcfg)
{
	char *ptr;
	void *sb;
	struct lprocfs_static_vars lvars;
	unsigned long x;
	int rc = 0;

	lprocfs_llite_init_vars(&lvars);

	/* The instance name contains the sb: lustre-client-aacfe000 */
	ptr = strrchr(lustre_cfg_string(lcfg, 0), '-');
	if (!ptr || !*(++ptr))
		return -EINVAL;
	/* Decode the hex suffix back into the super_block pointer. */
	if (sscanf(ptr, "%lx", &x) != 1)
		return -EINVAL;
	sb = (void *)x;
	/* This better be a real Lustre superblock! */
	LASSERT(s2lsi((struct super_block *)sb)->lsi_lmd->lmd_magic == LMD_MAGIC);

	/* Note we have not called client_common_fill_super yet, so
	   proc fns must be able to handle that! */
	rc = class_process_proc_param(PARAM_LLITE, lvars.obd_vars,
				      lcfg, sb);
	/* A positive return means "recognized and handled"; map to 0. */
	if (rc > 0)
		rc = 0;
	return rc;
}
2217
/* this function prepares md_op_data hint for passing to down to MD stack. */
/*
 * Fill in (or allocate, if @op_data is NULL) an md_op_data describing an
 * MD operation on @i1 (parent/primary inode) and optionally @i2
 * (child/secondary inode), carrying @name/@namelen, @mode, the opcode
 * @opc and an opaque @data pointer.
 *
 * Returns the prepared op_data, ERR_PTR(-ENAMETOOLONG) if the name
 * exceeds the server limit, or ERR_PTR(-ENOMEM) on allocation failure.
 * A caller-passed op_data is filled in place; an allocated one must be
 * released with ll_finish_md_op_data().
 */
struct md_op_data *ll_prep_md_op_data(struct md_op_data *op_data,
				      struct inode *i1, struct inode *i2,
				      const char *name, int namelen,
				      int mode, __u32 opc, void *data)
{
	LASSERT(i1 != NULL);

	if (namelen > ll_i2sbi(i1)->ll_namelen)
		return ERR_PTR(-ENAMETOOLONG);

	if (op_data == NULL)
		op_data = kzalloc(sizeof(*op_data), GFP_NOFS);

	if (op_data == NULL)
		return ERR_PTR(-ENOMEM);

	ll_i2gids(op_data->op_suppgids, i1, i2);
	op_data->op_fid1 = *ll_inode2fid(i1);
	op_data->op_capa1 = ll_mdscapa_get(i1);

	if (i2) {
		op_data->op_fid2 = *ll_inode2fid(i2);
		op_data->op_capa2 = ll_mdscapa_get(i2);
	} else {
		fid_zero(&op_data->op_fid2);
		op_data->op_capa2 = NULL;
	}

	op_data->op_name = name;
	op_data->op_namelen = namelen;
	op_data->op_mode = mode;
	op_data->op_mod_time = get_seconds();
	op_data->op_fsuid = from_kuid(&init_user_ns, current_fsuid());
	op_data->op_fsgid = from_kgid(&init_user_ns, current_fsgid());
	op_data->op_cap = cfs_curproc_cap_pack();
	op_data->op_bias = 0;
	op_data->op_cli_flags = 0;
	/* Volatile (unlinked-on-creation) files are flagged so the MDS
	 * treats the create specially. */
	if ((opc == LUSTRE_OPC_CREATE) && (name != NULL) &&
	     filename_is_volatile(name, namelen, NULL))
		op_data->op_bias |= MDS_CREATE_VOLATILE;
	op_data->op_opc = opc;
	op_data->op_mds = 0;
	op_data->op_data = data;

	/* If the file is being opened after mknod() (normally due to NFS)
	 * try to use the default stripe data from parent directory for
	 * allocating OST objects. Try to pass the parent FID to MDS. */
	if (opc == LUSTRE_OPC_CREATE && i1 == i2 && S_ISREG(i2->i_mode) &&
	    !ll_i2info(i2)->lli_has_smd) {
		struct ll_inode_info *lli = ll_i2info(i2);

		/* lli_lock guards the has_smd/pfid pair read below. */
		spin_lock(&lli->lli_lock);
		if (likely(!lli->lli_has_smd && !fid_is_zero(&lli->lli_pfid)))
			op_data->op_fid1 = lli->lli_pfid;
		spin_unlock(&lli->lli_lock);
		/** We ignore parent's capability temporary. */
	}

	/* When called by ll_setattr_raw, file is i1. */
	if (LLIF_DATA_MODIFIED & ll_i2info(i1)->lli_flags)
		op_data->op_bias |= MDS_DATA_MODIFIED;

	return op_data;
}
2283
/*
 * Release an md_op_data prepared by ll_prep_md_op_data(): drop both
 * capability references and free the structure itself.
 *
 * NOTE(review): op_data is allocated with kzalloc() in
 * ll_prep_md_op_data(); OBD_FREE_PTR() presumably resolves to kfree()
 * so the alloc/free pairing matches — verify against obd_support.h.
 */
void ll_finish_md_op_data(struct md_op_data *op_data)
{
	capa_put(op_data->op_capa1);
	capa_put(op_data->op_capa2);
	OBD_FREE_PTR(op_data);
}
2290
2291 int ll_show_options(struct seq_file *seq, struct dentry *dentry)
2292 {
2293 struct ll_sb_info *sbi;
2294
2295 LASSERT((seq != NULL) && (dentry != NULL));
2296 sbi = ll_s2sbi(dentry->d_sb);
2297
2298 if (sbi->ll_flags & LL_SBI_NOLCK)
2299 seq_puts(seq, ",nolock");
2300
2301 if (sbi->ll_flags & LL_SBI_FLOCK)
2302 seq_puts(seq, ",flock");
2303
2304 if (sbi->ll_flags & LL_SBI_LOCALFLOCK)
2305 seq_puts(seq, ",localflock");
2306
2307 if (sbi->ll_flags & LL_SBI_USER_XATTR)
2308 seq_puts(seq, ",user_xattr");
2309
2310 if (sbi->ll_flags & LL_SBI_LAZYSTATFS)
2311 seq_puts(seq, ",lazystatfs");
2312
2313 if (sbi->ll_flags & LL_SBI_USER_FID2PATH)
2314 seq_puts(seq, ",user_fid2path");
2315
2316 return 0;
2317 }
2318
2319 /**
2320 * Get obd name by cmd, and copy out to user space
2321 */
2322 int ll_get_obd_name(struct inode *inode, unsigned int cmd, unsigned long arg)
2323 {
2324 struct ll_sb_info *sbi = ll_i2sbi(inode);
2325 struct obd_device *obd;
2326
2327 if (cmd == OBD_IOC_GETDTNAME)
2328 obd = class_exp2obd(sbi->ll_dt_exp);
2329 else if (cmd == OBD_IOC_GETMDNAME)
2330 obd = class_exp2obd(sbi->ll_md_exp);
2331 else
2332 return -EINVAL;
2333
2334 if (!obd)
2335 return -ENOENT;
2336
2337 if (copy_to_user((void *)arg, obd->obd_name,
2338 strlen(obd->obd_name) + 1))
2339 return -EFAULT;
2340
2341 return 0;
2342 }
2343
2344 /**
2345 * Get lustre file system name by \a sbi. If \a buf is provided(non-NULL), the
2346 * fsname will be returned in this buffer; otherwise, a static buffer will be
2347 * used to store the fsname and returned to caller.
2348 */
2349 char *ll_get_fsname(struct super_block *sb, char *buf, int buflen)
2350 {
2351 static char fsname_static[MTI_NAME_MAXLEN];
2352 struct lustre_sb_info *lsi = s2lsi(sb);
2353 char *ptr;
2354 int len;
2355
2356 if (buf == NULL) {
2357 /* this means the caller wants to use static buffer
2358 * and it doesn't care about race. Usually this is
2359 * in error reporting path */
2360 buf = fsname_static;
2361 buflen = sizeof(fsname_static);
2362 }
2363
2364 len = strlen(lsi->lsi_lmd->lmd_profile);
2365 ptr = strrchr(lsi->lsi_lmd->lmd_profile, '-');
2366 if (ptr && (strcmp(ptr, "-client") == 0))
2367 len -= 7;
2368
2369 if (unlikely(len >= buflen))
2370 len = buflen - 1;
2371 strncpy(buf, lsi->lsi_lmd->lmd_profile, len);
2372 buf[len] = '\0';
2373
2374 return buf;
2375 }
2376
2377 static char *ll_d_path(struct dentry *dentry, char *buf, int bufsize)
2378 {
2379 char *path = NULL;
2380
2381 struct path p;
2382
2383 p.dentry = dentry;
2384 p.mnt = current->fs->root.mnt;
2385 path_get(&p);
2386 path = d_path(&p, buf, bufsize);
2387 path_put(&p);
2388
2389 return path;
2390 }
2391
/*
 * Log a D_WARNING message when a dirty page is discarded without being
 * written back (I/O error @ioret), identifying the file by fsname,
 * device, FID and -- best effort -- its path.
 *
 * Path resolution is optional: if the scratch page or an aliased dentry
 * cannot be obtained, an empty path is logged instead.
 */
void ll_dirty_page_discard_warn(struct page *page, int ioret)
{
	char *buf, *path = NULL;
	struct dentry *dentry = NULL;
	struct ccc_object *obj = cl_inode2ccc(page->mapping->host);

	/* this can be called inside spin lock so use GFP_ATOMIC. */
	buf = (char *)__get_free_page(GFP_ATOMIC);
	if (buf != NULL) {
		dentry = d_find_alias(page->mapping->host);
		if (dentry != NULL)
			path = ll_d_path(dentry, buf, PAGE_SIZE);
	}

	/* path may be an ERR_PTR from d_path(); guard before printing. */
	CDEBUG(D_WARNING,
	       "%s: dirty page discard: %s/fid: " DFID "/%s may get corrupted (rc %d)\n",
	       ll_get_fsname(page->mapping->host->i_sb, NULL, 0),
	       s2lsi(page->mapping->host->i_sb)->lsi_lmd->lmd_dev,
	       PFID(&obj->cob_header.coh_lu.loh_fid),
	       (path && !IS_ERR(path)) ? path : "", ioret);

	/* Drop the dentry reference taken by d_find_alias(). */
	if (dentry != NULL)
		dput(dentry);

	if (buf != NULL)
		free_page((unsigned long)buf);
}
This page took 0.128194 seconds and 5 git commands to generate.