/*
 * GPL HEADER START
 *
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 * GPL HEADER END
 */
/*
 * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
 * Use is subject to license terms.
 *
 * Copyright (c) 2011, 2012, Intel Corporation.
 */
/*
 * This file is part of Lustre, http://www.lustre.org/
 * Lustre is a trademark of Sun Microsystems, Inc.
 *
 * lustre/include/lustre/lustre_idl.h
 *
 * Lustre wire protocol definitions.
 */

/** \defgroup lustreidl lustreidl
 *
 * Lustre wire protocol definitions.
 *
 * ALL structs passing over the wire should be declared here. Structs
 * that are used in interfaces with userspace should go in lustre_user.h.
 *
 * All structs being declared here should be built from simple fixed-size
 * types (__u8, __u16, __u32, __u64) or be built from other types or
 * structs also declared in this file. Similarly, all flags and magic
 * values in those structs should also be declared here. This ensures
 * that the Lustre wire protocol is not influenced by external dependencies.
 *
 * The only other acceptable items in this file are VERY SIMPLE accessor
 * functions to avoid callers grubbing inside the structures, and the
 * prototypes of the swabber functions for each struct. Nothing that
 * depends on external functions or definitions should be in here.
 *
 * Structs must be properly aligned to put 64-bit values on an 8-byte
 * boundary. Any structs being added here must also be added to
 * utils/wirecheck.c and "make newwiretest" run to regenerate the
 * utils/wiretest.c sources. This allows us to verify that wire structs
 * have the proper alignment/size on all architectures.
 *
 * DO NOT CHANGE any of the structs, flags, values declared here and used
 * in released Lustre versions. Some structs may have padding fields that
 * can be used. Some structs might allow addition at the end (verify this
 * in the code to ensure that new/old clients that see this larger struct
 * do not fail, otherwise you need to implement protocol compatibility).
 *
 * We assume all nodes are either little-endian or big-endian, and we
 * always send messages in the sender's native format. The receiver
 * detects the message format by checking the 'magic' field of the message
 * (see lustre_msg_swabbed() below).
 *
 * Each wire type has corresponding 'lustre_swab_xxxtypexxx()' routines,
 * implemented either here, inline (trivial implementations) or in
 * ptlrpc/pack_generic.c. These 'swabbers' convert the type from "other"
 * endian, in-place in the message buffer.
 *
 * A swabber takes a single pointer argument. The caller must already have
 * verified that the length of the message buffer >= sizeof (type).
 *
 * For variable length types, a second 'lustre_swab_v_xxxtypexxx()' routine
 * may be defined that swabs just the variable part, after the caller has
 * verified that the message buffer is large enough.
 *
 * @{
 */

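/*
 * Example: a minimal sketch of a trivial swabber following the convention
 * above, assuming the in-place __swab64s()/__swab32s() helpers from
 * <linux/swab.h>.  The real lustre_swab_lu_seq_range() lives in
 * ptlrpc/pack_generic.c; struct lu_seq_range is declared later in this file:
 *
 *	void lustre_swab_lu_seq_range(struct lu_seq_range *range)
 *	{
 *		__swab64s(&range->lsr_start);
 *		__swab64s(&range->lsr_end);
 *		__swab32s(&range->lsr_index);
 *		__swab32s(&range->lsr_flags);
 *	}
 */
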
#ifndef _LUSTRE_IDL_H_
#define _LUSTRE_IDL_H_

#if !defined(LASSERT) && !defined(LPU64)
#include <linux/libcfs/libcfs.h> /* for LASSERT, LPU64, etc */
#endif

/* Defn's shared with user-space. */
#include <lustre/lustre_user.h>

/*
 * GENERAL STUFF
 */
/* FOO_REQUEST_PORTAL is for incoming requests on the FOO
 * FOO_REPLY_PORTAL is for incoming replies on the FOO
 * FOO_BULK_PORTAL is for incoming bulk on the FOO
 */

#define CONNMGR_REQUEST_PORTAL		1
#define CONNMGR_REPLY_PORTAL		2
//#define OSC_REQUEST_PORTAL		3
#define OSC_REPLY_PORTAL		4
//#define OSC_BULK_PORTAL		5
#define OST_IO_PORTAL			6
#define OST_CREATE_PORTAL		7
#define OST_BULK_PORTAL			8
//#define MDC_REQUEST_PORTAL		9
#define MDC_REPLY_PORTAL		10
//#define MDC_BULK_PORTAL		11
#define MDS_REQUEST_PORTAL		12
//#define MDS_REPLY_PORTAL		13
#define MDS_BULK_PORTAL			14
#define LDLM_CB_REQUEST_PORTAL		15
#define LDLM_CB_REPLY_PORTAL		16
#define LDLM_CANCEL_REQUEST_PORTAL	17
#define LDLM_CANCEL_REPLY_PORTAL	18
//#define PTLBD_REQUEST_PORTAL		19
//#define PTLBD_REPLY_PORTAL		20
//#define PTLBD_BULK_PORTAL		21
#define MDS_SETATTR_PORTAL		22
#define MDS_READPAGE_PORTAL		23
#define MDS_MDS_PORTAL			24

#define MGC_REPLY_PORTAL		25
#define MGS_REQUEST_PORTAL		26
#define MGS_REPLY_PORTAL		27
#define OST_REQUEST_PORTAL		28
#define FLD_REQUEST_PORTAL		29
#define SEQ_METADATA_PORTAL		30
#define SEQ_DATA_PORTAL			31
#define SEQ_CONTROLLER_PORTAL		32
#define MGS_BULK_PORTAL			33

/* Portal 63 is reserved for the Cray Inc DVS - nic@cray.com, roe@cray.com, n8851@cray.com */

/* packet types */
#define PTL_RPC_MSG_REQUEST	4711
#define PTL_RPC_MSG_ERR		4712
#define PTL_RPC_MSG_REPLY	4713

/* DON'T use swabbed values of MAGIC as magic! */
#define LUSTRE_MSG_MAGIC_V1	0x0BD00BD0
#define LUSTRE_MSG_MAGIC_V2	0x0BD00BD3

#define LUSTRE_MSG_MAGIC_V1_SWABBED	0xD00BD00B
#define LUSTRE_MSG_MAGIC_V2_SWABBED	0xD30BD00B

#define LUSTRE_MSG_MAGIC LUSTRE_MSG_MAGIC_V2

#define PTLRPC_MSG_VERSION	0x00000003
#define LUSTRE_VERSION_MASK	0xffff0000
#define LUSTRE_OBD_VERSION	0x00010000
#define LUSTRE_MDS_VERSION	0x00020000
#define LUSTRE_OST_VERSION	0x00030000
#define LUSTRE_DLM_VERSION	0x00040000
#define LUSTRE_LOG_VERSION	0x00050000
#define LUSTRE_MGS_VERSION	0x00060000

typedef __u32 mdsno_t;
typedef __u64 seqno_t;
typedef __u64 obd_id;
typedef __u64 obd_seq;
typedef __s64 obd_time;
typedef __u64 obd_size;
typedef __u64 obd_off;
typedef __u64 obd_blocks;
typedef __u64 obd_valid;
typedef __u32 obd_blksize;
typedef __u32 obd_mode;
typedef __u32 obd_uid;
typedef __u32 obd_gid;
typedef __u32 obd_flag;
typedef __u32 obd_count;

/**
 * Describes a range of sequence numbers: lsr_start is included in the
 * range while lsr_end is not.
 * The same structure is used in the fld module, where the lsr_index
 * field holds the mdt id of the home mdt.
 */
struct lu_seq_range {
	__u64 lsr_start;
	__u64 lsr_end;
	__u32 lsr_index;
	__u32 lsr_flags;
};

#define LU_SEQ_RANGE_MDT	0x0
#define LU_SEQ_RANGE_OST	0x1
#define LU_SEQ_RANGE_ANY	0x3

#define LU_SEQ_RANGE_MASK	0x3

static inline unsigned fld_range_type(const struct lu_seq_range *range)
{
	return range->lsr_flags & LU_SEQ_RANGE_MASK;
}

static inline int fld_range_is_ost(const struct lu_seq_range *range)
{
	return fld_range_type(range) == LU_SEQ_RANGE_OST;
}

static inline int fld_range_is_mdt(const struct lu_seq_range *range)
{
	return fld_range_type(range) == LU_SEQ_RANGE_MDT;
}

/**
 * The ANY range type is only used when the fld client sends an fld query
 * request but does not know whether the seq is MDT or OST: it sends the
 * request with the ANY type, meaning that either seq type returned by the
 * lookup is acceptable.
 */
static inline unsigned fld_range_is_any(const struct lu_seq_range *range)
{
	return fld_range_type(range) == LU_SEQ_RANGE_ANY;
}

static inline void fld_range_set_type(struct lu_seq_range *range,
				      unsigned flags)
{
	LASSERT(!(flags & ~LU_SEQ_RANGE_MASK));
	range->lsr_flags |= flags;
}

static inline void fld_range_set_mdt(struct lu_seq_range *range)
{
	fld_range_set_type(range, LU_SEQ_RANGE_MDT);
}

static inline void fld_range_set_ost(struct lu_seq_range *range)
{
	fld_range_set_type(range, LU_SEQ_RANGE_OST);
}

static inline void fld_range_set_any(struct lu_seq_range *range)
{
	fld_range_set_type(range, LU_SEQ_RANGE_ANY);
}

/**
 * returns the width of given range \a range
 */
static inline __u64 range_space(const struct lu_seq_range *range)
{
	return range->lsr_end - range->lsr_start;
}

/**
 * initialize range to zero
 */
static inline void range_init(struct lu_seq_range *range)
{
	range->lsr_start = range->lsr_end = range->lsr_index = 0;
}

/**
 * check if given seq id \a s is within given range \a range
 */
static inline int range_within(const struct lu_seq_range *range,
			       __u64 s)
{
	return s >= range->lsr_start && s < range->lsr_end;
}

static inline int range_is_sane(const struct lu_seq_range *range)
{
	return (range->lsr_end >= range->lsr_start);
}

static inline int range_is_zero(const struct lu_seq_range *range)
{
	return (range->lsr_start == 0 && range->lsr_end == 0);
}

static inline int range_is_exhausted(const struct lu_seq_range *range)
{
	return range_space(range) == 0;
}

/* return 0 if two ranges have the same location */
static inline int range_compare_loc(const struct lu_seq_range *r1,
				    const struct lu_seq_range *r2)
{
	return r1->lsr_index != r2->lsr_index ||
	       r1->lsr_flags != r2->lsr_flags;
}

#define DRANGE "[%#16.16"LPF64"x-%#16.16"LPF64"x):%x:%s"

#define PRANGE(range)				\
	(range)->lsr_start,			\
	(range)->lsr_end,			\
	(range)->lsr_index,			\
	fld_range_is_mdt(range) ? "mdt" : "ost"

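/*
 * Example: a minimal sketch of the range helpers above in use (assumes the
 * FID_SEQ_NORMAL constant defined later in this file):
 *
 *	struct lu_seq_range r = { 0 };
 *
 *	r.lsr_start = FID_SEQ_NORMAL;
 *	r.lsr_end = FID_SEQ_NORMAL + 0x400;
 *	fld_range_set_mdt(&r);
 *
 *	range_within(&r, FID_SEQ_NORMAL + 1);	yields 1 (in [start, end))
 *	range_within(&r, r.lsr_end);		yields 0 (lsr_end is excluded)
 *	range_space(&r);			yields 0x400 sequences left
 */
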
/** \defgroup lu_fid lu_fid
 * @{ */

/**
 * Flags for lustre_mdt_attrs::lma_compat and lustre_mdt_attrs::lma_incompat.
 * Deprecated since HSM and SOM attributes are now stored in separate on-disk
 * xattrs.
 */
enum lma_compat {
	LMAC_HSM	= 0x00000001,
	LMAC_SOM	= 0x00000002,
};

/**
 * Masks for all features that should be supported by a Lustre version to
 * access a specific file.
 * This information is stored in lustre_mdt_attrs::lma_incompat.
 */
enum lma_incompat {
	LMAI_RELEASED		= 0x00000001, /* file is released */
	LMAI_AGENT		= 0x00000002, /* agent inode */
	LMAI_REMOTE_PARENT	= 0x00000004, /* the parent of the object
						 is on the remote MDT */
};
#define LMA_INCOMPAT_SUPP	(LMAI_AGENT | LMAI_REMOTE_PARENT)

extern void lustre_lma_swab(struct lustre_mdt_attrs *lma);
extern void lustre_lma_init(struct lustre_mdt_attrs *lma,
			    const struct lu_fid *fid, __u32 incompat);

/**
 * SOM on-disk attributes stored in a separate xattr.
 */
struct som_attrs {
	/** Bitfield for supported data in this structure. For future use. */
	__u32	som_compat;

	/** Incompat feature list. The supported feature mask is available in
	 * SOM_INCOMPAT_SUPP */
	__u32	som_incompat;

	/** IO Epoch the SOM attributes belong to */
	__u64	som_ioepoch;
	/** total file size in objects */
	__u64	som_size;
	/** total fs blocks in objects */
	__u64	som_blocks;
	/** mds mount id the size is valid for */
	__u64	som_mountid;
};
extern void lustre_som_swab(struct som_attrs *attrs);

#define SOM_INCOMPAT_SUPP 0x0

/**
 * HSM on-disk attributes stored in a separate xattr.
 */
struct hsm_attrs {
	/** Bitfield for supported data in this structure. For future use. */
	__u32	hsm_compat;

	/** HSM flags, see hsm_flags enum below */
	__u32	hsm_flags;
	/** backend archive id associated with the file */
	__u64	hsm_arch_id;
	/** version associated with the last archiving, if any */
	__u64	hsm_arch_ver;
};
extern void lustre_hsm_swab(struct hsm_attrs *attrs);

/**
 * fid constants
 */
enum {
	/** LASTID file has zero OID */
	LUSTRE_FID_LASTID_OID = 0UL,
	/** initial fid id value */
	LUSTRE_FID_INIT_OID = 1UL
};

/** returns fid object sequence */
static inline __u64 fid_seq(const struct lu_fid *fid)
{
	return fid->f_seq;
}

/** returns fid object id */
static inline __u32 fid_oid(const struct lu_fid *fid)
{
	return fid->f_oid;
}

/** returns fid object version */
static inline __u32 fid_ver(const struct lu_fid *fid)
{
	return fid->f_ver;
}

static inline void fid_zero(struct lu_fid *fid)
{
	memset(fid, 0, sizeof(*fid));
}

static inline obd_id fid_ver_oid(const struct lu_fid *fid)
{
	return ((__u64)fid_ver(fid) << 32 | fid_oid(fid));
}

/**
 * Note that reserved SEQ numbers below 12 will conflict with ldiskfs
 * inodes in the IGIF namespace, so these reserved SEQ numbers can be
 * used for other purposes and not risk collisions with existing inodes.
 *
 * Different FID Format
 * http://arch.lustre.org/index.php?title=Interoperability_fids_zfs#NEW.0
 */
enum fid_seq {
	FID_SEQ_OST_MDT0	= 0,
	FID_SEQ_LLOG		= 1, /* unnamed llogs */
	FID_SEQ_ECHO		= 2,
	FID_SEQ_OST_MDT1	= 3,
	FID_SEQ_OST_MAX		= 9, /* Max MDT count before OST_on_FID */
	FID_SEQ_LLOG_NAME	= 10, /* named llogs */
	FID_SEQ_RSVD		= 11,
	FID_SEQ_IGIF		= 12,
	FID_SEQ_IGIF_MAX	= 0x0ffffffffULL,
	FID_SEQ_IDIF		= 0x100000000ULL,
	FID_SEQ_IDIF_MAX	= 0x1ffffffffULL,
	/* Normal FID sequence starts from this value, i.e. 1<<33 */
	FID_SEQ_START		= 0x200000000ULL,
	/* sequence for local pre-defined FIDs listed in local_oid */
	FID_SEQ_LOCAL_FILE	= 0x200000001ULL,
	FID_SEQ_DOT_LUSTRE	= 0x200000002ULL,
	/* sequence is used for local named objects FIDs generated
	 * by the local_object_storage library */
	FID_SEQ_LOCAL_NAME	= 0x200000003ULL,
	/* Because the current FLD only caches the fid sequence (not the
	 * oid) on the client side, any FID exposed to clients must ensure
	 * that all fids under one sequence are located on one MDT. */
	FID_SEQ_SPECIAL		= 0x200000004ULL,
	FID_SEQ_QUOTA		= 0x200000005ULL,
	FID_SEQ_QUOTA_GLB	= 0x200000006ULL,
	FID_SEQ_ROOT		= 0x200000007ULL,  /* Located on MDT0 */
	FID_SEQ_NORMAL		= 0x200000400ULL,
	FID_SEQ_LOV_DEFAULT	= 0xffffffffffffffffULL
};

#define OBIF_OID_MAX_BITS	32
#define OBIF_MAX_OID		(1ULL << OBIF_OID_MAX_BITS)
#define OBIF_OID_MASK		((1ULL << OBIF_OID_MAX_BITS) - 1)
#define IDIF_OID_MAX_BITS	48
#define IDIF_MAX_OID		(1ULL << IDIF_OID_MAX_BITS)
#define IDIF_OID_MASK		((1ULL << IDIF_OID_MAX_BITS) - 1)

/** OID for FID_SEQ_SPECIAL */
enum special_oid {
	/* Big Filesystem Lock to serialize rename operations */
	FID_OID_SPECIAL_BFL = 1UL,
};

/** OID for FID_SEQ_DOT_LUSTRE */
enum dot_lustre_oid {
	FID_OID_DOT_LUSTRE	= 1UL,
	FID_OID_DOT_LUSTRE_OBF	= 2UL,
};

static inline int fid_seq_is_mdt0(obd_seq seq)
{
	return (seq == FID_SEQ_OST_MDT0);
}

static inline int fid_seq_is_mdt(const __u64 seq)
{
	return seq == FID_SEQ_OST_MDT0 || seq >= FID_SEQ_NORMAL;
}

static inline int fid_seq_is_echo(obd_seq seq)
{
	return (seq == FID_SEQ_ECHO);
}

static inline int fid_is_echo(const struct lu_fid *fid)
{
	return fid_seq_is_echo(fid_seq(fid));
}

static inline int fid_seq_is_llog(obd_seq seq)
{
	return (seq == FID_SEQ_LLOG);
}

static inline int fid_is_llog(const struct lu_fid *fid)
{
	/* file with OID == 0 is not llog but contains last oid */
	return fid_seq_is_llog(fid_seq(fid)) && fid_oid(fid) > 0;
}

static inline int fid_seq_is_rsvd(const __u64 seq)
{
	return (seq > FID_SEQ_OST_MDT0 && seq <= FID_SEQ_RSVD);
}

static inline int fid_seq_is_special(const __u64 seq)
{
	return seq == FID_SEQ_SPECIAL;
}

static inline int fid_seq_is_local_file(const __u64 seq)
{
	return seq == FID_SEQ_LOCAL_FILE ||
	       seq == FID_SEQ_LOCAL_NAME;
}

static inline int fid_seq_is_root(const __u64 seq)
{
	return seq == FID_SEQ_ROOT;
}

static inline int fid_seq_is_dot(const __u64 seq)
{
	return seq == FID_SEQ_DOT_LUSTRE;
}

static inline int fid_seq_is_default(const __u64 seq)
{
	return seq == FID_SEQ_LOV_DEFAULT;
}

static inline int fid_is_mdt0(const struct lu_fid *fid)
{
	return fid_seq_is_mdt0(fid_seq(fid));
}

static inline void lu_root_fid(struct lu_fid *fid)
{
	fid->f_seq = FID_SEQ_ROOT;
	fid->f_oid = 1;
	fid->f_ver = 0;
}

/**
 * Check if a fid is igif or not.
 * \param fid the fid to be tested.
 * \return true if the fid is an igif; otherwise false.
 */
static inline int fid_seq_is_igif(const __u64 seq)
{
	return seq >= FID_SEQ_IGIF && seq <= FID_SEQ_IGIF_MAX;
}

static inline int fid_is_igif(const struct lu_fid *fid)
{
	return fid_seq_is_igif(fid_seq(fid));
}

/**
 * Check if a fid is idif or not.
 * \param fid the fid to be tested.
 * \return true if the fid is an idif; otherwise false.
 */
static inline int fid_seq_is_idif(const __u64 seq)
{
	return seq >= FID_SEQ_IDIF && seq <= FID_SEQ_IDIF_MAX;
}

static inline int fid_is_idif(const struct lu_fid *fid)
{
	return fid_seq_is_idif(fid_seq(fid));
}

static inline int fid_is_local_file(const struct lu_fid *fid)
{
	return fid_seq_is_local_file(fid_seq(fid));
}

static inline int fid_seq_is_norm(const __u64 seq)
{
	return (seq >= FID_SEQ_NORMAL);
}

static inline int fid_is_norm(const struct lu_fid *fid)
{
	return fid_seq_is_norm(fid_seq(fid));
}

/* convert an OST objid into an IDIF FID SEQ number */
static inline obd_seq fid_idif_seq(obd_id id, __u32 ost_idx)
{
	return FID_SEQ_IDIF | (ost_idx << 16) | ((id >> 32) & 0xffff);
}

/* convert a packed IDIF FID into an OST objid */
static inline obd_id fid_idif_id(obd_seq seq, __u32 oid, __u32 ver)
{
	return ((__u64)ver << 48) | ((seq & 0xffff) << 32) | oid;
}

/* extract the ost index from an IDIF FID */
static inline __u32 fid_idif_ost_idx(const struct lu_fid *fid)
{
	LASSERT(fid_is_idif(fid));
	return (fid_seq(fid) >> 16) & 0xffff;
}

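/*
 * Example: packing legacy OST object 0x123456789ab on OST index 5 into the
 * IDIF namespace with the helpers above (illustrative worked values):
 *
 *	seq = fid_idif_seq(0x123456789abULL, 5)
 *	    = FID_SEQ_IDIF | (5 << 16) | 0x0123	= 0x100050123
 *	oid = id & 0xffffffff			= 0x456789ab
 *
 * and unpacking it again:
 *
 *	fid_idif_id(0x100050123ULL, 0x456789ab, 0) = 0x123456789ab
 */
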
/* extract OST sequence (group) from a wire ost_id (id/seq) pair */
static inline obd_seq ostid_seq(const struct ost_id *ostid)
{
	if (fid_seq_is_mdt0(ostid->oi.oi_seq))
		return FID_SEQ_OST_MDT0;

	if (fid_seq_is_default(ostid->oi.oi_seq))
		return FID_SEQ_LOV_DEFAULT;

	if (fid_is_idif(&ostid->oi_fid))
		return FID_SEQ_OST_MDT0;

	return fid_seq(&ostid->oi_fid);
}

/* extract OST objid from a wire ost_id (id/seq) pair */
static inline obd_id ostid_id(const struct ost_id *ostid)
{
	if (fid_seq_is_mdt0(ostid_seq(ostid)))
		return ostid->oi.oi_id & IDIF_OID_MASK;

	if (fid_is_idif(&ostid->oi_fid))
		return fid_idif_id(fid_seq(&ostid->oi_fid),
				   fid_oid(&ostid->oi_fid), 0);

	return fid_oid(&ostid->oi_fid);
}

static inline void ostid_set_seq(struct ost_id *oi, __u64 seq)
{
	if (fid_seq_is_mdt0(seq) || fid_seq_is_default(seq)) {
		oi->oi.oi_seq = seq;
	} else {
		oi->oi_fid.f_seq = seq;
		/* Note: if f_oid + f_ver is zero, we need to initialize it
		 * to be 1, otherwise ostid_seq will treat this as an old
		 * ostid (oi_seq == 0) */
		if (oi->oi_fid.f_oid == 0 && oi->oi_fid.f_ver == 0)
			oi->oi_fid.f_oid = LUSTRE_FID_INIT_OID;
	}
}

static inline void ostid_set_seq_mdt0(struct ost_id *oi)
{
	ostid_set_seq(oi, FID_SEQ_OST_MDT0);
}

static inline void ostid_set_seq_echo(struct ost_id *oi)
{
	ostid_set_seq(oi, FID_SEQ_ECHO);
}

static inline void ostid_set_seq_llog(struct ost_id *oi)
{
	ostid_set_seq(oi, FID_SEQ_LLOG);
}

/**
 * Note: we need to check oi_seq to decide where to set oi_id,
 * so oi_seq should always be set ahead of oi_id.
 */
static inline void ostid_set_id(struct ost_id *oi, __u64 oid)
{
	if (fid_seq_is_mdt0(ostid_seq(oi))) {
		if (oid >= IDIF_MAX_OID) {
			CERROR("Bad "LPU64" to set "DOSTID"\n",
			       oid, POSTID(oi));
			return;
		}
		oi->oi.oi_id = oid;
	} else {
		if (oid > OBIF_MAX_OID) {
			CERROR("Bad "LPU64" to set "DOSTID"\n",
			       oid, POSTID(oi));
			return;
		}
		oi->oi_fid.f_oid = oid;
	}
}

static inline void ostid_inc_id(struct ost_id *oi)
{
	if (fid_seq_is_mdt0(ostid_seq(oi))) {
		if (unlikely(ostid_id(oi) + 1 > IDIF_MAX_OID)) {
			CERROR("Bad inc "DOSTID"\n", POSTID(oi));
			return;
		}
		oi->oi.oi_id++;
	} else {
		oi->oi_fid.f_oid++;
	}
}

static inline void ostid_dec_id(struct ost_id *oi)
{
	if (fid_seq_is_mdt0(ostid_seq(oi)))
		oi->oi.oi_id--;
	else
		oi->oi_fid.f_oid--;
}

/**
 * Unpack an OST object id/seq (group) into a FID. This is needed for
 * converting all obdo, lmm, lsm, etc. 64-bit id/seq pairs into proper
 * FIDs. Note that if an id/seq is already in FID/IDIF format it will
 * be passed through unchanged. Only legacy OST objects in "group 0"
 * will be mapped into the IDIF namespace so that they can fit into the
 * struct lu_fid fields without loss. For reference see:
 * http://arch.lustre.org/index.php?title=Interoperability_fids_zfs
 */
static inline int ostid_to_fid(struct lu_fid *fid, struct ost_id *ostid,
			       __u32 ost_idx)
{
	if (ost_idx > 0xffff) {
		CERROR("bad ost_idx, "DOSTID" ost_idx:%u\n", POSTID(ostid),
		       ost_idx);
		return -EBADF;
	}

	if (fid_seq_is_mdt0(ostid_seq(ostid))) {
		/* This is a "legacy" (old 1.x/2.early) OST object in "group 0"
		 * that we map into the IDIF namespace. It allows up to 2^48
		 * objects per OST, as this is the object namespace that has
		 * been in production for years. This can handle create rates
		 * of 1M objects/s/OST for 9 years, or combinations thereof. */
		if (ostid_id(ostid) >= IDIF_MAX_OID) {
			CERROR("bad MDT0 id, "DOSTID" ost_idx:%u\n",
			       POSTID(ostid), ost_idx);
			return -EBADF;
		}
		fid->f_seq = fid_idif_seq(ostid_id(ostid), ost_idx);
		/* truncate to 32 bits by assignment */
		fid->f_oid = ostid_id(ostid);
		/* in theory, not currently used */
		fid->f_ver = ostid_id(ostid) >> 48;
	} else /* if (fid_seq_is_idif(seq) || fid_seq_is_norm(seq)) */ {
		/* This is either an IDIF object, which identifies objects
		 * across all OSTs, or a regular FID. The IDIF namespace maps
		 * legacy OST objects into the FID namespace. In both cases,
		 * we just pass the FID through, no conversion needed. */
		if (ostid->oi_fid.f_ver != 0) {
			CERROR("bad MDT0 id, "DOSTID" ost_idx:%u\n",
			       POSTID(ostid), ost_idx);
			return -EBADF;
		}
		*fid = ostid->oi_fid;
	}

	return 0;
}

/* pack any OST FID into an ostid (id/seq) for the wire/disk */
static inline int fid_to_ostid(const struct lu_fid *fid, struct ost_id *ostid)
{
	if (unlikely(fid_seq_is_igif(fid->f_seq))) {
		CERROR("bad IGIF, "DFID"\n", PFID(fid));
		return -EBADF;
	}

	if (fid_is_idif(fid)) {
		ostid_set_seq_mdt0(ostid);
		ostid_set_id(ostid, fid_idif_id(fid_seq(fid), fid_oid(fid),
						fid_ver(fid)));
	} else {
		ostid->oi_fid = *fid;
	}

	return 0;
}

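/*
 * Example: round-tripping a legacy "group 0" object through the FID
 * namespace with the converters above (illustrative worked values):
 *
 *	ostid_set_seq_mdt0(&oi);	oi_seq = FID_SEQ_OST_MDT0
 *	ostid_set_id(&oi, 0x456789ab);	oi_id  = 0x456789ab
 *	ostid_to_fid(&fid, &oi, 5);	fid = [0x100050000:0x456789ab:0x0]
 *	fid_to_ostid(&fid, &oi);	back to oi_seq == 0, oi_id == 0x456789ab
 */
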
/* Check whether the fid is for LAST_ID */
static inline int fid_is_last_id(const struct lu_fid *fid)
{
	return (fid_oid(fid) == 0);
}

/**
 * Get the inode number from an igif.
 * \param fid an igif to get the inode number from.
 * \return the inode number for the igif.
 */
static inline ino_t lu_igif_ino(const struct lu_fid *fid)
{
	return fid_seq(fid);
}

extern void lustre_swab_ost_id(struct ost_id *oid);

/**
 * Get the inode generation from an igif.
 * \param fid an igif to get the inode generation from.
 * \return the inode generation for the igif.
 */
static inline __u32 lu_igif_gen(const struct lu_fid *fid)
{
	return fid_oid(fid);
}

/**
 * Build an igif from the inode number/generation.
 */
static inline void lu_igif_build(struct lu_fid *fid, __u32 ino, __u32 gen)
{
	fid->f_seq = ino;
	fid->f_oid = gen;
	fid->f_ver = 0;
}

/*
 * Fids are transmitted across the network (in the sender's byte order),
 * and stored on disk in big-endian order.
 */
static inline void fid_cpu_to_le(struct lu_fid *dst, const struct lu_fid *src)
{
	/* check that all fields are converted */
	CLASSERT(sizeof *src ==
		 sizeof fid_seq(src) +
		 sizeof fid_oid(src) + sizeof fid_ver(src));
	dst->f_seq = cpu_to_le64(fid_seq(src));
	dst->f_oid = cpu_to_le32(fid_oid(src));
	dst->f_ver = cpu_to_le32(fid_ver(src));
}

static inline void fid_le_to_cpu(struct lu_fid *dst, const struct lu_fid *src)
{
	/* check that all fields are converted */
	CLASSERT(sizeof *src ==
		 sizeof fid_seq(src) +
		 sizeof fid_oid(src) + sizeof fid_ver(src));
	dst->f_seq = le64_to_cpu(fid_seq(src));
	dst->f_oid = le32_to_cpu(fid_oid(src));
	dst->f_ver = le32_to_cpu(fid_ver(src));
}

static inline void fid_cpu_to_be(struct lu_fid *dst, const struct lu_fid *src)
{
	/* check that all fields are converted */
	CLASSERT(sizeof *src ==
		 sizeof fid_seq(src) +
		 sizeof fid_oid(src) + sizeof fid_ver(src));
	dst->f_seq = cpu_to_be64(fid_seq(src));
	dst->f_oid = cpu_to_be32(fid_oid(src));
	dst->f_ver = cpu_to_be32(fid_ver(src));
}

static inline void fid_be_to_cpu(struct lu_fid *dst, const struct lu_fid *src)
{
	/* check that all fields are converted */
	CLASSERT(sizeof *src ==
		 sizeof fid_seq(src) +
		 sizeof fid_oid(src) + sizeof fid_ver(src));
	dst->f_seq = be64_to_cpu(fid_seq(src));
	dst->f_oid = be32_to_cpu(fid_oid(src));
	dst->f_ver = be32_to_cpu(fid_ver(src));
}

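/*
 * Example: a minimal sketch of preparing a fid for on-disk (big-endian)
 * storage and restoring it on the local host, using the converters above:
 *
 *	struct lu_fid cpu = { .f_seq = FID_SEQ_ROOT, .f_oid = 1, .f_ver = 0 };
 *	struct lu_fid disk;
 *
 *	fid_cpu_to_be(&disk, &cpu);	disk image is big-endian
 *	fid_be_to_cpu(&cpu, &disk);	back to host byte order
 */
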
static inline int fid_is_sane(const struct lu_fid *fid)
{
	return fid != NULL &&
	       ((fid_seq(fid) >= FID_SEQ_START && fid_ver(fid) == 0) ||
		fid_is_igif(fid) || fid_is_idif(fid) ||
		fid_seq_is_rsvd(fid_seq(fid)));
}

static inline int fid_is_zero(const struct lu_fid *fid)
{
	return fid_seq(fid) == 0 && fid_oid(fid) == 0;
}

extern void lustre_swab_lu_fid(struct lu_fid *fid);
extern void lustre_swab_lu_seq_range(struct lu_seq_range *range);

static inline int lu_fid_eq(const struct lu_fid *f0, const struct lu_fid *f1)
{
	/* Check that there is no alignment padding. */
	CLASSERT(sizeof *f0 ==
		 sizeof f0->f_seq + sizeof f0->f_oid + sizeof f0->f_ver);
	return memcmp(f0, f1, sizeof *f0) == 0;
}

#define __diff_normalize(val0, val1)				\
({								\
	typeof(val0) __val0 = (val0);				\
	typeof(val1) __val1 = (val1);				\
								\
	(__val0 == __val1 ? 0 : __val0 > __val1 ? +1 : -1);	\
})

static inline int lu_fid_cmp(const struct lu_fid *f0,
			     const struct lu_fid *f1)
{
	return
		__diff_normalize(fid_seq(f0), fid_seq(f1)) ?:
		__diff_normalize(fid_oid(f0), fid_oid(f1)) ?:
		__diff_normalize(fid_ver(f0), fid_ver(f1));
}

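/*
 * Example: lu_fid_cmp() yields a total order suitable for sorting fids,
 * comparing first by sequence, then object id, then version:
 *
 *	[0x200000007:0x1:0x0] < [0x200000400:0x1:0x0]	(smaller seq wins)
 *	[0x200000400:0x1:0x0] < [0x200000400:0x2:0x0]	(same seq, oid decides)
 */
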
c5b60ba7 914static inline void ostid_cpu_to_le(const struct ost_id *src_oi,
d7e09d03
PT
915 struct ost_id *dst_oi)
916{
917 if (fid_seq_is_mdt0(ostid_seq(src_oi))) {
918 dst_oi->oi.oi_id = cpu_to_le64(src_oi->oi.oi_id);
919 dst_oi->oi.oi_seq = cpu_to_le64(src_oi->oi.oi_seq);
920 } else {
921 fid_cpu_to_le(&dst_oi->oi_fid, &src_oi->oi_fid);
922 }
923}
924
c5b60ba7 925static inline void ostid_le_to_cpu(const struct ost_id *src_oi,
d7e09d03
PT
926 struct ost_id *dst_oi)
927{
928 if (fid_seq_is_mdt0(ostid_seq(src_oi))) {
929 dst_oi->oi.oi_id = le64_to_cpu(src_oi->oi.oi_id);
930 dst_oi->oi.oi_seq = le64_to_cpu(src_oi->oi.oi_seq);
931 } else {
932 fid_le_to_cpu(&dst_oi->oi_fid, &src_oi->oi_fid);
933 }
934}
935
/** @} lu_fid */

/** \defgroup lu_dir lu_dir
 * @{ */

/**
 * Enumeration of possible directory entry attributes.
 *
 * Attributes follow the directory entry header in the order they appear
 * in this enumeration.
 */
enum lu_dirent_attrs {
	LUDA_FID		= 0x0001,
	LUDA_TYPE		= 0x0002,
	LUDA_64BITHASH		= 0x0004,

	/* The following attrs are for MDT internal use only,
	 * not visible to clients */

	/* Verify the dirent consistency */
	LUDA_VERIFY		= 0x8000,
	/* Only check but do not repair the dirent inconsistency */
	LUDA_VERIFY_DRYRUN	= 0x4000,
	/* The dirent has been repaired, or is to be repaired (dryrun). */
	LUDA_REPAIR		= 0x2000,
	/* The system is upgraded, has been or is to be repaired (dryrun). */
	LUDA_UPGRADE		= 0x1000,
	/* Ignore this record, go directly to the next one. */
	LUDA_IGNORE		= 0x0800,
};

#define LU_DIRENT_ATTRS_MASK	0xf800

/**
 * Layout of readdir pages, as transmitted on the wire.
 */
struct lu_dirent {
	/** valid if LUDA_FID is set. */
	struct lu_fid	lde_fid;
	/** a unique entry identifier: a hash or an offset. */
	__u64		lde_hash;
	/** total record length, including all attributes. */
	__u16		lde_reclen;
	/** name length */
	__u16		lde_namelen;
	/** optional variable size attributes following this entry.
	 * taken from enum lu_dirent_attrs.
	 */
	__u32		lde_attrs;
	/** name is followed by the attributes indicated in ->lde_attrs, in
	 * their natural order. After the last attribute, padding bytes are
	 * added to make ->lde_reclen a multiple of 8.
	 */
	char		lde_name[0];
};

/*
 * Definitions of optional directory entry attributes formats.
 *
 * Individual attributes do not have their length encoded in a generic way.
 * It is assumed that the consumer of an attribute knows its format. This
 * means that it is impossible to skip over an unknown attribute, except by
 * skipping over all remaining attributes (by using ->lde_reclen), which is
 * not too constraining, because new server versions will append new
 * attributes at the end of an entry.
 */

/**
 * Fid directory attribute: a fid of an object referenced by the entry. This
 * will be almost always requested by the client and supplied by the server.
 *
 * Aligned to 8 bytes.
 */
/* To have compatibility with 1.8, let's have the fid in the lu_dirent
 * struct. */

/**
 * File type.
 *
 * Aligned to 2 bytes.
 */
struct luda_type {
	__u16 lt_type;
};

struct lu_dirpage {
	__u64		ldp_hash_start;
	__u64		ldp_hash_end;
	__u32		ldp_flags;
	__u32		ldp_pad0;
	struct lu_dirent ldp_entries[0];
};

enum lu_dirpage_flags {
	/**
	 * dirpage contains no entry.
	 */
	LDF_EMPTY = 1 << 0,
	/**
	 * last entry's lde_hash equals ldp_hash_end.
	 */
	LDF_COLLIDE = 1 << 1
};

static inline struct lu_dirent *lu_dirent_start(struct lu_dirpage *dp)
{
	if (le32_to_cpu(dp->ldp_flags) & LDF_EMPTY)
		return NULL;
	else
		return dp->ldp_entries;
}

static inline struct lu_dirent *lu_dirent_next(struct lu_dirent *ent)
{
	struct lu_dirent *next;

	if (le16_to_cpu(ent->lde_reclen) != 0)
		next = ((void *)ent) + le16_to_cpu(ent->lde_reclen);
	else
		next = NULL;

	return next;
}

static inline int lu_dirent_calc_size(int namelen, __u16 attr)
{
	int size;

	if (attr & LUDA_TYPE) {
		const unsigned align = sizeof(struct luda_type) - 1;
		size = (sizeof(struct lu_dirent) + namelen + align) & ~align;
		size += sizeof(struct luda_type);
	} else
		size = sizeof(struct lu_dirent) + namelen;

	return (size + 7) & ~7;
}

static inline int lu_dirent_size(struct lu_dirent *ent)
{
	if (le16_to_cpu(ent->lde_reclen) == 0) {
		return lu_dirent_calc_size(le16_to_cpu(ent->lde_namelen),
					   le32_to_cpu(ent->lde_attrs));
	}
	return le16_to_cpu(ent->lde_reclen);
}

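/*
 * Example: a minimal sketch of walking the entries of one lu_dirpage with
 * the helpers above; process() stands in for a hypothetical caller-supplied
 * consumer, and a real reader must additionally honour ldp_hash_end and
 * LDF_COLLIDE when chaining pages:
 *
 *	struct lu_dirent *ent;
 *
 *	for (ent = lu_dirent_start(dp); ent != NULL;
 *	     ent = lu_dirent_next(ent))
 *		process(ent->lde_name, le16_to_cpu(ent->lde_namelen));
 */
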
#define MDS_DIR_END_OFF 0xfffffffffffffffeULL

/**
 * MDS_READPAGE page size
 *
 * This is the directory page size packed in MDS_READPAGE RPC.
 * It's different from PAGE_CACHE_SIZE because the client needs to
 * access the struct lu_dirpage header packed at the beginning of
 * the "page"; without this there isn't any way to know where the
 * lu_dirpage header is if the client and server PAGE_CACHE_SIZE differ.
 */
#define LU_PAGE_SHIFT 12
#define LU_PAGE_SIZE  (1UL << LU_PAGE_SHIFT)
#define LU_PAGE_MASK  (~(LU_PAGE_SIZE - 1))

#define LU_PAGE_COUNT (1 << (PAGE_CACHE_SHIFT - LU_PAGE_SHIFT))

/** @} lu_dir */

struct lustre_handle {
	__u64 cookie;
};
#define DEAD_HANDLE_MAGIC 0xdeadbeefcafebabeULL

static inline int lustre_handle_is_used(struct lustre_handle *lh)
{
	return lh->cookie != 0ull;
}

static inline int lustre_handle_equal(const struct lustre_handle *lh1,
				      const struct lustre_handle *lh2)
{
	return lh1->cookie == lh2->cookie;
}

static inline void lustre_handle_copy(struct lustre_handle *tgt,
				      struct lustre_handle *src)
{
	tgt->cookie = src->cookie;
}

/* flags for lm_flags */
#define MSGHDR_AT_SUPPORT	0x1
#define MSGHDR_CKSUM_INCOMPAT18	0x2

#define lustre_msg lustre_msg_v2
/* we depend on this structure to be 8-byte aligned */
/* this type is only endian-adjusted in lustre_unpack_msg() */
struct lustre_msg_v2 {
	__u32 lm_bufcount;
	__u32 lm_secflvr;
	__u32 lm_magic;
	__u32 lm_repsize;
	__u32 lm_cksum;
	__u32 lm_flags;
	__u32 lm_padding_2;
	__u32 lm_padding_3;
	__u32 lm_buflens[0];
};

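/*
 * Example: the variable-length lm_buflens[] tail means the total message
 * size depends on lm_bufcount.  A minimal sketch of the size computation
 * for a message already in host byte order, assuming the usual 8-byte
 * rounding of the header and of each buffer (the real packing helpers live
 * in ptlrpc/pack_generic.c):
 *
 *	static inline __u32 msg_v2_size(struct lustre_msg_v2 *m)
 *	{
 *		__u32 size = offsetof(struct lustre_msg_v2,
 *				      lm_buflens[m->lm_bufcount]);
 *		__u32 i;
 *
 *		size = (size + 7) & ~7;
 *		for (i = 0; i < m->lm_bufcount; i++)
 *			size += (m->lm_buflens[i] + 7) & ~7;
 *		return size;
 *	}
 */
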
/* without gss, ptlrpc_body is put at the first buffer. */
#define PTLRPC_NUM_VERSIONS	4
#define JOBSTATS_JOBID_SIZE	32 /* 32-byte string */
struct ptlrpc_body_v3 {
	struct lustre_handle pb_handle;
	__u32 pb_type;
	__u32 pb_version;
	__u32 pb_opc;
	__u32 pb_status;
	__u64 pb_last_xid;
	__u64 pb_last_seen;
	__u64 pb_last_committed;
	__u64 pb_transno;
	__u32 pb_flags;
	__u32 pb_op_flags;
	__u32 pb_conn_cnt;
	__u32 pb_timeout;	/* for req, the deadline, for rep, the service est */
	__u32 pb_service_time;	/* for rep, actual service time */
	__u32 pb_limit;
	__u64 pb_slv;
	/* VBR: pre-versions */
	__u64 pb_pre_versions[PTLRPC_NUM_VERSIONS];
	/* padding for future needs */
	__u64 pb_padding[4];
	char  pb_jobid[JOBSTATS_JOBID_SIZE];
};
#define ptlrpc_body ptlrpc_body_v3

struct ptlrpc_body_v2 {
	struct lustre_handle pb_handle;
	__u32 pb_type;
	__u32 pb_version;
	__u32 pb_opc;
	__u32 pb_status;
	__u64 pb_last_xid;
	__u64 pb_last_seen;
	__u64 pb_last_committed;
	__u64 pb_transno;
	__u32 pb_flags;
	__u32 pb_op_flags;
	__u32 pb_conn_cnt;
	__u32 pb_timeout;	/* for req, the deadline, for rep, the service est */
	__u32 pb_service_time;	/* for rep, actual service time, also used for
				   net_latency of req */
	__u32 pb_limit;
	__u64 pb_slv;
	/* VBR: pre-versions */
	__u64 pb_pre_versions[PTLRPC_NUM_VERSIONS];
	/* padding for future needs */
	__u64 pb_padding[4];
};

extern void lustre_swab_ptlrpc_body(struct ptlrpc_body *pb);

/* message body offset for lustre_msg_v2 */
/* ptlrpc body offset in all request/reply messages */
#define MSG_PTLRPC_BODY_OFF	0

/* normal request/reply message record offset */
#define REQ_REC_OFF		1
#define REPLY_REC_OFF		1

/* ldlm request message body offset */
#define DLM_LOCKREQ_OFF		1 /* lockreq offset */
#define DLM_REQ_REC_OFF		2 /* normal dlm request record offset */

/* ldlm intent lock message body offset */
#define DLM_INTENT_IT_OFF	2 /* intent lock it offset */
#define DLM_INTENT_REC_OFF	3 /* intent lock record offset */

/* ldlm reply message body offset */
#define DLM_LOCKREPLY_OFF	1 /* lockrep offset */
#define DLM_REPLY_REC_OFF	2 /* reply record offset */

/** only use in req->rq_{req,rep}_swab_mask */
#define MSG_PTLRPC_HEADER_OFF	31

/* Flags that are operation-specific go in the top 16 bits. */
#define MSG_OP_FLAG_MASK	0xffff0000
#define MSG_OP_FLAG_SHIFT	16

/* Flags that apply to all requests are in the bottom 16 bits */
#define MSG_GEN_FLAG_MASK	0x0000ffff
#define MSG_LAST_REPLAY		0x0001
#define MSG_RESENT		0x0002
#define MSG_REPLAY		0x0004
/* #define MSG_AT_SUPPORT	0x0008
 * This was used in early prototypes of adaptive timeouts, and while there
 * shouldn't be any users of that code there also isn't a need for using
 * this bit. Defer usage until at least 1.10 to avoid potential conflict. */
#define MSG_DELAY_REPLAY	0x0010
#define MSG_VERSION_REPLAY	0x0020
#define MSG_REQ_REPLAY_DONE	0x0040
#define MSG_LOCK_REPLAY_DONE	0x0080

/*
 * Flags for all connect opcodes (MDS_CONNECT, OST_CONNECT)
 */

#define MSG_CONNECT_RECOVERING	0x00000001
#define MSG_CONNECT_RECONNECT	0x00000002
#define MSG_CONNECT_REPLAYABLE	0x00000004
//#define MSG_CONNECT_PEER	0x8
#define MSG_CONNECT_LIBCLIENT	0x00000010
#define MSG_CONNECT_INITIAL	0x00000020
#define MSG_CONNECT_ASYNC	0x00000040
#define MSG_CONNECT_NEXT_VER	0x00000080 /* use next version of lustre_msg */
#define MSG_CONNECT_TRANSNO	0x00000100 /* report transno */

/* Connect flags */
#define OBD_CONNECT_RDONLY		0x1ULL /*client has read-only access*/
#define OBD_CONNECT_INDEX		0x2ULL /*connect specific LOV idx */
#define OBD_CONNECT_MDS			0x4ULL /*connect from MDT to OST */
#define OBD_CONNECT_GRANT		0x8ULL /*OSC gets grant at connect */
#define OBD_CONNECT_SRVLOCK		0x10ULL /*server takes locks for cli */
#define OBD_CONNECT_VERSION		0x20ULL /*Lustre versions in ocd */
#define OBD_CONNECT_REQPORTAL		0x40ULL /*Separate non-IO req portal */
#define OBD_CONNECT_ACL			0x80ULL /*access control lists */
#define OBD_CONNECT_XATTR		0x100ULL /*client uses extended attr */
#define OBD_CONNECT_CROW		0x200ULL /*MDS+OST create obj on write*/
#define OBD_CONNECT_TRUNCLOCK		0x400ULL /*locks on server for punch */
#define OBD_CONNECT_TRANSNO		0x800ULL /*replay sends init transno */
#define OBD_CONNECT_IBITS		0x1000ULL /*support for inodebits locks*/
#define OBD_CONNECT_JOIN		0x2000ULL /*files can be concatenated.
						   *We do not support JOIN FILE
						   *anymore; reserve this flag
						   *just to prevent the bit
						   *from being reused.*/
#define OBD_CONNECT_ATTRFID		0x4000ULL /*Server can GetAttr By Fid*/
#define OBD_CONNECT_NODEVOH		0x8000ULL /*No open hndl on specl nodes*/
#define OBD_CONNECT_RMT_CLIENT		0x10000ULL /*Remote client */
#define OBD_CONNECT_RMT_CLIENT_FORCE	0x20000ULL /*Remote client by force */
#define OBD_CONNECT_BRW_SIZE		0x40000ULL /*Max bytes per rpc */
#define OBD_CONNECT_QUOTA64		0x80000ULL /*Not used since 2.4 */
#define OBD_CONNECT_MDS_CAPA		0x100000ULL /*MDS capability */
#define OBD_CONNECT_OSS_CAPA		0x200000ULL /*OSS capability */
#define OBD_CONNECT_CANCELSET		0x400000ULL /*Early batched cancels. */
#define OBD_CONNECT_SOM			0x800000ULL /*Size on MDS */
#define OBD_CONNECT_AT			0x1000000ULL /*client uses AT */
#define OBD_CONNECT_LRU_RESIZE		0x2000000ULL /*LRU resize feature. */
#define OBD_CONNECT_MDS_MDS		0x4000000ULL /*MDS-MDS connection */
#define OBD_CONNECT_REAL		0x8000000ULL /*real connection */
#define OBD_CONNECT_CHANGE_QS		0x10000000ULL /*Not used since 2.4 */
#define OBD_CONNECT_CKSUM		0x20000000ULL /*support several cksum algos*/
#define OBD_CONNECT_FID			0x40000000ULL /*FID is supported by server */
#define OBD_CONNECT_VBR			0x80000000ULL /*version based recovery */
#define OBD_CONNECT_LOV_V3		0x100000000ULL /*client supports LOV v3 EA */
#define OBD_CONNECT_GRANT_SHRINK	0x200000000ULL /* support grant shrink */
#define OBD_CONNECT_SKIP_ORPHAN		0x400000000ULL /* don't reuse orphan objids */
#define OBD_CONNECT_MAX_EASIZE		0x800000000ULL /* preserved for large EA */
#define OBD_CONNECT_FULL20		0x1000000000ULL /* it is 2.0 client */
#define OBD_CONNECT_LAYOUTLOCK		0x2000000000ULL /* client uses layout lock */
#define OBD_CONNECT_64BITHASH		0x4000000000ULL /* client supports 64-bit
							 * directory hash */
#define OBD_CONNECT_MAXBYTES		0x8000000000ULL /* max stripe size */
#define OBD_CONNECT_IMP_RECOV		0x10000000000ULL /* imp recovery support */
#define OBD_CONNECT_JOBSTATS		0x20000000000ULL /* jobid in ptlrpc_body */
#define OBD_CONNECT_UMASK		0x40000000000ULL /* create uses client umask */
#define OBD_CONNECT_EINPROGRESS		0x80000000000ULL /* client handles -EINPROGRESS
							  * RPC error properly */
#define OBD_CONNECT_GRANT_PARAM		0x100000000000ULL /* extra grant params used for
							   * finer space reservation */
#define OBD_CONNECT_FLOCK_OWNER		0x200000000000ULL /* for the fixed 1.8
							   * policy and 2.x server */
#define OBD_CONNECT_LVB_TYPE		0x400000000000ULL /* variable type of LVB */
#define OBD_CONNECT_NANOSEC_TIME	0x800000000000ULL /* nanosecond timestamps */
#define OBD_CONNECT_LIGHTWEIGHT		0x1000000000000ULL /* lightweight connection */
#define OBD_CONNECT_SHORTIO		0x2000000000000ULL /* short io */
#define OBD_CONNECT_PINGLESS		0x4000000000000ULL /* pings not required */
/* XXX README XXX:
 * Please DO NOT add flag values here before first ensuring that this same
 * flag value is not in use on some other branch. Please clear any such
 * changes with senior engineers before starting to use a new flag. Then,
 * submit a small patch against EVERY branch that ONLY adds the new flag,
 * updates obd_connect_names[] for lprocfs_rd_connect_flags(), adds the
 * flag to check_obd_connect_data(), and updates wiretests accordingly, so it
 * can be approved and landed easily to reserve the flag for future use. */

/* The MNE_SWAB flag is overloading the MDS_MDS bit only for the MGS
 * connection. It is a temporary bug fix for Imperative Recovery interop
 * between 2.2 and 2.3 x86/ppc nodes, and can be removed when interop for
 * 2.2 clients/servers is no longer needed. LU-1252/LU-1644. */
#define OBD_CONNECT_MNE_SWAB		OBD_CONNECT_MDS_MDS

#define OCD_HAS_FLAG(ocd, flg) \
	(!!((ocd)->ocd_connect_flags & OBD_CONNECT_##flg))

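/*
 * Example: OCD_HAS_FLAG() keeps connect-flag tests readable:
 *
 *	if (OCD_HAS_FLAG(ocd, GRANT_SHRINK))
 *		...both sides negotiated grant shrinking...
 *
 * which expands to !!((ocd)->ocd_connect_flags & OBD_CONNECT_GRANT_SHRINK).
 */
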
#define LRU_RESIZE_CONNECT_FLAG OBD_CONNECT_LRU_RESIZE

#define MDT_CONNECT_SUPPORTED	(OBD_CONNECT_RDONLY | OBD_CONNECT_VERSION | \
				 OBD_CONNECT_ACL | OBD_CONNECT_XATTR | \
				 OBD_CONNECT_IBITS | \
				 OBD_CONNECT_NODEVOH | OBD_CONNECT_ATTRFID | \
				 OBD_CONNECT_CANCELSET | OBD_CONNECT_AT | \
				 OBD_CONNECT_RMT_CLIENT | \
				 OBD_CONNECT_RMT_CLIENT_FORCE | \
				 OBD_CONNECT_BRW_SIZE | OBD_CONNECT_MDS_CAPA | \
				 OBD_CONNECT_OSS_CAPA | OBD_CONNECT_MDS_MDS | \
				 OBD_CONNECT_FID | LRU_RESIZE_CONNECT_FLAG | \
				 OBD_CONNECT_VBR | OBD_CONNECT_LOV_V3 | \
				 OBD_CONNECT_SOM | OBD_CONNECT_FULL20 | \
				 OBD_CONNECT_64BITHASH | OBD_CONNECT_JOBSTATS | \
				 OBD_CONNECT_EINPROGRESS | \
				 OBD_CONNECT_LIGHTWEIGHT | OBD_CONNECT_UMASK | \
				 OBD_CONNECT_LVB_TYPE | OBD_CONNECT_LAYOUTLOCK |\
				 OBD_CONNECT_PINGLESS)
#define OST_CONNECT_SUPPORTED	(OBD_CONNECT_SRVLOCK | OBD_CONNECT_GRANT | \
				 OBD_CONNECT_REQPORTAL | OBD_CONNECT_VERSION | \
				 OBD_CONNECT_TRUNCLOCK | OBD_CONNECT_INDEX | \
				 OBD_CONNECT_BRW_SIZE | OBD_CONNECT_OSS_CAPA | \
				 OBD_CONNECT_CANCELSET | OBD_CONNECT_AT | \
				 LRU_RESIZE_CONNECT_FLAG | OBD_CONNECT_CKSUM | \
				 OBD_CONNECT_RMT_CLIENT | \
				 OBD_CONNECT_RMT_CLIENT_FORCE | OBD_CONNECT_VBR | \
				 OBD_CONNECT_MDS | OBD_CONNECT_SKIP_ORPHAN | \
				 OBD_CONNECT_GRANT_SHRINK | OBD_CONNECT_FULL20 | \
				 OBD_CONNECT_64BITHASH | OBD_CONNECT_MAXBYTES | \
				 OBD_CONNECT_MAX_EASIZE | \
				 OBD_CONNECT_EINPROGRESS | \
				 OBD_CONNECT_JOBSTATS | \
				 OBD_CONNECT_LIGHTWEIGHT | OBD_CONNECT_LVB_TYPE |\
				 OBD_CONNECT_LAYOUTLOCK | OBD_CONNECT_FID | \
				 OBD_CONNECT_PINGLESS)
#define ECHO_CONNECT_SUPPORTED	(0)
#define MGS_CONNECT_SUPPORTED	(OBD_CONNECT_VERSION | OBD_CONNECT_AT | \
				 OBD_CONNECT_FULL20 | OBD_CONNECT_IMP_RECOV | \
				 OBD_CONNECT_MNE_SWAB | OBD_CONNECT_PINGLESS)

/* Features required for this version of the client to work with server */
#define CLIENT_CONNECT_MDT_REQD	(OBD_CONNECT_IBITS | OBD_CONNECT_FID | \
				 OBD_CONNECT_FULL20)

#define OBD_OCD_VERSION(major, minor, patch, fix) (((major)<<24) + \
						   ((minor)<<16) + \
						   ((patch)<<8) + (fix))
#define OBD_OCD_VERSION_MAJOR(version)	((int)((version)>>24)&255)
#define OBD_OCD_VERSION_MINOR(version)	((int)((version)>>16)&255)
#define OBD_OCD_VERSION_PATCH(version)	((int)((version)>>8)&255)
#define OBD_OCD_VERSION_FIX(version)	((int)(version)&255)

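/*
 * Example: OBD_OCD_VERSION(2, 4, 50, 0) encodes as 0x02043200, and the
 * accessors recover each component: MAJOR = 2, MINOR = 4, PATCH = 50,
 * FIX = 0.
 */
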
/* This structure is used for both request and reply.
 *
 * If we eventually have separate connect data for different types, which we
 * almost certainly will, then perhaps we stick a union in here. */
struct obd_connect_data_v1 {
	__u64 ocd_connect_flags; /* OBD_CONNECT_* per above */
	__u32 ocd_version;	 /* lustre release version number */
	__u32 ocd_grant;	 /* initial cache grant amount (bytes) */
	__u32 ocd_index;	 /* LOV index to connect to */
	__u32 ocd_brw_size;	 /* Maximum BRW size in bytes, must be 2^n */
	__u64 ocd_ibits_known;	 /* inode bits this client understands */
	__u8  ocd_blocksize;	 /* log2 of the backend filesystem blocksize */
	__u8  ocd_inodespace;	 /* log2 of the per-inode space consumption */
	__u16 ocd_grant_extent;	 /* per-extent grant overhead, in 1K blocks */
	__u32 ocd_unused;	 /* also fix lustre_swab_connect */
	__u64 ocd_transno;	 /* first transno from client to be replayed */
	__u32 ocd_group;	 /* MDS group on OST */
	__u32 ocd_cksum_types;	 /* supported checksum algorithms */
	__u32 ocd_max_easize;	 /* How big LOV EA can be on MDS */
	__u32 ocd_instance;	 /* also fix lustre_swab_connect */
	__u64 ocd_maxbytes;	 /* Maximum stripe size in bytes */
};

struct obd_connect_data {
	__u64 ocd_connect_flags; /* OBD_CONNECT_* per above */
	__u32 ocd_version;	 /* lustre release version number */
	__u32 ocd_grant;	 /* initial cache grant amount (bytes) */
	__u32 ocd_index;	 /* LOV index to connect to */
	__u32 ocd_brw_size;	 /* Maximum BRW size in bytes */
	__u64 ocd_ibits_known;	 /* inode bits this client understands */
	__u8  ocd_blocksize;	 /* log2 of the backend filesystem blocksize */
	__u8  ocd_inodespace;	 /* log2 of the per-inode space consumption */
	__u16 ocd_grant_extent;	 /* per-extent grant overhead, in 1K blocks */
	__u32 ocd_unused;	 /* also fix lustre_swab_connect */
	__u64 ocd_transno;	 /* first transno from client to be replayed */
	__u32 ocd_group;	 /* MDS group on OST */
	__u32 ocd_cksum_types;	 /* supported checksum algorithms */
	__u32 ocd_max_easize;	 /* How big LOV EA can be on MDS */
	__u32 ocd_instance;	 /* instance # of this target */
	__u64 ocd_maxbytes;	 /* Maximum stripe size in bytes */
	/* Fields after ocd_maxbytes are only accessible by the receiver
	 * if the corresponding flag in ocd_connect_flags is set. Accessing
	 * any field after ocd_maxbytes on the receiver without a valid flag
	 * may result in out-of-bound memory access and kernel oops. */
	__u64 padding1;		 /* added 2.1.0. also fix lustre_swab_connect */
	__u64 padding2;		 /* added 2.1.0. also fix lustre_swab_connect */
	__u64 padding3;		 /* added 2.1.0. also fix lustre_swab_connect */
	__u64 padding4;		 /* added 2.1.0. also fix lustre_swab_connect */
	__u64 padding5;		 /* added 2.1.0. also fix lustre_swab_connect */
	__u64 padding6;		 /* added 2.1.0. also fix lustre_swab_connect */
	__u64 padding7;		 /* added 2.1.0. also fix lustre_swab_connect */
	__u64 padding8;		 /* added 2.1.0. also fix lustre_swab_connect */
	__u64 padding9;		 /* added 2.1.0. also fix lustre_swab_connect */
	__u64 paddingA;		 /* added 2.1.0. also fix lustre_swab_connect */
	__u64 paddingB;		 /* added 2.1.0. also fix lustre_swab_connect */
	__u64 paddingC;		 /* added 2.1.0. also fix lustre_swab_connect */
	__u64 paddingD;		 /* added 2.1.0. also fix lustre_swab_connect */
	__u64 paddingE;		 /* added 2.1.0. also fix lustre_swab_connect */
	__u64 paddingF;		 /* added 2.1.0. also fix lustre_swab_connect */
};
/* XXX README XXX:
 * Please DO NOT use any fields here before first ensuring that this same
 * field is not in use on some other branch. Please clear any such changes
 * with senior engineers before starting to use a new field. Then, submit
 * a small patch against EVERY branch that ONLY adds the new field along with
 * the matching OBD_CONNECT flag, so that can be approved and landed easily to
 * reserve the flag for future use. */

extern void lustre_swab_connect(struct obd_connect_data *ocd);

/*
 * Supported checksum algorithms. Up to 32 checksum types are supported.
 * (32-bit mask stored in obd_connect_data::ocd_cksum_types)
 * Please update DECLARE_CKSUM_NAME/OBD_CKSUM_ALL in obd.h when adding a new
 * algorithm and also the OBD_FL_CKSUM* flags.
 */
typedef enum {
	OBD_CKSUM_CRC32	 = 0x00000001,
	OBD_CKSUM_ADLER	 = 0x00000002,
	OBD_CKSUM_CRC32C = 0x00000004,
} cksum_type_t;

/*
 * OST requests: OBDO & OBD request records
 */

/* opcodes */
typedef enum {
	OST_REPLY	= 0,	/* reply ? */
	OST_GETATTR	= 1,
	OST_SETATTR	= 2,
	OST_READ	= 3,
	OST_WRITE	= 4,
	OST_CREATE	= 5,
	OST_DESTROY	= 6,
	OST_GET_INFO	= 7,
	OST_CONNECT	= 8,
	OST_DISCONNECT	= 9,
	OST_PUNCH	= 10,
	OST_OPEN	= 11,
	OST_CLOSE	= 12,
	OST_STATFS	= 13,
	OST_SYNC	= 16,
	OST_SET_INFO	= 17,
	OST_QUOTACHECK	= 18,
	OST_QUOTACTL	= 19,
	OST_QUOTA_ADJUST_QUNIT = 20, /* not used since 2.4 */
	OST_LAST_OPC
} ost_cmd_t;
#define OST_FIRST_OPC OST_REPLY

enum obdo_flags {
	OBD_FL_INLINEDATA   = 0x00000001,
	OBD_FL_OBDMDEXISTS  = 0x00000002,
	OBD_FL_DELORPHAN    = 0x00000004, /* if set in o_flags delete orphans */
	OBD_FL_NORPC	    = 0x00000008, /* set in o_flags do in OSC not OST */
	OBD_FL_IDONLY	    = 0x00000010, /* set in o_flags only adjust obj id*/
	OBD_FL_RECREATE_OBJS = 0x00000020, /* recreate missing obj */
	OBD_FL_DEBUG_CHECK  = 0x00000040, /* echo client/server debug check */
	OBD_FL_NO_USRQUOTA  = 0x00000100, /* the object's owner is over quota */
	OBD_FL_NO_GRPQUOTA  = 0x00000200, /* the object's group is over quota */
	OBD_FL_CREATE_CROW  = 0x00000400, /* object should be created on write */
	OBD_FL_SRVLOCK	    = 0x00000800, /* delegate DLM locking to server */
	OBD_FL_CKSUM_CRC32  = 0x00001000, /* CRC32 checksum type */
	OBD_FL_CKSUM_ADLER  = 0x00002000, /* ADLER checksum type */
	OBD_FL_CKSUM_CRC32C = 0x00004000, /* CRC32C checksum type */
	OBD_FL_CKSUM_RSVD2  = 0x00008000, /* for future cksum types */
	OBD_FL_CKSUM_RSVD3  = 0x00010000, /* for future cksum types */
	OBD_FL_SHRINK_GRANT = 0x00020000, /* object shrinks the grant */
	OBD_FL_MMAP	    = 0x00040000, /* object is mmapped on the client.
					   * XXX: obsoleted - reserved for old
					   * clients prior to 2.2 */
	OBD_FL_RECOV_RESEND = 0x00080000, /* recoverable resent */
	OBD_FL_NOSPC_BLK    = 0x00100000, /* no more block space on OST */

	/* Note that while these checksum values are currently separate bits,
	 * in 2.x we can actually allow all values from 1-31 if we wanted. */
	OBD_FL_CKSUM_ALL    = OBD_FL_CKSUM_CRC32 | OBD_FL_CKSUM_ADLER |
			      OBD_FL_CKSUM_CRC32C,

	/* mask for local-only flag, which won't be sent over network */
	OBD_FL_LOCAL_MASK   = 0xF0000000,
};

#define LOV_MAGIC_V1		0x0BD10BD0
#define LOV_MAGIC		LOV_MAGIC_V1
#define LOV_MAGIC_JOIN_V1	0x0BD20BD0
#define LOV_MAGIC_V3		0x0BD30BD0

/*
 * magic for fully defined striping
 * the idea is that we should have different magics for striping "hints"
 * (struct lov_user_md_v[13]) and defined ready-to-use striping (struct
 * lov_mds_md_v[13]). at the moment the magics are used in the wire protocol,
 * so we can't just change them without a long preparation period, but we
 * still need a mechanism to allow LOD to differentiate hint versus ready
 * striping. so, at the moment we do a trick: MDT knows what to expect from
 * a request depending on the case (replay uses ready striping, non-replay
 * req uses hints), so MDT replaces the magic with the appropriate one and
 * now LOD can easily understand what's inside -bzzz
 */
#define LOV_MAGIC_V1_DEF	0x0CD10BD0
#define LOV_MAGIC_V3_DEF	0x0CD30BD0

#define LOV_PATTERN_RAID0	0x001	/* stripes are used round-robin */
#define LOV_PATTERN_RAID1	0x002	/* stripes are mirrors of each other */
#define LOV_PATTERN_FIRST	0x100	/* first stripe is not in round-robin */
#define LOV_PATTERN_CMOBD	0x200

#define lov_ost_data lov_ost_data_v1
struct lov_ost_data_v1 {	/* per-stripe data structure (little-endian)*/
	struct ost_id l_ost_oi;	/* OST object ID */
	__u32 l_ost_gen;	/* generation of this l_ost_idx */
	__u32 l_ost_idx;	/* OST index in LOV (lov_tgt_desc->tgts) */
};

#define lov_mds_md lov_mds_md_v1
struct lov_mds_md_v1 {		/* LOV EA mds/wire data (little-endian) */
	__u32 lmm_magic;	/* magic number = LOV_MAGIC_V1 */
	__u32 lmm_pattern;	/* LOV_PATTERN_RAID0, LOV_PATTERN_RAID1 */
	struct ost_id lmm_oi;	/* LOV object ID */
	__u32 lmm_stripe_size;	/* size of stripe in bytes */
	/* lmm_stripe_count used to be __u32 */
	__u16 lmm_stripe_count;	/* num stripes in use for this object */
	__u16 lmm_layout_gen;	/* layout generation number */
	struct lov_ost_data_v1 lmm_objects[0]; /* per-stripe data */
};

/**
 * Sigh, because pre-2.4 uses
 * struct lov_mds_md_v1 {
 *	........
 *	__u64 lmm_object_id;
 *	__u64 lmm_object_seq;
 *	......
 * }
 * to identify the LOV(MDT) object, and lmm_object_seq will be a normal
 * fid, which makes it hard to fold these conversions into ostid_to_fid(),
 * so we do the lmm_oi/fid conversion separately.
 *
 * We can tell the lmm_oi apart in this way:
 * 1.8: lmm_object_id = {inode}, lmm_object_gr = 0
 * 2.1: lmm_object_id = {oid < 128k}, lmm_object_seq = FID_SEQ_NORMAL
 * 2.4: lmm_oi.f_seq = FID_SEQ_NORMAL, lmm_oi.f_oid = {oid < 128k},
 *	lmm_oi.f_ver = 0
 *
 * But currently lmm_oi/lsm_oi does not have any "real" usages,
 * except for printing some information, and the user can always
 * get the real FID from the LMA; besides, the multiple-case check
 * would make swabbing more complicated. So we will keep using id/seq
 * for lmm_oi.
 */

static inline void fid_to_lmm_oi(const struct lu_fid *fid,
				 struct ost_id *oi)
{
	oi->oi.oi_id = fid_oid(fid);
	oi->oi.oi_seq = fid_seq(fid);
}

static inline void lmm_oi_set_seq(struct ost_id *oi, __u64 seq)
{
	oi->oi.oi_seq = seq;
}

static inline __u64 lmm_oi_id(struct ost_id *oi)
{
	return oi->oi.oi_id;
}

static inline __u64 lmm_oi_seq(struct ost_id *oi)
{
	return oi->oi.oi_seq;
}

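/*
 * Example: how a 2.4-style MDT object FID lands in lmm_oi via the helpers
 * above (matching the 2.4 case in the table):
 *
 *	fid = [FID_SEQ_NORMAL:0x1234:0x0]
 *	fid_to_lmm_oi(&fid, &oi);
 *	lmm_oi_id(&oi)	yields 0x1234
 *	lmm_oi_seq(&oi)	yields FID_SEQ_NORMAL
 */
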
1617static inline void lmm_oi_le_to_cpu(struct ost_id *dst_oi,
1618 struct ost_id *src_oi)
1619{
1620 dst_oi->oi.oi_id = le64_to_cpu(src_oi->oi.oi_id);
1621 dst_oi->oi.oi_seq = le64_to_cpu(src_oi->oi.oi_seq);
1622}
1623
1624static inline void lmm_oi_cpu_to_le(struct ost_id *dst_oi,
1625 struct ost_id *src_oi)
1626{
1627 dst_oi->oi.oi_id = cpu_to_le64(src_oi->oi.oi_id);
1628 dst_oi->oi.oi_seq = cpu_to_le64(src_oi->oi.oi_seq);
1629}
1630
1631/* extern void lustre_swab_lov_mds_md(struct lov_mds_md *llm); */
1632
1633#define MAX_MD_SIZE (sizeof(struct lov_mds_md) + 4 * sizeof(struct lov_ost_data))
1634#define MIN_MD_SIZE (sizeof(struct lov_mds_md) + 1 * sizeof(struct lov_ost_data))
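/* Worked example (illustrative arithmetic, assuming the usual packing):
 * sizeof(struct lov_mds_md_v1) == 32 and sizeof(struct lov_ost_data_v1) == 24,
 * so MAX_MD_SIZE == 32 + 4 * 24 == 128 bytes and
 * MIN_MD_SIZE == 32 + 24 == 56 bytes. */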
1635
1636#define XATTR_NAME_ACL_ACCESS "system.posix_acl_access"
1637#define XATTR_NAME_ACL_DEFAULT "system.posix_acl_default"
1638#define XATTR_USER_PREFIX "user."
1639#define XATTR_TRUSTED_PREFIX "trusted."
1640#define XATTR_SECURITY_PREFIX "security."
1641#define XATTR_LUSTRE_PREFIX "lustre."
1642
1643#define XATTR_NAME_LOV "trusted.lov"
1644#define XATTR_NAME_LMA "trusted.lma"
1645#define XATTR_NAME_LMV "trusted.lmv"
1646#define XATTR_NAME_LINK "trusted.link"
1647#define XATTR_NAME_FID "trusted.fid"
1648#define XATTR_NAME_VERSION "trusted.version"
1649#define XATTR_NAME_SOM "trusted.som"
1650#define XATTR_NAME_HSM "trusted.hsm"
1651#define XATTR_NAME_LFSCK_NAMESPACE "trusted.lfsck_namespace"
1652
1653struct lov_mds_md_v3 { /* LOV EA mds/wire data (little-endian) */
1654 __u32 lmm_magic; /* magic number = LOV_MAGIC_V3 */
1655 __u32 lmm_pattern; /* LOV_PATTERN_RAID0, LOV_PATTERN_RAID1 */
1656 struct ost_id lmm_oi; /* LOV object ID */
1657 __u32 lmm_stripe_size; /* size of stripe in bytes */
1658 /* lmm_stripe_count used to be __u32 */
1659 __u16 lmm_stripe_count; /* num stripes in use for this object */
1660 __u16 lmm_layout_gen; /* layout generation number */
1661 char lmm_pool_name[LOV_MAXPOOLNAME]; /* must be 32bit aligned */
1662 struct lov_ost_data_v1 lmm_objects[0]; /* per-stripe data */
1663};
1664
1665#define OBD_MD_FLID (0x00000001ULL) /* object ID */
1666#define OBD_MD_FLATIME (0x00000002ULL) /* access time */
1667#define OBD_MD_FLMTIME (0x00000004ULL) /* data modification time */
1668#define OBD_MD_FLCTIME (0x00000008ULL) /* change time */
1669#define OBD_MD_FLSIZE (0x00000010ULL) /* size */
1670#define OBD_MD_FLBLOCKS (0x00000020ULL) /* allocated blocks count */
1671#define OBD_MD_FLBLKSZ (0x00000040ULL) /* block size */
1672#define OBD_MD_FLMODE (0x00000080ULL) /* access bits (mode & ~S_IFMT) */
1673#define OBD_MD_FLTYPE (0x00000100ULL) /* object type (mode & S_IFMT) */
1674#define OBD_MD_FLUID (0x00000200ULL) /* user ID */
1675#define OBD_MD_FLGID (0x00000400ULL) /* group ID */
1676#define OBD_MD_FLFLAGS (0x00000800ULL) /* flags word */
1677#define OBD_MD_FLNLINK (0x00002000ULL) /* link count */
1678#define OBD_MD_FLGENER (0x00004000ULL) /* generation number */
1679/*#define OBD_MD_FLINLINE (0x00008000ULL) inline data. used until 1.6.5 */
1680#define OBD_MD_FLRDEV (0x00010000ULL) /* device number */
1681#define OBD_MD_FLEASIZE (0x00020000ULL) /* extended attribute data */
1682#define OBD_MD_LINKNAME (0x00040000ULL) /* symbolic link target */
1683#define OBD_MD_FLHANDLE (0x00080000ULL) /* file/lock handle */
1684#define OBD_MD_FLCKSUM (0x00100000ULL) /* bulk data checksum */
1685#define OBD_MD_FLQOS (0x00200000ULL) /* quality of service stats */
1686/*#define OBD_MD_FLOSCOPQ (0x00400000ULL) osc opaque data, never used */
1687#define OBD_MD_FLCOOKIE (0x00800000ULL) /* log cancellation cookie */
1688#define OBD_MD_FLGROUP (0x01000000ULL) /* group */
1689#define OBD_MD_FLFID (0x02000000ULL) /* ->ost write inline fid */
1690#define OBD_MD_FLEPOCH (0x04000000ULL) /* ->ost write with ioepoch */
1691 /* ->mds if epoch opens or closes */
1692#define OBD_MD_FLGRANT (0x08000000ULL) /* ost preallocation space grant */
1693#define OBD_MD_FLDIREA (0x10000000ULL) /* dir's extended attribute data */
1694#define OBD_MD_FLUSRQUOTA (0x20000000ULL) /* over quota flags sent from ost */
1695#define OBD_MD_FLGRPQUOTA (0x40000000ULL) /* over quota flags sent from ost */
1696#define OBD_MD_FLMODEASIZE (0x80000000ULL) /* EA size will be changed */
1697
1698#define OBD_MD_MDS (0x0000000100000000ULL) /* where an inode lives */
1699#define OBD_MD_REINT (0x0000000200000000ULL) /* reintegrate oa */
1700#define OBD_MD_MEA (0x0000000400000000ULL) /* CMD split EA */
1701
1702/* OBD_MD_MDTIDX is used to get the MDT index, but it has never been used
1703 * over the wire, and it has been obsolete since 2.3 */
1704/* #define OBD_MD_MDTIDX (0x0000000800000000ULL) */
1705
1706#define OBD_MD_FLXATTR (0x0000001000000000ULL) /* xattr */
1707#define OBD_MD_FLXATTRLS (0x0000002000000000ULL) /* xattr list */
1708#define OBD_MD_FLXATTRRM (0x0000004000000000ULL) /* xattr remove */
1709#define OBD_MD_FLACL (0x0000008000000000ULL) /* ACL */
1710#define OBD_MD_FLRMTPERM (0x0000010000000000ULL) /* remote permission */
1711#define OBD_MD_FLMDSCAPA (0x0000020000000000ULL) /* MDS capability */
1712#define OBD_MD_FLOSSCAPA (0x0000040000000000ULL) /* OSS capability */
1713#define OBD_MD_FLCKSPLIT (0x0000080000000000ULL) /* Check split on server */
1714#define OBD_MD_FLCROSSREF (0x0000100000000000ULL) /* Cross-ref case */
1715#define OBD_MD_FLGETATTRLOCK (0x0000200000000000ULL) /* Get IOEpoch attributes
1716 * under lock */
1717#define OBD_MD_FLOBJCOUNT (0x0000400000000000ULL) /* for multiple destroy */
1718
1719#define OBD_MD_FLRMTLSETFACL (0x0001000000000000ULL) /* lfs lsetfacl case */
1720#define OBD_MD_FLRMTLGETFACL (0x0002000000000000ULL) /* lfs lgetfacl case */
1721#define OBD_MD_FLRMTRSETFACL (0x0004000000000000ULL) /* lfs rsetfacl case */
1722#define OBD_MD_FLRMTRGETFACL (0x0008000000000000ULL) /* lfs rgetfacl case */
1723
1724#define OBD_MD_FLDATAVERSION (0x0010000000000000ULL) /* iversion sum */
1725
1726#define OBD_MD_FLGETATTR (OBD_MD_FLID | OBD_MD_FLATIME | OBD_MD_FLMTIME | \
1727 OBD_MD_FLCTIME | OBD_MD_FLSIZE | OBD_MD_FLBLKSZ | \
1728 OBD_MD_FLMODE | OBD_MD_FLTYPE | OBD_MD_FLUID | \
1729 OBD_MD_FLGID | OBD_MD_FLFLAGS | OBD_MD_FLNLINK | \
1730 OBD_MD_FLGENER | OBD_MD_FLRDEV | OBD_MD_FLGROUP)
1731
1732/* don't forget obdo_fid which is way down at the bottom so it can
1733 * come after the definition of llog_cookie */
1734
1735enum hss_valid {
1736 HSS_SETMASK = 0x01,
1737 HSS_CLEARMASK = 0x02,
1738 HSS_ARCHIVE_ID = 0x04,
1739};
1740
1741struct hsm_state_set {
1742 __u32 hss_valid;
1743 __u32 hss_archive_id;
1744 __u64 hss_setmask;
1745 __u64 hss_clearmask;
1746};
1747
1748extern void lustre_swab_hsm_user_state(struct hsm_user_state *hus);
1749extern void lustre_swab_hsm_state_set(struct hsm_state_set *hss);
1750
1751extern void lustre_swab_obd_statfs (struct obd_statfs *os);
1752
1753/* ost_body.data values for OST_BRW */
1754
1755#define OBD_BRW_READ 0x01
1756#define OBD_BRW_WRITE 0x02
1757#define OBD_BRW_RWMASK (OBD_BRW_READ | OBD_BRW_WRITE)
1758#define OBD_BRW_SYNC 0x08 /* this page is a part of synchronous
1759 * transfer and is not accounted in
1760 * the grant. */
1761#define OBD_BRW_CHECK 0x10
1762#define OBD_BRW_FROM_GRANT 0x20 /* the osc manages this under llite */
1763#define OBD_BRW_GRANTED 0x40 /* the ost manages this */
1764#define OBD_BRW_NOCACHE 0x80 /* this page is a part of non-cached IO */
1765#define OBD_BRW_NOQUOTA 0x100
1766#define OBD_BRW_SRVLOCK 0x200 /* Client holds no lock over this page */
1767#define OBD_BRW_ASYNC 0x400 /* Server may delay commit to disk */
1768#define OBD_BRW_MEMALLOC 0x800 /* Client runs in the "kswapd" context */
1769#define OBD_BRW_OVER_USRQUOTA 0x1000 /* Running out of user quota */
1770#define OBD_BRW_OVER_GRPQUOTA 0x2000 /* Running out of group quota */
1771
1772#define OBD_OBJECT_EOF 0xffffffffffffffffULL
1773
1774#define OST_MIN_PRECREATE 32
1775#define OST_MAX_PRECREATE 20000
1776
1777struct obd_ioobj {
1778 struct ost_id ioo_oid; /* object ID, if multi-obj BRW */
1779 __u32 ioo_max_brw; /* low 16 bits were o_mode before 2.4,
1780 * now (PTLRPC_BULK_OPS_COUNT - 1) in
1781 * high 16 bits in 2.4 and later */
1782 __u32 ioo_bufcnt; /* number of niobufs for this object */
1783};
1784
1785#define IOOBJ_MAX_BRW_BITS 16
1786#define IOOBJ_TYPE_MASK ((1U << IOOBJ_MAX_BRW_BITS) - 1)
1787#define ioobj_max_brw_get(ioo) (((ioo)->ioo_max_brw >> IOOBJ_MAX_BRW_BITS) + 1)
1788#define ioobj_max_brw_set(ioo, num) \
1789do { (ioo)->ioo_max_brw = ((num) - 1) << IOOBJ_MAX_BRW_BITS; } while (0)
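/*
 * Round-trip sketch for the ioo_max_brw encoding above (illustrative
 * only): a maximum of 8 bulk RPCs in flight is stored as (8 - 1) in the
 * high 16 bits, and ioobj_max_brw_get() recovers 8 again.
 */
static inline void obd_ioobj_max_brw_example(struct obd_ioobj *ioo)
{
	ioobj_max_brw_set(ioo, 8);	/* ioo_max_brw = 7 << 16 */
	LASSERT(ioobj_max_brw_get(ioo) == 8);
}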
1790
1791extern void lustre_swab_obd_ioobj (struct obd_ioobj *ioo);
1792
1793/* multiple of 8 bytes => can array */
1794struct niobuf_remote {
1795 __u64 offset;
1796 __u32 len;
1797 __u32 flags;
1798};
1799
1800extern void lustre_swab_niobuf_remote (struct niobuf_remote *nbr);
1801
1802/* lock value block communicated between the filter and llite */
1803
1804/* OST_LVB_ERR_INIT is needed because the return code in rc is
1805 * negative, i.e. because ((MASK + rc) & MASK) != MASK. */
1806#define OST_LVB_ERR_INIT 0xffbadbad80000000ULL
1807#define OST_LVB_ERR_MASK 0xffbadbad00000000ULL
1808#define OST_LVB_IS_ERR(blocks) \
1809 ((blocks & OST_LVB_ERR_MASK) == OST_LVB_ERR_MASK)
1810#define OST_LVB_SET_ERR(blocks, rc) \
1811 do { blocks = OST_LVB_ERR_INIT + rc; } while (0)
1812#define OST_LVB_GET_ERR(blocks) (int)(blocks - OST_LVB_ERR_INIT)
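/*
 * Round-trip sketch for the OST_LVB_* error encoding above (illustrative
 * only, assuming <linux/errno.h> is available for ENOSPC): a negative rc
 * stored with OST_LVB_SET_ERR() matches OST_LVB_ERR_MASK and decodes back
 * unchanged.
 */
static inline void ost_lvb_err_example(void)
{
	__u64 blocks;

	OST_LVB_SET_ERR(blocks, -ENOSPC); /* 0xffbadbad80000000 - 28 */
	LASSERT(OST_LVB_IS_ERR(blocks));
	LASSERT(OST_LVB_GET_ERR(blocks) == -ENOSPC);
}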
1813
1814struct ost_lvb_v1 {
1815 __u64 lvb_size;
1816 obd_time lvb_mtime;
1817 obd_time lvb_atime;
1818 obd_time lvb_ctime;
1819 __u64 lvb_blocks;
1820};
1821
1822extern void lustre_swab_ost_lvb_v1(struct ost_lvb_v1 *lvb);
1823
1824struct ost_lvb {
1825 __u64 lvb_size;
1826 obd_time lvb_mtime;
1827 obd_time lvb_atime;
1828 obd_time lvb_ctime;
1829 __u64 lvb_blocks;
1830 __u32 lvb_mtime_ns;
1831 __u32 lvb_atime_ns;
1832 __u32 lvb_ctime_ns;
1833 __u32 lvb_padding;
1834};
1835
1836extern void lustre_swab_ost_lvb(struct ost_lvb *lvb);
1837
1838/*
1839 * lquota data structures
1840 */
1841
1842#ifndef QUOTABLOCK_BITS
1843#define QUOTABLOCK_BITS 10
1844#endif
1845
1846#ifndef QUOTABLOCK_SIZE
1847#define QUOTABLOCK_SIZE (1 << QUOTABLOCK_BITS)
1848#endif
1849
1850#ifndef toqb
1851#define toqb(x) (((x) + QUOTABLOCK_SIZE - 1) >> QUOTABLOCK_BITS)
1852#endif
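/* Worked examples of toqb() above (illustrative): quota blocks are 1KB
 * units and the conversion rounds up, so
 *	toqb(0)    == 0
 *	toqb(1)    == 1	  (a single byte still occupies one quota block)
 *	toqb(4096) == 4
 */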
1853
1854/* The lquota_id structure is a union of all the possible identifier types that
1855 * can be used with quota. This includes:
1856 * - 64-bit user ID
1857 * - 64-bit group ID
1858 * - a FID which can be used for per-directory quota in the future */
1859union lquota_id {
1860 struct lu_fid qid_fid; /* FID for per-directory quota */
1861 __u64 qid_uid; /* user identifier */
1862 __u64 qid_gid; /* group identifier */
1863};
1864
1865/* quotactl management */
1866struct obd_quotactl {
1867 __u32 qc_cmd;
1868 __u32 qc_type; /* see Q_* flag below */
1869 __u32 qc_id;
1870 __u32 qc_stat;
1871 struct obd_dqinfo qc_dqinfo;
1872 struct obd_dqblk qc_dqblk;
1873};
1874
1875extern void lustre_swab_obd_quotactl(struct obd_quotactl *q);
1876
1877#define Q_QUOTACHECK 0x800100 /* deprecated as of 2.4 */
1878#define Q_INITQUOTA 0x800101 /* deprecated as of 2.4 */
1879#define Q_GETOINFO 0x800102 /* get obd quota info */
1880#define Q_GETOQUOTA 0x800103 /* get obd quotas */
1881#define Q_FINVALIDATE 0x800104 /* deprecated as of 2.4 */
1882
1883#define Q_COPY(out, in, member) (out)->member = (in)->member
1884
1885#define QCTL_COPY(out, in) \
1886do { \
1887 Q_COPY(out, in, qc_cmd); \
1888 Q_COPY(out, in, qc_type); \
1889 Q_COPY(out, in, qc_id); \
1890 Q_COPY(out, in, qc_stat); \
1891 Q_COPY(out, in, qc_dqinfo); \
1892 Q_COPY(out, in, qc_dqblk); \
1893} while (0)
1894
1895/* Body of quota request used for quota acquire/release RPCs between quota
1896 * master (aka QMT) and slaves (aka QSD). */
1897struct quota_body {
1898 struct lu_fid qb_fid; /* FID of global index packing the pool ID
1899 * and type (data or metadata) as well as
1900 * the quota type (user or group). */
1901 union lquota_id qb_id; /* uid or gid or directory FID */
1902 __u32 qb_flags; /* see below */
1903 __u32 qb_padding;
1904 __u64 qb_count; /* acquire/release count (kbytes/inodes) */
1905 __u64 qb_usage; /* current slave usage (kbytes/inodes) */
1906 __u64 qb_slv_ver; /* slave index file version */
1907 struct lustre_handle qb_lockh; /* per-ID lock handle */
1908 struct lustre_handle qb_glb_lockh; /* global lock handle */
1909 __u64 qb_padding1[4];
1910};
1911
1912/* When the quota_body is used in the reply of quota global intent
1913 * lock (IT_QUOTA_CONN) reply, qb_fid contains slave index file FID. */
1914#define qb_slv_fid qb_fid
1915/* qb_usage is the current qunit (in kbytes/inodes) when quota_body is used in
1916 * quota reply */
1917#define qb_qunit qb_usage
1918
1919#define QUOTA_DQACQ_FL_ACQ 0x1 /* acquire quota */
1920#define QUOTA_DQACQ_FL_PREACQ 0x2 /* pre-acquire */
1921#define QUOTA_DQACQ_FL_REL 0x4 /* release quota */
1922#define QUOTA_DQACQ_FL_REPORT 0x8 /* report usage */
1923
1924extern void lustre_swab_quota_body(struct quota_body *b);
1925
1926/* Quota types currently supported */
1927enum {
1928 LQUOTA_TYPE_USR = 0x00, /* maps to USRQUOTA */
1929 LQUOTA_TYPE_GRP = 0x01, /* maps to GRPQUOTA */
1930 LQUOTA_TYPE_MAX
1931};
1932
1933/* There are 2 different resource types on which a quota limit can be enforced:
1934 * - inodes on the MDTs
1935 * - blocks on the OSTs */
1936enum {
1937 LQUOTA_RES_MD = 0x01, /* skip 0 to avoid null oid in FID */
1938 LQUOTA_RES_DT = 0x02,
1939 LQUOTA_LAST_RES,
1940 LQUOTA_FIRST_RES = LQUOTA_RES_MD
1941};
1942#define LQUOTA_NR_RES (LQUOTA_LAST_RES - LQUOTA_FIRST_RES + 1)
1943
1944/*
1945 * Space accounting support
1946 * Format of an accounting record, providing disk usage information for a given
1947 * user or group
1948 */
1949struct lquota_acct_rec { /* 16 bytes */
1950 __u64 bspace; /* current space in use */
1951 __u64 ispace; /* current # inodes in use */
1952};
1953
1954/*
1955 * Global quota index support
1956 * Format of a global record, providing global quota settings for a given quota
1957 * identifier
1958 */
1959struct lquota_glb_rec { /* 32 bytes */
1960 __u64 qbr_hardlimit; /* quota hard limit, in #inodes or kbytes */
1961 __u64 qbr_softlimit; /* quota soft limit, in #inodes or kbytes */
1962 __u64 qbr_time; /* grace time, in seconds */
1963 __u64 qbr_granted; /* how much is granted to slaves, in #inodes or
1964 * kbytes */
1965};
1966
1967/*
1968 * Slave index support
1969 * Format of a slave record, recording how much space is granted to a given
1970 * slave
1971 */
1972struct lquota_slv_rec { /* 8 bytes */
1973 __u64 qsr_granted; /* space granted to the slave for the key=ID,
1974 * in #inodes or kbytes */
1975};
1976
1977/* Data structures associated with the quota locks */
1978
1979/* Glimpse descriptor used for the index & per-ID quota locks */
1980struct ldlm_gl_lquota_desc {
1981 union lquota_id gl_id; /* quota ID subject to the glimpse */
1982 __u64 gl_flags; /* see LQUOTA_FL* below */
1983 __u64 gl_ver; /* new index version */
1984 __u64 gl_hardlimit; /* new hardlimit or qunit value */
1985 __u64 gl_softlimit; /* new softlimit */
1986 __u64 gl_time;
1987 __u64 gl_pad2;
1988};
1989#define gl_qunit gl_hardlimit /* current qunit value used when
1990 * glimpsing per-ID quota locks */
1991
1992/* quota glimpse flags */
1993#define LQUOTA_FL_EDQUOT 0x1 /* user/group out of quota space on QMT */
1994
1995/* LVB used with quota (global and per-ID) locks */
1996struct lquota_lvb {
1997 __u64 lvb_flags; /* see LQUOTA_FL* above */
1998 __u64 lvb_id_may_rel; /* space that might be released later */
1999 __u64 lvb_id_rel; /* space released by the slave for this ID */
2000 __u64 lvb_id_qunit; /* current qunit value */
2001 __u64 lvb_pad1;
2002};
2003
2004extern void lustre_swab_lquota_lvb(struct lquota_lvb *lvb);
2005
2006/* LVB used with global quota lock */
2007#define lvb_glb_ver lvb_id_may_rel /* current version of the global index */
2008
2009/* op codes */
2010typedef enum {
2011 QUOTA_DQACQ = 601,
2012 QUOTA_DQREL = 602,
2013 QUOTA_LAST_OPC
2014} quota_cmd_t;
2015#define QUOTA_FIRST_OPC QUOTA_DQACQ
2016
2017/*
2018 * MDS REQ RECORDS
2019 */
2020
2021/* opcodes */
2022typedef enum {
2023 MDS_GETATTR = 33,
2024 MDS_GETATTR_NAME = 34,
2025 MDS_CLOSE = 35,
2026 MDS_REINT = 36,
2027 MDS_READPAGE = 37,
2028 MDS_CONNECT = 38,
2029 MDS_DISCONNECT = 39,
2030 MDS_GETSTATUS = 40,
2031 MDS_STATFS = 41,
2032 MDS_PIN = 42,
2033 MDS_UNPIN = 43,
2034 MDS_SYNC = 44,
2035 MDS_DONE_WRITING = 45,
2036 MDS_SET_INFO = 46,
2037 MDS_QUOTACHECK = 47,
2038 MDS_QUOTACTL = 48,
2039 MDS_GETXATTR = 49,
2040 MDS_SETXATTR = 50, /* obsolete, now it's MDS_REINT op */
2041 MDS_WRITEPAGE = 51,
2042 MDS_IS_SUBDIR = 52,
2043 MDS_GET_INFO = 53,
2044 MDS_HSM_STATE_GET = 54,
2045 MDS_HSM_STATE_SET = 55,
2046 MDS_HSM_ACTION = 56,
2047 MDS_HSM_PROGRESS = 57,
2048 MDS_HSM_REQUEST = 58,
2049 MDS_HSM_CT_REGISTER = 59,
2050 MDS_HSM_CT_UNREGISTER = 60,
2051 MDS_SWAP_LAYOUTS = 61,
2052 MDS_LAST_OPC
2053} mds_cmd_t;
2054
2055#define MDS_FIRST_OPC MDS_GETATTR
2056
2057
2058/* opcodes for object update */
2059typedef enum {
2060 UPDATE_OBJ = 1000,
2061 UPDATE_LAST_OPC
2062} update_cmd_t;
2063
2064#define UPDATE_FIRST_OPC UPDATE_OBJ
2065
2066/*
2067 * Do not exceed 63
2068 */
2069
2070typedef enum {
2071 REINT_SETATTR = 1,
2072 REINT_CREATE = 2,
2073 REINT_LINK = 3,
2074 REINT_UNLINK = 4,
2075 REINT_RENAME = 5,
2076 REINT_OPEN = 6,
2077 REINT_SETXATTR = 7,
2078 REINT_RMENTRY = 8,
2079// REINT_WRITE = 9,
2080 REINT_MAX
2081} mds_reint_t, mdt_reint_t;
2082
2083extern void lustre_swab_generic_32s (__u32 *val);
2084
2085/* the disposition of the intent outlines what was executed */
2086#define DISP_IT_EXECD 0x00000001
2087#define DISP_LOOKUP_EXECD 0x00000002
2088#define DISP_LOOKUP_NEG 0x00000004
2089#define DISP_LOOKUP_POS 0x00000008
2090#define DISP_OPEN_CREATE 0x00000010
2091#define DISP_OPEN_OPEN 0x00000020
2092#define DISP_ENQ_COMPLETE 0x00400000
2093#define DISP_ENQ_OPEN_REF 0x00800000
2094#define DISP_ENQ_CREATE_REF 0x01000000
2095#define DISP_OPEN_LOCK 0x02000000
2096
2097/* INODE LOCK PARTS */
2098#define MDS_INODELOCK_LOOKUP 0x000001 /* dentry, mode, owner, group */
2099#define MDS_INODELOCK_UPDATE 0x000002 /* size, links, timestamps */
2100#define MDS_INODELOCK_OPEN 0x000004 /* For opened files */
2101#define MDS_INODELOCK_LAYOUT 0x000008 /* for layout */
2102#define MDS_INODELOCK_PERM 0x000010 /* for permission */
2103
2104#define MDS_INODELOCK_MAXSHIFT 4
2105/* This FULL lock is useful to take on unlink sort of operations */
2106#define MDS_INODELOCK_FULL ((1<<(MDS_INODELOCK_MAXSHIFT+1))-1)
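/* With MDS_INODELOCK_MAXSHIFT == 4 this expands to (1 << 5) - 1 == 0x1f,
 * i.e. LOOKUP | UPDATE | OPEN | LAYOUT | PERM (worked example). */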
2107
2108extern void lustre_swab_ll_fid (struct ll_fid *fid);
2109
2110/* NOTE: until Lustre 1.8.7/2.1.1 the fid_ver() was packed into name[2],
2111 * but was moved into name[1] along with the OID to avoid consuming the
2112 * name[2,3] fields that need to be used for the quota id (also a FID). */
2113enum {
2114 LUSTRE_RES_ID_SEQ_OFF = 0,
2115 LUSTRE_RES_ID_VER_OID_OFF = 1,
2116 LUSTRE_RES_ID_WAS_VER_OFF = 2, /* see note above */
2117 LUSTRE_RES_ID_QUOTA_SEQ_OFF = 2,
2118 LUSTRE_RES_ID_QUOTA_VER_OID_OFF = 3,
2119 LUSTRE_RES_ID_HSH_OFF = 3
2120};
2121
2122#define MDS_STATUS_CONN 1
2123#define MDS_STATUS_LOV 2
2124
2125/* mdt_thread_info.mti_flags. */
2126enum md_op_flags {
2127 /* This flag indicates that Size-on-MDS attributes have changed. */
2128 MF_SOM_CHANGE = (1 << 0),
2129 /* These flags indicate that an epoch opens or closes. */
2130 MF_EPOCH_OPEN = (1 << 1),
2131 MF_EPOCH_CLOSE = (1 << 2),
2132 MF_MDC_CANCEL_FID1 = (1 << 3),
2133 MF_MDC_CANCEL_FID2 = (1 << 4),
2134 MF_MDC_CANCEL_FID3 = (1 << 5),
2135 MF_MDC_CANCEL_FID4 = (1 << 6),
2136 /* There is a pending attribute update. */
2137 MF_SOM_AU = (1 << 7),
2138 /* Cancel OST locks while getattr OST attributes. */
2139 MF_GETATTR_LOCK = (1 << 8),
2140 MF_GET_MDT_IDX = (1 << 9),
2141};
2142
2143#define MF_SOM_LOCAL_FLAGS (MF_SOM_CHANGE | MF_EPOCH_OPEN | MF_EPOCH_CLOSE)
2144
2145#define LUSTRE_BFLAG_UNCOMMITTED_WRITES 0x1
2146
2147/* these should be identical to their EXT4_*_FL counterparts, they are
2148 * redefined here only to avoid dragging in fs/ext4/ext4.h */
2149#define LUSTRE_SYNC_FL 0x00000008 /* Synchronous updates */
2150#define LUSTRE_IMMUTABLE_FL 0x00000010 /* Immutable file */
2151#define LUSTRE_APPEND_FL 0x00000020 /* writes to file may only append */
2152#define LUSTRE_NOATIME_FL 0x00000080 /* do not update atime */
2153#define LUSTRE_DIRSYNC_FL 0x00010000 /* dirsync behaviour (dir only) */
2154
2155/* Convert wire LUSTRE_*_FL to corresponding client local VFS S_* values
2156 * for the client inode i_flags. The LUSTRE_*_FL are the Lustre wire
2157 * protocol equivalents of LDISKFS_*_FL values stored on disk, while
2158 * the S_* flags are kernel-internal values that change between kernel
2159 * versions. These flags are set/cleared via FSFILT_IOC_{GET,SET}_FLAGS.
2160 * See b=16526 for a full history. */
2161static inline int ll_ext_to_inode_flags(int flags)
2162{
2163 return (((flags & LUSTRE_SYNC_FL) ? S_SYNC : 0) |
2164 ((flags & LUSTRE_NOATIME_FL) ? S_NOATIME : 0) |
2165 ((flags & LUSTRE_APPEND_FL) ? S_APPEND : 0) |
2166#if defined(S_DIRSYNC)
2167 ((flags & LUSTRE_DIRSYNC_FL) ? S_DIRSYNC : 0) |
2168#endif
2169 ((flags & LUSTRE_IMMUTABLE_FL) ? S_IMMUTABLE : 0));
2170}
2171
2172static inline int ll_inode_to_ext_flags(int iflags)
2173{
2174 return (((iflags & S_SYNC) ? LUSTRE_SYNC_FL : 0) |
2175 ((iflags & S_NOATIME) ? LUSTRE_NOATIME_FL : 0) |
2176 ((iflags & S_APPEND) ? LUSTRE_APPEND_FL : 0) |
2177#if defined(S_DIRSYNC)
2178 ((iflags & S_DIRSYNC) ? LUSTRE_DIRSYNC_FL : 0) |
2179#endif
2180 ((iflags & S_IMMUTABLE) ? LUSTRE_IMMUTABLE_FL : 0));
2181}
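/*
 * Round-trip sketch for the two flag converters above (illustrative
 * only): the Lustre wire flag bits handled here survive the conversion
 * to VFS S_* flags and back.
 */
static inline void ll_flags_roundtrip_example(void)
{
	int wire = LUSTRE_APPEND_FL | LUSTRE_IMMUTABLE_FL;

	LASSERT(ll_inode_to_ext_flags(ll_ext_to_inode_flags(wire)) == wire);
}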
2182
2183struct mdt_body {
2184 struct lu_fid fid1;
2185 struct lu_fid fid2;
2186 struct lustre_handle handle;
2187 __u64 valid;
2188 __u64 size; /* Offset, in the case of MDS_READPAGE */
2189 obd_time mtime;
2190 obd_time atime;
2191 obd_time ctime;
2192 __u64 blocks; /* XID, in the case of MDS_READPAGE */
2193 __u64 ioepoch;
2194 __u64 unused1; /* was "ino" until 2.4.0 */
2195 __u32 fsuid;
2196 __u32 fsgid;
2197 __u32 capability;
2198 __u32 mode;
2199 __u32 uid;
2200 __u32 gid;
2201 __u32 flags; /* from vfs for pin/unpin, LUSTRE_BFLAG close */
2202 __u32 rdev;
2203 __u32 nlink; /* #bytes to read in the case of MDS_READPAGE */
2204 __u32 unused2; /* was "generation" until 2.4.0 */
2205 __u32 suppgid;
2206 __u32 eadatasize;
2207 __u32 aclsize;
2208 __u32 max_mdsize;
2209 __u32 max_cookiesize;
2210 __u32 uid_h; /* high 32-bits of uid, for FUID */
2211 __u32 gid_h; /* high 32-bits of gid, for FUID */
2212 __u32 padding_5; /* also fix lustre_swab_mdt_body */
2213 __u64 padding_6;
2214 __u64 padding_7;
2215 __u64 padding_8;
2216 __u64 padding_9;
2217 __u64 padding_10;
2218}; /* 216 */
2219
2220extern void lustre_swab_mdt_body (struct mdt_body *b);
2221
2222struct mdt_ioepoch {
2223 struct lustre_handle handle;
2224 __u64 ioepoch;
2225 __u32 flags;
2226 __u32 padding;
2227};
2228
2229extern void lustre_swab_mdt_ioepoch (struct mdt_ioepoch *b);
2230
2231/* permissions for md_perm.mp_perm */
2232enum {
2233 CFS_SETUID_PERM = 0x01,
2234 CFS_SETGID_PERM = 0x02,
2235 CFS_SETGRP_PERM = 0x04,
2236 CFS_RMTACL_PERM = 0x08,
2237 CFS_RMTOWN_PERM = 0x10
2238};
2239
2240/* inode access permission for a remote user; the inode info is omitted
2241 * because the client already knows it. */
2242struct mdt_remote_perm {
2243 __u32 rp_uid;
2244 __u32 rp_gid;
2245 __u32 rp_fsuid;
2246 __u32 rp_fsuid_h;
2247 __u32 rp_fsgid;
2248 __u32 rp_fsgid_h;
2249 __u32 rp_access_perm; /* MAY_READ/WRITE/EXEC */
2250 __u32 rp_padding;
2251};
2252
2253extern void lustre_swab_mdt_remote_perm(struct mdt_remote_perm *p);
2254
2255struct mdt_rec_setattr {
2256 __u32 sa_opcode;
2257 __u32 sa_cap;
2258 __u32 sa_fsuid;
2259 __u32 sa_fsuid_h;
2260 __u32 sa_fsgid;
2261 __u32 sa_fsgid_h;
2262 __u32 sa_suppgid;
2263 __u32 sa_suppgid_h;
2264 __u32 sa_padding_1;
2265 __u32 sa_padding_1_h;
2266 struct lu_fid sa_fid;
2267 __u64 sa_valid;
2268 __u32 sa_uid;
2269 __u32 sa_gid;
2270 __u64 sa_size;
2271 __u64 sa_blocks;
2272 obd_time sa_mtime;
2273 obd_time sa_atime;
2274 obd_time sa_ctime;
2275 __u32 sa_attr_flags;
2276 __u32 sa_mode;
2277 __u32 sa_bias; /* some operation flags */
2278 __u32 sa_padding_3;
2279 __u32 sa_padding_4;
2280 __u32 sa_padding_5;
2281};
2282
2283extern void lustre_swab_mdt_rec_setattr (struct mdt_rec_setattr *sa);
2284
2285/*
2286 * Attribute flags used in mdt_rec_setattr::sa_valid.
2287 * The kernel's #defines for ATTR_* should not be used over the network
2288 * since the client and MDS may run different kernels (see bug 13828)
2289 * Therefore, we should only use MDS_ATTR_* attributes for sa_valid.
2290 */
2291#define MDS_ATTR_MODE 0x1ULL /* = 1 */
2292#define MDS_ATTR_UID 0x2ULL /* = 2 */
2293#define MDS_ATTR_GID 0x4ULL /* = 4 */
2294#define MDS_ATTR_SIZE 0x8ULL /* = 8 */
2295#define MDS_ATTR_ATIME 0x10ULL /* = 16 */
2296#define MDS_ATTR_MTIME 0x20ULL /* = 32 */
2297#define MDS_ATTR_CTIME 0x40ULL /* = 64 */
2298#define MDS_ATTR_ATIME_SET 0x80ULL /* = 128 */
2299#define MDS_ATTR_MTIME_SET 0x100ULL /* = 256 */
2300#define MDS_ATTR_FORCE 0x200ULL /* = 512, not a change, but force the change */
2301#define MDS_ATTR_ATTR_FLAG 0x400ULL /* = 1024 */
2302#define MDS_ATTR_KILL_SUID 0x800ULL /* = 2048 */
2303#define MDS_ATTR_KILL_SGID 0x1000ULL /* = 4096 */
2304#define MDS_ATTR_CTIME_SET 0x2000ULL /* = 8192 */
2305#define MDS_ATTR_FROM_OPEN 0x4000ULL /* = 16384, called from open path, i.e. O_TRUNC */
2306#define MDS_ATTR_BLOCKS 0x8000ULL /* = 32768 */
2307
2308#ifndef FMODE_READ
2309#define FMODE_READ 00000001
2310#define FMODE_WRITE 00000002
2311#endif
2312
2313#define MDS_FMODE_CLOSED 00000000
2314#define MDS_FMODE_EXEC 00000004
2315/* IO Epoch is opened on a closed file. */
2316#define MDS_FMODE_EPOCH 01000000
2317/* IO Epoch is opened on a file truncate. */
2318#define MDS_FMODE_TRUNC 02000000
2319/* Size-on-MDS Attribute Update is pending. */
2320#define MDS_FMODE_SOM 04000000
2321
2322#define MDS_OPEN_CREATED 00000010
2323#define MDS_OPEN_CROSS 00000020
2324
2325#define MDS_OPEN_CREAT 00000100
2326#define MDS_OPEN_EXCL 00000200
2327#define MDS_OPEN_TRUNC 00001000
2328#define MDS_OPEN_APPEND 00002000
2329#define MDS_OPEN_SYNC 00010000
2330#define MDS_OPEN_DIRECTORY 00200000
2331
2332#define MDS_OPEN_BY_FID 040000000 /* open_by_fid for known object */
2333#define MDS_OPEN_DELAY_CREATE 0100000000 /* delay initial object create */
2334#define MDS_OPEN_OWNEROVERRIDE 0200000000 /* NFSD rw-reopen ro file for owner */
2335#define MDS_OPEN_JOIN_FILE 0400000000 /* open for join file.
2336 * We do not support JOIN FILE
2337 * anymore; this flag is reserved
2338 * just to prevent the bit from
2339 * being reused. */
2340
2341#define MDS_OPEN_LOCK 04000000000 /* This open requires open lock */
2342#define MDS_OPEN_HAS_EA 010000000000 /* specify object create pattern */
2343#define MDS_OPEN_HAS_OBJS 020000000000 /* Just set the EA, the objects exist */
2344#define MDS_OPEN_NORESTORE 0100000000000ULL /* Do not restore file at open */
2345#define MDS_OPEN_NEWSTRIPE 0200000000000ULL /* New stripe needed (restripe or
2346 * hsm restore) */
2347#define MDS_OPEN_VOLATILE 0400000000000ULL /* File is volatile = created
2348 unlinked */
2349
2350/* permission to create a non-directory file */
2351#define MAY_CREATE (1 << 7)
2352/* permission to create a directory */
2353#define MAY_LINK (1 << 8)
2354/* permission to delete from the directory */
2355#define MAY_UNLINK (1 << 9)
2356/* source's permission for rename */
2357#define MAY_RENAME_SRC (1 << 10)
2358/* target's permission for rename */
2359#define MAY_RENAME_TAR (1 << 11)
2360/* partial (parent's) VTX permission check */
2361#define MAY_VTX_PART (1 << 12)
2362/* full VTX permission check */
2363#define MAY_VTX_FULL (1 << 13)
2364/* lfs rgetfacl permission check */
2365#define MAY_RGETFACL (1 << 14)
2366
2367enum {
2368 MDS_CHECK_SPLIT = 1 << 0,
2369 MDS_CROSS_REF = 1 << 1,
2370 MDS_VTX_BYPASS = 1 << 2,
2371 MDS_PERM_BYPASS = 1 << 3,
2372 MDS_SOM = 1 << 4,
2373 MDS_QUOTA_IGNORE = 1 << 5,
2374 MDS_CLOSE_CLEANUP = 1 << 6,
2375 MDS_KEEP_ORPHAN = 1 << 7,
2376 MDS_RECOV_OPEN = 1 << 8,
2377 MDS_DATA_MODIFIED = 1 << 9,
2378 MDS_CREATE_VOLATILE = 1 << 10,
2379 MDS_OWNEROVERRIDE = 1 << 11,
2380};
2381
2382/* instance of mdt_reint_rec */
2383struct mdt_rec_create {
2384 __u32 cr_opcode;
2385 __u32 cr_cap;
2386 __u32 cr_fsuid;
2387 __u32 cr_fsuid_h;
2388 __u32 cr_fsgid;
2389 __u32 cr_fsgid_h;
2390 __u32 cr_suppgid1;
2391 __u32 cr_suppgid1_h;
2392 __u32 cr_suppgid2;
2393 __u32 cr_suppgid2_h;
2394 struct lu_fid cr_fid1;
2395 struct lu_fid cr_fid2;
2396 struct lustre_handle cr_old_handle; /* handle in case of open replay */
2397 obd_time cr_time;
2398 __u64 cr_rdev;
2399 __u64 cr_ioepoch;
2400 __u64 cr_padding_1; /* rr_blocks */
2401 __u32 cr_mode;
2402 __u32 cr_bias;
2403 /* the set/get_mrc_cr_flags() helpers must be used to access the
2404 * 64-bit cr_flags [cr_flags_l, cr_flags_h]; this is done to extend
2405 * the cr_flags size without breaking 1.8 compat */
2406 __u32 cr_flags_l; /* for use with open, low 32 bits */
2407 __u32 cr_flags_h; /* for use with open, high 32 bits */
2408 __u32 cr_umask; /* umask for create */
2409 __u32 cr_padding_4; /* rr_padding_4 */
2410};
2411
2412static inline void set_mrc_cr_flags(struct mdt_rec_create *mrc, __u64 flags)
2413{
2414 mrc->cr_flags_l = (__u32)(flags & 0xFFFFFFFFUll);
2415 mrc->cr_flags_h = (__u32)(flags >> 32);
2416}
2417
2418static inline __u64 get_mrc_cr_flags(struct mdt_rec_create *mrc)
2419{
2420 return ((__u64)(mrc->cr_flags_l) | ((__u64)mrc->cr_flags_h << 32));
2421}
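/*
 * Usage sketch for the cr_flags helpers above (illustrative only): a
 * 64-bit open flag value, including bits above bit 31 such as
 * MDS_OPEN_VOLATILE, survives the split into the two 32-bit wire fields.
 */
static inline void mrc_cr_flags_example(struct mdt_rec_create *mrc)
{
	__u64 flags = MDS_OPEN_CREAT | MDS_OPEN_VOLATILE;

	set_mrc_cr_flags(mrc, flags);
	LASSERT(get_mrc_cr_flags(mrc) == flags);
}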
2422
2423/* instance of mdt_reint_rec */
2424struct mdt_rec_link {
2425 __u32 lk_opcode;
2426 __u32 lk_cap;
2427 __u32 lk_fsuid;
2428 __u32 lk_fsuid_h;
2429 __u32 lk_fsgid;
2430 __u32 lk_fsgid_h;
2431 __u32 lk_suppgid1;
2432 __u32 lk_suppgid1_h;
2433 __u32 lk_suppgid2;
2434 __u32 lk_suppgid2_h;
2435 struct lu_fid lk_fid1;
2436 struct lu_fid lk_fid2;
2437 obd_time lk_time;
2438 __u64 lk_padding_1; /* rr_atime */
2439 __u64 lk_padding_2; /* rr_ctime */
2440 __u64 lk_padding_3; /* rr_size */
2441 __u64 lk_padding_4; /* rr_blocks */
2442 __u32 lk_bias;
2443 __u32 lk_padding_5; /* rr_mode */
2444 __u32 lk_padding_6; /* rr_flags */
2445 __u32 lk_padding_7; /* rr_padding_2 */
2446 __u32 lk_padding_8; /* rr_padding_3 */
2447 __u32 lk_padding_9; /* rr_padding_4 */
2448};
2449
2450/* instance of mdt_reint_rec */
2451struct mdt_rec_unlink {
2452 __u32 ul_opcode;
2453 __u32 ul_cap;
2454 __u32 ul_fsuid;
2455 __u32 ul_fsuid_h;
2456 __u32 ul_fsgid;
2457 __u32 ul_fsgid_h;
2458 __u32 ul_suppgid1;
2459 __u32 ul_suppgid1_h;
2460 __u32 ul_suppgid2;
2461 __u32 ul_suppgid2_h;
2462 struct lu_fid ul_fid1;
2463 struct lu_fid ul_fid2;
2464 obd_time ul_time;
2465 __u64 ul_padding_2; /* rr_atime */
2466 __u64 ul_padding_3; /* rr_ctime */
2467 __u64 ul_padding_4; /* rr_size */
2468 __u64 ul_padding_5; /* rr_blocks */
2469 __u32 ul_bias;
2470 __u32 ul_mode;
2471 __u32 ul_padding_6; /* rr_flags */
2472 __u32 ul_padding_7; /* rr_padding_2 */
2473 __u32 ul_padding_8; /* rr_padding_3 */
2474 __u32 ul_padding_9; /* rr_padding_4 */
2475};
2476
2477/* instance of mdt_reint_rec */
2478struct mdt_rec_rename {
2479 __u32 rn_opcode;
2480 __u32 rn_cap;
2481 __u32 rn_fsuid;
2482 __u32 rn_fsuid_h;
2483 __u32 rn_fsgid;
2484 __u32 rn_fsgid_h;
2485 __u32 rn_suppgid1;
2486 __u32 rn_suppgid1_h;
2487 __u32 rn_suppgid2;
2488 __u32 rn_suppgid2_h;
2489 struct lu_fid rn_fid1;
2490 struct lu_fid rn_fid2;
2491 obd_time rn_time;
2492 __u64 rn_padding_1; /* rr_atime */
2493 __u64 rn_padding_2; /* rr_ctime */
2494 __u64 rn_padding_3; /* rr_size */
2495 __u64 rn_padding_4; /* rr_blocks */
2496 __u32 rn_bias; /* some operation flags */
2497 __u32 rn_mode; /* cross-ref rename has mode */
2498 __u32 rn_padding_5; /* rr_flags */
2499 __u32 rn_padding_6; /* rr_padding_2 */
2500 __u32 rn_padding_7; /* rr_padding_3 */
2501 __u32 rn_padding_8; /* rr_padding_4 */
2502};
2503
2504/* instance of mdt_reint_rec */
2505struct mdt_rec_setxattr {
2506 __u32 sx_opcode;
2507 __u32 sx_cap;
2508 __u32 sx_fsuid;
2509 __u32 sx_fsuid_h;
2510 __u32 sx_fsgid;
2511 __u32 sx_fsgid_h;
2512 __u32 sx_suppgid1;
2513 __u32 sx_suppgid1_h;
2514 __u32 sx_suppgid2;
2515 __u32 sx_suppgid2_h;
2516 struct lu_fid sx_fid;
2517 __u64 sx_padding_1; /* These three are rr_fid2 */
2518 __u32 sx_padding_2;
2519 __u32 sx_padding_3;
2520 __u64 sx_valid;
2521 obd_time sx_time;
2522 __u64 sx_padding_5; /* rr_ctime */
2523 __u64 sx_padding_6; /* rr_size */
2524 __u64 sx_padding_7; /* rr_blocks */
2525 __u32 sx_size;
2526 __u32 sx_flags;
2527 __u32 sx_padding_8; /* rr_flags */
2528 __u32 sx_padding_9; /* rr_padding_2 */
2529 __u32 sx_padding_10; /* rr_padding_3 */
2530 __u32 sx_padding_11; /* rr_padding_4 */
2531};
2532
2533/*
2534 * mdt_rec_reint is the template for all mdt_reint_xxx structures.
2535 * Do NOT change the size of any member, otherwise the values will be
2536 * swabbed incorrectly by lustre_swab_mdt_rec_reint().
2537 *
2538 * If you add new members in other mdt_reint_xxx structures and need to use
2539 * the rr_padding_x fields, then update lustre_swab_mdt_rec_reint() also.
2540 */
2541struct mdt_rec_reint {
2542 __u32 rr_opcode;
2543 __u32 rr_cap;
2544 __u32 rr_fsuid;
2545 __u32 rr_fsuid_h;
2546 __u32 rr_fsgid;
2547 __u32 rr_fsgid_h;
2548 __u32 rr_suppgid1;
2549 __u32 rr_suppgid1_h;
2550 __u32 rr_suppgid2;
2551 __u32 rr_suppgid2_h;
2552 struct lu_fid rr_fid1;
2553 struct lu_fid rr_fid2;
2554 obd_time rr_mtime;
2555 obd_time rr_atime;
2556 obd_time rr_ctime;
2557 __u64 rr_size;
2558 __u64 rr_blocks;
2559 __u32 rr_bias;
2560 __u32 rr_mode;
2561 __u32 rr_flags;
2562 __u32 rr_flags_h;
2563 __u32 rr_umask;
2564 __u32 rr_padding_4; /* also fix lustre_swab_mdt_rec_reint */
2565};
2566
2567extern void lustre_swab_mdt_rec_reint(struct mdt_rec_reint *rr);
2568
2569struct lmv_desc {
2570 __u32 ld_tgt_count; /* how many MDS's */
2571 __u32 ld_active_tgt_count; /* how many active */
2572 __u32 ld_default_stripe_count; /* how many objects are used */
2573 __u32 ld_pattern; /* default MEA_MAGIC_* */
2574 __u64 ld_default_hash_size;
2575 __u64 ld_padding_1; /* also fix lustre_swab_lmv_desc */
2576 __u32 ld_padding_2; /* also fix lustre_swab_lmv_desc */
2577 __u32 ld_qos_maxage; /* in seconds */
2578 __u32 ld_padding_3; /* also fix lustre_swab_lmv_desc */
2579 __u32 ld_padding_4; /* also fix lustre_swab_lmv_desc */
2580 struct obd_uuid ld_uuid;
2581};
2582
2583extern void lustre_swab_lmv_desc (struct lmv_desc *ld);
2584
2585/* TODO: lmv_stripe_md should contain mds capabilities for all slave fids */
2586struct lmv_stripe_md {
2587 __u32 mea_magic;
2588 __u32 mea_count;
2589 __u32 mea_master;
2590 __u32 mea_padding;
2591 char mea_pool_name[LOV_MAXPOOLNAME];
2592 struct lu_fid mea_ids[0];
2593};
2594
2595extern void lustre_swab_lmv_stripe_md(struct lmv_stripe_md *mea);
2596
2597/* lmv structures */
2598#define MEA_MAGIC_LAST_CHAR 0xb2221ca1
2599#define MEA_MAGIC_ALL_CHARS 0xb222a11c
2600#define MEA_MAGIC_HASH_SEGMENT 0xb222a11b
2601
2602#define MAX_HASH_SIZE_32 0x7fffffffUL
2603#define MAX_HASH_SIZE 0x7fffffffffffffffULL
2604#define MAX_HASH_HIGHEST_BIT 0x1000000000000000ULL
2605
2606enum fld_rpc_opc {
2607 FLD_QUERY = 900,
2608 FLD_LAST_OPC,
2609 FLD_FIRST_OPC = FLD_QUERY
2610};
2611
2612enum seq_rpc_opc {
2613 SEQ_QUERY = 700,
2614 SEQ_LAST_OPC,
2615 SEQ_FIRST_OPC = SEQ_QUERY
2616};
2617
2618enum seq_op {
2619 SEQ_ALLOC_SUPER = 0,
2620 SEQ_ALLOC_META = 1
2621};
2622
2623/*
2624 * LOV data structures
2625 */
2626
2627#define LOV_MAX_UUID_BUFFER_SIZE 8192
2628/* The size of the buffer the lov/mdc reserves for the
2629 * array of UUIDs returned by the MDS. With the current
2630 * protocol, this will limit the max number of OSTs per LOV */
2631
2632#define LOV_DESC_MAGIC 0xB0CCDE5C
2633
2634/* LOV settings descriptor (should only contain static info) */
2635struct lov_desc {
2636 __u32 ld_tgt_count; /* how many OBD's */
2637 __u32 ld_active_tgt_count; /* how many active */
2638 __u32 ld_default_stripe_count; /* how many objects are used */
2639 __u32 ld_pattern; /* default PATTERN_RAID0 */
2640 __u64 ld_default_stripe_size; /* in bytes */
2641 __u64 ld_default_stripe_offset; /* in bytes */
2642 __u32 ld_padding_0; /* unused */
2643 __u32 ld_qos_maxage; /* in seconds */
2644 __u32 ld_padding_1; /* also fix lustre_swab_lov_desc */
2645 __u32 ld_padding_2; /* also fix lustre_swab_lov_desc */
2646 struct obd_uuid ld_uuid;
2647};
2648
2649#define ld_magic ld_active_tgt_count /* for swabbing from llogs */
2650
2651extern void lustre_swab_lov_desc (struct lov_desc *ld);
2652
2653/*
2654 * LDLM requests:
2655 */
2656/* opcodes -- MUST be distinct from OST/MDS opcodes */
2657typedef enum {
2658 LDLM_ENQUEUE = 101,
2659 LDLM_CONVERT = 102,
2660 LDLM_CANCEL = 103,
2661 LDLM_BL_CALLBACK = 104,
2662 LDLM_CP_CALLBACK = 105,
2663 LDLM_GL_CALLBACK = 106,
2664 LDLM_SET_INFO = 107,
2665 LDLM_LAST_OPC
2666} ldlm_cmd_t;
2667#define LDLM_FIRST_OPC LDLM_ENQUEUE
2668
2669#define RES_NAME_SIZE 4
2670struct ldlm_res_id {
2671 __u64 name[RES_NAME_SIZE];
2672};
2673
2674extern void lustre_swab_ldlm_res_id (struct ldlm_res_id *id);
2675
2676static inline int ldlm_res_eq(const struct ldlm_res_id *res0,
2677 const struct ldlm_res_id *res1)
2678{
2679 return !memcmp(res0, res1, sizeof(*res0));
2680}
2681
2682/* lock types */
2683typedef enum {
2684 LCK_MINMODE = 0,
2685 LCK_EX = 1,
2686 LCK_PW = 2,
2687 LCK_PR = 4,
2688 LCK_CW = 8,
2689 LCK_CR = 16,
2690 LCK_NL = 32,
2691 LCK_GROUP = 64,
2692 LCK_COS = 128,
2693 LCK_MAXMODE
2694} ldlm_mode_t;
2695
2696#define LCK_MODE_NUM 8
2697
2698typedef enum {
2699 LDLM_PLAIN = 10,
2700 LDLM_EXTENT = 11,
2701 LDLM_FLOCK = 12,
2702 LDLM_IBITS = 13,
2703 LDLM_MAX_TYPE
2704} ldlm_type_t;
2705
2706#define LDLM_MIN_TYPE LDLM_PLAIN
2707
2708struct ldlm_extent {
2709 __u64 start;
2710 __u64 end;
2711 __u64 gid;
2712};
2713
2714static inline int ldlm_extent_overlap(struct ldlm_extent *ex1,
2715 struct ldlm_extent *ex2)
2716{
2717 return (ex1->start <= ex2->end) && (ex2->start <= ex1->end);
2718}
2719
2720/* check if @ex1 contains @ex2 */
2721static inline int ldlm_extent_contain(struct ldlm_extent *ex1,
2722 struct ldlm_extent *ex2)
2723{
2724 return (ex1->start <= ex2->start) && (ex1->end >= ex2->end);
2725}
2726
2727struct ldlm_inodebits {
2728 __u64 bits;
2729};
2730
2731struct ldlm_flock_wire {
2732 __u64 lfw_start;
2733 __u64 lfw_end;
2734 __u64 lfw_owner;
2735 __u32 lfw_padding;
2736 __u32 lfw_pid;
2737};
2738
2739/* it's important that the fields of the ldlm_extent structure match
2740 * the first fields of the ldlm_flock structure because there is only
2741 * one ldlm_swab routine to process the ldlm_policy_data_t union. if
2742 * this ever changes we will need to swab the union differently based
2743 * on the resource type. */
2744
2745typedef union {
2746 struct ldlm_extent l_extent;
2747 struct ldlm_flock_wire l_flock;
2748 struct ldlm_inodebits l_inodebits;
2749} ldlm_wire_policy_data_t;
2750
2751extern void lustre_swab_ldlm_policy_data (ldlm_wire_policy_data_t *d);
2752
2753union ldlm_gl_desc {
2754 struct ldlm_gl_lquota_desc lquota_desc;
2755};
2756
2757extern void lustre_swab_gl_desc(union ldlm_gl_desc *);
2758
2759struct ldlm_intent {
2760 __u64 opc;
2761};
2762
2763extern void lustre_swab_ldlm_intent (struct ldlm_intent *i);
2764
2765struct ldlm_resource_desc {
2766 ldlm_type_t lr_type;
2767 __u32 lr_padding; /* also fix lustre_swab_ldlm_resource_desc */
2768 struct ldlm_res_id lr_name;
2769};
2770
2771extern void lustre_swab_ldlm_resource_desc (struct ldlm_resource_desc *r);
2772
2773struct ldlm_lock_desc {
2774 struct ldlm_resource_desc l_resource;
2775 ldlm_mode_t l_req_mode;
2776 ldlm_mode_t l_granted_mode;
2777 ldlm_wire_policy_data_t l_policy_data;
2778};
2779
2780extern void lustre_swab_ldlm_lock_desc (struct ldlm_lock_desc *l);
2781
2782#define LDLM_LOCKREQ_HANDLES 2
2783#define LDLM_ENQUEUE_CANCEL_OFF 1
2784
2785struct ldlm_request {
2786 __u32 lock_flags;
2787 __u32 lock_count;
2788 struct ldlm_lock_desc lock_desc;
2789 struct lustre_handle lock_handle[LDLM_LOCKREQ_HANDLES];
2790};
2791
2792extern void lustre_swab_ldlm_request (struct ldlm_request *rq);
2793
2794/* If LDLM_ENQUEUE, 1 slot is already occupied, 1 is available.
2795 * Otherwise, 2 are available. */
2796#define ldlm_request_bufsize(count, type) \
2797({ \
2798 int _avail = LDLM_LOCKREQ_HANDLES; \
2799 _avail -= (type == LDLM_ENQUEUE ? LDLM_ENQUEUE_CANCEL_OFF : 0); \
2800 sizeof(struct ldlm_request) + \
2801 (count > _avail ? count - _avail : 0) * \
2802 sizeof(struct lustre_handle); \
2803})
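/* Worked example (illustrative): for LDLM_ENQUEUE one of the two embedded
 * slots is already occupied, so ldlm_request_bufsize(5, LDLM_ENQUEUE)
 * yields sizeof(struct ldlm_request) + 4 * sizeof(struct lustre_handle). */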
2804
2805struct ldlm_reply {
2806 __u32 lock_flags;
2807 __u32 lock_padding; /* also fix lustre_swab_ldlm_reply */
2808 struct ldlm_lock_desc lock_desc;
2809 struct lustre_handle lock_handle;
2810 __u64 lock_policy_res1;
2811 __u64 lock_policy_res2;
2812};
2813
2814extern void lustre_swab_ldlm_reply (struct ldlm_reply *r);
2815
2816#define ldlm_flags_to_wire(flags) ((__u32)(flags))
2817#define ldlm_flags_from_wire(flags) ((__u64)(flags))
2818
2819/*
2820 * Opcodes for mountconf (mgs and mgc)
2821 */
2822typedef enum {
2823 MGS_CONNECT = 250,
2824 MGS_DISCONNECT,
2825 MGS_EXCEPTION, /* node died, etc. */
2826 MGS_TARGET_REG, /* whenever target starts up */
2827 MGS_TARGET_DEL,
2828 MGS_SET_INFO,
2829 MGS_CONFIG_READ,
2830 MGS_LAST_OPC
2831} mgs_cmd_t;
2832#define MGS_FIRST_OPC MGS_CONNECT
2833
2834#define MGS_PARAM_MAXLEN 1024
2835#define KEY_SET_INFO "set_info"
2836
2837struct mgs_send_param {
2838 char mgs_param[MGS_PARAM_MAXLEN];
2839};
2840
2841/* We pass this info to the MGS so it can write config logs */
2842#define MTI_NAME_MAXLEN 64
2843#define MTI_PARAM_MAXLEN 4096
2844#define MTI_NIDS_MAX 32
2845struct mgs_target_info {
2846 __u32 mti_lustre_ver;
2847 __u32 mti_stripe_index;
2848 __u32 mti_config_ver;
2849 __u32 mti_flags;
2850 __u32 mti_nid_count;
2851 __u32 mti_instance; /* Running instance of target */
2852 char mti_fsname[MTI_NAME_MAXLEN];
2853 char mti_svname[MTI_NAME_MAXLEN];
2854 char mti_uuid[sizeof(struct obd_uuid)];
2855 __u64 mti_nids[MTI_NIDS_MAX]; /* host nids (lnet_nid_t)*/
2856 char mti_params[MTI_PARAM_MAXLEN];
2857};
2858extern void lustre_swab_mgs_target_info(struct mgs_target_info *oinfo);
2859
2860struct mgs_nidtbl_entry {
2861 __u64 mne_version; /* table version of this entry */
2862 __u32 mne_instance; /* target instance # */
2863 __u32 mne_index; /* target index */
2864 __u32 mne_length; /* length of this entry, in bytes */
2865 __u8 mne_type; /* target type LDD_F_SV_TYPE_OST/MDT */
2866 __u8 mne_nid_type; /* type of NID (mbz), for IPv6 */
2867 __u8 mne_nid_size; /* size of each NID, in bytes */
2868 __u8 mne_nid_count; /* # of NIDs in buffer */
2869 union {
2870 lnet_nid_t nids[0]; /* variable size buffer for NIDs. */
2871 } u;
2872};
2873extern void lustre_swab_mgs_nidtbl_entry(struct mgs_nidtbl_entry *oinfo);
2874
2875struct mgs_config_body {
2876 char mcb_name[MTI_NAME_MAXLEN]; /* logname */
2877 __u64 mcb_offset; /* next index of config log to request */
2878 __u16 mcb_type; /* type of log: CONFIG_T_[CONFIG|RECOVER] */
2879 __u8 mcb_reserved;
2880 __u8 mcb_bits; /* unit size of config log, in bits */
2881 __u32 mcb_units; /* # of units for bulk transfer */
2882};
2883extern void lustre_swab_mgs_config_body(struct mgs_config_body *body);
2884
2885struct mgs_config_res {
2886 __u64 mcr_offset; /* index of last config log */
2887 __u64 mcr_size; /* size of the log */
2888};
2889extern void lustre_swab_mgs_config_res(struct mgs_config_res *body);
2890
2891/* Config marker flags (in config log) */
2892#define CM_START 0x01
2893#define CM_END 0x02
2894#define CM_SKIP 0x04
2895#define CM_UPGRADE146 0x08
2896#define CM_EXCLUDE 0x10
2897#define CM_START_SKIP (CM_START | CM_SKIP)
2898
2899struct cfg_marker {
2900 __u32 cm_step; /* aka config version */
2901 __u32 cm_flags;
2902 __u32 cm_vers; /* lustre release version number */
2903 __u32 cm_padding; /* 64 bit align */
2904 obd_time cm_createtime; /* when this record was first created */
2905 obd_time cm_canceltime; /* when this record is no longer valid */
2906 char cm_tgtname[MTI_NAME_MAXLEN];
2907 char cm_comment[MTI_NAME_MAXLEN];
2908};
2909
2910extern void lustre_swab_cfg_marker(struct cfg_marker *marker,
2911 int swab, int size);
2912
2913/*
2914 * Opcodes for multiple servers.
2915 */
2916
2917typedef enum {
2918 OBD_PING = 400,
2919 OBD_LOG_CANCEL,
2920 OBD_QC_CALLBACK,
2921 OBD_IDX_READ,
2922 OBD_LAST_OPC
2923} obd_cmd_t;
2924#define OBD_FIRST_OPC OBD_PING
2925
2926/* catalog of log objects */
2927
2928/** Identifier for a single log object */
2929struct llog_logid {
2930 struct ost_id lgl_oi;
2931 __u32 lgl_ogen;
2932} __attribute__((packed));
2933
2934/** Records written to the CATALOGS list */
2935#define CATLIST "CATALOGS"
2936struct llog_catid {
2937 struct llog_logid lci_logid;
2938 __u32 lci_padding1;
2939 __u32 lci_padding2;
2940 __u32 lci_padding3;
2941} __attribute__((packed));
2942
2943/* Log data record types - there is no specific reason that these need to
2944 * be related to the RPC opcodes, but no reason not to (may be handy later?)
2945 */
2946#define LLOG_OP_MAGIC 0x10600000
2947#define LLOG_OP_MASK 0xfff00000
2948
2949typedef enum {
2950 LLOG_PAD_MAGIC = LLOG_OP_MAGIC | 0x00000,
2951 OST_SZ_REC = LLOG_OP_MAGIC | 0x00f00,
2952 /* OST_RAID1_REC = LLOG_OP_MAGIC | 0x01000, never used */
2953 MDS_UNLINK_REC = LLOG_OP_MAGIC | 0x10000 | (MDS_REINT << 8) |
2954 REINT_UNLINK, /* obsolete after 2.5.0 */
2955 MDS_UNLINK64_REC = LLOG_OP_MAGIC | 0x90000 | (MDS_REINT << 8) |
2956 REINT_UNLINK,
2957 /* MDS_SETATTR_REC = LLOG_OP_MAGIC | 0x12401, obsolete 1.8.0 */
2958 MDS_SETATTR64_REC = LLOG_OP_MAGIC | 0x90000 | (MDS_REINT << 8) |
2959 REINT_SETATTR,
2960 OBD_CFG_REC = LLOG_OP_MAGIC | 0x20000,
2961 /* PTL_CFG_REC = LLOG_OP_MAGIC | 0x30000, obsolete 1.4.0 */
2962 LLOG_GEN_REC = LLOG_OP_MAGIC | 0x40000,
2963 /* LLOG_JOIN_REC = LLOG_OP_MAGIC | 0x50000, obsolete 1.8.0 */
2964 CHANGELOG_REC = LLOG_OP_MAGIC | 0x60000,
2965 CHANGELOG_USER_REC = LLOG_OP_MAGIC | 0x70000,
2966 LLOG_HDR_MAGIC = LLOG_OP_MAGIC | 0x45539,
2967 LLOG_LOGID_MAGIC = LLOG_OP_MAGIC | 0x4553b,
2968} llog_op_type;
2969
2970#define LLOG_REC_HDR_NEEDS_SWABBING(r) \
2971 (((r)->lrh_type & __swab32(LLOG_OP_MASK)) == __swab32(LLOG_OP_MAGIC))
2972
2973/** Log record header - stored in little endian order.
2974 * Each record must start with this struct, end with a llog_rec_tail,
2975 * and be a multiple of 256 bits in size.
2976 */
2977struct llog_rec_hdr {
2978 __u32 lrh_len;
2979 __u32 lrh_index;
2980 __u32 lrh_type;
2981 __u32 lrh_id;
2982};
2983
2984struct llog_rec_tail {
2985 __u32 lrt_len;
2986 __u32 lrt_index;
2987};
2988
2989/* Data follows just after the header */
2990#define REC_DATA(ptr) \
2991 ((void *)((char *)ptr + sizeof(struct llog_rec_hdr)))
2992
2993#define REC_DATA_LEN(rec) \
2994 (rec->lrh_len - sizeof(struct llog_rec_hdr) - \
2995 sizeof(struct llog_rec_tail))
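/*
 * Sketch of the accessors above (hypothetical helper, for illustration):
 * the payload of a record sits between the header and the tail, so its
 * length is lrh_len minus both fixed-size parts.
 */
static inline void *llog_rec_payload_example(struct llog_rec_hdr *rec,
					     __u32 *len)
{
	*len = REC_DATA_LEN(rec);
	return REC_DATA(rec);
}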
2996
2997struct llog_logid_rec {
2998 struct llog_rec_hdr lid_hdr;
2999 struct llog_logid lid_id;
3000 __u32 lid_padding1;
3001 __u64 lid_padding2;
3002 __u64 lid_padding3;
3003 struct llog_rec_tail lid_tail;
3004} __attribute__((packed));
3005
3006struct llog_unlink_rec {
3007 struct llog_rec_hdr lur_hdr;
3008 obd_id lur_oid;
3009 obd_count lur_oseq;
3010 obd_count lur_count;
3011 struct llog_rec_tail lur_tail;
3012} __attribute__((packed));
3013
3014struct llog_unlink64_rec {
3015 struct llog_rec_hdr lur_hdr;
3016 struct lu_fid lur_fid;
3017 obd_count lur_count; /* to destroy the lost precreated objects */
3018 __u32 lur_padding1;
3019 __u64 lur_padding2;
3020 __u64 lur_padding3;
3021 struct llog_rec_tail lur_tail;
3022} __attribute__((packed));
3023
3024struct llog_setattr64_rec {
3025 struct llog_rec_hdr lsr_hdr;
3026 struct ost_id lsr_oi;
3027 __u32 lsr_uid;
3028 __u32 lsr_uid_h;
3029 __u32 lsr_gid;
3030 __u32 lsr_gid_h;
3031 __u64 lsr_padding;
3032 struct llog_rec_tail lsr_tail;
3033} __attribute__((packed));
3034
3035struct llog_size_change_rec {
3036 struct llog_rec_hdr lsc_hdr;
3037 struct ll_fid lsc_fid;
3038 __u32 lsc_ioepoch;
3039 __u32 lsc_padding1;
3040 __u64 lsc_padding2;
3041 __u64 lsc_padding3;
3042 struct llog_rec_tail lsc_tail;
3043} __attribute__((packed));
3044
3045#define CHANGELOG_MAGIC 0xca103000
3046
3047/** \a changelog_rec_type's that can't be masked */
3048#define CHANGELOG_MINMASK (1 << CL_MARK)
3049/** bits covering all \a changelog_rec_type's */
3050#define CHANGELOG_ALLMASK 0XFFFFFFFF
3051/** default \a changelog_rec_type mask */
3052#define CHANGELOG_DEFMASK (CHANGELOG_ALLMASK & ~(1 << CL_ATIME | 1 << CL_CLOSE))
3053
3054/* changelog llog name, needed by client replicators */
3055#define CHANGELOG_CATALOG "changelog_catalog"
3056
3057struct changelog_setinfo {
3058 __u64 cs_recno;
3059 __u32 cs_id;
3060} __attribute__((packed));
3061
3062/** changelog record */
3063struct llog_changelog_rec {
3064 struct llog_rec_hdr cr_hdr;
3065 struct changelog_rec cr;
3066 struct llog_rec_tail cr_tail; /**< for_sizeof_only */
3067} __attribute__((packed));
3068
3069struct llog_changelog_ext_rec {
3070 struct llog_rec_hdr cr_hdr;
3071 struct changelog_ext_rec cr;
3072 struct llog_rec_tail cr_tail; /**< for_sizeof_only */
3073} __attribute__((packed));
3074
3075#define CHANGELOG_USER_PREFIX "cl"
3076
3077struct llog_changelog_user_rec {
3078 struct llog_rec_hdr cur_hdr;
3079 __u32 cur_id;
3080 __u32 cur_padding;
3081 __u64 cur_endrec;
3082 struct llog_rec_tail cur_tail;
3083} __attribute__((packed));
3084
3085/* Old llog gen for compatibility */
3086struct llog_gen {
3087 __u64 mnt_cnt;
3088 __u64 conn_cnt;
3089} __attribute__((packed));
3090
3091struct llog_gen_rec {
3092 struct llog_rec_hdr lgr_hdr;
3093 struct llog_gen lgr_gen;
3094 __u64 padding1;
3095 __u64 padding2;
3096 __u64 padding3;
3097 struct llog_rec_tail lgr_tail;
3098};
3099
3100/* On-disk header structure of each log object, stored in little endian order */
3101#define LLOG_CHUNK_SIZE 8192
3102#define LLOG_HEADER_SIZE (96)
3103#define LLOG_BITMAP_BYTES (LLOG_CHUNK_SIZE - LLOG_HEADER_SIZE)
3104
3105#define LLOG_MIN_REC_SIZE (24) /* round(llog_rec_hdr + llog_rec_tail) */
3106
3107/* flags for the logs */
3108enum llog_flag {
3109 LLOG_F_ZAP_WHEN_EMPTY = 0x1,
3110 LLOG_F_IS_CAT = 0x2,
3111 LLOG_F_IS_PLAIN = 0x4,
3112};
3113
3114struct llog_log_hdr {
3115 struct llog_rec_hdr llh_hdr;
3116 obd_time llh_timestamp;
3117 __u32 llh_count;
3118 __u32 llh_bitmap_offset;
3119 __u32 llh_size;
3120 __u32 llh_flags;
3121 __u32 llh_cat_idx;
3122 /* for a catalog the first plain slot is next to it */
3123 struct obd_uuid llh_tgtuuid;
3124 __u32 llh_reserved[LLOG_HEADER_SIZE/sizeof(__u32) - 23];
3125 __u32 llh_bitmap[LLOG_BITMAP_BYTES/sizeof(__u32)];
3126 struct llog_rec_tail llh_tail;
3127} __attribute__((packed));
3128
3129#define LLOG_BITMAP_SIZE(llh) (__u32)((llh->llh_hdr.lrh_len - \
3130 llh->llh_bitmap_offset - \
3131 sizeof(llh->llh_tail)) * 8)
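/*
 * Illustrative bitmap lookup (hypothetical, a little-endian-style sketch
 * only; the real code uses the ext2 bitmap helpers): test whether record
 * @index is still live in the llog header bitmap above.
 */
static inline int llog_example_index_is_set(struct llog_log_hdr *llh,
					    __u32 index)
{
	return (llh->llh_bitmap[index / 32] >> (index % 32)) & 1;
}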
3132
3133/** log cookies are used to reference a specific log file and a record therein */
3134struct llog_cookie {
3135 struct llog_logid lgc_lgl;
3136 __u32 lgc_subsys;
3137 __u32 lgc_index;
3138 __u32 lgc_padding;
3139} __attribute__((packed));
3140
3141/** llog protocol */
3142enum llogd_rpc_ops {
3143 LLOG_ORIGIN_HANDLE_CREATE = 501,
3144 LLOG_ORIGIN_HANDLE_NEXT_BLOCK = 502,
3145 LLOG_ORIGIN_HANDLE_READ_HEADER = 503,
3146 LLOG_ORIGIN_HANDLE_WRITE_REC = 504,
3147 LLOG_ORIGIN_HANDLE_CLOSE = 505,
3148 LLOG_ORIGIN_CONNECT = 506,
3149 LLOG_CATINFO = 507, /* deprecated */
3150 LLOG_ORIGIN_HANDLE_PREV_BLOCK = 508,
3151 LLOG_ORIGIN_HANDLE_DESTROY = 509, /* for destroying llog objects */
3152 LLOG_LAST_OPC,
3153 LLOG_FIRST_OPC = LLOG_ORIGIN_HANDLE_CREATE
3154};
3155
3156struct llogd_body {
3157 struct llog_logid lgd_logid;
3158 __u32 lgd_ctxt_idx;
3159 __u32 lgd_llh_flags;
3160 __u32 lgd_index;
3161 __u32 lgd_saved_index;
3162 __u32 lgd_len;
3163 __u64 lgd_cur_offset;
3164} __attribute__((packed));
3165
3166struct llogd_conn_body {
3167 struct llog_gen lgdc_gen;
3168 struct llog_logid lgdc_logid;
3169 __u32 lgdc_ctxt_idx;
3170} __attribute__((packed));
3171
3172/* Note: 64-bit types are 64-bit aligned in structure */
3173struct obdo {
3174 obd_valid o_valid; /* hot fields in this obdo */
3175 struct ost_id o_oi;
3176 obd_id o_parent_seq;
3177 obd_size o_size; /* o_size-o_blocks == ost_lvb */
3178 obd_time o_mtime;
3179 obd_time o_atime;
3180 obd_time o_ctime;
3181 obd_blocks o_blocks; /* brw: cli sent cached bytes */
3182 obd_size o_grant;
3183
3184 /* 32-bit fields start here: keep an even number of them via padding */
3185 obd_blksize o_blksize; /* optimal IO blocksize */
3186 obd_mode o_mode; /* brw: cli sent cache remain */
3187 obd_uid o_uid;
3188 obd_gid o_gid;
3189 obd_flag o_flags;
3190 obd_count o_nlink; /* brw: checksum */
3191 obd_count o_parent_oid;
3192 obd_count o_misc; /* brw: o_dropped */
3193
3194 __u64 o_ioepoch; /* epoch in ost writes */
3195 __u32 o_stripe_idx; /* holds stripe idx */
3196 __u32 o_parent_ver;
3197 struct lustre_handle o_handle; /* brw: lock handle to prolong
3198 * locks */
3199 struct llog_cookie o_lcookie; /* destroy: unlink cookie from
3200 * MDS */
3201 __u32 o_uid_h;
3202 __u32 o_gid_h;
3203
3204 __u64 o_data_version; /* getattr: sum of iversion for
3205 * each stripe.
3206 * brw: grant space consumed on
3207 * the client for the write */
3208 __u64 o_padding_4;
3209 __u64 o_padding_5;
3210 __u64 o_padding_6;
3211};
3212
3213#define o_dirty o_blocks
3214#define o_undirty o_mode
3215#define o_dropped o_misc
3216#define o_cksum o_nlink
3217#define o_grant_used o_data_version
3218
3219static inline void lustre_set_wire_obdo(struct obd_connect_data *ocd,
3220 struct obdo *wobdo, struct obdo *lobdo)
3221{
3222 memcpy(wobdo, lobdo, sizeof(*lobdo));
3223 wobdo->o_flags &= ~OBD_FL_LOCAL_MASK;
3224 if (ocd == NULL)
3225 return;
3226
3227 if (unlikely(!(ocd->ocd_connect_flags & OBD_CONNECT_FID)) &&
3228 fid_seq_is_echo(ostid_seq(&lobdo->o_oi))) {
3229 /* Currently OBD_FL_OSTID will only be used when a 2.4 echo
3230 * client communicates with a pre-2.4 server */
3231 wobdo->o_oi.oi.oi_id = fid_oid(&lobdo->o_oi.oi_fid);
3232 wobdo->o_oi.oi.oi_seq = fid_seq(&lobdo->o_oi.oi_fid);
3233 }
3234}
3235
3236static inline void lustre_get_wire_obdo(struct obd_connect_data *ocd,
3237 struct obdo *lobdo, struct obdo *wobdo)
3238{
3239 obd_flag local_flags = 0;
3240
3241 if (lobdo->o_valid & OBD_MD_FLFLAGS)
3242 local_flags = lobdo->o_flags & OBD_FL_LOCAL_MASK;
3243
3244 LASSERT(!(wobdo->o_flags & OBD_FL_LOCAL_MASK));
3245
3246 memcpy(lobdo, wobdo, sizeof(*lobdo));
3247 if (local_flags != 0) {
3248 lobdo->o_valid |= OBD_MD_FLFLAGS;
3249 lobdo->o_flags &= ~OBD_FL_LOCAL_MASK;
3250 lobdo->o_flags |= local_flags;
3251 }
3252 if (ocd == NULL)
3253 return;
3254
3255 if (unlikely(!(ocd->ocd_connect_flags & OBD_CONNECT_FID)) &&
3256 fid_seq_is_echo(wobdo->o_oi.oi.oi_seq)) {
3257 /* see above */
3258 lobdo->o_oi.oi_fid.f_seq = wobdo->o_oi.oi.oi_seq;
3259 lobdo->o_oi.oi_fid.f_oid = wobdo->o_oi.oi.oi_id;
3260 lobdo->o_oi.oi_fid.f_ver = 0;
3261 }
3262}
3263
3264extern void lustre_swab_obdo (struct obdo *o);
3265
3266/* request structure for OST's */
3267struct ost_body {
3268 struct obdo oa;
3269};
3270
3271/* Key for FIEMAP to be used in get_info calls */
3272struct ll_fiemap_info_key {
3273 char name[8];
3274 struct obdo oa;
3275 struct ll_user_fiemap fiemap;
3276};
3277
3278extern void lustre_swab_ost_body (struct ost_body *b);
3279extern void lustre_swab_ost_last_id(obd_id *id);
3280extern void lustre_swab_fiemap(struct ll_user_fiemap *fiemap);
3281
3282extern void lustre_swab_lov_user_md_v1(struct lov_user_md_v1 *lum);
3283extern void lustre_swab_lov_user_md_v3(struct lov_user_md_v3 *lum);
3284extern void lustre_swab_lov_user_md_objects(struct lov_user_ost_data *lod,
3285 int stripe_count);
3286extern void lustre_swab_lov_mds_md(struct lov_mds_md *lmm);
3287
3288/* llog_swab.c */
3289extern void lustre_swab_llogd_body (struct llogd_body *d);
3290extern void lustre_swab_llog_hdr (struct llog_log_hdr *h);
3291extern void lustre_swab_llogd_conn_body (struct llogd_conn_body *d);
3292extern void lustre_swab_llog_rec(struct llog_rec_hdr *rec);
3293extern void lustre_swab_llog_id(struct llog_logid *lid);
3294
3295struct lustre_cfg;
3296extern void lustre_swab_lustre_cfg(struct lustre_cfg *lcfg);
3297
3298/* Functions for dumping PTLRPC fields */
3299void dump_rniobuf(struct niobuf_remote *rnb);
3300void dump_ioo(struct obd_ioobj *nb);
3301void dump_obdo(struct obdo *oa);
3302void dump_ost_body(struct ost_body *ob);
3303void dump_rcs(__u32 *rc);
3304
#define IDX_INFO_MAGIC 0x3D37CC37

/* Index file transfer through the network. The server serializes the index
 * into a byte stream, which is sent to the client via a bulk transfer. */
struct idx_info {
	__u32		ii_magic;

	/* reply: see idx_info_flags below */
	__u32		ii_flags;

	/* request & reply: number of lu_idxpage (to be) transferred */
	__u16		ii_count;
	__u16		ii_pad0;

	/* request: requested attributes passed down to the iterator API */
	__u32		ii_attrs;

	/* request & reply: index file identifier (FID) */
	struct lu_fid	ii_fid;

	/* reply: version of the index file before starting to walk the index.
	 * Please note that the version can be modified at any time during the
	 * transfer. */
	__u64		ii_version;

	/* request: hash to start with;
	 * reply: hash of the first entry of the first lu_idxpage and hash
	 * of the entry to read next, if any */
	__u64		ii_hash_start;
	__u64		ii_hash_end;

	/* reply: size of keys in lu_idxpages, or the minimal key size if
	 * II_FL_VARKEY is set */
	__u16		ii_keysize;

	/* reply: size of records in lu_idxpages, or the minimal record size
	 * if II_FL_VARREC is set */
	__u16		ii_recsize;

	__u32		ii_pad1;
	__u64		ii_pad2;
	__u64		ii_pad3;
};
extern void lustre_swab_idx_info(struct idx_info *ii);

#define II_END_OFF	MDS_DIR_END_OFF	/* all entries have been read */

/* List of flags used in idx_info::ii_flags */
enum idx_info_flags {
	II_FL_NOHASH	= 1 << 0, /* client doesn't care about hash value */
	II_FL_VARKEY	= 1 << 1, /* keys can be of variable size */
	II_FL_VARREC	= 1 << 2, /* records can be of variable size */
	II_FL_NONUNQ	= 1 << 3, /* index supports non-unique keys */
};

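/* Illustrative sketch only (not part of the protocol definition): how a
 * client might fill an idx_info to request an index walk from the first
 * entry. 'idx_info_init_request' and its parameters are hypothetical names
 * introduced here for illustration. */
static inline void idx_info_init_request(struct idx_info *ii,
					 const struct lu_fid *fid,
					 __u16 npages, __u32 attrs)
{
	memset(ii, 0, sizeof(*ii));
	ii->ii_magic	  = IDX_INFO_MAGIC;
	ii->ii_count	  = npages;	/* lu_idxpages to transfer */
	ii->ii_attrs	  = attrs;	/* passed down to the iterator API */
	ii->ii_fid	  = *fid;	/* index file to walk */
	ii->ii_hash_start = 0;		/* start from the first entry */
}
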
#define LIP_MAGIC 0x8A6D6B6C

/* 4KB (= LU_PAGE_SIZE) container gathering key/record pairs */
struct lu_idxpage {
	/* 16-byte header */
	__u32	lip_magic;
	__u16	lip_flags;
	__u16	lip_nr;		/* number of entries in the container */
	__u64	lip_pad0;	/* additional padding for future use */

	/* Key/record pairs are stored in the remaining 4080 bytes.
	 * Depending upon the flags in idx_info::ii_flags, each key/record
	 * pair might be preceded by:
	 * - a hash value
	 * - the key size (if II_FL_VARKEY is set)
	 * - the record size (if II_FL_VARREC is set)
	 *
	 * For the time being, only fixed-size keys & records are supported. */
	char	lip_entries[0];
};
extern void lustre_swab_lip_header(struct lu_idxpage *lip);

#define LIP_HDR_SIZE (offsetof(struct lu_idxpage, lip_entries))

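/* Illustrative accessor (a sketch, not taken from the protocol code):
 * locate the idx-th key/record pair in a lu_idxpage, assuming the
 * fixed-size case described above (II_FL_VARKEY and II_FL_VARREC clear,
 * no per-entry hash), so every entry occupies keysize + recsize bytes.
 * 'lip_entry_at' is a hypothetical helper name. */
static inline void *lip_entry_at(struct lu_idxpage *lip, int idx,
				 __u16 keysize, __u16 recsize)
{
	LASSERT(idx < lip->lip_nr);
	return lip->lip_entries + idx * (keysize + recsize);
}
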
/* Gather all possible types associated with a 4KB container */
union lu_page {
	struct lu_dirpage	lp_dir;	/* for MDS_READPAGE */
	struct lu_idxpage	lp_idx;	/* for OBD_IDX_READ */
	char			lp_array[LU_PAGE_SIZE];
};

/* security opcodes */
typedef enum {
	SEC_CTX_INIT		= 801,
	SEC_CTX_INIT_CONT	= 802,
	SEC_CTX_FINI		= 803,
	SEC_LAST_OPC,
	SEC_FIRST_OPC		= SEC_CTX_INIT
} sec_cmd_t;

3400/*
3401 * capa related definitions
3402 */
3403#define CAPA_HMAC_MAX_LEN 64
3404#define CAPA_HMAC_KEY_MAX_LEN 56
3405
3406/* NB take care when changing the sequence of elements this struct,
3407 * because the offset info is used in find_capa() */
3408struct lustre_capa {
3409 struct lu_fid lc_fid; /** fid */
3410 __u64 lc_opc; /** operations allowed */
3411 __u64 lc_uid; /** file owner */
3412 __u64 lc_gid; /** file group */
3413 __u32 lc_flags; /** HMAC algorithm & flags */
3414 __u32 lc_keyid; /** key# used for the capability */
3415 __u32 lc_timeout; /** capa timeout value (sec) */
3416 __u32 lc_expiry; /** expiry time (sec) */
3417 __u8 lc_hmac[CAPA_HMAC_MAX_LEN]; /** HMAC */
3418} __attribute__((packed));
3419
3420extern void lustre_swab_lustre_capa(struct lustre_capa *c);
3421
3422/** lustre_capa::lc_opc */
3423enum {
3424 CAPA_OPC_BODY_WRITE = 1<<0, /**< write object data */
3425 CAPA_OPC_BODY_READ = 1<<1, /**< read object data */
3426 CAPA_OPC_INDEX_LOOKUP = 1<<2, /**< lookup object fid */
3427 CAPA_OPC_INDEX_INSERT = 1<<3, /**< insert object fid */
3428 CAPA_OPC_INDEX_DELETE = 1<<4, /**< delete object fid */
3429 CAPA_OPC_OSS_WRITE = 1<<5, /**< write oss object data */
3430 CAPA_OPC_OSS_READ = 1<<6, /**< read oss object data */
3431 CAPA_OPC_OSS_TRUNC = 1<<7, /**< truncate oss object */
3432 CAPA_OPC_OSS_DESTROY = 1<<8, /**< destroy oss object */
3433 CAPA_OPC_META_WRITE = 1<<9, /**< write object meta data */
3434 CAPA_OPC_META_READ = 1<<10, /**< read object meta data */
3435};
3436
3437#define CAPA_OPC_OSS_RW (CAPA_OPC_OSS_READ | CAPA_OPC_OSS_WRITE)
3438#define CAPA_OPC_MDS_ONLY \
3439 (CAPA_OPC_BODY_WRITE | CAPA_OPC_BODY_READ | CAPA_OPC_INDEX_LOOKUP | \
3440 CAPA_OPC_INDEX_INSERT | CAPA_OPC_INDEX_DELETE)
3441#define CAPA_OPC_OSS_ONLY \
3442 (CAPA_OPC_OSS_WRITE | CAPA_OPC_OSS_READ | CAPA_OPC_OSS_TRUNC | \
3443 CAPA_OPC_OSS_DESTROY)
3444#define CAPA_OPC_MDS_DEFAULT ~CAPA_OPC_OSS_ONLY
3445#define CAPA_OPC_OSS_DEFAULT ~(CAPA_OPC_MDS_ONLY | CAPA_OPC_OSS_ONLY)
3446
/* MDS capability covers object capability for operations of body r/w
 * (dir readpage/sendpage), index lookup/insert/delete and meta data r/w,
 * while OSS capability only covers object capability for operations of
 * OSS data (file content) r/w/truncate.
 */
static inline int capa_for_mds(struct lustre_capa *c)
{
	return (c->lc_opc & CAPA_OPC_INDEX_LOOKUP) != 0;
}

static inline int capa_for_oss(struct lustre_capa *c)
{
	return (c->lc_opc & CAPA_OPC_INDEX_LOOKUP) == 0;
}

/* HMAC algorithm, packed into lustre_capa::lc_flags */
enum {
	CAPA_HMAC_ALG_SHA1 = 1, /**< sha1 algorithm */
	CAPA_HMAC_ALG_MAX,
};

#define CAPA_FL_MASK		0x00ffffff
#define CAPA_HMAC_ALG_MASK	0xff000000

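/* Illustrative helpers (sketches, not part of the wire format): lc_flags
 * packs the HMAC algorithm identifier into its top byte (CAPA_HMAC_ALG_MASK)
 * and keeps the capability flags in the low 24 bits (CAPA_FL_MASK).
 * 'capa_hmac_alg' and 'capa_flags' are hypothetical accessor names. */
static inline __u32 capa_hmac_alg(const struct lustre_capa *c)
{
	return (c->lc_flags & CAPA_HMAC_ALG_MASK) >> 24;
}

static inline __u32 capa_flags(const struct lustre_capa *c)
{
	return c->lc_flags & CAPA_FL_MASK;
}
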
struct lustre_capa_key {
	__u64	lk_seq;		/**< mds# */
	__u32	lk_keyid;	/**< key# */
	__u32	lk_padding;
	__u8	lk_key[CAPA_HMAC_KEY_MAX_LEN]; /**< key */
} __attribute__((packed));

extern void lustre_swab_lustre_capa_key(struct lustre_capa_key *k);

/** The link ea holds 1 \a link_ea_entry for each hardlink */
#define LINK_EA_MAGIC 0x11EAF1DFUL
struct link_ea_header {
	__u32	leh_magic;
	__u32	leh_reccount;
	__u64	leh_len;	/* total size */
	/* future use */
	__u32	padding1;
	__u32	padding2;
};

/** Hardlink data is name and parent fid.
 * Stored in this crazy struct for maximum packing and endian-neutrality.
 */
struct link_ea_entry {
	/** __u16 stored big-endian, unaligned */
	unsigned char	lee_reclen[2];
	unsigned char	lee_parent_fid[sizeof(struct lu_fid)];
	char		lee_name[0];
} __attribute__((packed));

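/* Illustrative unpacking sketch: lee_reclen holds a __u16 stored big-endian
 * and unaligned, so it must be assembled byte by byte regardless of host
 * endianness. 'lee_reclen_get' is a hypothetical helper name. */
static inline int lee_reclen_get(const struct link_ea_entry *lee)
{
	return (lee->lee_reclen[0] << 8) | lee->lee_reclen[1];
}
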
/** fid2path request/reply structure */
struct getinfo_fid2path {
	struct lu_fid	gf_fid;
	__u64		gf_recno;
	__u32		gf_linkno;
	__u32		gf_pathlen;
	char		gf_path[0];
} __attribute__((packed));

void lustre_swab_fid2path(struct getinfo_fid2path *gf);

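/* Illustrative sketch: the reply carries gf_pathlen bytes of path data in
 * the flexible gf_path[] array, so the total message size is the fixed
 * header plus the path buffer. 'fid2path_size' is a hypothetical helper
 * name introduced for illustration. */
static inline size_t fid2path_size(const struct getinfo_fid2path *gf)
{
	return sizeof(*gf) + gf->gf_pathlen;
}
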
enum {
	LAYOUT_INTENT_ACCESS	= 0,
	LAYOUT_INTENT_READ	= 1,
	LAYOUT_INTENT_WRITE	= 2,
	LAYOUT_INTENT_GLIMPSE	= 3,
	LAYOUT_INTENT_TRUNC	= 4,
	LAYOUT_INTENT_RELEASE	= 5,
	LAYOUT_INTENT_RESTORE	= 6
};

/* enqueue layout lock with intent */
struct layout_intent {
	__u32	li_opc;		/* intent operation for enqueue: read, write, etc. */
	__u32	li_flags;
	__u64	li_start;
	__u64	li_end;
};

void lustre_swab_layout_intent(struct layout_intent *li);

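/* Illustrative sketch (hypothetical helper, not part of the protocol): fill
 * a layout_intent to enqueue a layout lock covering a write to the byte
 * range [start, end). */
static inline void layout_intent_init_write(struct layout_intent *li,
					    __u64 start, __u64 end)
{
	li->li_opc   = LAYOUT_INTENT_WRITE;
	li->li_flags = 0;
	li->li_start = start;
	li->li_end   = end;
}
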
3532/**
3533 * On the wire version of hsm_progress structure.
3534 *
3535 * Contains the userspace hsm_progress and some internal fields.
3536 */
3537struct hsm_progress_kernel {
3538 /* Field taken from struct hsm_progress */
3539 lustre_fid hpk_fid;
3540 __u64 hpk_cookie;
3541 struct hsm_extent hpk_extent;
3542 __u16 hpk_flags;
3543 __u16 hpk_errval; /* positive val */
3544 __u32 hpk_padding1;
3545 /* Additional fields */
3546 __u64 hpk_data_version;
3547 __u64 hpk_padding2;
3548} __attribute__((packed));
3549
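/* Illustrative conversion sketch: build the on-wire hsm_progress_kernel
 * from the userspace hsm_progress. The hp_* field names are assumed from
 * lustre_user.h; treat both them and 'hsm_progress_pack' as assumptions,
 * not a quote of the in-tree conversion code. */
static inline void hsm_progress_pack(struct hsm_progress_kernel *hpk,
				     const struct hsm_progress *hp,
				     __u64 data_version)
{
	hpk->hpk_fid	      = hp->hp_fid;
	hpk->hpk_cookie	      = hp->hp_cookie;
	hpk->hpk_extent	      = hp->hp_extent;
	hpk->hpk_flags	      = hp->hp_flags;
	hpk->hpk_errval	      = hp->hp_errval;
	hpk->hpk_padding1     = 0;
	hpk->hpk_data_version = data_version;
	hpk->hpk_padding2     = 0;
}
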
extern void lustre_swab_hsm_user_state(struct hsm_user_state *hus);
extern void lustre_swab_hsm_current_action(struct hsm_current_action *action);
extern void lustre_swab_hsm_progress_kernel(struct hsm_progress_kernel *hpk);
extern void lustre_swab_hsm_user_item(struct hsm_user_item *hui);
extern void lustre_swab_hsm_request(struct hsm_request *hr);

3557/**
3558 * These are object update opcode under UPDATE_OBJ, which is currently
3559 * being used by cross-ref operations between MDT.
3560 *
3561 * During the cross-ref operation, the Master MDT, which the client send the
3562 * request to, will disassembly the operation into object updates, then OSP
3563 * will send these updates to the remote MDT to be executed.
3564 *
3565 * Update request format
3566 * magic: UPDATE_BUFFER_MAGIC_V1
3567 * Count: How many updates in the req.
3568 * bufs[0] : following are packets of object.
3569 * update[0]:
3570 * type: object_update_op, the op code of update
3571 * fid: The object fid of the update.
3572 * lens/bufs: other parameters of the update.
3573 * update[1]:
3574 * type: object_update_op, the op code of update
3575 * fid: The object fid of the update.
3576 * lens/bufs: other parameters of the update.
3577 * ..........
3578 * update[7]: type: object_update_op, the op code of update
3579 * fid: The object fid of the update.
3580 * lens/bufs: other parameters of the update.
3581 * Current 8 maxim updates per object update request.
3582 *
3583 *******************************************************************
3584 * update reply format:
3585 *
3586 * ur_version: UPDATE_REPLY_V1
3587 * ur_count: The count of the reply, which is usually equal
3588 * to the number of updates in the request.
3589 * ur_lens: The reply lengths of each object update.
3590 *
3591 * replies: 1st update reply [4bytes_ret: other body]
3592 * 2nd update reply [4bytes_ret: other body]
3593 * .....
3594 * nth update reply [4bytes_ret: other body]
3595 *
3596 * For each reply of the update, the format would be
3597 * result(4 bytes):Other stuff
3598 */
3599
#define UPDATE_MAX_OPS		10
#define UPDATE_BUFFER_MAGIC_V1	0xBDDE0001
#define UPDATE_BUFFER_MAGIC	UPDATE_BUFFER_MAGIC_V1
#define UPDATE_BUF_COUNT	8
enum object_update_op {
	OBJ_CREATE		= 1,
	OBJ_DESTROY		= 2,
	OBJ_REF_ADD		= 3,
	OBJ_REF_DEL		= 4,
	OBJ_ATTR_SET		= 5,
	OBJ_ATTR_GET		= 6,
	OBJ_XATTR_SET		= 7,
	OBJ_XATTR_GET		= 8,
	OBJ_INDEX_LOOKUP	= 9,
	OBJ_INDEX_INSERT	= 10,
	OBJ_INDEX_DELETE	= 11,
	OBJ_LAST
};

struct update {
	__u32		u_type;
	__u32		u_batchid;
	struct lu_fid	u_fid;
	__u32		u_lens[UPDATE_BUF_COUNT];
	__u32		u_bufs[0];
};

struct update_buf {
	__u32	ub_magic;
	__u32	ub_count;
	__u32	ub_bufs[0];
};

#define UPDATE_REPLY_V1	0x00BD0001
struct update_reply {
	__u32	ur_version;
	__u32	ur_count;
	__u32	ur_lens[0];
};

void lustre_swab_update_buf(struct update_buf *ub);
void lustre_swab_update_reply_buf(struct update_reply *ur);

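/* Illustrative walker (a sketch under assumptions, not the in-tree packing
 * code): compute the wire size of one 'struct update', assuming each of the
 * UPDATE_BUF_COUNT parameter buffers named by u_lens is padded to an 8-byte
 * boundary. 'update_wire_size' is a hypothetical helper name. */
static inline size_t update_wire_size(const struct update *u)
{
	size_t size = offsetof(struct update, u_bufs[0]);
	int i;

	for (i = 0; i < UPDATE_BUF_COUNT; i++)
		size += (u->u_lens[i] + 7) & ~7;
	return size;
}
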
/** layout swap request structure
 * fid1 and fid2 are in mdt_body
 */
struct mdc_swap_layouts {
	__u64	msl_flags;
} __packed;

void lustre_swab_swap_layouts(struct mdc_swap_layouts *msl);

#endif
/** @} lustreidl */