ceph: decode v5 of osdmap (pool names) [protocol change]
[deliverable/linux.git] / fs / ceph / rados.h
CommitLineData
0dee3c28
SW
1#ifndef __RADOS_H
2#define __RADOS_H
3
4/*
5 * Data types for the Ceph distributed object storage layer RADOS
6 * (Reliable Autonomic Distributed Object Store).
7 */
8
9#include "msgr.h"
10
02f90c61
SW
11/*
12 * osdmap encoding versions
13 */
2844a76a
SW
14#define CEPH_OSDMAP_INC_VERSION 5
15#define CEPH_OSDMAP_INC_VERSION_EXT 5
16#define CEPH_OSDMAP_VERSION 5
17#define CEPH_OSDMAP_VERSION_EXT 5
02f90c61 18
0dee3c28
SW
19/*
20 * fs id
21 */
/*
 * fs id: 16 opaque bytes identifying a ceph cluster.
 */
struct ceph_fsid {
	unsigned char fsid[16];
};

/*
 * Byte-wise fsid comparison with memcmp semantics: 0 when the two
 * ids are identical, a signed ordering value otherwise.
 */
static inline int ceph_fsid_compare(const struct ceph_fsid *a,
				    const struct ceph_fsid *b)
{
	return memcmp(a->fsid, b->fsid, sizeof(a->fsid));
}
31
32/*
33 * ino, object, etc.
34 */
35typedef __le64 ceph_snapid_t;
36#define CEPH_SNAPDIR ((__u64)(-1)) /* reserved for hidden .snap dir */
37#define CEPH_NOSNAP ((__u64)(-2)) /* "head", "live" revision */
38#define CEPH_MAXSNAP ((__u64)(-3)) /* largest valid snapid */
39
/* on-wire timestamp: note tv_sec is only 32 bits wide here */
struct ceph_timespec {
	__le32 tv_sec;		/* seconds */
	__le32 tv_nsec;		/* nanoseconds */
} __attribute__ ((packed));
44
45
46/*
47 * object layout - how objects are mapped into PGs
48 */
49#define CEPH_OBJECT_LAYOUT_HASH 1
50#define CEPH_OBJECT_LAYOUT_LINEAR 2
51#define CEPH_OBJECT_LAYOUT_HASHINO 3
52
53/*
54 * pg layout -- how PGs are mapped onto (sets of) OSDs
55 */
56#define CEPH_PG_LAYOUT_CRUSH 0
57#define CEPH_PG_LAYOUT_HASH 1
58#define CEPH_PG_LAYOUT_LINEAR 2
59#define CEPH_PG_LAYOUT_HYBRID 3
60
61
62/*
63 * placement group.
64 * we encode this into one __le64.
65 */
51042122
SW
/* fits in a single __le64, per the comment above */
struct ceph_pg {
	__le16 preferred; /* preferred primary osd */
	__le16 ps;        /* placement seed (see pool object_hash) */
	__le32 pool;      /* object pool id */
} __attribute__ ((packed));
71
72/*
73 * pg_pool is a set of pgs storing a pool of objects
74 *
75 * pg_num -- base number of pseudorandomly placed pgs
76 *
77 * pgp_num -- effective number when calculating pg placement. this
78 * is used for pg_num increases. new pgs result in data being "split"
 * into new pgs. for this to proceed smoothly, new pgs are initially
80 * colocated with their parents; that is, pgp_num doesn't increase
81 * until the new pgs have successfully split. only _then_ are the new
82 * pgs placed independently.
83 *
84 * lpg_num -- localized pg count (per device). replicas are randomly
85 * selected.
86 *
87 * lpgp_num -- as above.
88 */
89#define CEPH_PG_TYPE_REP 1
90#define CEPH_PG_TYPE_RAID4 2
02f90c61 91#define CEPH_PG_POOL_VERSION 2
0dee3c28
SW
/*
 * on-wire pool description; pg_num/pgp_num/lpg_num semantics are in the
 * comment above.  NOTE(review): presumably encoded/decoded against
 * CEPH_PG_POOL_VERSION -- verify in the osdmap decode path.
 */
struct ceph_pg_pool {
	__u8 type;                /* CEPH_PG_TYPE_* */
	__u8 size;                /* number of osds in each pg */
	__u8 crush_ruleset;       /* crush placement rule */
	__u8 object_hash;         /* hash mapping object name to ps */
	__le32 pg_num, pgp_num;   /* number of pg's */
	__le32 lpg_num, lpgp_num; /* number of localized pg's */
	__le32 last_change;       /* most recent epoch changed */
	__le64 snap_seq;          /* seq for per-pool snapshot */
	__le32 snap_epoch;        /* epoch of last snap */
	__le32 num_snaps;
	__le32 num_removed_snap_intervals;
	__le64 uid;
} __attribute__ ((packed));
106
107/*
108 * stable_mod func is used to control number of placement groups.
109 * similar to straight-up modulo, but produces a stable mapping as b
110 * increases over time. b is the number of bins, and bmask is the
111 * containing power of 2 minus 1.
112 *
113 * b <= bmask and bmask=(2**n)-1
114 * e.g., b=12 -> bmask=15, b=123 -> bmask=127
115 */
/*
 * Stable modulo: like x % b, but the mapping stays stable as b grows
 * toward its containing power of two.  bmask = 2^n - 1 with b <= bmask.
 * An x whose masked value falls beyond the valid bin count drops back
 * to the half-width mask.
 */
static inline int ceph_stable_mod(int x, int b, int bmask)
{
	int bin = x & bmask;

	return (bin < b) ? bin : (x & (bmask >> 1));
}
123
124/*
125 * object layout - how a given object should be stored.
126 */
struct ceph_object_layout {
	struct ceph_pg ol_pgid;   /* raw pg, with _full_ ps precision. */
	__le32 ol_stripe_unit;    /* for per-object parity, if any */
} __attribute__ ((packed));
131
132/*
133 * compound epoch+version, used by storage layer to serialize mutations
134 */
struct ceph_eversion {
	__le32 epoch;    /* osdmap epoch */
	__le64 version;  /* per-object/pg version within that epoch */
} __attribute__ ((packed));
139
140/*
141 * osd map bits
142 */
143
144/* status bits */
145#define CEPH_OSD_EXISTS 1
146#define CEPH_OSD_UP 2
147
148/* osd weights. fixed point value: 0x10000 == 1.0 ("in"), 0 == "out" */
149#define CEPH_OSD_IN 0x10000
150#define CEPH_OSD_OUT 0
151
152
153/*
154 * osd map flag bits
155 */
156#define CEPH_OSDMAP_NEARFULL (1<<0) /* sync writes (near ENOSPC) */
157#define CEPH_OSDMAP_FULL (1<<1) /* no data writes (ENOSPC) */
158#define CEPH_OSDMAP_PAUSERD (1<<2) /* pause all reads */
159#define CEPH_OSDMAP_PAUSEWR (1<<3) /* pause all writes */
160#define CEPH_OSDMAP_PAUSEREC (1<<4) /* pause recovery */
161
162/*
163 * osd ops
164 */
165#define CEPH_OSD_OP_MODE 0xf000
166#define CEPH_OSD_OP_MODE_RD 0x1000
167#define CEPH_OSD_OP_MODE_WR 0x2000
168#define CEPH_OSD_OP_MODE_RMW 0x3000
169#define CEPH_OSD_OP_MODE_SUB 0x4000
0dee3c28
SW
170
171#define CEPH_OSD_OP_TYPE 0x0f00
172#define CEPH_OSD_OP_TYPE_LOCK 0x0100
173#define CEPH_OSD_OP_TYPE_DATA 0x0200
174#define CEPH_OSD_OP_TYPE_ATTR 0x0300
175#define CEPH_OSD_OP_TYPE_EXEC 0x0400
176#define CEPH_OSD_OP_TYPE_PG 0x0500
177
/*
 * osd op codes: each value is MODE bits | TYPE bits | per-type opcode,
 * so the ceph_osd_op_type_*() / ceph_osd_op_mode_*() helpers below can
 * classify an op by masking.
 */
enum {
	/** data **/
	/* read */
	CEPH_OSD_OP_READ = CEPH_OSD_OP_MODE_RD | CEPH_OSD_OP_TYPE_DATA | 1,
	CEPH_OSD_OP_STAT = CEPH_OSD_OP_MODE_RD | CEPH_OSD_OP_TYPE_DATA | 2,

	/* fancy read */
	CEPH_OSD_OP_MASKTRUNC = CEPH_OSD_OP_MODE_RD | CEPH_OSD_OP_TYPE_DATA | 4,

	/* write */
	CEPH_OSD_OP_WRITE = CEPH_OSD_OP_MODE_WR | CEPH_OSD_OP_TYPE_DATA | 1,
	CEPH_OSD_OP_WRITEFULL = CEPH_OSD_OP_MODE_WR | CEPH_OSD_OP_TYPE_DATA | 2,
	CEPH_OSD_OP_TRUNCATE = CEPH_OSD_OP_MODE_WR | CEPH_OSD_OP_TYPE_DATA | 3,
	CEPH_OSD_OP_ZERO = CEPH_OSD_OP_MODE_WR | CEPH_OSD_OP_TYPE_DATA | 4,
	CEPH_OSD_OP_DELETE = CEPH_OSD_OP_MODE_WR | CEPH_OSD_OP_TYPE_DATA | 5,

	/* fancy write */
	CEPH_OSD_OP_APPEND = CEPH_OSD_OP_MODE_WR | CEPH_OSD_OP_TYPE_DATA | 6,
	CEPH_OSD_OP_STARTSYNC = CEPH_OSD_OP_MODE_WR | CEPH_OSD_OP_TYPE_DATA | 7,
	CEPH_OSD_OP_SETTRUNC = CEPH_OSD_OP_MODE_WR | CEPH_OSD_OP_TYPE_DATA | 8,
	CEPH_OSD_OP_TRIMTRUNC = CEPH_OSD_OP_MODE_WR | CEPH_OSD_OP_TYPE_DATA | 9,

	/* note: TMAPUP is the only read-modify-write (RMW) op here */
	CEPH_OSD_OP_TMAPUP = CEPH_OSD_OP_MODE_RMW | CEPH_OSD_OP_TYPE_DATA | 10,
	CEPH_OSD_OP_TMAPPUT = CEPH_OSD_OP_MODE_WR | CEPH_OSD_OP_TYPE_DATA | 11,
	CEPH_OSD_OP_TMAPGET = CEPH_OSD_OP_MODE_RD | CEPH_OSD_OP_TYPE_DATA | 12,

	CEPH_OSD_OP_CREATE = CEPH_OSD_OP_MODE_WR | CEPH_OSD_OP_TYPE_DATA | 13,

	/** attrs **/
	/* read */
	CEPH_OSD_OP_GETXATTR = CEPH_OSD_OP_MODE_RD | CEPH_OSD_OP_TYPE_ATTR | 1,
	CEPH_OSD_OP_GETXATTRS = CEPH_OSD_OP_MODE_RD | CEPH_OSD_OP_TYPE_ATTR | 2,

	/* write */
	CEPH_OSD_OP_SETXATTR = CEPH_OSD_OP_MODE_WR | CEPH_OSD_OP_TYPE_ATTR | 1,
	CEPH_OSD_OP_SETXATTRS = CEPH_OSD_OP_MODE_WR | CEPH_OSD_OP_TYPE_ATTR | 2,
	CEPH_OSD_OP_RESETXATTRS = CEPH_OSD_OP_MODE_WR|CEPH_OSD_OP_TYPE_ATTR | 3,
	CEPH_OSD_OP_RMXATTR = CEPH_OSD_OP_MODE_WR | CEPH_OSD_OP_TYPE_ATTR | 4,

	/** subop (osd-internal; no TYPE bits) **/
	CEPH_OSD_OP_PULL = CEPH_OSD_OP_MODE_SUB | 1,
	CEPH_OSD_OP_PUSH = CEPH_OSD_OP_MODE_SUB | 2,
	CEPH_OSD_OP_BALANCEREADS = CEPH_OSD_OP_MODE_SUB | 3,
	CEPH_OSD_OP_UNBALANCEREADS = CEPH_OSD_OP_MODE_SUB | 4,
	CEPH_OSD_OP_SCRUB = CEPH_OSD_OP_MODE_SUB | 5,

	/** lock **/
	CEPH_OSD_OP_WRLOCK = CEPH_OSD_OP_MODE_WR | CEPH_OSD_OP_TYPE_LOCK | 1,
	CEPH_OSD_OP_WRUNLOCK = CEPH_OSD_OP_MODE_WR | CEPH_OSD_OP_TYPE_LOCK | 2,
	CEPH_OSD_OP_RDLOCK = CEPH_OSD_OP_MODE_WR | CEPH_OSD_OP_TYPE_LOCK | 3,
	CEPH_OSD_OP_RDUNLOCK = CEPH_OSD_OP_MODE_WR | CEPH_OSD_OP_TYPE_LOCK | 4,
	CEPH_OSD_OP_UPLOCK = CEPH_OSD_OP_MODE_WR | CEPH_OSD_OP_TYPE_LOCK | 5,
	CEPH_OSD_OP_DNLOCK = CEPH_OSD_OP_MODE_WR | CEPH_OSD_OP_TYPE_LOCK | 6,

	/** exec **/
	CEPH_OSD_OP_CALL = CEPH_OSD_OP_MODE_RD | CEPH_OSD_OP_TYPE_EXEC | 1,

	/** pg **/
	CEPH_OSD_OP_PGLS = CEPH_OSD_OP_MODE_RD | CEPH_OSD_OP_TYPE_PG | 1,
};
238
239static inline int ceph_osd_op_type_lock(int op)
240{
241 return (op & CEPH_OSD_OP_TYPE) == CEPH_OSD_OP_TYPE_LOCK;
242}
243static inline int ceph_osd_op_type_data(int op)
244{
245 return (op & CEPH_OSD_OP_TYPE) == CEPH_OSD_OP_TYPE_DATA;
246}
247static inline int ceph_osd_op_type_attr(int op)
248{
249 return (op & CEPH_OSD_OP_TYPE) == CEPH_OSD_OP_TYPE_ATTR;
250}
251static inline int ceph_osd_op_type_exec(int op)
252{
253 return (op & CEPH_OSD_OP_TYPE) == CEPH_OSD_OP_TYPE_EXEC;
254}
255static inline int ceph_osd_op_type_pg(int op)
256{
257 return (op & CEPH_OSD_OP_TYPE) == CEPH_OSD_OP_TYPE_PG;
258}
259
260static inline int ceph_osd_op_mode_subop(int op)
261{
262 return (op & CEPH_OSD_OP_MODE) == CEPH_OSD_OP_MODE_SUB;
263}
264static inline int ceph_osd_op_mode_read(int op)
265{
266 return (op & CEPH_OSD_OP_MODE) == CEPH_OSD_OP_MODE_RD;
267}
268static inline int ceph_osd_op_mode_modify(int op)
269{
270 return (op & CEPH_OSD_OP_MODE) == CEPH_OSD_OP_MODE_WR;
271}
272
273#define CEPH_OSD_TMAP_HDR 'h'
274#define CEPH_OSD_TMAP_SET 's'
275#define CEPH_OSD_TMAP_RM 'r'
276
277extern const char *ceph_osd_op_name(int op);
278
279
280/*
281 * osd op flags
282 *
283 * An op may be READ, WRITE, or READ|WRITE.
284 */
enum {
	CEPH_OSD_FLAG_ACK = 1,          /* want (or is) "ack" ack */
	CEPH_OSD_FLAG_ONNVRAM = 2,      /* want (or is) "onnvram" ack */
	CEPH_OSD_FLAG_ONDISK = 4,       /* want (or is) "ondisk" ack */
	CEPH_OSD_FLAG_RETRY = 8,        /* resend attempt */
	CEPH_OSD_FLAG_READ = 16,        /* op may read */
	CEPH_OSD_FLAG_WRITE = 32,       /* op may write */
	CEPH_OSD_FLAG_ORDERSNAP = 64,   /* EOLDSNAP if snapc is out of order */
	CEPH_OSD_FLAG_PEERSTAT = 128,   /* msg includes osd_peer_stat */
	CEPH_OSD_FLAG_BALANCE_READS = 256, /* NOTE(review): presumably allows
					      reads from replicas -- verify */
	CEPH_OSD_FLAG_PARALLELEXEC = 512, /* execute op in parallel */
	CEPH_OSD_FLAG_PGOP = 1024,      /* pg op, no object */
	CEPH_OSD_FLAG_EXEC = 2048,      /* op may exec */
};
299
/* per-op flags (distinct from the per-message CEPH_OSD_FLAG_* above) */
enum {
	CEPH_OSD_OP_FLAG_EXCL = 1,      /* EXCL object create */
};
303
304#define EOLDSNAPC ERESTART /* ORDERSNAP flag set; writer has old snapc*/
305#define EBLACKLISTED ESHUTDOWN /* blacklisted */
306
307/*
308 * an individual object operation. each may be accompanied by some data
309 * payload
310 */
struct ceph_osd_op {
	__le16 op;           /* CEPH_OSD_OP_* */
	__le32 flags;        /* CEPH_OSD_FLAG_* */
	union {
		/* byte-range (extent) ops: read/write/truncate/zero/... */
		struct {
			__le64 offset, length;
			__le64 truncate_size;
			__le32 truncate_seq;
		} __attribute__ ((packed)) extent;
		/* xattr ops: lengths of the name and value */
		struct {
			__le32 name_len;
			__le32 value_len;
		} __attribute__ ((packed)) xattr;
		/* object class method call -- presumably CEPH_OSD_OP_CALL;
		 * verify against the osd client */
		struct {
			__u8 class_len;
			__u8 method_len;
			__u8 argc;
			__le32 indata_len;
		} __attribute__ ((packed)) cls;
		/* pg listing (cursor-style cookie + count) */
		struct {
			__le64 cookie, count;
		} __attribute__ ((packed)) pgls;
	};
	__le32 payload_len;  /* length of data payload accompanying this op */
} __attribute__ ((packed));
336
337/*
338 * osd request message header. each request may include multiple
339 * ceph_osd_op object operations.
340 */
struct ceph_osd_request_head {
	__le32 client_inc;                /* client incarnation */
	struct ceph_object_layout layout; /* pgid */
	__le32 osdmap_epoch;              /* client's osdmap epoch */

	__le32 flags;                     /* NOTE(review): presumably
					     CEPH_OSD_FLAG_* -- verify */

	struct ceph_timespec mtime;       /* for mutations only */
	struct ceph_eversion reassert_version; /* if we are replaying op */

	__le32 object_len;                /* length of object name */

	__le64 snapid;                    /* snapid to read */
	__le64 snap_seq;                  /* writer's snap context */
	__le32 num_snaps;                 /* snapids in the trailing data */

	__le16 num_ops;                   /* number of entries in ops[] */
	struct ceph_osd_op ops[];  /* followed by ops[], obj, ticket, snaps */
} __attribute__ ((packed));
360
361struct ceph_osd_reply_head {
0dee3c28
SW
362 __le32 client_inc; /* client incarnation */
363 __le32 flags;
364 struct ceph_object_layout layout;
365 __le32 osdmap_epoch;
366 struct ceph_eversion reassert_version; /* for replaying uncommitted */
367
368 __le32 result; /* result code */
369
370 __le32 object_len; /* length of object name */
371 __le32 num_ops;
372 struct ceph_osd_op ops[0]; /* ops[], object */
373} __attribute__ ((packed));
374
375
376#endif
This page took 0.047418 seconds and 5 git commands to generate.