include/linux/ceph/osdmap.h

#ifndef _FS_CEPH_OSDMAP_H
#define _FS_CEPH_OSDMAP_H

#include <linux/rbtree.h>
#include <linux/ceph/types.h>
#include <linux/ceph/decode.h>
#include <linux/ceph/ceph_fs.h>
#include <linux/crush/crush.h>

/*
 * The osd map describes the current membership of the osd cluster and
 * specifies the mapping of objects to placement groups and placement
 * groups to (sets of) osds.  That is, it completely specifies the
 * (desired) distribution of all data objects in the system at some
 * point in time.
 *
 * Each map version is identified by an epoch, which increases monotonically.
 *
 * The map can be updated either via an incremental map (diff) describing
 * the change between two successive epochs, or as a fully encoded map.
 */
struct ceph_pg {
	uint64_t pool;
	uint32_t seed;
};

int ceph_pg_compare(const struct ceph_pg *lhs, const struct ceph_pg *rhs);

#define CEPH_POOL_FLAG_HASHPSPOOL	(1ULL << 0) /* hash pg seed and pool id
						       together */
#define CEPH_POOL_FLAG_FULL		(1ULL << 1) /* pool is full */

struct ceph_pg_pool_info {
	struct rb_node node;
	s64 id;
	u8 type; /* CEPH_POOL_TYPE_* */
	u8 size;
	u8 min_size;
	u8 crush_ruleset;
	u8 object_hash;
	u32 last_force_request_resend;
	u32 pg_num, pgp_num;
	int pg_num_mask, pgp_num_mask;
	s64 read_tier;
	s64 write_tier; /* wins for read+write ops */
	u64 flags; /* CEPH_POOL_FLAG_* */
	char *name;

	bool was_full; /* for handle_one_map() */
};

/*
 * In a replicated pool a missing osd can be compensated for by shifting
 * the remaining osds up in the acting set; erasure-coded pools are
 * positional (each slot holds a distinct shard), so they cannot.
 */
static inline bool ceph_can_shift_osds(struct ceph_pg_pool_info *pool)
{
	switch (pool->type) {
	case CEPH_POOL_TYPE_REP:
		return true;
	case CEPH_POOL_TYPE_EC:
		return false;
	default:
		BUG_ON(1);
	}
}

struct ceph_object_locator {
	s64 pool;
	struct ceph_string *pool_ns;
};

static inline void ceph_oloc_init(struct ceph_object_locator *oloc)
{
	oloc->pool = -1;
	oloc->pool_ns = NULL;
}

static inline bool ceph_oloc_empty(const struct ceph_object_locator *oloc)
{
	return oloc->pool == -1;
}

void ceph_oloc_copy(struct ceph_object_locator *dest,
		    const struct ceph_object_locator *src);
void ceph_oloc_destroy(struct ceph_object_locator *oloc);
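
/*
 * Illustrative usage only (not part of the original header), assuming
 * the pool id comes from ceph_pg_poolid_by_name() declared below:
 *
 *	struct ceph_object_locator oloc;
 *
 *	ceph_oloc_init(&oloc);
 *	oloc.pool = ceph_pg_poolid_by_name(osdmap, "rbd");
 *	...
 *	ceph_oloc_destroy(&oloc);	// drops the pool_ns ref, if any
 */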

/*
 * Maximum object name length supported by the kernel client
 *
 * (probably outdated: must be >= RBD_MAX_MD_NAME_LEN -- currently 100)
 */
#define CEPH_MAX_OID_NAME_LEN 100

/*
 * 51-char inline_name is long enough for all cephfs and all but one
 * rbd requests: <imgname> in "<imgname>.rbd"/"rbd_id.<imgname>" can be
 * arbitrarily long (~PAGE_SIZE).  It's done once during rbd map; all
 * other rbd requests fit into inline_name.
 *
 * Makes ceph_object_id 64 bytes on 64-bit.
 */
#define CEPH_OID_INLINE_LEN 52

/*
 * Both inline and external buffers have space for a NUL-terminator,
 * which is carried around.  It's not required though - RADOS object
 * names don't have to be NUL-terminated and may contain NULs.
 */
struct ceph_object_id {
	char *name;
	char inline_name[CEPH_OID_INLINE_LEN];
	int name_len;
};

static inline void ceph_oid_init(struct ceph_object_id *oid)
{
	oid->name = oid->inline_name;
	oid->name_len = 0;
}

#define CEPH_OID_INIT_ONSTACK(oid)				\
	({ ceph_oid_init(&oid); oid; })
#define CEPH_DEFINE_OID_ONSTACK(oid)				\
	struct ceph_object_id oid = CEPH_OID_INIT_ONSTACK(oid)

static inline bool ceph_oid_empty(const struct ceph_object_id *oid)
{
	return oid->name == oid->inline_name && !oid->name_len;
}

void ceph_oid_copy(struct ceph_object_id *dest,
		   const struct ceph_object_id *src);
__printf(2, 3)
void ceph_oid_printf(struct ceph_object_id *oid, const char *fmt, ...);
__printf(3, 4)
int ceph_oid_aprintf(struct ceph_object_id *oid, gfp_t gfp,
		     const char *fmt, ...);
void ceph_oid_destroy(struct ceph_object_id *oid);
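
/*
 * Illustrative usage only (not part of the original header; image_name
 * is a hypothetical caller variable).  Names that don't fit into
 * inline_name fall back to an allocated buffer, so ceph_oid_aprintf()
 * can fail and should always be paired with ceph_oid_destroy():
 *
 *	CEPH_DEFINE_OID_ONSTACK(oid);
 *	int ret;
 *
 *	ret = ceph_oid_aprintf(&oid, GFP_NOIO, "rbd_id.%s", image_name);
 *	if (ret)
 *		return ret;
 *	...
 *	ceph_oid_destroy(&oid);
 */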

struct ceph_pg_mapping {
	struct rb_node node;
	struct ceph_pg pgid;

	union {
		struct {
			int len;
			int osds[];
		} pg_temp;
		struct {
			int osd;
		} primary_temp;
	};
};

struct ceph_osdmap {
	struct ceph_fsid fsid;
	u32 epoch;
	struct ceph_timespec created, modified;

	u32 flags;         /* CEPH_OSDMAP_* */

	u32 max_osd;       /* size of osd_state, _offload, _addr arrays */
	u8 *osd_state;     /* CEPH_OSD_* */
	u32 *osd_weight;   /* 0 = failed, 0x10000 = 100% normal */
	struct ceph_entity_addr *osd_addr;

	struct rb_root pg_temp;
	struct rb_root primary_temp;

	u32 *osd_primary_affinity;

	struct rb_root pg_pools;
	u32 pool_max;

	/* the CRUSH map specifies the mapping of placement groups to
	 * the list of osds that store+replicate them. */
	struct crush_map *crush;

	struct mutex crush_scratch_mutex;
	int crush_scratch_ary[CEPH_PG_MAX_SIZE * 3];
};

static inline bool ceph_osd_exists(struct ceph_osdmap *map, int osd)
{
	return osd >= 0 && osd < map->max_osd &&
	       (map->osd_state[osd] & CEPH_OSD_EXISTS);
}

static inline bool ceph_osd_is_up(struct ceph_osdmap *map, int osd)
{
	return ceph_osd_exists(map, osd) &&
	       (map->osd_state[osd] & CEPH_OSD_UP);
}

static inline bool ceph_osd_is_down(struct ceph_osdmap *map, int osd)
{
	return !ceph_osd_is_up(map, osd);
}

extern char *ceph_osdmap_state_str(char *str, int len, int state);
extern u32 ceph_get_primary_affinity(struct ceph_osdmap *map, int osd);

static inline struct ceph_entity_addr *ceph_osd_addr(struct ceph_osdmap *map,
						     int osd)
{
	if (osd >= map->max_osd)
		return NULL;
	return &map->osd_addr[osd];
}

/*
 * Wire format: u8 version, followed by le64 pool, le32 seed and a
 * deprecated le32 "preferred" value.
 */
static inline int ceph_decode_pgid(void **p, void *end, struct ceph_pg *pgid)
{
	__u8 version;

	if (!ceph_has_room(p, end, 1 + 8 + 4 + 4)) {
		pr_warn("incomplete pg encoding\n");
		return -EINVAL;
	}
	version = ceph_decode_8(p);
	if (version > 1) {
		pr_warn("do not understand pg encoding %d > 1\n",
			(int)version);
		return -EINVAL;
	}

	pgid->pool = ceph_decode_64(p);
	pgid->seed = ceph_decode_32(p);
	*p += 4;	/* skip deprecated preferred value */

	return 0;
}

struct ceph_osdmap *ceph_osdmap_alloc(void);
extern struct ceph_osdmap *ceph_osdmap_decode(void **p, void *end);
struct ceph_osdmap *osdmap_apply_incremental(void **p, void *end,
					     struct ceph_osdmap *map);
extern void ceph_osdmap_destroy(struct ceph_osdmap *map);
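
/*
 * Illustrative sketch only (not part of the original header): how a
 * client might keep its map current, per the incremental-vs-full
 * scheme described at the top of this file.  p/end delimit an encoded
 * map received from the monitor.
 *
 *	struct ceph_osdmap *newmap;
 *
 *	if (incremental)
 *		newmap = osdmap_apply_incremental(&p, end, map);
 *	else
 *		newmap = ceph_osdmap_decode(&p, end);
 *	if (IS_ERR(newmap))
 *		return PTR_ERR(newmap);
 *	if (newmap != map) {
 *		ceph_osdmap_destroy(map);
 *		map = newmap;
 *	}
 */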

struct ceph_osds {
	int osds[CEPH_PG_MAX_SIZE];
	int size;
	int primary; /* id, NOT index */
};

static inline void ceph_osds_init(struct ceph_osds *set)
{
	set->size = 0;
	set->primary = -1;
}

void ceph_osds_copy(struct ceph_osds *dest, const struct ceph_osds *src);

bool ceph_is_new_interval(const struct ceph_osds *old_acting,
			  const struct ceph_osds *new_acting,
			  const struct ceph_osds *old_up,
			  const struct ceph_osds *new_up,
			  int old_size,
			  int new_size,
			  int old_min_size,
			  int new_min_size,
			  u32 old_pg_num,
			  u32 new_pg_num,
			  bool old_sort_bitwise,
			  bool new_sort_bitwise,
			  const struct ceph_pg *pgid);
bool ceph_osds_changed(const struct ceph_osds *old_acting,
		       const struct ceph_osds *new_acting,
		       bool any_change);

/* calculate mapping of a file extent to an object */
extern int ceph_calc_file_object_mapping(struct ceph_file_layout *layout,
					 u64 off, u64 len,
					 u64 *bno, u64 *oxoff, u64 *oxlen);
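
/*
 * Worked example (illustrative figures, not from the original header):
 * with 4M objects and no striping (stripe_unit == object_size == 4M,
 * stripe_count == 1), a write at off == 6M, len == 3M only reaches the
 * end of object 1:
 *
 *	*bno == 1, *oxoff == 2M, *oxlen == 2M
 *
 * The extent is truncated at the object boundary, so the caller is
 * expected to loop, resubmitting from off + *oxlen, until len is
 * consumed.
 */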

int ceph_object_locator_to_pg(struct ceph_osdmap *osdmap,
			      struct ceph_object_id *oid,
			      struct ceph_object_locator *oloc,
			      struct ceph_pg *raw_pgid);

void ceph_pg_to_up_acting_osds(struct ceph_osdmap *osdmap,
			       const struct ceph_pg *raw_pgid,
			       struct ceph_osds *up,
			       struct ceph_osds *acting);
int ceph_pg_to_acting_primary(struct ceph_osdmap *osdmap,
			      const struct ceph_pg *raw_pgid);
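
/*
 * Illustrative sketch only (not part of the original header): the full
 * object -> PG -> OSDs pipeline promised by the comment at the top of
 * this file.  Error handling and oid/oloc setup are elided.
 *
 *	struct ceph_pg raw_pgid;
 *	struct ceph_osds up, acting;
 *
 *	ceph_object_locator_to_pg(osdmap, &oid, &oloc, &raw_pgid);
 *	ceph_pg_to_up_acting_osds(osdmap, &raw_pgid, &up, &acting);
 *
 * Requests then go to acting.primary, which is an osd id (-1 if none).
 */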

extern struct ceph_pg_pool_info *ceph_pg_pool_by_id(struct ceph_osdmap *map,
						    u64 id);

extern const char *ceph_pg_pool_name_by_id(struct ceph_osdmap *map, u64 id);
extern int ceph_pg_poolid_by_name(struct ceph_osdmap *map, const char *name);

#endif