/* (Scrape artifact, preserved as a comment)
   Commit: "Include gdb_assert.h in common-defs.h"
   Path: deliverable/binutils-gdb.git / gdb / common / vec.h  */
1 /* Vector API for GDB.
2 Copyright (C) 2004-2014 Free Software Foundation, Inc.
3 Contributed by Nathan Sidwell <nathan@codesourcery.com>
4
5 This file is part of GDB.
6
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3 of the License, or
10 (at your option) any later version.
11
12 This program is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details.
16
17 You should have received a copy of the GNU General Public License
18 along with this program. If not, see <http://www.gnu.org/licenses/>. */
19
20 #if !defined (GDB_VEC_H)
21 #define GDB_VEC_H
22
23 #include <string.h>
24
25 /* The macros here implement a set of templated vector types and
26 associated interfaces. These templates are implemented with
27 macros, as we're not in C++ land. The interface functions are
28 typesafe and use static inline functions, sometimes backed by
29 out-of-line generic functions.
30
31 Because of the different behavior of structure objects, scalar
32 objects and of pointers, there are three flavors, one for each of
33 these variants. Both the structure object and pointer variants
34 pass pointers to objects around -- in the former case the pointers
35 are stored into the vector and in the latter case the pointers are
36 dereferenced and the objects copied into the vector. The scalar
37 object variant is suitable for int-like objects, and the vector
38 elements are returned by value.
39
40 There are both 'index' and 'iterate' accessors. The iterator
41 returns a boolean iteration condition and updates the iteration
42 variable passed by reference. Because the iterator will be
43 inlined, the address-of can be optimized away.
44
45 The vectors are implemented using the trailing array idiom, thus
46 they are not resizeable without changing the address of the vector
47 object itself. This means you cannot have variables or fields of
48 vector type -- always use a pointer to a vector. The one exception
49 is the final field of a structure, which could be a vector type.
50 You will have to use the embedded_size & embedded_init calls to
51 create such objects, and they will probably not be resizeable (so
52 don't use the 'safe' allocation variants). The trailing array
53 idiom is used (rather than a pointer to an array of data), because,
54 if we allow NULL to also represent an empty vector, empty vectors
55 occupy minimal space in the structure containing them.
56
57 Each operation that increases the number of active elements is
58 available in 'quick' and 'safe' variants. The former presumes that
59 there is sufficient allocated space for the operation to succeed
60 (it dies if there is not). The latter will reallocate the
61 vector, if needed. Reallocation causes an exponential increase in
62 vector size. If you know you will be adding N elements, it would
63 be more efficient to use the reserve operation before adding the
64 elements with the 'quick' operation. This will ensure there are at
65 least as many elements as you ask for, it will exponentially
   increase if there are too few spare slots.  If you want to reserve a
67 specific number of slots, but do not want the exponential increase
68 (for instance, you know this is the last allocation), use a
69 negative number for reservation. You can also create a vector of a
70 specific size from the get go.
71
72 You should prefer the push and pop operations, as they append and
73 remove from the end of the vector. If you need to remove several
74 items in one go, use the truncate operation. The insert and remove
75 operations allow you to change elements in the middle of the
76 vector. There are two remove operations, one which preserves the
77 element ordering 'ordered_remove', and one which does not
78 'unordered_remove'. The latter function copies the end element
79 into the removed slot, rather than invoke a memmove operation. The
80 'lower_bound' function will determine where to place an item in the
81 array using insert that will maintain sorted order.
82
83 If you need to directly manipulate a vector, then the 'address'
84 accessor will return the address of the start of the vector. Also
85 the 'space' predicate will tell you whether there is spare capacity
86 in the vector. You will not normally need to use these two functions.
87
88 Vector types are defined using a DEF_VEC_{O,P,I}(TYPEDEF) macro.
89 Variables of vector type are declared using a VEC(TYPEDEF) macro.
90 The characters O, P and I indicate whether TYPEDEF is a pointer
91 (P), object (O) or integral (I) type. Be careful to pick the
92 correct one, as you'll get an awkward and inefficient API if you
93 use the wrong one. There is a check, which results in a
94 compile-time warning, for the P and I versions, but there is no
95 check for the O versions, as that is not possible in plain C.
96
97 An example of their use would be,
98
99 DEF_VEC_P(tree); // non-managed tree vector.
100
101 struct my_struct {
102 VEC(tree) *v; // A (pointer to) a vector of tree pointers.
103 };
104
105 struct my_struct *s;
106
107 if (VEC_length(tree, s->v)) { we have some contents }
108 VEC_safe_push(tree, s->v, decl); // append some decl onto the end
109 for (ix = 0; VEC_iterate(tree, s->v, ix, elt); ix++)
110 { do something with elt }
111
112 */
113
114 /* Macros to invoke API calls. A single macro works for both pointer
115 and object vectors, but the argument and return types might well be
116 different. In each macro, T is the typedef of the vector elements.
117 Some of these macros pass the vector, V, by reference (by taking
118 its address), this is noted in the descriptions. */
119
120 /* Length of vector
121 unsigned VEC_T_length(const VEC(T) *v);
122
123 Return the number of active elements in V. V can be NULL, in which
124 case zero is returned. */
125
126 #define VEC_length(T,V) (VEC_OP(T,length)(V))
127
128
129 /* Check if vector is empty
130 int VEC_T_empty(const VEC(T) *v);
131
132 Return nonzero if V is an empty vector (or V is NULL), zero otherwise. */
133
134 #define VEC_empty(T,V) (VEC_length (T,V) == 0)
135
136
137 /* Get the final element of the vector.
138 T VEC_T_last(VEC(T) *v); // Integer
139 T VEC_T_last(VEC(T) *v); // Pointer
140 T *VEC_T_last(VEC(T) *v); // Object
141
142 Return the final element. V must not be empty. */
143
144 #define VEC_last(T,V) (VEC_OP(T,last)(V VEC_ASSERT_INFO))
145
146 /* Index into vector
147 T VEC_T_index(VEC(T) *v, unsigned ix); // Integer
148 T VEC_T_index(VEC(T) *v, unsigned ix); // Pointer
149 T *VEC_T_index(VEC(T) *v, unsigned ix); // Object
150
151 Return the IX'th element. If IX must be in the domain of V. */
152
153 #define VEC_index(T,V,I) (VEC_OP(T,index)(V,I VEC_ASSERT_INFO))
154
155 /* Iterate over vector
156 int VEC_T_iterate(VEC(T) *v, unsigned ix, T &ptr); // Integer
157 int VEC_T_iterate(VEC(T) *v, unsigned ix, T &ptr); // Pointer
158 int VEC_T_iterate(VEC(T) *v, unsigned ix, T *&ptr); // Object
159
160 Return iteration condition and update PTR to point to the IX'th
161 element. At the end of iteration, sets PTR to NULL. Use this to
162 iterate over the elements of a vector as follows,
163
164 for (ix = 0; VEC_iterate(T,v,ix,ptr); ix++)
165 continue; */
166
167 #define VEC_iterate(T,V,I,P) (VEC_OP(T,iterate)(V,I,&(P)))
168
169 /* Allocate new vector.
170 VEC(T,A) *VEC_T_alloc(int reserve);
171
172 Allocate a new vector with space for RESERVE objects. If RESERVE
173 is zero, NO vector is created. */
174
175 #define VEC_alloc(T,N) (VEC_OP(T,alloc)(N))
176
177 /* Free a vector.
178 void VEC_T_free(VEC(T,A) *&);
179
180 Free a vector and set it to NULL. */
181
182 #define VEC_free(T,V) (VEC_OP(T,free)(&V))
183
184 /* A cleanup function for a vector.
185 void VEC_T_cleanup(void *);
186
187 Clean up a vector. */
188
189 #define VEC_cleanup(T) (VEC_OP(T,cleanup))
190
191 /* Use these to determine the required size and initialization of a
192 vector embedded within another structure (as the final member).
193
194 size_t VEC_T_embedded_size(int reserve);
195 void VEC_T_embedded_init(VEC(T) *v, int reserve);
196
197 These allow the caller to perform the memory allocation. */
198
199 #define VEC_embedded_size(T,N) (VEC_OP(T,embedded_size)(N))
200 #define VEC_embedded_init(T,O,N) (VEC_OP(T,embedded_init)(VEC_BASE(O),N))
201
202 /* Copy a vector.
203 VEC(T,A) *VEC_T_copy(VEC(T) *);
204
205 Copy the live elements of a vector into a new vector. The new and
206 old vectors need not be allocated by the same mechanism. */
207
208 #define VEC_copy(T,V) (VEC_OP(T,copy)(V))
209
210 /* Merge two vectors.
211 VEC(T,A) *VEC_T_merge(VEC(T) *, VEC(T) *);
212
213 Copy the live elements of both vectors into a new vector. The new
214 and old vectors need not be allocated by the same mechanism. */
215 #define VEC_merge(T,V1,V2) (VEC_OP(T,merge)(V1, V2))
216
217 /* Determine if a vector has additional capacity.
218
219 int VEC_T_space (VEC(T) *v,int reserve)
220
221 If V has space for RESERVE additional entries, return nonzero. You
222 usually only need to use this if you are doing your own vector
223 reallocation, for instance on an embedded vector. This returns
224 nonzero in exactly the same circumstances that VEC_T_reserve
225 will. */
226
227 #define VEC_space(T,V,R) (VEC_OP(T,space)(V,R VEC_ASSERT_INFO))
228
229 /* Reserve space.
230 int VEC_T_reserve(VEC(T,A) *&v, int reserve);
231
232 Ensure that V has at least abs(RESERVE) slots available. The
233 signedness of RESERVE determines the reallocation behavior. A
234 negative value will not create additional headroom beyond that
235 requested. A positive value will create additional headroom. Note
236 this can cause V to be reallocated. Returns nonzero iff
237 reallocation actually occurred. */
238
239 #define VEC_reserve(T,V,R) (VEC_OP(T,reserve)(&(V),R VEC_ASSERT_INFO))
240
241 /* Push object with no reallocation
242 T *VEC_T_quick_push (VEC(T) *v, T obj); // Integer
243 T *VEC_T_quick_push (VEC(T) *v, T obj); // Pointer
244 T *VEC_T_quick_push (VEC(T) *v, T *obj); // Object
245
246 Push a new element onto the end, returns a pointer to the slot
247 filled in. For object vectors, the new value can be NULL, in which
248 case NO initialization is performed. There must
249 be sufficient space in the vector. */
250
251 #define VEC_quick_push(T,V,O) (VEC_OP(T,quick_push)(V,O VEC_ASSERT_INFO))
252
253 /* Push object with reallocation
254 T *VEC_T_safe_push (VEC(T,A) *&v, T obj); // Integer
255 T *VEC_T_safe_push (VEC(T,A) *&v, T obj); // Pointer
256 T *VEC_T_safe_push (VEC(T,A) *&v, T *obj); // Object
257
258 Push a new element onto the end, returns a pointer to the slot
259 filled in. For object vectors, the new value can be NULL, in which
260 case NO initialization is performed. Reallocates V, if needed. */
261
262 #define VEC_safe_push(T,V,O) (VEC_OP(T,safe_push)(&(V),O VEC_ASSERT_INFO))
263
264 /* Pop element off end
265 T VEC_T_pop (VEC(T) *v); // Integer
266 T VEC_T_pop (VEC(T) *v); // Pointer
267 void VEC_T_pop (VEC(T) *v); // Object
268
269 Pop the last element off the end. Returns the element popped, for
270 pointer vectors. */
271
272 #define VEC_pop(T,V) (VEC_OP(T,pop)(V VEC_ASSERT_INFO))
273
274 /* Truncate to specific length
275 void VEC_T_truncate (VEC(T) *v, unsigned len);
276
277 Set the length as specified. The new length must be less than or
278 equal to the current length. This is an O(1) operation. */
279
280 #define VEC_truncate(T,V,I) \
281 (VEC_OP(T,truncate)(V,I VEC_ASSERT_INFO))
282
283 /* Grow to a specific length.
284 void VEC_T_safe_grow (VEC(T,A) *&v, int len);
285
286 Grow the vector to a specific length. The LEN must be as
287 long or longer than the current length. The new elements are
288 uninitialized. */
289
290 #define VEC_safe_grow(T,V,I) \
291 (VEC_OP(T,safe_grow)(&(V),I VEC_ASSERT_INFO))
292
293 /* Replace element
294 T VEC_T_replace (VEC(T) *v, unsigned ix, T val); // Integer
295 T VEC_T_replace (VEC(T) *v, unsigned ix, T val); // Pointer
296 T *VEC_T_replace (VEC(T) *v, unsigned ix, T *val); // Object
297
298 Replace the IXth element of V with a new value, VAL. For pointer
299 vectors returns the original value. For object vectors returns a
300 pointer to the new value. For object vectors the new value can be
301 NULL, in which case no overwriting of the slot is actually
302 performed. */
303
304 #define VEC_replace(T,V,I,O) (VEC_OP(T,replace)(V,I,O VEC_ASSERT_INFO))
305
306 /* Insert object with no reallocation
307 T *VEC_T_quick_insert (VEC(T) *v, unsigned ix, T val); // Integer
308 T *VEC_T_quick_insert (VEC(T) *v, unsigned ix, T val); // Pointer
309 T *VEC_T_quick_insert (VEC(T) *v, unsigned ix, T *val); // Object
310
311 Insert an element, VAL, at the IXth position of V. Return a pointer
312 to the slot created. For vectors of object, the new value can be
313 NULL, in which case no initialization of the inserted slot takes
314 place. There must be sufficient space. */
315
316 #define VEC_quick_insert(T,V,I,O) \
317 (VEC_OP(T,quick_insert)(V,I,O VEC_ASSERT_INFO))
318
319 /* Insert object with reallocation
320 T *VEC_T_safe_insert (VEC(T,A) *&v, unsigned ix, T val); // Integer
321 T *VEC_T_safe_insert (VEC(T,A) *&v, unsigned ix, T val); // Pointer
322 T *VEC_T_safe_insert (VEC(T,A) *&v, unsigned ix, T *val); // Object
323
324 Insert an element, VAL, at the IXth position of V. Return a pointer
325 to the slot created. For vectors of object, the new value can be
326 NULL, in which case no initialization of the inserted slot takes
327 place. Reallocate V, if necessary. */
328
329 #define VEC_safe_insert(T,V,I,O) \
330 (VEC_OP(T,safe_insert)(&(V),I,O VEC_ASSERT_INFO))
331
332 /* Remove element retaining order
333 T VEC_T_ordered_remove (VEC(T) *v, unsigned ix); // Integer
334 T VEC_T_ordered_remove (VEC(T) *v, unsigned ix); // Pointer
335 void VEC_T_ordered_remove (VEC(T) *v, unsigned ix); // Object
336
337 Remove an element from the IXth position of V. Ordering of
338 remaining elements is preserved. For pointer vectors returns the
339 removed object. This is an O(N) operation due to a memmove. */
340
341 #define VEC_ordered_remove(T,V,I) \
342 (VEC_OP(T,ordered_remove)(V,I VEC_ASSERT_INFO))
343
344 /* Remove element destroying order
345 T VEC_T_unordered_remove (VEC(T) *v, unsigned ix); // Integer
346 T VEC_T_unordered_remove (VEC(T) *v, unsigned ix); // Pointer
347 void VEC_T_unordered_remove (VEC(T) *v, unsigned ix); // Object
348
349 Remove an element from the IXth position of V. Ordering of
350 remaining elements is destroyed. For pointer vectors returns the
351 removed object. This is an O(1) operation. */
352
353 #define VEC_unordered_remove(T,V,I) \
354 (VEC_OP(T,unordered_remove)(V,I VEC_ASSERT_INFO))
355
356 /* Remove a block of elements
357 void VEC_T_block_remove (VEC(T) *v, unsigned ix, unsigned len);
358
359 Remove LEN elements starting at the IXth. Ordering is retained.
360 This is an O(N) operation due to memmove. */
361
362 #define VEC_block_remove(T,V,I,L) \
363 (VEC_OP(T,block_remove)(V,I,L VEC_ASSERT_INFO))
364
365 /* Get the address of the array of elements
366 T *VEC_T_address (VEC(T) v)
367
368 If you need to directly manipulate the array (for instance, you
369 want to feed it to qsort), use this accessor. */
370
371 #define VEC_address(T,V) (VEC_OP(T,address)(V))
372
373 /* Find the first index in the vector not less than the object.
374 unsigned VEC_T_lower_bound (VEC(T) *v, const T val,
375 int (*lessthan) (const T, const T)); // Integer
376 unsigned VEC_T_lower_bound (VEC(T) *v, const T val,
377 int (*lessthan) (const T, const T)); // Pointer
378 unsigned VEC_T_lower_bound (VEC(T) *v, const T *val,
379 int (*lessthan) (const T*, const T*)); // Object
380
381 Find the first position in which VAL could be inserted without
382 changing the ordering of V. LESSTHAN is a function that returns
383 true if the first argument is strictly less than the second. */
384
385 #define VEC_lower_bound(T,V,O,LT) \
386 (VEC_OP(T,lower_bound)(V,O,LT VEC_ASSERT_INFO))
387
388 /* Reallocate an array of elements with prefix. */
389 extern void *vec_p_reserve (void *, int);
390 extern void *vec_o_reserve (void *, int, size_t, size_t);
391 #define vec_free_(V) xfree (V)
392
393 #define VEC_ASSERT_INFO ,__FILE__,__LINE__
394 #define VEC_ASSERT_DECL ,const char *file_,unsigned line_
395 #define VEC_ASSERT_PASS ,file_,line_
396 #define vec_assert(expr, op) \
397 ((void)((expr) ? 0 : (gdb_assert_fail (op, file_, line_, \
398 FUNCTION_NAME), 0)))
399
400 #define VEC(T) VEC_##T
401 #define VEC_OP(T,OP) VEC_##T##_##OP
402
403 #define VEC_T(T) \
404 typedef struct VEC(T) \
405 { \
406 unsigned num; \
407 unsigned alloc; \
408 T vec[1]; \
409 } VEC(T)
410
411 /* Vector of integer-like object. */
412 #define DEF_VEC_I(T) \
413 static inline void VEC_OP (T,must_be_integral_type) (void) \
414 { \
415 (void)~(T)0; \
416 } \
417 \
418 VEC_T(T); \
419 DEF_VEC_FUNC_P(T) \
420 DEF_VEC_ALLOC_FUNC_I(T) \
421 struct vec_swallow_trailing_semi
422
423 /* Vector of pointer to object. */
424 #define DEF_VEC_P(T) \
425 static inline void VEC_OP (T,must_be_pointer_type) (void) \
426 { \
427 (void)((T)1 == (void *)1); \
428 } \
429 \
430 VEC_T(T); \
431 DEF_VEC_FUNC_P(T) \
432 DEF_VEC_ALLOC_FUNC_P(T) \
433 struct vec_swallow_trailing_semi
434
435 /* Vector of object. */
436 #define DEF_VEC_O(T) \
437 VEC_T(T); \
438 DEF_VEC_FUNC_O(T) \
439 DEF_VEC_ALLOC_FUNC_O(T) \
440 struct vec_swallow_trailing_semi
441
442 #define DEF_VEC_ALLOC_FUNC_I(T) \
443 static inline VEC(T) *VEC_OP (T,alloc) \
444 (int alloc_) \
445 { \
446 /* We must request exact size allocation, hence the negation. */ \
447 return (VEC(T) *) vec_o_reserve (NULL, -alloc_, \
448 offsetof (VEC(T),vec), sizeof (T)); \
449 } \
450 \
451 static inline VEC(T) *VEC_OP (T,copy) (VEC(T) *vec_) \
452 { \
453 size_t len_ = vec_ ? vec_->num : 0; \
454 VEC (T) *new_vec_ = NULL; \
455 \
456 if (len_) \
457 { \
458 /* We must request exact size allocation, hence the negation. */ \
459 new_vec_ = (VEC (T) *) \
460 vec_o_reserve (NULL, -len_, offsetof (VEC(T),vec), sizeof (T)); \
461 \
462 new_vec_->num = len_; \
463 memcpy (new_vec_->vec, vec_->vec, sizeof (T) * len_); \
464 } \
465 return new_vec_; \
466 } \
467 \
468 static inline VEC(T) *VEC_OP (T,merge) (VEC(T) *vec1_, VEC(T) *vec2_) \
469 { \
470 if (vec1_ && vec2_) \
471 { \
472 size_t len_ = vec1_->num + vec2_->num; \
473 VEC (T) *new_vec_ = NULL; \
474 \
475 /* We must request exact size allocation, hence the negation. */ \
476 new_vec_ = (VEC (T) *) \
477 vec_o_reserve (NULL, -len_, offsetof (VEC(T),vec), sizeof (T)); \
478 \
479 new_vec_->num = len_; \
480 memcpy (new_vec_->vec, vec1_->vec, sizeof (T) * vec1_->num); \
481 memcpy (new_vec_->vec + vec1_->num, vec2_->vec, \
482 sizeof (T) * vec2_->num); \
483 \
484 return new_vec_; \
485 } \
486 else \
487 return VEC_copy (T, vec1_ ? vec1_ : vec2_); \
488 } \
489 \
490 static inline void VEC_OP (T,free) \
491 (VEC(T) **vec_) \
492 { \
493 if (*vec_) \
494 vec_free_ (*vec_); \
495 *vec_ = NULL; \
496 } \
497 \
498 static inline void VEC_OP (T,cleanup) \
499 (void *arg_) \
500 { \
501 VEC(T) **vec_ = arg_; \
502 if (*vec_) \
503 vec_free_ (*vec_); \
504 *vec_ = NULL; \
505 } \
506 \
507 static inline int VEC_OP (T,reserve) \
508 (VEC(T) **vec_, int alloc_ VEC_ASSERT_DECL) \
509 { \
510 int extend = !VEC_OP (T,space) \
511 (*vec_, alloc_ < 0 ? -alloc_ : alloc_ VEC_ASSERT_PASS); \
512 \
513 if (extend) \
514 *vec_ = (VEC(T) *) vec_o_reserve (*vec_, alloc_, \
515 offsetof (VEC(T),vec), sizeof (T)); \
516 \
517 return extend; \
518 } \
519 \
520 static inline void VEC_OP (T,safe_grow) \
521 (VEC(T) **vec_, int size_ VEC_ASSERT_DECL) \
522 { \
523 vec_assert (size_ >= 0 && VEC_OP(T,length) (*vec_) <= (unsigned)size_, \
524 "safe_grow"); \
525 VEC_OP (T,reserve) (vec_, (int)(*vec_ ? (*vec_)->num : 0) - size_ \
526 VEC_ASSERT_PASS); \
527 (*vec_)->num = size_; \
528 } \
529 \
530 static inline T *VEC_OP (T,safe_push) \
531 (VEC(T) **vec_, const T obj_ VEC_ASSERT_DECL) \
532 { \
533 VEC_OP (T,reserve) (vec_, 1 VEC_ASSERT_PASS); \
534 \
535 return VEC_OP (T,quick_push) (*vec_, obj_ VEC_ASSERT_PASS); \
536 } \
537 \
538 static inline T *VEC_OP (T,safe_insert) \
539 (VEC(T) **vec_, unsigned ix_, const T obj_ VEC_ASSERT_DECL) \
540 { \
541 VEC_OP (T,reserve) (vec_, 1 VEC_ASSERT_PASS); \
542 \
543 return VEC_OP (T,quick_insert) (*vec_, ix_, obj_ VEC_ASSERT_PASS); \
544 }
545
546 #define DEF_VEC_FUNC_P(T) \
547 static inline unsigned VEC_OP (T,length) (const VEC(T) *vec_) \
548 { \
549 return vec_ ? vec_->num : 0; \
550 } \
551 \
552 static inline T VEC_OP (T,last) \
553 (const VEC(T) *vec_ VEC_ASSERT_DECL) \
554 { \
555 vec_assert (vec_ && vec_->num, "last"); \
556 \
557 return vec_->vec[vec_->num - 1]; \
558 } \
559 \
560 static inline T VEC_OP (T,index) \
561 (const VEC(T) *vec_, unsigned ix_ VEC_ASSERT_DECL) \
562 { \
563 vec_assert (vec_ && ix_ < vec_->num, "index"); \
564 \
565 return vec_->vec[ix_]; \
566 } \
567 \
568 static inline int VEC_OP (T,iterate) \
569 (const VEC(T) *vec_, unsigned ix_, T *ptr) \
570 { \
571 if (vec_ && ix_ < vec_->num) \
572 { \
573 *ptr = vec_->vec[ix_]; \
574 return 1; \
575 } \
576 else \
577 { \
578 *ptr = 0; \
579 return 0; \
580 } \
581 } \
582 \
583 static inline size_t VEC_OP (T,embedded_size) \
584 (int alloc_) \
585 { \
586 return offsetof (VEC(T),vec) + alloc_ * sizeof(T); \
587 } \
588 \
589 static inline void VEC_OP (T,embedded_init) \
590 (VEC(T) *vec_, int alloc_) \
591 { \
592 vec_->num = 0; \
593 vec_->alloc = alloc_; \
594 } \
595 \
596 static inline int VEC_OP (T,space) \
597 (VEC(T) *vec_, int alloc_ VEC_ASSERT_DECL) \
598 { \
599 vec_assert (alloc_ >= 0, "space"); \
600 return vec_ ? vec_->alloc - vec_->num >= (unsigned)alloc_ : !alloc_; \
601 } \
602 \
603 static inline T *VEC_OP (T,quick_push) \
604 (VEC(T) *vec_, T obj_ VEC_ASSERT_DECL) \
605 { \
606 T *slot_; \
607 \
608 vec_assert (vec_->num < vec_->alloc, "quick_push"); \
609 slot_ = &vec_->vec[vec_->num++]; \
610 *slot_ = obj_; \
611 \
612 return slot_; \
613 } \
614 \
615 static inline T VEC_OP (T,pop) (VEC(T) *vec_ VEC_ASSERT_DECL) \
616 { \
617 T obj_; \
618 \
619 vec_assert (vec_->num, "pop"); \
620 obj_ = vec_->vec[--vec_->num]; \
621 \
622 return obj_; \
623 } \
624 \
625 static inline void VEC_OP (T,truncate) \
626 (VEC(T) *vec_, unsigned size_ VEC_ASSERT_DECL) \
627 { \
628 vec_assert (vec_ ? vec_->num >= size_ : !size_, "truncate"); \
629 if (vec_) \
630 vec_->num = size_; \
631 } \
632 \
633 static inline T VEC_OP (T,replace) \
634 (VEC(T) *vec_, unsigned ix_, T obj_ VEC_ASSERT_DECL) \
635 { \
636 T old_obj_; \
637 \
638 vec_assert (ix_ < vec_->num, "replace"); \
639 old_obj_ = vec_->vec[ix_]; \
640 vec_->vec[ix_] = obj_; \
641 \
642 return old_obj_; \
643 } \
644 \
645 static inline T *VEC_OP (T,quick_insert) \
646 (VEC(T) *vec_, unsigned ix_, T obj_ VEC_ASSERT_DECL) \
647 { \
648 T *slot_; \
649 \
650 vec_assert (vec_->num < vec_->alloc && ix_ <= vec_->num, "quick_insert"); \
651 slot_ = &vec_->vec[ix_]; \
652 memmove (slot_ + 1, slot_, (vec_->num++ - ix_) * sizeof (T)); \
653 *slot_ = obj_; \
654 \
655 return slot_; \
656 } \
657 \
658 static inline T VEC_OP (T,ordered_remove) \
659 (VEC(T) *vec_, unsigned ix_ VEC_ASSERT_DECL) \
660 { \
661 T *slot_; \
662 T obj_; \
663 \
664 vec_assert (ix_ < vec_->num, "ordered_remove"); \
665 slot_ = &vec_->vec[ix_]; \
666 obj_ = *slot_; \
667 memmove (slot_, slot_ + 1, (--vec_->num - ix_) * sizeof (T)); \
668 \
669 return obj_; \
670 } \
671 \
672 static inline T VEC_OP (T,unordered_remove) \
673 (VEC(T) *vec_, unsigned ix_ VEC_ASSERT_DECL) \
674 { \
675 T *slot_; \
676 T obj_; \
677 \
678 vec_assert (ix_ < vec_->num, "unordered_remove"); \
679 slot_ = &vec_->vec[ix_]; \
680 obj_ = *slot_; \
681 *slot_ = vec_->vec[--vec_->num]; \
682 \
683 return obj_; \
684 } \
685 \
686 static inline void VEC_OP (T,block_remove) \
687 (VEC(T) *vec_, unsigned ix_, unsigned len_ VEC_ASSERT_DECL) \
688 { \
689 T *slot_; \
690 \
691 vec_assert (ix_ + len_ <= vec_->num, "block_remove"); \
692 slot_ = &vec_->vec[ix_]; \
693 vec_->num -= len_; \
694 memmove (slot_, slot_ + len_, (vec_->num - ix_) * sizeof (T)); \
695 } \
696 \
697 static inline T *VEC_OP (T,address) \
698 (VEC(T) *vec_) \
699 { \
700 return vec_ ? vec_->vec : 0; \
701 } \
702 \
703 static inline unsigned VEC_OP (T,lower_bound) \
704 (VEC(T) *vec_, const T obj_, \
705 int (*lessthan_)(const T, const T) VEC_ASSERT_DECL) \
706 { \
707 unsigned int len_ = VEC_OP (T, length) (vec_); \
708 unsigned int half_, middle_; \
709 unsigned int first_ = 0; \
710 while (len_ > 0) \
711 { \
712 T middle_elem_; \
713 half_ = len_ >> 1; \
714 middle_ = first_; \
715 middle_ += half_; \
716 middle_elem_ = VEC_OP (T,index) (vec_, middle_ VEC_ASSERT_PASS); \
717 if (lessthan_ (middle_elem_, obj_)) \
718 { \
719 first_ = middle_; \
720 ++first_; \
721 len_ = len_ - half_ - 1; \
722 } \
723 else \
724 len_ = half_; \
725 } \
726 return first_; \
727 }
728
729 #define DEF_VEC_ALLOC_FUNC_P(T) \
730 static inline VEC(T) *VEC_OP (T,alloc) \
731 (int alloc_) \
732 { \
733 /* We must request exact size allocation, hence the negation. */ \
734 return (VEC(T) *) vec_p_reserve (NULL, -alloc_); \
735 } \
736 \
737 static inline void VEC_OP (T,free) \
738 (VEC(T) **vec_) \
739 { \
740 if (*vec_) \
741 vec_free_ (*vec_); \
742 *vec_ = NULL; \
743 } \
744 \
745 static inline void VEC_OP (T,cleanup) \
746 (void *arg_) \
747 { \
748 VEC(T) **vec_ = arg_; \
749 if (*vec_) \
750 vec_free_ (*vec_); \
751 *vec_ = NULL; \
752 } \
753 \
754 static inline VEC(T) *VEC_OP (T,copy) (VEC(T) *vec_) \
755 { \
756 size_t len_ = vec_ ? vec_->num : 0; \
757 VEC (T) *new_vec_ = NULL; \
758 \
759 if (len_) \
760 { \
761 /* We must request exact size allocation, hence the negation. */ \
762 new_vec_ = (VEC (T) *)(vec_p_reserve (NULL, -len_)); \
763 \
764 new_vec_->num = len_; \
765 memcpy (new_vec_->vec, vec_->vec, sizeof (T) * len_); \
766 } \
767 return new_vec_; \
768 } \
769 \
770 static inline VEC(T) *VEC_OP (T,merge) (VEC(T) *vec1_, VEC(T) *vec2_) \
771 { \
772 if (vec1_ && vec2_) \
773 { \
774 size_t len_ = vec1_->num + vec2_->num; \
775 VEC (T) *new_vec_ = NULL; \
776 \
777 /* We must request exact size allocation, hence the negation. */ \
778 new_vec_ = (VEC (T) *)(vec_p_reserve (NULL, -len_)); \
779 \
780 new_vec_->num = len_; \
781 memcpy (new_vec_->vec, vec1_->vec, sizeof (T) * vec1_->num); \
782 memcpy (new_vec_->vec + vec1_->num, vec2_->vec, \
783 sizeof (T) * vec2_->num); \
784 \
785 return new_vec_; \
786 } \
787 else \
788 return VEC_copy (T, vec1_ ? vec1_ : vec2_); \
789 } \
790 \
791 static inline int VEC_OP (T,reserve) \
792 (VEC(T) **vec_, int alloc_ VEC_ASSERT_DECL) \
793 { \
794 int extend = !VEC_OP (T,space) \
795 (*vec_, alloc_ < 0 ? -alloc_ : alloc_ VEC_ASSERT_PASS); \
796 \
797 if (extend) \
798 *vec_ = (VEC(T) *) vec_p_reserve (*vec_, alloc_); \
799 \
800 return extend; \
801 } \
802 \
803 static inline void VEC_OP (T,safe_grow) \
804 (VEC(T) **vec_, int size_ VEC_ASSERT_DECL) \
805 { \
806 vec_assert (size_ >= 0 && VEC_OP(T,length) (*vec_) <= (unsigned)size_, \
807 "safe_grow"); \
808 VEC_OP (T,reserve) \
809 (vec_, (int)(*vec_ ? (*vec_)->num : 0) - size_ VEC_ASSERT_PASS); \
810 (*vec_)->num = size_; \
811 } \
812 \
813 static inline T *VEC_OP (T,safe_push) \
814 (VEC(T) **vec_, T obj_ VEC_ASSERT_DECL) \
815 { \
816 VEC_OP (T,reserve) (vec_, 1 VEC_ASSERT_PASS); \
817 \
818 return VEC_OP (T,quick_push) (*vec_, obj_ VEC_ASSERT_PASS); \
819 } \
820 \
821 static inline T *VEC_OP (T,safe_insert) \
822 (VEC(T) **vec_, unsigned ix_, T obj_ VEC_ASSERT_DECL) \
823 { \
824 VEC_OP (T,reserve) (vec_, 1 VEC_ASSERT_PASS); \
825 \
826 return VEC_OP (T,quick_insert) (*vec_, ix_, obj_ VEC_ASSERT_PASS); \
827 }
828
829 #define DEF_VEC_FUNC_O(T) \
830 static inline unsigned VEC_OP (T,length) (const VEC(T) *vec_) \
831 { \
832 return vec_ ? vec_->num : 0; \
833 } \
834 \
835 static inline T *VEC_OP (T,last) (VEC(T) *vec_ VEC_ASSERT_DECL) \
836 { \
837 vec_assert (vec_ && vec_->num, "last"); \
838 \
839 return &vec_->vec[vec_->num - 1]; \
840 } \
841 \
842 static inline T *VEC_OP (T,index) \
843 (VEC(T) *vec_, unsigned ix_ VEC_ASSERT_DECL) \
844 { \
845 vec_assert (vec_ && ix_ < vec_->num, "index"); \
846 \
847 return &vec_->vec[ix_]; \
848 } \
849 \
850 static inline int VEC_OP (T,iterate) \
851 (VEC(T) *vec_, unsigned ix_, T **ptr) \
852 { \
853 if (vec_ && ix_ < vec_->num) \
854 { \
855 *ptr = &vec_->vec[ix_]; \
856 return 1; \
857 } \
858 else \
859 { \
860 *ptr = 0; \
861 return 0; \
862 } \
863 } \
864 \
865 static inline size_t VEC_OP (T,embedded_size) \
866 (int alloc_) \
867 { \
868 return offsetof (VEC(T),vec) + alloc_ * sizeof(T); \
869 } \
870 \
871 static inline void VEC_OP (T,embedded_init) \
872 (VEC(T) *vec_, int alloc_) \
873 { \
874 vec_->num = 0; \
875 vec_->alloc = alloc_; \
876 } \
877 \
878 static inline int VEC_OP (T,space) \
879 (VEC(T) *vec_, int alloc_ VEC_ASSERT_DECL) \
880 { \
881 vec_assert (alloc_ >= 0, "space"); \
882 return vec_ ? vec_->alloc - vec_->num >= (unsigned)alloc_ : !alloc_; \
883 } \
884 \
885 static inline T *VEC_OP (T,quick_push) \
886 (VEC(T) *vec_, const T *obj_ VEC_ASSERT_DECL) \
887 { \
888 T *slot_; \
889 \
890 vec_assert (vec_->num < vec_->alloc, "quick_push"); \
891 slot_ = &vec_->vec[vec_->num++]; \
892 if (obj_) \
893 *slot_ = *obj_; \
894 \
895 return slot_; \
896 } \
897 \
898 static inline void VEC_OP (T,pop) (VEC(T) *vec_ VEC_ASSERT_DECL) \
899 { \
900 vec_assert (vec_->num, "pop"); \
901 --vec_->num; \
902 } \
903 \
904 static inline void VEC_OP (T,truncate) \
905 (VEC(T) *vec_, unsigned size_ VEC_ASSERT_DECL) \
906 { \
907 vec_assert (vec_ ? vec_->num >= size_ : !size_, "truncate"); \
908 if (vec_) \
909 vec_->num = size_; \
910 } \
911 \
912 static inline T *VEC_OP (T,replace) \
913 (VEC(T) *vec_, unsigned ix_, const T *obj_ VEC_ASSERT_DECL) \
914 { \
915 T *slot_; \
916 \
917 vec_assert (ix_ < vec_->num, "replace"); \
918 slot_ = &vec_->vec[ix_]; \
919 if (obj_) \
920 *slot_ = *obj_; \
921 \
922 return slot_; \
923 } \
924 \
925 static inline T *VEC_OP (T,quick_insert) \
926 (VEC(T) *vec_, unsigned ix_, const T *obj_ VEC_ASSERT_DECL) \
927 { \
928 T *slot_; \
929 \
930 vec_assert (vec_->num < vec_->alloc && ix_ <= vec_->num, "quick_insert"); \
931 slot_ = &vec_->vec[ix_]; \
932 memmove (slot_ + 1, slot_, (vec_->num++ - ix_) * sizeof (T)); \
933 if (obj_) \
934 *slot_ = *obj_; \
935 \
936 return slot_; \
937 } \
938 \
939 static inline void VEC_OP (T,ordered_remove) \
940 (VEC(T) *vec_, unsigned ix_ VEC_ASSERT_DECL) \
941 { \
942 T *slot_; \
943 \
944 vec_assert (ix_ < vec_->num, "ordered_remove"); \
945 slot_ = &vec_->vec[ix_]; \
946 memmove (slot_, slot_ + 1, (--vec_->num - ix_) * sizeof (T)); \
947 } \
948 \
949 static inline void VEC_OP (T,unordered_remove) \
950 (VEC(T) *vec_, unsigned ix_ VEC_ASSERT_DECL) \
951 { \
952 vec_assert (ix_ < vec_->num, "unordered_remove"); \
953 vec_->vec[ix_] = vec_->vec[--vec_->num]; \
954 } \
955 \
956 static inline void VEC_OP (T,block_remove) \
957 (VEC(T) *vec_, unsigned ix_, unsigned len_ VEC_ASSERT_DECL) \
958 { \
959 T *slot_; \
960 \
961 vec_assert (ix_ + len_ <= vec_->num, "block_remove"); \
962 slot_ = &vec_->vec[ix_]; \
963 vec_->num -= len_; \
964 memmove (slot_, slot_ + len_, (vec_->num - ix_) * sizeof (T)); \
965 } \
966 \
967 static inline T *VEC_OP (T,address) \
968 (VEC(T) *vec_) \
969 { \
970 return vec_ ? vec_->vec : 0; \
971 } \
972 \
973 static inline unsigned VEC_OP (T,lower_bound) \
974 (VEC(T) *vec_, const T *obj_, \
975 int (*lessthan_)(const T *, const T *) VEC_ASSERT_DECL) \
976 { \
977 unsigned int len_ = VEC_OP (T, length) (vec_); \
978 unsigned int half_, middle_; \
979 unsigned int first_ = 0; \
980 while (len_ > 0) \
981 { \
982 T *middle_elem_; \
983 half_ = len_ >> 1; \
984 middle_ = first_; \
985 middle_ += half_; \
986 middle_elem_ = VEC_OP (T,index) (vec_, middle_ VEC_ASSERT_PASS); \
987 if (lessthan_ (middle_elem_, obj_)) \
988 { \
989 first_ = middle_; \
990 ++first_; \
991 len_ = len_ - half_ - 1; \
992 } \
993 else \
994 len_ = half_; \
995 } \
996 return first_; \
997 }
998
999 #define DEF_VEC_ALLOC_FUNC_O(T) \
1000 static inline VEC(T) *VEC_OP (T,alloc) \
1001 (int alloc_) \
1002 { \
1003 /* We must request exact size allocation, hence the negation. */ \
1004 return (VEC(T) *) vec_o_reserve (NULL, -alloc_, \
1005 offsetof (VEC(T),vec), sizeof (T)); \
1006 } \
1007 \
1008 static inline VEC(T) *VEC_OP (T,copy) (VEC(T) *vec_) \
1009 { \
1010 size_t len_ = vec_ ? vec_->num : 0; \
1011 VEC (T) *new_vec_ = NULL; \
1012 \
1013 if (len_) \
1014 { \
1015 /* We must request exact size allocation, hence the negation. */ \
1016 new_vec_ = (VEC (T) *) \
1017 vec_o_reserve (NULL, -len_, offsetof (VEC(T),vec), sizeof (T)); \
1018 \
1019 new_vec_->num = len_; \
1020 memcpy (new_vec_->vec, vec_->vec, sizeof (T) * len_); \
1021 } \
1022 return new_vec_; \
1023 } \
1024 \
1025 static inline VEC(T) *VEC_OP (T,merge) (VEC(T) *vec1_, VEC(T) *vec2_) \
1026 { \
1027 if (vec1_ && vec2_) \
1028 { \
1029 size_t len_ = vec1_->num + vec2_->num; \
1030 VEC (T) *new_vec_ = NULL; \
1031 \
1032 /* We must request exact size allocation, hence the negation. */ \
1033 new_vec_ = (VEC (T) *) \
1034 vec_o_reserve (NULL, -len_, offsetof (VEC(T),vec), sizeof (T)); \
1035 \
1036 new_vec_->num = len_; \
1037 memcpy (new_vec_->vec, vec1_->vec, sizeof (T) * vec1_->num); \
1038 memcpy (new_vec_->vec + vec1_->num, vec2_->vec, \
1039 sizeof (T) * vec2_->num); \
1040 \
1041 return new_vec_; \
1042 } \
1043 else \
1044 return VEC_copy (T, vec1_ ? vec1_ : vec2_); \
1045 } \
1046 \
1047 static inline void VEC_OP (T,free) \
1048 (VEC(T) **vec_) \
1049 { \
1050 if (*vec_) \
1051 vec_free_ (*vec_); \
1052 *vec_ = NULL; \
1053 } \
1054 \
1055 static inline void VEC_OP (T,cleanup) \
1056 (void *arg_) \
1057 { \
1058 VEC(T) **vec_ = arg_; \
1059 if (*vec_) \
1060 vec_free_ (*vec_); \
1061 *vec_ = NULL; \
1062 } \
1063 \
1064 static inline int VEC_OP (T,reserve) \
1065 (VEC(T) **vec_, int alloc_ VEC_ASSERT_DECL) \
1066 { \
1067 int extend = !VEC_OP (T,space) (*vec_, alloc_ < 0 ? -alloc_ : alloc_ \
1068 VEC_ASSERT_PASS); \
1069 \
1070 if (extend) \
1071 *vec_ = (VEC(T) *) \
1072 vec_o_reserve (*vec_, alloc_, offsetof (VEC(T),vec), sizeof (T)); \
1073 \
1074 return extend; \
1075 } \
1076 \
1077 static inline void VEC_OP (T,safe_grow) \
1078 (VEC(T) **vec_, int size_ VEC_ASSERT_DECL) \
1079 { \
1080 vec_assert (size_ >= 0 && VEC_OP(T,length) (*vec_) <= (unsigned)size_, \
1081 "safe_grow"); \
1082 VEC_OP (T,reserve) \
1083 (vec_, (int)(*vec_ ? (*vec_)->num : 0) - size_ VEC_ASSERT_PASS); \
1084 (*vec_)->num = size_; \
1085 } \
1086 \
1087 static inline T *VEC_OP (T,safe_push) \
1088 (VEC(T) **vec_, const T *obj_ VEC_ASSERT_DECL) \
1089 { \
1090 VEC_OP (T,reserve) (vec_, 1 VEC_ASSERT_PASS); \
1091 \
1092 return VEC_OP (T,quick_push) (*vec_, obj_ VEC_ASSERT_PASS); \
1093 } \
1094 \
1095 static inline T *VEC_OP (T,safe_insert) \
1096 (VEC(T) **vec_, unsigned ix_, const T *obj_ VEC_ASSERT_DECL) \
1097 { \
1098 VEC_OP (T,reserve) (vec_, 1 VEC_ASSERT_PASS); \
1099 \
1100 return VEC_OP (T,quick_insert) (*vec_, ix_, obj_ VEC_ASSERT_PASS); \
1101 }
1102
1103 #endif /* GDB_VEC_H */
/* (Scrape artifact, preserved as a comment) Page-generation footer
   from the git web viewer; not part of the source file.  */