docs: cleanup: Rephrase and correct typos
[barectf.git] / barectf / config_parse_v3.py
1 # The MIT License (MIT)
2 #
3 # Copyright (c) 2015-2020 Philippe Proulx <pproulx@efficios.com>
4 #
5 # Permission is hereby granted, free of charge, to any person obtaining
6 # a copy of this software and associated documentation files (the
7 # "Software"), to deal in the Software without restriction, including
8 # without limitation the rights to use, copy, modify, merge, publish,
9 # distribute, sublicense, and/or sell copies of the Software, and to
10 # permit persons to whom the Software is furnished to do so, subject to
11 # the following conditions:
12 #
13 # The above copyright notice and this permission notice shall be
14 # included in all copies or substantial portions of the Software.
15 #
16 # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
17 # EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
18 # MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
19 # IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
20 # CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
21 # TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
22 # SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
23
24 import barectf.config_parse_common as barectf_config_parse_common
25 from barectf.config_parse_common import _ConfigurationParseError
26 from barectf.config_parse_common import _append_error_ctx
27 from barectf.config_parse_common import _MapNode
28 import barectf.config as barectf_config
29 from barectf.config import _OptStructFt
30 import collections
31 import uuid
32 from barectf.typing import Count, Alignment, VersionNumber
33 from typing import Optional, List, Dict, Any, TextIO, Set, Iterable, Callable, Tuple, Type
34 import typing
35
36
37 # A barectf 3 YAML configuration parser.
38 #
39 # When you build such a parser, it parses the configuration node `node`
40 # (already loaded from the file having the path `path`) and creates a
41 # corresponding `barectf.Configuration` object which you can get with
42 # the `config` property.
43 #
44 # See the comments of _parse() for more implementation details about the
45 # parsing stages and general strategy.
46 class _Parser(barectf_config_parse_common._Parser):
47 # Builds a barectf 3 YAML configuration parser and parses the root
48 # configuration node `node` (already loaded from the file-like
49 # object `root_file`).
    def __init__(self, root_file: TextIO, node: barectf_config_parse_common._ConfigNodeV3,
                 with_pkg_include_dir: bool, inclusion_dirs: Optional[List[str]],
                 ignore_include_not_found: bool):
        # delegate common parsing setup to the base parser, pinning the
        # configuration format version to 3
        super().__init__(root_file, node, with_pkg_include_dir, inclusion_dirs,
                         ignore_include_not_found, VersionNumber(3))
        # Dispatch table: value of a field type node's `class` property
        # to the method which creates the corresponding field type
        # object(s); used by _create_fts().
        self._ft_cls_name_to_create_method: Dict[str, Callable[[_MapNode],
                                                               List[barectf_config._FieldType]]] = {
            'unsigned-integer': self._create_int_ft,
            'signed-integer': self._create_int_ft,
            'unsigned-enumeration': self._create_enum_ft,
            'signed-enumeration': self._create_enum_ft,
            'real': self._create_real_ft,
            'string': self._create_string_ft,
            'static-array': self._create_static_array_ft,
            'dynamic-array': self._create_dynamic_array_ft,
            'structure': self._create_struct_ft,
        }
        # parse immediately; the result is available through the
        # `config` property afterwards
        self._parse()
68
69 # Validates the alignment `alignment`, raising a
70 # `_ConfigurationParseError` exception using `ctx_obj_name` if it's
71 # invalid.
72 @staticmethod
73 def _validate_alignment(alignment: Alignment, ctx_obj_name: str):
74 assert alignment >= 1
75
76 # check for power of two
77 if (alignment & (alignment - 1)) != 0:
78 raise _ConfigurationParseError(ctx_obj_name,
79 f'Invalid alignment (not a power of two): {alignment}')
80
81 # Validates the TSDL identifier `iden`, raising a
82 # `_ConfigurationParseError` exception using `ctx_obj_name` and
83 # `prop` to format the message if it's invalid.
84 @staticmethod
85 def _validate_iden(iden: str, ctx_obj_name: str, prop: str):
86 assert type(iden) is str
87 ctf_keywords = {
88 'align',
89 'callsite',
90 'clock',
91 'enum',
92 'env',
93 'event',
94 'floating_point',
95 'integer',
96 'stream',
97 'string',
98 'struct',
99 'trace',
100 'typealias',
101 'typedef',
102 'variant',
103 }
104
105 if iden in ctf_keywords:
106 msg = f'Invalid {prop} (not a valid identifier): `{iden}`'
107 raise _ConfigurationParseError(ctx_obj_name, msg)
108
109 @staticmethod
110 def _alignment_prop(ft_node: _MapNode, prop_name: str) -> Alignment:
111 alignment = ft_node.get(prop_name)
112
113 if alignment is not None:
114 _Parser._validate_alignment(alignment, '`prop_name` property')
115
116 return Alignment(alignment)
117
118 @property
119 def _trace_type_node(self) -> _MapNode:
120 return self.config_node['trace']['type']
121
122 @staticmethod
123 def _byte_order_from_node(node: str) -> barectf_config.ByteOrder:
124 return {
125 'big-endian': barectf_config.ByteOrder.BIG_ENDIAN,
126 'little-endian': barectf_config.ByteOrder.LITTLE_ENDIAN,
127 }[node]
128
129 # Creates a bit array field type having the type `ft_type` from the
130 # bit array field type node `ft_node`, passing the additional
131 # `*args` to ft_type.__init__().
132 def _create_common_bit_array_ft(self, ft_node: _MapNode,
133 ft_type: Type[barectf_config._BitArrayFieldType],
134 default_alignment: Optional[Alignment],
135 *args) -> barectf_config._BitArrayFieldType:
136 alignment = self._alignment_prop(ft_node, 'alignment')
137
138 if alignment is None:
139 alignment = default_alignment
140
141 return ft_type(ft_node['size'], alignment, *args)
142
143 # Creates an integer field type having the type `ft_type` from the
144 # integer field type node `ft_node`, passing the additional `*args`
145 # to ft_type.__init__().
146 def _create_common_int_ft(self, ft_node: _MapNode,
147 ft_type: Type[barectf_config._IntegerFieldType], *args) -> barectf_config._IntegerFieldType:
148 preferred_display_base = {
149 'binary': barectf_config.DisplayBase.BINARY,
150 'octal': barectf_config.DisplayBase.OCTAL,
151 'decimal': barectf_config.DisplayBase.DECIMAL,
152 'hexadecimal': barectf_config.DisplayBase.HEXADECIMAL,
153 }[ft_node.get('preferred-display-base', 'decimal')]
154 return typing.cast(barectf_config._IntegerFieldType,
155 self._create_common_bit_array_ft(ft_node, ft_type, None,
156 preferred_display_base, *args))
157
158 # Creates an integer field type from the unsigned/signed integer
159 # field type node `ft_node`.
160 def _create_int_ft(self, ft_node: _MapNode) -> List[barectf_config._FieldType]:
161 ft_type = {
162 'unsigned-integer': barectf_config.UnsignedIntegerFieldType,
163 'signed-integer': barectf_config.SignedIntegerFieldType,
164 }[ft_node['class']]
165 return [self._create_common_int_ft(ft_node, ft_type)]
166
167 # Creates an enumeration field type from the unsigned/signed
168 # enumeration field type node `ft_node`.
169 def _create_enum_ft(self, ft_node: _MapNode) -> List[barectf_config._FieldType]:
170 ft_type = {
171 'unsigned-enumeration': barectf_config.UnsignedEnumerationFieldType,
172 'signed-enumeration': barectf_config.SignedEnumerationFieldType,
173 }[ft_node['class']]
174 mappings = collections.OrderedDict()
175
176 for label, mapping_node in ft_node['mappings'].items():
177 ranges = set()
178
179 for range_node in mapping_node:
180 if type(range_node) is list:
181 ranges.add(barectf_config.EnumerationFieldTypeMappingRange(range_node[0],
182 range_node[1]))
183 else:
184 assert type(range_node) is int
185 ranges.add(barectf_config.EnumerationFieldTypeMappingRange(range_node,
186 range_node))
187
188 mappings[label] = barectf_config.EnumerationFieldTypeMapping(ranges)
189
190 return [typing.cast(barectf_config._EnumerationFieldType,
191 self._create_common_int_ft(ft_node, ft_type,
192 barectf_config.EnumerationFieldTypeMappings(mappings)))]
193
194 # Creates a real field type from the real field type node `ft_node`.
195 def _create_real_ft(self, ft_node: _MapNode) -> List[barectf_config._FieldType]:
196 return [typing.cast(barectf_config.RealFieldType,
197 self._create_common_bit_array_ft(ft_node, barectf_config.RealFieldType,
198 Alignment(8)))]
199
200 # Creates a string field type from the string field type node
201 # `ft_node`.
    def _create_string_ft(self, ft_node: _MapNode) -> List[barectf_config._FieldType]:
        # A string field type node carries no parameters: always create
        # a default string field type (`ft_node` is intentionally
        # unused, but kept for dispatch-table signature uniformity).
        return [barectf_config.StringFieldType()]
204
205 def _create_array_ft(self, ft_type, ft_node: _MapNode, **kwargs) -> barectf_config._ArrayFieldType:
206 prop_name = 'element-field-type'
207
208 try:
209 element_fts = self._create_fts(ft_node[prop_name])
210 except _ConfigurationParseError as exc:
211 _append_error_ctx(exc, f'`{prop_name}` property')
212
213 if len(element_fts) != 1 or isinstance(element_fts[0], (barectf_config.StructureFieldType,
214 barectf_config.DynamicArrayFieldType)):
215 raise _ConfigurationParseError(f'`{prop_name}` property',
216 'Nested structure and dynamic array field types are not supported')
217
218 return ft_type(element_field_type=element_fts[0], **kwargs)
219
220 # Creates a static array field type from the static array field type
221 # node `ft_node`.
222 def _create_static_array_ft(self, ft_node: _MapNode) -> List[barectf_config._FieldType]:
223 return [typing.cast(barectf_config.StaticArrayFieldType,
224 self._create_array_ft(barectf_config.StaticArrayFieldType, ft_node,
225 length=ft_node['length']))]
226
227 # Creates a dynamic array field type from the dynamic array field
228 # type node `ft_node`.
229 def _create_dynamic_array_ft(self, ft_node: _MapNode) -> List[barectf_config._FieldType]:
230 # create length unsigned integer field type
231 len_ft = barectf_config.UnsignedIntegerFieldType(32, alignment=Alignment(8))
232 return [
233 len_ft,
234 typing.cast(barectf_config.DynamicArrayFieldType,
235 self._create_array_ft(barectf_config.DynamicArrayFieldType, ft_node,
236 length_field_type=len_ft))
237 ]
238
239 # Creates structure field type members from the structure field type
240 # members node `members_node`.
241 #
242 # `prop_name` is the name of the property of which `members_node` is
243 # the value.
    def _create_struct_ft_members(self, members_node: List[_MapNode],
                                  prop_name: str) -> barectf_config.StructureFieldTypeMembers:
        members = collections.OrderedDict()
        member_names: Set[str] = set()

        for member_node in members_node:
            # each member node is a single-entry mapping:
            # member name -> member properties node
            member_name, member_node = list(member_node.items())[0]

            if member_name in member_names:
                raise _ConfigurationParseError(f'`{prop_name}` property',
                                               f'Duplicate member `{member_name}`')

            self._validate_iden(member_name, f'`{prop_name}` property',
                                'structure field type member name')
            member_names.add(member_name)
            ft_prop_name = 'field-type'
            ft_node = member_node[ft_prop_name]

            try:
                # reject nested structure field types up front
                if ft_node['class'] in ['structure']:
                    raise _ConfigurationParseError(f'`{ft_prop_name}` property',
                                                   'Nested structure field types are not supported')

                try:
                    member_fts = self._create_fts(ft_node)
                except _ConfigurationParseError as exc:
                    _append_error_ctx(exc, f'`{ft_prop_name}` property')
            except _ConfigurationParseError as exc:
                _append_error_ctx(exc, f'Structure field type member `{member_name}`')

            if len(member_fts) == 2:
                # The only case where this happens is a dynamic array
                # field type node which generates an unsigned integer
                # field type for the length and the dynamic array field
                # type itself.
                assert type(member_fts[1]) is barectf_config.DynamicArrayFieldType
                members[f'__{member_name}_len'] = barectf_config.StructureFieldTypeMember(member_fts[0])
            else:
                assert len(member_fts) == 1

            # the member's own field type is always the last created one
            members[member_name] = barectf_config.StructureFieldTypeMember(member_fts[-1])

        return barectf_config.StructureFieldTypeMembers(members)
286
287 # Creates a structure field type from the structure field type node
288 # `ft_node`.
289 def _create_struct_ft(self, ft_node: _MapNode) -> List[barectf_config._FieldType]:
290 minimum_alignment = self._alignment_prop(ft_node, 'minimum-alignment')
291
292 if minimum_alignment is None:
293 minimum_alignment = 1
294
295 members = None
296 prop_name = 'members'
297 members_node = ft_node.get(prop_name)
298
299 if members_node is not None:
300 members = self._create_struct_ft_members(members_node, prop_name)
301
302 return [barectf_config.StructureFieldType(minimum_alignment, members)]
303
304 # Creates field types from the field type node `ft_node`.
305 def _create_fts(self, ft_node: _MapNode) -> List[barectf_config._FieldType]:
306 return self._ft_cls_name_to_create_method[ft_node['class']](ft_node)
307
308 # Creates field types from the field type node `parent_node[key]`
309 # if it exists.
310 def _try_create_fts(self, parent_node: _MapNode, key: str) -> Optional[List[barectf_config._FieldType]]:
311 if key not in parent_node:
312 return None
313
314 try:
315 return self._create_fts(parent_node[key])
316 except _ConfigurationParseError as exc:
317 _append_error_ctx(exc, f'`{key}` property')
318
319 # satisfy static type checker (never reached)
320 raise
321
322 # Like _try_create_fts(), but casts the result's type (first and
323 # only element) to `barectf_config.StructureFieldType` to satisfy
324 # static type checkers.
325 def _try_create_struct_ft(self, parent_node: _MapNode, key: str) -> _OptStructFt:
326 fts = self._try_create_fts(parent_node, key)
327
328 if fts is None:
329 return None
330
331 return typing.cast(barectf_config.StructureFieldType, fts[0])
332
333 # Returns the total number of members in the structure field type
334 # node `ft_node` if it exists, otherwise 0.
335 @staticmethod
336 def _total_struct_ft_node_members(ft_node: Optional[_MapNode]) -> Count:
337 if ft_node is None:
338 return Count(0)
339
340 members_node = ft_node.get('members')
341
342 if members_node is None:
343 return Count(0)
344
345 return Count(len(members_node))
346
347 # Creates an event record type from the event record type node
348 # `ert_node` named `name`.
349 #
350 # `ert_member_count` is the total number of structure field type
351 # members within the event record type so far (from the common part
352 # in its data stream type). For example, if the data stream type has
353 # an event record header field type with `id` and `timestamp`
354 # members, then `ert_member_count` is 2.
    def _create_ert(self, name: str, ert_node: _MapNode,
                    ert_member_count: Count) -> barectf_config.EventRecordType:
        try:
            self._validate_iden(name, '`name` property', 'event record type name')

            # make sure the event record type is not empty: add the
            # specific context and payload member counts to the count
            # inherited from the data stream type's common part
            spec_ctx_ft_prop_name = 'specific-context-field-type'
            payload_ft_prop_name = 'payload-field-type'
            ert_member_count = Count(ert_member_count + self._total_struct_ft_node_members(ert_node.get(spec_ctx_ft_prop_name)))
            ert_member_count = Count(ert_member_count + self._total_struct_ft_node_members(ert_node.get(payload_ft_prop_name)))

            if ert_member_count == 0:
                raise _ConfigurationParseError('Event record type',
                                               'Event record type is empty (no members).')

            # create event record type (log level, specific context and
            # payload field types are all optional)
            return barectf_config.EventRecordType(name, ert_node.get('log-level'),
                                                  self._try_create_struct_ft(ert_node,
                                                                             spec_ctx_ft_prop_name),
                                                  self._try_create_struct_ft(ert_node,
                                                                             payload_ft_prop_name))
        except _ConfigurationParseError as exc:
            _append_error_ctx(exc, f'Event record type `{name}`')

        # satisfy static type checker (never reached)
        raise
381
382 # Returns the effective feature field type for the field type
383 # node `parent_node[key]`, if any.
384 #
385 # Returns:
386 #
387 # If `parent_node[key]` is `False`:
388 # `None`.
389 #
390 # If `parent_node[key]` is `True`:
391 # `barectf_config.DEFAULT_FIELD_TYPE`.
392 #
393 # If `parent_node[key]` doesn't exist:
394 # `none` (parameter).
395 #
396 # Otherwise:
397 # A created field type.
398 def _feature_ft(self, parent_node: _MapNode, key: str, none: Any = None) -> Any:
399 if key not in parent_node:
400 # missing: default feature field type
401 return none
402
403 ft_node = parent_node[key]
404 assert ft_node is not None
405
406 if ft_node is True:
407 # default feature field type
408 return barectf_config.DEFAULT_FIELD_TYPE
409
410 if ft_node is False:
411 # disabled feature
412 return None
413
414 assert type(ft_node) is collections.OrderedDict
415 return self._create_fts(ft_node)[0]
416
    def _create_dst(self, name: str, dst_node: _MapNode) -> barectf_config.DataStreamType:
        # Creates a data stream type named `name` from the data stream
        # type node `dst_node`: resolves the default clock type,
        # computes the effective feature field types, creates the extra
        # packet context members and the event record types, and
        # assembles everything into a `barectf_config.DataStreamType`.
        try:
            # validate data stream type's name
            self._validate_iden(name, '`name` property', 'data stream type name')

            # get default clock type, if any
            def_clk_type = None
            prop_name = '$default-clock-type-name'
            def_clk_type_name = dst_node.get(prop_name)

            if def_clk_type_name is not None:
                try:
                    def_clk_type = self._clk_type(def_clk_type_name, prop_name)
                except _ConfigurationParseError as exc:
                    _append_error_ctx(exc, f'`{prop_name}` property')

            # create feature field types (initial defaults before the
            # `$features` node, if any, overrides them)
            pkt_total_size_ft = barectf_config.DEFAULT_FIELD_TYPE
            pkt_content_size_ft = barectf_config.DEFAULT_FIELD_TYPE
            pkt_beginning_ts_ft = None
            pkt_end_ts_ft = None
            pkt_disc_er_counter_snap_ft = barectf_config.DEFAULT_FIELD_TYPE
            ert_id_ft = barectf_config.DEFAULT_FIELD_TYPE
            ert_ts_ft = None
            pkt_seq_num_ft = None

            if def_clk_type is not None:
                # The data stream type has a default clock type.
                # Initialize the packet beginning timestamp, packet end
                # timestamp, and event record timestamp field types to
                # default field types.
                #
                # This means your data stream type node only needs a
                # default clock type name to enable those features
                # automatically. Those features do not add any parameter
                # to the event tracing functions.
                pkt_beginning_ts_ft = barectf_config.DEFAULT_FIELD_TYPE
                pkt_end_ts_ft = barectf_config.DEFAULT_FIELD_TYPE
                ert_ts_ft = barectf_config.DEFAULT_FIELD_TYPE

            features_node = dst_node.get('$features')

            if features_node is not None:
                # create packet feature field types
                pkt_node = features_node.get('packet')

                if pkt_node is not None:
                    pkt_total_size_ft = self._feature_ft(pkt_node, 'total-size-field-type',
                                                         pkt_total_size_ft)
                    pkt_content_size_ft = self._feature_ft(pkt_node, 'content-size-field-type',
                                                           pkt_content_size_ft)
                    pkt_beginning_ts_ft = self._feature_ft(pkt_node,
                                                           'beginning-timestamp-field-type',
                                                           pkt_beginning_ts_ft)
                    pkt_end_ts_ft = self._feature_ft(pkt_node, 'end-timestamp-field-type',
                                                     pkt_end_ts_ft)
                    pkt_disc_er_counter_snap_ft = self._feature_ft(pkt_node,
                                                                   'discarded-event-records-counter-snapshot-field-type',
                                                                   pkt_disc_er_counter_snap_ft)
                    pkt_seq_num_ft = self._feature_ft(pkt_node, 'sequence-number-field-type',
                                                      pkt_seq_num_ft)

                # create event record feature field types
                er_node = features_node.get('event-record')
                type_id_ft_prop_name = 'type-id-field-type'

                if er_node is not None:
                    ert_id_ft = self._feature_ft(er_node, type_id_ft_prop_name, ert_id_ft)
                    ert_ts_ft = self._feature_ft(er_node, 'timestamp-field-type', ert_ts_ft)

            erts_prop_name = 'event-record-types'
            ert_count = len(dst_node[erts_prop_name])

            # NOTE(review): `type_id_ft_prop_name` is only bound when a
            # `$features` node exists; the raises below would be a
            # NameError otherwise — presumably unreachable because
            # `ert_id_ft` defaults to a non-None field type, but confirm.
            try:
                if ert_id_ft is None and ert_count > 1:
                    raise _ConfigurationParseError(f'`{type_id_ft_prop_name}` property',
                                                   'Event record type ID field type feature is required because data stream type has more than one event record type')

                if isinstance(ert_id_ft, barectf_config._IntegerFieldType):
                    ert_id_int_ft = typing.cast(barectf_config._IntegerFieldType, ert_id_ft)

                    # the ID field type must be wide enough for all ERTs
                    if ert_count > (1 << ert_id_int_ft.size):
                        # NOTE(review): `accomodate` is a typo
                        # (`accommodate`) in this user-visible message.
                        raise _ConfigurationParseError(f'`{type_id_ft_prop_name}` property',
                                                       f'Field type\'s size ({ert_id_int_ft.size} bits) is too small to accomodate {ert_count} event record types')
            except _ConfigurationParseError as exc:
                exc._append_ctx('`event-record` property')
                _append_error_ctx(exc, '`$features` property')

            pkt_features = barectf_config.DataStreamTypePacketFeatures(pkt_total_size_ft,
                                                                       pkt_content_size_ft,
                                                                       pkt_beginning_ts_ft,
                                                                       pkt_end_ts_ft,
                                                                       pkt_disc_er_counter_snap_ft,
                                                                       pkt_seq_num_ft)
            er_features = barectf_config.DataStreamTypeEventRecordFeatures(ert_id_ft, ert_ts_ft)
            features = barectf_config.DataStreamTypeFeatures(pkt_features, er_features)

            # create packet context (structure) field type extra members
            pkt_ctx_ft_extra_members = None
            prop_name = 'packet-context-field-type-extra-members'
            pkt_ctx_ft_extra_members_node = dst_node.get(prop_name)

            if pkt_ctx_ft_extra_members_node is not None:
                pkt_ctx_ft_extra_members = self._create_struct_ft_members(pkt_ctx_ft_extra_members_node,
                                                                          prop_name)

                # check for illegal packet context field type member
                # names (those are produced by the feature field types)
                reserved_member_names = {
                    'packet_size',
                    'content_size',
                    'timestamp_begin',
                    'timestamp_end',
                    'events_discarded',
                    'packet_seq_num',
                }

                for member_name in pkt_ctx_ft_extra_members:
                    if member_name in reserved_member_names:
                        raise _ConfigurationParseError(f'`{prop_name}` property',
                                                       f'Packet context field type member name `{member_name}` is reserved.')

            # create event record types: first count the members the
            # common (header + common context) part contributes so that
            # _create_ert() can reject empty event record types
            er_header_common_ctx_member_count = Count(0)

            if er_features.type_id_field_type is not None:
                er_header_common_ctx_member_count = Count(er_header_common_ctx_member_count + 1)

            if er_features.timestamp_field_type is not None:
                er_header_common_ctx_member_count = Count(er_header_common_ctx_member_count + 1)

            er_common_ctx_ft_prop_name = 'event-record-common-context-field-type'
            er_common_ctx_ft_node = dst_node.get(er_common_ctx_ft_prop_name)
            er_header_common_ctx_member_count = Count(er_header_common_ctx_member_count + self._total_struct_ft_node_members(er_common_ctx_ft_node))
            erts = set()

            for ert_name, ert_node in dst_node[erts_prop_name].items():
                erts.add(self._create_ert(ert_name, ert_node, er_header_common_ctx_member_count))

            # create data stream type
            return barectf_config.DataStreamType(name, erts, def_clk_type, features,
                                                 pkt_ctx_ft_extra_members,
                                                 self._try_create_struct_ft(dst_node,
                                                                            er_common_ctx_ft_prop_name))
        except _ConfigurationParseError as exc:
            # NOTE(review): "Data data stream type" looks like a typo
            # ("Data stream type") in this user-visible context string.
            _append_error_ctx(exc, f'Data data stream type `{name}`')

        # satisfy static type checker (never reached)
        raise
565
566 def _clk_type(self, name: str, prop_name: str) -> barectf_config.ClockType:
567 clk_type = self._clk_types.get(name)
568
569 if clk_type is None:
570 raise _ConfigurationParseError(f'`{prop_name}` property',
571 f'Clock type `{name}` does not exist')
572
573 return clk_type
574
575 def _create_clk_type(self, name: str, clk_type_node: _MapNode) -> barectf_config.ClockType:
576 self._validate_iden(name, '`name` property', 'clock type name')
577 clk_type_uuid = None
578 uuid_node = clk_type_node.get('uuid')
579
580 if uuid_node is not None:
581 clk_type_uuid = uuid.UUID(uuid_node)
582
583 offset_seconds = 0
584 offset_cycles = Count(0)
585 offset_node = clk_type_node.get('offset')
586
587 if offset_node is not None:
588 offset_seconds = offset_node.get('seconds', 0)
589 offset_cycles = offset_node.get('cycles', Count(0))
590
591 return barectf_config.ClockType(name, clk_type_node.get('frequency', int(1e9)),
592 clk_type_uuid, clk_type_node.get('description'),
593 clk_type_node.get('precision', 0),
594 barectf_config.ClockTypeOffset(offset_seconds, offset_cycles),
595 clk_type_node.get('origin-is-unix-epoch', True))
596
597 def _create_clk_types(self):
598 self._clk_types = {}
599
600 for clk_type_name, clk_type_node in self._trace_type_node.get('clock-types', {}).items():
601 self._clk_types[clk_type_name] = self._create_clk_type(clk_type_name, clk_type_node)
602
    def _create_trace_type(self):
        # Creates the trace type object from the trace type node:
        # clock types, UUID, feature field types, and data stream types.
        try:
            # create clock types (_create_dst() needs them)
            self._create_clk_types()

            # get UUID (`auto` means generate one now)
            trace_type_uuid = None
            uuid_node = self._trace_type_node.get('uuid')

            if uuid_node is not None:
                if uuid_node == 'auto':
                    trace_type_uuid = uuid.uuid1()
                else:
                    trace_type_uuid = uuid.UUID(uuid_node)

            # create feature field types (defaults before the
            # `$features` node, if any, overrides them)
            magic_ft = barectf_config.DEFAULT_FIELD_TYPE
            uuid_ft = None
            dst_id_ft = barectf_config.DEFAULT_FIELD_TYPE

            if trace_type_uuid is not None:
                # Trace type has a UUID: initialize UUID field type to
                # a default field type.
                uuid_ft = barectf_config.DEFAULT_FIELD_TYPE

            features_node = self._trace_type_node.get('$features')
            dst_id_ft_prop_name = 'data-stream-type-id-field-type'

            if features_node is not None:
                magic_ft = self._feature_ft(features_node, 'magic-field-type',
                                            magic_ft)
                uuid_ft = self._feature_ft(features_node, 'uuid-field-type', uuid_ft)
                dst_id_ft = self._feature_ft(features_node, dst_id_ft_prop_name, dst_id_ft)

            dsts_prop_name = 'data-stream-types'
            dst_count = len(self._trace_type_node[dsts_prop_name])

            try:
                if dst_id_ft is None and dst_count > 1:
                    raise _ConfigurationParseError(f'`{dst_id_ft_prop_name}` property',
                                                   'Data stream type ID field type feature is required because trace type has more than one data stream type')

                # NOTE(review): this tests `_FieldType` but then reads
                # `.size`; the analogous event record type ID check in
                # _create_dst() uses `_IntegerFieldType` — confirm
                # whether this should match it. Also `accomodate` is a
                # typo (`accommodate`) in the user-visible message.
                if isinstance(dst_id_ft, barectf_config._FieldType) and dst_count > (1 << dst_id_ft.size):
                    raise _ConfigurationParseError(f'`{dst_id_ft_prop_name}` property',
                                                   f'Field type\'s size ({dst_id_ft.size} bits) is too small to accomodate {dst_count} data stream types')
            except _ConfigurationParseError as exc:
                _append_error_ctx(exc, '`$features` property')

            features = barectf_config.TraceTypeFeatures(magic_ft, uuid_ft, dst_id_ft)

            # create data stream types
            dsts = set()

            for dst_name, dst_node in self._trace_type_node[dsts_prop_name].items():
                dsts.add(self._create_dst(dst_name, dst_node))

            # create trace type: the concrete class depends on whether
            # the configuration names the native byte order explicitly
            if self._trace_byte_order_prop_key == 'native-byte-order':
                trace_type_cls = barectf_config.TraceType
            else:
                trace_type_cls = barectf_config.TraceTypeWithUnknownNativeByteOrder

            return trace_type_cls(self._trace_byte_order, dsts, trace_type_uuid, features)
        except _ConfigurationParseError as exc:
            _append_error_ctx(exc, 'Trace type')
668
669 def _create_trace(self):
670 try:
671 trace_type = self._create_trace_type()
672 trace_node = self.config_node['trace']
673 env = None
674 env_node = trace_node.get('environment')
675
676 if env_node is not None:
677 # validate each environment variable name
678 for name in env_node:
679 self._validate_iden(name, '`environment` property',
680 'environment variable name')
681
682 # the node already has the expected structure
683 env = barectf_config.TraceEnvironment(env_node)
684
685 return barectf_config.Trace(trace_type, env)
686
687 except _ConfigurationParseError as exc:
688 _append_error_ctx(exc, 'Trace')
689
    def _create_config(self):
        # Creates the final `barectf_config.Configuration` object
        # (`self._config`) from the parsed nodes: the trace, the default
        # data stream type, the clock type C type mapping, and the
        # code generation options.

        # create trace first
        trace = self._create_trace()

        # find default data stream type, if any (at most one node may
        # have a true `$is-default` property)
        def_dst = None

        for dst_name, dst_node in self._trace_type_node['data-stream-types'].items():
            prop_name = '$is-default'
            is_default = dst_node.get(prop_name)

            if is_default is True:
                if def_dst is not None:
                    exc = _ConfigurationParseError(f'`{prop_name}` property',
                                                   f'Duplicate default data stream type (`{def_dst.name}`)')
                    exc._append_ctx(f'Data stream type `{dst_name}`')
                    _append_error_ctx(exc, 'Trace type')

                def_dst = trace.type.data_stream_type(dst_name)

        # create clock type C type mapping (default clock type object to
        # the `$c-type` string of its node, when present)
        clk_types_node = self._trace_type_node.get('clock-types')
        clk_type_c_types = None

        if clk_types_node is not None:
            clk_type_c_types = collections.OrderedDict()

            for dst in trace.type.data_stream_types:
                if dst.default_clock_type is None:
                    continue

                clk_type_node = clk_types_node[dst.default_clock_type.name]
                c_type = clk_type_node.get('$c-type')

                if c_type is not None:
                    clk_type_c_types[dst.default_clock_type] = c_type

        # create options (defaults below, possibly overridden by the
        # root `options` node)
        iden_prefix_def = False
        def_dst_name_def = False
        opts_node = self.config_node.get('options')
        iden_prefix = 'barectf_'
        file_name_prefix = 'barectf'

        if opts_node is not None:
            code_gen_opts_node = opts_node.get('code-generation')

            if code_gen_opts_node is not None:
                prefix_node = code_gen_opts_node.get('prefix', 'barectf')

                if type(prefix_node) is str:
                    # automatic prefixes (single string: derive both)
                    iden_prefix = f'{prefix_node}_'
                    file_name_prefix = prefix_node
                else:
                    iden_prefix = prefix_node['identifier']
                    file_name_prefix = prefix_node['file-name']

                header_opts = code_gen_opts_node.get('header')

                if header_opts is not None:
                    iden_prefix_def = header_opts.get('identifier-prefix-definition', False)
                    def_dst_name_def = header_opts.get('default-data-stream-type-name-definition',
                                                       False)

        header_opts = barectf_config.ConfigurationCodeGenerationHeaderOptions(iden_prefix_def,
                                                                              def_dst_name_def)
        cg_opts = barectf_config.ConfigurationCodeGenerationOptions(iden_prefix, file_name_prefix,
                                                                    def_dst, header_opts,
                                                                    clk_type_c_types)
        opts = barectf_config.ConfigurationOptions(cg_opts)

        # create configuration
        self._config = barectf_config.Configuration(trace, opts)
764
765 # Expands the field type aliases found in the trace type node.
766 #
767 # This method modifies the trace type node.
768 #
769 # When this method returns:
770 #
771 # * Any field type alias is replaced with its full field type
772 # node equivalent.
773 #
774 # * The `$field-type-aliases` property of the trace type node is
775 # removed.
    def _expand_ft_aliases(self):
        # Local helper: replaces `parent_node[key]` with the resolved
        # field type node when the property exists and is either an
        # alias name (string) or a field type node (mapping).
        def resolve_ft_alias_from(parent_node: _MapNode, key: str):
            if key not in parent_node:
                return

            if type(parent_node[key]) not in [collections.OrderedDict, str]:
                return

            self._resolve_ft_alias_from(ft_aliases_node, parent_node, key)

        ft_aliases_node = self._trace_type_node['$field-type-aliases']

        # Expand field type aliases within trace, data stream, and event
        # record type nodes.
        features_prop_name = '$features'

        try:
            features_node = self._trace_type_node.get(features_prop_name)

            if features_node is not None:
                try:
                    resolve_ft_alias_from(features_node, 'magic-field-type')
                    resolve_ft_alias_from(features_node, 'uuid-field-type')
                    resolve_ft_alias_from(features_node, 'data-stream-type-id-field-type')
                except _ConfigurationParseError as exc:
                    _append_error_ctx(exc, f'`{features_prop_name}` property')
        except _ConfigurationParseError as exc:
            _append_error_ctx(exc, 'Trace type')

        for dst_name, dst_node in self._trace_type_node['data-stream-types'].items():
            try:
                # expand aliases in the data stream type's `$features`
                # node, if any
                features_node = dst_node.get(features_prop_name)

                if features_node is not None:
                    try:
                        pkt_prop_name = 'packet'
                        pkt_node = features_node.get(pkt_prop_name)

                        if pkt_node is not None:
                            try:
                                resolve_ft_alias_from(pkt_node, 'total-size-field-type')
                                resolve_ft_alias_from(pkt_node, 'content-size-field-type')
                                resolve_ft_alias_from(pkt_node, 'beginning-timestamp-field-type')
                                resolve_ft_alias_from(pkt_node, 'end-timestamp-field-type')
                                resolve_ft_alias_from(pkt_node,
                                                      'discarded-event-records-counter-snapshot-field-type')
                                resolve_ft_alias_from(pkt_node, 'sequence-number-field-type')
                            except _ConfigurationParseError as exc:
                                _append_error_ctx(exc, f'`{pkt_prop_name}` property')

                        er_prop_name = 'event-record'
                        er_node = features_node.get(er_prop_name)

                        if er_node is not None:
                            try:
                                resolve_ft_alias_from(er_node, 'type-id-field-type')
                                resolve_ft_alias_from(er_node, 'timestamp-field-type')
                            except _ConfigurationParseError as exc:
                                _append_error_ctx(exc, f'`{er_prop_name}` property')
                    except _ConfigurationParseError as exc:
                        _append_error_ctx(exc, f'`{features_prop_name}` property')

                # expand aliases in the extra packet context members
                pkt_ctx_ft_extra_members_prop_name = 'packet-context-field-type-extra-members'
                pkt_ctx_ft_extra_members_node = dst_node.get(pkt_ctx_ft_extra_members_prop_name)

                if pkt_ctx_ft_extra_members_node is not None:
                    try:
                        for member_node in pkt_ctx_ft_extra_members_node:
                            member_node = list(member_node.values())[0]
                            resolve_ft_alias_from(member_node, 'field-type')
                    except _ConfigurationParseError as exc:
                        _append_error_ctx(exc, f'`{pkt_ctx_ft_extra_members_prop_name}` property')

                resolve_ft_alias_from(dst_node, 'event-record-common-context-field-type')

                # expand aliases in each event record type node
                for ert_name, ert_node in dst_node['event-record-types'].items():
                    try:
                        resolve_ft_alias_from(ert_node, 'specific-context-field-type')
                        resolve_ft_alias_from(ert_node, 'payload-field-type')
                    except _ConfigurationParseError as exc:
                        _append_error_ctx(exc, f'Event record type `{ert_name}`')
            except _ConfigurationParseError as exc:
                _append_error_ctx(exc, f'Data stream type `{dst_name}`')

        # remove the (now unneeded) `$field-type-aliases` property
        del self._trace_type_node['$field-type-aliases']
862
863 # Applies field type inheritance to all field type nodes found in
864 # the trace type node.
865 #
866 # This method modifies the trace type node.
867 #
868 # When this method returns, no field type node has an `$inherit`
869 # property.
870 def _apply_fts_inheritance(self):
871 def apply_ft_inheritance(parent_node: _MapNode, key: str):
872 if key not in parent_node:
873 return
874
875 if type(parent_node[key]) is not collections.OrderedDict:
876 return
877
878 self._apply_ft_inheritance(parent_node, key)
879
880 features_prop_name = '$features'
881 features_node = self._trace_type_node.get(features_prop_name)
882
883 if features_node is not None:
884 apply_ft_inheritance(features_node, 'magic-field-type')
885 apply_ft_inheritance(features_node, 'uuid-field-type')
886 apply_ft_inheritance(features_node, 'data-stream-type-id-field-type')
887
888 for dst_node in self._trace_type_node['data-stream-types'].values():
889 features_node = dst_node.get(features_prop_name)
890
891 if features_node is not None:
892 pkt_node = features_node.get('packet')
893
894 if pkt_node is not None:
895 apply_ft_inheritance(pkt_node, 'total-size-field-type')
896 apply_ft_inheritance(pkt_node, 'content-size-field-type')
897 apply_ft_inheritance(pkt_node, 'beginning-timestamp-field-type')
898 apply_ft_inheritance(pkt_node, 'end-timestamp-field-type')
899 apply_ft_inheritance(pkt_node,
900 'discarded-event-records-counter-snapshot-field-type')
901 apply_ft_inheritance(pkt_node, 'sequence-number-field-type')
902
903 er_node = features_node.get('event-record')
904
905 if er_node is not None:
906 apply_ft_inheritance(er_node, 'type-id-field-type')
907 apply_ft_inheritance(er_node, 'timestamp-field-type')
908
909 pkt_ctx_ft_extra_members_node = dst_node.get('packet-context-field-type-extra-members')
910
911 if pkt_ctx_ft_extra_members_node is not None:
912 for member_node in pkt_ctx_ft_extra_members_node:
913 member_node = list(member_node.values())[0]
914 apply_ft_inheritance(member_node, 'field-type')
915
916 apply_ft_inheritance(dst_node, 'event-record-common-context-field-type')
917
918 for ert_node in dst_node['event-record-types'].values():
919 apply_ft_inheritance(ert_node, 'specific-context-field-type')
920 apply_ft_inheritance(ert_node, 'payload-field-type')
921
922 # Normalizes structure field type member nodes.
923 #
924 # A structure field type member node can look like this:
925 #
926 # - msg: custom-string
927 #
928 # which is the equivalent of this:
929 #
930 # - msg:
931 # field-type: custom-string
932 #
933 # This method normalizes form 1 to use form 2.
934 def _normalize_struct_ft_member_nodes(self):
935 def normalize_members_node(members_node: List[_MapNode]):
936 ft_prop_name = 'field-type'
937
938 for member_node in members_node:
939 member_name, val_node = list(member_node.items())[0]
940
941 if type(val_node) is str:
942 member_node[member_name] = collections.OrderedDict({
943 ft_prop_name: val_node
944 })
945
946 normalize_struct_ft_member_nodes(member_node[member_name], ft_prop_name)
947
948 def normalize_struct_ft_member_nodes(parent_node: _MapNode, key: str):
949 if type(parent_node) is not collections.OrderedDict:
950 return
951
952 ft_node = parent_node.get(key)
953
954 if type(ft_node) is not collections.OrderedDict:
955 return
956
957 ft_node = typing.cast(collections.OrderedDict, ft_node)
958 members_nodes = ft_node.get('members')
959
960 if members_nodes is not None:
961 normalize_members_node(members_nodes)
962
963 prop_name = '$field-type-aliases'
964 ft_aliases_node = self._trace_type_node.get(prop_name)
965
966 if ft_aliases_node is not None:
967 for alias in ft_aliases_node:
968 normalize_struct_ft_member_nodes(ft_aliases_node, alias)
969
970 features_prop_name = '$features'
971 features_node = self._trace_type_node.get(features_prop_name)
972
973 if features_node is not None:
974 normalize_struct_ft_member_nodes(features_node, 'magic-field-type')
975 normalize_struct_ft_member_nodes(features_node, 'uuid-field-type')
976 normalize_struct_ft_member_nodes(features_node, 'data-stream-type-id-field-type')
977
978 for dst_node in self._trace_type_node['data-stream-types'].values():
979 features_node = dst_node.get(features_prop_name)
980
981 if features_node is not None:
982 pkt_node = features_node.get('packet')
983
984 if pkt_node is not None:
985 normalize_struct_ft_member_nodes(pkt_node, 'total-size-field-type')
986 normalize_struct_ft_member_nodes(pkt_node, 'content-size-field-type')
987 normalize_struct_ft_member_nodes(pkt_node, 'beginning-timestamp-field-type')
988 normalize_struct_ft_member_nodes(pkt_node, 'end-timestamp-field-type')
989 normalize_struct_ft_member_nodes(pkt_node,
990 'discarded-event-records-counter-snapshot-field-type')
991 normalize_struct_ft_member_nodes(pkt_node, 'sequence-number-field-type')
992
993 er_node = features_node.get('event-record')
994
995 if er_node is not None:
996 normalize_struct_ft_member_nodes(er_node, 'type-id-field-type')
997 normalize_struct_ft_member_nodes(er_node, 'timestamp-field-type')
998
999 pkt_ctx_ft_extra_members_node = dst_node.get('packet-context-field-type-extra-members')
1000
1001 if pkt_ctx_ft_extra_members_node is not None:
1002 normalize_members_node(pkt_ctx_ft_extra_members_node)
1003
1004 normalize_struct_ft_member_nodes(dst_node, 'event-record-common-context-field-type')
1005
1006 for ert_node in dst_node['event-record-types'].values():
1007 normalize_struct_ft_member_nodes(ert_node, 'specific-context-field-type')
1008 normalize_struct_ft_member_nodes(ert_node, 'payload-field-type')
1009
1010 # Calls _expand_ft_aliases() and _apply_fts_inheritance() if the
1011 # trace type node has a `$field-type-aliases` property.
1012 def _expand_fts(self):
1013 # Make sure that the current configuration node is valid
1014 # considering field types are not expanded yet.
1015 self._schema_validator.validate(self.config_node,
1016 'config/3/config-pre-field-type-expansion')
1017
1018 prop_name = '$field-type-aliases'
1019 ft_aliases_node = self._trace_type_node.get(prop_name)
1020
1021 if ft_aliases_node is None:
1022 # If there's no `'$field-type-aliases'` node, then there's
1023 # no field type aliases and therefore no possible
1024 # inheritance.
1025 if prop_name in self._trace_type_node:
1026 del self._trace_type_node[prop_name]
1027
1028 return
1029
1030 # normalize structure field type member nodes
1031 self._normalize_struct_ft_member_nodes()
1032
1033 # first, expand field type aliases
1034 self._expand_ft_aliases()
1035
1036 # next, apply inheritance to create effective field type nodes
1037 self._apply_fts_inheritance()
1038
1039 # Substitute the event record type node log level aliases with their
1040 # numeric equivalents.
1041 #
1042 # Removes the `$log-level-aliases` property of the trace type node.
1043 def _sub_log_level_aliases(self):
1044 # Make sure that the current configuration node is valid
1045 # considering log level aliases are not substituted yet.
1046 self._schema_validator.validate(self.config_node,
1047 'config/3/config-pre-log-level-alias-sub')
1048
1049 log_level_aliases_prop_name = '$log-level-aliases'
1050 log_level_aliases_node = self._trace_type_node.get(log_level_aliases_prop_name)
1051
1052 if log_level_aliases_prop_name in self._trace_type_node:
1053 del self._trace_type_node[log_level_aliases_prop_name]
1054
1055 if log_level_aliases_node is None:
1056 # no log level aliases
1057 return
1058
1059 # substitute log level aliases
1060 for dst_name, dst_node in self._trace_type_node['data-stream-types'].items():
1061 try:
1062 for ert_name, ert_node in dst_node['event-record-types'].items():
1063 try:
1064 prop_name = 'log-level'
1065 ll_node = ert_node.get(prop_name)
1066
1067 if ll_node is None:
1068 continue
1069
1070 if type(ll_node) is str:
1071 if ll_node not in log_level_aliases_node:
1072 raise _ConfigurationParseError(f'`{prop_name}` property',
1073 f'Log level alias `{ll_node}` does not exist')
1074
1075 ert_node[prop_name] = log_level_aliases_node[ll_node]
1076 except _ConfigurationParseError as exc:
1077 _append_error_ctx(exc, f'Event record type `{ert_name}`')
1078 except _ConfigurationParseError as exc:
1079 _append_error_ctx(exc, f'Data stream type `{dst_name}`')
1080
1081 # Generator of parent node and key pairs for all the nodes,
1082 # recursively, of `node`.
1083 #
1084 # It is safe to delete a yielded node during the iteration.
1085 @staticmethod
1086 def _props(node: Any) -> Iterable[Tuple[Any, str]]:
1087 if type(node) is collections.OrderedDict:
1088 for key in list(node):
1089 yield from _Parser._props(node[key])
1090 yield node, key
1091 elif type(node) is list:
1092 for item_node in node:
1093 yield from _Parser._props(item_node)
1094
    # Generator of parent node and key pairs for all the nodes,
    # recursively, of the trace type node of the configuration node.
    #
    # It is safe to delete a yielded node during the iteration.
    def _trace_type_props(self) -> Iterable[Tuple[Any, str]]:
        yield from _Parser._props(self.config_node['trace']['type'])
1097
1098 def _set_trace_byte_order_prop_key(self):
1099 if 'native-byte-order' in self._trace_type_node:
1100 self._trace_byte_order_prop_key = 'native-byte-order'
1101 else:
1102 self._trace_byte_order_prop_key = 'trace-byte-order'
1103
1104 # Normalize the properties of the configuration node.
1105 #
1106 # This method, for each property of the trace type node:
1107 #
1108 # 1. Removes it if it's `None` (means default).
1109 #
1110 # 2. Chooses a specific `class` property value.
1111 #
1112 # 3. Chooses a specific trace byte order property value.
1113 #
1114 # 4. Chooses a specific `preferred-display-base` property value.
1115 #
1116 # This method also applies 1. to the trace node's `environment`
1117 # property.
1118 def _normalize_props(self):
1119 def normalize_byte_order_prop(parent_node: _MapNode, key: str):
1120 node = parent_node[key]
1121
1122 if node in ['be', 'big']:
1123 parent_node[key] = 'big-endian'
1124 elif node in ['le', 'little']:
1125 parent_node[key] = 'little-endian'
1126
1127 trace_node = self.config_node['trace']
1128 normalize_byte_order_prop(self._trace_type_node, self._trace_byte_order_prop_key)
1129
1130 for parent_node, key in self._trace_type_props():
1131 node = parent_node[key]
1132
1133 if node is None:
1134 # a `None` property is equivalent to not having it
1135 del parent_node[key]
1136 continue
1137
1138 if key == 'class' and type(node) is str:
1139 # field type class aliases
1140 if node in ['uint', 'unsigned-int']:
1141 parent_node[key] = 'unsigned-integer'
1142 elif node in ['sint', 'signed-int']:
1143 parent_node[key] = 'signed-integer'
1144 elif node in ['uenum', 'unsigned-enum']:
1145 parent_node[key] = 'unsigned-enumeration'
1146 elif node in ['senum', 'signed-enum']:
1147 parent_node[key] = 'signed-enumeration'
1148 elif node == 'str':
1149 parent_node[key] = 'string'
1150 elif node == 'struct':
1151 parent_node[key] = 'structure'
1152 elif key == 'preferred-display-base' and type(node) is str:
1153 # display base aliases
1154 if node == 'bin':
1155 parent_node[key] = 'binary'
1156 elif node == 'oct':
1157 parent_node[key] = 'octal'
1158 elif node == 'dec':
1159 parent_node[key] = 'decimal'
1160 elif node == 'hex':
1161 parent_node[key] = 'hexadecimal'
1162
1163 prop_name = 'environment'
1164
1165 if prop_name in trace_node:
1166 node = trace_node[prop_name]
1167
1168 if node is None:
1169 del trace_node[prop_name]
1170
1171 # Sets the parser's trace byte order.
1172 def _set_trace_byte_order(self):
1173 self._trace_byte_order_node = self._trace_type_node[self._trace_byte_order_prop_key]
1174 self._trace_byte_order = self._byte_order_from_node(self._trace_byte_order_node)
1175
1176 # Processes the inclusions of the event record type node
1177 # `ert_node`, returning the effective node.
1178 def _process_ert_node_include(self, ert_node: _MapNode) -> _MapNode:
1179 # Make sure the event record type node is valid for the
1180 # inclusion processing stage.
1181 self._schema_validator.validate(ert_node, 'config/3/ert-pre-include')
1182
1183 # process inclusions
1184 return self._process_node_include(ert_node, self._process_ert_node_include)
1185
1186 # Processes the inclusions of the data stream type node `dst_node`,
1187 # returning the effective node.
1188 def _process_dst_node_include(self, dst_node: _MapNode) -> _MapNode:
1189 def process_children_include(dst_node: _MapNode):
1190 prop_name = 'event-record-types'
1191
1192 if prop_name in dst_node:
1193 erts_node = dst_node[prop_name]
1194
1195 for key in list(erts_node):
1196 erts_node[key] = self._process_ert_node_include(erts_node[key])
1197
1198 # Make sure the data stream type node is valid for the inclusion
1199 # processing stage.
1200 self._schema_validator.validate(dst_node, 'config/3/dst-pre-include')
1201
1202 # process inclusions
1203 return self._process_node_include(dst_node, self._process_dst_node_include,
1204 process_children_include)
1205
1206 # Processes the inclusions of the clock type node `clk_type_node`,
1207 # returning the effective node.
1208 def _process_clk_type_node_include(self, clk_type_node: _MapNode) -> _MapNode:
1209 # Make sure the clock type node is valid for the inclusion
1210 # processing stage.
1211 self._schema_validator.validate(clk_type_node, 'config/3/clock-type-pre-include')
1212
1213 # process inclusions
1214 return self._process_node_include(clk_type_node, self._process_clk_type_node_include)
1215
1216 # Processes the inclusions of the trace type node `trace_type_node`,
1217 # returning the effective node.
1218 def _process_trace_type_node_include(self, trace_type_node: _MapNode) -> _MapNode:
1219 def process_children_include(trace_type_node: _MapNode):
1220 prop_name = 'clock-types'
1221
1222 if prop_name in trace_type_node:
1223 clk_types_node = trace_type_node[prop_name]
1224
1225 for key in list(clk_types_node):
1226 clk_types_node[key] = self._process_clk_type_node_include(clk_types_node[key])
1227
1228 prop_name = 'data-stream-types'
1229
1230 if prop_name in trace_type_node:
1231 dsts_node = trace_type_node[prop_name]
1232
1233 for key in list(dsts_node):
1234 dsts_node[key] = self._process_dst_node_include(dsts_node[key])
1235
1236 # Make sure the trace type node is valid for the inclusion
1237 # processing stage.
1238 self._schema_validator.validate(trace_type_node, 'config/3/trace-type-pre-include')
1239
1240 # process inclusions
1241 return self._process_node_include(trace_type_node, self._process_trace_type_node_include,
1242 process_children_include)
1243
1244 # Processes the inclusions of the trace node `trace_node`, returning
1245 # the effective node.
1246 def _process_trace_node_include(self, trace_node: _MapNode) -> _MapNode:
1247 def process_children_include(trace_node: _MapNode):
1248 prop_name = 'type'
1249
1250 if prop_name in trace_node:
1251 trace_node[prop_name] = self._process_trace_type_node_include(trace_node[prop_name])
1252
1253 # Make sure the trace node is valid for the inclusion processing
1254 # stage.
1255 self._schema_validator.validate(trace_node, 'config/3/trace-pre-include')
1256
1257 # process inclusions
1258 return self._process_node_include(trace_node, self._process_trace_node_include,
1259 process_children_include)
1260
1261 # Processes the inclusions of the configuration node, modifying it
1262 # during the process.
1263 def _process_config_includes(self):
1264 # Process inclusions in this order:
1265 #
1266 # 1. Clock type node and event record type nodes (the order
1267 # between those is not important).
1268 #
1269 # 2. Data stream type nodes.
1270 #
1271 # 3. Trace type node.
1272 #
1273 # 4. Trace node.
1274 #
1275 # This is because:
1276 #
1277 # * A trace node can include a trace type node, clock type
1278 # nodes, data stream type nodes, and event record type nodes.
1279 #
1280 # * A trace type node can include clock type nodes, data stream
1281 # type nodes, and event record type nodes.
1282 #
1283 # * A data stream type node can include event record type nodes.
1284 #
1285 # First, make sure the configuration node itself is valid for
1286 # the inclusion processing stage.
1287 self._schema_validator.validate(self.config_node, 'config/3/config-pre-include')
1288
1289 # Process trace node inclusions.
1290 #
1291 # self._process_trace_node_include() returns a new (or the same)
1292 # trace node without any `$include` property in it, recursively.
1293 self.config_node['trace'] = self._process_trace_node_include(self.config_node['trace'])
1294
1295 def _parse(self):
1296 # process configuration node inclusions
1297 self._process_config_includes()
1298
1299 # Expand field type nodes.
1300 #
1301 # This process:
1302 #
1303 # 1. Replaces field type aliases with "effective" field type
1304 # nodes, recursively.
1305 #
1306 # After this step, the `$field-type-aliases` property of the
1307 # trace type node is gone.
1308 #
1309 # 2. Applies inheritance, following the `$inherit` properties.
1310 #
1311 # After this step, field type nodes do not contain `$inherit`
1312 # properties.
1313 #
1314 # This is done blindly, in that the process _doesn't_ validate
1315 # field type nodes at this point.
1316 self._expand_fts()
1317
1318 # Substitute log level aliases.
1319 #
1320 # This process:
1321 #
1322 # 1. Replaces log level aliases in event record type nodes with
1323 # their numeric equivalents as found in the
1324 # `$log-level-aliases` property of the trace type node.
1325 #
1326 # 2. Removes the `$log-level-aliases` property from the trace
1327 # type node.
1328 self._sub_log_level_aliases()
1329
1330 # At this point, the configuration node must be valid as an
1331 # effective configuration node.
1332 self._schema_validator.validate(self.config_node, 'config/3/config')
1333
1334 # Set the trace byte order property key.
1335 self._set_trace_byte_order_prop_key()
1336
1337 # Normalize properties.
1338 #
1339 # This process removes `None` properties and chooses specific
1340 # enumerators when aliases exist (for example, `big-endian`
1341 # instead of `be`).
1342 #
1343 # The goal of this is that, if the user then gets this parser's
1344 # `config_node` property, it has a normal and very readable
1345 # form.
1346 #
1347 # It also makes _create_config() easier to implement because it
1348 # doesn't need to check for `None` nodes or enumerator aliases.
1349 self._normalize_props()
1350
1351 # Set the trace byte order.
1352 self._set_trace_byte_order()
1353
1354 # Create a barectf configuration object from the configuration
1355 # node.
1356 self._create_config()
1357
    # Configuration object (`barectf.config.Configuration`) created by
    # this parser.
    @property
    def config(self) -> barectf_config.Configuration:
        return self._config
1361
    # Configuration node of this parser (the `config_node` property of
    # its root node, viewed as a barectf 3 configuration node).
    @property
    def config_node(self) -> _MapNode:
        return typing.cast(barectf_config_parse_common._ConfigNodeV3, self._root_node).config_node