datastore: Add a classic history tree implementation
statesystem/org.eclipse.tracecompass.datastore.core/src/org/eclipse/tracecompass/internal/provisional/datastore/core/historytree/HTNode.java
/*******************************************************************************
 * Copyright (c) 2017 École Polytechnique de Montréal
 *
 * All rights reserved. This program and the accompanying materials are
 * made available under the terms of the Eclipse Public License v1.0 which
 * accompanies this distribution, and is available at
 * http://www.eclipse.org/legal/epl-v10.html
 *******************************************************************************/

package org.eclipse.tracecompass.internal.provisional.datastore.core.historytree;

import java.io.IOException;
import java.io.PrintStream;
import java.nio.ByteBuffer;
import java.nio.ByteOrder;
import java.nio.channels.FileChannel;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collection;
import java.util.Collections;
import java.util.Comparator;
import java.util.List;
import java.util.Objects;
import java.util.concurrent.locks.ReentrantReadWriteLock;
import java.util.function.Predicate;
import java.util.stream.Collectors;

import org.eclipse.jdt.annotation.NonNull;
import org.eclipse.jdt.annotation.Nullable;
import org.eclipse.tracecompass.internal.provisional.datastore.core.condition.RangeCondition;
import org.eclipse.tracecompass.internal.provisional.datastore.core.exceptions.RangeException;
import org.eclipse.tracecompass.internal.provisional.datastore.core.historytree.AbstractHistoryTree.IHTNodeFactory;
import org.eclipse.tracecompass.internal.provisional.datastore.core.interval.IHTInterval;
import org.eclipse.tracecompass.internal.provisional.datastore.core.interval.IHTIntervalReader;
import org.eclipse.tracecompass.internal.provisional.datastore.core.serialization.ISafeByteBufferReader;
import org.eclipse.tracecompass.internal.provisional.datastore.core.serialization.SafeByteBufferFactory;

import com.google.common.annotations.VisibleForTesting;
import com.google.common.collect.ImmutableList;
import com.google.common.collect.Iterables;

/**
 * The base class for all the types of nodes that go in the History Tree.
 *
 * @author Alexandre Montplaisir
 * @author Geneviève Bastien
 * @param <E>
 *            The type of objects that will be saved in the tree
 */
public class HTNode<E extends IHTInterval> implements IHTNode<E> {

    // ------------------------------------------------------------------------
    // Class fields
    // ------------------------------------------------------------------------

    /**
     * Size of the basic node header. This is backward-compatible with previous
     * state system history trees.
     *
     * <pre>
     *  1 - byte (the type of node)
     * 16 - 2x long (start time, end time)
     * 12 - 3x int (seq number, parent seq number, interval count)
     *  1 - byte (done or not)
     * </pre>
     *
     * for a total of 30 bytes.
     */
    @VisibleForTesting
    public static final int COMMON_HEADER_SIZE = Byte.BYTES
            + 2 * Long.BYTES
            + 3 * Integer.BYTES
            + Byte.BYTES;

    // ------------------------------------------------------------------------
    // Attributes
    // ------------------------------------------------------------------------

    /**
     * Default implementation of the interval comparator, which sorts first by
     * end times, then by start times
     */
    private final Comparator<E> fDefaultIntervalComparator = Comparator
            .comparingLong(E::getEnd)
            .thenComparingLong(E::getStart);

    /* Node configuration, defined by the history tree */
    private final int fBlockSize;
    private final int fMaxChildren;

    /* Time range of this node */
    private final long fNodeStart;
    private long fNodeEnd;

    /* Sequence number = position in the node section of the file */
    private final int fSequenceNumber;
    private int fParentSequenceNumber; /* = -1 if this node is the root node */

    /* Sum of bytes of all objects in the node */
    private int fSizeOfContentSection;

    /*
     * True if this node was saved on disk (meaning its end time is now fixed)
     */
    private volatile boolean fIsOnDisk;

    /* List containing all the intervals contained in this node */
    private final List<E> fIntervals;

    /* Lock used to protect the accesses to objects, nodeEnd and such */
    private final ReentrantReadWriteLock fRwl = new ReentrantReadWriteLock(false);

    /* Object containing extra data for core nodes */
    private final @Nullable CoreNodeData fExtraData;

    /**
     * A class that encapsulates data about children of this node. This class
     * will be constructed by the core node and contains the extra header data,
     * methods to read/write the header data, etc.
     *
     * This base class for CORE nodes just saves the children, i.e. their
     * sequence numbers.
     *
     * @author Geneviève Bastien
     */
    protected static class CoreNodeData {

        /** Back-reference to the node class */
        private final HTNode<?> fNode;

        /**
         * Seq. numbers of the children nodes (size = max number of nodes,
         * determined by configuration)
         */
        private final int[] fChildren;

        /** Nb. of children this node has */
        private int fNbChildren;

        /**
         * Constructor
         *
         * @param node
         *            The node containing this extra data.
         */
        public CoreNodeData(HTNode<?> node) {
            fNode = node;
            fNbChildren = 0;
            /*
             * We instantiate the following array at full size right away, since
             * we want to reserve that space in the node's header. "fNbChildren"
             * will tell us how many relevant entries there are in those tables.
             */
            fChildren = new int[fNode.fMaxChildren];
        }

        /**
         * Return this core data's full node. To be used by subclasses.
         *
         * @return The node
         */
        protected HTNode<?> getNode() {
            return fNode;
        }

        /**
         * Read the specific header for this extra node data
         *
         * @param buffer
         *            The byte buffer in which to read
         */
        protected void readSpecificHeader(ByteBuffer buffer) {
            fNode.takeWriteLock();
            try {
                int size = fNode.getMaxChildren();

                fNbChildren = buffer.getInt();

                for (int i = 0; i < fNbChildren; i++) {
                    fChildren[i] = buffer.getInt();
                }
                for (int i = fNbChildren; i < size; i++) {
                    buffer.getInt();
                }
            } finally {
                fNode.releaseWriteLock();
            }
        }

        /**
         * Write the specific header for this extra node data
         *
         * @param buffer
         *            The byte buffer in which to write
         */
        protected void writeSpecificHeader(ByteBuffer buffer) {
            fNode.takeReadLock();
            try {
                buffer.putInt(fNbChildren);

                /* Write the "children's seq number" array */
                for (int i = 0; i < fNbChildren; i++) {
                    buffer.putInt(fChildren[i]);
                }
                for (int i = fNbChildren; i < fNode.getMaxChildren(); i++) {
                    buffer.putInt(0);
                }
            } finally {
                fNode.releaseReadLock();
            }
        }
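
        /*
         * Illustration (not from the original source): with maxChildren = 4
         * and two children whose sequence numbers are 7 and 12, the header
         * written above is the int sequence [2, 7, 12, 0, 0]: the child
         * count, the used slots, then zero padding so the section always
         * fills the space reserved by getSpecificHeaderSize().
         */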

        /**
         * Get the number of children
         *
         * @return The number of children
         */
        public int getNbChildren() {
            fNode.takeReadLock();
            try {
                return fNbChildren;
            } finally {
                fNode.releaseReadLock();
            }
        }

        /**
         * Get the child sequence number at an index
         *
         * @param index
         *            The index of the child to get
         * @return The sequence number of the child node
         */
        public int getChild(int index) {
            fNode.takeReadLock();
            try {
                if (index >= fNbChildren) {
                    throw new IndexOutOfBoundsException("The child at index " + index + " does not exist"); //$NON-NLS-1$ //$NON-NLS-2$
                }
                return fChildren[index];
            } finally {
                fNode.releaseReadLock();
            }
        }

        /**
         * Get the sequence number of the last child node of this one
         *
         * @return The sequence number of the last child
         */
        public int getLatestChild() {
            fNode.takeReadLock();
            try {
                return fChildren[fNbChildren - 1];
            } finally {
                fNode.releaseReadLock();
            }
        }

        /**
         * Add a new child to this node's data
         *
         * @param childNode
         *            The child node to add
         */
        public void linkNewChild(IHTNode<?> childNode) {
            fNode.takeWriteLock();
            try {
                if (fNbChildren >= fNode.getMaxChildren()) {
                    throw new IllegalStateException("Asked to link another child but parent already has maximum number of children"); //$NON-NLS-1$
                }

                fChildren[fNbChildren] = childNode.getSequenceNumber();
                fNbChildren++;

            } finally {
                fNode.releaseWriteLock();
            }
        }

        /**
         * Get the size of the extra header space necessary for this node's
         * extra data
         *
         * @return The extra header size
         */
        protected int getSpecificHeaderSize() {
            int maxChildren = fNode.getMaxChildren();
            int specificSize = Integer.BYTES /* 1x int (nbChildren) */

                    /* MAX_NB * int ('children' table) */
                    + Integer.BYTES * maxChildren;

            return specificSize;
        }
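
        /*
         * For example (illustrative figures): with maxChildren = 50, the
         * core-specific header takes 4 + 4 * 50 = 204 bytes, on top of the
         * 30-byte COMMON_HEADER_SIZE.
         */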

        @Override
        public String toString() {
            /* Only used for debugging, shouldn't be externalized */
            return String.format("Core Node, %d children %s", //$NON-NLS-1$
                    fNbChildren, Arrays.toString(Arrays.copyOf(fChildren, fNbChildren)));
        }

        /**
         * Inner method to select the sequence numbers for the children of the
         * current node that intersect the given time condition. Useful for
         * moving down the tree.
         *
         * @param timeCondition
         *            The time-based RangeCondition to choose which children
         *            match.
         * @return Collection of sequence numbers of the child nodes that
         *         intersect the time condition; a non-null empty collection if
         *         this is a Leaf Node
         */
        public final Collection<Integer> selectNextChildren(RangeCondition<Long> timeCondition) {
            fNode.takeReadLock();
            try {
                return selectNextIndices(timeCondition).stream()
                        .map(i -> fChildren[i])
                        .collect(Collectors.toList());
            } finally {
                fNode.releaseReadLock();
            }
        }

        /**
         * Inner method to select the indices of the children of the current
         * node that intersect the given time condition. Useful for moving down
         * the tree.
         *
         * This is the method that child implementations of this node should
         * override. They may call
         * <code>super.selectNextIndices(timeCondition)</code> to get access to
         * the subset of indices that match the parent's condition and add their
         * own filters. When this method is called, a read lock is already held
         * on the node.
         *
         * @param timeCondition
         *            The time-based RangeCondition to choose which children
         *            match.
         * @return Collection of the indices of the child nodes that intersect
         *         the time condition
         */
        protected Collection<Integer> selectNextIndices(RangeCondition<Long> timeCondition) {
            /* By default, all children are returned */
            List<Integer> childList = new ArrayList<>();
            for (int i = 0; i < fNbChildren; i++) {
                childList.add(i);
            }

            return childList;
        }
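
        /*
         * A minimal sketch of a subclass override (not part of this class;
         * the per-child bound arrays fChildStarts/fChildEnds are hypothetical
         * fields a subclass would read from its own specific header):
         *
         * @Override
         * protected Collection<Integer> selectNextIndices(RangeCondition<Long> timeCondition) {
         *     List<Integer> matching = new ArrayList<>();
         *     for (int i : super.selectNextIndices(timeCondition)) {
         *         // Keep only children whose time range can intersect the condition
         *         if (timeCondition.intersects(fChildStarts[i], fChildEnds[i])) {
         *             matching.add(i);
         *         }
         *     }
         *     return matching;
         * }
         */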

        @Override
        public int hashCode() {
            /*
             * Do not consider "fNode", since the node's equals/hashCode already
             * consider us.
             */
            return Objects.hash(fNbChildren, Arrays.hashCode(fChildren));
        }

        @Override
        public boolean equals(@Nullable Object obj) {
            if (obj == null) {
                return false;
            }
            if (obj.getClass() != getClass()) {
                return false;
            }
            CoreNodeData other = (CoreNodeData) obj;
            return (fNbChildren == other.fNbChildren
                    && Arrays.equals(fChildren, other.fChildren));
        }

    }

    /**
     * Constructor
     *
     * @param type
     *            The type of this node
     * @param blockSize
     *            The size (in bytes) of a serialized node on disk
     * @param maxChildren
     *            The maximum allowed number of children per node
     * @param seqNumber
     *            The (unique) sequence number assigned to this particular node
     * @param parentSeqNumber
     *            The sequence number of this node's parent node
     * @param start
     *            The earliest timestamp stored in this node
     */
    public HTNode(NodeType type, int blockSize, int maxChildren, int seqNumber,
            int parentSeqNumber, long start) {
        fBlockSize = blockSize;
        fMaxChildren = maxChildren;

        fNodeStart = start;
        fSequenceNumber = seqNumber;
        fParentSequenceNumber = parentSeqNumber;

        fSizeOfContentSection = 0;
        fIsOnDisk = false;
        fIntervals = new ArrayList<>();

        fExtraData = createNodeExtraData(type);
    }
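
    /*
     * Construction sketch (illustrative; MyInterval is a hypothetical
     * IHTInterval implementation, and the 4096-byte block / 50-children
     * configuration is arbitrary). A root node has no parent, hence -1:
     *
     * HTNode<MyInterval> root =
     *         new HTNode<>(NodeType.CORE, 4096, 50, 0, -1, treeStartTime);
     */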

    /**
     * Reader factory method. Build a Node object (of the right type) by reading
     * a block in the file.
     *
     * @param blockSize
     *            The size (in bytes) of a serialized node on disk
     * @param maxChildren
     *            The maximum allowed number of children per node
     * @param fc
     *            FileChannel to the history file, ALREADY SEEKED at the start
     *            of the node.
     * @param objectReader
     *            The reader to read serialized node objects
     * @param nodeFactory
     *            The factory to create the nodes for this tree
     * @return The node object
     * @throws IOException
     *             If there was an error reading from the file channel
     */
    public static final <E extends IHTInterval, N extends HTNode<E>> N readNode(
            int blockSize,
            int maxChildren,
            FileChannel fc,
            IHTIntervalReader<E> objectReader,
            IHTNodeFactory<E, N> nodeFactory) throws IOException {

        N newNode;

        ByteBuffer buffer = ByteBuffer.allocate(blockSize);
        buffer.order(ByteOrder.LITTLE_ENDIAN);
        buffer.clear();
        int res = fc.read(buffer);
        if (res != blockSize) {
            throw new IOException("The block for the HTNode is not the right size: " + res); //$NON-NLS-1$
        }
        buffer.flip();

        /* Read the common header part */
        byte typeByte = buffer.get();
        NodeType type = NodeType.fromByte(typeByte);
        long start = buffer.getLong();
        long end = buffer.getLong();
        int seqNb = buffer.getInt();
        int parentSeqNb = buffer.getInt();
        int intervalCount = buffer.getInt();
        buffer.get(); // TODO Used to be "isDone", to be removed from the header

        /* Now the rest of the header depends on the node type */
        switch (type) {
        case CORE:
        case LEAF:
            newNode = nodeFactory.createNode(type, blockSize, maxChildren, seqNb, parentSeqNb, start);
            newNode.readSpecificHeader(buffer);
            break;

        default:
            /* Unrecognized node type */
            throw new IOException();
        }

        /*
         * At this point, we should be done reading the header and 'buffer'
         * should only have the intervals left
         */
        ISafeByteBufferReader readBuffer = SafeByteBufferFactory.wrapReader(buffer, res - buffer.position());
        for (int i = 0; i < intervalCount; i++) {
            E interval = objectReader.readInterval(readBuffer);
            newNode.add(interval);
        }

        /* Assign the node's other information we have read previously */
        newNode.setNodeEnd(end);
        newNode.setOnDisk();

        return newNode;
    }
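
    /*
     * Caller-side sketch (illustrative only; treeHeaderSize, seqNumber and
     * the MyInterval reader/factory are assumptions, not defined here). The
     * channel must already be positioned at the node's block:
     *
     * fc.position(treeHeaderSize + (long) seqNumber * blockSize);
     * HTNode<MyInterval> node = HTNode.readNode(blockSize, maxChildren, fc,
     *         myIntervalReader, myNodeFactory);
     */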

    /**
     * Create a node's extra data object for the given node type
     *
     * @param type
     *            The type of node
     * @return The node's extra data object, or <code>null</code> if there is
     *         none
     */
    protected @Nullable CoreNodeData createNodeExtraData(NodeType type) {
        if (type == NodeType.CORE) {
            return new CoreNodeData(this);
        }
        return null;
    }

    @Override
    public final void writeSelf(FileChannel fc) throws IOException {
        /*
         * Yes, we are taking the *read* lock here, because we are reading the
         * information in the node to write it to disk.
         */
        fRwl.readLock().lock();
        try {
            final int blockSize = getBlockSize();

            ByteBuffer buffer = ByteBuffer.allocate(blockSize);
            buffer.order(ByteOrder.LITTLE_ENDIAN);
            buffer.clear();

            /* Write the common header part */
            buffer.put(getNodeType().toByte());
            buffer.putLong(fNodeStart);
            buffer.putLong(fNodeEnd);
            buffer.putInt(fSequenceNumber);
            buffer.putInt(fParentSequenceNumber);
            buffer.putInt(fIntervals.size());
            buffer.put((byte) 1); // TODO Used to be "isDone", to be removed
                                  // from header

            /* Now call the inner method to write the specific header part */
            writeSpecificHeader(buffer);

            /* Back to us, we write the intervals */
            fIntervals.forEach(i -> i.writeSegment(SafeByteBufferFactory.wrapWriter(buffer, i.getSizeOnDisk())));
            if (blockSize - buffer.position() != getNodeFreeSpace()) {
                throw new IllegalStateException("Wrong free space: Actual: " + (blockSize - buffer.position()) + ", Expected: " + getNodeFreeSpace()); //$NON-NLS-1$ //$NON-NLS-2$
            }
            /*
             * Fill the rest with zeros
             */
            while (buffer.position() < blockSize) {
                buffer.put((byte) 0);
            }

            /* Finally, write everything in the Buffer to disk */
            buffer.flip();
            int res = fc.write(buffer);
            if (res != blockSize) {
                throw new IllegalStateException("Wrong size of block written: Actual: " + res + ", Expected: " + blockSize); //$NON-NLS-1$ //$NON-NLS-2$
            }

        } finally {
            fRwl.readLock().unlock();
        }
        fIsOnDisk = true;
    }

    // ------------------------------------------------------------------------
    // Accessors
    // ------------------------------------------------------------------------

    /**
     * Get this node's block size.
     *
     * @return The block size
     */
    protected final int getBlockSize() {
        return fBlockSize;
    }

    /**
     * Get this node's maximum number of children.
     *
     * @return The maximum number of children
     */
    protected final int getMaxChildren() {
        return fMaxChildren;
    }

    /**
     * Get the interval comparator. Intervals will always be stored sorted
     * according to this comparator. This can be used by insertion or retrieval
     * algorithms.
     *
     * Sub-classes may override this to change or specify the interval
     * comparator.
     *
     * @return The way intervals are to be sorted in this node
     */
    protected Comparator<E> getIntervalComparator() {
        return fDefaultIntervalComparator;
    }

    @Override
    public long getNodeStart() {
        return fNodeStart;
    }

    @Override
    public long getNodeEnd() {
        if (fIsOnDisk) {
            return fNodeEnd;
        }
        return Long.MAX_VALUE;
    }

    @Override
    public int getSequenceNumber() {
        return fSequenceNumber;
    }

    @Override
    public int getParentSequenceNumber() {
        return fParentSequenceNumber;
    }

    @Override
    public void setParentSequenceNumber(int newParent) {
        fParentSequenceNumber = newParent;
    }

    @Override
    public boolean isOnDisk() {
        return fIsOnDisk;
    }

    /**
     * Get the node's extra data.
     *
     * @return The node extra data
     */
    protected @Nullable CoreNodeData getCoreNodeData() {
        return fExtraData;
    }

    /**
     * Get the list of objects in this node. This list is immutable. All objects
     * must be inserted through the {@link #add(IHTInterval)} method
     *
     * @return The list of intervals in this node
     */
    protected List<E> getIntervals() {
        return ImmutableList.copyOf(fIntervals);
    }

    /**
     * Set this node's end time. Called by the reader factory.
     *
     * @param end
     *            The end time of the node
     */
    protected void setNodeEnd(long end) {
        fNodeEnd = end;
    }

    /**
     * Set this node to be on disk. Called by the reader factory.
     */
    protected void setOnDisk() {
        fIsOnDisk = true;
    }

    @Override
    public void add(E newInterval) {
        fRwl.writeLock().lock();
        try {
            /*
             * Just in case, should be checked before even calling this function
             */
            int objSize = newInterval.getSizeOnDisk();
            if (objSize > getNodeFreeSpace()) {
                throw new IllegalArgumentException("The interval to insert (" + objSize + ") is larger than available space (" + getNodeFreeSpace() + ")"); //$NON-NLS-1$ //$NON-NLS-2$ //$NON-NLS-3$
            }

            int insertPoint = Collections.binarySearch(fIntervals, newInterval, getIntervalComparator());
            insertPoint = (insertPoint >= 0 ? insertPoint : -insertPoint - 1);
            fIntervals.add(insertPoint, newInterval);

            fSizeOfContentSection += objSize;

        } finally {
            fRwl.writeLock().unlock();
        }
    }
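
    /*
     * Ordering example (illustrative [start, end] values): with the default
     * comparator (end time, then start time), adding [0, 10], [5, 8] and
     * [2, 8] in any order stores them as [2, 8], [5, 8], [0, 10]. This is
     * why closeThisNode() only needs to check the last interval's end time.
     */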

    @Override
    public Iterable<E> getMatchingIntervals(RangeCondition<Long> timeCondition,
            Predicate<E> extraPredicate) {

        // TODO Use getIntervalComparator() to restrict the dataset further
        // TODO Benchmark using/returning streams instead of iterables

        if (isOnDisk()) {
            @NonNull Iterable<E> ret = fIntervals;
            ret = Iterables.filter(ret, interval -> timeCondition.intersects(interval.getStart(), interval.getEnd()));
            ret = Iterables.filter(ret, interval -> extraPredicate.test(interval));
            return ret;
        }

        takeReadLock();
        try {
            return fIntervals.stream()
                    .filter(interval -> timeCondition.intersects(interval.getStart(), interval.getEnd()))
                    .filter(extraPredicate)
                    /*
                     * Because this class works with read locks, we can't
                     * return a lazy stream unfortunately. Room for improvement?
                     */
                    .collect(Collectors.toList());

        } finally {
            releaseReadLock();
        }
    }
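
    /*
     * Query sketch (illustrative only; "forContinuousRange" stands in for
     * whichever factory the concrete RangeCondition implementations provide,
     * and t1/t2 are arbitrary bounds):
     *
     * RangeCondition<Long> cond = RangeCondition.forContinuousRange(t1, t2);
     * for (MyInterval interval : node.getMatchingIntervals(cond, i -> true)) {
     *     process(interval);
     * }
     */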

    @Override
    public void closeThisNode(long endtime) {
        fRwl.writeLock().lock();
        try {
            /**
             * FIXME: was assert (endtime >= fNodeStart); but that exception is
             * reached with an empty node that has start time endtime + 1
             */
            // if (endtime < fNodeStart) {
            //     throw new IllegalArgumentException("Endtime " + endtime + " cannot be lower than start time " + fNodeStart);
            // }

            if (!fIntervals.isEmpty()) {
                /*
                 * Make sure there are no intervals in this node with their
                 * EndTime > the one requested. Only need to check the last one
                 * since they are sorted
                 */
                if (endtime < fIntervals.get(fIntervals.size() - 1).getEnd()) {
                    throw new IllegalArgumentException("Closing end time should be greater than or equal to the end time of the objects of this node"); //$NON-NLS-1$
                }
            }

            fNodeEnd = endtime;
        } finally {
            fRwl.writeLock().unlock();
        }
    }

    @Override
    public final int getTotalHeaderSize() {
        return COMMON_HEADER_SIZE + getSpecificHeaderSize();
    }

    /**
     * @return The offset, within the node, where the Data section ends
     */
    private int getDataSectionEndOffset() {
        return getTotalHeaderSize() + fSizeOfContentSection;
    }

    @Override
    public int getNodeFreeSpace() {
        fRwl.readLock().lock();
        try {
            int ret = getBlockSize() - getDataSectionEndOffset();
            return ret;
        } finally {
            fRwl.readLock().unlock();
        }
    }

    @Override
    public long getNodeUsagePercent() {
        fRwl.readLock().lock();
        try {
            final int blockSize = getBlockSize();
            float freePercent = (float) getNodeFreeSpace()
                    / (float) (blockSize - getTotalHeaderSize())
                    * 100F;
            return (long) (100L - freePercent);

        } finally {
            fRwl.readLock().unlock();
        }
    }
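
    /*
     * Worked example (illustrative figures): with a 4096-byte block, a
     * 234-byte total header and 1931 bytes of free space, the payload area is
     * 4096 - 234 = 3862 bytes, freePercent = 1931 / 3862 * 100 = 50.0, and
     * getNodeUsagePercent() returns 50.
     */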

    @Override
    public NodeType getNodeType() {
        @Nullable
        CoreNodeData extraData = getCoreNodeData();
        if (extraData == null) {
            return NodeType.LEAF;
        }
        return NodeType.CORE;
    }

    /**
     * Return the specific header size of this node. This means the size
     * occupied by the type-specific section of the header (not counting the
     * common part).
     *
     * @return The specific header size
     */
    protected int getSpecificHeaderSize() {
        CoreNodeData extraData = fExtraData;
        if (extraData != null) {
            return extraData.getSpecificHeaderSize();
        }
        return 0;
    }

    /**
     * Read the specific header for this node
     *
     * @param buffer
     *            The buffer to read from
     */
    protected void readSpecificHeader(ByteBuffer buffer) {
        CoreNodeData extraData = fExtraData;
        if (extraData != null) {
            extraData.readSpecificHeader(buffer);
        }
    }

    /**
     * Write the type-specific part of the header in a byte buffer.
     *
     * @param buffer
     *            The buffer to write to. It should already be at the correct
     *            position.
     */
    protected void writeSpecificHeader(ByteBuffer buffer) {
        CoreNodeData extraData = fExtraData;
        if (extraData != null) {
            extraData.writeSpecificHeader(buffer);
        }
    }

    /**
     * Node-type-specific toString method. Used for debugging.
     *
     * @return A string representing the node
     */
    protected String toStringSpecific() {
        return ""; //$NON-NLS-1$
    }

    @Override
    public boolean isEmpty() {
        return fIntervals.isEmpty();
    }

    // -------------------------------------------
    // Core node methods
    // -------------------------------------------

    @Override
    public int getNbChildren() {
        CoreNodeData extraData = fExtraData;
        if (extraData != null) {
            return extraData.getNbChildren();
        }
        return IHTNode.super.getNbChildren();
    }

    @Override
    public int getChild(int index) {
        CoreNodeData extraData = fExtraData;
        if (extraData != null) {
            return extraData.getChild(index);
        }
        return IHTNode.super.getChild(index);
    }

    @Override
    public int getLatestChild() {
        CoreNodeData extraData = fExtraData;
        if (extraData != null) {
            return extraData.getLatestChild();
        }
        return IHTNode.super.getLatestChild();
    }

    @Override
    public void linkNewChild(@NonNull IHTNode<E> childNode) {
        CoreNodeData extraData = fExtraData;
        if (extraData != null) {
            extraData.linkNewChild(childNode);
            return;
        }
        IHTNode.super.linkNewChild(childNode);
    }

    @Override
    public Collection<Integer> selectNextChildren(RangeCondition<Long> timeCondition)
            throws RangeException {
        CoreNodeData extraData = fExtraData;
        if (extraData != null) {
            return extraData.selectNextChildren(timeCondition);
        }
        return IHTNode.super.selectNextChildren(timeCondition);
    }

    // -----------------------------------------
    // Locking
    // -----------------------------------------

    /**
     * Takes a read lock on the fields of this class. Each call to this method
     * should be followed by a {@link HTNode#releaseReadLock()}, in a
     * try-finally clause
     */
    protected void takeReadLock() {
        fRwl.readLock().lock();
    }

    /**
     * Releases a read lock on the fields of this class. A call to this method
     * should have been preceded by a call to {@link HTNode#takeReadLock()}
     */
    protected void releaseReadLock() {
        fRwl.readLock().unlock();
    }

    /**
     * Takes a write lock on the fields of this class. Each call to this method
     * should be followed by a {@link HTNode#releaseWriteLock()}, in a
     * try-finally clause
     */
    protected void takeWriteLock() {
        fRwl.writeLock().lock();
    }

    /**
     * Releases a write lock on the fields of this class. A call to this method
     * should have been preceded by a call to {@link HTNode#takeWriteLock()}
     */
    protected void releaseWriteLock() {
        fRwl.writeLock().unlock();
    }
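
    /*
     * The intended usage pattern for these helpers (sketch; doWork() stands
     * in for any access to the node's fields):
     *
     * takeReadLock();
     * try {
     *     doWork();
     * } finally {
     *     releaseReadLock();
     * }
     */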

    // -----------------------------------------
    // Object methods
    // -----------------------------------------

    @SuppressWarnings("nls")
    @Override
    public String toString() {
        /* Only used for debugging, shouldn't be externalized */
        return String.format("Node #%d, %s, %s, %d intervals (%d%% used), [%d - %s]",
                fSequenceNumber,
                (fParentSequenceNumber == -1) ? "Root" : "Parent #" + fParentSequenceNumber,
                toStringSpecific(),
                fIntervals.size(),
                getNodeUsagePercent(),
                fNodeStart,
                (fIsOnDisk || fNodeEnd != 0) ? fNodeEnd : "...");
    }

    @Override
    public int hashCode() {
        return Objects.hash(fBlockSize, fMaxChildren, fNodeStart, fNodeEnd, fSequenceNumber, fParentSequenceNumber, fExtraData);
    }

    @Override
    public boolean equals(@Nullable Object obj) {
        if (obj == null) {
            return false;
        }
        if (obj.getClass() != getClass()) {
            return false;
        }
        HTNode<? extends IHTInterval> other = (HTNode<?>) obj;
        return (fBlockSize == other.fBlockSize &&
                fMaxChildren == other.fMaxChildren &&
                fNodeStart == other.fNodeStart &&
                fNodeEnd == other.fNodeEnd &&
                fSequenceNumber == other.fSequenceNumber &&
                fParentSequenceNumber == other.fParentSequenceNumber &&
                Objects.equals(fExtraData, other.fExtraData));
    }

    // -----------------------------------------
    // Test-specific methods
    // -----------------------------------------

    /**
     * Print all current intervals into the given writer.
     *
     * @param writer
     *            The writer to write to
     */
    @VisibleForTesting
    public void debugPrintIntervals(PrintStream writer) {
        getIntervals().forEach(writer::println);
    }
}