* <p>
*/
@RunWith(Suite.class)
-@Suite.SuiteClasses({ ActivatorTest.class,
- org.eclipse.linuxtools.lttng2.kernel.core.tests.stateprovider.CtfKernelStateInputTest.class,
- org.eclipse.linuxtools.lttng2.kernel.core.tests.stateprovider.StateSystemFullHistoryTest.class
+@Suite.SuiteClasses({
+ ActivatorTest.class,
+ org.eclipse.linuxtools.lttng2.kernel.core.tests.stateprovider.TestAll.class
})
public class AllTests { }
/*******************************************************************************
* Copyright (c) 2012 Ericsson
- * Copyright (c) 2010, 2011 École Polytechnique de Montréal
- * Copyright (c) 2010, 2011 Alexandre Montplaisir <alexandre.montplaisir@gmail.com>
*
* All rights reserved. This program and the accompanying materials are
* made available under the terms of the Eclipse Public License v1.0 which
* accompanies this distribution, and is available at
* http://www.eclipse.org/legal/epl-v10.html
*
- *******************************************************************************/
+ * Contributors:
+ * Alexandre Montplaisir - Initial API and implementation
+ ******************************************************************************/
package org.eclipse.linuxtools.lttng2.kernel.core.tests.stateprovider;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertNotNull;
-import static org.junit.Assert.assertTrue;
import java.io.File;
-import java.util.List;
-import org.eclipse.linuxtools.internal.lttng2.kernel.core.Attributes;
import org.eclipse.linuxtools.internal.lttng2.kernel.core.stateprovider.CtfKernelStateInput;
-import org.eclipse.linuxtools.tmf.core.exceptions.AttributeNotFoundException;
-import org.eclipse.linuxtools.tmf.core.exceptions.StateSystemDisposedException;
-import org.eclipse.linuxtools.tmf.core.exceptions.StateValueTypeException;
-import org.eclipse.linuxtools.tmf.core.exceptions.TimeRangeException;
import org.eclipse.linuxtools.tmf.core.exceptions.TmfTraceException;
-import org.eclipse.linuxtools.tmf.core.interval.ITmfStateInterval;
import org.eclipse.linuxtools.tmf.core.statesystem.IStateChangeInput;
import org.eclipse.linuxtools.tmf.core.statesystem.ITmfStateSystem;
import org.eclipse.linuxtools.tmf.core.statesystem.StateSystemManager;
import org.junit.Test;
/**
- * Unit tests for the StateHistorySystem, which uses a full (non-partial)
- * history and the non-threaded CTF kernel handler.
- *
- * @author alexmont
+ * State system tests using a full history back-end and the LTTng kernel state
+ * input.
*
+ * @author Alexandre Montplaisir
*/
-@SuppressWarnings({"nls", "javadoc"})
-public class StateSystemFullHistoryTest {
-
- static File stateFile;
- static File stateFileBenchmark;
-
- static IStateChangeInput input;
- static ITmfStateSystem ssq;
-
- /* Offset in the trace + start time of the trace */
- private final static long interestingTimestamp1 = 18670067372290L + 1331649577946812237L;
+public class StateSystemFullHistoryTest extends StateSystemTest {
- protected static String getTestFileName() {
- return "/tmp/statefile.ht"; //$NON-NLS-1$
- }
+ private static File stateFile;
+ private static File stateFileBenchmark;
+ /**
+ * Initialize the test cases (build the history file once for all tests).
+ */
@BeforeClass
public static void initialize() {
- stateFile = new File(getTestFileName());
- stateFileBenchmark = new File(getTestFileName() + ".benchmark"); //$NON-NLS-1$
try {
+ stateFile = File.createTempFile("test", ".ht"); //$NON-NLS-1$ //$NON-NLS-2$
+ stateFileBenchmark = File.createTempFile("test", ".ht.benchmark"); //$NON-NLS-1$ //$NON-NLS-2$
+
input = new CtfKernelStateInput(CtfTestFiles.getTestTrace());
ssq = StateSystemManager.loadStateHistory(stateFile, input, true);
} catch (Exception e) {
}
}
+ /**
+ * Delete the temp files after we're done
+ */
@AfterClass
public static void cleanup() {
boolean ret1, ret2;
ret2 = stateFileBenchmark.delete();
if ( !(ret1 && ret2) ) {
System.err.println("Error cleaning up during unit testing, " + //$NON-NLS-1$
- "you might have leftovers state history files in /tmp"); //$NON-NLS-1$
+ "you might have leftovers state history files in /tmp"); //$NON-NLS-1$
}
}
+ // ------------------------------------------------------------------------
+    // Tests specific to a full-history back-end
+ // ------------------------------------------------------------------------
+
/**
* Rebuild independently so we can benchmark it. Too bad JUnit doesn't allow
* us to @Test the @BeforeClass...
*
- * @throws IOException
* @throws TmfTraceException
+ * Fails the test
*/
@Test
public void testBuild() throws TmfTraceException {
assertEquals(CtfTestFiles.endTime, ssb2.getCurrentEndTime());
}
+ /**
+ * Test re-opening the existing file.
+ *
+ * @throws TmfTraceException
+ * Fails the test
+ */
@Test
public void testOpenExistingStateFile() throws TmfTraceException {
ITmfStateSystem ssb2;
assertEquals(CtfTestFiles.endTime, ssb2.getCurrentEndTime());
}
- @Test
- public void testFullQuery1() throws StateValueTypeException,
- AttributeNotFoundException, TimeRangeException,
- StateSystemDisposedException {
-
- List<ITmfStateInterval> list;
- ITmfStateInterval interval;
- int quark, valueInt;
- String valueStr;
-
- list = ssq.queryFullState(interestingTimestamp1);
-
- quark = ssq.getQuarkAbsolute(Attributes.CPUS, "0", Attributes.CURRENT_THREAD);
- interval = list.get(quark);
- valueInt = interval.getStateValue().unboxInt();
- assertEquals(1397, valueInt);
-
- quark = ssq.getQuarkAbsolute(Attributes.THREADS, "1432", Attributes.EXEC_NAME);
- interval = list.get(quark);
- valueStr = interval.getStateValue().unboxStr();
- assertEquals("gdbus", valueStr);
-
- quark = ssq.getQuarkAbsolute(Attributes.THREADS, "1432", Attributes.SYSTEM_CALL);
- interval = list.get(quark);
- valueStr = interval.getStateValue().unboxStr();
- assertTrue(valueStr.equals("sys_poll"));
- }
-
- @Test
- public void testFullQuery2() {
- //
- }
-
- @Test
- public void testFullQuery3() {
- //
- }
-
- @Test
- public void testSingleQuery1() throws AttributeNotFoundException,
- TimeRangeException, StateValueTypeException,
- StateSystemDisposedException {
-
- long timestamp = interestingTimestamp1;
- int quark;
- ITmfStateInterval interval;
- String valueStr;
-
- quark = ssq.getQuarkAbsolute(Attributes.THREADS, "1432", Attributes.EXEC_NAME);
- interval = ssq.querySingleState(timestamp, quark);
- valueStr = interval.getStateValue().unboxStr();
- assertEquals("gdbus", valueStr);
- }
-
- @Test
- public void testSingleQuery2() {
- //
- }
-
- @Test
- public void testSingleQuery3() {
- //
- }
-
- /**
- * Test a range query (with no resolution parameter, so all intervals)
- */
- @Test
- public void testRangeQuery1() throws AttributeNotFoundException,
- TimeRangeException, StateValueTypeException,
- StateSystemDisposedException {
-
- long time1 = interestingTimestamp1;
- long time2 = time1 + 1L * CtfTestFiles.NANOSECS_PER_SEC;
- int quark;
- List<ITmfStateInterval> intervals;
-
- quark = ssq.getQuarkAbsolute(Attributes.CPUS, "0", Attributes.CURRENT_THREAD);
- intervals = ssq.queryHistoryRange(quark, time1, time2);
- assertEquals(487, intervals.size()); /* Number of context switches! */
- assertEquals(1685, intervals.get(100).getStateValue().unboxInt());
- assertEquals(1331668248427681372L, intervals.get(205).getEndTime());
- }
-
- /**
- * Range query, but with a t2 far off the end of the trace.
- * The result should still be valid.
- */
- @Test
- public void testRangeQuery2() throws TimeRangeException,
- AttributeNotFoundException, StateSystemDisposedException {
-
- List<ITmfStateInterval> intervals;
-
- int quark = ssq.getQuarkAbsolute(Attributes.RESOURCES, Attributes.IRQS, "1");
- long ts1 = ssq.getStartTime(); /* start of the trace */
- long ts2 = CtfTestFiles.startTime + 20L * CtfTestFiles.NANOSECS_PER_SEC; /* invalid, but ignored */
-
- intervals = ssq.queryHistoryRange(quark, ts1, ts2);
-
- /* Activity of IRQ 1 over the whole trace */
- assertEquals(65, intervals.size());
- }
-
- /**
- * Test a range query with a resolution
- */
- @Test
- public void testRangeQuery3() throws AttributeNotFoundException,
- TimeRangeException, StateValueTypeException,
- StateSystemDisposedException {
-
- long time1 = interestingTimestamp1;
- long time2 = time1 + 1L * CtfTestFiles.NANOSECS_PER_SEC;
- long resolution = 1000000; /* One query every millisecond */
- int quark;
- List<ITmfStateInterval> intervals;
-
- quark = ssq.getQuarkAbsolute(Attributes.CPUS, "0", Attributes.CURRENT_THREAD);
- intervals = ssq.queryHistoryRange(quark, time1, time2, resolution, null);
- assertEquals(126, intervals.size()); /* Number of context switches! */
- assertEquals(1452, intervals.get(50).getStateValue().unboxInt());
- assertEquals(1331668248815698779L, intervals.get(100).getEndTime());
- }
-
- /**
- * Ask for a time range outside of the trace's range
- */
- @Test(expected = TimeRangeException.class)
- public void testFullQueryInvalidTime1() throws TimeRangeException,
- StateSystemDisposedException {
- long ts = CtfTestFiles.startTime + 20L * CtfTestFiles.NANOSECS_PER_SEC;
- ssq.queryFullState(ts);
- }
-
- @Test(expected = TimeRangeException.class)
- public void testFullQueryInvalidTime2() throws TimeRangeException,
- StateSystemDisposedException {
- long ts = CtfTestFiles.startTime - 20L * CtfTestFiles.NANOSECS_PER_SEC;
- ssq.queryFullState(ts);
- }
-
- @Test(expected = TimeRangeException.class)
- public void testSingleQueryInvalidTime1()
- throws AttributeNotFoundException, TimeRangeException,
- StateSystemDisposedException {
-
- int quark = ssq.getQuarkAbsolute(Attributes.CPUS, "0", Attributes.CURRENT_THREAD);
- long ts = CtfTestFiles.startTime + 20L * CtfTestFiles.NANOSECS_PER_SEC;
- ssq.querySingleState(ts, quark);
- }
-
- @Test(expected = TimeRangeException.class)
- public void testSingleQueryInvalidTime2()
- throws AttributeNotFoundException, TimeRangeException,
- StateSystemDisposedException {
-
- int quark = ssq.getQuarkAbsolute(Attributes.CPUS, "0", Attributes.CURRENT_THREAD);
- long ts = CtfTestFiles.startTime - 20L * CtfTestFiles.NANOSECS_PER_SEC;
- ssq.querySingleState(ts, quark);
- }
-
- @Test(expected = TimeRangeException.class)
- public void testRangeQueryInvalidTime1() throws AttributeNotFoundException,
- TimeRangeException, StateSystemDisposedException {
-
- int quark = ssq.getQuarkAbsolute(Attributes.CPUS, "0", Attributes.CURRENT_THREAD);
- long ts1 = CtfTestFiles.startTime - 20L * CtfTestFiles.NANOSECS_PER_SEC; /* invalid */
- long ts2 = CtfTestFiles.startTime + 1L * CtfTestFiles.NANOSECS_PER_SEC; /* valid */
-
- ssq.queryHistoryRange(quark, ts1, ts2);
- }
-
- @Test(expected = TimeRangeException.class)
- public void testRangeQueryInvalidTime2() throws TimeRangeException,
- AttributeNotFoundException, StateSystemDisposedException {
-
- int quark = ssq.getQuarkAbsolute(Attributes.CPUS, "0", Attributes.CURRENT_THREAD);
- long ts1 = CtfTestFiles.startTime - 1L * CtfTestFiles.NANOSECS_PER_SEC; /* invalid */
- long ts2 = CtfTestFiles.startTime + 20L * CtfTestFiles.NANOSECS_PER_SEC; /* invalid */
-
- ssq.queryHistoryRange(quark, ts1, ts2);
- }
-
- /**
- * Ask for a non-existing attribute
- *
- * @throws AttributeNotFoundException
- */
- @Test(expected = AttributeNotFoundException.class)
- public void testQueryInvalidAttribute() throws AttributeNotFoundException {
-
- ssq.getQuarkAbsolute("There", "is", "no", "cow", "level");
- }
-
- /**
- * Query but with the wrong State Value type
- */
- @Test(expected = StateValueTypeException.class)
- public void testQueryInvalidValuetype1() throws StateValueTypeException,
- AttributeNotFoundException, TimeRangeException,
- StateSystemDisposedException {
- List<ITmfStateInterval> list;
- ITmfStateInterval interval;
- int quark;
-
- list = ssq.queryFullState(interestingTimestamp1);
- quark = ssq.getQuarkAbsolute(Attributes.CPUS, "0", Attributes.CURRENT_THREAD);
- interval = list.get(quark);
-
- /* This is supposed to be an int value */
- interval.getStateValue().unboxStr();
- }
-
- @Test(expected = StateValueTypeException.class)
- public void testQueryInvalidValuetype2() throws StateValueTypeException,
- AttributeNotFoundException, TimeRangeException,
- StateSystemDisposedException {
- List<ITmfStateInterval> list;
- ITmfStateInterval interval;
- int quark;
-
- list = ssq.queryFullState(interestingTimestamp1);
- quark = ssq.getQuarkAbsolute(Attributes.THREADS, "1432", Attributes.EXEC_NAME);
- interval = list.get(quark);
-
- /* This is supposed to be a String value */
- interval.getStateValue().unboxInt();
- }
-
- @Test
- public void testFullAttributeName() throws AttributeNotFoundException {
- int quark = ssq.getQuarkAbsolute(Attributes.CPUS, "0", Attributes.CURRENT_THREAD);
- String name = ssq.getFullAttributePath(quark);
- assertEquals(name, "CPUs/0/Current_thread");
- }
-
- @Test
- public void testGetQuarks_begin() {
- List<Integer> list = ssq.getQuarks("*", "1577", Attributes.EXEC_NAME);
-
- assertEquals(1, list.size());
- }
-
- @Test
- public void testGetQuarks_middle() {
- List<Integer> list = ssq.getQuarks(Attributes.THREADS, "*", Attributes.EXEC_NAME);
-
- /* Number of different kernel threads in the trace */
- assertEquals(168, list.size());
- }
-
- @Test
- public void testGetQuarks_end() {
- List<Integer> list = ssq.getQuarks(Attributes.THREADS, "1577", "*");
-
- /* There should be 4 sub-attributes for each Thread node */
- assertEquals(4, list.size());
- }
}
--- /dev/null
+/*******************************************************************************
+ * Copyright (c) 2013 Ericsson
+ *
+ * All rights reserved. This program and the accompanying materials are
+ * made available under the terms of the Eclipse Public License v1.0 which
+ * accompanies this distribution, and is available at
+ * http://www.eclipse.org/legal/epl-v10.html
+ *
+ * Contributors:
+ * Alexandre Montplaisir - Initial API and implementation
+ ******************************************************************************/
+
+package org.eclipse.linuxtools.lttng2.kernel.core.tests.stateprovider;
+
+import org.eclipse.linuxtools.internal.lttng2.kernel.core.stateprovider.CtfKernelStateInput;
+import org.eclipse.linuxtools.tmf.core.exceptions.TmfTraceException;
+import org.eclipse.linuxtools.tmf.core.statesystem.StateSystemManager;
+import org.junit.BeforeClass;
+
+/**
+ * State system tests using the in-memory back-end.
+ *
+ * @author Alexandre Montplaisir
+ */
+public class StateSystemInMemoryTest extends StateSystemTest {
+
+ /**
+ * Initialization
+ */
+ @BeforeClass
+ public static void initialize() {
+ try {
+ input = new CtfKernelStateInput(CtfTestFiles.getTestTrace());
+ ssq = StateSystemManager.newInMemHistory(input, true);
+ } catch (TmfTraceException e) {
+ e.printStackTrace();
+ }
+ }
+}
--- /dev/null
+/*******************************************************************************
+ * Copyright (c) 2012 Ericsson
+ * Copyright (c) 2010, 2011 École Polytechnique de Montréal
+ * Copyright (c) 2010, 2011 Alexandre Montplaisir <alexandre.montplaisir@gmail.com>
+ *
+ * All rights reserved. This program and the accompanying materials are
+ * made available under the terms of the Eclipse Public License v1.0 which
+ * accompanies this distribution, and is available at
+ * http://www.eclipse.org/legal/epl-v10.html
+ *
+ *******************************************************************************/
+
+package org.eclipse.linuxtools.lttng2.kernel.core.tests.stateprovider;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertTrue;
+
+import java.util.List;
+
+import org.eclipse.linuxtools.internal.lttng2.kernel.core.Attributes;
+import org.eclipse.linuxtools.tmf.core.exceptions.AttributeNotFoundException;
+import org.eclipse.linuxtools.tmf.core.exceptions.StateSystemDisposedException;
+import org.eclipse.linuxtools.tmf.core.exceptions.StateValueTypeException;
+import org.eclipse.linuxtools.tmf.core.exceptions.TimeRangeException;
+import org.eclipse.linuxtools.tmf.core.interval.ITmfStateInterval;
+import org.eclipse.linuxtools.tmf.core.statesystem.IStateChangeInput;
+import org.eclipse.linuxtools.tmf.core.statesystem.ITmfStateSystem;
+import org.junit.Test;
+
+/**
+ * Base unit tests for the StateHistorySystem. Extensions can be made to test
+ * different state back-end types or configurations.
+ *
+ * @author Alexandre Montplaisir
+ *
+ */
+@SuppressWarnings({"nls", "javadoc"})
+public abstract class StateSystemTest {
+
+ protected static IStateChangeInput input;
+ protected static ITmfStateSystem ssq;
+
+ /* Offset in the trace + start time of the trace */
+ private static final long interestingTimestamp1 = 18670067372290L + 1331649577946812237L;
+
+ @Test
+ public void testFullQuery1() throws StateValueTypeException,
+ AttributeNotFoundException, TimeRangeException,
+ StateSystemDisposedException {
+
+ List<ITmfStateInterval> list;
+ ITmfStateInterval interval;
+ int quark, valueInt;
+ String valueStr;
+
+ list = ssq.queryFullState(interestingTimestamp1);
+
+ quark = ssq.getQuarkAbsolute(Attributes.CPUS, "0", Attributes.CURRENT_THREAD);
+ interval = list.get(quark);
+ valueInt = interval.getStateValue().unboxInt();
+ assertEquals(1397, valueInt);
+
+ quark = ssq.getQuarkAbsolute(Attributes.THREADS, "1432", Attributes.EXEC_NAME);
+ interval = list.get(quark);
+ valueStr = interval.getStateValue().unboxStr();
+ assertEquals("gdbus", valueStr);
+
+ quark = ssq.getQuarkAbsolute(Attributes.THREADS, "1432", Attributes.SYSTEM_CALL);
+ interval = list.get(quark);
+ valueStr = interval.getStateValue().unboxStr();
+ assertTrue(valueStr.equals("sys_poll"));
+ }
+
+ @Test
+ public void testSingleQuery1() throws AttributeNotFoundException,
+ TimeRangeException, StateValueTypeException,
+ StateSystemDisposedException {
+
+ long timestamp = interestingTimestamp1;
+ int quark;
+ ITmfStateInterval interval;
+ String valueStr;
+
+ quark = ssq.getQuarkAbsolute(Attributes.THREADS, "1432", Attributes.EXEC_NAME);
+ interval = ssq.querySingleState(timestamp, quark);
+ valueStr = interval.getStateValue().unboxStr();
+ assertEquals("gdbus", valueStr);
+ }
+
+ /**
+ * Test a range query (with no resolution parameter, so all intervals)
+ */
+ @Test
+ public void testRangeQuery1() throws AttributeNotFoundException,
+ TimeRangeException, StateValueTypeException,
+ StateSystemDisposedException {
+
+ long time1 = interestingTimestamp1;
+ long time2 = time1 + 1L * CtfTestFiles.NANOSECS_PER_SEC;
+ int quark;
+ List<ITmfStateInterval> intervals;
+
+ quark = ssq.getQuarkAbsolute(Attributes.CPUS, "0", Attributes.CURRENT_THREAD);
+ intervals = ssq.queryHistoryRange(quark, time1, time2);
+ assertEquals(487, intervals.size()); /* Number of context switches! */
+ assertEquals(1685, intervals.get(100).getStateValue().unboxInt());
+ assertEquals(1331668248427681372L, intervals.get(205).getEndTime());
+ }
+
+ /**
+ * Range query, but with a t2 far off the end of the trace.
+ * The result should still be valid.
+ */
+ @Test
+ public void testRangeQuery2() throws TimeRangeException,
+ AttributeNotFoundException, StateSystemDisposedException {
+
+ List<ITmfStateInterval> intervals;
+
+ int quark = ssq.getQuarkAbsolute(Attributes.RESOURCES, Attributes.IRQS, "1");
+ long ts1 = ssq.getStartTime(); /* start of the trace */
+ long ts2 = CtfTestFiles.startTime + 20L * CtfTestFiles.NANOSECS_PER_SEC; /* invalid, but ignored */
+
+ intervals = ssq.queryHistoryRange(quark, ts1, ts2);
+
+ /* Activity of IRQ 1 over the whole trace */
+ assertEquals(65, intervals.size());
+ }
+
+ /**
+ * Test a range query with a resolution
+ */
+ @Test
+ public void testRangeQuery3() throws AttributeNotFoundException,
+ TimeRangeException, StateValueTypeException,
+ StateSystemDisposedException {
+
+ long time1 = interestingTimestamp1;
+ long time2 = time1 + 1L * CtfTestFiles.NANOSECS_PER_SEC;
+ long resolution = 1000000; /* One query every millisecond */
+ int quark;
+ List<ITmfStateInterval> intervals;
+
+ quark = ssq.getQuarkAbsolute(Attributes.CPUS, "0", Attributes.CURRENT_THREAD);
+ intervals = ssq.queryHistoryRange(quark, time1, time2, resolution, null);
+ assertEquals(126, intervals.size()); /* Number of context switches! */
+ assertEquals(1452, intervals.get(50).getStateValue().unboxInt());
+ assertEquals(1331668248815698779L, intervals.get(100).getEndTime());
+ }
+
+ /**
+ * Ask for a time range outside of the trace's range
+ */
+ @Test(expected = TimeRangeException.class)
+ public void testFullQueryInvalidTime1() throws TimeRangeException,
+ StateSystemDisposedException {
+ long ts = CtfTestFiles.startTime + 20L * CtfTestFiles.NANOSECS_PER_SEC;
+ ssq.queryFullState(ts);
+ }
+
+ @Test(expected = TimeRangeException.class)
+ public void testFullQueryInvalidTime2() throws TimeRangeException,
+ StateSystemDisposedException {
+ long ts = CtfTestFiles.startTime - 20L * CtfTestFiles.NANOSECS_PER_SEC;
+ ssq.queryFullState(ts);
+ }
+
+ @Test(expected = TimeRangeException.class)
+ public void testSingleQueryInvalidTime1()
+ throws AttributeNotFoundException, TimeRangeException,
+ StateSystemDisposedException {
+
+ int quark = ssq.getQuarkAbsolute(Attributes.CPUS, "0", Attributes.CURRENT_THREAD);
+ long ts = CtfTestFiles.startTime + 20L * CtfTestFiles.NANOSECS_PER_SEC;
+ ssq.querySingleState(ts, quark);
+ }
+
+ @Test(expected = TimeRangeException.class)
+ public void testSingleQueryInvalidTime2()
+ throws AttributeNotFoundException, TimeRangeException,
+ StateSystemDisposedException {
+
+ int quark = ssq.getQuarkAbsolute(Attributes.CPUS, "0", Attributes.CURRENT_THREAD);
+ long ts = CtfTestFiles.startTime - 20L * CtfTestFiles.NANOSECS_PER_SEC;
+ ssq.querySingleState(ts, quark);
+ }
+
+ @Test(expected = TimeRangeException.class)
+ public void testRangeQueryInvalidTime1() throws AttributeNotFoundException,
+ TimeRangeException, StateSystemDisposedException {
+
+ int quark = ssq.getQuarkAbsolute(Attributes.CPUS, "0", Attributes.CURRENT_THREAD);
+ long ts1 = CtfTestFiles.startTime - 20L * CtfTestFiles.NANOSECS_PER_SEC; /* invalid */
+ long ts2 = CtfTestFiles.startTime + 1L * CtfTestFiles.NANOSECS_PER_SEC; /* valid */
+
+ ssq.queryHistoryRange(quark, ts1, ts2);
+ }
+
+ @Test(expected = TimeRangeException.class)
+ public void testRangeQueryInvalidTime2() throws TimeRangeException,
+ AttributeNotFoundException, StateSystemDisposedException {
+
+ int quark = ssq.getQuarkAbsolute(Attributes.CPUS, "0", Attributes.CURRENT_THREAD);
+ long ts1 = CtfTestFiles.startTime - 1L * CtfTestFiles.NANOSECS_PER_SEC; /* invalid */
+ long ts2 = CtfTestFiles.startTime + 20L * CtfTestFiles.NANOSECS_PER_SEC; /* invalid */
+
+ ssq.queryHistoryRange(quark, ts1, ts2);
+ }
+
+ /**
+ * Ask for a non-existing attribute
+ *
+ * @throws AttributeNotFoundException
+ */
+ @Test(expected = AttributeNotFoundException.class)
+ public void testQueryInvalidAttribute() throws AttributeNotFoundException {
+
+ ssq.getQuarkAbsolute("There", "is", "no", "cow", "level");
+ }
+
+ /**
+ * Query but with the wrong State Value type
+ */
+ @Test(expected = StateValueTypeException.class)
+ public void testQueryInvalidValuetype1() throws StateValueTypeException,
+ AttributeNotFoundException, TimeRangeException,
+ StateSystemDisposedException {
+ List<ITmfStateInterval> list;
+ ITmfStateInterval interval;
+ int quark;
+
+ list = ssq.queryFullState(interestingTimestamp1);
+ quark = ssq.getQuarkAbsolute(Attributes.CPUS, "0", Attributes.CURRENT_THREAD);
+ interval = list.get(quark);
+
+ /* This is supposed to be an int value */
+ interval.getStateValue().unboxStr();
+ }
+
+ @Test(expected = StateValueTypeException.class)
+ public void testQueryInvalidValuetype2() throws StateValueTypeException,
+ AttributeNotFoundException, TimeRangeException,
+ StateSystemDisposedException {
+ List<ITmfStateInterval> list;
+ ITmfStateInterval interval;
+ int quark;
+
+ list = ssq.queryFullState(interestingTimestamp1);
+ quark = ssq.getQuarkAbsolute(Attributes.THREADS, "1432", Attributes.EXEC_NAME);
+ interval = list.get(quark);
+
+ /* This is supposed to be a String value */
+ interval.getStateValue().unboxInt();
+ }
+
+ @Test
+ public void testFullAttributeName() throws AttributeNotFoundException {
+ int quark = ssq.getQuarkAbsolute(Attributes.CPUS, "0", Attributes.CURRENT_THREAD);
+ String name = ssq.getFullAttributePath(quark);
+ assertEquals(name, "CPUs/0/Current_thread");
+ }
+
+ @Test
+ public void testGetQuarks_begin() {
+ List<Integer> list = ssq.getQuarks("*", "1577", Attributes.EXEC_NAME);
+
+ assertEquals(1, list.size());
+ }
+
+ @Test
+ public void testGetQuarks_middle() {
+ List<Integer> list = ssq.getQuarks(Attributes.THREADS, "*", Attributes.EXEC_NAME);
+
+ /* Number of different kernel threads in the trace */
+ assertEquals(168, list.size());
+ }
+
+ @Test
+ public void testGetQuarks_end() {
+ List<Integer> list = ssq.getQuarks(Attributes.THREADS, "1577", "*");
+
+ /* There should be 4 sub-attributes for each Thread node */
+ assertEquals(4, list.size());
+ }
+}
/*******************************************************************************
* Copyright (c) 2012 Ericsson
- *
+ *
* All rights reserved. This program and the accompanying materials are
* made available under the terms of the Eclipse Public License v1.0 which
* accompanies this distribution, and is available at
* http://www.eclipse.org/legal/epl-v10.html
- *
+ *
* Contributors:
* Alexandre Montplaisir - Initial implementation
******************************************************************************/
package org.eclipse.linuxtools.lttng2.kernel.core.tests.stateprovider;
-import org.junit.runner.JUnitCore;
import org.junit.runner.RunWith;
import org.junit.runners.Suite;
* The class <code>TestAll</code> builds a suite that can be used to run all of
* the tests within its package as well as within any subpackages of its
* package.
- *
+ *
* @author ematkho
* @version $Revision: 1.0 $
*/
@RunWith(Suite.class)
-@Suite.SuiteClasses({ CtfKernelStateInputTest.class,
- StateSystemFullHistoryTest.class})
+@Suite.SuiteClasses({
+ CtfKernelStateInputTest.class,
+ StateSystemFullHistoryTest.class,
+ StateSystemInMemoryTest.class
+})
public class TestAll {
- /**
- * Launch the test.
- *
- * @param args
- * the command line arguments
- */
- public static void main(String[] args) {
- JUnitCore.runClasses(new Class[] { TestAll.class });
- }
}
import junit.framework.TestCase;
-import org.eclipse.linuxtools.internal.tmf.core.statesystem.IStateHistoryBackend;
import org.eclipse.linuxtools.internal.tmf.core.statesystem.StateSystem;
-import org.eclipse.linuxtools.internal.tmf.core.statesystem.historytree.HistoryTreeBackend;
+import org.eclipse.linuxtools.internal.tmf.core.statesystem.backends.IStateHistoryBackend;
+import org.eclipse.linuxtools.internal.tmf.core.statesystem.backends.historytree.HistoryTreeBackend;
import org.eclipse.linuxtools.tmf.core.exceptions.AttributeNotFoundException;
import org.eclipse.linuxtools.tmf.core.exceptions.StateSystemDisposedException;
import org.eclipse.linuxtools.tmf.core.exceptions.StateValueTypeException;
import java.util.List;
import org.eclipse.linuxtools.internal.tmf.core.statesystem.HistoryBuilder;
-import org.eclipse.linuxtools.internal.tmf.core.statesystem.IStateHistoryBackend;
-import org.eclipse.linuxtools.internal.tmf.core.statesystem.historytree.HistoryTreeBackend;
+import org.eclipse.linuxtools.internal.tmf.core.statesystem.backends.IStateHistoryBackend;
+import org.eclipse.linuxtools.internal.tmf.core.statesystem.backends.historytree.HistoryTreeBackend;
import org.eclipse.linuxtools.tmf.core.exceptions.AttributeNotFoundException;
import org.eclipse.linuxtools.tmf.core.exceptions.StateSystemDisposedException;
import org.eclipse.linuxtools.tmf.core.exceptions.TimeRangeException;
org.eclipse.linuxtools.internal.tmf.core.component;x-friends:="org.eclipse.linuxtools.tmf.core.tests",
org.eclipse.linuxtools.internal.tmf.core.request;x-friends:="org.eclipse.linuxtools.tmf.core.tests",
org.eclipse.linuxtools.internal.tmf.core.statesystem;x-friends:="org.eclipse.linuxtools.tmf.core.tests",
- org.eclipse.linuxtools.internal.tmf.core.statesystem.historytree;x-friends:="org.eclipse.linuxtools.tmf.core.tests",
+ org.eclipse.linuxtools.internal.tmf.core.statesystem.backends;x-friends:="org.eclipse.linuxtools.tmf.core.tests",
+ org.eclipse.linuxtools.internal.tmf.core.statesystem.backends.historytree;x-friends:="org.eclipse.linuxtools.tmf.core.tests",
org.eclipse.linuxtools.internal.tmf.core.trace;x-friends:="org.eclipse.linuxtools.tmf.core.tests",
org.eclipse.linuxtools.tmf.core,
org.eclipse.linuxtools.tmf.core.component,
import java.io.IOException;
+import org.eclipse.linuxtools.internal.tmf.core.statesystem.backends.IStateHistoryBackend;
import org.eclipse.linuxtools.tmf.core.component.TmfComponent;
import org.eclipse.linuxtools.tmf.core.event.ITmfEvent;
import org.eclipse.linuxtools.tmf.core.event.TmfTimeRange;
* @param stateChangeInput
* The input plugin to use. This is required.
* @param backend
- * The backend storage to use.
+ * The back-end storage to use.
* @param buildManually
* Should we build this history in-band or not. True means we
* will start the building ourselves and block the caller until
* construction is done. False (out-of-band) means we will start
* listening for the signal and return immediately. Another
* signal will be sent when finished.
- * @throws IOException
- * Is thrown if anything went wrong (usually with the storage
- * backend)
*/
public HistoryBuilder(IStateChangeInput stateChangeInput,
- IStateHistoryBackend backend, boolean buildManually)
- throws IOException {
+ IStateHistoryBackend backend, boolean buildManually) {
if (stateChangeInput == null || backend == null) {
throw new IllegalArgumentException();
}
sci = stateChangeInput;
hb = backend;
- ss = new StateSystem(hb, true);
+ ss = new StateSystem(hb);
sci.assignTargetStateSystem(ss);
+++ /dev/null
-/*******************************************************************************
- * Copyright (c) 2012 Ericsson
- * Copyright (c) 2010, 2011 École Polytechnique de Montréal
- * Copyright (c) 2010, 2011 Alexandre Montplaisir <alexandre.montplaisir@gmail.com>
- *
- * All rights reserved. This program and the accompanying materials are
- * made available under the terms of the Eclipse Public License v1.0 which
- * accompanies this distribution, and is available at
- * http://www.eclipse.org/legal/epl-v10.html
- *
- *******************************************************************************/
-
-package org.eclipse.linuxtools.internal.tmf.core.statesystem;
-
-import java.io.File;
-import java.io.FileInputStream;
-import java.io.PrintWriter;
-import java.util.List;
-
-import org.eclipse.linuxtools.tmf.core.exceptions.AttributeNotFoundException;
-import org.eclipse.linuxtools.tmf.core.exceptions.StateSystemDisposedException;
-import org.eclipse.linuxtools.tmf.core.exceptions.TimeRangeException;
-import org.eclipse.linuxtools.tmf.core.interval.ITmfStateInterval;
-import org.eclipse.linuxtools.tmf.core.statevalue.ITmfStateValue;
-
-/**
- * The main difference between StateSystem and StateHistorySystem is that SHS
- * allows 'seeking' back in time to reload a Current State at a previous time.
- * "How to go back in time" is defined by the implementation of the
- * HistoryBackend.
- *
- * A StateHistorySystem contains one and only one HistoryBackend. If you want to
- * use a paradigm with more than one provider (eg. more or less precision
- * depending on what's asked by the user), implement one wrapper HistoryBackend
- * which can then contain your 2-3 other backends underneath.
- *
- * @author alexmont
- *
- */
-public interface IStateHistoryBackend {
-
- /**
- * Get the start time of this state history. This is usually the same as the
- * start time of the originating trace.
- *
- * @return The start time
- */
- public long getStartTime();
-
- /**
- * Get the current end time of the state history. It will change as the
- * history is being built.
- *
- * @return The end time
- */
- public long getEndTime();
-
- /**
- * Main method to insert state intervals into the history.
- *
- * @param stateStartTime
- * The start time of the interval
- * @param stateEndTime
- * The end time of the interval
- * @param quark
- * The quark of the attribute this interval refers to
- * @param value
- * The StateValue represented by this interval
- * @throws TimeRangeException
- * If the start or end time are invalid
- */
- // FIXME change to IStateInterval?
- public void insertPastState(long stateStartTime, long stateEndTime,
- int quark, ITmfStateValue value) throws TimeRangeException;
-
- /**
- * Indicate to the provider that we are done building the history (so it can
- * close off, stop threads, etc.)
- *
- * @param endTime
- * The end time to assign to this state history. It could be
- * farther in time than the last state inserted, for example.
- * @throws TimeRangeException
- * If the requested time makes no sense.
- */
- public void finishedBuilding(long endTime) throws TimeRangeException;
-
- /**
- * It is the responsibility of the backend to define where to save the
- * Attribute Tree (since it's only useful to "reopen" an Attribute Tree if
- * we have the matching History).
- *
- * This method defines where to read for the attribute tree when opening an
- * already-existing history. Refer to the file format documentation.
- *
- * @return A FileInputStream object pointing to the correct file/location in
- * the file where to read the attribute tree information.
- */
- public FileInputStream supplyAttributeTreeReader();
-
- // FIXME change to FOS too?
- /**
- * Supply the File object to which we will write the attribute tree. The
- * position in this file is supplied by -TreeWriterFilePosition.
- *
- * @return The target File
- */
- public File supplyAttributeTreeWriterFile();
-
- /**
- * Supply the position in the file where we should write the attribute tree
- * when asked to.
- *
- * @return The file position (we will seek() to it)
- */
- public long supplyAttributeTreeWriterFilePosition();
-
- /**
- * Delete any generated files or anything that might have been created by
- * the history backend (either temporary or save files). By calling this, we
- * return to the state as it was before ever building the history.
- *
- * You might not want to call automatically if, for example, you want an
- * index file to persist on disk. This could be limited to actions
- * originating from the user.
- */
- public void removeFiles();
-
- /**
- * Notify the state history back-end that the trace is being closed, so it
- * should release its file descriptors, close its connections, etc.
- */
- public void dispose();
-
- // ------------------------------------------------------------------------
- // Query methods
- // ------------------------------------------------------------------------
-
- /**
- * Complete "give me the state at a given time" method 'currentStateInfo' is
- * an "out" parameter, that is, write to it the needed information and
- * return. DO NOT 'new' currentStateInfo, it will be lost and nothing will
- * be returned!
- *
- * @param currentStateInfo
- * List of StateValues (index == quark) to fill up
- * @param t
- * Target timestamp of the query
- * @throws TimeRangeException
- * If the timestamp is outside of the history/trace
- * @throws StateSystemDisposedException
- * If the state system is disposed while a request is ongoing.
- */
- public void doQuery(List<ITmfStateInterval> currentStateInfo, long t)
- throws TimeRangeException, StateSystemDisposedException;
-
- /**
- * Some providers might want to specify a different way to obtain just a
- * single StateValue instead of updating the whole list. If the method to
- * use is the same, then feel free to just implement this as a wrapper using
- * doQuery().
- *
- * @param t
- * The target timestamp of the query.
- * @param attributeQuark
- * The single attribute for which you want the state interval
- * @return The state interval matching this timestamp/attribute pair
- * @throws TimeRangeException
- * If the timestamp was invalid
- * @throws AttributeNotFoundException
- * If the quark was invalid
- * @throws StateSystemDisposedException
- * If the state system is disposed while a request is ongoing.
- */
- public ITmfStateInterval doSingularQuery(long t, int attributeQuark)
- throws TimeRangeException, AttributeNotFoundException,
- StateSystemDisposedException;
-
- /**
- * Simple check to make sure the requested timestamps are within the borders
- * of this state history. This is used internally, but could also be used by
- * the request sender (to check before sending in a lot of requests for
- * example).
- *
- * @param t
- * The queried timestamp
- * @return True if the timestamp is within range, false if not.
- */
- public boolean checkValidTime(long t);
-
- /**
- * Debug method to print the contents of the history backend.
- *
- * @param writer
- * The PrintWriter where to write the output
- */
- public void debugPrint(PrintWriter writer);
-}
import org.eclipse.core.runtime.IProgressMonitor;
import org.eclipse.core.runtime.NullProgressMonitor;
import org.eclipse.linuxtools.internal.tmf.core.Activator;
+import org.eclipse.linuxtools.internal.tmf.core.statesystem.backends.IStateHistoryBackend;
import org.eclipse.linuxtools.tmf.core.exceptions.AttributeNotFoundException;
import org.eclipse.linuxtools.tmf.core.exceptions.StateSystemDisposedException;
import org.eclipse.linuxtools.tmf.core.exceptions.StateValueTypeException;
private boolean buildCancelled = false;
private boolean isDisposed = false;
+    /**
+     * New-file constructor. For when you build a state system with a new file,
+     * or if the back-end does not require a file on disk.
+     *
+     * @param backend
+     *            Back-end plugin to use
+     */
+    public StateSystem(IStateHistoryBackend backend) {
+        this.backend = backend;
+        /* Start from an empty transient state and an empty attribute tree */
+        this.transState = new TransientState(backend);
+        this.attributeTree = new AttributeTree(this);
+    }
+
/**
* General constructor
*
* @param backend
- * The "state history storage" backend to use.
+ * The "state history storage" back-end to use.
* @param newFile
* Put true if this is a new history started from scratch. It is
* used to tell the state system where to get its attribute tree.
import java.util.ArrayList;
import java.util.List;
+import org.eclipse.linuxtools.internal.tmf.core.statesystem.backends.IStateHistoryBackend;
import org.eclipse.linuxtools.tmf.core.exceptions.AttributeNotFoundException;
import org.eclipse.linuxtools.tmf.core.exceptions.StateValueTypeException;
import org.eclipse.linuxtools.tmf.core.exceptions.TimeRangeException;
--- /dev/null
+/*******************************************************************************
+ * Copyright (c) 2012 Ericsson
+ * Copyright (c) 2010, 2011 École Polytechnique de Montréal
+ * Copyright (c) 2010, 2011 Alexandre Montplaisir <alexandre.montplaisir@gmail.com>
+ *
+ * All rights reserved. This program and the accompanying materials are
+ * made available under the terms of the Eclipse Public License v1.0 which
+ * accompanies this distribution, and is available at
+ * http://www.eclipse.org/legal/epl-v10.html
+ *
+ *******************************************************************************/
+
+package org.eclipse.linuxtools.internal.tmf.core.statesystem.backends;
+
+import java.io.File;
+import java.io.FileInputStream;
+import java.io.PrintWriter;
+import java.util.List;
+
+import org.eclipse.linuxtools.tmf.core.exceptions.AttributeNotFoundException;
+import org.eclipse.linuxtools.tmf.core.exceptions.StateSystemDisposedException;
+import org.eclipse.linuxtools.tmf.core.exceptions.TimeRangeException;
+import org.eclipse.linuxtools.tmf.core.interval.ITmfStateInterval;
+import org.eclipse.linuxtools.tmf.core.statevalue.ITmfStateValue;
+
+/**
+ * Interface for the storage back-ends of a state history, which define how
+ * and where state intervals are kept.
+ *
+ * The main difference between StateSystem and StateHistorySystem is that SHS
+ * allows 'seeking' back in time to reload a Current State at a previous time.
+ * "How to go back in time" is defined by the implementation of the
+ * HistoryBackend.
+ *
+ * A StateHistorySystem contains one and only one HistoryBackend. If you want to
+ * use a paradigm with more than one provider (eg. more or less precision
+ * depending on what's asked by the user), implement one wrapper HistoryBackend
+ * which can then contain your 2-3 other backends underneath.
+ *
+ * @author alexmont
+ *
+ */
+public interface IStateHistoryBackend {
+
+    /**
+     * Get the start time of this state history. This is usually the same as the
+     * start time of the originating trace.
+     *
+     * @return The start time
+     */
+    public long getStartTime();
+
+    /**
+     * Get the current end time of the state history. It will change as the
+     * history is being built.
+     *
+     * @return The end time
+     */
+    public long getEndTime();
+
+    /**
+     * Main method to insert state intervals into the history.
+     *
+     * @param stateStartTime
+     *            The start time of the interval
+     * @param stateEndTime
+     *            The end time of the interval
+     * @param quark
+     *            The quark of the attribute this interval refers to
+     * @param value
+     *            The StateValue represented by this interval
+     * @throws TimeRangeException
+     *             If the start or end time are invalid
+     */
+    // FIXME change to IStateInterval?
+    public void insertPastState(long stateStartTime, long stateEndTime,
+            int quark, ITmfStateValue value) throws TimeRangeException;
+
+    /**
+     * Indicate to the provider that we are done building the history (so it can
+     * close off, stop threads, etc.)
+     *
+     * @param endTime
+     *            The end time to assign to this state history. It could be
+     *            farther in time than the last state inserted, for example.
+     * @throws TimeRangeException
+     *             If the requested time makes no sense.
+     */
+    public void finishedBuilding(long endTime) throws TimeRangeException;
+
+    /**
+     * It is the responsibility of the backend to define where to save the
+     * Attribute Tree (since it's only useful to "reopen" an Attribute Tree if
+     * we have the matching History).
+     *
+     * This method defines where to read for the attribute tree when opening an
+     * already-existing history. Refer to the file format documentation.
+     *
+     * @return A FileInputStream object pointing to the correct file/location in
+     *         the file where to read the attribute tree information.
+     */
+    public FileInputStream supplyAttributeTreeReader();
+
+    // FIXME change to FOS too?
+    /**
+     * Supply the File object to which we will write the attribute tree. The
+     * position in this file is supplied by -TreeWriterFilePosition.
+     *
+     * @return The target File
+     */
+    public File supplyAttributeTreeWriterFile();
+
+    /**
+     * Supply the position in the file where we should write the attribute tree
+     * when asked to.
+     *
+     * @return The file position (we will seek() to it)
+     */
+    public long supplyAttributeTreeWriterFilePosition();
+
+    /**
+     * Delete any generated files or anything that might have been created by
+     * the history backend (either temporary or save files). By calling this, we
+     * return to the state as it was before ever building the history.
+     *
+     * You might not want to call this automatically if, for example, you want
+     * an index file to persist on disk. This could be limited to actions
+     * originating from the user.
+     */
+    public void removeFiles();
+
+    /**
+     * Notify the state history back-end that the trace is being closed, so it
+     * should release its file descriptors, close its connections, etc.
+     */
+    public void dispose();
+
+    // ------------------------------------------------------------------------
+    // Query methods
+    // ------------------------------------------------------------------------
+
+    /**
+     * Complete "give me the state at a given time" method. 'currentStateInfo'
+     * is an "out" parameter, that is, write to it the needed information and
+     * return. DO NOT 'new' currentStateInfo, it will be lost and nothing will
+     * be returned!
+     *
+     * @param currentStateInfo
+     *            List of StateValues (index == quark) to fill up
+     * @param t
+     *            Target timestamp of the query
+     * @throws TimeRangeException
+     *             If the timestamp is outside of the history/trace
+     * @throws StateSystemDisposedException
+     *             If the state system is disposed while a request is ongoing.
+     */
+    public void doQuery(List<ITmfStateInterval> currentStateInfo, long t)
+            throws TimeRangeException, StateSystemDisposedException;
+
+    /**
+     * Some providers might want to specify a different way to obtain just a
+     * single StateValue instead of updating the whole list. If the method to
+     * use is the same, then feel free to just implement this as a wrapper using
+     * doQuery().
+     *
+     * @param t
+     *            The target timestamp of the query.
+     * @param attributeQuark
+     *            The single attribute for which you want the state interval
+     * @return The state interval matching this timestamp/attribute pair
+     * @throws TimeRangeException
+     *             If the timestamp was invalid
+     * @throws AttributeNotFoundException
+     *             If the quark was invalid
+     * @throws StateSystemDisposedException
+     *             If the state system is disposed while a request is ongoing.
+     */
+    public ITmfStateInterval doSingularQuery(long t, int attributeQuark)
+            throws TimeRangeException, AttributeNotFoundException,
+            StateSystemDisposedException;
+
+    /**
+     * Simple check to make sure the requested timestamps are within the borders
+     * of this state history. This is used internally, but could also be used by
+     * the request sender (to check before sending in a lot of requests for
+     * example).
+     *
+     * @param t
+     *            The queried timestamp
+     * @return True if the timestamp is within range, false if not.
+     */
+    public boolean checkValidTime(long t);
+
+    /**
+     * Debug method to print the contents of the history backend.
+     *
+     * @param writer
+     *            The PrintWriter where to write the output
+     */
+    public void debugPrint(PrintWriter writer);
+}
--- /dev/null
+/*******************************************************************************
+ * Copyright (c) 2013 Ericsson
+ *
+ * All rights reserved. This program and the accompanying materials are
+ * made available under the terms of the Eclipse Public License v1.0 which
+ * accompanies this distribution, and is available at
+ * http://www.eclipse.org/legal/epl-v10.html
+ *
+ * Contributors:
+ * Alexandre Montplaisir - Initial API and implementation
+ ******************************************************************************/
+
+package org.eclipse.linuxtools.internal.tmf.core.statesystem.backends;
+
+import java.io.File;
+import java.io.FileInputStream;
+import java.io.PrintWriter;
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.Comparator;
+import java.util.List;
+
+import org.eclipse.linuxtools.tmf.core.exceptions.AttributeNotFoundException;
+import org.eclipse.linuxtools.tmf.core.exceptions.TimeRangeException;
+import org.eclipse.linuxtools.tmf.core.interval.ITmfStateInterval;
+import org.eclipse.linuxtools.tmf.core.interval.TmfIntervalEndComparator;
+import org.eclipse.linuxtools.tmf.core.interval.TmfStateInterval;
+import org.eclipse.linuxtools.tmf.core.statevalue.ITmfStateValue;
+
+/**
+ * State history back-end that stores its intervals in RAM only. It cannot be
+ * saved to disk, which means we need to rebuild it every time we re-open a
+ * trace. But it's relatively quick to build, so this shouldn't be a problem in
+ * most cases.
+ *
+ * This should only be used with very small state histories (and/or, very small
+ * traces). Since it's stored in standard Collections, it's limited to 2^31
+ * intervals.
+ *
+ * @author Alexandre Montplaisir
+ */
+public class InMemoryBackend implements IStateHistoryBackend {
+
+    /* Compares intervals by their end time only, for the binary search below */
+    private static final Comparator<ITmfStateInterval> endComparator =
+            new TmfIntervalEndComparator();
+
+    /*
+     * Kept sorted by interval end time. This holds as long as
+     * insertPastState() receives intervals with non-decreasing end times,
+     * which is how histories are built. NOTE(review): confirm callers
+     * guarantee this ordering.
+     */
+    private final List<ITmfStateInterval> intervals;
+
+    /* Fixed start time of this history */
+    private final long startTime;
+
+    /* Highest end time seen so far; grows as the history is built */
+    private long latestTime;
+
+    /**
+     * Constructor
+     *
+     * @param startTime
+     *            The start time of this interval store
+     */
+    public InMemoryBackend(long startTime) {
+        this.startTime = startTime;
+        this.latestTime = startTime;
+        this.intervals = new ArrayList<ITmfStateInterval>();
+    }
+
+    @Override
+    public long getStartTime() {
+        return startTime;
+    }
+
+    @Override
+    public long getEndTime() {
+        return latestTime;
+    }
+
+    @Override
+    public void insertPastState(long stateStartTime, long stateEndTime,
+            int quark, ITmfStateValue value) throws TimeRangeException {
+        /* Make sure the passed start/end times make sense */
+        if (stateStartTime > stateEndTime || stateStartTime < startTime) {
+            throw new TimeRangeException();
+        }
+
+        ITmfStateInterval interval = new TmfStateInterval(stateStartTime, stateEndTime, quark, value);
+
+        /* Update the "latest seen time" */
+        if (stateEndTime > latestTime) {
+            latestTime = stateEndTime;
+        }
+
+        /* Add the interval into the array */
+        intervals.add(interval);
+    }
+
+    @Override
+    public void doQuery(List<ITmfStateInterval> currentStateInfo, long t)
+            throws TimeRangeException {
+        if (!checkValidTime(t)) {
+            throw new TimeRangeException();
+        }
+
+        /*
+         * The intervals are sorted by end time, so we can binary search to get
+         * the first possible interval, then only compare their start times.
+         */
+        ITmfStateInterval entry;
+        for (int i = binarySearchEndTime(intervals, t); i < intervals.size(); i++) {
+            entry = intervals.get(i);
+            if (entry.getStartTime() <= t) {
+                /* Add this interval to the returned values */
+                currentStateInfo.set(entry.getAttribute(), entry);
+            }
+        }
+    }
+
+    @Override
+    public ITmfStateInterval doSingularQuery(long t, int attributeQuark)
+            throws TimeRangeException, AttributeNotFoundException {
+        if (!checkValidTime(t)) {
+            throw new TimeRangeException();
+        }
+
+        /*
+         * The intervals are sorted by end time, so we can binary search to get
+         * the first possible interval, then only compare their start times.
+         */
+        ITmfStateInterval entry;
+        for (int i = binarySearchEndTime(intervals, t); i < intervals.size(); i++) {
+            entry = intervals.get(i);
+            if (entry.getStartTime() <= t && entry.getAttribute() == attributeQuark) {
+                /* This is the droid we are looking for */
+                return entry;
+            }
+        }
+        throw new AttributeNotFoundException();
+    }
+
+    @Override
+    public boolean checkValidTime(long t) {
+        /* Valid timestamps lie within [startTime, latestTime] */
+        return (t >= startTime && t <= latestTime);
+    }
+
+    @Override
+    public void finishedBuilding(long endTime) throws TimeRangeException {
+        /* Nothing to do */
+    }
+
+    @Override
+    public FileInputStream supplyAttributeTreeReader() {
+        /* Saving to disk not supported */
+        return null;
+    }
+
+    @Override
+    public File supplyAttributeTreeWriterFile() {
+        /* Saving to disk not supported */
+        return null;
+    }
+
+    @Override
+    public long supplyAttributeTreeWriterFilePosition() {
+        /* Saving to disk not supported */
+        return -1;
+    }
+
+    @Override
+    public void removeFiles() {
+        /* Nothing to do */
+    }
+
+    @Override
+    public void dispose() {
+        /* Nothing to do */
+    }
+
+    @Override
+    public void debugPrint(PrintWriter writer) {
+        writer.println(intervals.toString());
+    }
+
+    /**
+     * Find the index of the first interval in 'list' whose end time is >=
+     * 'time'. 'list' must be sorted by interval end time.
+     */
+    private static int binarySearchEndTime(List<ITmfStateInterval> list, long time) {
+        ITmfStateInterval dummyInterval = new TmfStateInterval(-1, time, -1, null);
+        int mid = Collections.binarySearch(list, dummyInterval, endComparator);
+
+        /*
+         * If the exact end time was not found, binarySearch() returns
+         * (-(insertion point) - 1), so the insertion point itself is
+         * (-mid - 1). The insertion point is the index of the first interval
+         * ending after 'time', which is exactly where we want to start.
+         * (Simply negating the result, as before, overshot by one: it could
+         * skip a matching interval and even index past the end of the list in
+         * the loop below.)
+         */
+        if (mid < 0) {
+            mid = -mid - 1;
+        }
+
+        /*
+         * Collections.binarySearch doesn't guarantee which element is returned
+         * if it falls on one of many equal ones. So make sure we are at the
+         * first one provided.
+         */
+        while ((mid > 0) &&
+                (list.get(mid).getEndTime() == list.get(mid-1).getEndTime())) {
+            mid--;
+        }
+        return mid;
+    }
+
+}
--- /dev/null
+/*******************************************************************************
+ * Copyright (c) 2013 Ericsson
+ *
+ * All rights reserved. This program and the accompanying materials are
+ * made available under the terms of the Eclipse Public License v1.0 which
+ * accompanies this distribution, and is available at
+ * http://www.eclipse.org/legal/epl-v10.html
+ *
+ * Contributors:
+ * Alexandre Montplaisir - Initial API and implementation
+ *******************************************************************************/
+
+package org.eclipse.linuxtools.internal.tmf.core.statesystem.backends;
+
+import java.io.File;
+import java.io.FileInputStream;
+import java.io.PrintWriter;
+import java.util.List;
+
+import org.eclipse.linuxtools.internal.tmf.core.statesystem.StateSystem;
+import org.eclipse.linuxtools.tmf.core.interval.ITmfStateInterval;
+import org.eclipse.linuxtools.tmf.core.statevalue.ITmfStateValue;
+
+/**
+ * An implementation of a state history back-end that simply discards *all* the
+ * intervals it receives. Obviously, no queries can be done on it. It is useful
+ * with a {@link StateSystem} on which you will only want to do
+ * "ongoing" requests.
+ *
+ * @author Alexandre Montplaisir
+ */
+public class NullBackend implements IStateHistoryBackend {
+
+    /**
+     * Constructor
+     */
+    public NullBackend() {}
+
+    @Override
+    public long getStartTime() {
+        /* Nothing is stored, so the time range is degenerate */
+        return 0;
+    }
+
+    @Override
+    public long getEndTime() {
+        /* Nothing is stored, so the time range is degenerate */
+        return 0;
+    }
+
+    /**
+     * The interval will be discarded when using a null backend.
+     */
+    @Override
+    public void insertPastState(long stateStartTime, long stateEndTime,
+            int quark, ITmfStateValue value) {
+        /* The interval is always discarded. */
+    }
+
+    @Override
+    public void finishedBuilding(long endTime) {
+        /* Nothing to do */
+    }
+
+    @Override
+    public FileInputStream supplyAttributeTreeReader() {
+        /* No file on disk to read from */
+        return null;
+    }
+
+    @Override
+    public File supplyAttributeTreeWriterFile() {
+        /* No file on disk to write to */
+        return null;
+    }
+
+    @Override
+    public long supplyAttributeTreeWriterFilePosition() {
+        /* No file on disk, so no valid position */
+        return -1;
+    }
+
+    @Override
+    public void removeFiles() {
+        /* Nothing to do */
+    }
+
+    @Override
+    public void dispose() {
+        /* Nothing to do */
+    }
+
+    /**
+     * Null back-ends cannot run queries. Nothing will be put in
+     * currentStateInfo.
+     */
+    @Override
+    public void doQuery(List<ITmfStateInterval> currentStateInfo, long t) {
+        /* Cannot do past queries */
+    }
+
+    /**
+     * Null back-ends cannot run queries. 'null' will be returned.
+     *
+     * @return Always returns null.
+     */
+    @Override
+    public ITmfStateInterval doSingularQuery(long t, int attributeQuark) {
+        /* Cannot do past queries */
+        return null;
+    }
+
+    /**
+     * Null back-ends cannot run queries.
+     *
+     * @return Always returns false.
+     */
+    @Override
+    public boolean checkValidTime(long t) {
+        /* Cannot do past queries */
+        return false;
+    }
+
+    @Override
+    public void debugPrint(PrintWriter writer) {
+        writer.println("Null history backend"); //$NON-NLS-1$
+    }
+}
--- /dev/null
+/*******************************************************************************
+ * Copyright (c) 2012 Ericsson
+ * Copyright (c) 2010, 2011 École Polytechnique de Montréal
+ * Copyright (c) 2010, 2011 Alexandre Montplaisir <alexandre.montplaisir@gmail.com>
+ *
+ * All rights reserved. This program and the accompanying materials are
+ * made available under the terms of the Eclipse Public License v1.0 which
+ * accompanies this distribution, and is available at
+ * http://www.eclipse.org/legal/epl-v10.html
+ *
+ *******************************************************************************/
+
+package org.eclipse.linuxtools.internal.tmf.core.statesystem.backends.historytree;
+
+import java.nio.ByteBuffer;
+
+/**
+ * A Core node is a first-level node of a History Tree which is not a leaf node.
+ *
+ * It extends HTNode by adding support for child nodes, and also extensions.
+ *
+ * @author alexmont
+ *
+ */
+class CoreNode extends HTNode {
+
+    /* Nb. of children this node has */
+    private int nbChildren;
+
+    /* Seq. numbers of the children nodes (size = MAX_NB_CHILDREN) */
+    private int[] children;
+
+    /* Start times of each of the children (size = MAX_NB_CHILDREN) */
+    private long[] childStart;
+
+    /* Seq number of this node's extension. -1 if none */
+    private int extension;
+
+    /**
+     * Initial constructor. Use this to initialize a new EMPTY node.
+     *
+     * @param tree
+     *            The HistoryTree to which this node belongs
+     * @param seqNumber
+     *            The (unique) sequence number assigned to this particular node
+     * @param parentSeqNumber
+     *            The sequence number of this node's parent node
+     * @param start
+     *            The earliest timestamp stored in this node
+     */
+    CoreNode(HistoryTree tree, int seqNumber, int parentSeqNumber,
+            long start) {
+        super(tree, seqNumber, parentSeqNumber, start);
+        this.nbChildren = 0;
+
+        /*
+         * We instantiate the two following arrays at full size right away,
+         * since we want to reserve that space in the node's header.
+         * "this.nbChildren" will tell us how many relevant entries there are in
+         * those tables.
+         */
+        this.children = new int[ownerTree.config.maxChildren];
+        this.childStart = new long[ownerTree.config.maxChildren];
+    }
+
+    @Override
+    protected void readSpecificHeader(ByteBuffer buffer) {
+        int i;
+
+        extension = buffer.getInt();
+        nbChildren = buffer.getInt();
+
+        children = new int[ownerTree.config.maxChildren];
+        for (i = 0; i < nbChildren; i++) {
+            children[i] = buffer.getInt();
+        }
+        /* Skip the unused (padding) entries of the children table */
+        for (i = nbChildren; i < ownerTree.config.maxChildren; i++) {
+            buffer.getInt();
+        }
+
+        this.childStart = new long[ownerTree.config.maxChildren];
+        for (i = 0; i < nbChildren; i++) {
+            childStart[i] = buffer.getLong();
+        }
+        /* Skip the unused (padding) entries of the childStart table */
+        for (i = nbChildren; i < ownerTree.config.maxChildren; i++) {
+            buffer.getLong();
+        }
+    }
+
+    @Override
+    protected void writeSpecificHeader(ByteBuffer buffer) {
+        int i;
+
+        buffer.putInt(extension);
+        buffer.putInt(nbChildren);
+
+        /* Write the "children's seq number" array */
+        for (i = 0; i < nbChildren; i++) {
+            buffer.putInt(children[i]);
+        }
+        /* Pad the rest of the reserved space with zeros */
+        for (i = nbChildren; i < ownerTree.config.maxChildren; i++) {
+            buffer.putInt(0);
+        }
+
+        /* Write the "children's start times" array */
+        for (i = 0; i < nbChildren; i++) {
+            buffer.putLong(childStart[i]);
+        }
+        /* Pad the rest of the reserved space with zeros */
+        for (i = nbChildren; i < ownerTree.config.maxChildren; i++) {
+            buffer.putLong(0);
+        }
+    }
+
+    /** @return The number of children this node currently has */
+    int getNbChildren() {
+        return nbChildren;
+    }
+
+    /** @return The sequence number of the child node at position 'index' */
+    int getChild(int index) {
+        return children[index];
+    }
+
+    /** @return The sequence number of the most-recently-added child */
+    int getLatestChild() {
+        return children[nbChildren - 1];
+    }
+
+    /** @return The start time of the child node at position 'index' */
+    long getChildStart(int index) {
+        return childStart[index];
+    }
+
+    /** @return The start time of the most-recently-added child */
+    long getLatestChildStart() {
+        return childStart[nbChildren - 1];
+    }
+
+    /** @return The sequence number of this node's extension node, -1 if none */
+    int getExtensionSequenceNumber() {
+        return extension;
+    }
+
+    /**
+     * Tell this node that it has a new child (Congrats!)
+     *
+     * @param childNode
+     *            The SHTNode object of the new child
+     */
+    void linkNewChild(CoreNode childNode) {
+        /* Only checked when assertions are enabled (-ea) */
+        assert (this.nbChildren < ownerTree.config.maxChildren);
+
+        this.children[nbChildren] = childNode.getSequenceNumber();
+        this.childStart[nbChildren] = childNode.getNodeStart();
+        this.nbChildren++;
+    }
+
+    @Override
+    protected byte getNodeType() {
+        /* Type identifier written in the node header; 1 = core node */
+        return 1;
+    }
+
+    @Override
+    protected int getTotalHeaderSize() {
+        int specificSize;
+        specificSize = 4 /* 1x int (extension node) */
+                + 4 /* 1x int (nbChildren) */
+
+                /* MAX_NB * int ('children' table) */
+                + 4 * ownerTree.config.maxChildren
+
+                /* MAX_NB * Timevalue ('childStart' table) */
+                + 8 * ownerTree.config.maxChildren;
+
+        return getCommonHeaderSize() + specificSize;
+    }
+
+    @Override
+    protected String toStringSpecific() {
+        /* Only used for debugging, shouldn't be externalized */
+        return "Core Node, " + nbChildren + " children, "; //$NON-NLS-1$ //$NON-NLS-2$
+    }
+
+}
--- /dev/null
+/*******************************************************************************
+ * Copyright (c) 2012 Ericsson
+ * Copyright (c) 2010, 2011 École Polytechnique de Montréal
+ * Copyright (c) 2010, 2011 Alexandre Montplaisir <alexandre.montplaisir@gmail.com>
+ *
+ * All rights reserved. This program and the accompanying materials are
+ * made available under the terms of the Eclipse Public License v1.0 which
+ * accompanies this distribution, and is available at
+ * http://www.eclipse.org/legal/epl-v10.html
+ *
+ *******************************************************************************/
+
+package org.eclipse.linuxtools.internal.tmf.core.statesystem.backends.historytree;
+
+import java.io.File;
+
+/**
+ * Configuration object for a StateHistoryTree.
+ *
+ * @author alexmont
+ *
+ */
+final class HTConfig {
+
+    /* File in which the history tree is stored */
+    public final File stateFile;
+    /* Size of each block (node) in the file, in bytes */
+    public final int blockSize;
+    /* Maximum number of children per core node */
+    public final int maxChildren;
+    /* Start time of the tree */
+    public final long treeStart;
+
+    /**
+     * Full constructor.
+     *
+     * @param newStateFile
+     *            The file to use for the history tree storage
+     * @param blockSize
+     *            The size of each block (node), in bytes
+     * @param maxChildren
+     *            The maximum number of children per core node
+     * @param startTime
+     *            The start time of the tree
+     */
+    HTConfig(File newStateFile, int blockSize, int maxChildren, long startTime) {
+        this.stateFile = newStateFile;
+        this.blockSize = blockSize;
+        this.maxChildren = maxChildren;
+        this.treeStart = startTime;
+    }
+
+    /**
+     * Version using default values for blocksize and maxchildren
+     * (64 KB blocks, at most 50 children per node).
+     *
+     * @param newStateFile
+     *            The file to use for the history tree storage
+     * @param startTime
+     *            The start time of the tree
+     */
+    HTConfig(File newStateFile, long startTime) {
+        this(newStateFile, 64 * 1024, 50, startTime);
+    }
+}
--- /dev/null
+/*******************************************************************************
+ * Copyright (c) 2012 Ericsson
+ * Copyright (c) 2010, 2011 École Polytechnique de Montréal
+ * Copyright (c) 2010, 2011 Alexandre Montplaisir <alexandre.montplaisir@gmail.com>
+ *
+ * All rights reserved. This program and the accompanying materials are
+ * made available under the terms of the Eclipse Public License v1.0 which
+ * accompanies this distribution, and is available at
+ * http://www.eclipse.org/legal/epl-v10.html
+ *
+ *******************************************************************************/
+
+package org.eclipse.linuxtools.internal.tmf.core.statesystem.backends.historytree;
+
+import java.io.IOException;
+import java.nio.ByteBuffer;
+
+import org.eclipse.linuxtools.tmf.core.exceptions.StateValueTypeException;
+import org.eclipse.linuxtools.tmf.core.exceptions.TimeRangeException;
+import org.eclipse.linuxtools.tmf.core.interval.ITmfStateInterval;
+import org.eclipse.linuxtools.tmf.core.statevalue.ITmfStateValue;
+import org.eclipse.linuxtools.tmf.core.statevalue.TmfStateValue;
+
+/**
+ * The interval component, which will be contained in a node of the History
+ * Tree.
+ *
+ * @author alexmont
+ *
+ */
+final class HTInterval implements ITmfStateInterval, Comparable<HTInterval> {
+
+ private final long start;
+ private final long end;
+ private final int attribute;
+ private final TmfStateValue sv;
+
+ /*
+ * Size of the strings section entry used by this interval (= 0 if not used)
+ */
+ private final int stringsEntrySize;
+
+ /**
+ * Standard constructor
+ *
+ * @param intervalStart
+ * @param intervalEnd
+ * @param attribute
+ * @param value
+ * @throws TimeRangeException
+ *             If intervalStart > intervalEnd
+ */
+ HTInterval(long intervalStart, long intervalEnd, int attribute,
+ TmfStateValue value) throws TimeRangeException {
+ if (intervalStart > intervalEnd) {
+ throw new TimeRangeException();
+ }
+
+ this.start = intervalStart;
+ this.end = intervalEnd;
+ this.attribute = attribute;
+ this.sv = value;
+ this.stringsEntrySize = computeStringsEntrySize();
+ }
+
+ /**
+ * Reader constructor. Builds the interval using an already-allocated
+ * ByteBuffer, which normally comes from a NIO FileChannel.
+ *
+ * @param buffer
+ * The ByteBuffer from which to read the information
+ * @throws IOException
+ *             If the serialized data is malformed (bad terminator byte or
+ *             start > end), which suggests a corrupt file
+ */
+ final static HTInterval readFrom(ByteBuffer buffer) throws IOException {
+ HTInterval interval;
+ long intervalStart, intervalEnd;
+ int attribute;
+ TmfStateValue value;
+ int valueOrOffset, valueSize, res;
+ byte valueType;
+ byte array[];
+
+ /* Read the Data Section entry */
+ intervalStart = buffer.getLong();
+ intervalEnd = buffer.getLong();
+ attribute = buffer.getInt();
+
+ /* Read the 'type' of the value, then react accordingly */
+ valueType = buffer.get();
+ if (valueType <= 0) {
+ /* the type of ValueOrOffset is 'value' */
+ valueOrOffset = buffer.getInt();
+ if (valueOrOffset == -1) {
+ /* Null value */
+ value = TmfStateValue.nullValue();
+ } else {
+ /* Normal integer value */
+ value = TmfStateValue.newValueInt(valueOrOffset);
+ }
+
+ } else { // valueType > 0
+ /* the type is 'offset' */
+ valueOrOffset = buffer.getInt();
+
+ /*
+ * Go read the corresponding entry in the Strings section of the
+ * block
+ */
+ buffer.mark();
+ buffer.position(valueOrOffset);
+
+ /* the first byte = the size to read */
+ valueSize = buffer.get();
+
+ /*
+ * Careful though, 'valueSize' is the total size of the entry,
+ * including the 'size' byte at the start and end (0'ed) byte at the
+ * end. Here we want 'array' to only contain the real payload of the
+ * value.
+ */
+ array = new byte[valueSize - 2];
+ buffer.get(array);
+ /*
+ * NOTE(review): this uses the platform default charset; presumably
+ * the writer (TmfStateValue.toByteArray) does the same - confirm,
+ * otherwise files are not portable across platforms.
+ */
+ value = TmfStateValue.newValueString(new String(array));
+
+ /* Confirm the 0'ed byte at the end */
+ res = buffer.get();
+ if (res != 0) {
+ throw new IOException(
+ "Invalid interval data. Maybe your file is corrupt?"); //$NON-NLS-1$
+ }
+
+ /*
+ * Restore the file pointer's position (so we can read the next
+ * interval)
+ */
+ buffer.reset();
+ }
+
+ try {
+ interval = new HTInterval(intervalStart, intervalEnd, attribute,
+ value);
+ } catch (TimeRangeException e) {
+ throw new IOException(
+ "Invalid interval data. Maybe your file is corrupt?"); //$NON-NLS-1$
+ }
+ return interval;
+ }
+
+ /**
+ * Antagonist of the previous constructor, write the Data entry
+ * corresponding to this interval in a ByteBuffer (mapped to a block in the
+ * history-file, hopefully)
+ *
+ * @param buffer
+ * The already-allocated ByteBuffer corresponding to a SHT Node
+ * @param endPosOfStringEntry
+ * The initial (before calling this function for this interval)
+ * position of the Strings Entry for this node. This will change
+ * from one call to the other if we're writing String
+ * StateValues.
+ * @return The size of the Strings Entry that was written, if any.
+ */
+ int writeInterval(ByteBuffer buffer, int endPosOfStringEntry) {
+ int sizeOfStringEntry;
+ byte[] byteArrayToWrite;
+
+ buffer.putLong(start);
+ buffer.putLong(end);
+ buffer.putInt(attribute);
+ buffer.put(sv.getType());
+
+ byteArrayToWrite = sv.toByteArray();
+
+ if (byteArrayToWrite == null) {
+ /* We write the 'valueOffset' field as a straight value. In the case
+ * of a null value, it will be unboxed as -1 */
+ try {
+ buffer.putInt(sv.unboxInt());
+ } catch (StateValueTypeException e) {
+ /*
+ * This should not happen, since the value told us it was of
+ * type Null or Integer (corrupted value?)
+ */
+ e.printStackTrace();
+ }
+ return 0; /* we didn't use a Strings section entry */
+
+ }
+ /*
+ * Size to write (+2 = +1 for size at the start, +1 for the 0 at the
+ * end)
+ */
+ sizeOfStringEntry = byteArrayToWrite.length + 2;
+
+ /* we use the valueOffset as an offset. */
+ buffer.putInt(endPosOfStringEntry - sizeOfStringEntry);
+ buffer.mark();
+ buffer.position(endPosOfStringEntry - sizeOfStringEntry);
+
+ /*
+ * write the Strings entry (1st byte = size, then the bytes, then the 0)
+ * NOTE(review): the size is written as a single (signed) byte, so the
+ * total entry must fit in one byte - confirm the writer enforces this
+ * limit upstream.
+ */
+ buffer.put((byte) sizeOfStringEntry);
+ buffer.put(byteArrayToWrite);
+ buffer.put((byte) 0);
+ assert (buffer.position() == endPosOfStringEntry);
+ buffer.reset();
+ return sizeOfStringEntry;
+ }
+
+ @Override
+ public long getStartTime() {
+ return start;
+ }
+
+ @Override
+ public long getEndTime() {
+ return end;
+ }
+
+ @Override
+ public long getViewerEndTime() {
+ /* Viewers treat the end bound as exclusive, hence the +1 */
+ return end + 1;
+ }
+
+ @Override
+ public int getAttribute() {
+ return attribute;
+ }
+
+ @Override
+ public ITmfStateValue getStateValue() {
+ return sv;
+ }
+
+ @Override
+ public boolean intersects(long timestamp) {
+ if (start <= timestamp) {
+ if (end >= timestamp) {
+ return true;
+ }
+ }
+ return false;
+ }
+
+ int getStringsEntrySize() {
+ return stringsEntrySize;
+ }
+
+ /**
+ * Total serialized size of this interval
+ *
+ * @return Data-entry size plus the Strings-section entry size (if any)
+ */
+ int getIntervalSize() {
+ return stringsEntrySize + HTNode.getDataEntrySize();
+ }
+
+ private int computeStringsEntrySize() {
+ if (sv.toByteArray() == null) {
+ return 0;
+ }
+ return sv.toByteArray().length + 2;
+ /* (+1 for the first byte indicating the size, +1 for the 0'ed byte) */
+ }
+
+ /**
+ * Compare the END TIMES of different intervals. This is used to sort the
+ * intervals when we close down a node.
+ */
+ @Override
+ public int compareTo(HTInterval other) {
+ if (this.end < other.end) {
+ return -1;
+ } else if (this.end > other.end) {
+ return 1;
+ } else {
+ return 0;
+ }
+ }
+
+ /*
+ * NOTE(review): equals() considers only the end time (via compareTo),
+ * while hashCode() is identity-based (Object.hashCode). Two "equal"
+ * intervals can thus have different hash codes, which breaks use in
+ * hash-based collections - flagged as a pre-existing quirk.
+ */
+ @Override
+ public boolean equals(Object other) {
+ if (other instanceof HTInterval) {
+ if (this.compareTo((HTInterval) other) == 0) {
+ return true;
+ }
+ }
+ return false;
+ }
+
+ @Override
+ public int hashCode() {
+ return super.hashCode();
+ }
+
+ @Override
+ public String toString() {
+ /* Only for debug, should not be externalized */
+ StringBuilder sb = new StringBuilder();
+ sb.append('[');
+ sb.append(start);
+ sb.append(", "); //$NON-NLS-1$
+ sb.append(end);
+ sb.append(']');
+
+ sb.append(", attribute = "); //$NON-NLS-1$
+ sb.append(attribute);
+
+ sb.append(", value = "); //$NON-NLS-1$
+ sb.append(sv.toString());
+
+ return sb.toString();
+ }
+}
--- /dev/null
+/*******************************************************************************
+ * Copyright (c) 2012 Ericsson
+ * Copyright (c) 2010, 2011 École Polytechnique de Montréal
+ * Copyright (c) 2010, 2011 Alexandre Montplaisir <alexandre.montplaisir@gmail.com>
+ *
+ * All rights reserved. This program and the accompanying materials are
+ * made available under the terms of the Eclipse Public License v1.0 which
+ * accompanies this distribution, and is available at
+ * http://www.eclipse.org/legal/epl-v10.html
+ *
+ *******************************************************************************/
+
+package org.eclipse.linuxtools.internal.tmf.core.statesystem.backends.historytree;
+
+import java.io.IOException;
+import java.io.PrintWriter;
+import java.nio.ByteBuffer;
+import java.nio.ByteOrder;
+import java.nio.channels.FileChannel;
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.List;
+
+import org.eclipse.linuxtools.tmf.core.exceptions.TimeRangeException;
+import org.eclipse.linuxtools.tmf.core.interval.ITmfStateInterval;
+import org.eclipse.linuxtools.tmf.core.statevalue.TmfStateValue;
+
+/**
+ * The base class for all the types of nodes that go in the History Tree.
+ *
+ * @author alexmont
+ *
+ */
+abstract class HTNode {
+
+ /* Reference to the History Tree to whom this node belongs */
+ protected final HistoryTree ownerTree;
+
+ /* Time range of this node */
+ private final long nodeStart;
+ private long nodeEnd;
+
+ /* Sequence number = position in the node section of the file */
+ private final int sequenceNumber;
+ private int parentSequenceNumber; /* = -1 if this node is the root node */
+
+ /* Where the Strings section begins (from the start of the node) */
+ private int stringSectionOffset;
+
+ /* True if this node is closed (and to be committed to disk) */
+ private boolean isDone;
+
+ /* Vector containing all the intervals contained in this node */
+ private final ArrayList<HTInterval> intervals;
+
+ HTNode(HistoryTree tree, int seqNumber, int parentSeqNumber, long start) {
+ this.ownerTree = tree;
+ this.nodeStart = start;
+ this.sequenceNumber = seqNumber;
+ this.parentSequenceNumber = parentSeqNumber;
+
+ /* An empty node's Strings section starts at the very end of the block */
+ this.stringSectionOffset = ownerTree.config.blockSize;
+ this.isDone = false;
+ this.intervals = new ArrayList<HTInterval>();
+ }
+
+ /**
+ * Reader factory constructor. Build a Node object (of the right type) by
+ * reading a block in the file.
+ *
+ * @param tree
+ * Reference to the HT which will own this node
+ * @param fc
+ * FileChannel to the history file, ALREADY SEEKED at the start
+ * of the node.
+ * @throws IOException
+ *             If the node type byte is unrecognized, or on read errors
+ */
+ final static HTNode readNode(HistoryTree tree, FileChannel fc)
+ throws IOException {
+ HTNode newNode = null;
+ int res, i;
+
+ ByteBuffer buffer = ByteBuffer.allocate(tree.config.blockSize);
+ buffer.order(ByteOrder.LITTLE_ENDIAN);
+ buffer.clear();
+ res = fc.read(buffer);
+ assert (res == tree.config.blockSize);
+ // This often breaks, so might as well keep this code not too far...
+ // if ( res != tree.config.blockSize ) {
+ // tree.debugPrintFullTree(new PrintWriter(System.out, true), null,
+ // false);
+ // assert ( false );
+ // }
+ buffer.flip();
+
+ /* Read the common header part */
+ byte type = buffer.get();
+ long start = buffer.getLong();
+ long end = buffer.getLong();
+ int seqNb = buffer.getInt();
+ int parentSeqNb = buffer.getInt();
+ int intervalCount = buffer.getInt();
+ int stringSectionOffset = buffer.getInt();
+ boolean done = byteToBool(buffer.get());
+
+ /* Now the rest of the header depends on the node type */
+ switch (type) {
+ case 1:
+ /* Core nodes */
+ newNode = new CoreNode(tree, seqNb, parentSeqNb, start);
+ newNode.readSpecificHeader(buffer);
+ break;
+
+ // TODO implement other node types
+ // case 2:
+ // /* Leaf nodes */
+ //
+ // break;
+ //
+ //
+ // case 3:
+ // /* "Claudette" (extended) nodes */
+ //
+ // break;
+
+ default:
+ /* Unrecognized node type */
+ throw new IOException();
+ }
+
+ /*
+ * At this point, we should be done reading the header and 'buffer'
+ * should only have the intervals left
+ */
+ for (i = 0; i < intervalCount; i++) {
+ newNode.intervals.add(HTInterval.readFrom(buffer));
+ }
+
+ /* Assign the node's other information we have read previously */
+ newNode.nodeEnd = end;
+ newNode.stringSectionOffset = stringSectionOffset;
+ newNode.isDone = done;
+
+ return newNode;
+ }
+
+ /**
+ * Serialize this node into its block in the history file.
+ *
+ * @param fc
+ * FileChannel to write to, already positioned at this node's block
+ * @throws IOException
+ *             On write errors
+ */
+ final void writeSelf(FileChannel fc) throws IOException {
+ int res, size;
+ int curStringsEntryEndPos = ownerTree.config.blockSize;
+
+ ByteBuffer buffer = ByteBuffer.allocate(ownerTree.config.blockSize);
+ buffer.order(ByteOrder.LITTLE_ENDIAN);
+ buffer.clear();
+
+ /* Write the common header part */
+ buffer.put(this.getNodeType());
+ buffer.putLong(nodeStart);
+ buffer.putLong(nodeEnd);
+ buffer.putInt(sequenceNumber);
+ buffer.putInt(parentSequenceNumber);
+ buffer.putInt(intervals.size());
+ buffer.putInt(stringSectionOffset);
+ buffer.put(boolToByte(isDone));
+
+ /* Now call the inner method to write the specific header part */
+ this.writeSpecificHeader(buffer);
+
+ /* Back to us, we write the intervals */
+ for (HTInterval interval : intervals) {
+ size = interval.writeInterval(buffer, curStringsEntryEndPos);
+ curStringsEntryEndPos -= size;
+ }
+
+ /*
+ * Write padding between the end of the Data section and the start of
+ * the Strings section (needed to fill the node in case there is no
+ * Strings section)
+ */
+ while (buffer.position() < stringSectionOffset) {
+ buffer.put((byte) 0);
+ }
+
+ /*
+ * If the offsets were right, the size of the Strings section should be
+ * == to the expected size
+ */
+ assert (curStringsEntryEndPos == stringSectionOffset);
+
+ /* Finally, write everything in the Buffer to disk */
+
+ // if we don't do this, flip() will lose what's after.
+ buffer.position(ownerTree.config.blockSize);
+
+ buffer.flip();
+ res = fc.write(buffer);
+ assert (res == ownerTree.config.blockSize);
+ }
+
+ /**
+ * Accessors
+ */
+ long getNodeStart() {
+ return nodeStart;
+ }
+
+ /* Returns 0 (sentinel, not a real timestamp) while the node is still open */
+ long getNodeEnd() {
+ if (this.isDone) {
+ return nodeEnd;
+ }
+ return 0;
+ }
+
+ int getSequenceNumber() {
+ return sequenceNumber;
+ }
+
+ int getParentSequenceNumber() {
+ return parentSequenceNumber;
+ }
+
+ /**
+ * Change this node's parent. Used when we create a new root node for
+ * example.
+ */
+ void setParentSequenceNumber(int newParent) {
+ parentSequenceNumber = newParent;
+ }
+
+ boolean isDone() {
+ return isDone;
+ }
+
+ /**
+ * Add an interval to this node
+ *
+ * @param newInterval
+ */
+ void addInterval(HTInterval newInterval) {
+ /* Just in case, but should be checked before even calling this function */
+ assert (newInterval.getIntervalSize() <= this.getNodeFreeSpace());
+
+ intervals.add(newInterval);
+
+ /* Update the in-node offset "pointer" */
+ stringSectionOffset -= (newInterval.getStringsEntrySize());
+ }
+
+ /**
+ * We've received word from the containerTree that newest nodes now exist to
+ * our right. (Puts isDone = true and sets the endtime)
+ *
+ * @param endtime
+ * The nodeEnd time that the node will have
+ * @throws TimeRangeException
+ */
+ void closeThisNode(long endtime) {
+ assert (endtime >= this.nodeStart);
+ // /* This also breaks often too */
+ // if ( endtime.getValue() <= this.nodeStart.getValue() ) {
+ // ownerTree.debugPrintFullTree(new PrintWriter(System.out, true), null,
+ // false);
+ // assert ( false );
+ // }
+
+ if (intervals.size() > 0) {
+ /*
+ * Sort the intervals by ascending order of their end time. This
+ * speeds up lookups a bit
+ */
+ Collections.sort(intervals);
+
+ /*
+ * Make sure there are no intervals in this node with their EndTime
+ * > the one requested. Only need to check the last one since they
+ * are now sorted
+ */
+ assert (endtime >= intervals.get(intervals.size() - 1).getEndTime());
+ }
+
+ this.isDone = true;
+ this.nodeEnd = endtime;
+ return;
+ }
+
+ /**
+ * The method to fill up the stateInfo (passed on from the Current State
+ * Tree when it does a query on the SHT). We'll replace the data in that
+ * vector with whatever relevant we can find from this node
+ *
+ * @param stateInfo
+ * The same stateInfo that comes from SHT's doQuery()
+ * @param t
+ * The timestamp for which the query is for. Only return
+ * intervals that intersect t.
+ * @throws TimeRangeException
+ */
+ void writeInfoFromNode(List<ITmfStateInterval> stateInfo, long t)
+ throws TimeRangeException {
+ assert (this.isDone); // not sure this will always be the case...
+ int startIndex;
+
+ if (intervals.size() == 0) {
+ return;
+ }
+ startIndex = getStartIndexFor(t);
+
+ for (int i = startIndex; i < intervals.size(); i++) {
+ /*
+ * Now we only have to compare the Start times, since we know the End
+ * times necessarily fit
+ */
+ if (intervals.get(i).getStartTime() <= t) {
+ stateInfo.set(intervals.get(i).getAttribute(), intervals.get(i));
+ }
+ }
+ return;
+ }
+
+ /**
+ * Get a single Interval from the information in this node If the
+ * key/timestamp pair cannot be found, we return null.
+ *
+ * @param key
+ * @param t
+ * @return The Interval containing the information we want, or null if it
+ * wasn't found
+ * @throws TimeRangeException
+ */
+ HTInterval getRelevantInterval(int key, long t) throws TimeRangeException {
+ assert (this.isDone);
+ int startIndex;
+ HTInterval curInterval;
+
+ if (intervals.size() == 0) {
+ return null;
+ }
+
+ startIndex = getStartIndexFor(t);
+
+ for (int i = startIndex; i < intervals.size(); i++) {
+ curInterval = intervals.get(i);
+ if (curInterval.getAttribute() == key
+ && curInterval.getStartTime() <= t
+ && curInterval.getEndTime() >= t) {
+ return curInterval;
+ }
+ }
+ /* We didn't find the relevant information in this node */
+ return null;
+ }
+
+ private int getStartIndexFor(long t) throws TimeRangeException {
+ HTInterval dummy;
+ int index;
+
+ /*
+ * Since the intervals are sorted by end time, we can skip all the ones
+ * at the beginning whose end times are smaller than 't'. Java does
+ * provide a .binarySearch method, but its API is quite weird...
+ */
+ dummy = new HTInterval(0, t, 0, TmfStateValue.nullValue());
+ index = Collections.binarySearch(intervals, dummy);
+
+ if (index < 0) {
+ /*
+ * .binarySearch returns a negative number if the exact value was
+ * not found. Here we just want to know where to start searching, we
+ * don't care if the value is exact or not.
+ */
+ index = -index - 1;
+
+ }
+
+ /* Sometimes binarySearch yields weird stuff... */
+ if (index < 0) {
+ index = 0;
+ }
+ if (index >= intervals.size()) {
+ index = intervals.size() - 1;
+ }
+
+ /*
+ * Another API quirkiness, the returned index is the one of the *last*
+ * element of a series of equal endtimes, which happens sometimes. We
+ * want the *first* element of such a series, to read through them
+ * again.
+ */
+ while (index > 0
+ && intervals.get(index - 1).compareTo(intervals.get(index)) == 0) {
+ index--;
+ }
+ // FIXME F*ck all this, just do our own binary search in a saner way...
+
+ // //checks to make sure startIndex works how I think it does
+ // if ( startIndex > 0 ) { assert ( intervals.get(startIndex-1).getEnd()
+ // < t ); }
+ // assert ( intervals.get(startIndex).getEnd() >= t );
+ // if ( startIndex < intervals.size()-1 ) { assert (
+ // intervals.get(startIndex+1).getEnd() >= t ); }
+
+ return index;
+ }
+
+ /**
+ * @return The offset, within the node, where the Data section ends
+ */
+ private int getDataSectionEndOffset() {
+ return this.getTotalHeaderSize() + HTNode.getDataEntrySize()
+ * intervals.size();
+ }
+
+ /**
+ * Returns the free space in the node, which is simply put, the
+ * stringSectionOffset - dataSectionOffset
+ */
+ int getNodeFreeSpace() {
+ return stringSectionOffset - this.getDataSectionEndOffset();
+ }
+
+ /**
+ * Returns the current space utilisation of this node, as a percentage.
+ * (used space / total usable space, which excludes the header)
+ */
+ long getNodeUsagePRC() {
+ float freePercent = (float) this.getNodeFreeSpace()
+ / (float) (ownerTree.config.blockSize - this.getTotalHeaderSize())
+ * 100f;
+ return (long) (100L - freePercent);
+ }
+
+ protected final static int getDataEntrySize() {
+ return 16 /* 2 x Timevalue/long (interval start + end) */
+ + 4 /* int (key) */
+ + 1 /* byte (type) */
+ + 4; /* int (valueOffset) */
+ /* = 25 */
+ }
+
+ protected final static byte boolToByte(boolean thebool) {
+ if (thebool) {
+ return (byte) 1;
+ }
+ return (byte) 0;
+ }
+
+ final static boolean byteToBool(byte thebyte) {
+ return (thebyte == (byte) 1);
+ }
+
+ /**
+ * @name Debugging functions
+ */
+
+ @SuppressWarnings("nls")
+ @Override
+ public String toString() {
+ /* Only used for debugging, shouldn't be externalized */
+ StringBuffer buf = new StringBuffer("Node #" + sequenceNumber + ", ");
+ buf.append(this.toStringSpecific());
+ buf.append(intervals.size() + " intervals (" + this.getNodeUsagePRC()
+ + "% used), ");
+
+ buf.append("[" + this.nodeStart + " - ");
+ if (this.isDone) {
+ buf = buf.append("" + this.nodeEnd + "]");
+ } else {
+ buf = buf.append("...]");
+ }
+ return buf.toString();
+ }
+
+ /**
+ * Debugging function that prints out the contents of this node
+ *
+ * @param writer
+ * PrintWriter in which we will print the debug output
+ */
+ @SuppressWarnings("nls")
+ void debugPrintIntervals(PrintWriter writer) {
+ /* Only used for debugging, shouldn't be externalized */
+ writer.println("Node #" + sequenceNumber + ":");
+
+ /* Array of children */
+ if (this.getNodeType() == 1) { /* Only Core Nodes can have children */
+ CoreNode thisNode = (CoreNode) this;
+ writer.print(" " + thisNode.getNbChildren() + " children");
+ if (thisNode.getNbChildren() >= 1) {
+ writer.print(": [ " + thisNode.getChild(0));
+ for (int i = 1; i < thisNode.getNbChildren(); i++) {
+ writer.print(", " + thisNode.getChild(i));
+ }
+ writer.print(']');
+ }
+ writer.print('\n');
+ }
+
+ /* List of intervals in the node */
+ writer.println(" Intervals contained:");
+ for (int i = 0; i < intervals.size(); i++) {
+ writer.println(intervals.get(i).toString());
+ }
+ writer.println('\n');
+ }
+
+ final static int getCommonHeaderSize() {
+ /*
+ * 1 - byte (type)
+ *
+ * 16 - 2x long (start time, end time)
+ *
+ * 16 - 4x int (seq number, parent seq number, intervalcount, strings
+ * section pos.)
+ *
+ * 1 - byte (done or not)
+ */
+ return 34;
+ }
+
+ // ------------------------------------------------------------------------
+ // Abstract methods
+ // ------------------------------------------------------------------------
+
+ protected abstract byte getNodeType();
+
+ protected abstract int getTotalHeaderSize();
+
+ protected abstract void readSpecificHeader(ByteBuffer buffer);
+
+ protected abstract void writeSpecificHeader(ByteBuffer buffer);
+
+ protected abstract String toStringSpecific();
+}
--- /dev/null
+/*******************************************************************************
+ * Copyright (c) 2012 Ericsson
+ * Copyright (c) 2010, 2011 École Polytechnique de Montréal
+ * Copyright (c) 2010, 2011 Alexandre Montplaisir <alexandre.montplaisir@gmail.com>
+ *
+ * All rights reserved. This program and the accompanying materials are
+ * made available under the terms of the Eclipse Public License v1.0 which
+ * accompanies this distribution, and is available at
+ * http://www.eclipse.org/legal/epl-v10.html
+ *
+ *******************************************************************************/
+
+package org.eclipse.linuxtools.internal.tmf.core.statesystem.backends.historytree;
+
+import java.io.File;
+import java.io.FileInputStream;
+import java.io.FileOutputStream;
+import java.io.IOException;
+import java.nio.channels.ClosedChannelException;
+import java.nio.channels.FileChannel;
+
+/**
+ * This class exists mainly for code isolation/clarification purposes. It
+ * contains all the methods and descriptors to handle reading/writing to the
+ * tree-file on disk and all the caching mechanisms. Every HistoryTree should
+ * contain 1 and only 1 HT_IO element.
+ *
+ * @author alexmont
+ *
+ */
+class HT_IO {
+
+ /* reference to the tree to which this IO-object belongs */
+ private final HistoryTree tree;
+
+ /* Fields related to the file I/O */
+ private final File historyTreeFile;
+ private final FileInputStream fis;
+ private final FileOutputStream fos;
+ private final FileChannel fcIn;
+ private final FileChannel fcOut;
+
+ /**
+ * Standard constructor
+ *
+ * @param tree
+ * @param newFile
+ * Are we creating a new file from scratch?
+ * @throws IOException
+ *             If the file cannot be created or opened
+ */
+ HT_IO(HistoryTree tree, boolean newFile) throws IOException {
+ this.tree = tree;
+ historyTreeFile = tree.config.stateFile;
+ boolean success1 = true, success2;
+
+ if (newFile) {
+ /* Create a new empty History Tree file */
+ if (historyTreeFile.exists()) {
+ success1 = historyTreeFile.delete();
+ }
+ success2 = historyTreeFile.createNewFile();
+ if (!(success1 && success2)) {
+ /* It seems we do not have permission to create the new file */
+ throw new IOException("Cannot create new file at " + //$NON-NLS-1$
+ historyTreeFile.getName());
+ }
+ fis = new FileInputStream(historyTreeFile);
+ fos = new FileOutputStream(historyTreeFile, false);
+ } else {
+ /*
+ * We want to open an existing file, make sure we don't squash the
+ * existing content when opening the fos!
+ */
+ this.fis = new FileInputStream(historyTreeFile);
+ this.fos = new FileOutputStream(historyTreeFile, true);
+ }
+ this.fcIn = fis.getChannel();
+ this.fcOut = fos.getChannel();
+ }
+
+ /**
+ * Generic "read node" method, which checks if the node is in memory first,
+ * and if it's not it goes to disk to retrieve it.
+ *
+ * @param seqNumber
+ * Sequence number of the node we want
+ * @return The wanted node in object form
+ * @throws ClosedChannelException
+ * If the channel was closed before we could read
+ */
+ HTNode readNode(int seqNumber) throws ClosedChannelException {
+ HTNode node = readNodeFromMemory(seqNumber);
+ if (node == null) {
+ return readNodeFromDisk(seqNumber);
+ }
+ return node;
+ }
+
+ /* Linear scan of the tree's in-memory "latest branch" cache */
+ private HTNode readNodeFromMemory(int seqNumber) {
+ for (HTNode node : tree.latestBranch) {
+ if (node.getSequenceNumber() == seqNumber) {
+ return node;
+ }
+ }
+ return null;
+ }
+
+ /**
+ * This method here isn't private, if we know for sure the node cannot be in
+ * memory it's a bit faster to use this directly (when opening a file from
+ * disk for example)
+ *
+ * @throws ClosedChannelException
+ * Usually happens because the file was closed while we were
+ * reading. Instead of using a big reader-writer lock, we'll
+ * just catch this exception.
+ */
+ synchronized HTNode readNodeFromDisk(int seqNumber) throws ClosedChannelException {
+ HTNode readNode;
+ try {
+ seekFCToNodePos(fcIn, seqNumber);
+ readNode = HTNode.readNode(tree, fcIn);
+ return readNode;
+ } catch (ClosedChannelException e) {
+ throw e;
+ } catch (IOException e) {
+ /* Other types of IOExceptions shouldn't happen at this point though */
+ e.printStackTrace();
+ return null;
+ }
+ }
+
+ /*
+ * NOTE(review): an IOException here is printed and swallowed, so a failed
+ * write is silent to the caller - pre-existing behavior, kept as-is.
+ */
+ void writeNode(HTNode node) {
+ try {
+ /* Position ourselves at the start of the node and write it */
+ seekFCToNodePos(fcOut, node.getSequenceNumber());
+ node.writeSelf(fcOut);
+ } catch (IOException e) {
+ /* If we were able to open the file, we should be fine now... */
+ e.printStackTrace();
+ }
+ }
+
+ FileChannel getFcOut() {
+ return this.fcOut;
+ }
+
+ /*
+ * Hands out the shared input stream, pre-positioned at the section that
+ * follows the node blocks. The caller must not close it.
+ */
+ FileInputStream supplyATReader() {
+ try {
+ /*
+ * Position ourselves at the start of the Mapping section in the
+ * file (which is right after the Blocks)
+ */
+ seekFCToNodePos(fcIn, tree.getNodeCount());
+ } catch (IOException e) {
+ e.printStackTrace();
+ }
+ return fis;
+ }
+
+ File supplyATWriterFile() {
+ return tree.config.stateFile;
+ }
+
+ long supplyATWriterFilePos() {
+ return HistoryTree.getTreeHeaderSize()
+ + ((long) tree.getNodeCount() * tree.config.blockSize);
+ }
+
+ synchronized void closeFile() {
+ try {
+ fis.close();
+ fos.close();
+ } catch (IOException e) {
+ e.printStackTrace();
+ }
+ }
+
+ synchronized void deleteFile() {
+ closeFile();
+
+ if(!historyTreeFile.delete()) {
+ /* We didn't succeed in deleting the file */
+ //TODO log it?
+ }
+ }
+
+ /**
+ * Seek the given FileChannel to the position corresponding to the node that
+ * has seqNumber
+ *
+ * @param seqNumber
+ * @throws IOException
+ */
+ private void seekFCToNodePos(FileChannel fc, int seqNumber)
+ throws IOException {
+ fc.position(HistoryTree.getTreeHeaderSize() + (long) seqNumber
+ * tree.config.blockSize);
+ /*
+ * cast to (long) is needed to make sure the result is a long too and
+ * doesn't get truncated
+ */
+ }
+
+}
--- /dev/null
+/*******************************************************************************
+ * Copyright (c) 2012 Ericsson
+ * Copyright (c) 2010, 2011 École Polytechnique de Montréal
+ * Copyright (c) 2010, 2011 Alexandre Montplaisir <alexandre.montplaisir@gmail.com>
+ *
+ * All rights reserved. This program and the accompanying materials are
+ * made available under the terms of the Eclipse Public License v1.0 which
+ * accompanies this distribution, and is available at
+ * http://www.eclipse.org/legal/epl-v10.html
+ *
+ *******************************************************************************/
+
+package org.eclipse.linuxtools.internal.tmf.core.statesystem.backends.historytree;
+
+import java.io.File;
+import java.io.FileInputStream;
+import java.io.IOException;
+import java.io.PrintWriter;
+import java.nio.ByteBuffer;
+import java.nio.ByteOrder;
+import java.nio.channels.ClosedChannelException;
+import java.nio.channels.FileChannel;
+import java.util.Vector;
+
+import org.eclipse.linuxtools.tmf.core.exceptions.TimeRangeException;
+
+/**
+ * Meta-container for the History Tree. This structure contains all the
+ * high-level data relevant to the tree.
+ *
+ * @author alexmont
+ *
+ */
+class HistoryTree {
+
+ private static final int HISTORY_FILE_MAGIC_NUMBER = 0x05FFA900;
+
+ /**
+ * File format version. Increment minor on backwards-compatible changes.
+ * Increment major + set minor back to 0 when breaking compatibility.
+ */
+ private static final int MAJOR_VERSION = 3;
+ private static final byte MINOR_VERSION = 0;
+
+ /**
+ * Tree-specific configuration
+ */
+ /* Container for all the configuration constants */
+ protected final HTConfig config;
+
+ /* Reader/writer object */
+ private final HT_IO treeIO;
+
+ /**
+ * Variable Fields (will change throughout the existance of the SHT)
+ */
+ /* Latest timestamp found in the tree (at any given moment) */
+ private long treeEnd;
+
+ /* How many nodes exist in this tree, total */
+ private int nodeCount;
+
+ /* "Cache" to keep the active nodes in memory */
+ protected Vector<CoreNode> latestBranch;
+
+ /**
+ * Create a new State History from scratch, using a SHTConfig object for
+ * configuration
+ *
+ * @param conf
+ * @throws IOException
+ */
+ private HistoryTree(HTConfig conf) throws IOException {
+ /*
+ * Simple assertion to make sure we have enough place in the 0th block
+ * for the tree configuration
+ */
+ assert (conf.blockSize >= getTreeHeaderSize());
+
+ config = conf;
+ treeEnd = conf.treeStart;
+ nodeCount = 0;
+ latestBranch = new Vector<CoreNode>();
+
+ /* Prepare the IO object (true = create a brand-new file) */
+ treeIO = new HT_IO(this, true);
+
+ /* Add the first node to the tree (root, so parent seq number = -1) */
+ CoreNode firstNode = initNewCoreNode(-1, conf.treeStart);
+ latestBranch.add(firstNode);
+ }
+
+ /**
+ * "New State History" constructor, which doesn't use SHTConfig but the
+ * individual values separately. Kept for now for backwards compatibility,
+ * but you should definitely consider using SHTConfig instead (since its
+ * contents can then change without directly affecting SHT's API).
+ */
+ /* Convenience overload: simply wraps the arguments into an HTConfig */
+ HistoryTree(File newStateFile, int blockSize, int maxChildren,
+ long startTime) throws IOException {
+ this(new HTConfig(newStateFile, blockSize, maxChildren, startTime));
+ }
+
+ /**
+ * "Reader" constructor : instantiate a SHTree from an existing tree file on
+ * disk
+ *
+ * @param existingFileName
+ * Path/filename of the history-file we are to open
+ * @throws IOException
+ */
+ HistoryTree(File existingStateFile) throws IOException {
+ /*
+ * Open the file ourselves, get the tree header information we need,
+ * then pass on the descriptor to the TreeIO object.
+ */
+ int rootNodeSeqNb, res;
+ int bs, maxc;
+ long startTime;
+
+ /* Java I/O mumbo jumbo... */
+ if (!existingStateFile.exists()) {
+ throw new IOException("Selected state file does not exist"); //$NON-NLS-1$
+ }
+ if (existingStateFile.length() <= 0) {
+ throw new IOException("Invalid state file selected, " + //$NON-NLS-1$
+ "target file is empty"); //$NON-NLS-1$
+ }
+
+ FileInputStream fis = new FileInputStream(existingStateFile);
+ ByteBuffer buffer = ByteBuffer.allocate(getTreeHeaderSize());
+ FileChannel fc = fis.getChannel();
+ buffer.order(ByteOrder.LITTLE_ENDIAN);
+ buffer.clear();
+ fc.read(buffer);
+ buffer.flip();
+
+ /*
+ * Check the magic number,to make sure we're opening the right type of
+ * file
+ */
+ res = buffer.getInt();
+ if (res != HISTORY_FILE_MAGIC_NUMBER) {
+ fc.close();
+ fis.close();
+ throw new IOException("Selected file does not" + //$NON-NLS-1$
+ "look like a History Tree file"); //$NON-NLS-1$
+ }
+
+ res = buffer.getInt(); /* Major version number */
+ if (res != MAJOR_VERSION) {
+ fc.close();
+ fis.close();
+ throw new IOException("Select History Tree file is of an older " //$NON-NLS-1$
+ + "format. Please use a previous version of " //$NON-NLS-1$
+ + "the parser to open it."); //$NON-NLS-1$
+ }
+
+ res = buffer.getInt(); /* Minor version number (read but currently unused) */
+
+ bs = buffer.getInt(); /* Block Size */
+ maxc = buffer.getInt(); /* Max nb of children per node */
+
+ this.nodeCount = buffer.getInt();
+ rootNodeSeqNb = buffer.getInt();
+ startTime = buffer.getLong();
+
+ this.config = new HTConfig(existingStateFile, bs, maxc, startTime);
+ fc.close();
+ fis.close();
+ /*
+ * FIXME We close fis here and the TreeIO will then reopen the same
+ * file, not extremely elegant. But how to pass the information here to
+ * the SHT otherwise?
+ */
+ this.treeIO = new HT_IO(this, false);
+
+ rebuildLatestBranch(rootNodeSeqNb);
+ this.treeEnd = latestBranch.firstElement().getNodeEnd();
+
+ /*
+ * Make sure the history start time we read previously is consistent
+ * with was is actually in the root node.
+ */
+ if (startTime != latestBranch.firstElement().getNodeStart()) {
+ /* fc/fis were already closed above; these calls are harmless no-ops */
+ fc.close();
+ fis.close();
+ throw new IOException("Inconsistent start times in the" + //$NON-NLS-1$
+ "history file, it might be corrupted."); //$NON-NLS-1$
+ }
+ }
+
+    /**
+     * "Save" the tree to disk: commit every node of the latest branch through
+     * the treeIO object, then write the tree's configuration into the header
+     * (block 0) of the history file.
+     *
+     * @param requestedEndTime
+     *            The end time to record for this history
+     */
+    void closeTree(long requestedEndTime) {
+        /*
+         * Work-around the "empty branches" that get created when the root node
+         * becomes full. Overwrite the tree's end time with the original wanted
+         * end-time, to ensure no queries are sent into those empty nodes.
+         *
+         * This won't be needed once extended nodes are implemented.
+         */
+        this.treeEnd = requestedEndTime;
+
+        /* Close off the latest branch of the tree */
+        for (int i = 0; i < latestBranch.size(); i++) {
+            CoreNode branchNode = latestBranch.get(i);
+            branchNode.closeThisNode(treeEnd);
+            treeIO.writeNode(branchNode);
+        }
+
+        /* Only use this for debugging purposes, it's VERY slow! */
+        // this.checkIntegrity();
+
+        FileChannel fc = treeIO.getFcOut();
+        ByteBuffer buffer = ByteBuffer.allocate(getTreeHeaderSize());
+        buffer.order(ByteOrder.LITTLE_ENDIAN);
+        buffer.clear();
+
+        /* Save the config of the tree to the header of the file */
+        try {
+            fc.position(0);
+
+            buffer.putInt(HISTORY_FILE_MAGIC_NUMBER);
+
+            buffer.putInt(MAJOR_VERSION);
+            buffer.putInt(MINOR_VERSION);
+
+            buffer.putInt(config.blockSize);
+            buffer.putInt(config.maxChildren);
+
+            buffer.putInt(nodeCount);
+
+            /* root node seq. nb */
+            buffer.putInt(latestBranch.firstElement().getSequenceNumber());
+
+            /* start time of this history */
+            buffer.putLong(latestBranch.firstElement().getNodeStart());
+
+            buffer.flip();
+            int written = fc.write(buffer);
+            assert (written <= getTreeHeaderSize());
+            /* done writing the file header */
+
+        } catch (IOException e) {
+            /* We should not have any problems at this point... */
+            e.printStackTrace();
+        } finally {
+            try {
+                fc.close();
+            } catch (IOException e) {
+                e.printStackTrace();
+            }
+        }
+    }
+
+    /**
+     * @name Accessors
+     */
+
+    /** @return The start time of this tree (and of the history it contains) */
+    long getTreeStart() {
+        return config.treeStart;
+    }
+
+    /** @return The current end time of this tree */
+    long getTreeEnd() {
+        return treeEnd;
+    }
+
+    /** @return The total number of nodes allocated in this tree so far */
+    int getNodeCount() {
+        return nodeCount;
+    }
+
+    /** @return The IO object through which this tree reads/writes its nodes */
+    HT_IO getTreeIO() {
+        return treeIO;
+    }
+
+    /**
+     * Rebuild the latestBranch "cache" object by reading the nodes from disk
+     * (When we are opening an existing file on disk and want to append to it,
+     * for example).
+     *
+     * @param rootNodeSeqNb
+     *            The sequence number of the root node, so we know where to
+     *            start
+     * @throws ClosedChannelException
+     *             If the history file's channel was closed while reading
+     */
+    private void rebuildLatestBranch(int rootNodeSeqNb) throws ClosedChannelException {
+        this.latestBranch = new Vector<CoreNode>();
+
+        /* Start at the root, then keep following each node's latest child */
+        CoreNode currentNode = (CoreNode) treeIO.readNodeFromDisk(rootNodeSeqNb);
+        latestBranch.add(currentNode);
+        while (currentNode.getNbChildren() > 0) {
+            currentNode = (CoreNode) treeIO.readNodeFromDisk(currentNode.getLatestChild());
+            latestBranch.add(currentNode);
+        }
+    }
+
+    /**
+     * Insert an interval in the tree.
+     *
+     * @param interval
+     *            The interval to insert
+     * @throws TimeRangeException
+     *             If the interval starts before the tree's start time
+     */
+    void insertInterval(HTInterval interval) throws TimeRangeException {
+        /* Intervals from before the history started cannot be stored */
+        if (interval.getStartTime() < config.treeStart) {
+            throw new TimeRangeException();
+        }
+        /* Insertion attempts always begin at the "latest leaf" */
+        int latestLeafIndex = latestBranch.size() - 1;
+        tryInsertAtNode(interval, latestLeafIndex);
+    }
+
+    /**
+     * Inner method to find in which node we should add the interval.
+     *
+     * @param interval
+     *            The interval to add to the tree
+     * @param indexOfNode
+     *            The index *in the latestBranch* where we are trying the
+     *            insertion
+     */
+    private void tryInsertAtNode(HTInterval interval, int indexOfNode) {
+        HTNode targetNode = latestBranch.get(indexOfNode);
+
+        /* Verify if there is enough room in this node to store this interval */
+        if (interval.getIntervalSize() > targetNode.getNodeFreeSpace()) {
+            /* Nope, not enough room. Insert in a new sibling instead. */
+            addSiblingNode(indexOfNode);
+            /* The branch may have changed shape; retry from the new leaf */
+            tryInsertAtNode(interval, latestBranch.size() - 1);
+            return;
+        }
+
+        /* Make sure the interval time range fits this node */
+        if (interval.getStartTime() < targetNode.getNodeStart()) {
+            /*
+             * No, this interval starts before the startTime of this node. We
+             * need to check recursively in parents if it can fit.
+             */
+            assert (indexOfNode >= 1);
+            tryInsertAtNode(interval, indexOfNode - 1);
+            return;
+        }
+
+        /*
+         * Ok, there is room, and the interval fits in this time slot. Let's add
+         * it.
+         */
+        targetNode.addInterval(interval);
+
+        /* Update treeEnd if needed */
+        if (interval.getEndTime() > this.treeEnd) {
+            this.treeEnd = interval.getEndTime();
+        }
+        return;
+    }
+
+    /**
+     * Method to add a sibling to any node in the latest branch. This will add
+     * children back down to the leaf level, if needed.
+     *
+     * @param indexOfNode
+     *            The index in latestBranch where we start adding
+     */
+    private void addSiblingNode(int indexOfNode) {
+        int i;
+        CoreNode newNode, prevNode;
+        /* The new branch starts right after the current end of the tree */
+        long splitTime = treeEnd;
+
+        assert (indexOfNode < latestBranch.size());
+
+        /* Check if we need to add a new root node */
+        if (indexOfNode == 0) {
+            addNewRootNode();
+            return;
+        }
+
+        /* Check if we can indeed add a child to the target parent */
+        if (latestBranch.get(indexOfNode - 1).getNbChildren() == config.maxChildren) {
+            /* If not, add a branch starting one level higher instead */
+            addSiblingNode(indexOfNode - 1);
+            return;
+        }
+
+        /* Split off the new branch from the old one */
+        for (i = indexOfNode; i < latestBranch.size(); i++) {
+            /* Commit the old node to disk; it won't receive intervals anymore */
+            latestBranch.get(i).closeThisNode(splitTime);
+            treeIO.writeNode(latestBranch.get(i));
+
+            prevNode = latestBranch.get(i - 1);
+            newNode = initNewCoreNode(prevNode.getSequenceNumber(),
+                    splitTime + 1);
+            prevNode.linkNewChild(newNode);
+
+            /* Replace the old node with its new sibling in the cached branch */
+            latestBranch.set(i, newNode);
+        }
+        return;
+    }
+
+    /**
+     * Similar to the previous method, except here we rebuild a completely new
+     * latestBranch
+     */
+    private void addNewRootNode() {
+        int i, depth;
+        CoreNode oldRootNode, newRootNode, newNode, prevNode;
+        long splitTime = this.treeEnd;
+
+        oldRootNode = latestBranch.firstElement();
+        /* The new root spans the whole history (-1 = it has no parent) */
+        newRootNode = initNewCoreNode(-1, config.treeStart);
+
+        /* Tell the old root node that it isn't root anymore */
+        oldRootNode.setParentSequenceNumber(newRootNode.getSequenceNumber());
+
+        /* Close off the whole current latestBranch */
+        for (i = 0; i < latestBranch.size(); i++) {
+            latestBranch.get(i).closeThisNode(splitTime);
+            treeIO.writeNode(latestBranch.get(i));
+        }
+
+        /* Link the new root to its first child (the previous root node) */
+        newRootNode.linkNewChild(oldRootNode);
+
+        /*
+         * Rebuild a new latestBranch: the new root, plus one fresh node per
+         * former level (the tree is now one level deeper).
+         */
+        depth = latestBranch.size();
+        latestBranch = new Vector<CoreNode>();
+        latestBranch.add(newRootNode);
+        for (i = 1; i < depth + 1; i++) {
+            prevNode = latestBranch.get(i - 1);
+            /*
+             * NOTE(review): addSiblingNode() parents new nodes with
+             * prevNode.getSequenceNumber(); here getParentSequenceNumber() is
+             * used, which yields the grandparent (-1 for i == 1). Verify
+             * whether getSequenceNumber() was intended.
+             */
+            newNode = initNewCoreNode(prevNode.getParentSequenceNumber(),
+                    splitTime + 1);
+            prevNode.linkNewChild(newNode);
+            latestBranch.add(newNode);
+        }
+    }
+
+    /**
+     * Add a new empty node to the tree.
+     *
+     * @param parentSeqNumber
+     *            Sequence number of this node's parent
+     * @param startTime
+     *            Start time of the new node
+     * @return The newly created node
+     */
+    private CoreNode initNewCoreNode(int parentSeqNumber, long startTime) {
+        /* The node's sequence number is simply the next free one */
+        CoreNode newNode = new CoreNode(this, nodeCount, parentSeqNumber, startTime);
+        nodeCount++;
+
+        /* Update the treeEnd if needed */
+        if (startTime >= treeEnd) {
+            treeEnd = startTime + 1;
+        }
+        return newNode;
+    }
+
+    /**
+     * Inner method to select the next child of the current node intersecting
+     * the given timestamp. Useful for moving down the tree following one
+     * branch.
+     *
+     * @param currentNode
+     *            The node from which to pick the child
+     * @param t
+     *            The timestamp we are looking for
+     * @return The child node intersecting t
+     * @throws ClosedChannelException
+     *             If the file channel was closed while we were reading the tree
+     */
+    HTNode selectNextChild(CoreNode currentNode, long t) throws ClosedChannelException {
+        assert (currentNode.getNbChildren() > 0);
+        int potentialNextSeqNb = currentNode.getSequenceNumber();
+
+        /*
+         * Keep the last child whose start time is <= t (assumes children are
+         * ordered by start time - TODO confirm against CoreNode.linkNewChild)
+         */
+        for (int i = 0; i < currentNode.getNbChildren(); i++) {
+            if (t >= currentNode.getChildStart(i)) {
+                potentialNextSeqNb = currentNode.getChild(i);
+            } else {
+                break;
+            }
+        }
+        /*
+         * Once we exit this loop, we should have found a children to follow. If
+         * we didn't, there's a problem.
+         */
+        assert (potentialNextSeqNb != currentNode.getSequenceNumber());
+
+        /*
+         * Since this code path is quite performance-critical, avoid iterating
+         * through the whole latestBranch array if we know for sure the next
+         * node has to be on disk
+         */
+        if (currentNode.isDone()) {
+            return treeIO.readNodeFromDisk(potentialNextSeqNb);
+        }
+        return treeIO.readNode(potentialNextSeqNb);
+    }
+
+    /**
+     * Helper function to get the size of the "tree header" in the tree-file The
+     * nodes will use this offset to know where they should be in the file. This
+     * should always be a multiple of 4K.
+     *
+     * @return The size of the tree header, in bytes
+     */
+    static int getTreeHeaderSize() {
+        return 4096;
+    }
+
+    /** @return The current size, in bytes, of the history file on disk */
+    long getFileSize() {
+        return config.stateFile.length();
+    }
+
+    // ------------------------------------------------------------------------
+    // Test/debugging methods
+    // ------------------------------------------------------------------------
+
+    /**
+     * Debugging method: check the integrity of one node (start/end times
+     * consistent with its first/last children, child-start cache matching the
+     * actual children). Failures are reported on stdout.
+     *
+     * @param zenode
+     *            The node to check
+     * @return True if the node passed all checks, false otherwise
+     */
+    /* Only used for debugging, shouldn't be externalized */
+    @SuppressWarnings("nls")
+    boolean checkNodeIntegrity(HTNode zenode) {
+
+        HTNode otherNode;
+        CoreNode node;
+        /* Local, single-threaded use: StringBuilder, not StringBuffer */
+        StringBuilder buf = new StringBuilder();
+        boolean ret = true;
+
+        // FIXME /* Only testing Core Nodes for now */
+        if (!(zenode instanceof CoreNode)) {
+            return true;
+        }
+
+        node = (CoreNode) zenode;
+
+        try {
+            /*
+             * Test that this node's start and end times match the start of the
+             * first child and the end of the last child, respectively
+             */
+            if (node.getNbChildren() > 0) {
+                otherNode = treeIO.readNode(node.getChild(0));
+                if (node.getNodeStart() != otherNode.getNodeStart()) {
+                    buf.append("Start time of node (" + node.getNodeStart() + ") "
+                            + "does not match start time of first child " + "("
+                            + otherNode.getNodeStart() + "), " + "node #"
+                            + otherNode.getSequenceNumber() + ")\n");
+                    ret = false;
+                }
+                if (node.isDone()) {
+                    otherNode = treeIO.readNode(node.getLatestChild());
+                    if (node.getNodeEnd() != otherNode.getNodeEnd()) {
+                        buf.append("End time of node (" + node.getNodeEnd()
+                                + ") does not match end time of last child ("
+                                + otherNode.getNodeEnd() + ", node #"
+                                + otherNode.getSequenceNumber() + ")\n");
+                        ret = false;
+                    }
+                }
+            }
+
+            /*
+             * Test that the childStartTimes[] array matches the real nodes' start
+             * times
+             */
+            for (int i = 0; i < node.getNbChildren(); i++) {
+                otherNode = treeIO.readNode(node.getChild(i));
+                if (otherNode.getNodeStart() != node.getChildStart(i)) {
+                    buf.append(" Expected start time of child node #"
+                            + node.getChild(i) + ": " + node.getChildStart(i)
+                            + "\n" + " Actual start time of node #"
+                            + otherNode.getSequenceNumber() + ": "
+                            + otherNode.getNodeStart() + "\n");
+                    ret = false;
+                }
+            }
+
+        } catch (ClosedChannelException e) {
+            e.printStackTrace();
+        }
+
+        if (!ret) {
+            System.out.println("");
+            System.out.println("SHT: Integrity check failed for node #"
+                    + node.getSequenceNumber() + ":");
+            System.out.println(buf.toString());
+        }
+        return ret;
+    }
+
+    /** Debugging method: run checkNodeIntegrity() on every node of the tree. */
+    void checkIntegrity() {
+        try {
+            for (int seqNb = 0; seqNb < nodeCount; seqNb++) {
+                checkNodeIntegrity(treeIO.readNode(seqNb));
+            }
+        } catch (ClosedChannelException e) {
+            e.printStackTrace();
+        }
+    }
+
+    /* Only used for debugging, shouldn't be externalized */
+    @SuppressWarnings("nls")
+    @Override
+    public String toString() {
+        /* Human-readable summary of this tree's configuration and shape */
+        return "Information on the current tree:\n\n" + "Blocksize: "
+                + config.blockSize + "\n" + "Max nb. of children per node: "
+                + config.maxChildren + "\n" + "Number of nodes: " + nodeCount
+                + "\n" + "Depth of the tree: " + latestBranch.size() + "\n"
+                + "Size of the treefile: " + this.getFileSize() + "\n"
+                + "Root node has sequence number: "
+                + latestBranch.firstElement().getSequenceNumber() + "\n"
+                + "'Latest leaf' has sequence number: "
+                + latestBranch.lastElement().getSequenceNumber();
+    }
+
+    /* Current recursion depth while preOrderPrint runs (not thread-safe) */
+    private int curDepth;
+
+    /**
+     * Start at currentNode and print the contents of all its children, in
+     * pre-order. Give the root node in parameter to visit the whole tree, and
+     * have a nice overview.
+     */
+    @SuppressWarnings("nls")
+    private void preOrderPrint(PrintWriter writer, boolean printIntervals,
+            CoreNode currentNode) {
+        /* Only used for debugging, shouldn't be externalized */
+        int i, j;
+        HTNode nextNode;
+
+        writer.println(currentNode.toString());
+        if (printIntervals) {
+            currentNode.debugPrintIntervals(writer);
+        }
+        curDepth++;
+
+        try {
+            for (i = 0; i < currentNode.getNbChildren(); i++) {
+                nextNode = treeIO.readNode(currentNode.getChild(i));
+                assert (nextNode instanceof CoreNode); // TODO temporary
+                /* Indent proportionally to the current depth in the tree */
+                for (j = 0; j < curDepth - 1; j++) {
+                    writer.print(" ");
+                }
+                writer.print("+-");
+                preOrderPrint(writer, printIntervals, (CoreNode) nextNode);
+            }
+        } catch (ClosedChannelException e) {
+            e.printStackTrace();
+        }
+        curDepth--;
+        return;
+    }
+
+    /**
+     * Print out the full tree for debugging purposes
+     *
+     * @param writer
+     *            PrintWriter in which to write the output
+     * @param printIntervals
+     *            Says if you want to output the full interval information
+     */
+    void debugPrintFullTree(PrintWriter writer, boolean printIntervals) {
+        /* Only used for debugging, shouldn't be externalized */
+        /* First pass: print the tree structure only */
+        curDepth = 0;
+        this.preOrderPrint(writer, false, latestBranch.firstElement());
+
+        if (printIntervals) {
+            /* Second pass: same traversal, with the interval contents */
+            writer.println("\nDetails of intervals:"); //$NON-NLS-1$
+            curDepth = 0;
+            this.preOrderPrint(writer, true, latestBranch.firstElement());
+        }
+        writer.println('\n');
+    }
+
+}
--- /dev/null
+/*******************************************************************************
+ * Copyright (c) 2012 Ericsson
+ * Copyright (c) 2010, 2011 École Polytechnique de Montréal
+ * Copyright (c) 2010, 2011 Alexandre Montplaisir <alexandre.montplaisir@gmail.com>
+ *
+ * All rights reserved. This program and the accompanying materials are
+ * made available under the terms of the Eclipse Public License v1.0 which
+ * accompanies this distribution, and is available at
+ * http://www.eclipse.org/legal/epl-v10.html
+ *
+ *******************************************************************************/
+
+package org.eclipse.linuxtools.internal.tmf.core.statesystem.backends.historytree;
+
+import java.io.File;
+import java.io.FileInputStream;
+import java.io.IOException;
+import java.io.PrintWriter;
+import java.nio.channels.ClosedChannelException;
+import java.util.List;
+
+import org.eclipse.linuxtools.internal.tmf.core.statesystem.backends.IStateHistoryBackend;
+import org.eclipse.linuxtools.tmf.core.exceptions.StateSystemDisposedException;
+import org.eclipse.linuxtools.tmf.core.exceptions.TimeRangeException;
+import org.eclipse.linuxtools.tmf.core.interval.ITmfStateInterval;
+import org.eclipse.linuxtools.tmf.core.statevalue.ITmfStateValue;
+import org.eclipse.linuxtools.tmf.core.statevalue.TmfStateValue;
+
+/**
+ * History Tree backend for storing a state history. This is the basic version
+ * that runs in the same thread as the class creating it.
+ *
+ * @author alexmont
+ *
+ */
+public class HistoryTreeBackend implements IStateHistoryBackend {
+
+    /** The history tree that sits underneath */
+    protected final HistoryTree sht;
+
+    /** Direct reference to the tree's IO object */
+    private final HT_IO treeIO;
+
+    /** Indicates if the history tree construction is done */
+    protected boolean isFinishedBuilding = false;
+
+    /**
+     * Constructor for new history files. Use this when creating a new history
+     * from scratch.
+     *
+     * @param newStateFile
+     *            The filename/location where to store the state history (Should
+     *            end in .ht)
+     * @param blockSize
+     *            The size of the blocks in the history file. This should be a
+     *            multiple of 4096.
+     * @param maxChildren
+     *            The maximum number of children each core node can have
+     * @param startTime
+     *            The earliest time stamp that will be stored in the history
+     * @throws IOException
+     *             Thrown if we can't create the file for some reason
+     */
+    public HistoryTreeBackend(File newStateFile, int blockSize,
+            int maxChildren, long startTime) throws IOException {
+        sht = new HistoryTree(newStateFile, blockSize, maxChildren, startTime);
+        treeIO = sht.getTreeIO();
+    }
+
+    /**
+     * Constructor for new history files. Use this when creating a new history
+     * from scratch. This version supplies sane defaults for the configuration
+     * parameters.
+     *
+     * @param newStateFile
+     *            The filename/location where to store the state history (Should
+     *            end in .ht)
+     * @param startTime
+     *            The earliest time stamp that will be stored in the history
+     * @throws IOException
+     *             Thrown if we can't create the file for some reason
+     */
+    public HistoryTreeBackend(File newStateFile, long startTime)
+            throws IOException {
+        /* Defaults: 64 KB blocks, at most 50 children per core node */
+        this(newStateFile, 64 * 1024, 50, startTime);
+    }
+
+    /**
+     * Existing history constructor. Use this to open an existing state-file.
+     *
+     * @param existingStateFile
+     *            Filename/location of the history we want to load
+     * @throws IOException
+     *             If we can't read the file, if it doesn't exist or is not
+     *             recognized
+     */
+    public HistoryTreeBackend(File existingStateFile) throws IOException {
+        sht = new HistoryTree(existingStateFile);
+        treeIO = sht.getTreeIO();
+        /* An existing file is, by definition, already fully built */
+        isFinishedBuilding = true;
+    }
+
+    @Override
+    public long getStartTime() {
+        return sht.getTreeStart();
+    }
+
+    @Override
+    public long getEndTime() {
+        return sht.getTreeEnd();
+    }
+
+    @Override
+    public void insertPastState(long stateStartTime, long stateEndTime,
+            int quark, ITmfStateValue value) throws TimeRangeException {
+        HTInterval interval = new HTInterval(stateStartTime, stateEndTime,
+                quark, (TmfStateValue) value);
+
+        /* Start insertions at the "latest leaf" */
+        sht.insertInterval(interval);
+    }
+
+    @Override
+    public void finishedBuilding(long endTime) {
+        sht.closeTree(endTime);
+        isFinishedBuilding = true;
+    }
+
+    @Override
+    public FileInputStream supplyAttributeTreeReader() {
+        return treeIO.supplyATReader();
+    }
+
+    @Override
+    public File supplyAttributeTreeWriterFile() {
+        return treeIO.supplyATWriterFile();
+    }
+
+    @Override
+    public long supplyAttributeTreeWriterFilePosition() {
+        return treeIO.supplyATWriterFilePos();
+    }
+
+    @Override
+    public void removeFiles() {
+        treeIO.deleteFile();
+    }
+
+    @Override
+    public void dispose() {
+        if (isFinishedBuilding) {
+            treeIO.closeFile();
+        } else {
+            /*
+             * The build is being interrupted, delete the file we partially
+             * built since it won't be complete, so shouldn't be re-used in the
+             * future (.deleteFile() will close the file first)
+             */
+            treeIO.deleteFile();
+        }
+    }
+
+    @Override
+    public void doQuery(List<ITmfStateInterval> stateInfo, long t)
+            throws TimeRangeException, StateSystemDisposedException {
+        if (!checkValidTime(t)) {
+            /* We can't possibly have information about this query */
+            throw new TimeRangeException();
+        }
+
+        /* We start by reading the information in the root node */
+        // FIXME using CoreNode for now, we'll have to redo this part to handle
+        // different node types
+        CoreNode currentNode = sht.latestBranch.firstElement();
+        currentNode.writeInfoFromNode(stateInfo, t);
+
+        /* Then we follow the branch down in the relevant children */
+        try {
+            while (currentNode.getNbChildren() > 0) {
+                currentNode = (CoreNode) sht.selectNextChild(currentNode, t);
+                currentNode.writeInfoFromNode(stateInfo, t);
+            }
+        } catch (ClosedChannelException e) {
+            /* The file was closed under us: the backend was disposed of */
+            throw new StateSystemDisposedException();
+        }
+
+        /*
+         * The stateInfo should now be filled with everything needed, we pass
+         * the control back to the State System.
+         */
+        return;
+    }
+
+    @Override
+    public ITmfStateInterval doSingularQuery(long t, int attributeQuark)
+            throws TimeRangeException, StateSystemDisposedException {
+        return getRelevantInterval(t, attributeQuark);
+    }
+
+    @Override
+    public boolean checkValidTime(long t) {
+        return (t >= sht.getTreeStart() && t <= sht.getTreeEnd());
+    }
+
+    /**
+     * Inner method to find the interval in the tree containing the requested
+     * key/timestamp pair, wherever in which node it is.
+     *
+     * @param t
+     *            The timestamp of the query
+     * @param key
+     *            The attribute quark to look for
+     * @return The interval containing this timestamp for this attribute
+     */
+    private HTInterval getRelevantInterval(long t, int key)
+            throws TimeRangeException, StateSystemDisposedException {
+        if (!checkValidTime(t)) {
+            throw new TimeRangeException();
+        }
+
+        // FIXME using CoreNode for now, we'll have to redo this part to handle
+        // different node types
+        CoreNode currentNode = sht.latestBranch.firstElement();
+        HTInterval interval = currentNode.getRelevantInterval(key, t);
+
+        try {
+            /* Walk down the branch until the interval is found */
+            while (interval == null && currentNode.getNbChildren() > 0) {
+                currentNode = (CoreNode) sht.selectNextChild(currentNode, t);
+                interval = currentNode.getRelevantInterval(key, t);
+            }
+        } catch (ClosedChannelException e) {
+            throw new StateSystemDisposedException();
+        }
+        /*
+         * Since we should now have intervals at every attribute/timestamp
+         * combination, it should NOT be null here.
+         */
+        assert (interval != null);
+        return interval;
+    }
+
+    /**
+     * Return the size of the tree history file
+     *
+     * @return The current size of the history file in bytes
+     */
+    public long getFileSize() {
+        return sht.getFileSize();
+    }
+
+    /**
+     * Return the current depth of the tree, ie the number of node levels.
+     *
+     * @return The tree depth
+     */
+    public int getTreeDepth() {
+        return sht.latestBranch.size();
+    }
+
+    /**
+     * Return the average node usage as a percentage (between 0 and 100)
+     *
+     * @return Average node usage %
+     */
+    public int getAverageNodeUsage() {
+        HTNode node;
+        long total = 0;
+        long ret;
+
+        int nodeCount = sht.getNodeCount();
+        if (nodeCount == 0) {
+            /* Empty tree: report 0 rather than dividing by zero below */
+            return 0;
+        }
+
+        try {
+            for (int seq = 0; seq < nodeCount; seq++) {
+                node = treeIO.readNode(seq);
+                total += node.getNodeUsagePRC();
+            }
+        } catch (ClosedChannelException e) {
+            e.printStackTrace();
+        }
+
+        ret = total / nodeCount;
+        assert (ret >= 0 && ret <= 100);
+        return (int) ret;
+    }
+
+    @Override
+    public void debugPrint(PrintWriter writer) {
+        /* By default don't print out all the intervals */
+        this.debugPrint(writer, false);
+    }
+
+    /**
+     * The basic debugPrint method will print the tree structure, but not their
+     * contents.
+     *
+     * This method here print the contents (the intervals) as well.
+     *
+     * @param writer
+     *            The PrintWriter to which the debug info will be written
+     * @param printIntervals
+     *            Should we also print every contained interval individually?
+     */
+    public void debugPrint(PrintWriter writer, boolean printIntervals) {
+        /* Only used for debugging, shouldn't be externalized */
+        writer.println("------------------------------"); //$NON-NLS-1$
+        writer.println("State History Tree:\n"); //$NON-NLS-1$
+        writer.println(sht.toString());
+        writer.println("Average node utilization: " //$NON-NLS-1$
+                + this.getAverageNodeUsage());
+        writer.println(""); //$NON-NLS-1$
+
+        sht.debugPrintFullTree(writer, printIntervals);
+    }
+}
--- /dev/null
+/*******************************************************************************
+ * Copyright (c) 2012 Ericsson
+ * Copyright (c) 2010, 2011 École Polytechnique de Montréal
+ * Copyright (c) 2010, 2011 Alexandre Montplaisir <alexandre.montplaisir@gmail.com>
+ *
+ * All rights reserved. This program and the accompanying materials are
+ * made available under the terms of the Eclipse Public License v1.0 which
+ * accompanies this distribution, and is available at
+ * http://www.eclipse.org/legal/epl-v10.html
+ *
+ *******************************************************************************/
+
+package org.eclipse.linuxtools.internal.tmf.core.statesystem.backends.historytree;
+
+import java.io.File;
+import java.io.IOException;
+import java.util.concurrent.ArrayBlockingQueue;
+import java.util.concurrent.BlockingQueue;
+
+import org.eclipse.linuxtools.tmf.core.event.TmfTimestamp;
+import org.eclipse.linuxtools.tmf.core.exceptions.TimeRangeException;
+import org.eclipse.linuxtools.tmf.core.statevalue.ITmfStateValue;
+import org.eclipse.linuxtools.tmf.core.statevalue.TmfStateValue;
+
+/**
+ * Variant of the HistoryTreeBackend which runs all the interval-insertion logic
+ * in a separate thread.
+ *
+ * @author alexmont
+ *
+ */
+public final class ThreadedHistoryTreeBackend extends HistoryTreeBackend
+        implements Runnable {
+
+    /*
+     * From superclass:
+     *
+     * protected final StateHistoryTree sht;
+     */
+
+    /*
+     * Queue of intervals awaiting insertion; drained by the run() thread.
+     * Assigned exactly once, in every constructor, before the thread starts,
+     * so it can (and should) be final.
+     */
+    private final BlockingQueue<HTInterval> intervalQueue;
+    private final Thread shtThread;
+
+    /**
+     * New state history constructor
+     *
+     * Note that it usually doesn't make sense to use a Threaded HT if you're
+     * opening an existing state-file, but you know what you're doing...
+     *
+     * @param newStateFile
+     *            The name of the history file that will be created. Should end
+     *            in ".ht"
+     * @param blockSize
+     *            The size of the blocks in the file
+     * @param maxChildren
+     *            The maximum number of children allowed for each core node
+     * @param startTime
+     *            The earliest timestamp stored in the history
+     * @param queueSize
+     *            The size of the interval insertion queue. 2000 - 10000 usually
+     *            works well
+     * @throws IOException
+     *             If there was a problem opening the history file for writing
+     */
+    public ThreadedHistoryTreeBackend(File newStateFile, int blockSize,
+            int maxChildren, long startTime, int queueSize) throws IOException {
+        super(newStateFile, blockSize, maxChildren, startTime);
+
+        intervalQueue = new ArrayBlockingQueue<HTInterval>(queueSize);
+        shtThread = new Thread(this, "History Tree Thread"); //$NON-NLS-1$
+        shtThread.start();
+    }
+
+    /**
+     * New State History constructor. This version provides default values for
+     * blockSize and maxChildren.
+     *
+     * @param newStateFile
+     *            The name of the history file that will be created. Should end
+     *            in ".ht"
+     * @param startTime
+     *            The earliest timestamp stored in the history
+     * @param queueSize
+     *            The size of the interval insertion queue. 2000 - 10000 usually
+     *            works well
+     * @throws IOException
+     *             If there was a problem opening the history file for writing
+     */
+    public ThreadedHistoryTreeBackend(File newStateFile, long startTime,
+            int queueSize) throws IOException {
+        super(newStateFile, startTime);
+
+        intervalQueue = new ArrayBlockingQueue<HTInterval>(queueSize);
+        shtThread = new Thread(this, "History Tree Thread"); //$NON-NLS-1$
+        shtThread.start();
+    }
+
+    /*
+     * The Threaded version does not specify an "existing file" constructor,
+     * since the history is already built (and we only use the other thread
+     * during building). Just use a plain HistoryTreeProvider in this case.
+     *
+     * TODO but what about streaming??
+     */
+
+    @Override
+    public void insertPastState(long stateStartTime, long stateEndTime,
+            int quark, ITmfStateValue value) throws TimeRangeException {
+        /*
+         * Here, instead of directly inserting the elements in the History Tree
+         * underneath, we'll put them in the Queue. They will then be taken and
+         * processed by the other thread executing the run() method.
+         */
+        HTInterval interval = new HTInterval(stateStartTime, stateEndTime,
+                quark, (TmfStateValue) value);
+        try {
+            intervalQueue.put(interval);
+        } catch (InterruptedException e) {
+            /* We should not get interrupted here */
+            System.out.println("State system got interrupted!"); //$NON-NLS-1$
+            e.printStackTrace();
+            /* Restore the interrupt status for our caller */
+            Thread.currentThread().interrupt();
+        }
+    }
+
+    @Override
+    public void finishedBuilding(long endTime) {
+        /*
+         * We need to commit everything in the History Tree and stop the
+         * standalone thread before returning to the StateHistorySystem. (SHS
+         * will then write the Attribute Tree to the file, that must not happen
+         * at the same time we are writing the last nodes!)
+         */
+
+        stopRunningThread(endTime);
+        isFinishedBuilding = true;
+        return;
+    }
+
+    @Override
+    public void dispose() {
+        if (!isFinishedBuilding) {
+            stopRunningThread(TmfTimestamp.PROJECT_IS_CANNED.getValue());
+        }
+        /*
+         * isFinishedBuilding remains false, so the superclass will ask the
+         * back-end to delete the file.
+         */
+        super.dispose();
+    }
+
+    /**
+     * Stop the insertion thread by sending it a "poison pill" interval, then
+     * wait for it to finish its closeTree().
+     *
+     * @param endTime
+     *            The end time to record in the history file
+     */
+    private void stopRunningThread(long endTime) {
+        if (!shtThread.isAlive()) {
+            return;
+        }
+
+        /*
+         * Send a "poison pill" in the queue, then wait for the HT to finish
+         * its closeTree()
+         */
+        try {
+            HTInterval pill = new HTInterval(-1, endTime, -1, TmfStateValue.nullValue());
+            intervalQueue.put(pill);
+            shtThread.join();
+        } catch (TimeRangeException e) {
+            e.printStackTrace();
+        } catch (InterruptedException e) {
+            e.printStackTrace();
+            /* Restore the interrupt status for our caller */
+            Thread.currentThread().interrupt();
+        }
+    }
+
+    @Override
+    public void run() {
+        /*
+         * intervalQueue is final and assigned before shtThread.start() in
+         * every constructor, so it is guaranteed non-null and visible here
+         * (Thread.start() establishes the happens-before edge).
+         */
+        HTInterval currentInterval;
+        try {
+            currentInterval = intervalQueue.take();
+            /* A start time of -1 marks the "poison pill": time to stop */
+            while (currentInterval.getStartTime() != -1) {
+                /* Send the interval to the History Tree */
+                sht.insertInterval(currentInterval);
+                currentInterval = intervalQueue.take();
+            }
+            assert (currentInterval.getAttribute() == -1);
+            /*
+             * We've been told we're done, let's write down everything and quit.
+             * The end time of this "signal interval" is actually correct.
+             */
+            sht.closeTree(currentInterval.getEndTime());
+            return;
+        } catch (InterruptedException e) {
+            /* We've been interrupted abnormally */
+            System.out.println("State History Tree interrupted!"); //$NON-NLS-1$
+            e.printStackTrace();
+            /* Restore the interrupt status before this thread exits */
+            Thread.currentThread().interrupt();
+        } catch (TimeRangeException e) {
+            /* This also should not happen */
+            e.printStackTrace();
+        }
+    }
+
+}
+++ /dev/null
-/*******************************************************************************
- * Copyright (c) 2012 Ericsson
- * Copyright (c) 2010, 2011 École Polytechnique de Montréal
- * Copyright (c) 2010, 2011 Alexandre Montplaisir <alexandre.montplaisir@gmail.com>
- *
- * All rights reserved. This program and the accompanying materials are
- * made available under the terms of the Eclipse Public License v1.0 which
- * accompanies this distribution, and is available at
- * http://www.eclipse.org/legal/epl-v10.html
- *
- *******************************************************************************/
-
-package org.eclipse.linuxtools.internal.tmf.core.statesystem.historytree;
-
-import java.nio.ByteBuffer;
-
-/**
- * A Core node is a first-level node of a History Tree which is not a leaf node.
- *
- * It extends HTNode by adding support for child nodes, and also extensions.
- *
- * @author alexmont
- *
- */
-class CoreNode extends HTNode {
-
- /* Nb. of children this node has */
- private int nbChildren;
-
- /* Seq. numbers of the children nodes (size = MAX_NB_CHILDREN) */
- private int[] children;
-
- /* Start times of each of the children (size = MAX_NB_CHILDREN) */
- private long[] childStart;
-
- /* Seq number of this node's extension. -1 if none */
- private int extension;
-
- /**
- * Initial constructor. Use this to initialize a new EMPTY node.
- *
- * @param tree
- * The HistoryTree to which this node belongs
- * @param seqNumber
- * The (unique) sequence number assigned to this particular node
- * @param parentSeqNumber
- * The sequence number of this node's parent node
- * @param start
- * The earliest timestamp stored in this node
- */
- CoreNode(HistoryTree tree, int seqNumber, int parentSeqNumber,
- long start) {
- super(tree, seqNumber, parentSeqNumber, start);
- this.nbChildren = 0;
-
- /*
- * We instantiate the two following arrays at full size right away,
- * since we want to reserve that space in the node's header.
- * "this.nbChildren" will tell us how many relevant entries there are in
- * those tables.
- */
- this.children = new int[ownerTree.config.maxChildren];
- this.childStart = new long[ownerTree.config.maxChildren];
- }
-
- @Override
- protected void readSpecificHeader(ByteBuffer buffer) {
- int i;
-
- extension = buffer.getInt();
- nbChildren = buffer.getInt();
-
- children = new int[ownerTree.config.maxChildren];
- for (i = 0; i < nbChildren; i++) {
- children[i] = buffer.getInt();
- }
- for (i = nbChildren; i < ownerTree.config.maxChildren; i++) {
- buffer.getInt();
- }
-
- this.childStart = new long[ownerTree.config.maxChildren];
- for (i = 0; i < nbChildren; i++) {
- childStart[i] = buffer.getLong();
- }
- for (i = nbChildren; i < ownerTree.config.maxChildren; i++) {
- buffer.getLong();
- }
- }
-
- @Override
- protected void writeSpecificHeader(ByteBuffer buffer) {
- int i;
-
- buffer.putInt(extension);
- buffer.putInt(nbChildren);
-
- /* Write the "children's seq number" array */
- for (i = 0; i < nbChildren; i++) {
- buffer.putInt(children[i]);
- }
- for (i = nbChildren; i < ownerTree.config.maxChildren; i++) {
- buffer.putInt(0);
- }
-
- /* Write the "children's start times" array */
- for (i = 0; i < nbChildren; i++) {
- buffer.putLong(childStart[i]);
- }
- for (i = nbChildren; i < ownerTree.config.maxChildren; i++) {
- buffer.putLong(0);
- }
- }
-
- int getNbChildren() {
- return nbChildren;
- }
-
- int getChild(int index) {
- return children[index];
- }
-
- int getLatestChild() {
- return children[nbChildren - 1];
- }
-
- long getChildStart(int index) {
- return childStart[index];
- }
-
- long getLatestChildStart() {
- return childStart[nbChildren - 1];
- }
-
- int getExtensionSequenceNumber() {
- return extension;
- }
-
- /**
- * Tell this node that it has a new child (Congrats!)
- *
- * @param childNode
- * The SHTNode object of the new child
- */
- void linkNewChild(CoreNode childNode) {
- assert (this.nbChildren < ownerTree.config.maxChildren);
-
- this.children[nbChildren] = childNode.getSequenceNumber();
- this.childStart[nbChildren] = childNode.getNodeStart();
- this.nbChildren++;
- }
-
- @Override
- protected byte getNodeType() {
- return 1;
- }
-
- @Override
- protected int getTotalHeaderSize() {
- int specificSize;
- specificSize = 4 /* 1x int (extension node) */
- + 4 /* 1x int (nbChildren) */
-
- /* MAX_NB * int ('children' table) */
- + 4 * ownerTree.config.maxChildren
-
- /* MAX_NB * Timevalue ('childStart' table) */
- + 8 * ownerTree.config.maxChildren;
-
- return getCommonHeaderSize() + specificSize;
- }
-
- @Override
- protected String toStringSpecific() {
- /* Only used for debugging, shouldn't be externalized */
- return "Core Node, " + nbChildren + " children, "; //$NON-NLS-1$ //$NON-NLS-2$
- }
-
-}
+++ /dev/null
-/*******************************************************************************
- * Copyright (c) 2012 Ericsson
- * Copyright (c) 2010, 2011 École Polytechnique de Montréal
- * Copyright (c) 2010, 2011 Alexandre Montplaisir <alexandre.montplaisir@gmail.com>
- *
- * All rights reserved. This program and the accompanying materials are
- * made available under the terms of the Eclipse Public License v1.0 which
- * accompanies this distribution, and is available at
- * http://www.eclipse.org/legal/epl-v10.html
- *
- *******************************************************************************/
-
-package org.eclipse.linuxtools.internal.tmf.core.statesystem.historytree;
-
-import java.io.File;
-
-/**
- * Configuration object for a StateHistoryTree.
- *
- * @author alexmont
- *
- */
-final class HTConfig {
-
- public final File stateFile;
- public final int blockSize;
- public final int maxChildren;
- public final long treeStart;
-
- HTConfig(File newStateFile, int blockSize, int maxChildren, long startTime) {
- this.stateFile = newStateFile;
- this.blockSize = blockSize;
- this.maxChildren = maxChildren;
- this.treeStart = startTime;
- }
-
- /**
- * Version using default values for blocksize and maxchildren
- *
- * @param stateFileName
- * @param startTime
- */
- HTConfig(File newStateFile, long startTime) {
- this(newStateFile, 64 * 1024, 50, startTime);
- }
-}
+++ /dev/null
-/*******************************************************************************
- * Copyright (c) 2012 Ericsson
- * Copyright (c) 2010, 2011 École Polytechnique de Montréal
- * Copyright (c) 2010, 2011 Alexandre Montplaisir <alexandre.montplaisir@gmail.com>
- *
- * All rights reserved. This program and the accompanying materials are
- * made available under the terms of the Eclipse Public License v1.0 which
- * accompanies this distribution, and is available at
- * http://www.eclipse.org/legal/epl-v10.html
- *
- *******************************************************************************/
-
-package org.eclipse.linuxtools.internal.tmf.core.statesystem.historytree;
-
-import java.io.IOException;
-import java.nio.ByteBuffer;
-
-import org.eclipse.linuxtools.tmf.core.exceptions.StateValueTypeException;
-import org.eclipse.linuxtools.tmf.core.exceptions.TimeRangeException;
-import org.eclipse.linuxtools.tmf.core.interval.ITmfStateInterval;
-import org.eclipse.linuxtools.tmf.core.statevalue.ITmfStateValue;
-import org.eclipse.linuxtools.tmf.core.statevalue.TmfStateValue;
-
-/**
- * The interval component, which will be contained in a node of the History
- * Tree.
- *
- * @author alexmont
- *
- */
-final class HTInterval implements ITmfStateInterval, Comparable<HTInterval> {
-
- private final long start;
- private final long end;
- private final int attribute;
- private final TmfStateValue sv;
-
- /*
- * Size of the strings section entry used by this interval (= 0 if not used)
- */
- private final int stringsEntrySize;
-
- /**
- * Standard constructor
- *
- * @param intervalStart
- * @param intervalEnd
- * @param attribute
- * @param value
- * @throws TimeRangeException
- */
- HTInterval(long intervalStart, long intervalEnd, int attribute,
- TmfStateValue value) throws TimeRangeException {
- if (intervalStart > intervalEnd) {
- throw new TimeRangeException();
- }
-
- this.start = intervalStart;
- this.end = intervalEnd;
- this.attribute = attribute;
- this.sv = value;
- this.stringsEntrySize = computeStringsEntrySize();
- }
-
- /**
- * Reader constructor. Builds the interval using an already-allocated
- * ByteBuffer, which normally comes from a NIO FileChannel.
- *
- * @param buffer
- * The ByteBuffer from which to read the information
- * @throws IOException
- */
- final static HTInterval readFrom(ByteBuffer buffer) throws IOException {
- HTInterval interval;
- long intervalStart, intervalEnd;
- int attribute;
- TmfStateValue value;
- int valueOrOffset, valueSize, res;
- byte valueType;
- byte array[];
-
- /* Read the Data Section entry */
- intervalStart = buffer.getLong();
- intervalEnd = buffer.getLong();
- attribute = buffer.getInt();
-
- /* Read the 'type' of the value, then react accordingly */
- valueType = buffer.get();
- if (valueType <= 0) {
- /* the type of ValueOrOffset is 'value' */
- valueOrOffset = buffer.getInt();
- if (valueOrOffset == -1) {
- /* Null value */
- value = TmfStateValue.nullValue();
- } else {
- /* Normal integer value */
- value = TmfStateValue.newValueInt(valueOrOffset);
- }
-
- } else { // valueType > 0
- /* the type is 'offset' */
- valueOrOffset = buffer.getInt();
-
- /*
- * Go read the corresponding entry in the Strings section of the
- * block
- */
- buffer.mark();
- buffer.position(valueOrOffset);
-
- /* the first byte = the size to read */
- valueSize = buffer.get();
-
- /*
- * Careful though, 'valueSize' is the total size of the entry,
- * including the 'size' byte at the start and end (0'ed) byte at the
- * end. Here we want 'array' to only contain the real payload of the
- * value.
- */
- array = new byte[valueSize - 2];
- buffer.get(array);
- value = TmfStateValue.newValueString(new String(array));
-
- /* Confirm the 0'ed byte at the end */
- res = buffer.get();
- if (res != 0) {
- throw new IOException(
- "Invalid interval data. Maybe your file is corrupt?"); //$NON-NLS-1$
- }
-
- /*
- * Restore the file pointer's position (so we can read the next
- * interval)
- */
- buffer.reset();
- }
-
- try {
- interval = new HTInterval(intervalStart, intervalEnd, attribute,
- value);
- } catch (TimeRangeException e) {
- throw new IOException(
- "Invalid interval data. Maybe your file is corrupt?"); //$NON-NLS-1$
- }
- return interval;
- }
-
- /**
- * Antagonist of the previous constructor, write the Data entry
- * corresponding to this interval in a ByteBuffer (mapped to a block in the
- * history-file, hopefully)
- *
- * @param buffer
- * The already-allocated ByteBuffer corresponding to a SHT Node
- * @param endPosOfStringEntry
- * The initial (before calling this function for this interval)
- * position of the Strings Entry for this node. This will change
- * from one call to the other if we're writing String
- * StateValues.
- * @return The size of the Strings Entry that was written, if any.
- */
- int writeInterval(ByteBuffer buffer, int endPosOfStringEntry) {
- int sizeOfStringEntry;
- byte[] byteArrayToWrite;
-
- buffer.putLong(start);
- buffer.putLong(end);
- buffer.putInt(attribute);
- buffer.put(sv.getType());
-
- byteArrayToWrite = sv.toByteArray();
-
- if (byteArrayToWrite == null) {
- /* We write the 'valueOffset' field as a straight value. In the case
- * of a null value, it will be unboxed as -1 */
- try {
- buffer.putInt(sv.unboxInt());
- } catch (StateValueTypeException e) {
- /*
- * This should not happen, since the value told us it was of
- * type Null or Integer (corrupted value?)
- */
- e.printStackTrace();
- }
- return 0; /* we didn't use a Strings section entry */
-
- }
- /*
- * Size to write (+2 = +1 for size at the start, +1 for the 0 at the
- * end)
- */
- sizeOfStringEntry = byteArrayToWrite.length + 2;
-
- /* we use the valueOffset as an offset. */
- buffer.putInt(endPosOfStringEntry - sizeOfStringEntry);
- buffer.mark();
- buffer.position(endPosOfStringEntry - sizeOfStringEntry);
-
- /*
- * write the Strings entry (1st byte = size, then the bytes, then the 0)
- */
- buffer.put((byte) sizeOfStringEntry);
- buffer.put(byteArrayToWrite);
- buffer.put((byte) 0);
- assert (buffer.position() == endPosOfStringEntry);
- buffer.reset();
- return sizeOfStringEntry;
- }
-
- @Override
- public long getStartTime() {
- return start;
- }
-
- @Override
- public long getEndTime() {
- return end;
- }
-
- @Override
- public long getViewerEndTime() {
- return end + 1;
- }
-
- @Override
- public int getAttribute() {
- return attribute;
- }
-
- @Override
- public ITmfStateValue getStateValue() {
- return sv;
- }
-
- @Override
- public boolean intersects(long timestamp) {
- if (start <= timestamp) {
- if (end >= timestamp) {
- return true;
- }
- }
- return false;
- }
-
- int getStringsEntrySize() {
- return stringsEntrySize;
- }
-
- /**
- * Total serialized size of this interval
- *
- * @return
- */
- int getIntervalSize() {
- return stringsEntrySize + HTNode.getDataEntrySize();
- }
-
- private int computeStringsEntrySize() {
- if (sv.toByteArray() == null) {
- return 0;
- }
- return sv.toByteArray().length + 2;
- /* (+1 for the first byte indicating the size, +1 for the 0'ed byte) */
- }
-
- /**
- * Compare the END TIMES of different intervals. This is used to sort the
- * intervals when we close down a node.
- */
- @Override
- public int compareTo(HTInterval other) {
- if (this.end < other.end) {
- return -1;
- } else if (this.end > other.end) {
- return 1;
- } else {
- return 0;
- }
- }
-
- @Override
- public boolean equals(Object other) {
- if (other instanceof HTInterval) {
- if (this.compareTo((HTInterval) other) == 0) {
- return true;
- }
- }
- return false;
- }
-
- @Override
- public int hashCode() {
- return super.hashCode();
- }
-
- @Override
- public String toString() {
- /* Only for debug, should not be externalized */
- StringBuilder sb = new StringBuilder();
- sb.append('[');
- sb.append(start);
- sb.append(", "); //$NON-NLS-1$
- sb.append(end);
- sb.append(']');
-
- sb.append(", attribute = "); //$NON-NLS-1$
- sb.append(attribute);
-
- sb.append(", value = "); //$NON-NLS-1$
- sb.append(sv.toString());
-
- return sb.toString();
- }
-}
+++ /dev/null
-/*******************************************************************************
- * Copyright (c) 2012 Ericsson
- * Copyright (c) 2010, 2011 École Polytechnique de Montréal
- * Copyright (c) 2010, 2011 Alexandre Montplaisir <alexandre.montplaisir@gmail.com>
- *
- * All rights reserved. This program and the accompanying materials are
- * made available under the terms of the Eclipse Public License v1.0 which
- * accompanies this distribution, and is available at
- * http://www.eclipse.org/legal/epl-v10.html
- *
- *******************************************************************************/
-
-package org.eclipse.linuxtools.internal.tmf.core.statesystem.historytree;
-
-import java.io.IOException;
-import java.io.PrintWriter;
-import java.nio.ByteBuffer;
-import java.nio.ByteOrder;
-import java.nio.channels.FileChannel;
-import java.util.ArrayList;
-import java.util.Collections;
-import java.util.List;
-
-import org.eclipse.linuxtools.tmf.core.exceptions.TimeRangeException;
-import org.eclipse.linuxtools.tmf.core.interval.ITmfStateInterval;
-import org.eclipse.linuxtools.tmf.core.statevalue.TmfStateValue;
-
-/**
- * The base class for all the types of nodes that go in the History Tree.
- *
- * @author alexmont
- *
- */
-abstract class HTNode {
-
- /* Reference to the History Tree to whom this node belongs */
- protected final HistoryTree ownerTree;
-
- /* Time range of this node */
- private final long nodeStart;
- private long nodeEnd;
-
- /* Sequence number = position in the node section of the file */
- private final int sequenceNumber;
- private int parentSequenceNumber; /* = -1 if this node is the root node */
-
- /* Where the Strings section begins (from the start of the node */
- private int stringSectionOffset;
-
- /* True if this node is closed (and to be committed to disk) */
- private boolean isDone;
-
- /* Vector containing all the intervals contained in this node */
- private final ArrayList<HTInterval> intervals;
-
- HTNode(HistoryTree tree, int seqNumber, int parentSeqNumber, long start) {
- this.ownerTree = tree;
- this.nodeStart = start;
- this.sequenceNumber = seqNumber;
- this.parentSequenceNumber = parentSeqNumber;
-
- this.stringSectionOffset = ownerTree.config.blockSize;
- this.isDone = false;
- this.intervals = new ArrayList<HTInterval>();
- }
-
- /**
- * Reader factory constructor. Build a Node object (of the right type) by
- * reading a block in the file.
- *
- * @param tree
- * Reference to the HT which will own this node
- * @param fc
- * FileChannel to the history file, ALREADY SEEKED at the start
- * of the node.
- * @throws IOException
- */
- final static HTNode readNode(HistoryTree tree, FileChannel fc)
- throws IOException {
- HTNode newNode = null;
- int res, i;
-
- ByteBuffer buffer = ByteBuffer.allocate(tree.config.blockSize);
- buffer.order(ByteOrder.LITTLE_ENDIAN);
- buffer.clear();
- res = fc.read(buffer);
- assert (res == tree.config.blockSize);
- // This often breaks, so might as well keep this code not too far...
- // if ( res != tree.config.blockSize ) {
- // tree.debugPrintFullTree(new PrintWriter(System.out, true), null,
- // false);
- // assert ( false );
- // }
- buffer.flip();
-
- /* Read the common header part */
- byte type = buffer.get();
- long start = buffer.getLong();
- long end = buffer.getLong();
- int seqNb = buffer.getInt();
- int parentSeqNb = buffer.getInt();
- int intervalCount = buffer.getInt();
- int stringSectionOffset = buffer.getInt();
- boolean done = byteToBool(buffer.get());
-
- /* Now the rest of the header depends on the node type */
- switch (type) {
- case 1:
- /* Core nodes */
- newNode = new CoreNode(tree, seqNb, parentSeqNb, start);
- newNode.readSpecificHeader(buffer);
- break;
-
- // TODO implement other node types
- // case 2:
- // /* Leaf nodes */
- //
- // break;
- //
- //
- // case 3:
- // /* "Claudette" (extended) nodes */
- //
- // break;
-
- default:
- /* Unrecognized node type */
- throw new IOException();
- }
-
- /*
- * At this point, we should be done reading the header and 'buffer'
- * should only have the intervals left
- */
- for (i = 0; i < intervalCount; i++) {
- newNode.intervals.add(HTInterval.readFrom(buffer));
- }
-
- /* Assign the node's other information we have read previously */
- newNode.nodeEnd = end;
- newNode.stringSectionOffset = stringSectionOffset;
- newNode.isDone = done;
-
- return newNode;
- }
-
- final void writeSelf(FileChannel fc) throws IOException {
- int res, size;
- int curStringsEntryEndPos = ownerTree.config.blockSize;
-
- ByteBuffer buffer = ByteBuffer.allocate(ownerTree.config.blockSize);
- buffer.order(ByteOrder.LITTLE_ENDIAN);
- buffer.clear();
-
- /* Write the common header part */
- buffer.put(this.getNodeType());
- buffer.putLong(nodeStart);
- buffer.putLong(nodeEnd);
- buffer.putInt(sequenceNumber);
- buffer.putInt(parentSequenceNumber);
- buffer.putInt(intervals.size());
- buffer.putInt(stringSectionOffset);
- buffer.put(boolToByte(isDone));
-
- /* Now call the inner method to write the specific header part */
- this.writeSpecificHeader(buffer);
-
- /* Back to us, we write the intervals */
- for (HTInterval interval : intervals) {
- size = interval.writeInterval(buffer, curStringsEntryEndPos);
- curStringsEntryEndPos -= size;
- }
-
- /*
- * Write padding between the end of the Data section and the start of
- * the Strings section (needed to fill the node in case there is no
- * Strings section)
- */
- while (buffer.position() < stringSectionOffset) {
- buffer.put((byte) 0);
- }
-
- /*
- * If the offsets were right, the size of the Strings section should be
- * == to the expected size
- */
- assert (curStringsEntryEndPos == stringSectionOffset);
-
- /* Finally, write everything in the Buffer to disk */
-
- // if we don't do this, flip() will lose what's after.
- buffer.position(ownerTree.config.blockSize);
-
- buffer.flip();
- res = fc.write(buffer);
- assert (res == ownerTree.config.blockSize);
- }
-
- /**
- * Accessors
- */
- long getNodeStart() {
- return nodeStart;
- }
-
- long getNodeEnd() {
- if (this.isDone) {
- return nodeEnd;
- }
- return 0;
- }
-
- int getSequenceNumber() {
- return sequenceNumber;
- }
-
- int getParentSequenceNumber() {
- return parentSequenceNumber;
- }
-
- /**
- * Change this node's parent. Used when we create a new root node for
- * example.
- */
- void setParentSequenceNumber(int newParent) {
- parentSequenceNumber = newParent;
- }
-
- boolean isDone() {
- return isDone;
- }
-
- /**
- * Add an interval to this node
- *
- * @param newInterval
- */
- void addInterval(HTInterval newInterval) {
- /* Just in case, but should be checked before even calling this function */
- assert (newInterval.getIntervalSize() <= this.getNodeFreeSpace());
-
- intervals.add(newInterval);
-
- /* Update the in-node offset "pointer" */
- stringSectionOffset -= (newInterval.getStringsEntrySize());
- }
-
- /**
- * We've received word from the containerTree that newest nodes now exist to
- * our right. (Puts isDone = true and sets the endtime)
- *
- * @param endtime
- * The nodeEnd time that the node will have
- * @throws TimeRangeException
- */
- void closeThisNode(long endtime) {
- assert (endtime >= this.nodeStart);
- // /* This also breaks often too */
- // if ( endtime.getValue() <= this.nodeStart.getValue() ) {
- // ownerTree.debugPrintFullTree(new PrintWriter(System.out, true), null,
- // false);
- // assert ( false );
- // }
-
- if (intervals.size() > 0) {
- /*
- * Sort the intervals by ascending order of their end time. This
- * speeds up lookups a bit
- */
- Collections.sort(intervals);
-
- /*
- * Make sure there are no intervals in this node with their EndTime
- * > the one requested. Only need to check the last one since they
- * are now sorted
- */
- assert (endtime >= intervals.get(intervals.size() - 1).getEndTime());
- }
-
- this.isDone = true;
- this.nodeEnd = endtime;
- return;
- }
-
- /**
- * The method to fill up the stateInfo (passed on from the Current State
- * Tree when it does a query on the SHT). We'll replace the data in that
- * vector with whatever relevant we can find from this node
- *
- * @param stateInfo
- * The same stateInfo that comes from SHT's doQuery()
- * @param t
- * The timestamp for which the query is for. Only return
- * intervals that intersect t.
- * @throws TimeRangeException
- */
- void writeInfoFromNode(List<ITmfStateInterval> stateInfo, long t)
- throws TimeRangeException {
- assert (this.isDone); // not sure this will always be the case...
- int startIndex;
-
- if (intervals.size() == 0) {
- return;
- }
- startIndex = getStartIndexFor(t);
-
- for (int i = startIndex; i < intervals.size(); i++) {
- /*
- * Now we only have to compare the Start times, since we now the End
- * times necessarily fit
- */
- if (intervals.get(i).getStartTime() <= t) {
- stateInfo.set(intervals.get(i).getAttribute(), intervals.get(i));
- }
- }
- return;
- }
-
- /**
- * Get a single Interval from the information in this node If the
- * key/timestamp pair cannot be found, we return null.
- *
- * @param key
- * @param t
- * @return The Interval containing the information we want, or null if it
- * wasn't found
- * @throws TimeRangeException
- */
- HTInterval getRelevantInterval(int key, long t) throws TimeRangeException {
- assert (this.isDone);
- int startIndex;
- HTInterval curInterval;
-
- if (intervals.size() == 0) {
- return null;
- }
-
- startIndex = getStartIndexFor(t);
-
- for (int i = startIndex; i < intervals.size(); i++) {
- curInterval = intervals.get(i);
- if (curInterval.getAttribute() == key
- && curInterval.getStartTime() <= t
- && curInterval.getEndTime() >= t) {
- return curInterval;
- }
- }
- /* We didn't find the relevant information in this node */
- return null;
- }
-
- private int getStartIndexFor(long t) throws TimeRangeException {
- HTInterval dummy;
- int index;
-
- /*
- * Since the intervals are sorted by end time, we can skip all the ones
- * at the beginning whose end times are smaller than 't'. Java does
- * provides a .binarySearch method, but its API is quite weird...
- */
- dummy = new HTInterval(0, t, 0, TmfStateValue.nullValue());
- index = Collections.binarySearch(intervals, dummy);
-
- if (index < 0) {
- /*
- * .binarySearch returns a negative number if the exact value was
- * not found. Here we just want to know where to start searching, we
- * don't care if the value is exact or not.
- */
- index = -index - 1;
-
- }
-
- /* Sometimes binarySearch yields weird stuff... */
- if (index < 0) {
- index = 0;
- }
- if (index >= intervals.size()) {
- index = intervals.size() - 1;
- }
-
- /*
- * Another API quirkiness, the returned index is the one of the *last*
- * element of a series of equal endtimes, which happens sometimes. We
- * want the *first* element of such a series, to read through them
- * again.
- */
- while (index > 0
- && intervals.get(index - 1).compareTo(intervals.get(index)) == 0) {
- index--;
- }
- // FIXME F*ck all this, just do our own binary search in a saner way...
-
- // //checks to make sure startIndex works how I think it does
- // if ( startIndex > 0 ) { assert ( intervals.get(startIndex-1).getEnd()
- // < t ); }
- // assert ( intervals.get(startIndex).getEnd() >= t );
- // if ( startIndex < intervals.size()-1 ) { assert (
- // intervals.get(startIndex+1).getEnd() >= t ); }
-
- return index;
- }
-
- /**
- * @return The offset, within the node, where the Data section ends
- */
- private int getDataSectionEndOffset() {
- return this.getTotalHeaderSize() + HTNode.getDataEntrySize()
- * intervals.size();
- }
-
- /**
- * Returns the free space in the node, which is simply put, the
- * stringSectionOffset - dataSectionOffset
- */
- int getNodeFreeSpace() {
- return stringSectionOffset - this.getDataSectionEndOffset();
- }
-
- /**
- * Returns the current space utilisation of this node, as a percentage.
- * (used space / total usable space, which excludes the header)
- */
- long getNodeUsagePRC() {
- float freePercent = (float) this.getNodeFreeSpace()
- / (float) (ownerTree.config.blockSize - this.getTotalHeaderSize())
- * 100f;
- return (long) (100L - freePercent);
- }
-
- protected final static int getDataEntrySize() {
- return 16 /* 2 x Timevalue/long (interval start + end) */
- + 4 /* int (key) */
- + 1 /* byte (type) */
- + 4; /* int (valueOffset) */
- /* = 25 */
- }
-
- protected final static byte boolToByte(boolean thebool) {
- if (thebool) {
- return (byte) 1;
- }
- return (byte) 0;
- }
-
- final static boolean byteToBool(byte thebyte) {
- return (thebyte == (byte) 1);
- }
-
- /**
- * @name Debugging functions
- */
-
- @SuppressWarnings("nls")
- @Override
- public String toString() {
- /* Only used for debugging, shouldn't be externalized */
- StringBuffer buf = new StringBuffer("Node #" + sequenceNumber + ", ");
- buf.append(this.toStringSpecific());
- buf.append(intervals.size() + " intervals (" + this.getNodeUsagePRC()
- + "% used), ");
-
- buf.append("[" + this.nodeStart + " - ");
- if (this.isDone) {
- buf = buf.append("" + this.nodeEnd + "]");
- } else {
- buf = buf.append("...]");
- }
- return buf.toString();
- }
-
- /**
- * Debugging function that prints out the contents of this node
- *
- * @param writer
- * PrintWriter in which we will print the debug output
- */
- @SuppressWarnings("nls")
- void debugPrintIntervals(PrintWriter writer) {
- /* Only used for debugging, shouldn't be externalized */
- writer.println("Node #" + sequenceNumber + ":");
-
- /* Array of children */
- if (this.getNodeType() == 1) { /* Only Core Nodes can have children */
- CoreNode thisNode = (CoreNode) this;
- writer.print(" " + thisNode.getNbChildren() + " children");
- if (thisNode.getNbChildren() >= 1) {
- writer.print(": [ " + thisNode.getChild(0));
- for (int i = 1; i < thisNode.getNbChildren(); i++) {
- writer.print(", " + thisNode.getChild(i));
- }
- writer.print(']');
- }
- writer.print('\n');
- }
-
- /* List of intervals in the node */
- writer.println(" Intervals contained:");
- for (int i = 0; i < intervals.size(); i++) {
- writer.println(intervals.get(i).toString());
- }
- writer.println('\n');
- }
-
- final static int getCommonHeaderSize() {
- /*
- * 1 - byte (type)
- *
- * 16 - 2x long (start time, end time)
- *
- * 16 - 4x int (seq number, parent seq number, intervalcount, strings
- * section pos.)
- *
- * 1 - byte (done or not)
- */
- return 34;
- }
-
- // ------------------------------------------------------------------------
- // Abstract methods
- // ------------------------------------------------------------------------
-
- protected abstract byte getNodeType();
-
- protected abstract int getTotalHeaderSize();
-
- protected abstract void readSpecificHeader(ByteBuffer buffer);
-
- protected abstract void writeSpecificHeader(ByteBuffer buffer);
-
- protected abstract String toStringSpecific();
-}
+++ /dev/null
-/*******************************************************************************
- * Copyright (c) 2012 Ericsson
- * Copyright (c) 2010, 2011 École Polytechnique de Montréal
- * Copyright (c) 2010, 2011 Alexandre Montplaisir <alexandre.montplaisir@gmail.com>
- *
- * All rights reserved. This program and the accompanying materials are
- * made available under the terms of the Eclipse Public License v1.0 which
- * accompanies this distribution, and is available at
- * http://www.eclipse.org/legal/epl-v10.html
- *
- *******************************************************************************/
-
-package org.eclipse.linuxtools.internal.tmf.core.statesystem.historytree;
-
-import java.io.File;
-import java.io.FileInputStream;
-import java.io.FileOutputStream;
-import java.io.IOException;
-import java.nio.channels.ClosedChannelException;
-import java.nio.channels.FileChannel;
-
-/**
- * This class exists mainly for code isolation/clarification purposes. It
- * contains all the methods and descriptors to handle reading/writing to the
- * tree-file on disk and all the caching mechanisms. Every HistoryTree should
- * contain 1 and only 1 HT_IO element.
- *
- * @author alexmont
- *
- */
-class HT_IO {
-
- /* reference to the tree to which this IO-object belongs */
- private final HistoryTree tree;
-
- /* Fields related to the file I/O */
- private final File historyTreeFile;
- private final FileInputStream fis;
- private final FileOutputStream fos;
- private final FileChannel fcIn;
- private final FileChannel fcOut;
-
- /**
- * Standard constructor
- *
- * @param tree
- * @param newFile
- * Are we creating a new file from scratch?
- * @throws IOException
- */
- HT_IO(HistoryTree tree, boolean newFile) throws IOException {
- this.tree = tree;
- historyTreeFile = tree.config.stateFile;
- boolean success1 = true, success2;
-
- if (newFile) {
- /* Create a new empty History Tree file */
- if (historyTreeFile.exists()) {
- success1 = historyTreeFile.delete();
- }
- success2 = historyTreeFile.createNewFile();
- if (!(success1 && success2)) {
- /* It seems we do not have permission to create the new file */
- throw new IOException("Cannot create new file at " + //$NON-NLS-1$
- historyTreeFile.getName());
- }
- fis = new FileInputStream(historyTreeFile);
- fos = new FileOutputStream(historyTreeFile, false);
- } else {
- /*
- * We want to open an existing file, make sure we don't squash the
- * existing content when opening the fos!
- */
- this.fis = new FileInputStream(historyTreeFile);
- this.fos = new FileOutputStream(historyTreeFile, true);
- }
- this.fcIn = fis.getChannel();
- this.fcOut = fos.getChannel();
- }
-
- /**
- * Generic "read node" method, which checks if the node is in memory first,
- * and if it's not it goes to disk to retrieve it.
- *
- * @param seqNumber
- * Sequence number of the node we want
- * @return The wanted node in object form
- * @throws ClosedChannelException
- * If the channel was closed before we could read
- */
- HTNode readNode(int seqNumber) throws ClosedChannelException {
- HTNode node = readNodeFromMemory(seqNumber);
- if (node == null) {
- return readNodeFromDisk(seqNumber);
- }
- return node;
- }
-
- private HTNode readNodeFromMemory(int seqNumber) {
- for (HTNode node : tree.latestBranch) {
- if (node.getSequenceNumber() == seqNumber) {
- return node;
- }
- }
- return null;
- }
-
- /**
- * This method here isn't private, if we know for sure the node cannot be in
- * memory it's a bit faster to use this directly (when opening a file from
- * disk for example)
- *
- * @throws ClosedChannelException
- * Usually happens because the file was closed while we were
- * reading. Instead of using a big reader-writer lock, we'll
- * just catch this exception.
- */
- synchronized HTNode readNodeFromDisk(int seqNumber) throws ClosedChannelException {
- HTNode readNode;
- try {
- seekFCToNodePos(fcIn, seqNumber);
- readNode = HTNode.readNode(tree, fcIn);
- return readNode;
- } catch (ClosedChannelException e) {
- throw e;
- } catch (IOException e) {
- /* Other types of IOExceptions shouldn't happen at this point though */
- e.printStackTrace();
- return null;
- }
- }
-
- void writeNode(HTNode node) {
- try {
- /* Position ourselves at the start of the node and write it */
- seekFCToNodePos(fcOut, node.getSequenceNumber());
- node.writeSelf(fcOut);
- } catch (IOException e) {
- /* If we were able to open the file, we should be fine now... */
- e.printStackTrace();
- }
- }
-
- FileChannel getFcOut() {
- return this.fcOut;
- }
-
- FileInputStream supplyATReader() {
- try {
- /*
- * Position ourselves at the start of the Mapping section in the
- * file (which is right after the Blocks)
- */
- seekFCToNodePos(fcIn, tree.getNodeCount());
- } catch (IOException e) {
- e.printStackTrace();
- }
- return fis;
- }
-
- File supplyATWriterFile() {
- return tree.config.stateFile;
- }
-
- long supplyATWriterFilePos() {
- return HistoryTree.getTreeHeaderSize()
- + ((long) tree.getNodeCount() * tree.config.blockSize);
- }
-
- synchronized void closeFile() {
- try {
- fis.close();
- fos.close();
- } catch (IOException e) {
- e.printStackTrace();
- }
- }
-
- synchronized void deleteFile() {
- closeFile();
-
- if(!historyTreeFile.delete()) {
- /* We didn't succeed in deleting the file */
- //TODO log it?
- }
- }
-
- /**
- * Seek the given FileChannel to the position corresponding to the node that
- * has seqNumber
- *
- * @param seqNumber
- * @throws IOException
- */
- private void seekFCToNodePos(FileChannel fc, int seqNumber)
- throws IOException {
- fc.position(HistoryTree.getTreeHeaderSize() + (long) seqNumber
- * tree.config.blockSize);
- /*
- * cast to (long) is needed to make sure the result is a long too and
- * doesn't get truncated
- */
- }
-
-}
+++ /dev/null
-/*******************************************************************************
- * Copyright (c) 2012 Ericsson
- * Copyright (c) 2010, 2011 École Polytechnique de Montréal
- * Copyright (c) 2010, 2011 Alexandre Montplaisir <alexandre.montplaisir@gmail.com>
- *
- * All rights reserved. This program and the accompanying materials are
- * made available under the terms of the Eclipse Public License v1.0 which
- * accompanies this distribution, and is available at
- * http://www.eclipse.org/legal/epl-v10.html
- *
- *******************************************************************************/
-
-package org.eclipse.linuxtools.internal.tmf.core.statesystem.historytree;
-
-import java.io.File;
-import java.io.FileInputStream;
-import java.io.IOException;
-import java.io.PrintWriter;
-import java.nio.ByteBuffer;
-import java.nio.ByteOrder;
-import java.nio.channels.ClosedChannelException;
-import java.nio.channels.FileChannel;
-import java.util.Vector;
-
-import org.eclipse.linuxtools.tmf.core.exceptions.TimeRangeException;
-
-/**
- * Meta-container for the History Tree. This structure contains all the
- * high-level data relevant to the tree.
- *
- * @author alexmont
- *
- */
-class HistoryTree {
-
- private static final int HISTORY_FILE_MAGIC_NUMBER = 0x05FFA900;
-
- /**
- * File format version. Increment minor on backwards-compatible changes.
- * Increment major + set minor back to 0 when breaking compatibility.
- */
- private static final int MAJOR_VERSION = 3;
- private static final byte MINOR_VERSION = 0;
-
- /**
- * Tree-specific configuration
- */
- /* Container for all the configuration constants */
- protected final HTConfig config;
-
- /* Reader/writer object */
- private final HT_IO treeIO;
-
- /**
- * Variable Fields (will change throughout the existance of the SHT)
- */
- /* Latest timestamp found in the tree (at any given moment) */
- private long treeEnd;
-
- /* How many nodes exist in this tree, total */
- private int nodeCount;
-
- /* "Cache" to keep the active nodes in memory */
- protected Vector<CoreNode> latestBranch;
-
- /**
- * Create a new State History from scratch, using a SHTConfig object for
- * configuration
- *
- * @param conf
- * @throws IOException
- */
- private HistoryTree(HTConfig conf) throws IOException {
- /*
- * Simple assertion to make sure we have enough place in the 0th block
- * for the tree configuration
- */
- assert (conf.blockSize >= getTreeHeaderSize());
-
- config = conf;
- treeEnd = conf.treeStart;
- nodeCount = 0;
- latestBranch = new Vector<CoreNode>();
-
- /* Prepare the IO object */
- treeIO = new HT_IO(this, true);
-
- /* Add the first node to the tree */
- CoreNode firstNode = initNewCoreNode(-1, conf.treeStart);
- latestBranch.add(firstNode);
- }
-
- /**
- * "New State History" constructor, which doesn't use SHTConfig but the
- * individual values separately. Kept for now for backwards compatibility,
- * but you should definitely consider using SHTConfig instead (since its
- * contents can then change without directly affecting SHT's API).
- */
- HistoryTree(File newStateFile, int blockSize, int maxChildren,
- long startTime) throws IOException {
- this(new HTConfig(newStateFile, blockSize, maxChildren, startTime));
- }
-
- /**
- * "Reader" constructor : instantiate a SHTree from an existing tree file on
- * disk
- *
- * @param existingFileName
- * Path/filename of the history-file we are to open
- * @throws IOException
- */
- HistoryTree(File existingStateFile) throws IOException {
- /*
- * Open the file ourselves, get the tree header information we need,
- * then pass on the descriptor to the TreeIO object.
- */
- int rootNodeSeqNb, res;
- int bs, maxc;
- long startTime;
-
- /* Java I/O mumbo jumbo... */
- if (!existingStateFile.exists()) {
- throw new IOException("Selected state file does not exist"); //$NON-NLS-1$
- }
- if (existingStateFile.length() <= 0) {
- throw new IOException("Invalid state file selected, " + //$NON-NLS-1$
- "target file is empty"); //$NON-NLS-1$
- }
-
- FileInputStream fis = new FileInputStream(existingStateFile);
- ByteBuffer buffer = ByteBuffer.allocate(getTreeHeaderSize());
- FileChannel fc = fis.getChannel();
- buffer.order(ByteOrder.LITTLE_ENDIAN);
- buffer.clear();
- fc.read(buffer);
- buffer.flip();
-
- /*
- * Check the magic number,to make sure we're opening the right type of
- * file
- */
- res = buffer.getInt();
- if (res != HISTORY_FILE_MAGIC_NUMBER) {
- fc.close();
- fis.close();
- throw new IOException("Selected file does not" + //$NON-NLS-1$
- "look like a History Tree file"); //$NON-NLS-1$
- }
-
- res = buffer.getInt(); /* Major version number */
- if (res != MAJOR_VERSION) {
- fc.close();
- fis.close();
- throw new IOException("Select History Tree file is of an older " //$NON-NLS-1$
- + "format. Please use a previous version of " //$NON-NLS-1$
- + "the parser to open it."); //$NON-NLS-1$
- }
-
- res = buffer.getInt(); /* Minor version number */
-
- bs = buffer.getInt(); /* Block Size */
- maxc = buffer.getInt(); /* Max nb of children per node */
-
- this.nodeCount = buffer.getInt();
- rootNodeSeqNb = buffer.getInt();
- startTime = buffer.getLong();
-
- this.config = new HTConfig(existingStateFile, bs, maxc, startTime);
- fc.close();
- fis.close();
- /*
- * FIXME We close fis here and the TreeIO will then reopen the same
- * file, not extremely elegant. But how to pass the information here to
- * the SHT otherwise?
- */
- this.treeIO = new HT_IO(this, false);
-
- rebuildLatestBranch(rootNodeSeqNb);
- this.treeEnd = latestBranch.firstElement().getNodeEnd();
-
- /*
- * Make sure the history start time we read previously is consistent
- * with was is actually in the root node.
- */
- if (startTime != latestBranch.firstElement().getNodeStart()) {
- fc.close();
- fis.close();
- throw new IOException("Inconsistent start times in the" + //$NON-NLS-1$
- "history file, it might be corrupted."); //$NON-NLS-1$
- }
- }
-
- /**
- * "Save" the tree to disk. This method will cause the treeIO object to
- * commit all nodes to disk and then return the RandomAccessFile descriptor
- * so the Tree object can save its configuration into the header of the
- * file.
- *
- * @param requestedEndTime
- */
- void closeTree(long requestedEndTime) {
- FileChannel fc;
- ByteBuffer buffer;
- int i, res;
-
- /*
- * Work-around the "empty branches" that get created when the root node
- * becomes full. Overwrite the tree's end time with the original wanted
- * end-time, to ensure no queries are sent into those empty nodes.
- *
- * This won't be needed once extended nodes are implemented.
- */
- this.treeEnd = requestedEndTime;
-
- /* Close off the latest branch of the tree */
- for (i = 0; i < latestBranch.size(); i++) {
- latestBranch.get(i).closeThisNode(treeEnd);
- treeIO.writeNode(latestBranch.get(i));
- }
-
- /* Only use this for debugging purposes, it's VERY slow! */
- // this.checkIntegrity();
-
- fc = treeIO.getFcOut();
- buffer = ByteBuffer.allocate(getTreeHeaderSize());
- buffer.order(ByteOrder.LITTLE_ENDIAN);
- buffer.clear();
-
- /* Save the config of the tree to the header of the file */
- try {
- fc.position(0);
-
- buffer.putInt(HISTORY_FILE_MAGIC_NUMBER);
-
- buffer.putInt(MAJOR_VERSION);
- buffer.putInt(MINOR_VERSION);
-
- buffer.putInt(config.blockSize);
- buffer.putInt(config.maxChildren);
-
- buffer.putInt(nodeCount);
-
- /* root node seq. nb */
- buffer.putInt(latestBranch.firstElement().getSequenceNumber());
-
- /* start time of this history */
- buffer.putLong(latestBranch.firstElement().getNodeStart());
-
- buffer.flip();
- res = fc.write(buffer);
- assert (res <= getTreeHeaderSize());
- /* done writing the file header */
-
- } catch (IOException e) {
- /* We should not have any problems at this point... */
- e.printStackTrace();
- } finally {
- try {
- fc.close();
- } catch (IOException e) {
- e.printStackTrace();
- }
- }
- return;
- }
-
- /**
- * @name Accessors
- */
-
- long getTreeStart() {
- return config.treeStart;
- }
-
- long getTreeEnd() {
- return treeEnd;
- }
-
- int getNodeCount() {
- return nodeCount;
- }
-
- HT_IO getTreeIO() {
- return treeIO;
- }
-
- /**
- * Rebuild the latestBranch "cache" object by reading the nodes from disk
- * (When we are opening an existing file on disk and want to append to it,
- * for example).
- *
- * @param rootNodeSeqNb
- * The sequence number of the root node, so we know where to
- * start
- * @throws ClosedChannelException
- */
- private void rebuildLatestBranch(int rootNodeSeqNb) throws ClosedChannelException {
- HTNode nextChildNode;
-
- this.latestBranch = new Vector<CoreNode>();
-
- nextChildNode = treeIO.readNodeFromDisk(rootNodeSeqNb);
- latestBranch.add((CoreNode) nextChildNode);
- while (latestBranch.lastElement().getNbChildren() > 0) {
- nextChildNode = treeIO.readNodeFromDisk(latestBranch.lastElement().getLatestChild());
- latestBranch.add((CoreNode) nextChildNode);
- }
- }
-
- /**
- * Insert an interval in the tree
- *
- * @param interval
- */
- void insertInterval(HTInterval interval) throws TimeRangeException {
- if (interval.getStartTime() < config.treeStart) {
- throw new TimeRangeException();
- }
- tryInsertAtNode(interval, latestBranch.size() - 1);
- }
-
- /**
- * Inner method to find in which node we should add the interval.
- *
- * @param interval
- * The interval to add to the tree
- * @param indexOfNode
- * The index *in the latestBranch* where we are trying the
- * insertion
- */
- private void tryInsertAtNode(HTInterval interval, int indexOfNode) {
- HTNode targetNode = latestBranch.get(indexOfNode);
-
- /* Verify if there is enough room in this node to store this interval */
- if (interval.getIntervalSize() > targetNode.getNodeFreeSpace()) {
- /* Nope, not enough room. Insert in a new sibling instead. */
- addSiblingNode(indexOfNode);
- tryInsertAtNode(interval, latestBranch.size() - 1);
- return;
- }
-
- /* Make sure the interval time range fits this node */
- if (interval.getStartTime() < targetNode.getNodeStart()) {
- /*
- * No, this interval starts before the startTime of this node. We
- * need to check recursively in parents if it can fit.
- */
- assert (indexOfNode >= 1);
- tryInsertAtNode(interval, indexOfNode - 1);
- return;
- }
-
- /*
- * Ok, there is room, and the interval fits in this time slot. Let's add
- * it.
- */
- targetNode.addInterval(interval);
-
- /* Update treeEnd if needed */
- if (interval.getEndTime() > this.treeEnd) {
- this.treeEnd = interval.getEndTime();
- }
- return;
- }
-
- /**
- * Method to add a sibling to any node in the latest branch. This will add
- * children back down to the leaf level, if needed.
- *
- * @param indexOfNode
- * The index in latestBranch where we start adding
- */
- private void addSiblingNode(int indexOfNode) {
- int i;
- CoreNode newNode, prevNode;
- long splitTime = treeEnd;
-
- assert (indexOfNode < latestBranch.size());
-
- /* Check if we need to add a new root node */
- if (indexOfNode == 0) {
- addNewRootNode();
- return;
- }
-
- /* Check if we can indeed add a child to the target parent */
- if (latestBranch.get(indexOfNode - 1).getNbChildren() == config.maxChildren) {
- /* If not, add a branch starting one level higher instead */
- addSiblingNode(indexOfNode - 1);
- return;
- }
-
- /* Split off the new branch from the old one */
- for (i = indexOfNode; i < latestBranch.size(); i++) {
- latestBranch.get(i).closeThisNode(splitTime);
- treeIO.writeNode(latestBranch.get(i));
-
- prevNode = latestBranch.get(i - 1);
- newNode = initNewCoreNode(prevNode.getSequenceNumber(),
- splitTime + 1);
- prevNode.linkNewChild(newNode);
-
- latestBranch.set(i, newNode);
- }
- return;
- }
-
- /**
- * Similar to the previous method, except here we rebuild a completely new
- * latestBranch
- */
- private void addNewRootNode() {
- int i, depth;
- CoreNode oldRootNode, newRootNode, newNode, prevNode;
- long splitTime = this.treeEnd;
-
- oldRootNode = latestBranch.firstElement();
- newRootNode = initNewCoreNode(-1, config.treeStart);
-
- /* Tell the old root node that it isn't root anymore */
- oldRootNode.setParentSequenceNumber(newRootNode.getSequenceNumber());
-
- /* Close off the whole current latestBranch */
- for (i = 0; i < latestBranch.size(); i++) {
- latestBranch.get(i).closeThisNode(splitTime);
- treeIO.writeNode(latestBranch.get(i));
- }
-
- /* Link the new root to its first child (the previous root node) */
- newRootNode.linkNewChild(oldRootNode);
-
- /* Rebuild a new latestBranch */
- depth = latestBranch.size();
- latestBranch = new Vector<CoreNode>();
- latestBranch.add(newRootNode);
- for (i = 1; i < depth + 1; i++) {
- prevNode = latestBranch.get(i - 1);
- newNode = initNewCoreNode(prevNode.getParentSequenceNumber(),
- splitTime + 1);
- prevNode.linkNewChild(newNode);
- latestBranch.add(newNode);
- }
- }
-
- /**
- * Add a new empty node to the tree.
- *
- * @param parentSeqNumber
- * Sequence number of this node's parent
- * @param startTime
- * Start time of the new node
- * @return The newly created node
- */
- private CoreNode initNewCoreNode(int parentSeqNumber, long startTime) {
- CoreNode newNode = new CoreNode(this, this.nodeCount, parentSeqNumber,
- startTime);
- this.nodeCount++;
-
- /* Update the treeEnd if needed */
- if (startTime >= this.treeEnd) {
- this.treeEnd = startTime + 1;
- }
- return newNode;
- }
-
- /**
- * Inner method to select the next child of the current node intersecting
- * the given timestamp. Useful for moving down the tree following one
- * branch.
- *
- * @param currentNode
- * @param t
- * @return The child node intersecting t
- * @throws ClosedChannelException
- * If the file channel was closed while we were reading the tree
- */
- HTNode selectNextChild(CoreNode currentNode, long t) throws ClosedChannelException {
- assert (currentNode.getNbChildren() > 0);
- int potentialNextSeqNb = currentNode.getSequenceNumber();
-
- for (int i = 0; i < currentNode.getNbChildren(); i++) {
- if (t >= currentNode.getChildStart(i)) {
- potentialNextSeqNb = currentNode.getChild(i);
- } else {
- break;
- }
- }
- /*
- * Once we exit this loop, we should have found a children to follow. If
- * we didn't, there's a problem.
- */
- assert (potentialNextSeqNb != currentNode.getSequenceNumber());
-
- /*
- * Since this code path is quite performance-critical, avoid iterating
- * through the whole latestBranch array if we know for sure the next
- * node has to be on disk
- */
- if (currentNode.isDone()) {
- return treeIO.readNodeFromDisk(potentialNextSeqNb);
- }
- return treeIO.readNode(potentialNextSeqNb);
- }
-
- /**
- * Helper function to get the size of the "tree header" in the tree-file The
- * nodes will use this offset to know where they should be in the file. This
- * should always be a multiple of 4K.
- */
- static int getTreeHeaderSize() {
- return 4096;
- }
-
- long getFileSize() {
- return config.stateFile.length();
- }
-
- // ------------------------------------------------------------------------
- // Test/debugging methods
- // ------------------------------------------------------------------------
-
- /* Only used for debugging, shouldn't be externalized */
- @SuppressWarnings("nls")
- boolean checkNodeIntegrity(HTNode zenode) {
-
- HTNode otherNode;
- CoreNode node;
- StringBuffer buf = new StringBuffer();
- boolean ret = true;
-
- // FIXME /* Only testing Core Nodes for now */
- if (!(zenode instanceof CoreNode)) {
- return true;
- }
-
- node = (CoreNode) zenode;
-
- try {
- /*
- * Test that this node's start and end times match the start of the
- * first child and the end of the last child, respectively
- */
- if (node.getNbChildren() > 0) {
- otherNode = treeIO.readNode(node.getChild(0));
- if (node.getNodeStart() != otherNode.getNodeStart()) {
- buf.append("Start time of node (" + node.getNodeStart() + ") "
- + "does not match start time of first child " + "("
- + otherNode.getNodeStart() + "), " + "node #"
- + otherNode.getSequenceNumber() + ")\n");
- ret = false;
- }
- if (node.isDone()) {
- otherNode = treeIO.readNode(node.getLatestChild());
- if (node.getNodeEnd() != otherNode.getNodeEnd()) {
- buf.append("End time of node (" + node.getNodeEnd()
- + ") does not match end time of last child ("
- + otherNode.getNodeEnd() + ", node #"
- + otherNode.getSequenceNumber() + ")\n");
- ret = false;
- }
- }
- }
-
- /*
- * Test that the childStartTimes[] array matches the real nodes' start
- * times
- */
- for (int i = 0; i < node.getNbChildren(); i++) {
- otherNode = treeIO.readNode(node.getChild(i));
- if (otherNode.getNodeStart() != node.getChildStart(i)) {
- buf.append(" Expected start time of child node #"
- + node.getChild(i) + ": " + node.getChildStart(i)
- + "\n" + " Actual start time of node #"
- + otherNode.getSequenceNumber() + ": "
- + otherNode.getNodeStart() + "\n");
- ret = false;
- }
- }
-
- } catch (ClosedChannelException e) {
- e.printStackTrace();
- }
-
- if (!ret) {
- System.out.println("");
- System.out.println("SHT: Integrity check failed for node #"
- + node.getSequenceNumber() + ":");
- System.out.println(buf.toString());
- }
- return ret;
- }
-
- void checkIntegrity() {
- try {
- for (int i = 0; i < nodeCount; i++) {
- checkNodeIntegrity(treeIO.readNode(i));
- }
- } catch (ClosedChannelException e) {
- e.printStackTrace();
- }
- }
-
- /* Only used for debugging, shouldn't be externalized */
- @SuppressWarnings("nls")
- @Override
- public String toString() {
- return "Information on the current tree:\n\n" + "Blocksize: "
- + config.blockSize + "\n" + "Max nb. of children per node: "
- + config.maxChildren + "\n" + "Number of nodes: " + nodeCount
- + "\n" + "Depth of the tree: " + latestBranch.size() + "\n"
- + "Size of the treefile: " + this.getFileSize() + "\n"
- + "Root node has sequence number: "
- + latestBranch.firstElement().getSequenceNumber() + "\n"
- + "'Latest leaf' has sequence number: "
- + latestBranch.lastElement().getSequenceNumber();
- }
-
- private int curDepth;
-
- /**
- * Start at currentNode and print the contents of all its children, in
- * pre-order. Give the root node in parameter to visit the whole tree, and
- * have a nice overview.
- */
- @SuppressWarnings("nls")
- private void preOrderPrint(PrintWriter writer, boolean printIntervals,
- CoreNode currentNode) {
- /* Only used for debugging, shouldn't be externalized */
- int i, j;
- HTNode nextNode;
-
- writer.println(currentNode.toString());
- if (printIntervals) {
- currentNode.debugPrintIntervals(writer);
- }
- curDepth++;
-
- try {
- for (i = 0; i < currentNode.getNbChildren(); i++) {
- nextNode = treeIO.readNode(currentNode.getChild(i));
- assert (nextNode instanceof CoreNode); // TODO temporary
- for (j = 0; j < curDepth - 1; j++) {
- writer.print(" ");
- }
- writer.print("+-");
- preOrderPrint(writer, printIntervals, (CoreNode) nextNode);
- }
- } catch (ClosedChannelException e) {
- e.printStackTrace();
- }
- curDepth--;
- return;
- }
-
- /**
- * Print out the full tree for debugging purposes
- *
- * @param writer
- * PrintWriter in which to write the output
- * @param printIntervals
- * Says if you want to output the full interval information
- */
- void debugPrintFullTree(PrintWriter writer, boolean printIntervals) {
- /* Only used for debugging, shouldn't be externalized */
- curDepth = 0;
- this.preOrderPrint(writer, false, latestBranch.firstElement());
-
- if (printIntervals) {
- writer.println("\nDetails of intervals:"); //$NON-NLS-1$
- curDepth = 0;
- this.preOrderPrint(writer, true, latestBranch.firstElement());
- }
- writer.println('\n');
- }
-
-}
+++ /dev/null
-/*******************************************************************************
- * Copyright (c) 2012 Ericsson
- * Copyright (c) 2010, 2011 École Polytechnique de Montréal
- * Copyright (c) 2010, 2011 Alexandre Montplaisir <alexandre.montplaisir@gmail.com>
- *
- * All rights reserved. This program and the accompanying materials are
- * made available under the terms of the Eclipse Public License v1.0 which
- * accompanies this distribution, and is available at
- * http://www.eclipse.org/legal/epl-v10.html
- *
- *******************************************************************************/
-
-package org.eclipse.linuxtools.internal.tmf.core.statesystem.historytree;
-
-import java.io.File;
-import java.io.FileInputStream;
-import java.io.IOException;
-import java.io.PrintWriter;
-import java.nio.channels.ClosedChannelException;
-import java.util.List;
-
-import org.eclipse.linuxtools.internal.tmf.core.statesystem.IStateHistoryBackend;
-import org.eclipse.linuxtools.tmf.core.exceptions.StateSystemDisposedException;
-import org.eclipse.linuxtools.tmf.core.exceptions.TimeRangeException;
-import org.eclipse.linuxtools.tmf.core.interval.ITmfStateInterval;
-import org.eclipse.linuxtools.tmf.core.statevalue.ITmfStateValue;
-import org.eclipse.linuxtools.tmf.core.statevalue.TmfStateValue;
-
-/**
- * History Tree backend for storing a state history. This is the basic version
- * that runs in the same thread as the class creating it.
- *
- * @author alexmont
- *
- */
-public class HistoryTreeBackend implements IStateHistoryBackend {
-
- /** The history tree that sits underneath */
- protected final HistoryTree sht;
-
- /** Direct reference to the tree's IO object */
- private final HT_IO treeIO;
-
- /** Indicates if the history tree construction is done */
- protected boolean isFinishedBuilding = false;
-
- /**
- * Construtor for new history files. Use this when creating a new history
- * from scratch.
- *
- * @param newStateFile
- * The filename/location where to store the state history (Should
- * end in .ht)
- * @param blockSize
- * The size of the blocks in the history file. This should be a
- * multiple of 4096.
- * @param maxChildren
- * The maximum number of children each core node can have
- * @param startTime
- * The earliest time stamp that will be stored in the history
- * @throws IOException
- * Thrown if we can't create the file for some reason
- */
- public HistoryTreeBackend(File newStateFile, int blockSize,
- int maxChildren, long startTime) throws IOException {
- sht = new HistoryTree(newStateFile, blockSize, maxChildren, startTime);
- treeIO = sht.getTreeIO();
- }
-
- /**
- * Construtor for new history files. Use this when creating a new history
- * from scratch. This version supplies sane defaults for the configuration
- * parameters.
- *
- * @param newStateFile
- * The filename/location where to store the state history (Should
- * end in .ht)
- * @param startTime
- * The earliest time stamp that will be stored in the history
- * @throws IOException
- * Thrown if we can't create the file for some reason
- */
- public HistoryTreeBackend(File newStateFile, long startTime)
- throws IOException {
- this(newStateFile, 64 * 1024, 50, startTime);
- }
-
- /**
- * Existing history constructor. Use this to open an existing state-file.
- *
- * @param existingStateFile
- * Filename/location of the history we want to load
- * @throws IOException
- * If we can't read the file, if it doesn't exist or is not
- * recognized
- */
- public HistoryTreeBackend(File existingStateFile) throws IOException {
- sht = new HistoryTree(existingStateFile);
- treeIO = sht.getTreeIO();
- isFinishedBuilding = true;
- }
-
- @Override
- public long getStartTime() {
- return sht.getTreeStart();
- }
-
- @Override
- public long getEndTime() {
- return sht.getTreeEnd();
- }
-
- @Override
- public void insertPastState(long stateStartTime, long stateEndTime,
- int quark, ITmfStateValue value) throws TimeRangeException {
- HTInterval interval = new HTInterval(stateStartTime, stateEndTime,
- quark, (TmfStateValue) value);
-
- /* Start insertions at the "latest leaf" */
- sht.insertInterval(interval);
- }
-
- @Override
- public void finishedBuilding(long endTime) {
- sht.closeTree(endTime);
- isFinishedBuilding = true;
- }
-
- @Override
- public FileInputStream supplyAttributeTreeReader() {
- return treeIO.supplyATReader();
- }
-
- @Override
- public File supplyAttributeTreeWriterFile() {
- return treeIO.supplyATWriterFile();
- }
-
- @Override
- public long supplyAttributeTreeWriterFilePosition() {
- return treeIO.supplyATWriterFilePos();
- }
-
- @Override
- public void removeFiles() {
- treeIO.deleteFile();
- }
-
- @Override
- public void dispose() {
- if (isFinishedBuilding) {
- treeIO.closeFile();
- } else {
- /*
- * The build is being interrupted, delete the file we partially
- * built since it won't be complete, so shouldn't be re-used in the
- * future (.deleteFile() will close the file first)
- */
- treeIO.deleteFile();
- }
- }
-
- @Override
- public void doQuery(List<ITmfStateInterval> stateInfo, long t)
- throws TimeRangeException, StateSystemDisposedException {
- if (!checkValidTime(t)) {
- /* We can't possibly have information about this query */
- throw new TimeRangeException();
- }
-
- /* We start by reading the information in the root node */
- // FIXME using CoreNode for now, we'll have to redo this part to handle
- // different node types
- CoreNode currentNode = sht.latestBranch.firstElement();
- currentNode.writeInfoFromNode(stateInfo, t);
-
- /* Then we follow the branch down in the relevant children */
- try {
- while (currentNode.getNbChildren() > 0) {
- currentNode = (CoreNode) sht.selectNextChild(currentNode, t);
- currentNode.writeInfoFromNode(stateInfo, t);
- }
- } catch (ClosedChannelException e) {
- throw new StateSystemDisposedException();
- }
-
- /*
- * The stateInfo should now be filled with everything needed, we pass
- * the control back to the State System.
- */
- return;
- }
-
- @Override
- public ITmfStateInterval doSingularQuery(long t, int attributeQuark)
- throws TimeRangeException, StateSystemDisposedException {
- return getRelevantInterval(t, attributeQuark);
- }
-
- @Override
- public boolean checkValidTime(long t) {
- return (t >= sht.getTreeStart() && t <= sht.getTreeEnd());
- }
-
- /**
- * Inner method to find the interval in the tree containing the requested
- * key/timestamp pair, wherever in which node it is.
- *
- * @param t
- * @param key
- * @return The node containing the information we want
- */
- private HTInterval getRelevantInterval(long t, int key)
- throws TimeRangeException, StateSystemDisposedException {
- if (!checkValidTime(t)) {
- throw new TimeRangeException();
- }
-
- // FIXME using CoreNode for now, we'll have to redo this part to handle
- // different node types
- CoreNode currentNode = sht.latestBranch.firstElement();
- HTInterval interval = currentNode.getRelevantInterval(key, t);
-
- try {
- while (interval == null && currentNode.getNbChildren() > 0) {
- currentNode = (CoreNode) sht.selectNextChild(currentNode, t);
- interval = currentNode.getRelevantInterval(key, t);
- }
- } catch (ClosedChannelException e) {
- throw new StateSystemDisposedException();
- }
- /*
- * Since we should now have intervals at every attribute/timestamp
- * combination, it should NOT be null here.
- */
- assert (interval != null);
- return interval;
- }
-
- /**
- * Return the size of the tree history file
- *
- * @return The current size of the history file in bytes
- */
- public long getFileSize() {
- return sht.getFileSize();
- }
-
- /**
- * Return the current depth of the tree, ie the number of node levels.
- *
- * @return The tree depth
- */
- public int getTreeDepth() {
- return sht.latestBranch.size();
- }
-
- /**
- * Return the average node usage as a percentage (between 0 and 100)
- *
- * @return Average node usage %
- */
- public int getAverageNodeUsage() {
- HTNode node;
- long total = 0;
- long ret;
-
- try {
- for (int seq = 0; seq < sht.getNodeCount(); seq++) {
- node = treeIO.readNode(seq);
- total += node.getNodeUsagePRC();
- }
- } catch (ClosedChannelException e) {
- e.printStackTrace();
- }
-
- ret = total / sht.getNodeCount();
- assert (ret >= 0 && ret <= 100);
- return (int) ret;
- }
-
- @Override
- public void debugPrint(PrintWriter writer) {
- /* By default don't print out all the intervals */
- this.debugPrint(writer, false);
- }
-
- /**
- * The basic debugPrint method will print the tree structure, but not their
- * contents.
- *
- * This method here print the contents (the intervals) as well.
- *
- * @param writer
- * The PrintWriter to which the debug info will be written
- * @param printIntervals
- * Should we also print every contained interval individually?
- */
- public void debugPrint(PrintWriter writer, boolean printIntervals) {
- /* Only used for debugging, shouldn't be externalized */
- writer.println("------------------------------"); //$NON-NLS-1$
- writer.println("State History Tree:\n"); //$NON-NLS-1$
- writer.println(sht.toString());
- writer.println("Average node utilization: " //$NON-NLS-1$
- + this.getAverageNodeUsage());
- writer.println(""); //$NON-NLS-1$
-
- sht.debugPrintFullTree(writer, printIntervals);
- }
-}
+++ /dev/null
-/*******************************************************************************
- * Copyright (c) 2012 Ericsson
- * Copyright (c) 2010, 2011 École Polytechnique de Montréal
- * Copyright (c) 2010, 2011 Alexandre Montplaisir <alexandre.montplaisir@gmail.com>
- *
- * All rights reserved. This program and the accompanying materials are
- * made available under the terms of the Eclipse Public License v1.0 which
- * accompanies this distribution, and is available at
- * http://www.eclipse.org/legal/epl-v10.html
- *
- *******************************************************************************/
-
-package org.eclipse.linuxtools.internal.tmf.core.statesystem.historytree;
-
-import java.io.File;
-import java.io.IOException;
-import java.util.concurrent.ArrayBlockingQueue;
-import java.util.concurrent.BlockingQueue;
-
-import org.eclipse.linuxtools.tmf.core.event.TmfTimestamp;
-import org.eclipse.linuxtools.tmf.core.exceptions.TimeRangeException;
-import org.eclipse.linuxtools.tmf.core.statevalue.ITmfStateValue;
-import org.eclipse.linuxtools.tmf.core.statevalue.TmfStateValue;
-
-/**
- * Variant of the HistoryTreeBackend which runs all the interval-insertion logic
- * in a separate thread.
- *
- * @author alexmont
- *
- */
-public final class ThreadedHistoryTreeBackend extends HistoryTreeBackend
- implements Runnable {
-
- /*
- * From superclass:
- *
- * protected final StateHistoryTree sht;
- */
-
- private BlockingQueue<HTInterval> intervalQueue;
- private final Thread shtThread;
-
- /**
- * New state history constructor
- *
- * Note that it usually doesn't make sense to use a Threaded HT if you're
- * opening an existing state-file, but you know what you're doing...
- *
- * @param newStateFile
- * The name of the history file that will be created. Should end
- * in ".ht"
- * @param blockSize
- * The size of the blocks in the file
- * @param maxChildren
- * The maximum number of children allowed for each core node
- * @param startTime
- * The earliest timestamp stored in the history
- * @param queueSize
- * The size of the interval insertion queue. 2000 - 10000 usually
- * works well
- * @throws IOException
- * If there was a problem opening the history file for writing
- */
- public ThreadedHistoryTreeBackend(File newStateFile, int blockSize,
- int maxChildren, long startTime, int queueSize) throws IOException {
- super(newStateFile, blockSize, maxChildren, startTime);
-
- intervalQueue = new ArrayBlockingQueue<HTInterval>(queueSize);
- shtThread = new Thread(this, "History Tree Thread"); //$NON-NLS-1$
- shtThread.start();
- }
-
- /**
- * New State History constructor. This version provides default values for
- * blockSize and maxChildren.
- *
- * @param newStateFile
- * The name of the history file that will be created. Should end
- * in ".ht"
- * @param startTime
- * The earliest timestamp stored in the history
- * @param queueSize
- * The size of the interval insertion queue. 2000 - 10000 usually
- * works well
- * @throws IOException
- * If there was a problem opening the history file for writing
- */
- public ThreadedHistoryTreeBackend(File newStateFile, long startTime,
- int queueSize) throws IOException {
- super(newStateFile, startTime);
-
- intervalQueue = new ArrayBlockingQueue<HTInterval>(queueSize);
- shtThread = new Thread(this, "History Tree Thread"); //$NON-NLS-1$
- shtThread.start();
- }
-
- /*
- * The Threaded version does not specify an "existing file" constructor,
- * since the history is already built (and we only use the other thread
- * during building). Just use a plain HistoryTreeProvider in this case.
- *
- * TODO but what about streaming??
- */
-
- @Override
- public void insertPastState(long stateStartTime, long stateEndTime,
- int quark, ITmfStateValue value) throws TimeRangeException {
- /*
- * Here, instead of directly inserting the elements in the History Tree
- * underneath, we'll put them in the Queue. They will then be taken and
- * processed by the other thread executing the run() method.
- */
- HTInterval interval = new HTInterval(stateStartTime, stateEndTime,
- quark, (TmfStateValue) value);
- try {
- intervalQueue.put(interval);
- } catch (InterruptedException e) {
- /* We should not get interrupted here */
- System.out.println("State system got interrupted!"); //$NON-NLS-1$
- e.printStackTrace();
- }
- }
-
- @Override
- public void finishedBuilding(long endTime) {
- /*
- * We need to commit everything in the History Tree and stop the
- * standalone thread before returning to the StateHistorySystem. (SHS
- * will then write the Attribute Tree to the file, that must not happen
- * at the same time we are writing the last nodes!)
- */
-
- stopRunningThread(endTime);
- isFinishedBuilding = true;
- return;
- }
-
- @Override
- public void dispose() {
- if (!isFinishedBuilding) {
- stopRunningThread(TmfTimestamp.PROJECT_IS_CANNED.getValue());
- }
- /*
- * isFinishedBuilding remains false, so the superclass will ask the
- * back-end to delete the file.
- */
- super.dispose();
- }
-
- private void stopRunningThread(long endTime) {
- if (!shtThread.isAlive()) {
- return;
- }
-
- /*
- * Send a "poison pill" in the queue, then wait for the HT to finish
- * its closeTree()
- */
- try {
- HTInterval pill = new HTInterval(-1, endTime, -1, TmfStateValue.nullValue());
- intervalQueue.put(pill);
- shtThread.join();
- } catch (TimeRangeException e) {
- e.printStackTrace();
- } catch (InterruptedException e) {
- e.printStackTrace();
- }
- }
-
- @Override
- public void run() {
- if (intervalQueue == null) {
- System.err.println("Cannot start the storage backend without its interval queue."); //$NON-NLS-1$
- return;
- }
- HTInterval currentInterval;
- try {
- currentInterval = intervalQueue.take();
- while (currentInterval.getStartTime() != -1) {
- /* Send the interval to the History Tree */
- sht.insertInterval(currentInterval);
- currentInterval = intervalQueue.take();
- }
- assert (currentInterval.getAttribute() == -1);
- /*
- * We've been told we're done, let's write down everything and quit.
- * The end time of this "signal interval" is actually correct.
- */
- sht.closeTree(currentInterval.getEndTime());
- return;
- } catch (InterruptedException e) {
- /* We've been interrupted abnormally */
- System.out.println("State History Tree interrupted!"); //$NON-NLS-1$
- e.printStackTrace();
- } catch (TimeRangeException e) {
- /* This also should not happen */
- e.printStackTrace();
- }
- }
-
-}
--- /dev/null
+package org.eclipse.linuxtools.tmf.core.interval;
+
+import java.util.Comparator;
+
+/**
+ * Comparator ordering ITmfStateInterval objects by their *end times*. Since a
+ * Comparable interval would be ambiguous (should it sort by start time, end
+ * time, or even value?), dedicated comparators are supplied instead.
+ *
+ * @author Alexandre Montplaisir
+ * @since 2.0
+ */
+public class TmfIntervalEndComparator implements Comparator<ITmfStateInterval> {
+
+    /**
+     * Three-way comparison of two intervals' end times.
+     *
+     * @return A negative value if o1 ends first, a positive value if o2 ends
+     *         first, 0 if they end at the same time
+     */
+    @Override
+    public int compare(ITmfStateInterval o1, ITmfStateInterval o2) {
+        final long end1 = o1.getEndTime();
+        final long end2 = o2.getEndTime();
+
+        /* Explicit three-way compare: (end1 - end2) could overflow a long */
+        return (end1 < end2) ? -1 : ((end1 > end2) ? 1 : 0);
+    }
+}
import java.io.IOException;
import org.eclipse.linuxtools.internal.tmf.core.statesystem.HistoryBuilder;
-import org.eclipse.linuxtools.internal.tmf.core.statesystem.IStateHistoryBackend;
-import org.eclipse.linuxtools.internal.tmf.core.statesystem.historytree.HistoryTreeBackend;
-import org.eclipse.linuxtools.internal.tmf.core.statesystem.historytree.ThreadedHistoryTreeBackend;
+import org.eclipse.linuxtools.internal.tmf.core.statesystem.backends.IStateHistoryBackend;
+import org.eclipse.linuxtools.internal.tmf.core.statesystem.backends.InMemoryBackend;
+import org.eclipse.linuxtools.internal.tmf.core.statesystem.backends.NullBackend;
+import org.eclipse.linuxtools.internal.tmf.core.statesystem.backends.historytree.HistoryTreeBackend;
+import org.eclipse.linuxtools.internal.tmf.core.statesystem.backends.historytree.ThreadedHistoryTreeBackend;
import org.eclipse.linuxtools.tmf.core.component.TmfComponent;
import org.eclipse.linuxtools.tmf.core.exceptions.TmfTraceException;
}
return builder.getStateSystemQuerier();
}
+
+    /**
+     * Build a state system backed by a null history: no intervals are stored
+     * anywhere, so only {@link ITmfStateSystem#queryOngoingState} can be used
+     * on the resulting state system.
+     *
+     * Construction is always "manual" here: callers are expected to drive the
+     * ongoing state themselves by invoking input.processEvent().
+     *
+     * @param input
+     *            The input plugin to build the history
+     * @return Reference to the history-less state system that got built
+     * @since 2.0
+     */
+    public static ITmfStateSystem newNullHistory(IStateChangeInput input) {
+        /* buildManually == true: no TMF signals involved for a null history */
+        HistoryBuilder builder =
+                new HistoryBuilder(input, new NullBackend(), true);
+        return builder.getStateSystemQuerier();
+    }
+
+    /**
+     * Create a new state system using in-memory interval storage. This should
+     * only be done for very small state systems, since the storage is
+     * naturally limited to 2^31 intervals.
+     *
+     * If 'buildManually' is set, this blocks the caller until building is done.
+     *
+     * @param input
+     *            The state change input to use
+     * @param buildManually
+     *            Set to true to block the caller and build without using TMF
+     *            signals (for test programs most of the time). Use false if you
+     *            are using the TMF facilities (experiments, etc.)
+     * @return Reference to the state system that just got built
+     * @since 2.0
+     */
+    public static ITmfStateSystem newInMemHistory(IStateChangeInput input,
+            boolean buildManually) {
+        IStateHistoryBackend backend = new InMemoryBackend(input.getStartTime());
+        HistoryBuilder builder = new HistoryBuilder(input, backend, buildManually);
+        return builder.getStateSystemQuerier();
+    }
}